Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'wireless-drivers-next-for-davem-2015-04-01' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
Major changes:

ath9k:

* add Active Interference Cancellation, a method implemented in the HW
to counter WLAN RX sensitivity degradation when BT is transmitting
at the same time. This feature is supported by cards like WB222
based on AR9462.

iwlwifi:

* Location Aware Regulatory was added by Arik
* 8000 device family work
* update to the BT Coex firmware API

brcmfmac:

* add new BCM43455 and BCM43457 SDIO device support
* add new BCM43430 SDIO device support

wil6210:

* take care of AP bridging
* fix NAPI behavior
* found approach to achieve 4*n+2 alignment of Rx frames

rt2x00:

* add new rt2800usb device D-Link DWA-130

rtlwifi:

* add USB ID for D-Link DWA-131
* add USB ID for ASUS N10 WiFi dongle

mwifiex:

* throughput enhancements
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+4466 -1351
+3 -3
drivers/bcma/Kconfig
··· 1 1 config BCMA_POSSIBLE 2 2 bool 3 - depends on HAS_IOMEM && HAS_DMA && PCI 3 + depends on HAS_IOMEM && HAS_DMA 4 4 default y 5 5 6 6 menu "Broadcom specific AMBA" ··· 45 45 46 46 If unsure, say N 47 47 48 - # TODO: make it depend on PCI when ready 49 48 config BCMA_DRIVER_PCI 50 - bool 49 + bool "BCMA Broadcom PCI core driver" 50 + depends on BCMA && PCI 51 51 default y 52 52 help 53 53 BCMA bus may have many versions of PCIe core. This driver
+20
drivers/bcma/bcma_private.h
··· 106 106 #endif /* CONFIG_BCMA_HOST_SOC && CONFIG_OF */ 107 107 108 108 /* driver_pci.c */ 109 + #ifdef CONFIG_BCMA_DRIVER_PCI 109 110 u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address); 110 111 void bcma_core_pci_early_init(struct bcma_drv_pci *pc); 111 112 void bcma_core_pci_init(struct bcma_drv_pci *pc); 112 113 void bcma_core_pci_up(struct bcma_drv_pci *pc); 113 114 void bcma_core_pci_down(struct bcma_drv_pci *pc); 115 + #else 116 + static inline void bcma_core_pci_early_init(struct bcma_drv_pci *pc) 117 + { 118 + WARN_ON(pc->core->bus->hosttype == BCMA_HOSTTYPE_PCI); 119 + } 120 + static inline void bcma_core_pci_init(struct bcma_drv_pci *pc) 121 + { 122 + /* Initialization is required for PCI hosted bus */ 123 + WARN_ON(pc->core->bus->hosttype == BCMA_HOSTTYPE_PCI); 124 + } 125 + #endif 114 126 115 127 /* driver_pcie2.c */ 128 + #ifdef CONFIG_BCMA_DRIVER_PCI 116 129 void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2); 117 130 void bcma_core_pcie2_up(struct bcma_drv_pcie2 *pcie2); 131 + #else 132 + static inline void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2) 133 + { 134 + /* Initialization is required for PCI hosted bus */ 135 + WARN_ON(pcie2->core->bus->hosttype == BCMA_HOSTTYPE_PCI); 136 + } 137 + #endif 118 138 119 139 extern int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc); 120 140
+15 -8
drivers/bcma/driver_gpio.c
··· 17 17 18 18 #include "bcma_private.h" 19 19 20 + #define BCMA_GPIO_MAX_PINS 32 21 + 20 22 static inline struct bcma_drv_cc *bcma_gpio_get_cc(struct gpio_chip *chip) 21 23 { 22 24 return container_of(chip, struct bcma_drv_cc, gpio); ··· 206 204 207 205 int bcma_gpio_init(struct bcma_drv_cc *cc) 208 206 { 207 + struct bcma_bus *bus = cc->core->bus; 209 208 struct gpio_chip *chip = &cc->gpio; 210 209 int err; 211 210 ··· 225 222 if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC) 226 223 chip->of_node = cc->core->dev.of_node; 227 224 #endif 228 - switch (cc->core->bus->chipinfo.id) { 225 + switch (bus->chipinfo.id) { 229 226 case BCMA_CHIP_ID_BCM5357: 230 227 case BCMA_CHIP_ID_BCM53572: 231 228 chip->ngpio = 32; ··· 234 231 chip->ngpio = 16; 235 232 } 236 233 237 - /* There is just one SoC in one device and its GPIO addresses should be 238 - * deterministic to address them more easily. The other buses could get 239 - * a random base number. */ 240 - if (cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC) 241 - chip->base = 0; 242 - else 243 - chip->base = -1; 234 + /* 235 + * On MIPS we register GPIO devices (LEDs, buttons) using absolute GPIO 236 + * pin numbers. We don't have Device Tree there and we can't really use 237 + * relative (per chip) numbers. 238 + * So let's use predictable base for BCM47XX and "random" for all other. 239 + */ 240 + #if IS_BUILTIN(CONFIG_BCM47XX) 241 + chip->base = bus->num * BCMA_GPIO_MAX_PINS; 242 + #else 243 + chip->base = -1; 244 + #endif 244 245 245 246 err = bcma_gpio_irq_domain_init(cc); 246 247 if (err)
-33
drivers/bcma/driver_pci.c
··· 282 282 } 283 283 EXPORT_SYMBOL_GPL(bcma_core_pci_power_save); 284 284 285 - int bcma_core_pci_irq_ctl(struct bcma_bus *bus, struct bcma_device *core, 286 - bool enable) 287 - { 288 - struct pci_dev *pdev; 289 - u32 coremask, tmp; 290 - int err = 0; 291 - 292 - if (bus->hosttype != BCMA_HOSTTYPE_PCI) { 293 - /* This bcma device is not on a PCI host-bus. So the IRQs are 294 - * not routed through the PCI core. 295 - * So we must not enable routing through the PCI core. */ 296 - goto out; 297 - } 298 - 299 - pdev = bus->host_pci; 300 - 301 - err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp); 302 - if (err) 303 - goto out; 304 - 305 - coremask = BIT(core->core_index) << 8; 306 - if (enable) 307 - tmp |= coremask; 308 - else 309 - tmp &= ~coremask; 310 - 311 - err = pci_write_config_dword(pdev, BCMA_PCI_IRQMASK, tmp); 312 - 313 - out: 314 - return err; 315 - } 316 - EXPORT_SYMBOL_GPL(bcma_core_pci_irq_ctl); 317 - 318 285 static void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend) 319 286 { 320 287 u32 w;
+34
drivers/bcma/host_pci.c
··· 351 351 bcma_core_pci_down(&bus->drv_pci[0]); 352 352 } 353 353 EXPORT_SYMBOL_GPL(bcma_host_pci_down); 354 + 355 + /* See also si_pci_setup */ 356 + int bcma_host_pci_irq_ctl(struct bcma_bus *bus, struct bcma_device *core, 357 + bool enable) 358 + { 359 + struct pci_dev *pdev; 360 + u32 coremask, tmp; 361 + int err = 0; 362 + 363 + if (bus->hosttype != BCMA_HOSTTYPE_PCI) { 364 + /* This bcma device is not on a PCI host-bus. So the IRQs are 365 + * not routed through the PCI core. 366 + * So we must not enable routing through the PCI core. */ 367 + goto out; 368 + } 369 + 370 + pdev = bus->host_pci; 371 + 372 + err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp); 373 + if (err) 374 + goto out; 375 + 376 + coremask = BIT(core->core_index) << 8; 377 + if (enable) 378 + tmp |= coremask; 379 + else 380 + tmp &= ~coremask; 381 + 382 + err = pci_write_config_dword(pdev, BCMA_PCI_IRQMASK, tmp); 383 + 384 + out: 385 + return err; 386 + } 387 + EXPORT_SYMBOL_GPL(bcma_host_pci_irq_ctl);
+5 -4
drivers/net/wireless/ath/ar5523/ar5523.c
··· 779 779 ieee80211_stop_queues(hw); 780 780 } 781 781 782 - data->skb = skb; 783 - 784 782 spin_lock_irqsave(&ar->tx_data_list_lock, flags); 785 783 list_add_tail(&data->list, &ar->tx_queue_pending); 786 784 spin_unlock_irqrestore(&ar->tx_data_list_lock, flags); ··· 815 817 if (!data) 816 818 break; 817 819 818 - skb = data->skb; 820 + txi = container_of((void *)data, struct ieee80211_tx_info, 821 + driver_data); 819 822 txqid = 0; 820 - txi = IEEE80211_SKB_CB(skb); 823 + 824 + skb = container_of((void *)txi, struct sk_buff, cb); 821 825 paylen = skb->len; 826 + 822 827 urb = usb_alloc_urb(0, GFP_KERNEL); 823 828 if (!urb) { 824 829 ar5523_err(ar, "Failed to allocate TX urb\n");
-1
drivers/net/wireless/ath/ar5523/ar5523.h
··· 74 74 struct ar5523_tx_data { 75 75 struct list_head list; 76 76 struct ar5523 *ar; 77 - struct sk_buff *skb; 78 77 struct urb *urb; 79 78 }; 80 79
+3
drivers/net/wireless/ath/ath.h
··· 131 131 void (*enable_write_buffer)(void *); 132 132 void (*write_flush) (void *); 133 133 u32 (*rmw)(void *, u32 reg_offset, u32 set, u32 clr); 134 + void (*enable_rmw_buffer)(void *); 135 + void (*rmw_flush) (void *); 136 + 134 137 }; 135 138 136 139 struct ath_common;
+1
drivers/net/wireless/ath/ath5k/ath5k.h
··· 1283 1283 #define ATH_STAT_PROMISC 1 1284 1284 #define ATH_STAT_LEDSOFT 2 /* enable LED gpio status */ 1285 1285 #define ATH_STAT_STARTED 3 /* opened & irqs enabled */ 1286 + #define ATH_STAT_RESET 4 /* hw reset */ 1286 1287 1287 1288 unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */ 1288 1289 unsigned int fif_filter_flags; /* Current FIF_* filter flags */
+28 -3
drivers/net/wireless/ath/ath5k/base.c
··· 1523 1523 enum ath5k_int imask; 1524 1524 unsigned long flags; 1525 1525 1526 + if (test_bit(ATH_STAT_RESET, ah->status)) 1527 + return; 1528 + 1526 1529 spin_lock_irqsave(&ah->irqlock, flags); 1527 1530 imask = ah->imask; 1528 1531 if (ah->rx_pending) ··· 2861 2858 { 2862 2859 struct ath_common *common = ath5k_hw_common(ah); 2863 2860 int ret, ani_mode; 2864 - bool fast; 2861 + bool fast = chan && modparam_fastchanswitch ? 1 : 0; 2865 2862 2866 2863 ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "resetting\n"); 2864 + 2865 + __set_bit(ATH_STAT_RESET, ah->status); 2867 2866 2868 2867 ath5k_hw_set_imr(ah, 0); 2869 2868 synchronize_irq(ah->irq); ··· 2881 2876 * so we should also free any remaining 2882 2877 * tx buffers */ 2883 2878 ath5k_drain_tx_buffs(ah); 2879 + 2880 + /* Stop PCU */ 2881 + ath5k_hw_stop_rx_pcu(ah); 2882 + 2883 + /* Stop DMA 2884 + * 2885 + * Note: If DMA didn't stop continue 2886 + * since only a reset will fix it. 2887 + */ 2888 + ret = ath5k_hw_dma_stop(ah); 2889 + 2890 + /* RF Bus grant won't work if we have pending 2891 + * frames 2892 + */ 2893 + if (ret && fast) { 2894 + ATH5K_DBG(ah, ATH5K_DEBUG_RESET, 2895 + "DMA didn't stop, falling back to normal reset\n"); 2896 + fast = false; 2897 + } 2898 + 2884 2899 if (chan) 2885 2900 ah->curchan = chan; 2886 - 2887 - fast = ((chan != NULL) && modparam_fastchanswitch) ? 1 : 0; 2888 2901 2889 2902 ret = ath5k_hw_reset(ah, ah->opmode, ah->curchan, fast, skip_pcu); 2890 2903 if (ret) { ··· 2956 2933 * XXX needed? 2957 2934 */ 2958 2935 /* ath5k_chan_change(ah, c); */ 2936 + 2937 + __clear_bit(ATH_STAT_RESET, ah->status); 2959 2938 2960 2939 ath5k_beacon_config(ah); 2961 2940 /* intrs are enabled by ath5k_beacon_config */
-24
drivers/net/wireless/ath/ath5k/reset.c
··· 1169 1169 if (ah->ah_version == AR5K_AR5212) 1170 1170 ath5k_hw_set_sleep_clock(ah, false); 1171 1171 1172 - /* 1173 - * Stop PCU 1174 - */ 1175 - ath5k_hw_stop_rx_pcu(ah); 1176 - 1177 - /* 1178 - * Stop DMA 1179 - * 1180 - * Note: If DMA didn't stop continue 1181 - * since only a reset will fix it. 1182 - */ 1183 - ret = ath5k_hw_dma_stop(ah); 1184 - 1185 - /* RF Bus grant won't work if we have pending 1186 - * frames */ 1187 - if (ret && fast) { 1188 - ATH5K_DBG(ah, ATH5K_DEBUG_RESET, 1189 - "DMA didn't stop, falling back to normal reset\n"); 1190 - fast = false; 1191 - /* Non fatal, just continue with 1192 - * normal reset */ 1193 - ret = 0; 1194 - } 1195 - 1196 1172 mode = channel->hw_value; 1197 1173 switch (mode) { 1198 1174 case AR5K_MODE_11A:
+2 -1
drivers/net/wireless/ath/ath9k/Makefile
··· 46 46 ath9k_hw-$(CONFIG_ATH9K_WOW) += ar9003_wow.o 47 47 48 48 ath9k_hw-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += btcoex.o \ 49 - ar9003_mci.o 49 + ar9003_mci.o \ 50 + ar9003_aic.o 50 51 51 52 ath9k_hw-$(CONFIG_ATH9K_PCOEM) += ar9003_rtt.o 52 53
+15 -5
drivers/net/wireless/ath/ath9k/ani.c
··· 107 107 static void ath9k_hw_update_mibstats(struct ath_hw *ah, 108 108 struct ath9k_mib_stats *stats) 109 109 { 110 - stats->ackrcv_bad += REG_READ(ah, AR_ACK_FAIL); 111 - stats->rts_bad += REG_READ(ah, AR_RTS_FAIL); 112 - stats->fcs_bad += REG_READ(ah, AR_FCS_FAIL); 113 - stats->rts_good += REG_READ(ah, AR_RTS_OK); 114 - stats->beacons += REG_READ(ah, AR_BEACON_CNT); 110 + u32 addr[5] = {AR_RTS_OK, AR_RTS_FAIL, AR_ACK_FAIL, 111 + AR_FCS_FAIL, AR_BEACON_CNT}; 112 + u32 data[5]; 113 + 114 + REG_READ_MULTI(ah, &addr[0], &data[0], 5); 115 + /* AR_RTS_OK */ 116 + stats->rts_good += data[0]; 117 + /* AR_RTS_FAIL */ 118 + stats->rts_bad += data[1]; 119 + /* AR_ACK_FAIL */ 120 + stats->ackrcv_bad += data[2]; 121 + /* AR_FCS_FAIL */ 122 + stats->fcs_bad += data[3]; 123 + /* AR_BEACON_CNT */ 124 + stats->beacons += data[4]; 115 125 } 116 126 117 127 static void ath9k_ani_restart(struct ath_hw *ah)
+3 -2
drivers/net/wireless/ath/ath9k/ar5008_phy.c
··· 681 681 phymode |= AR_PHY_FC_DYN2040_PRI_CH; 682 682 683 683 } 684 + ENABLE_REGWRITE_BUFFER(ah); 684 685 REG_WRITE(ah, AR_PHY_TURBO, phymode); 685 686 687 + /* This function do only REG_WRITE, so 688 + * we can include it to REGWRITE_BUFFER. */ 686 689 ath9k_hw_set11nmac2040(ah, chan); 687 - 688 - ENABLE_REGWRITE_BUFFER(ah); 689 690 690 691 REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S); 691 692 REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
+39 -40
drivers/net/wireless/ath/ath9k/ar9002_calib.c
··· 430 430 u32 regVal; 431 431 unsigned int i; 432 432 u32 regList[][2] = { 433 - { 0x786c, 0 }, 434 - { 0x7854, 0 }, 435 - { 0x7820, 0 }, 436 - { 0x7824, 0 }, 437 - { 0x7868, 0 }, 438 - { 0x783c, 0 }, 439 - { 0x7838, 0 } , 440 - { 0x7828, 0 } , 433 + { AR9285_AN_TOP3, 0 }, 434 + { AR9285_AN_RXTXBB1, 0 }, 435 + { AR9285_AN_RF2G1, 0 }, 436 + { AR9285_AN_RF2G2, 0 }, 437 + { AR9285_AN_TOP2, 0 }, 438 + { AR9285_AN_RF2G8, 0 }, 439 + { AR9285_AN_RF2G7, 0 }, 440 + { AR9285_AN_RF2G3, 0 }, 441 441 }; 442 442 443 - for (i = 0; i < ARRAY_SIZE(regList); i++) 444 - regList[i][1] = REG_READ(ah, regList[i][0]); 443 + REG_READ_ARRAY(ah, regList, ARRAY_SIZE(regList)); 445 444 446 - regVal = REG_READ(ah, 0x7834); 447 - regVal &= (~(0x1)); 448 - REG_WRITE(ah, 0x7834, regVal); 449 - regVal = REG_READ(ah, 0x9808); 450 - regVal |= (0x1 << 27); 451 - REG_WRITE(ah, 0x9808, regVal); 452 - 445 + ENABLE_REG_RMW_BUFFER(ah); 446 + /* 7834, b1=0 */ 447 + REG_CLR_BIT(ah, AR9285_AN_RF2G6, 1 << 0); 448 + /* 9808, b27=1 */ 449 + REG_SET_BIT(ah, 0x9808, 1 << 27); 453 450 /* 786c,b23,1, pwddac=1 */ 454 - REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1); 451 + REG_SET_BIT(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC); 455 452 /* 7854, b5,1, pdrxtxbb=1 */ 456 - REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1); 453 + REG_SET_BIT(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1); 457 454 /* 7854, b7,1, pdv2i=1 */ 458 - REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1); 455 + REG_SET_BIT(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I); 459 456 /* 7854, b8,1, pddacinterface=1 */ 460 - REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1); 457 + REG_SET_BIT(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF); 461 458 /* 7824,b12,0, offcal=0 */ 462 - REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0); 459 + REG_CLR_BIT(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL); 463 460 /* 7838, b1,0, pwddb=0 */ 464 - REG_RMW_FIELD(ah, AR9285_AN_RF2G7, 
AR9285_AN_RF2G7_PWDDB, 0); 461 + REG_CLR_BIT(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB); 465 462 /* 7820,b11,0, enpacal=0 */ 466 - REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0); 463 + REG_CLR_BIT(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL); 467 464 /* 7820,b25,1, pdpadrv1=0 */ 468 - REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0); 465 + REG_CLR_BIT(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1); 469 466 /* 7820,b24,0, pdpadrv2=0 */ 470 - REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2, 0); 467 + REG_CLR_BIT(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2); 471 468 /* 7820,b23,0, pdpaout=0 */ 472 - REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0); 469 + REG_CLR_BIT(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT); 473 470 /* 783c,b14-16,7, padrvgn2tab_0=7 */ 474 471 REG_RMW_FIELD(ah, AR9285_AN_RF2G8, AR9285_AN_RF2G8_PADRVGN2TAB0, 7); 475 472 /* ··· 474 477 * does not matter since we turn it off 475 478 */ 476 479 REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PADRVGN2TAB0, 0); 477 - 480 + /* 7828, b0-11, ccom=fff */ 478 481 REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9271_AN_RF2G3_CCOMP, 0xfff); 482 + REG_RMW_BUFFER_FLUSH(ah); 479 483 480 484 /* Set: 481 485 * localmode=1,bmode=1,bmoderxtx=1,synthon=1, ··· 488 490 489 491 /* find off_6_1; */ 490 492 for (i = 6; i > 0; i--) { 491 - regVal = REG_READ(ah, 0x7834); 493 + regVal = REG_READ(ah, AR9285_AN_RF2G6); 492 494 regVal |= (1 << (20 + i)); 493 - REG_WRITE(ah, 0x7834, regVal); 495 + REG_WRITE(ah, AR9285_AN_RF2G6, regVal); 494 496 udelay(1); 495 497 /* regVal = REG_READ(ah, 0x7834); */ 496 498 regVal &= (~(0x1 << (20 + i))); 497 - regVal |= (MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9) 499 + regVal |= (MS(REG_READ(ah, AR9285_AN_RF2G9), 500 + AR9285_AN_RXTXBB1_SPARE9) 498 501 << (20 + i)); 499 - REG_WRITE(ah, 0x7834, regVal); 502 + REG_WRITE(ah, AR9285_AN_RF2G6, regVal); 500 503 } 501 504 502 505 regVal = (regVal >> 20) & 0x7f; ··· 514 515 
ah->pacal_info.prev_offset = regVal; 515 516 } 516 517 518 + 519 + ENABLE_REG_RMW_BUFFER(ah); 520 + /* 7834, b1=1 */ 521 + REG_SET_BIT(ah, AR9285_AN_RF2G6, 1 << 0); 522 + /* 9808, b27=0 */ 523 + REG_CLR_BIT(ah, 0x9808, 1 << 27); 524 + REG_RMW_BUFFER_FLUSH(ah); 525 + 517 526 ENABLE_REGWRITE_BUFFER(ah); 518 - 519 - regVal = REG_READ(ah, 0x7834); 520 - regVal |= 0x1; 521 - REG_WRITE(ah, 0x7834, regVal); 522 - regVal = REG_READ(ah, 0x9808); 523 - regVal &= (~(0x1 << 27)); 524 - REG_WRITE(ah, 0x9808, regVal); 525 - 526 527 for (i = 0; i < ARRAY_SIZE(regList); i++) 527 528 REG_WRITE(ah, regList[i][0], regList[i][1]); 528 529
+599
drivers/net/wireless/ath/ath9k/ar9003_aic.c
··· 1 + /* 2 + * Copyright (c) 2015 Qualcomm Atheros Inc. 3 + * 4 + * Permission to use, copy, modify, and/or distribute this software for any 5 + * purpose with or without fee is hereby granted, provided that the above 6 + * copyright notice and this permission notice appear in all copies. 7 + * 8 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 9 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 10 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 11 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 12 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 13 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 14 + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 + */ 16 + 17 + #include "hw.h" 18 + #include "hw-ops.h" 19 + #include "ar9003_mci.h" 20 + #include "ar9003_aic.h" 21 + #include "ar9003_phy.h" 22 + #include "reg_aic.h" 23 + 24 + static const u8 com_att_db_table[ATH_AIC_MAX_COM_ATT_DB_TABLE] = { 25 + 0, 3, 9, 15, 21, 27 26 + }; 27 + 28 + static const u16 aic_lin_table[ATH_AIC_MAX_AIC_LIN_TABLE] = { 29 + 8191, 7300, 6506, 5799, 5168, 4606, 4105, 3659, 30 + 3261, 2906, 2590, 2309, 2057, 1834, 1634, 1457, 31 + 1298, 1157, 1031, 919, 819, 730, 651, 580, 32 + 517, 461, 411, 366, 326, 291, 259, 231, 33 + 206, 183, 163, 146, 130, 116, 103, 92, 34 + 82, 73, 65, 58, 52, 46, 41, 37, 35 + 33, 29, 26, 23, 21, 18, 16, 15, 36 + 13, 12, 10, 9, 8, 7, 7, 6, 37 + 5, 5, 4, 4, 3 38 + }; 39 + 40 + static bool ar9003_hw_is_aic_enabled(struct ath_hw *ah) 41 + { 42 + struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci; 43 + 44 + /* 45 + * Disable AIC for now, until we have all the 46 + * HW code and the driver-layer support ready. 
47 + */ 48 + return false; 49 + 50 + if (mci_hw->config & ATH_MCI_CONFIG_DISABLE_AIC) 51 + return false; 52 + 53 + return true; 54 + } 55 + 56 + static int16_t ar9003_aic_find_valid(struct ath_aic_sram_info *cal_sram, 57 + bool dir, u8 index) 58 + { 59 + int16_t i; 60 + 61 + if (dir) { 62 + for (i = index + 1; i < ATH_AIC_MAX_BT_CHANNEL; i++) { 63 + if (cal_sram[i].valid) 64 + break; 65 + } 66 + } else { 67 + for (i = index - 1; i >= 0; i--) { 68 + if (cal_sram[i].valid) 69 + break; 70 + } 71 + } 72 + 73 + if ((i >= ATH_AIC_MAX_BT_CHANNEL) || (i < 0)) 74 + i = -1; 75 + 76 + return i; 77 + } 78 + 79 + /* 80 + * type 0: aic_lin_table, 1: com_att_db_table 81 + */ 82 + static int16_t ar9003_aic_find_index(u8 type, int16_t value) 83 + { 84 + int16_t i = -1; 85 + 86 + if (type == 0) { 87 + for (i = ATH_AIC_MAX_AIC_LIN_TABLE - 1; i >= 0; i--) { 88 + if (aic_lin_table[i] >= value) 89 + break; 90 + } 91 + } else if (type == 1) { 92 + for (i = 0; i < ATH_AIC_MAX_COM_ATT_DB_TABLE; i++) { 93 + if (com_att_db_table[i] > value) { 94 + i--; 95 + break; 96 + } 97 + } 98 + 99 + if (i >= ATH_AIC_MAX_COM_ATT_DB_TABLE) 100 + i = -1; 101 + } 102 + 103 + return i; 104 + } 105 + 106 + static void ar9003_aic_gain_table(struct ath_hw *ah) 107 + { 108 + u32 aic_atten_word[19], i; 109 + 110 + /* Config LNA gain difference */ 111 + REG_WRITE(ah, AR_PHY_BT_COEX_4, 0x2c200a00); 112 + REG_WRITE(ah, AR_PHY_BT_COEX_5, 0x5c4e4438); 113 + 114 + /* Program gain table */ 115 + aic_atten_word[0] = (0x1 & 0xf) << 14 | (0x1f & 0x1f) << 9 | (0x0 & 0xf) << 5 | 116 + (0x1f & 0x1f); /* -01 dB: 4'd1, 5'd31, 00 dB: 4'd0, 5'd31 */ 117 + aic_atten_word[1] = (0x3 & 0xf) << 14 | (0x1f & 0x1f) << 9 | (0x2 & 0xf) << 5 | 118 + (0x1f & 0x1f); /* -03 dB: 4'd3, 5'd31, -02 dB: 4'd2, 5'd31 */ 119 + aic_atten_word[2] = (0x5 & 0xf) << 14 | (0x1f & 0x1f) << 9 | (0x4 & 0xf) << 5 | 120 + (0x1f & 0x1f); /* -05 dB: 4'd5, 5'd31, -04 dB: 4'd4, 5'd31 */ 121 + aic_atten_word[3] = (0x1 & 0xf) << 14 | (0x1e & 0x1f) << 9 | (0x0 & 0xf) 
<< 5 | 122 + (0x1e & 0x1f); /* -07 dB: 4'd1, 5'd30, -06 dB: 4'd0, 5'd30 */ 123 + aic_atten_word[4] = (0x3 & 0xf) << 14 | (0x1e & 0x1f) << 9 | (0x2 & 0xf) << 5 | 124 + (0x1e & 0x1f); /* -09 dB: 4'd3, 5'd30, -08 dB: 4'd2, 5'd30 */ 125 + aic_atten_word[5] = (0x5 & 0xf) << 14 | (0x1e & 0x1f) << 9 | (0x4 & 0xf) << 5 | 126 + (0x1e & 0x1f); /* -11 dB: 4'd5, 5'd30, -10 dB: 4'd4, 5'd30 */ 127 + aic_atten_word[6] = (0x1 & 0xf) << 14 | (0xf & 0x1f) << 9 | (0x0 & 0xf) << 5 | 128 + (0xf & 0x1f); /* -13 dB: 4'd1, 5'd15, -12 dB: 4'd0, 5'd15 */ 129 + aic_atten_word[7] = (0x3 & 0xf) << 14 | (0xf & 0x1f) << 9 | (0x2 & 0xf) << 5 | 130 + (0xf & 0x1f); /* -15 dB: 4'd3, 5'd15, -14 dB: 4'd2, 5'd15 */ 131 + aic_atten_word[8] = (0x5 & 0xf) << 14 | (0xf & 0x1f) << 9 | (0x4 & 0xf) << 5 | 132 + (0xf & 0x1f); /* -17 dB: 4'd5, 5'd15, -16 dB: 4'd4, 5'd15 */ 133 + aic_atten_word[9] = (0x1 & 0xf) << 14 | (0x7 & 0x1f) << 9 | (0x0 & 0xf) << 5 | 134 + (0x7 & 0x1f); /* -19 dB: 4'd1, 5'd07, -18 dB: 4'd0, 5'd07 */ 135 + aic_atten_word[10] = (0x3 & 0xf) << 14 | (0x7 & 0x1f) << 9 | (0x2 & 0xf) << 5 | 136 + (0x7 & 0x1f); /* -21 dB: 4'd3, 5'd07, -20 dB: 4'd2, 5'd07 */ 137 + aic_atten_word[11] = (0x5 & 0xf) << 14 | (0x7 & 0x1f) << 9 | (0x4 & 0xf) << 5 | 138 + (0x7 & 0x1f); /* -23 dB: 4'd5, 5'd07, -22 dB: 4'd4, 5'd07 */ 139 + aic_atten_word[12] = (0x7 & 0xf) << 14 | (0x7 & 0x1f) << 9 | (0x6 & 0xf) << 5 | 140 + (0x7 & 0x1f); /* -25 dB: 4'd7, 5'd07, -24 dB: 4'd6, 5'd07 */ 141 + aic_atten_word[13] = (0x3 & 0xf) << 14 | (0x3 & 0x1f) << 9 | (0x2 & 0xf) << 5 | 142 + (0x3 & 0x1f); /* -27 dB: 4'd3, 5'd03, -26 dB: 4'd2, 5'd03 */ 143 + aic_atten_word[14] = (0x5 & 0xf) << 14 | (0x3 & 0x1f) << 9 | (0x4 & 0xf) << 5 | 144 + (0x3 & 0x1f); /* -29 dB: 4'd5, 5'd03, -28 dB: 4'd4, 5'd03 */ 145 + aic_atten_word[15] = (0x1 & 0xf) << 14 | (0x1 & 0x1f) << 9 | (0x0 & 0xf) << 5 | 146 + (0x1 & 0x1f); /* -31 dB: 4'd1, 5'd01, -30 dB: 4'd0, 5'd01 */ 147 + aic_atten_word[16] = (0x3 & 0xf) << 14 | (0x1 & 0x1f) << 9 | (0x2 & 0xf) << 5 | 148 
+ (0x1 & 0x1f); /* -33 dB: 4'd3, 5'd01, -32 dB: 4'd2, 5'd01 */ 149 + aic_atten_word[17] = (0x5 & 0xf) << 14 | (0x1 & 0x1f) << 9 | (0x4 & 0xf) << 5 | 150 + (0x1 & 0x1f); /* -35 dB: 4'd5, 5'd01, -34 dB: 4'd4, 5'd01 */ 151 + aic_atten_word[18] = (0x7 & 0xf) << 14 | (0x1 & 0x1f) << 9 | (0x6 & 0xf) << 5 | 152 + (0x1 & 0x1f); /* -37 dB: 4'd7, 5'd01, -36 dB: 4'd6, 5'd01 */ 153 + 154 + /* Write to Gain table with auto increment enabled. */ 155 + REG_WRITE(ah, (AR_PHY_AIC_SRAM_ADDR_B0 + 0x3000), 156 + (ATH_AIC_SRAM_AUTO_INCREMENT | 157 + ATH_AIC_SRAM_GAIN_TABLE_OFFSET)); 158 + 159 + for (i = 0; i < 19; i++) { 160 + REG_WRITE(ah, (AR_PHY_AIC_SRAM_DATA_B0 + 0x3000), 161 + aic_atten_word[i]); 162 + } 163 + } 164 + 165 + static u8 ar9003_aic_cal_start(struct ath_hw *ah, u8 min_valid_count) 166 + { 167 + struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic; 168 + int i; 169 + 170 + /* Write to Gain table with auto increment enabled. */ 171 + REG_WRITE(ah, (AR_PHY_AIC_SRAM_ADDR_B0 + 0x3000), 172 + (ATH_AIC_SRAM_AUTO_INCREMENT | 173 + ATH_AIC_SRAM_CAL_OFFSET)); 174 + 175 + for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) { 176 + REG_WRITE(ah, (AR_PHY_AIC_SRAM_DATA_B0 + 0x3000), 0); 177 + aic->aic_sram[i] = 0; 178 + } 179 + 180 + REG_WRITE(ah, AR_PHY_AIC_CTRL_0_B0, 181 + (SM(0, AR_PHY_AIC_MON_ENABLE) | 182 + SM(127, AR_PHY_AIC_CAL_MAX_HOP_COUNT) | 183 + SM(min_valid_count, AR_PHY_AIC_CAL_MIN_VALID_COUNT) | 184 + SM(37, AR_PHY_AIC_F_WLAN) | 185 + SM(1, AR_PHY_AIC_CAL_CH_VALID_RESET) | 186 + SM(0, AR_PHY_AIC_CAL_ENABLE) | 187 + SM(0x40, AR_PHY_AIC_BTTX_PWR_THR) | 188 + SM(0, AR_PHY_AIC_ENABLE))); 189 + 190 + REG_WRITE(ah, AR_PHY_AIC_CTRL_0_B1, 191 + (SM(0, AR_PHY_AIC_MON_ENABLE) | 192 + SM(1, AR_PHY_AIC_CAL_CH_VALID_RESET) | 193 + SM(0, AR_PHY_AIC_CAL_ENABLE) | 194 + SM(0x40, AR_PHY_AIC_BTTX_PWR_THR) | 195 + SM(0, AR_PHY_AIC_ENABLE))); 196 + 197 + REG_WRITE(ah, AR_PHY_AIC_CTRL_1_B0, 198 + (SM(8, AR_PHY_AIC_CAL_BT_REF_DELAY) | 199 + SM(0, AR_PHY_AIC_BT_IDLE_CFG) | 200 + SM(1, 
AR_PHY_AIC_STDBY_COND) | 201 + SM(37, AR_PHY_AIC_STDBY_ROT_ATT_DB) | 202 + SM(5, AR_PHY_AIC_STDBY_COM_ATT_DB) | 203 + SM(15, AR_PHY_AIC_RSSI_MAX) | 204 + SM(0, AR_PHY_AIC_RSSI_MIN))); 205 + 206 + REG_WRITE(ah, AR_PHY_AIC_CTRL_1_B1, 207 + (SM(15, AR_PHY_AIC_RSSI_MAX) | 208 + SM(0, AR_PHY_AIC_RSSI_MIN))); 209 + 210 + REG_WRITE(ah, AR_PHY_AIC_CTRL_2_B0, 211 + (SM(44, AR_PHY_AIC_RADIO_DELAY) | 212 + SM(8, AR_PHY_AIC_CAL_STEP_SIZE_CORR) | 213 + SM(12, AR_PHY_AIC_CAL_ROT_IDX_CORR) | 214 + SM(2, AR_PHY_AIC_CAL_CONV_CHECK_FACTOR) | 215 + SM(5, AR_PHY_AIC_ROT_IDX_COUNT_MAX) | 216 + SM(0, AR_PHY_AIC_CAL_SYNTH_TOGGLE) | 217 + SM(0, AR_PHY_AIC_CAL_SYNTH_AFTER_BTRX) | 218 + SM(200, AR_PHY_AIC_CAL_SYNTH_SETTLING))); 219 + 220 + REG_WRITE(ah, AR_PHY_AIC_CTRL_3_B0, 221 + (SM(2, AR_PHY_AIC_MON_MAX_HOP_COUNT) | 222 + SM(1, AR_PHY_AIC_MON_MIN_STALE_COUNT) | 223 + SM(1, AR_PHY_AIC_MON_PWR_EST_LONG) | 224 + SM(2, AR_PHY_AIC_MON_PD_TALLY_SCALING) | 225 + SM(10, AR_PHY_AIC_MON_PERF_THR) | 226 + SM(2, AR_PHY_AIC_CAL_TARGET_MAG_SETTING) | 227 + SM(1, AR_PHY_AIC_CAL_PERF_CHECK_FACTOR) | 228 + SM(1, AR_PHY_AIC_CAL_PWR_EST_LONG))); 229 + 230 + REG_WRITE(ah, AR_PHY_AIC_CTRL_4_B0, 231 + (SM(2, AR_PHY_AIC_CAL_ROT_ATT_DB_EST_ISO) | 232 + SM(3, AR_PHY_AIC_CAL_COM_ATT_DB_EST_ISO) | 233 + SM(0, AR_PHY_AIC_CAL_ISO_EST_INIT_SETTING) | 234 + SM(2, AR_PHY_AIC_CAL_COM_ATT_DB_BACKOFF) | 235 + SM(1, AR_PHY_AIC_CAL_COM_ATT_DB_FIXED))); 236 + 237 + REG_WRITE(ah, AR_PHY_AIC_CTRL_4_B1, 238 + (SM(2, AR_PHY_AIC_CAL_ROT_ATT_DB_EST_ISO) | 239 + SM(3, AR_PHY_AIC_CAL_COM_ATT_DB_EST_ISO) | 240 + SM(0, AR_PHY_AIC_CAL_ISO_EST_INIT_SETTING) | 241 + SM(2, AR_PHY_AIC_CAL_COM_ATT_DB_BACKOFF) | 242 + SM(1, AR_PHY_AIC_CAL_COM_ATT_DB_FIXED))); 243 + 244 + ar9003_aic_gain_table(ah); 245 + 246 + /* Need to enable AIC reference signal in BT modem. 
*/ 247 + REG_WRITE(ah, ATH_AIC_BT_JUPITER_CTRL, 248 + (REG_READ(ah, ATH_AIC_BT_JUPITER_CTRL) | 249 + ATH_AIC_BT_AIC_ENABLE)); 250 + 251 + aic->aic_cal_start_time = REG_READ(ah, AR_TSF_L32); 252 + 253 + /* Start calibration */ 254 + REG_CLR_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_ENABLE); 255 + REG_SET_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_CH_VALID_RESET); 256 + REG_SET_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_ENABLE); 257 + 258 + aic->aic_caled_chan = 0; 259 + aic->aic_cal_state = AIC_CAL_STATE_STARTED; 260 + 261 + return aic->aic_cal_state; 262 + } 263 + 264 + static bool ar9003_aic_cal_post_process(struct ath_hw *ah) 265 + { 266 + struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic; 267 + struct ath_aic_sram_info cal_sram[ATH_AIC_MAX_BT_CHANNEL]; 268 + struct ath_aic_out_info aic_sram[ATH_AIC_MAX_BT_CHANNEL]; 269 + u32 dir_path_gain_idx, quad_path_gain_idx, value; 270 + u32 fixed_com_att_db; 271 + int8_t dir_path_sign, quad_path_sign; 272 + int16_t i; 273 + bool ret = true; 274 + 275 + memset(&cal_sram, 0, sizeof(cal_sram)); 276 + memset(&aic_sram, 0, sizeof(aic_sram)); 277 + 278 + for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) { 279 + value = aic->aic_sram[i]; 280 + 281 + cal_sram[i].valid = 282 + MS(value, AR_PHY_AIC_SRAM_VALID); 283 + cal_sram[i].rot_quad_att_db = 284 + MS(value, AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB); 285 + cal_sram[i].vga_quad_sign = 286 + MS(value, AR_PHY_AIC_SRAM_VGA_QUAD_SIGN); 287 + cal_sram[i].rot_dir_att_db = 288 + MS(value, AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB); 289 + cal_sram[i].vga_dir_sign = 290 + MS(value, AR_PHY_AIC_SRAM_VGA_DIR_SIGN); 291 + cal_sram[i].com_att_6db = 292 + MS(value, AR_PHY_AIC_SRAM_COM_ATT_6DB); 293 + 294 + if (cal_sram[i].valid) { 295 + dir_path_gain_idx = cal_sram[i].rot_dir_att_db + 296 + com_att_db_table[cal_sram[i].com_att_6db]; 297 + quad_path_gain_idx = cal_sram[i].rot_quad_att_db + 298 + com_att_db_table[cal_sram[i].com_att_6db]; 299 + 300 + dir_path_sign = (cal_sram[i].vga_dir_sign) ? 
1 : -1; 301 + quad_path_sign = (cal_sram[i].vga_quad_sign) ? 1 : -1; 302 + 303 + aic_sram[i].dir_path_gain_lin = dir_path_sign * 304 + aic_lin_table[dir_path_gain_idx]; 305 + aic_sram[i].quad_path_gain_lin = quad_path_sign * 306 + aic_lin_table[quad_path_gain_idx]; 307 + } 308 + } 309 + 310 + for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) { 311 + int16_t start_idx, end_idx; 312 + 313 + if (cal_sram[i].valid) 314 + continue; 315 + 316 + start_idx = ar9003_aic_find_valid(cal_sram, 0, i); 317 + end_idx = ar9003_aic_find_valid(cal_sram, 1, i); 318 + 319 + if (start_idx < 0) { 320 + /* extrapolation */ 321 + start_idx = end_idx; 322 + end_idx = ar9003_aic_find_valid(cal_sram, 1, start_idx); 323 + 324 + if (end_idx < 0) { 325 + ret = false; 326 + break; 327 + } 328 + 329 + aic_sram[i].dir_path_gain_lin = 330 + ((aic_sram[start_idx].dir_path_gain_lin - 331 + aic_sram[end_idx].dir_path_gain_lin) * 332 + (start_idx - i) + ((end_idx - i) >> 1)) / 333 + (end_idx - i) + 334 + aic_sram[start_idx].dir_path_gain_lin; 335 + aic_sram[i].quad_path_gain_lin = 336 + ((aic_sram[start_idx].quad_path_gain_lin - 337 + aic_sram[end_idx].quad_path_gain_lin) * 338 + (start_idx - i) + ((end_idx - i) >> 1)) / 339 + (end_idx - i) + 340 + aic_sram[start_idx].quad_path_gain_lin; 341 + } 342 + 343 + if (end_idx < 0) { 344 + /* extrapolation */ 345 + end_idx = ar9003_aic_find_valid(cal_sram, 0, start_idx); 346 + 347 + if (end_idx < 0) { 348 + ret = false; 349 + break; 350 + } 351 + 352 + aic_sram[i].dir_path_gain_lin = 353 + ((aic_sram[start_idx].dir_path_gain_lin - 354 + aic_sram[end_idx].dir_path_gain_lin) * 355 + (i - start_idx) + ((start_idx - end_idx) >> 1)) / 356 + (start_idx - end_idx) + 357 + aic_sram[start_idx].dir_path_gain_lin; 358 + aic_sram[i].quad_path_gain_lin = 359 + ((aic_sram[start_idx].quad_path_gain_lin - 360 + aic_sram[end_idx].quad_path_gain_lin) * 361 + (i - start_idx) + ((start_idx - end_idx) >> 1)) / 362 + (start_idx - end_idx) + 363 + aic_sram[start_idx].quad_path_gain_lin; 
364 + 365 + } else if (start_idx >= 0){ 366 + /* interpolation */ 367 + aic_sram[i].dir_path_gain_lin = 368 + (((end_idx - i) * aic_sram[start_idx].dir_path_gain_lin) + 369 + ((i - start_idx) * aic_sram[end_idx].dir_path_gain_lin) + 370 + ((end_idx - start_idx) >> 1)) / 371 + (end_idx - start_idx); 372 + aic_sram[i].quad_path_gain_lin = 373 + (((end_idx - i) * aic_sram[start_idx].quad_path_gain_lin) + 374 + ((i - start_idx) * aic_sram[end_idx].quad_path_gain_lin) + 375 + ((end_idx - start_idx) >> 1))/ 376 + (end_idx - start_idx); 377 + } 378 + } 379 + 380 + /* From dir/quad_path_gain_lin to sram. */ 381 + i = ar9003_aic_find_valid(cal_sram, 1, 0); 382 + if (i < 0) { 383 + i = 0; 384 + ret = false; 385 + } 386 + fixed_com_att_db = com_att_db_table[cal_sram[i].com_att_6db]; 387 + 388 + for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) { 389 + int16_t rot_dir_path_att_db, rot_quad_path_att_db; 390 + 391 + aic_sram[i].sram.vga_dir_sign = 392 + (aic_sram[i].dir_path_gain_lin >= 0) ? 1 : 0; 393 + aic_sram[i].sram.vga_quad_sign= 394 + (aic_sram[i].quad_path_gain_lin >= 0) ? 
1 : 0; 395 + 396 + rot_dir_path_att_db = 397 + ar9003_aic_find_index(0, abs(aic_sram[i].dir_path_gain_lin)) - 398 + fixed_com_att_db; 399 + rot_quad_path_att_db = 400 + ar9003_aic_find_index(0, abs(aic_sram[i].quad_path_gain_lin)) - 401 + fixed_com_att_db; 402 + 403 + aic_sram[i].sram.com_att_6db = 404 + ar9003_aic_find_index(1, fixed_com_att_db); 405 + 406 + aic_sram[i].sram.valid = 1; 407 + 408 + aic_sram[i].sram.rot_dir_att_db = 409 + min(max(rot_dir_path_att_db, 410 + (int16_t)ATH_AIC_MIN_ROT_DIR_ATT_DB), 411 + ATH_AIC_MAX_ROT_DIR_ATT_DB); 412 + aic_sram[i].sram.rot_quad_att_db = 413 + min(max(rot_quad_path_att_db, 414 + (int16_t)ATH_AIC_MIN_ROT_QUAD_ATT_DB), 415 + ATH_AIC_MAX_ROT_QUAD_ATT_DB); 416 + } 417 + 418 + for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) { 419 + aic->aic_sram[i] = (SM(aic_sram[i].sram.vga_dir_sign, 420 + AR_PHY_AIC_SRAM_VGA_DIR_SIGN) | 421 + SM(aic_sram[i].sram.vga_quad_sign, 422 + AR_PHY_AIC_SRAM_VGA_QUAD_SIGN) | 423 + SM(aic_sram[i].sram.com_att_6db, 424 + AR_PHY_AIC_SRAM_COM_ATT_6DB) | 425 + SM(aic_sram[i].sram.valid, 426 + AR_PHY_AIC_SRAM_VALID) | 427 + SM(aic_sram[i].sram.rot_dir_att_db, 428 + AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB) | 429 + SM(aic_sram[i].sram.rot_quad_att_db, 430 + AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB)); 431 + } 432 + 433 + return ret; 434 + } 435 + 436 + static void ar9003_aic_cal_done(struct ath_hw *ah) 437 + { 438 + struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic; 439 + 440 + /* Disable AIC reference signal in BT modem. 
*/ 441 + REG_WRITE(ah, ATH_AIC_BT_JUPITER_CTRL, 442 + (REG_READ(ah, ATH_AIC_BT_JUPITER_CTRL) & 443 + ~ATH_AIC_BT_AIC_ENABLE)); 444 + 445 + if (ar9003_aic_cal_post_process(ah)) 446 + aic->aic_cal_state = AIC_CAL_STATE_DONE; 447 + else 448 + aic->aic_cal_state = AIC_CAL_STATE_ERROR; 449 + } 450 + 451 + static u8 ar9003_aic_cal_continue(struct ath_hw *ah, bool cal_once) 452 + { 453 + struct ath_common *common = ath9k_hw_common(ah); 454 + struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci; 455 + struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic; 456 + int i, num_chan; 457 + 458 + num_chan = MS(mci_hw->config, ATH_MCI_CONFIG_AIC_CAL_NUM_CHAN); 459 + 460 + if (!num_chan) { 461 + aic->aic_cal_state = AIC_CAL_STATE_ERROR; 462 + return aic->aic_cal_state; 463 + } 464 + 465 + if (cal_once) { 466 + for (i = 0; i < 10000; i++) { 467 + if ((REG_READ(ah, AR_PHY_AIC_CTRL_0_B1) & 468 + AR_PHY_AIC_CAL_ENABLE) == 0) 469 + break; 470 + 471 + udelay(100); 472 + } 473 + } 474 + 475 + /* 476 + * Use AR_PHY_AIC_CAL_ENABLE bit instead of AR_PHY_AIC_CAL_DONE. 477 + * Sometimes CAL_DONE bit is not asserted. 
478 + */ 479 + if ((REG_READ(ah, AR_PHY_AIC_CTRL_0_B1) & 480 + AR_PHY_AIC_CAL_ENABLE) != 0) { 481 + ath_dbg(common, MCI, "AIC cal is not done after 40ms"); 482 + goto exit; 483 + } 484 + 485 + REG_WRITE(ah, AR_PHY_AIC_SRAM_ADDR_B1, 486 + (ATH_AIC_SRAM_CAL_OFFSET | ATH_AIC_SRAM_AUTO_INCREMENT)); 487 + 488 + for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) { 489 + u32 value; 490 + 491 + value = REG_READ(ah, AR_PHY_AIC_SRAM_DATA_B1); 492 + 493 + if (value & 0x01) { 494 + if (aic->aic_sram[i] == 0) 495 + aic->aic_caled_chan++; 496 + 497 + aic->aic_sram[i] = value; 498 + 499 + if (!cal_once) 500 + break; 501 + } 502 + } 503 + 504 + if ((aic->aic_caled_chan >= num_chan) || cal_once) { 505 + ar9003_aic_cal_done(ah); 506 + } else { 507 + /* Start calibration */ 508 + REG_CLR_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_ENABLE); 509 + REG_SET_BIT(ah, AR_PHY_AIC_CTRL_0_B1, 510 + AR_PHY_AIC_CAL_CH_VALID_RESET); 511 + REG_SET_BIT(ah, AR_PHY_AIC_CTRL_0_B1, AR_PHY_AIC_CAL_ENABLE); 512 + } 513 + exit: 514 + return aic->aic_cal_state; 515 + 516 + } 517 + 518 + u8 ar9003_aic_calibration(struct ath_hw *ah) 519 + { 520 + struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic; 521 + u8 cal_ret = AIC_CAL_STATE_ERROR; 522 + 523 + switch (aic->aic_cal_state) { 524 + case AIC_CAL_STATE_IDLE: 525 + cal_ret = ar9003_aic_cal_start(ah, 1); 526 + break; 527 + case AIC_CAL_STATE_STARTED: 528 + cal_ret = ar9003_aic_cal_continue(ah, false); 529 + break; 530 + case AIC_CAL_STATE_DONE: 531 + cal_ret = AIC_CAL_STATE_DONE; 532 + break; 533 + default: 534 + break; 535 + } 536 + 537 + return cal_ret; 538 + } 539 + 540 + u8 ar9003_aic_start_normal(struct ath_hw *ah) 541 + { 542 + struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic; 543 + int16_t i; 544 + 545 + if (aic->aic_cal_state != AIC_CAL_STATE_DONE) 546 + return 1; 547 + 548 + ar9003_aic_gain_table(ah); 549 + 550 + REG_WRITE(ah, AR_PHY_AIC_SRAM_ADDR_B1, ATH_AIC_SRAM_AUTO_INCREMENT); 551 + 552 + for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) { 553 + REG_WRITE(ah, 
AR_PHY_AIC_SRAM_DATA_B1, aic->aic_sram[i]); 554 + } 555 + 556 + /* FIXME: Replace these with proper register names */ 557 + REG_WRITE(ah, 0xa6b0, 0x80); 558 + REG_WRITE(ah, 0xa6b4, 0x5b2df0); 559 + REG_WRITE(ah, 0xa6b8, 0x10762cc8); 560 + REG_WRITE(ah, 0xa6bc, 0x1219a4b); 561 + REG_WRITE(ah, 0xa6c0, 0x1e01); 562 + REG_WRITE(ah, 0xb6b4, 0xf0); 563 + REG_WRITE(ah, 0xb6c0, 0x1e01); 564 + REG_WRITE(ah, 0xb6b0, 0x81); 565 + REG_WRITE(ah, AR_PHY_65NM_CH1_RXTX4, 0x40000000); 566 + 567 + aic->aic_enabled = true; 568 + 569 + return 0; 570 + } 571 + 572 + u8 ar9003_aic_cal_reset(struct ath_hw *ah) 573 + { 574 + struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic; 575 + 576 + aic->aic_cal_state = AIC_CAL_STATE_IDLE; 577 + return aic->aic_cal_state; 578 + } 579 + 580 + u8 ar9003_aic_calibration_single(struct ath_hw *ah) 581 + { 582 + struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci; 583 + u8 cal_ret; 584 + int num_chan; 585 + 586 + num_chan = MS(mci_hw->config, ATH_MCI_CONFIG_AIC_CAL_NUM_CHAN); 587 + 588 + (void) ar9003_aic_cal_start(ah, num_chan); 589 + cal_ret = ar9003_aic_cal_continue(ah, true); 590 + 591 + return cal_ret; 592 + } 593 + 594 + void ar9003_hw_attach_aic_ops(struct ath_hw *ah) 595 + { 596 + struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah); 597 + 598 + priv_ops->is_aic_enabled = ar9003_hw_is_aic_enabled; 599 + }
+61
drivers/net/wireless/ath/ath9k/ar9003_aic.h
··· 1 + /* 2 + * Copyright (c) 2015 Qualcomm Atheros Inc. 3 + * 4 + * Permission to use, copy, modify, and/or distribute this software for any 5 + * purpose with or without fee is hereby granted, provided that the above 6 + * copyright notice and this permission notice appear in all copies. 7 + * 8 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 9 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 10 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 11 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 12 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 13 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 14 + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 + */ 16 + 17 + #ifndef AR9003_AIC_H 18 + #define AR9003_AIC_H 19 + 20 + #define ATH_AIC_MAX_COM_ATT_DB_TABLE 6 21 + #define ATH_AIC_MAX_AIC_LIN_TABLE 69 22 + #define ATH_AIC_MIN_ROT_DIR_ATT_DB 0 23 + #define ATH_AIC_MIN_ROT_QUAD_ATT_DB 0 24 + #define ATH_AIC_MAX_ROT_DIR_ATT_DB 37 25 + #define ATH_AIC_MAX_ROT_QUAD_ATT_DB 37 26 + #define ATH_AIC_SRAM_AUTO_INCREMENT 0x80000000 27 + #define ATH_AIC_SRAM_GAIN_TABLE_OFFSET 0x280 28 + #define ATH_AIC_SRAM_CAL_OFFSET 0x140 29 + #define ATH_AIC_SRAM_OFFSET 0x00 30 + #define ATH_AIC_MEAS_MAG_THRESH 20 31 + #define ATH_AIC_BT_JUPITER_CTRL 0x66820 32 + #define ATH_AIC_BT_AIC_ENABLE 0x02 33 + 34 + enum aic_cal_state { 35 + AIC_CAL_STATE_IDLE = 0, 36 + AIC_CAL_STATE_STARTED, 37 + AIC_CAL_STATE_DONE, 38 + AIC_CAL_STATE_ERROR 39 + }; 40 + 41 + struct ath_aic_sram_info { 42 + bool valid:1; 43 + bool vga_quad_sign:1; 44 + bool vga_dir_sign:1; 45 + u8 rot_quad_att_db; 46 + u8 rot_dir_att_db; 47 + u8 com_att_6db; 48 + }; 49 + 50 + struct ath_aic_out_info { 51 + int16_t dir_path_gain_lin; 52 + int16_t quad_path_gain_lin; 53 + struct ath_aic_sram_info sram; 54 + }; 55 + 56 + u8 ar9003_aic_calibration(struct ath_hw 
*ah); 57 + u8 ar9003_aic_start_normal(struct ath_hw *ah); 58 + u8 ar9003_aic_cal_reset(struct ath_hw *ah); 59 + u8 ar9003_aic_calibration_single(struct ath_hw *ah); 60 + 61 + #endif /* AR9003_AIC_H */
+63 -21
drivers/net/wireless/ath/ath9k/ar9003_hw.c
··· 195 195 INIT_INI_ARRAY(&ah->iniCckfirJapan2484, 196 196 ar9485_1_1_baseband_core_txfir_coeff_japan_2484); 197 197 198 - if (ah->config.no_pll_pwrsave) { 198 + if (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) { 199 199 INIT_INI_ARRAY(&ah->iniPcieSerdes, 200 - ar9485_1_1_pcie_phy_clkreq_disable_L1); 200 + ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1); 201 201 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, 202 - ar9485_1_1_pcie_phy_clkreq_disable_L1); 202 + ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1); 203 203 } else { 204 204 INIT_INI_ARRAY(&ah->iniPcieSerdes, 205 - ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1); 205 + ar9485_1_1_pcie_phy_clkreq_disable_L1); 206 206 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, 207 - ar9485_1_1_pll_on_cdr_on_clkreq_disable_L1); 207 + ar9485_1_1_pcie_phy_clkreq_disable_L1); 208 208 } 209 209 } else if (AR_SREV_9462_21(ah)) { 210 210 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], ··· 231 231 ar9462_2p1_modes_fast_clock); 232 232 INIT_INI_ARRAY(&ah->iniCckfirJapan2484, 233 233 ar9462_2p1_baseband_core_txfir_coeff_japan_2484); 234 - INIT_INI_ARRAY(&ah->iniPcieSerdes, 235 - ar9462_2p1_pciephy_clkreq_disable_L1); 236 - INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, 237 - ar9462_2p1_pciephy_clkreq_disable_L1); 234 + 235 + /* Awake -> Sleep Setting */ 236 + if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) && 237 + (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D3)) { 238 + INIT_INI_ARRAY(&ah->iniPcieSerdes, 239 + ar9462_2p1_pciephy_clkreq_disable_L1); 240 + } 241 + 242 + /* Sleep -> Awake Setting */ 243 + if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) && 244 + (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D0)) { 245 + INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, 246 + ar9462_2p1_pciephy_clkreq_disable_L1); 247 + } 238 248 } else if (AR_SREV_9462_20(ah)) { 239 249 240 250 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], ar9462_2p0_mac_core); ··· 272 262 ar9462_2p0_common_rx_gain); 273 263 274 264 /* Awake -> Sleep Setting */ 275 - 
INIT_INI_ARRAY(&ah->iniPcieSerdes, 276 - ar9462_2p0_pciephy_clkreq_disable_L1); 265 + if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) && 266 + (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D3)) { 267 + INIT_INI_ARRAY(&ah->iniPcieSerdes, 268 + ar9462_2p0_pciephy_clkreq_disable_L1); 269 + } 270 + 277 271 /* Sleep -> Awake Setting */ 278 - INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, 279 - ar9462_2p0_pciephy_clkreq_disable_L1); 272 + if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) && 273 + (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D0)) { 274 + INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, 275 + ar9462_2p0_pciephy_clkreq_disable_L1); 276 + } 280 277 281 278 /* Fast clock modal settings */ 282 279 INIT_INI_ARRAY(&ah->iniModesFastClock, ··· 473 456 INIT_INI_ARRAY(&ah->iniModesTxGain, 474 457 ar9565_1p1_Modes_lowest_ob_db_tx_gain_table); 475 458 476 - INIT_INI_ARRAY(&ah->iniPcieSerdes, 477 - ar9565_1p1_pciephy_clkreq_disable_L1); 478 - INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, 479 - ar9565_1p1_pciephy_clkreq_disable_L1); 459 + /* Awake -> Sleep Setting */ 460 + if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) && 461 + (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D3)) { 462 + INIT_INI_ARRAY(&ah->iniPcieSerdes, 463 + ar9565_1p1_pciephy_clkreq_disable_L1); 464 + } 465 + 466 + /* Sleep -> Awake Setting */ 467 + if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) && 468 + (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D0)) { 469 + INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, 470 + ar9565_1p1_pciephy_clkreq_disable_L1); 471 + } 480 472 481 473 INIT_INI_ARRAY(&ah->iniModesFastClock, 482 474 ar9565_1p1_modes_fast_clock); ··· 517 491 INIT_INI_ARRAY(&ah->iniModesTxGain, 518 492 ar9565_1p0_Modes_lowest_ob_db_tx_gain_table); 519 493 520 - INIT_INI_ARRAY(&ah->iniPcieSerdes, 521 - ar9565_1p0_pciephy_clkreq_disable_L1); 522 - INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, 523 - ar9565_1p0_pciephy_clkreq_disable_L1); 494 + /* Awake -> Sleep 
Setting */ 495 + if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) && 496 + (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D3)) { 497 + INIT_INI_ARRAY(&ah->iniPcieSerdes, 498 + ar9565_1p0_pciephy_clkreq_disable_L1); 499 + } 500 + 501 + /* Sleep -> Awake Setting */ 502 + if ((ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_CONTROL) && 503 + (ah->config.pll_pwrsave & AR_PCIE_PLL_PWRSAVE_ON_D0)) { 504 + INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, 505 + ar9565_1p0_pciephy_clkreq_disable_L1); 506 + } 524 507 525 508 INIT_INI_ARRAY(&ah->iniModesFastClock, 526 509 ar9565_1p0_modes_fast_clock); ··· 1165 1130 struct ath_hw_ops *ops = ath9k_hw_ops(ah); 1166 1131 1167 1132 ar9003_hw_init_mode_regs(ah); 1133 + 1134 + if (AR_SREV_9003_PCOEM(ah)) { 1135 + WARN_ON(!ah->iniPcieSerdes.ia_array); 1136 + WARN_ON(!ah->iniPcieSerdesLowPower.ia_array); 1137 + } 1138 + 1168 1139 priv_ops->init_mode_gain_regs = ar9003_hw_init_mode_gain_regs; 1169 1140 priv_ops->init_hang_checks = ar9003_hw_init_hang_checks; 1170 1141 priv_ops->detect_mac_hang = ar9003_hw_detect_mac_hang; ··· 1180 1139 ar9003_hw_attach_phy_ops(ah); 1181 1140 ar9003_hw_attach_calib_ops(ah); 1182 1141 ar9003_hw_attach_mac_ops(ah); 1142 + ar9003_hw_attach_aic_ops(ah); 1183 1143 }
+20
drivers/net/wireless/ath/ath9k/ar9003_mci.c
··· 19 19 #include "hw-ops.h" 20 20 #include "ar9003_phy.h" 21 21 #include "ar9003_mci.h" 22 + #include "ar9003_aic.h" 22 23 23 24 static void ar9003_mci_reset_req_wakeup(struct ath_hw *ah) 24 25 { ··· 1017 1016 if (en_int) 1018 1017 ar9003_mci_enable_interrupt(ah); 1019 1018 1019 + if (ath9k_hw_is_aic_enabled(ah)) 1020 + ar9003_aic_start_normal(ah); 1021 + 1020 1022 return 0; 1021 1023 } 1022 1024 ··· 1365 1361 case MCI_STATE_NEED_FLUSH_BT_INFO: 1366 1362 value = (!mci->unhalt_bt_gpm && mci->need_flush_btinfo) ? 1 : 0; 1367 1363 mci->need_flush_btinfo = false; 1364 + break; 1365 + case MCI_STATE_AIC_CAL: 1366 + if (ath9k_hw_is_aic_enabled(ah)) 1367 + value = ar9003_aic_calibration(ah); 1368 + break; 1369 + case MCI_STATE_AIC_START: 1370 + if (ath9k_hw_is_aic_enabled(ah)) 1371 + ar9003_aic_start_normal(ah); 1372 + break; 1373 + case MCI_STATE_AIC_CAL_RESET: 1374 + if (ath9k_hw_is_aic_enabled(ah)) 1375 + value = ar9003_aic_cal_reset(ah); 1376 + break; 1377 + case MCI_STATE_AIC_CAL_SINGLE: 1378 + if (ath9k_hw_is_aic_enabled(ah)) 1379 + value = ar9003_aic_calibration_single(ah); 1368 1380 break; 1369 1381 default: 1370 1382 break;
-25
drivers/net/wireless/ath/ath9k/ar9003_phy.h
··· 640 640 #define AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE 0x0000ff00 641 641 #define AR_PHY_BB_THERM_ADC_4_LATEST_VOLT_VALUE_S 8 642 642 643 - /* AIC Registers */ 644 - #define AR_PHY_AIC_CTRL_0_B0 (AR_SM_BASE + 0x4b0) 645 - #define AR_PHY_AIC_CTRL_1_B0 (AR_SM_BASE + 0x4b4) 646 - #define AR_PHY_AIC_CTRL_2_B0 (AR_SM_BASE + 0x4b8) 647 - #define AR_PHY_AIC_CTRL_3_B0 (AR_SM_BASE + 0x4bc) 648 - #define AR_PHY_AIC_STAT_0_B0 (AR_SM_BASE + 0x4c4)) 649 - #define AR_PHY_AIC_STAT_1_B0 (AR_SM_BASE + 0x4c8)) 650 - #define AR_PHY_AIC_CTRL_4_B0 (AR_SM_BASE + 0x4c0) 651 - #define AR_PHY_AIC_STAT_2_B0 (AR_SM_BASE + 0x4cc) 652 - 653 643 #define AR_PHY_65NM_CH0_TXRF3 0x16048 654 644 #define AR_PHY_65NM_CH0_TXRF3_CAPDIV2G 0x0000001e 655 645 #define AR_PHY_65NM_CH0_TXRF3_CAPDIV2G_S 1 ··· 978 988 #define AR_PHY_TPC_19_B1_ALPHA_THERM_S 0 979 989 #define AR_PHY_TX_IQCAL_STATUS_B1 (AR_SM1_BASE + 0x48c) 980 990 #define AR_PHY_TX_IQCAL_CORR_COEFF_B1(_i) (AR_SM1_BASE + 0x450 + ((_i) << 2)) 981 - 982 - /* SM 1 AIC Registers */ 983 - 984 - #define AR_PHY_AIC_CTRL_0_B1 (AR_SM1_BASE + 0x4b0) 985 - #define AR_PHY_AIC_CTRL_1_B1 (AR_SM1_BASE + 0x4b4) 986 - #define AR_PHY_AIC_CTRL_2_B1 (AR_SM1_BASE + 0x4b8) 987 - #define AR_PHY_AIC_STAT_0_B1 (AR_SM1_BASE + (AR_SREV_9462_10(ah) ? \ 988 - 0x4c0 : 0x4c4)) 989 - #define AR_PHY_AIC_STAT_1_B1 (AR_SM1_BASE + (AR_SREV_9462_10(ah) ? \ 990 - 0x4c4 : 0x4c8)) 991 - #define AR_PHY_AIC_CTRL_4_B1 (AR_SM1_BASE + 0x4c0) 992 - #define AR_PHY_AIC_STAT_2_B1 (AR_SM1_BASE + 0x4cc) 993 - 994 - #define AR_PHY_AIC_SRAM_ADDR_B1 (AR_SM1_BASE + 0x5f0) 995 - #define AR_PHY_AIC_SRAM_DATA_B1 (AR_SM1_BASE + 0x5f4) 996 991 997 992 #define AR_PHY_RTT_TABLE_SW_INTF_B(i) (0x384 + ((i) ? \ 998 993 AR_SM1_BASE : AR_SM_BASE))
+3 -3
drivers/net/wireless/ath/ath9k/ar9003_rtt.c
··· 106 106 int chain, i; 107 107 108 108 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) { 109 - if (!(ah->rxchainmask & (1 << chain))) 109 + if (!(ah->caps.rx_chainmask & (1 << chain))) 110 110 continue; 111 111 for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++) { 112 112 ar9003_hw_rtt_load_hist_entry(ah, chain, i, ··· 171 171 int chain, i; 172 172 173 173 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) { 174 - if (!(ah->rxchainmask & (1 << chain))) 174 + if (!(ah->caps.rx_chainmask & (1 << chain))) 175 175 continue; 176 176 for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++) { 177 177 ah->caldata->rtt_table[chain][i] = ··· 193 193 int chain, i; 194 194 195 195 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) { 196 - if (!(ah->rxchainmask & (1 << chain))) 196 + if (!(ah->caps.rx_chainmask & (1 << chain))) 197 197 continue; 198 198 for (i = 0; i < MAX_RTT_TABLE_ENTRY; i++) 199 199 ar9003_hw_rtt_load_hist_entry(ah, chain, i, 0);
+1 -1
drivers/net/wireless/ath/ath9k/ath9k.h
··· 184 184 struct ath_buf *bf; 185 185 u16 framelen; 186 186 s8 txq; 187 - enum ath9k_key_type keytype; 188 187 u8 keyix; 189 188 u8 rtscts_rate; 190 189 u8 retries : 7; 191 190 u8 baw_tracked : 1; 192 191 u8 tx_power; 192 + enum ath9k_key_type keytype:2; 193 193 }; 194 194 195 195 struct ath_rxbuf {
+12
drivers/net/wireless/ath/ath9k/btcoex.h
··· 44 44 45 45 #define AR9300_NUM_BT_WEIGHTS 4 46 46 #define AR9300_NUM_WLAN_WEIGHTS 4 47 + 48 + #define ATH_AIC_MAX_BT_CHANNEL 79 49 + 47 50 /* Defines the BT AR_BT_COEX_WGHT used */ 48 51 enum ath_stomp_type { 49 52 ATH_BTCOEX_STOMP_ALL, ··· 96 93 u32 last_recovery; 97 94 }; 98 95 96 + struct ath9k_hw_aic { 97 + bool aic_enabled; 98 + u8 aic_cal_state; 99 + u8 aic_caled_chan; 100 + u32 aic_sram[ATH_AIC_MAX_BT_CHANNEL]; 101 + u32 aic_cal_start_time; 102 + }; 103 + 99 104 struct ath_btcoex_hw { 100 105 enum ath_btcoex_scheme scheme; 101 106 struct ath9k_hw_mci mci; 107 + struct ath9k_hw_aic aic; 102 108 bool enabled; 103 109 u8 wlanactive_gpio; 104 110 u8 btactive_gpio;
+8 -11
drivers/net/wireless/ath/ath9k/calib.c
··· 238 238 { 239 239 struct ath9k_nfcal_hist *h = NULL; 240 240 unsigned i, j; 241 - int32_t val; 242 241 u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask; 243 242 struct ath_common *common = ath9k_hw_common(ah); 244 243 s16 default_nf = ath9k_hw_get_default_nf(ah, chan); ··· 245 246 if (ah->caldata) 246 247 h = ah->caldata->nfCalHist; 247 248 249 + ENABLE_REG_RMW_BUFFER(ah); 248 250 for (i = 0; i < NUM_NF_READINGS; i++) { 249 251 if (chainmask & (1 << i)) { 250 252 s16 nfval; ··· 258 258 else 259 259 nfval = default_nf; 260 260 261 - val = REG_READ(ah, ah->nf_regs[i]); 262 - val &= 0xFFFFFE00; 263 - val |= (((u32) nfval << 1) & 0x1ff); 264 - REG_WRITE(ah, ah->nf_regs[i], val); 261 + REG_RMW(ah, ah->nf_regs[i], 262 + (((u32) nfval << 1) & 0x1ff), 0x1ff); 265 263 } 266 264 } 267 265 ··· 272 274 REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, 273 275 AR_PHY_AGC_CONTROL_NO_UPDATE_NF); 274 276 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF); 277 + REG_RMW_BUFFER_FLUSH(ah); 275 278 276 279 /* 277 280 * Wait for load to complete, should be fast, a few 10s of us. ··· 308 309 * by the median we just loaded. This will be initial (and max) value 309 310 * of next noise floor calibration the baseband does. 310 311 */ 311 - ENABLE_REGWRITE_BUFFER(ah); 312 + ENABLE_REG_RMW_BUFFER(ah); 312 313 for (i = 0; i < NUM_NF_READINGS; i++) { 313 314 if (chainmask & (1 << i)) { 314 315 if ((i >= AR5416_MAX_CHAINS) && !IS_CHAN_HT40(chan)) 315 316 continue; 316 317 317 - val = REG_READ(ah, ah->nf_regs[i]); 318 - val &= 0xFFFFFE00; 319 - val |= (((u32) (-50) << 1) & 0x1ff); 320 - REG_WRITE(ah, ah->nf_regs[i], val); 318 + REG_RMW(ah, ah->nf_regs[i], 319 + (((u32) (-50) << 1) & 0x1ff), 0x1ff); 321 320 } 322 321 } 323 - REGWRITE_BUFFER_FLUSH(ah); 322 + REG_RMW_BUFFER_FLUSH(ah); 324 323 325 324 return 0; 326 325 }
+29 -15
drivers/net/wireless/ath/ath9k/dfs.c
··· 126 126 DFS_STAT_INC(sc, pulses_detected); 127 127 return true; 128 128 } 129 - #undef PRI_CH_RADAR_FOUND 130 - #undef EXT_CH_RADAR_FOUND 129 + 130 + static void 131 + ath9k_dfs_process_radar_pulse(struct ath_softc *sc, struct pulse_event *pe) 132 + { 133 + struct dfs_pattern_detector *pd = sc->dfs_detector; 134 + DFS_STAT_INC(sc, pulses_processed); 135 + if (pd == NULL) 136 + return; 137 + if (!pd->add_pulse(pd, pe)) 138 + return; 139 + DFS_STAT_INC(sc, radar_detected); 140 + ieee80211_radar_detected(sc->hw); 141 + } 131 142 132 143 /* 133 144 * DFS: check PHY-error for radar pulse and feed the detector ··· 187 176 ard.pulse_length_pri = vdata_end[-3]; 188 177 pe.freq = ah->curchan->channel; 189 178 pe.ts = mactime; 190 - if (ath9k_postprocess_radar_event(sc, &ard, &pe)) { 191 - struct dfs_pattern_detector *pd = sc->dfs_detector; 192 - ath_dbg(common, DFS, 193 - "ath9k_dfs_process_phyerr: channel=%d, ts=%llu, " 194 - "width=%d, rssi=%d, delta_ts=%llu\n", 195 - pe.freq, pe.ts, pe.width, pe.rssi, 196 - pe.ts - sc->dfs_prev_pulse_ts); 197 - sc->dfs_prev_pulse_ts = pe.ts; 198 - DFS_STAT_INC(sc, pulses_processed); 199 - if (pd != NULL && pd->add_pulse(pd, &pe)) { 200 - DFS_STAT_INC(sc, radar_detected); 201 - ieee80211_radar_detected(sc->hw); 202 - } 179 + if (!ath9k_postprocess_radar_event(sc, &ard, &pe)) 180 + return; 181 + 182 + ath_dbg(common, DFS, 183 + "ath9k_dfs_process_phyerr: type=%d, freq=%d, ts=%llu, " 184 + "width=%d, rssi=%d, delta_ts=%llu\n", 185 + ard.pulse_bw_info, pe.freq, pe.ts, pe.width, pe.rssi, 186 + pe.ts - sc->dfs_prev_pulse_ts); 187 + sc->dfs_prev_pulse_ts = pe.ts; 188 + if (ard.pulse_bw_info & PRI_CH_RADAR_FOUND) 189 + ath9k_dfs_process_radar_pulse(sc, &pe); 190 + if (ard.pulse_bw_info & EXT_CH_RADAR_FOUND) { 191 + pe.freq += IS_CHAN_HT40PLUS(ah->curchan) ? 20 : -20; 192 + ath9k_dfs_process_radar_pulse(sc, &pe); 203 193 } 204 194 } 195 + #undef PRI_CH_RADAR_FOUND 196 + #undef EXT_CH_RADAR_FOUND
+1 -6
drivers/net/wireless/ath/ath9k/eeprom.c
··· 27 27 void ath9k_hw_analog_shift_rmw(struct ath_hw *ah, u32 reg, u32 mask, 28 28 u32 shift, u32 val) 29 29 { 30 - u32 regVal; 31 - 32 - regVal = REG_READ(ah, reg) & ~mask; 33 - regVal |= (val << shift) & mask; 34 - 35 - REG_WRITE(ah, reg, regVal); 30 + REG_RMW(ah, reg, ((val << shift) & mask), mask); 36 31 37 32 if (ah->config.analog_shiftreg) 38 33 udelay(100);
+22 -14
drivers/net/wireless/ath/ath9k/eeprom_4k.c
··· 389 389 } 390 390 } 391 391 392 + ENABLE_REG_RMW_BUFFER(ah); 392 393 REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_NUM_PD_GAIN, 393 394 (numXpdGain - 1) & 0x3); 394 395 REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_1, ··· 397 396 REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_2, 398 397 xpdGainValues[1]); 399 398 REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_3, 0); 399 + REG_RMW_BUFFER_FLUSH(ah); 400 400 401 401 for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++) { 402 402 regChainOffset = i * 0x1000; ··· 772 770 struct ar5416_eeprom_4k *eep, 773 771 u8 txRxAttenLocal) 774 772 { 775 - REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0, 776 - pModal->antCtrlChain[0]); 773 + ENABLE_REG_RMW_BUFFER(ah); 774 + REG_RMW(ah, AR_PHY_SWITCH_CHAIN_0, 775 + pModal->antCtrlChain[0], 0); 777 776 778 - REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), 779 - (REG_READ(ah, AR_PHY_TIMING_CTRL4(0)) & 780 - ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF | 781 - AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) | 782 - SM(pModal->iqCalICh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) | 783 - SM(pModal->iqCalQCh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF)); 777 + REG_RMW(ah, AR_PHY_TIMING_CTRL4(0), 778 + SM(pModal->iqCalICh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) | 779 + SM(pModal->iqCalQCh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF), 780 + AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF | AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF); 784 781 785 782 if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= 786 783 AR5416_EEP_MINOR_VER_3) { ··· 818 817 AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal); 819 818 REG_RMW_FIELD(ah, AR_PHY_RXGAIN + 0x1000, 820 819 AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[0]); 820 + REG_RMW_BUFFER_FLUSH(ah); 821 821 } 822 822 823 823 /* ··· 930 928 } 931 929 } 932 930 931 + ENABLE_REG_RMW_BUFFER(ah); 933 932 if (AR_SREV_9271(ah)) { 934 933 ath9k_hw_analog_shift_rmw(ah, 935 934 AR9285_AN_RF2G3, ··· 1035 1032 AR9285_AN_RF2G4_DB2_4_S, 1036 1033 db2[4]); 1037 1034 } 1035 + REG_RMW_BUFFER_FLUSH(ah); 
1038 1036 1039 - 1037 + ENABLE_REG_RMW_BUFFER(ah); 1040 1038 REG_RMW_FIELD(ah, AR_PHY_SETTLING, AR_PHY_SETTLING_SWITCH, 1041 1039 pModal->switchSettling); 1042 1040 REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ, AR_PHY_DESIRED_SZ_ADC, 1043 1041 pModal->adcDesiredSize); 1044 1042 1045 - REG_WRITE(ah, AR_PHY_RF_CTL4, 1046 - SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAA_OFF) | 1047 - SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAB_OFF) | 1048 - SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAA_ON) | 1049 - SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAB_ON)); 1043 + REG_RMW(ah, AR_PHY_RF_CTL4, 1044 + SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAA_OFF) | 1045 + SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAB_OFF) | 1046 + SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAA_ON) | 1047 + SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAB_ON), 0); 1050 1048 1051 1049 REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON, 1052 1050 pModal->txEndToRxOn); ··· 1076 1072 pModal->swSettleHt40); 1077 1073 } 1078 1074 1075 + REG_RMW_BUFFER_FLUSH(ah); 1076 + 1079 1077 bb_desired_scale = (pModal->bb_scale_smrt_antenna & 1080 1078 EEP_4K_BB_DESIRED_SCALE_MASK); 1081 1079 if ((pBase->txGainType == 0) && (bb_desired_scale != 0)) { ··· 1086 1080 mask = BIT(0)|BIT(5)|BIT(10)|BIT(15)|BIT(20)|BIT(25); 1087 1081 pwrctrl = mask * bb_desired_scale; 1088 1082 clr = mask * 0x1f; 1083 + ENABLE_REG_RMW_BUFFER(ah); 1089 1084 REG_RMW(ah, AR_PHY_TX_PWRCTRL8, pwrctrl, clr); 1090 1085 REG_RMW(ah, AR_PHY_TX_PWRCTRL10, pwrctrl, clr); 1091 1086 REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL12, pwrctrl, clr); ··· 1101 1094 clr = mask * 0x1f; 1102 1095 REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL11, pwrctrl, clr); 1103 1096 REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL13, pwrctrl, clr); 1097 + REG_RMW_BUFFER_FLUSH(ah); 1104 1098 } 1105 1099 } 1106 1100
+14 -20
drivers/net/wireless/ath/ath9k/eeprom_def.c
··· 466 466 struct ar5416_eeprom_def *eep, 467 467 u8 txRxAttenLocal, int regChainOffset, int i) 468 468 { 469 + ENABLE_REG_RMW_BUFFER(ah); 469 470 if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_3) { 470 471 txRxAttenLocal = pModal->txRxAttenCh[i]; 471 472 ··· 484 483 AR_PHY_GAIN_2GHZ_XATTEN2_DB, 485 484 pModal->xatten2Db[i]); 486 485 } else { 487 - REG_WRITE(ah, AR_PHY_GAIN_2GHZ + regChainOffset, 488 - (REG_READ(ah, AR_PHY_GAIN_2GHZ + regChainOffset) & 489 - ~AR_PHY_GAIN_2GHZ_BSW_MARGIN) 490 - | SM(pModal-> bswMargin[i], 491 - AR_PHY_GAIN_2GHZ_BSW_MARGIN)); 492 - REG_WRITE(ah, AR_PHY_GAIN_2GHZ + regChainOffset, 493 - (REG_READ(ah, AR_PHY_GAIN_2GHZ + regChainOffset) & 494 - ~AR_PHY_GAIN_2GHZ_BSW_ATTEN) 495 - | SM(pModal->bswAtten[i], 496 - AR_PHY_GAIN_2GHZ_BSW_ATTEN)); 486 + REG_RMW(ah, AR_PHY_GAIN_2GHZ + regChainOffset, 487 + SM(pModal-> bswMargin[i], AR_PHY_GAIN_2GHZ_BSW_MARGIN), 488 + AR_PHY_GAIN_2GHZ_BSW_MARGIN); 489 + REG_RMW(ah, AR_PHY_GAIN_2GHZ + regChainOffset, 490 + SM(pModal->bswAtten[i], AR_PHY_GAIN_2GHZ_BSW_ATTEN), 491 + AR_PHY_GAIN_2GHZ_BSW_ATTEN); 497 492 } 498 493 } 499 494 ··· 501 504 AR_PHY_RXGAIN + regChainOffset, 502 505 AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[i]); 503 506 } else { 504 - REG_WRITE(ah, 505 - AR_PHY_RXGAIN + regChainOffset, 506 - (REG_READ(ah, AR_PHY_RXGAIN + regChainOffset) & 507 - ~AR_PHY_RXGAIN_TXRX_ATTEN) 508 - | SM(txRxAttenLocal, AR_PHY_RXGAIN_TXRX_ATTEN)); 509 - REG_WRITE(ah, 510 - AR_PHY_GAIN_2GHZ + regChainOffset, 511 - (REG_READ(ah, AR_PHY_GAIN_2GHZ + regChainOffset) & 512 - ~AR_PHY_GAIN_2GHZ_RXTX_MARGIN) | 513 - SM(pModal->rxTxMarginCh[i], AR_PHY_GAIN_2GHZ_RXTX_MARGIN)); 507 + REG_RMW(ah, AR_PHY_RXGAIN + regChainOffset, 508 + SM(txRxAttenLocal, AR_PHY_RXGAIN_TXRX_ATTEN), 509 + AR_PHY_RXGAIN_TXRX_ATTEN); 510 + REG_RMW(ah, AR_PHY_GAIN_2GHZ + regChainOffset, 511 + SM(pModal->rxTxMarginCh[i], AR_PHY_GAIN_2GHZ_RXTX_MARGIN), 512 + AR_PHY_GAIN_2GHZ_RXTX_MARGIN); 514 513 } 514 + REG_RMW_BUFFER_FLUSH(ah); 515 515 } 516 
516 517 517 static void ath9k_hw_def_set_board_values(struct ath_hw *ah,
+5
drivers/net/wireless/ath/ath9k/htc.h
··· 444 444 #define OP_BT_SCAN BIT(4) 445 445 #define OP_TSF_RESET BIT(6) 446 446 447 + enum htc_op_flags { 448 + HTC_FWFLAG_NO_RMW, 449 + }; 450 + 447 451 struct ath9k_htc_priv { 448 452 struct device *dev; 449 453 struct ieee80211_hw *hw; ··· 486 482 bool reconfig_beacon; 487 483 unsigned int rxfilter; 488 484 unsigned long op_flags; 485 + unsigned long fw_flags; 489 486 490 487 struct ath9k_hw_cal_data caldata; 491 488 struct ath_spec_scan_priv spec_priv;
+136 -6
drivers/net/wireless/ath/ath9k/htc_drv_init.c
··· 376 376 mutex_unlock(&priv->wmi->multi_write_mutex); 377 377 } 378 378 379 + static void ath9k_reg_rmw_buffer(void *hw_priv, 380 + u32 reg_offset, u32 set, u32 clr) 381 + { 382 + struct ath_hw *ah = (struct ath_hw *) hw_priv; 383 + struct ath_common *common = ath9k_hw_common(ah); 384 + struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv; 385 + u32 rsp_status; 386 + int r; 387 + 388 + mutex_lock(&priv->wmi->multi_rmw_mutex); 389 + 390 + /* Store the register/value */ 391 + priv->wmi->multi_rmw[priv->wmi->multi_rmw_idx].reg = 392 + cpu_to_be32(reg_offset); 393 + priv->wmi->multi_rmw[priv->wmi->multi_rmw_idx].set = 394 + cpu_to_be32(set); 395 + priv->wmi->multi_rmw[priv->wmi->multi_rmw_idx].clr = 396 + cpu_to_be32(clr); 397 + 398 + priv->wmi->multi_rmw_idx++; 399 + 400 + /* If the buffer is full, send it out. */ 401 + if (priv->wmi->multi_rmw_idx == MAX_RMW_CMD_NUMBER) { 402 + r = ath9k_wmi_cmd(priv->wmi, WMI_REG_RMW_CMDID, 403 + (u8 *) &priv->wmi->multi_rmw, 404 + sizeof(struct register_write) * priv->wmi->multi_rmw_idx, 405 + (u8 *) &rsp_status, sizeof(rsp_status), 406 + 100); 407 + if (unlikely(r)) { 408 + ath_dbg(common, WMI, 409 + "REGISTER RMW FAILED, multi len: %d\n", 410 + priv->wmi->multi_rmw_idx); 411 + } 412 + priv->wmi->multi_rmw_idx = 0; 413 + } 414 + 415 + mutex_unlock(&priv->wmi->multi_rmw_mutex); 416 + } 417 + 418 + static void ath9k_reg_rmw_flush(void *hw_priv) 419 + { 420 + struct ath_hw *ah = (struct ath_hw *) hw_priv; 421 + struct ath_common *common = ath9k_hw_common(ah); 422 + struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv; 423 + u32 rsp_status; 424 + int r; 425 + 426 + if (test_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags)) 427 + return; 428 + 429 + atomic_dec(&priv->wmi->m_rmw_cnt); 430 + 431 + mutex_lock(&priv->wmi->multi_rmw_mutex); 432 + 433 + if (priv->wmi->multi_rmw_idx) { 434 + r = ath9k_wmi_cmd(priv->wmi, WMI_REG_RMW_CMDID, 435 + (u8 *) &priv->wmi->multi_rmw, 436 + sizeof(struct register_rmw) * 
priv->wmi->multi_rmw_idx, 437 + (u8 *) &rsp_status, sizeof(rsp_status), 438 + 100); 439 + if (unlikely(r)) { 440 + ath_dbg(common, WMI, 441 + "REGISTER RMW FAILED, multi len: %d\n", 442 + priv->wmi->multi_rmw_idx); 443 + } 444 + priv->wmi->multi_rmw_idx = 0; 445 + } 446 + 447 + mutex_unlock(&priv->wmi->multi_rmw_mutex); 448 + } 449 + 450 + static void ath9k_enable_rmw_buffer(void *hw_priv) 451 + { 452 + struct ath_hw *ah = (struct ath_hw *) hw_priv; 453 + struct ath_common *common = ath9k_hw_common(ah); 454 + struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv; 455 + 456 + if (test_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags)) 457 + return; 458 + 459 + atomic_inc(&priv->wmi->m_rmw_cnt); 460 + } 461 + 462 + static u32 ath9k_reg_rmw_single(void *hw_priv, 463 + u32 reg_offset, u32 set, u32 clr) 464 + { 465 + struct ath_hw *ah = (struct ath_hw *) hw_priv; 466 + struct ath_common *common = ath9k_hw_common(ah); 467 + struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv; 468 + struct register_rmw buf, buf_ret; 469 + int ret; 470 + u32 val = 0; 471 + 472 + buf.reg = cpu_to_be32(reg_offset); 473 + buf.set = cpu_to_be32(set); 474 + buf.clr = cpu_to_be32(clr); 475 + 476 + ret = ath9k_wmi_cmd(priv->wmi, WMI_REG_RMW_CMDID, 477 + (u8 *) &buf, sizeof(buf), 478 + (u8 *) &buf_ret, sizeof(buf_ret), 479 + 100); 480 + if (unlikely(ret)) { 481 + ath_dbg(common, WMI, "REGISTER RMW FAILED:(0x%04x, %d)\n", 482 + reg_offset, ret); 483 + } 484 + return val; 485 + } 486 + 379 487 static u32 ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr) 380 488 { 381 - u32 val; 489 + struct ath_hw *ah = (struct ath_hw *) hw_priv; 490 + struct ath_common *common = ath9k_hw_common(ah); 491 + struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv; 382 492 383 - val = ath9k_regread(hw_priv, reg_offset); 384 - val &= ~clr; 385 - val |= set; 386 - ath9k_regwrite(hw_priv, val, reg_offset); 387 - return val; 493 + if (test_bit(HTC_FWFLAG_NO_RMW, 
&priv->fw_flags)) { 494 + u32 val; 495 + 496 + val = REG_READ(ah, reg_offset); 497 + val &= ~clr; 498 + val |= set; 499 + REG_WRITE(ah, reg_offset, val); 500 + 501 + return 0; 502 + } 503 + 504 + if (atomic_read(&priv->wmi->m_rmw_cnt)) 505 + ath9k_reg_rmw_buffer(hw_priv, reg_offset, set, clr); 506 + else 507 + ath9k_reg_rmw_single(hw_priv, reg_offset, set, clr); 508 + 509 + return 0; 388 510 } 389 511 390 512 static void ath_usb_read_cachesize(struct ath_common *common, int *csz) ··· 623 501 ah->reg_ops.write = ath9k_regwrite; 624 502 ah->reg_ops.enable_write_buffer = ath9k_enable_regwrite_buffer; 625 503 ah->reg_ops.write_flush = ath9k_regwrite_flush; 504 + ah->reg_ops.enable_rmw_buffer = ath9k_enable_rmw_buffer; 505 + ah->reg_ops.rmw_flush = ath9k_reg_rmw_flush; 626 506 ah->reg_ops.rmw = ath9k_reg_rmw; 627 507 priv->ah = ah; 628 508 ··· 809 685 MAJOR_VERSION_REQ, MINOR_VERSION_REQ); 810 686 return -EINVAL; 811 687 } 688 + 689 + if (priv->fw_version_major == 1 && priv->fw_version_minor < 4) 690 + set_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags); 691 + 692 + dev_info(priv->dev, "FW RMW support: %s\n", 693 + test_bit(HTC_FWFLAG_NO_RMW, &priv->fw_flags) ? "Off" : "On"); 812 694 813 695 return 0; 814 696 }
+8
drivers/net/wireless/ath/ath9k/hw-ops.h
··· 108 108 ath9k_hw_ops(ah)->set_bt_ant_diversity(ah, enable); 109 109 } 110 110 111 + static inline bool ath9k_hw_is_aic_enabled(struct ath_hw *ah) 112 + { 113 + if (ath9k_hw_private_ops(ah)->is_aic_enabled) 114 + return ath9k_hw_private_ops(ah)->is_aic_enabled(ah); 115 + 116 + return false; 117 + } 118 + 111 119 #endif 112 120 113 121 /* Private hardware call ops */
+37
drivers/net/wireless/ath/ath9k/hw.c
··· 121 121 REGWRITE_BUFFER_FLUSH(ah); 122 122 } 123 123 124 + void ath9k_hw_read_array(struct ath_hw *ah, u32 array[][2], int size) 125 + { 126 + u32 *tmp_reg_list, *tmp_data; 127 + int i; 128 + 129 + tmp_reg_list = kmalloc(size * sizeof(u32), GFP_KERNEL); 130 + if (!tmp_reg_list) { 131 + dev_err(ah->dev, "%s: tmp_reg_list: alloc filed\n", __func__); 132 + return; 133 + } 134 + 135 + tmp_data = kmalloc(size * sizeof(u32), GFP_KERNEL); 136 + if (!tmp_data) { 137 + dev_err(ah->dev, "%s tmp_data: alloc filed\n", __func__); 138 + goto error_tmp_data; 139 + } 140 + 141 + for (i = 0; i < size; i++) 142 + tmp_reg_list[i] = array[i][0]; 143 + 144 + REG_READ_MULTI(ah, tmp_reg_list, tmp_data, size); 145 + 146 + for (i = 0; i < size; i++) 147 + array[i][1] = tmp_data[i]; 148 + 149 + kfree(tmp_data); 150 + error_tmp_data: 151 + kfree(tmp_reg_list); 152 + } 153 + 124 154 u32 ath9k_hw_reverse_bits(u32 val, u32 n) 125 155 { 126 156 u32 retval; ··· 395 365 ah->config.rimt_last = 250; 396 366 ah->config.rimt_first = 700; 397 367 } 368 + 369 + if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) 370 + ah->config.pll_pwrsave = 7; 398 371 399 372 /* 400 373 * We need this for PCI devices only (Cardbus, PCI, miniPCI) ··· 1230 1197 u32 mask = AR_STA_ID1_STA_AP | AR_STA_ID1_ADHOC; 1231 1198 u32 set = AR_STA_ID1_KSRCH_MODE; 1232 1199 1200 + ENABLE_REG_RMW_BUFFER(ah); 1233 1201 switch (opmode) { 1234 1202 case NL80211_IFTYPE_ADHOC: 1235 1203 if (!AR_SREV_9340_13(ah)) { ··· 1252 1218 break; 1253 1219 } 1254 1220 REG_RMW(ah, AR_STA_ID1, set, mask); 1221 + REG_RMW_BUFFER_FLUSH(ah); 1255 1222 } 1256 1223 1257 1224 void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled, ··· 1965 1930 if (!ath9k_hw_mci_is_enabled(ah)) 1966 1931 REG_WRITE(ah, AR_OBS, 8); 1967 1932 1933 + ENABLE_REG_RMW_BUFFER(ah); 1968 1934 if (ah->config.rx_intr_mitigation) { 1969 1935 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, ah->config.rimt_last); 1970 1936 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, ah->config.rimt_first); 
··· 1975 1939 REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_LAST, 300); 1976 1940 REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_FIRST, 750); 1977 1941 } 1942 + REG_RMW_BUFFER_FLUSH(ah); 1978 1943 1979 1944 ath9k_hw_init_bb(ah, chan); 1980 1945
+30 -1
drivers/net/wireless/ath/ath9k/hw.h
··· 100 100 (_ah)->reg_ops.write_flush((_ah)); \ 101 101 } while (0) 102 102 103 + #define ENABLE_REG_RMW_BUFFER(_ah) \ 104 + do { \ 105 + if ((_ah)->reg_ops.enable_rmw_buffer) \ 106 + (_ah)->reg_ops.enable_rmw_buffer((_ah)); \ 107 + } while (0) 108 + 109 + #define REG_RMW_BUFFER_FLUSH(_ah) \ 110 + do { \ 111 + if ((_ah)->reg_ops.rmw_flush) \ 112 + (_ah)->reg_ops.rmw_flush((_ah)); \ 113 + } while (0) 114 + 103 115 #define PR_EEP(_s, _val) \ 104 116 do { \ 105 117 len += scnprintf(buf + len, size - len, "%20s : %10d\n",\ ··· 138 126 139 127 #define REG_WRITE_ARRAY(iniarray, column, regWr) \ 140 128 ath9k_hw_write_array(ah, iniarray, column, &(regWr)) 129 + #define REG_READ_ARRAY(ah, array, size) \ 130 + ath9k_hw_read_array(ah, array, size) 141 131 142 132 #define AR_GPIO_OUTPUT_MUX_AS_OUTPUT 0 143 133 #define AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED 1 ··· 323 309 HW_MAC_HANG = BIT(5), 324 310 }; 325 311 312 + #define AR_PCIE_PLL_PWRSAVE_CONTROL BIT(0) 313 + #define AR_PCIE_PLL_PWRSAVE_ON_D3 BIT(1) 314 + #define AR_PCIE_PLL_PWRSAVE_ON_D0 BIT(2) 315 + #define AR_PCIE_CDR_PWRSAVE_ON_D3 BIT(3) 316 + #define AR_PCIE_CDR_PWRSAVE_ON_D0 BIT(4) 317 + 326 318 struct ath9k_ops_config { 327 319 int dma_beacon_response_time; 328 320 int sw_beacon_response_time; ··· 355 335 u32 ant_ctrl_comm2g_switch_enable; 356 336 bool xatten_margin_cfg; 357 337 bool alt_mingainidx; 358 - bool no_pll_pwrsave; 338 + u8 pll_pwrsave; 359 339 bool tx_gain_buffalo; 360 340 bool led_active_high; 361 341 }; ··· 667 647 668 648 /* ANI */ 669 649 void (*ani_cache_ini_regs)(struct ath_hw *ah); 650 + 651 + #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT 652 + bool (*is_aic_enabled)(struct ath_hw *ah); 653 + #endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */ 670 654 }; 671 655 672 656 /** ··· 1032 1008 bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout); 1033 1009 void ath9k_hw_write_array(struct ath_hw *ah, const struct ar5416IniArray *array, 1034 1010 int column, unsigned int *writecnt); 1011 + void 
ath9k_hw_read_array(struct ath_hw *ah, u32 array[][2], int size); 1035 1012 u32 ath9k_hw_reverse_bits(u32 val, u32 n); 1036 1013 u16 ath9k_hw_computetxtime(struct ath_hw *ah, 1037 1014 u8 phy, int kbps, ··· 1142 1117 void ath9k_hw_setslottime(struct ath_hw *ah, u32 us); 1143 1118 1144 1119 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT 1120 + void ar9003_hw_attach_aic_ops(struct ath_hw *ah); 1145 1121 static inline bool ath9k_hw_btcoex_is_enabled(struct ath_hw *ah) 1146 1122 { 1147 1123 return ah->btcoex_hw.enabled; ··· 1160 1134 return ah->btcoex_hw.scheme; 1161 1135 } 1162 1136 #else 1137 + static inline void ar9003_hw_attach_aic_ops(struct ath_hw *ah) 1138 + { 1139 + } 1163 1140 static inline bool ath9k_hw_btcoex_is_enabled(struct ath_hw *ah) 1164 1141 { 1165 1142 return false;
+21 -2
drivers/net/wireless/ath/ath9k/init.c
··· 141 141 return val; 142 142 } 143 143 144 + static void ath9k_multi_ioread32(void *hw_priv, u32 *addr, 145 + u32 *val, u16 count) 146 + { 147 + int i; 148 + 149 + for (i = 0; i < count; i++) 150 + val[i] = ath9k_ioread32(hw_priv, addr[i]); 151 + } 152 + 153 + 144 154 static unsigned int __ath9k_reg_rmw(struct ath_softc *sc, u32 reg_offset, 145 155 u32 set, u32 clr) 146 156 { ··· 447 437 ath_info(common, "Enable WAR for ASPM D3/L1\n"); 448 438 } 449 439 440 + /* 441 + * The default value of pll_pwrsave is 1. 442 + * For certain AR9485 cards, it is set to 0. 443 + * For AR9462, AR9565 it's set to 7. 444 + */ 445 + ah->config.pll_pwrsave = 1; 446 + 450 447 if (sc->driver_data & ATH9K_PCI_NO_PLL_PWRSAVE) { 451 - ah->config.no_pll_pwrsave = true; 448 + ah->config.pll_pwrsave = 0; 452 449 ath_info(common, "Disable PLL PowerSave\n"); 453 450 } 454 451 ··· 547 530 ah->hw = sc->hw; 548 531 ah->hw_version.devid = devid; 549 532 ah->reg_ops.read = ath9k_ioread32; 533 + ah->reg_ops.multi_read = ath9k_multi_ioread32; 550 534 ah->reg_ops.write = ath9k_iowrite32; 551 535 ah->reg_ops.rmw = ath9k_reg_rmw; 552 536 pCap = &ah->caps; ··· 781 763 .num_different_channels = 1, 782 764 .beacon_int_infra_match = true, 783 765 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | 784 - BIT(NL80211_CHAN_WIDTH_20), 766 + BIT(NL80211_CHAN_WIDTH_20) | 767 + BIT(NL80211_CHAN_WIDTH_40), 785 768 } 786 769 #endif 787 770 };
+168
drivers/net/wireless/ath/ath9k/reg_aic.h
··· 1 + /* 2 + * Copyright (c) 2015 Qualcomm Atheros Inc. 3 + * 4 + * Permission to use, copy, modify, and/or distribute this software for any 5 + * purpose with or without fee is hereby granted, provided that the above 6 + * copyright notice and this permission notice appear in all copies. 7 + * 8 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 9 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 10 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 11 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 12 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 13 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 14 + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 + */ 16 + 17 + #ifndef REG_AIC_H 18 + #define REG_AIC_H 19 + 20 + #define AR_SM_BASE 0xa200 21 + #define AR_SM1_BASE 0xb200 22 + #define AR_AGC_BASE 0x9e00 23 + 24 + #define AR_PHY_AIC_CTRL_0_B0 (AR_SM_BASE + 0x4b0) 25 + #define AR_PHY_AIC_CTRL_1_B0 (AR_SM_BASE + 0x4b4) 26 + #define AR_PHY_AIC_CTRL_2_B0 (AR_SM_BASE + 0x4b8) 27 + #define AR_PHY_AIC_CTRL_3_B0 (AR_SM_BASE + 0x4bc) 28 + #define AR_PHY_AIC_CTRL_4_B0 (AR_SM_BASE + 0x4c0) 29 + 30 + #define AR_PHY_AIC_STAT_0_B0 (AR_SM_BASE + 0x4c4) 31 + #define AR_PHY_AIC_STAT_1_B0 (AR_SM_BASE + 0x4c8) 32 + #define AR_PHY_AIC_STAT_2_B0 (AR_SM_BASE + 0x4cc) 33 + 34 + #define AR_PHY_AIC_CTRL_0_B1 (AR_SM1_BASE + 0x4b0) 35 + #define AR_PHY_AIC_CTRL_1_B1 (AR_SM1_BASE + 0x4b4) 36 + #define AR_PHY_AIC_CTRL_4_B1 (AR_SM1_BASE + 0x4c0) 37 + 38 + #define AR_PHY_AIC_STAT_0_B1 (AR_SM1_BASE + 0x4c4) 39 + #define AR_PHY_AIC_STAT_1_B1 (AR_SM1_BASE + 0x4c8) 40 + #define AR_PHY_AIC_STAT_2_B1 (AR_SM1_BASE + 0x4cc) 41 + 42 + #define AR_PHY_AIC_SRAM_ADDR_B0 (AR_SM_BASE + 0x5f0) 43 + #define AR_PHY_AIC_SRAM_DATA_B0 (AR_SM_BASE + 0x5f4) 44 + 45 + #define AR_PHY_AIC_SRAM_ADDR_B1 (AR_SM1_BASE + 0x5f0) 46 + #define 
AR_PHY_AIC_SRAM_DATA_B1 (AR_SM1_BASE + 0x5f4) 47 + 48 + #define AR_PHY_BT_COEX_4 (AR_AGC_BASE + 0x60) 49 + #define AR_PHY_BT_COEX_5 (AR_AGC_BASE + 0x64) 50 + 51 + /* AIC fields */ 52 + #define AR_PHY_AIC_MON_ENABLE 0x80000000 53 + #define AR_PHY_AIC_MON_ENABLE_S 31 54 + #define AR_PHY_AIC_CAL_MAX_HOP_COUNT 0x7F000000 55 + #define AR_PHY_AIC_CAL_MAX_HOP_COUNT_S 24 56 + #define AR_PHY_AIC_CAL_MIN_VALID_COUNT 0x00FE0000 57 + #define AR_PHY_AIC_CAL_MIN_VALID_COUNT_S 17 58 + #define AR_PHY_AIC_F_WLAN 0x0001FC00 59 + #define AR_PHY_AIC_F_WLAN_S 10 60 + #define AR_PHY_AIC_CAL_CH_VALID_RESET 0x00000200 61 + #define AR_PHY_AIC_CAL_CH_VALID_RESET_S 9 62 + #define AR_PHY_AIC_CAL_ENABLE 0x00000100 63 + #define AR_PHY_AIC_CAL_ENABLE_S 8 64 + #define AR_PHY_AIC_BTTX_PWR_THR 0x000000FE 65 + #define AR_PHY_AIC_BTTX_PWR_THR_S 1 66 + #define AR_PHY_AIC_ENABLE 0x00000001 67 + #define AR_PHY_AIC_ENABLE_S 0 68 + #define AR_PHY_AIC_CAL_BT_REF_DELAY 0x00F00000 69 + #define AR_PHY_AIC_CAL_BT_REF_DELAY_S 20 70 + #define AR_PHY_AIC_BT_IDLE_CFG 0x00080000 71 + #define AR_PHY_AIC_BT_IDLE_CFG_S 19 72 + #define AR_PHY_AIC_STDBY_COND 0x00060000 73 + #define AR_PHY_AIC_STDBY_COND_S 17 74 + #define AR_PHY_AIC_STDBY_ROT_ATT_DB 0x0001F800 75 + #define AR_PHY_AIC_STDBY_ROT_ATT_DB_S 11 76 + #define AR_PHY_AIC_STDBY_COM_ATT_DB 0x00000700 77 + #define AR_PHY_AIC_STDBY_COM_ATT_DB_S 8 78 + #define AR_PHY_AIC_RSSI_MAX 0x000000F0 79 + #define AR_PHY_AIC_RSSI_MAX_S 4 80 + #define AR_PHY_AIC_RSSI_MIN 0x0000000F 81 + #define AR_PHY_AIC_RSSI_MIN_S 0 82 + #define AR_PHY_AIC_RADIO_DELAY 0x7F000000 83 + #define AR_PHY_AIC_RADIO_DELAY_S 24 84 + #define AR_PHY_AIC_CAL_STEP_SIZE_CORR 0x00F00000 85 + #define AR_PHY_AIC_CAL_STEP_SIZE_CORR_S 20 86 + #define AR_PHY_AIC_CAL_ROT_IDX_CORR 0x000F8000 87 + #define AR_PHY_AIC_CAL_ROT_IDX_CORR_S 15 88 + #define AR_PHY_AIC_CAL_CONV_CHECK_FACTOR 0x00006000 89 + #define AR_PHY_AIC_CAL_CONV_CHECK_FACTOR_S 13 90 + #define AR_PHY_AIC_ROT_IDX_COUNT_MAX 0x00001C00 91 + #define 
AR_PHY_AIC_ROT_IDX_COUNT_MAX_S 10 92 + #define AR_PHY_AIC_CAL_SYNTH_TOGGLE 0x00000200 93 + #define AR_PHY_AIC_CAL_SYNTH_TOGGLE_S 9 94 + #define AR_PHY_AIC_CAL_SYNTH_AFTER_BTRX 0x00000100 95 + #define AR_PHY_AIC_CAL_SYNTH_AFTER_BTRX_S 8 96 + #define AR_PHY_AIC_CAL_SYNTH_SETTLING 0x000000FF 97 + #define AR_PHY_AIC_CAL_SYNTH_SETTLING_S 0 98 + #define AR_PHY_AIC_MON_MAX_HOP_COUNT 0x07F00000 99 + #define AR_PHY_AIC_MON_MAX_HOP_COUNT_S 20 100 + #define AR_PHY_AIC_MON_MIN_STALE_COUNT 0x000FE000 101 + #define AR_PHY_AIC_MON_MIN_STALE_COUNT_S 13 102 + #define AR_PHY_AIC_MON_PWR_EST_LONG 0x00001000 103 + #define AR_PHY_AIC_MON_PWR_EST_LONG_S 12 104 + #define AR_PHY_AIC_MON_PD_TALLY_SCALING 0x00000C00 105 + #define AR_PHY_AIC_MON_PD_TALLY_SCALING_S 10 106 + #define AR_PHY_AIC_MON_PERF_THR 0x000003E0 107 + #define AR_PHY_AIC_MON_PERF_THR_S 5 108 + #define AR_PHY_AIC_CAL_TARGET_MAG_SETTING 0x00000018 109 + #define AR_PHY_AIC_CAL_TARGET_MAG_SETTING_S 3 110 + #define AR_PHY_AIC_CAL_PERF_CHECK_FACTOR 0x00000006 111 + #define AR_PHY_AIC_CAL_PERF_CHECK_FACTOR_S 1 112 + #define AR_PHY_AIC_CAL_PWR_EST_LONG 0x00000001 113 + #define AR_PHY_AIC_CAL_PWR_EST_LONG_S 0 114 + #define AR_PHY_AIC_MON_DONE 0x80000000 115 + #define AR_PHY_AIC_MON_DONE_S 31 116 + #define AR_PHY_AIC_MON_ACTIVE 0x40000000 117 + #define AR_PHY_AIC_MON_ACTIVE_S 30 118 + #define AR_PHY_AIC_MEAS_COUNT 0x3F000000 119 + #define AR_PHY_AIC_MEAS_COUNT_S 24 120 + #define AR_PHY_AIC_CAL_ANT_ISO_EST 0x00FC0000 121 + #define AR_PHY_AIC_CAL_ANT_ISO_EST_S 18 122 + #define AR_PHY_AIC_CAL_HOP_COUNT 0x0003F800 123 + #define AR_PHY_AIC_CAL_HOP_COUNT_S 11 124 + #define AR_PHY_AIC_CAL_VALID_COUNT 0x000007F0 125 + #define AR_PHY_AIC_CAL_VALID_COUNT_S 4 126 + #define AR_PHY_AIC_CAL_BT_TOO_WEAK_ERR 0x00000008 127 + #define AR_PHY_AIC_CAL_BT_TOO_WEAK_ERR_S 3 128 + #define AR_PHY_AIC_CAL_BT_TOO_STRONG_ERR 0x00000004 129 + #define AR_PHY_AIC_CAL_BT_TOO_STRONG_ERR_S 2 130 + #define AR_PHY_AIC_CAL_DONE 0x00000002 131 + #define 
AR_PHY_AIC_CAL_DONE_S 1 132 + #define AR_PHY_AIC_CAL_ACTIVE 0x00000001 133 + #define AR_PHY_AIC_CAL_ACTIVE_S 0 134 + 135 + #define AR_PHY_AIC_MEAS_MAG_MIN 0xFFC00000 136 + #define AR_PHY_AIC_MEAS_MAG_MIN_S 22 137 + #define AR_PHY_AIC_MON_STALE_COUNT 0x003F8000 138 + #define AR_PHY_AIC_MON_STALE_COUNT_S 15 139 + #define AR_PHY_AIC_MON_HOP_COUNT 0x00007F00 140 + #define AR_PHY_AIC_MON_HOP_COUNT_S 8 141 + #define AR_PHY_AIC_CAL_AIC_SM 0x000000F8 142 + #define AR_PHY_AIC_CAL_AIC_SM_S 3 143 + #define AR_PHY_AIC_SM 0x00000007 144 + #define AR_PHY_AIC_SM_S 0 145 + #define AR_PHY_AIC_SRAM_VALID 0x00000001 146 + #define AR_PHY_AIC_SRAM_VALID_S 0 147 + #define AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB 0x0000007E 148 + #define AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB_S 1 149 + #define AR_PHY_AIC_SRAM_VGA_QUAD_SIGN 0x00000080 150 + #define AR_PHY_AIC_SRAM_VGA_QUAD_SIGN_S 7 151 + #define AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB 0x00003F00 152 + #define AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB_S 8 153 + #define AR_PHY_AIC_SRAM_VGA_DIR_SIGN 0x00004000 154 + #define AR_PHY_AIC_SRAM_VGA_DIR_SIGN_S 14 155 + #define AR_PHY_AIC_SRAM_COM_ATT_6DB 0x00038000 156 + #define AR_PHY_AIC_SRAM_COM_ATT_6DB_S 15 157 + #define AR_PHY_AIC_CAL_ROT_ATT_DB_EST_ISO 0x0000E000 158 + #define AR_PHY_AIC_CAL_ROT_ATT_DB_EST_ISO_S 13 159 + #define AR_PHY_AIC_CAL_COM_ATT_DB_EST_ISO 0x00001E00 160 + #define AR_PHY_AIC_CAL_COM_ATT_DB_EST_ISO_S 9 161 + #define AR_PHY_AIC_CAL_ISO_EST_INIT_SETTING 0x000001F8 162 + #define AR_PHY_AIC_CAL_ISO_EST_INIT_SETTING_S 3 163 + #define AR_PHY_AIC_CAL_COM_ATT_DB_BACKOFF 0x00000006 164 + #define AR_PHY_AIC_CAL_COM_ATT_DB_BACKOFF_S 1 165 + #define AR_PHY_AIC_CAL_COM_ATT_DB_FIXED 0x00000001 166 + #define AR_PHY_AIC_CAL_COM_ATT_DB_FIXED_S 0 167 + 168 + #endif /* REG_AIC_H */
+3
drivers/net/wireless/ath/ath9k/wmi.c
··· 61 61 return "WMI_REG_READ_CMDID"; 62 62 case WMI_REG_WRITE_CMDID: 63 63 return "WMI_REG_WRITE_CMDID"; 64 + case WMI_REG_RMW_CMDID: 65 + return "WMI_REG_RMW_CMDID"; 64 66 case WMI_RC_STATE_CHANGE_CMDID: 65 67 return "WMI_RC_STATE_CHANGE_CMDID"; 66 68 case WMI_RC_RATE_UPDATE_CMDID: ··· 103 101 spin_lock_init(&wmi->event_lock); 104 102 mutex_init(&wmi->op_mutex); 105 103 mutex_init(&wmi->multi_write_mutex); 104 + mutex_init(&wmi->multi_rmw_mutex); 106 105 init_completion(&wmi->cmd_wait); 107 106 INIT_LIST_HEAD(&wmi->pending_tx_events); 108 107 tasklet_init(&wmi->wmi_event_tasklet, ath9k_wmi_event_tasklet,
+16
drivers/net/wireless/ath/ath9k/wmi.h
··· 112 112 WMI_TX_STATS_CMDID, 113 113 WMI_RX_STATS_CMDID, 114 114 WMI_BITRATE_MASK_CMDID, 115 + WMI_REG_RMW_CMDID, 115 116 }; 116 117 117 118 enum wmi_event_id { ··· 126 125 }; 127 126 128 127 #define MAX_CMD_NUMBER 62 128 + #define MAX_RMW_CMD_NUMBER 15 129 129 130 130 struct register_write { 131 131 __be32 reg; 132 132 __be32 val; 133 133 }; 134 + 135 + struct register_rmw { 136 + __be32 reg; 137 + __be32 set; 138 + __be32 clr; 139 + } __packed; 134 140 135 141 struct ath9k_htc_tx_event { 136 142 int count; ··· 164 156 165 157 spinlock_t wmi_lock; 166 158 159 + /* multi write section */ 167 160 atomic_t mwrite_cnt; 168 161 struct register_write multi_write[MAX_CMD_NUMBER]; 169 162 u32 multi_write_idx; 170 163 struct mutex multi_write_mutex; 164 + 165 + /* multi rmw section */ 166 + atomic_t m_rmw_cnt; 167 + struct register_rmw multi_rmw[MAX_RMW_CMD_NUMBER]; 168 + u32 multi_rmw_idx; 169 + struct mutex multi_rmw_mutex; 170 + 171 171 }; 172 172 173 173 struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv);
+1 -1
drivers/net/wireless/ath/dfs_pattern_detector.c
··· 289 289 "count=%d, count_false=%d\n", 290 290 event->freq, pd->rs->type_id, 291 291 ps->pri, ps->count, ps->count_falses); 292 - channel_detector_reset(dpd, cd); 292 + pd->reset(pd, dpd->last_pulse_ts); 293 293 return true; 294 294 } 295 295 }
+30 -4
drivers/net/wireless/ath/wil6210/cfg80211.c
··· 14 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 15 */ 16 16 17 + #include <linux/etherdevice.h> 17 18 #include "wil6210.h" 18 19 #include "wmi.h" 19 20 ··· 218 217 if (cid < 0) 219 218 return -ENOENT; 220 219 221 - memcpy(mac, wil->sta[cid].addr, ETH_ALEN); 220 + ether_addr_copy(mac, wil->sta[cid].addr); 222 221 wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid); 223 222 224 223 rc = wil_cid_fill_sinfo(wil, cid, sinfo); ··· 479 478 } 480 479 conn.channel = ch - 1; 481 480 482 - memcpy(conn.bssid, bss->bssid, ETH_ALEN); 483 - memcpy(conn.dst_mac, bss->bssid, ETH_ALEN); 481 + ether_addr_copy(conn.bssid, bss->bssid); 482 + ether_addr_copy(conn.dst_mac, bss->bssid); 484 483 485 484 set_bit(wil_status_fwconnecting, wil->status); 486 485 ··· 783 782 rc = wmi_pcp_start(wil, info->beacon_interval, wmi_nettype, 784 783 channel->hw_value); 785 784 if (rc) 786 - netif_carrier_off(ndev); 785 + goto err_pcp_start; 787 786 787 + rc = wil_bcast_init(wil); 788 + if (rc) 789 + goto err_bcast; 790 + 791 + goto out; /* success */ 792 + err_bcast: 793 + wmi_pcp_stop(wil); 794 + err_pcp_start: 795 + netif_carrier_off(ndev); 788 796 out: 789 797 mutex_unlock(&wil->mutex); 790 798 return rc; ··· 927 917 return 0; 928 918 } 929 919 920 + static int wil_cfg80211_change_bss(struct wiphy *wiphy, 921 + struct net_device *dev, 922 + struct bss_parameters *params) 923 + { 924 + struct wil6210_priv *wil = wiphy_to_wil(wiphy); 925 + 926 + if (params->ap_isolate >= 0) { 927 + wil_dbg_misc(wil, "%s(ap_isolate %d => %d)\n", __func__, 928 + wil->ap_isolate, params->ap_isolate); 929 + wil->ap_isolate = params->ap_isolate; 930 + } 931 + 932 + return 0; 933 + } 934 + 930 935 static struct cfg80211_ops wil_cfg80211_ops = { 931 936 .scan = wil_cfg80211_scan, 932 937 .connect = wil_cfg80211_connect, ··· 962 937 .stop_ap = wil_cfg80211_stop_ap, 963 938 .del_station = wil_cfg80211_del_station, 964 939 .probe_client = wil_cfg80211_probe_client, 940 + .change_bss = 
wil_cfg80211_change_bss, 965 941 }; 966 942 967 943 static void wil_wiphy_init(struct wiphy *wiphy)
+13 -6
drivers/net/wireless/ath/wil6210/debugfs.c
··· 121 121 122 122 snprintf(name, sizeof(name), "tx_%2d", i); 123 123 124 - seq_printf(s, 125 - "\n%pM CID %d TID %d BACK([%d] %d TU A%s) [%3d|%3d] idle %s\n", 126 - wil->sta[cid].addr, cid, tid, 127 - txdata->agg_wsize, txdata->agg_timeout, 128 - txdata->agg_amsdu ? "+" : "-", 129 - used, avail, sidle); 124 + if (cid < WIL6210_MAX_CID) 125 + seq_printf(s, 126 + "\n%pM CID %d TID %d BACK([%u] %u TU A%s) [%3d|%3d] idle %s\n", 127 + wil->sta[cid].addr, cid, tid, 128 + txdata->agg_wsize, 129 + txdata->agg_timeout, 130 + txdata->agg_amsdu ? "+" : "-", 131 + used, avail, sidle); 132 + else 133 + seq_printf(s, 134 + "\nBroadcast [%3d|%3d] idle %s\n", 135 + used, avail, sidle); 130 136 131 137 wil_print_vring(s, wil, name, vring, '_', 'H'); 132 138 } ··· 1411 1405 WIL_FIELD(fw_version, S_IRUGO, doff_u32), 1412 1406 WIL_FIELD(hw_version, S_IRUGO, doff_x32), 1413 1407 WIL_FIELD(recovery_count, S_IRUGO, doff_u32), 1408 + WIL_FIELD(ap_isolate, S_IRUGO, doff_u32), 1414 1409 {}, 1415 1410 }; 1416 1411
+36
drivers/net/wireless/ath/wil6210/main.c
··· 68 68 69 69 static uint rx_ring_order = WIL_RX_RING_SIZE_ORDER_DEFAULT; 70 70 static uint tx_ring_order = WIL_TX_RING_SIZE_ORDER_DEFAULT; 71 + static uint bcast_ring_order = WIL_BCAST_RING_SIZE_ORDER_DEFAULT; 71 72 72 73 static int ring_order_set(const char *val, const struct kernel_param *kp) 73 74 { ··· 217 216 switch (wdev->iftype) { 218 217 case NL80211_IFTYPE_STATION: 219 218 case NL80211_IFTYPE_P2P_CLIENT: 219 + wil_bcast_fini(wil); 220 220 netif_tx_stop_all_queues(ndev); 221 221 netif_carrier_off(ndev); 222 222 ··· 362 360 return -EINVAL; 363 361 } 364 362 363 + int wil_bcast_init(struct wil6210_priv *wil) 364 + { 365 + int ri = wil->bcast_vring, rc; 366 + 367 + if ((ri >= 0) && wil->vring_tx[ri].va) 368 + return 0; 369 + 370 + ri = wil_find_free_vring(wil); 371 + if (ri < 0) 372 + return ri; 373 + 374 + rc = wil_vring_init_bcast(wil, ri, 1 << bcast_ring_order); 375 + if (rc == 0) 376 + wil->bcast_vring = ri; 377 + 378 + return rc; 379 + } 380 + 381 + void wil_bcast_fini(struct wil6210_priv *wil) 382 + { 383 + int ri = wil->bcast_vring; 384 + 385 + if (ri < 0) 386 + return; 387 + 388 + wil->bcast_vring = -1; 389 + wil_vring_fini_tx(wil, ri); 390 + } 391 + 365 392 static void wil_connect_worker(struct work_struct *work) 366 393 { 367 394 int rc; ··· 438 407 init_completion(&wil->wmi_call); 439 408 440 409 wil->pending_connect_cid = -1; 410 + wil->bcast_vring = -1; 441 411 setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil); 442 412 setup_timer(&wil->scan_timer, wil_scan_timer_fn, (ulong)wil); 443 413 ··· 688 656 689 657 cancel_work_sync(&wil->disconnect_worker); 690 658 wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false); 659 + wil_bcast_fini(wil); 691 660 692 661 /* prevent NAPI from being scheduled */ 693 662 bitmap_zero(wil->status, wil_status_last); ··· 747 714 748 715 /* init after reset */ 749 716 wil->pending_connect_cid = -1; 717 + wil->ap_isolate = 0; 750 718 reinit_completion(&wil->wmi_ready); 751 719 
reinit_completion(&wil->wmi_call); 752 720 ··· 757 723 758 724 /* we just started MAC, wait for FW ready */ 759 725 rc = wil_wait_for_fw_ready(wil); 726 + if (rc == 0) /* check FW is responsive */ 727 + rc = wmi_echo(wil); 760 728 } 761 729 762 730 return rc;
+2 -2
drivers/net/wireless/ath/wil6210/netdev.c
··· 82 82 wil_rx_handle(wil, &quota); 83 83 done = budget - quota; 84 84 85 - if (done <= 1) { /* burst ends - only one packet processed */ 85 + if (done < budget) { 86 86 napi_complete(napi); 87 87 wil6210_unmask_irq_rx(wil); 88 88 wil_dbg_txrx(wil, "NAPI RX complete\n"); ··· 110 110 tx_done += wil_tx_complete(wil, i); 111 111 } 112 112 113 - if (tx_done <= 1) { /* burst ends - only one packet processed */ 113 + if (tx_done < budget) { 114 114 napi_complete(napi); 115 115 wil6210_unmask_irq_tx(wil); 116 116 wil_dbg_txrx(wil, "NAPI TX complete\n");
-2
drivers/net/wireless/ath/wil6210/pcie_bus.c
··· 246 246 247 247 wil6210_debugfs_init(wil); 248 248 249 - /* check FW is alive */ 250 - wmi_echo(wil); 251 249 252 250 return 0; 253 251
+254 -48
drivers/net/wireless/ath/wil6210/txrx.c
··· 33 33 MODULE_PARM_DESC(rtap_include_phy_info, 34 34 " Include PHY info in the radiotap header, default - no"); 35 35 36 + bool rx_align_2; 37 + module_param(rx_align_2, bool, S_IRUGO); 38 + MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no"); 39 + 40 + static inline uint wil_rx_snaplen(void) 41 + { 42 + return rx_align_2 ? 6 : 0; 43 + } 44 + 36 45 static inline int wil_vring_is_empty(struct vring *vring) 37 46 { 38 47 return vring->swhead == vring->swtail; ··· 218 209 u32 i, int headroom) 219 210 { 220 211 struct device *dev = wil_to_dev(wil); 221 - unsigned int sz = mtu_max + ETH_HLEN; 212 + unsigned int sz = mtu_max + ETH_HLEN + wil_rx_snaplen(); 222 213 struct vring_rx_desc dd, *d = &dd; 223 214 volatile struct vring_rx_desc *_d = &vring->va[i].rx; 224 215 dma_addr_t pa; ··· 374 365 struct vring_rx_desc *d; 375 366 struct sk_buff *skb; 376 367 dma_addr_t pa; 377 - unsigned int sz = mtu_max + ETH_HLEN; 368 + unsigned int snaplen = wil_rx_snaplen(); 369 + unsigned int sz = mtu_max + ETH_HLEN + snaplen; 378 370 u16 dmalen; 379 371 u8 ftype; 380 372 int cid; 373 + int i = (int)vring->swhead; 381 374 struct wil_net_stats *stats; 382 375 383 376 BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb)); ··· 387 376 if (unlikely(wil_vring_is_empty(vring))) 388 377 return NULL; 389 378 390 - _d = &vring->va[vring->swhead].rx; 379 + _d = &vring->va[i].rx; 391 380 if (unlikely(!(_d->dma.status & RX_DMA_STATUS_DU))) { 392 381 /* it is not error, we just reached end of Rx done area */ 393 382 return NULL; 394 383 } 395 384 396 - skb = vring->ctx[vring->swhead].skb; 385 + skb = vring->ctx[i].skb; 386 + vring->ctx[i].skb = NULL; 387 + wil_vring_advance_head(vring, 1); 388 + if (!skb) { 389 + wil_err(wil, "No Rx skb at [%d]\n", i); 390 + return NULL; 391 + } 397 392 d = wil_skb_rxdesc(skb); 398 393 *d = *_d; 399 394 pa = wil_desc_addr(&d->dma.addr); 400 - vring->ctx[vring->swhead].skb = NULL; 401 - wil_vring_advance_head(vring, 1); 402 395 403 396 
dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE); 404 397 dmalen = le16_to_cpu(d->dma.length); 405 398 406 - trace_wil6210_rx(vring->swhead, d); 407 - wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", vring->swhead, dmalen); 399 + trace_wil6210_rx(i, d); 400 + wil_dbg_txrx(wil, "Rx[%3d] : %d bytes\n", i, dmalen); 408 401 wil_hex_dump_txrx("Rx ", DUMP_PREFIX_NONE, 32, 4, 409 402 (const void *)d, sizeof(*d), false); 410 403 ··· 448 433 return NULL; 449 434 } 450 435 451 - if (unlikely(skb->len < ETH_HLEN)) { 436 + if (unlikely(skb->len < ETH_HLEN + snaplen)) { 452 437 wil_err(wil, "Short frame, len = %d\n", skb->len); 453 438 /* TODO: process it (i.e. BAR) */ 454 439 kfree_skb(skb); ··· 468 453 * mis-calculates TCP checksum - if it should be 0x0, 469 454 * it writes 0xffff in violation of RFC 1624 470 455 */ 456 + } 457 + 458 + if (snaplen) { 459 + /* Packet layout 460 + * +-------+-------+---------+------------+------+ 461 + * | SA(6) | DA(6) | SNAP(6) | ETHTYPE(2) | DATA | 462 + * +-------+-------+---------+------------+------+ 463 + * Need to remove SNAP, shifting SA and DA forward 464 + */ 465 + memmove(skb->data + snaplen, skb->data, 2 * ETH_ALEN); 466 + skb_pull(skb, snaplen); 471 467 } 472 468 473 469 return skb; ··· 518 492 */ 519 493 void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) 520 494 { 521 - gro_result_t rc; 495 + gro_result_t rc = GRO_NORMAL; 522 496 struct wil6210_priv *wil = ndev_to_wil(ndev); 497 + struct wireless_dev *wdev = wil_to_wdev(wil); 523 498 unsigned int len = skb->len; 524 499 struct vring_rx_desc *d = wil_skb_rxdesc(skb); 525 - int cid = wil_rxdesc_cid(d); 500 + int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */ 501 + struct ethhdr *eth = (void *)skb->data; 502 + /* here looking for DA, not A1, thus Rxdesc's 'mcast' indication 503 + * is not suitable, need to look at data 504 + */ 505 + int mcast = is_multicast_ether_addr(eth->h_dest); 526 506 struct wil_net_stats *stats = &wil->sta[cid].stats; 507 + struct 
sk_buff *xmit_skb = NULL; 508 + static const char * const gro_res_str[] = { 509 + [GRO_MERGED] = "GRO_MERGED", 510 + [GRO_MERGED_FREE] = "GRO_MERGED_FREE", 511 + [GRO_HELD] = "GRO_HELD", 512 + [GRO_NORMAL] = "GRO_NORMAL", 513 + [GRO_DROP] = "GRO_DROP", 514 + }; 527 515 528 516 skb_orphan(skb); 529 517 530 - rc = napi_gro_receive(&wil->napi_rx, skb); 518 + if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) { 519 + if (mcast) { 520 + /* send multicast frames both to higher layers in 521 + * local net stack and back to the wireless medium 522 + */ 523 + xmit_skb = skb_copy(skb, GFP_ATOMIC); 524 + } else { 525 + int xmit_cid = wil_find_cid(wil, eth->h_dest); 531 526 527 + if (xmit_cid >= 0) { 528 + /* The destination station is associated to 529 + * this AP (in this VLAN), so send the frame 530 + * directly to it and do not pass it to local 531 + * net stack. 532 + */ 533 + xmit_skb = skb; 534 + skb = NULL; 535 + } 536 + } 537 + } 538 + if (xmit_skb) { 539 + /* Send to wireless media and increase priority by 256 to 540 + * keep the received priority instead of reclassifying 541 + * the frame (see cfg80211_classify8021d). 542 + */ 543 + xmit_skb->dev = ndev; 544 + xmit_skb->priority += 256; 545 + xmit_skb->protocol = htons(ETH_P_802_3); 546 + skb_reset_network_header(xmit_skb); 547 + skb_reset_mac_header(xmit_skb); 548 + wil_dbg_txrx(wil, "Rx -> Tx %d bytes\n", len); 549 + dev_queue_xmit(xmit_skb); 550 + } 551 + 552 + if (skb) { /* deliver to local stack */ 553 + 554 + skb->protocol = eth_type_trans(skb, ndev); 555 + rc = napi_gro_receive(&wil->napi_rx, skb); 556 + wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n", 557 + len, gro_res_str[rc]); 558 + } 559 + /* statistics. 
rc set to GRO_NORMAL for AP bridging */ 532 560 if (unlikely(rc == GRO_DROP)) { 533 561 ndev->stats.rx_dropped++; 534 562 stats->rx_dropped++; ··· 592 512 stats->rx_packets++; 593 513 ndev->stats.rx_bytes += len; 594 514 stats->rx_bytes += len; 595 - } 596 - { 597 - static const char * const gro_res_str[] = { 598 - [GRO_MERGED] = "GRO_MERGED", 599 - [GRO_MERGED_FREE] = "GRO_MERGED_FREE", 600 - [GRO_HELD] = "GRO_HELD", 601 - [GRO_NORMAL] = "GRO_NORMAL", 602 - [GRO_DROP] = "GRO_DROP", 603 - }; 604 - wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n", 605 - len, gro_res_str[rc]); 515 + if (mcast) 516 + ndev->stats.multicast++; 606 517 } 607 518 } 608 519 ··· 624 553 skb->protocol = htons(ETH_P_802_2); 625 554 wil_netif_rx_any(skb, ndev); 626 555 } else { 627 - skb->protocol = eth_type_trans(skb, ndev); 628 556 wil_rx_reorder(wil, skb); 629 557 } 630 558 } ··· 749 679 return rc; 750 680 } 751 681 682 + int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size) 683 + { 684 + int rc; 685 + struct wmi_bcast_vring_cfg_cmd cmd = { 686 + .action = cpu_to_le32(WMI_VRING_CMD_ADD), 687 + .vring_cfg = { 688 + .tx_sw_ring = { 689 + .max_mpdu_size = 690 + cpu_to_le16(wil_mtu2macbuf(mtu_max)), 691 + .ring_size = cpu_to_le16(size), 692 + }, 693 + .ringid = id, 694 + .encap_trans_type = WMI_VRING_ENC_TYPE_802_3, 695 + }, 696 + }; 697 + struct { 698 + struct wil6210_mbox_hdr_wmi wmi; 699 + struct wmi_vring_cfg_done_event cmd; 700 + } __packed reply; 701 + struct vring *vring = &wil->vring_tx[id]; 702 + struct vring_tx_data *txdata = &wil->vring_tx_data[id]; 703 + 704 + wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__, 705 + cmd.vring_cfg.tx_sw_ring.max_mpdu_size); 706 + 707 + if (vring->va) { 708 + wil_err(wil, "Tx ring [%d] already allocated\n", id); 709 + rc = -EINVAL; 710 + goto out; 711 + } 712 + 713 + memset(txdata, 0, sizeof(*txdata)); 714 + spin_lock_init(&txdata->lock); 715 + vring->size = size; 716 + rc = wil_vring_alloc(wil, vring); 717 + if (rc) 718 + goto out; 
719 + 720 + wil->vring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */ 721 + wil->vring2cid_tid[id][1] = 0; /* TID */ 722 + 723 + cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa); 724 + 725 + rc = wmi_call(wil, WMI_BCAST_VRING_CFG_CMDID, &cmd, sizeof(cmd), 726 + WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100); 727 + if (rc) 728 + goto out_free; 729 + 730 + if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) { 731 + wil_err(wil, "Tx config failed, status 0x%02x\n", 732 + reply.cmd.status); 733 + rc = -EINVAL; 734 + goto out_free; 735 + } 736 + vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr); 737 + 738 + txdata->enabled = 1; 739 + 740 + return 0; 741 + out_free: 742 + wil_vring_free(wil, vring, 1); 743 + out: 744 + 745 + return rc; 746 + } 747 + 752 748 void wil_vring_fini_tx(struct wil6210_priv *wil, int id) 753 749 { 754 750 struct vring *vring = &wil->vring_tx[id]; ··· 838 702 memset(txdata, 0, sizeof(*txdata)); 839 703 } 840 704 841 - static struct vring *wil_find_tx_vring(struct wil6210_priv *wil, 705 + static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil, 842 706 struct sk_buff *skb) 843 707 { 844 708 int i; ··· 871 735 return NULL; 872 736 } 873 737 874 - static void wil_set_da_for_vring(struct wil6210_priv *wil, 875 - struct sk_buff *skb, int vring_index) 876 - { 877 - struct ethhdr *eth = (void *)skb->data; 878 - int cid = wil->vring2cid_tid[vring_index][0]; 879 - 880 - memcpy(eth->h_dest, wil->sta[cid].addr, ETH_ALEN); 881 - } 882 - 883 738 static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring, 884 739 struct sk_buff *skb); 885 740 ··· 891 764 continue; 892 765 893 766 cid = wil->vring2cid_tid[i][0]; 767 + if (cid >= WIL6210_MAX_CID) /* skip BCAST */ 768 + continue; 769 + 894 770 if (!wil->sta[cid].data_port_open && 895 771 (skb->protocol != cpu_to_be16(ETH_P_PAE))) 896 772 break; ··· 908 778 return NULL; 909 779 } 910 780 911 - /* 912 - * Find 1-st vring and return it; set dest address for this vring in skb 
913 - * duplicate skb and send it to other active vrings 781 + /* Use one of 2 strategies: 782 + * 783 + * 1. New (real broadcast): 784 + * use dedicated broadcast vring 785 + * 2. Old (pseudo-DMS): 786 + * Find 1-st vring and return it; 787 + * duplicate skb and send it to other active vrings; 788 + * in all cases override dest address to unicast peer's address 789 + * Use old strategy when new is not supported yet: 790 + * - for PBSS 791 + * - for secure link 914 792 */ 915 - static struct vring *wil_tx_bcast(struct wil6210_priv *wil, 916 - struct sk_buff *skb) 793 + static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil, 794 + struct sk_buff *skb) 795 + { 796 + struct vring *v; 797 + int i = wil->bcast_vring; 798 + 799 + if (i < 0) 800 + return NULL; 801 + v = &wil->vring_tx[i]; 802 + if (!v->va) 803 + return NULL; 804 + 805 + return v; 806 + } 807 + 808 + static void wil_set_da_for_vring(struct wil6210_priv *wil, 809 + struct sk_buff *skb, int vring_index) 810 + { 811 + struct ethhdr *eth = (void *)skb->data; 812 + int cid = wil->vring2cid_tid[vring_index][0]; 813 + 814 + ether_addr_copy(eth->h_dest, wil->sta[cid].addr); 815 + } 816 + 817 + static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil, 818 + struct sk_buff *skb) 917 819 { 918 820 struct vring *v, *v2; 919 821 struct sk_buff *skb2; 920 822 int i; 921 823 u8 cid; 824 + struct ethhdr *eth = (void *)skb->data; 825 + char *src = eth->h_source; 922 826 923 827 /* find 1-st vring eligible for data */ 924 828 for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { ··· 961 797 continue; 962 798 963 799 cid = wil->vring2cid_tid[i][0]; 800 + if (cid >= WIL6210_MAX_CID) /* skip BCAST */ 801 + continue; 964 802 if (!wil->sta[cid].data_port_open) 803 + continue; 804 + 805 + /* don't Tx back to source when re-routing Rx->Tx at the AP */ 806 + if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN)) 965 807 continue; 966 808 967 809 goto found; ··· 987 817 if (!v2->va) 988 818 continue; 989 819 cid = 
wil->vring2cid_tid[i][0]; 820 + if (cid >= WIL6210_MAX_CID) /* skip BCAST */ 821 + continue; 990 822 if (!wil->sta[cid].data_port_open) 823 + continue; 824 + 825 + if (0 == memcmp(wil->sta[cid].addr, src, ETH_ALEN)) 991 826 continue; 992 827 993 828 skb2 = skb_copy(skb, GFP_ATOMIC); ··· 1006 831 } 1007 832 1008 833 return v; 834 + } 835 + 836 + static struct vring *wil_find_tx_bcast(struct wil6210_priv *wil, 837 + struct sk_buff *skb) 838 + { 839 + struct wireless_dev *wdev = wil->wdev; 840 + 841 + if (wdev->iftype != NL80211_IFTYPE_AP) 842 + return wil_find_tx_bcast_2(wil, skb); 843 + 844 + if (wil->privacy) 845 + return wil_find_tx_bcast_2(wil, skb); 846 + 847 + return wil_find_tx_bcast_1(wil, skb); 1009 848 } 1010 849 1011 850 static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len, ··· 1114 925 uint i = swhead; 1115 926 dma_addr_t pa; 1116 927 int used; 928 + bool mcast = (vring_index == wil->bcast_vring); 929 + uint len = skb_headlen(skb); 1117 930 1118 931 wil_dbg_txrx(wil, "%s()\n", __func__); 1119 932 ··· 1141 950 return -EINVAL; 1142 951 vring->ctx[i].mapped_as = wil_mapped_as_single; 1143 952 /* 1-st segment */ 1144 - wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index); 953 + wil_tx_desc_map(d, pa, len, vring_index); 954 + if (unlikely(mcast)) { 955 + d->mac.d[0] |= BIT(MAC_CFG_DESC_TX_0_MCS_EN_POS); /* MCS 0 */ 956 + if (unlikely(len > WIL_BCAST_MCS0_LIMIT)) { 957 + /* set MCS 1 */ 958 + d->mac.d[0] |= (1 << MAC_CFG_DESC_TX_0_MCS_INDEX_POS); 959 + /* packet mode 2 */ 960 + d->mac.d[1] |= BIT(MAC_CFG_DESC_TX_1_PKT_MODE_EN_POS) | 961 + (2 << MAC_CFG_DESC_TX_1_PKT_MODE_POS); 962 + } 963 + } 1145 964 /* Process TCP/UDP checksum offloading */ 1146 965 if (unlikely(wil_tx_desc_offload_cksum_set(wil, d, skb))) { 1147 966 wil_err(wil, "Tx[%2d] Failed to set cksum, drop packet\n", ··· 1257 1056 { 1258 1057 struct wil6210_priv *wil = ndev_to_wil(ndev); 1259 1058 struct ethhdr *eth = (void *)skb->data; 1059 + bool bcast = 
is_multicast_ether_addr(eth->h_dest); 1260 1060 struct vring *vring; 1261 1061 static bool pr_once_fw; 1262 1062 int rc; ··· 1285 1083 /* in STA mode (ESS), all to same VRING */ 1286 1084 vring = wil_find_tx_vring_sta(wil, skb); 1287 1085 } else { /* direct communication, find matching VRING */ 1288 - if (is_unicast_ether_addr(eth->h_dest)) 1289 - vring = wil_find_tx_vring(wil, skb); 1290 - else 1291 - vring = wil_tx_bcast(wil, skb); 1086 + vring = bcast ? wil_find_tx_bcast(wil, skb) : 1087 + wil_find_tx_ucast(wil, skb); 1292 1088 } 1293 1089 if (unlikely(!vring)) { 1294 1090 wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest); ··· 1349 1149 struct vring_tx_data *txdata = &wil->vring_tx_data[ringid]; 1350 1150 int done = 0; 1351 1151 int cid = wil->vring2cid_tid[ringid][0]; 1352 - struct wil_net_stats *stats = &wil->sta[cid].stats; 1152 + struct wil_net_stats *stats = NULL; 1353 1153 volatile struct vring_tx_desc *_d; 1354 1154 int used_before_complete; 1355 1155 int used_new; ··· 1367 1167 wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid); 1368 1168 1369 1169 used_before_complete = wil_vring_used_tx(vring); 1170 + 1171 + if (cid < WIL6210_MAX_CID) 1172 + stats = &wil->sta[cid].stats; 1370 1173 1371 1174 while (!wil_vring_is_empty(vring)) { 1372 1175 int new_swtail; ··· 1412 1209 if (skb) { 1413 1210 if (likely(d->dma.error == 0)) { 1414 1211 ndev->stats.tx_packets++; 1415 - stats->tx_packets++; 1416 1212 ndev->stats.tx_bytes += skb->len; 1417 - stats->tx_bytes += skb->len; 1213 + if (stats) { 1214 + stats->tx_packets++; 1215 + stats->tx_bytes += skb->len; 1216 + } 1418 1217 } else { 1419 1218 ndev->stats.tx_errors++; 1420 - stats->tx_errors++; 1219 + if (stats) 1220 + stats->tx_errors++; 1421 1221 } 1422 1222 wil_consume_skb(skb, d->dma.error == 0); 1423 1223 }
+8
drivers/net/wireless/ath/wil6210/wil6210.h
··· 28 28 extern unsigned short rx_ring_overflow_thrsh; 29 29 extern int agg_wsize; 30 30 extern u32 vring_idle_trsh; 31 + extern bool rx_align_2; 31 32 32 33 #define WIL_NAME "wil6210" 33 34 #define WIL_FW_NAME "wil6210.fw" /* code */ ··· 50 49 #define WIL_TX_Q_LEN_DEFAULT (4000) 51 50 #define WIL_RX_RING_SIZE_ORDER_DEFAULT (10) 52 51 #define WIL_TX_RING_SIZE_ORDER_DEFAULT (10) 52 + #define WIL_BCAST_RING_SIZE_ORDER_DEFAULT (7) 53 + #define WIL_BCAST_MCS0_LIMIT (1024) /* limit for MCS0 frame size */ 53 54 /* limit ring size in range [32..32k] */ 54 55 #define WIL_RING_SIZE_ORDER_MIN (5) 55 56 #define WIL_RING_SIZE_ORDER_MAX (15) ··· 545 542 u32 monitor_flags; 546 543 u32 privacy; /* secure connection? */ 547 544 int sinfo_gen; 545 + u32 ap_isolate; /* no intra-BSS communication */ 548 546 /* interrupt moderation */ 549 547 u32 tx_max_burst_duration; 550 548 u32 tx_interframe_timeout; ··· 597 593 struct vring_tx_data vring_tx_data[WIL6210_MAX_TX_RINGS]; 598 594 u8 vring2cid_tid[WIL6210_MAX_TX_RINGS][2]; /* [0] - CID, [1] - TID */ 599 595 struct wil_sta_info sta[WIL6210_MAX_CID]; 596 + int bcast_vring; 600 597 /* scan */ 601 598 struct cfg80211_scan_request *scan_request; 602 599 ··· 760 755 int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size, 761 756 int cid, int tid); 762 757 void wil_vring_fini_tx(struct wil6210_priv *wil, int id); 758 + int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size); 759 + int wil_bcast_init(struct wil6210_priv *wil); 760 + void wil_bcast_fini(struct wil6210_priv *wil); 763 761 764 762 netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev); 765 763 int wil_tx_complete(struct wil6210_priv *wil, int ringid);
+11 -5
drivers/net/wireless/ath/wil6210/wmi.c
··· 466 466 467 467 /* FIXME FW can transmit only ucast frames to peer */ 468 468 /* FIXME real ring_id instead of hard coded 0 */ 469 - memcpy(wil->sta[evt->cid].addr, evt->bssid, ETH_ALEN); 469 + ether_addr_copy(wil->sta[evt->cid].addr, evt->bssid); 470 470 wil->sta[evt->cid].status = wil_sta_conn_pending; 471 471 472 472 wil->pending_connect_cid = evt->cid; ··· 524 524 } 525 525 526 526 eth = (struct ethhdr *)skb_put(skb, ETH_HLEN); 527 - memcpy(eth->h_dest, ndev->dev_addr, ETH_ALEN); 528 - memcpy(eth->h_source, evt->src_mac, ETH_ALEN); 527 + ether_addr_copy(eth->h_dest, ndev->dev_addr); 528 + ether_addr_copy(eth->h_source, evt->src_mac); 529 529 eth->h_proto = cpu_to_be16(ETH_P_PAE); 530 530 memcpy(skb_put(skb, eapol_len), evt->eapol, eapol_len); 531 531 skb->protocol = eth_type_trans(skb, ndev); ··· 851 851 { 852 852 struct wmi_set_mac_address_cmd cmd; 853 853 854 - memcpy(cmd.mac, addr, ETH_ALEN); 854 + ether_addr_copy(cmd.mac, addr); 855 855 856 856 wil_dbg_wmi(wil, "Set MAC %pM\n", addr); 857 857 ··· 1109 1109 */ 1110 1110 cmd.l3_l4_ctrl |= (1 << L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS); 1111 1111 } 1112 + 1113 + if (rx_align_2) 1114 + cmd.l2_802_3_offload_ctrl |= 1115 + L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK; 1116 + 1112 1117 /* typical time for secure PCP is 840ms */ 1113 1118 rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd), 1114 1119 WMI_CFG_RX_CHAIN_DONE_EVENTID, &evt, sizeof(evt), 2000); ··· 1162 1157 struct wmi_disconnect_sta_cmd cmd = { 1163 1158 .disconnect_reason = cpu_to_le16(reason), 1164 1159 }; 1165 - memcpy(cmd.dst_mac, mac, ETH_ALEN); 1160 + 1161 + ether_addr_copy(cmd.dst_mac, mac); 1166 1162 1167 1163 wil_dbg_wmi(wil, "%s(%pM, reason %d)\n", __func__, mac, reason); 1168 1164
+21 -2
drivers/net/wireless/ath/wil6210/wmi.h
··· 70 70 WMI_SET_UCODE_IDLE_CMDID = 0x0813, 71 71 WMI_SET_WORK_MODE_CMDID = 0x0815, 72 72 WMI_LO_LEAKAGE_CALIB_CMDID = 0x0816, 73 - WMI_MARLON_R_ACTIVATE_CMDID = 0x0817, 74 73 WMI_MARLON_R_READ_CMDID = 0x0818, 75 74 WMI_MARLON_R_WRITE_CMDID = 0x0819, 76 75 WMI_MARLON_R_TXRX_SEL_CMDID = 0x081a, ··· 79 80 WMI_RF_RX_TEST_CMDID = 0x081e, 80 81 WMI_CFG_RX_CHAIN_CMDID = 0x0820, 81 82 WMI_VRING_CFG_CMDID = 0x0821, 83 + WMI_BCAST_VRING_CFG_CMDID = 0x0822, 82 84 WMI_VRING_BA_EN_CMDID = 0x0823, 83 85 WMI_VRING_BA_DIS_CMDID = 0x0824, 84 86 WMI_RCP_ADDBA_RESP_CMDID = 0x0825, ··· 99 99 WMI_BF_TXSS_MGMT_CMDID = 0x0837, 100 100 WMI_BF_SM_MGMT_CMDID = 0x0838, 101 101 WMI_BF_RXSS_MGMT_CMDID = 0x0839, 102 + WMI_BF_TRIG_CMDID = 0x083A, 102 103 WMI_SET_SECTORS_CMDID = 0x0849, 103 104 WMI_MAINTAIN_PAUSE_CMDID = 0x0850, 104 105 WMI_MAINTAIN_RESUME_CMDID = 0x0851, ··· 597 596 } __packed; 598 597 599 598 /* 599 + * WMI_BCAST_VRING_CFG_CMDID 600 + */ 601 + struct wmi_bcast_vring_cfg { 602 + struct wmi_sw_ring_cfg tx_sw_ring; 603 + u8 ringid; /* 0-23 vrings */ 604 + u8 encap_trans_type; 605 + u8 ds_cfg; /* 802.3 DS cfg */ 606 + u8 nwifi_ds_trans_type; 607 + } __packed; 608 + 609 + struct wmi_bcast_vring_cfg_cmd { 610 + __le32 action; 611 + struct wmi_bcast_vring_cfg vring_cfg; 612 + } __packed; 613 + 614 + /* 600 615 * WMI_VRING_BA_EN_CMDID 601 616 */ 602 617 struct wmi_vring_ba_en_cmd { ··· 704 687 #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_POS (0) 705 688 #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_LEN (1) 706 689 #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_MSK (0x1) 690 + #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_POS (1) 691 + #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_LEN (1) 692 + #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK (0x2) 707 693 u8 l2_802_3_offload_ctrl; 708 694 709 695 #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_POS (0) ··· 861 841 WMI_IQ_RX_CALIB_DONE_EVENTID = 0x1812, 862 842 WMI_SET_WORK_MODE_DONE_EVENTID = 0x1815, 863 843 WMI_LO_LEAKAGE_CALIB_DONE_EVENTID = 
0x1816, 864 - WMI_MARLON_R_ACTIVATE_DONE_EVENTID = 0x1817, 865 844 WMI_MARLON_R_READ_DONE_EVENTID = 0x1818, 866 845 WMI_MARLON_R_WRITE_DONE_EVENTID = 0x1819, 867 846 WMI_MARLON_R_TXRX_SEL_DONE_EVENTID = 0x181a,
+1 -1
drivers/net/wireless/b43/main.c
··· 4866 4866 switch (dev->dev->bus_type) { 4867 4867 #ifdef CONFIG_B43_BCMA 4868 4868 case B43_BUS_BCMA: 4869 - bcma_core_pci_irq_ctl(dev->dev->bdev->bus, 4869 + bcma_host_pci_irq_ctl(dev->dev->bdev->bus, 4870 4870 dev->dev->bdev, true); 4871 4871 bcma_host_pci_up(dev->dev->bdev->bus); 4872 4872 break;
+6 -2
drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
··· 29 29 #include <linux/mmc/host.h> 30 30 #include <linux/platform_device.h> 31 31 #include <linux/platform_data/brcmfmac-sdio.h> 32 + #include <linux/pm_runtime.h> 32 33 #include <linux/suspend.h> 33 34 #include <linux/errno.h> 34 35 #include <linux/module.h> ··· 1007 1006 sg_free_table(&sdiodev->sgtable); 1008 1007 sdiodev->sbwad = 0; 1009 1008 1009 + pm_runtime_allow(sdiodev->func[1]->card->host->parent); 1010 1010 return 0; 1011 1011 } 1012 1012 ··· 1076 1074 ret = -ENODEV; 1077 1075 goto out; 1078 1076 } 1079 - 1077 + pm_runtime_forbid(host->parent); 1080 1078 out: 1081 1079 if (ret) 1082 1080 brcmf_sdiod_remove(sdiodev); ··· 1098 1096 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341), 1099 1097 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362), 1100 1098 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339), 1099 + BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430), 1100 + BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4345), 1101 1101 BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354), 1102 1102 { /* end: all zeroes */ } 1103 1103 }; ··· 1198 1194 brcmf_dbg(SDIO, "sdio device ID: 0x%04x\n", func->device); 1199 1195 brcmf_dbg(SDIO, "Function: %d\n", func->num); 1200 1196 1201 - if (func->num != 1 && func->num != 2) 1197 + if (func->num != 1) 1202 1198 return; 1203 1199 1204 1200 bus_if = dev_get_drvdata(&func->dev);
+254 -64
drivers/net/wireless/brcm80211/brcmfmac/chip.c
··· 100 100 #define BCM4329_CORE_SOCRAM_BASE 0x18003000 101 101 /* ARM Cortex M3 core, ID 0x82a */ 102 102 #define BCM4329_CORE_ARM_BASE 0x18002000 103 - #define BCM4329_RAMSIZE 0x48000 104 - /* bcm43143 */ 105 - #define BCM43143_RAMSIZE 0x70000 106 103 107 104 #define CORE_SB(base, field) \ 108 105 (base + SBCONFIGOFF + offsetof(struct sbconfig, field)) ··· 146 149 u32 sbidlow; /* identification */ 147 150 u32 sbidhigh; /* identification */ 148 151 }; 152 + 153 + /* bankidx and bankinfo reg defines corerev >= 8 */ 154 + #define SOCRAM_BANKINFO_RETNTRAM_MASK 0x00010000 155 + #define SOCRAM_BANKINFO_SZMASK 0x0000007f 156 + #define SOCRAM_BANKIDX_ROM_MASK 0x00000100 157 + 158 + #define SOCRAM_BANKIDX_MEMTYPE_SHIFT 8 159 + /* socram bankinfo memtype */ 160 + #define SOCRAM_MEMTYPE_RAM 0 161 + #define SOCRAM_MEMTYPE_R0M 1 162 + #define SOCRAM_MEMTYPE_DEVRAM 2 163 + 164 + #define SOCRAM_BANKINFO_SZBASE 8192 165 + #define SRCI_LSS_MASK 0x00f00000 166 + #define SRCI_LSS_SHIFT 20 167 + #define SRCI_SRNB_MASK 0xf0 168 + #define SRCI_SRNB_SHIFT 4 169 + #define SRCI_SRBSZ_MASK 0xf 170 + #define SRCI_SRBSZ_SHIFT 0 171 + #define SR_BSZ_BASE 14 172 + 173 + struct sbsocramregs { 174 + u32 coreinfo; 175 + u32 bwalloc; 176 + u32 extracoreinfo; 177 + u32 biststat; 178 + u32 bankidx; 179 + u32 standbyctrl; 180 + 181 + u32 errlogstatus; /* rev 6 */ 182 + u32 errlogaddr; /* rev 6 */ 183 + /* used for patching rev 3 & 5 */ 184 + u32 cambankidx; 185 + u32 cambankstandbyctrl; 186 + u32 cambankpatchctrl; 187 + u32 cambankpatchtblbaseaddr; 188 + u32 cambankcmdreg; 189 + u32 cambankdatareg; 190 + u32 cambankmaskreg; 191 + u32 PAD[1]; 192 + u32 bankinfo; /* corev 8 */ 193 + u32 bankpda; 194 + u32 PAD[14]; 195 + u32 extmemconfig; 196 + u32 extmemparitycsr; 197 + u32 extmemparityerrdata; 198 + u32 extmemparityerrcnt; 199 + u32 extmemwrctrlandsize; 200 + u32 PAD[84]; 201 + u32 workaround; 202 + u32 pwrctl; /* corerev >= 2 */ 203 + u32 PAD[133]; 204 + u32 sr_control; /* corerev >= 15 */ 205 + u32 
sr_status; /* corerev >= 15 */ 206 + u32 sr_address; /* corerev >= 15 */ 207 + u32 sr_data; /* corerev >= 15 */ 208 + }; 209 + 210 + #define SOCRAMREGOFFS(_f) offsetof(struct sbsocramregs, _f) 211 + 212 + #define ARMCR4_CAP (0x04) 213 + #define ARMCR4_BANKIDX (0x40) 214 + #define ARMCR4_BANKINFO (0x44) 215 + #define ARMCR4_BANKPDA (0x4C) 216 + 217 + #define ARMCR4_TCBBNB_MASK 0xf0 218 + #define ARMCR4_TCBBNB_SHIFT 4 219 + #define ARMCR4_TCBANB_MASK 0xf 220 + #define ARMCR4_TCBANB_SHIFT 0 221 + 222 + #define ARMCR4_BSZ_MASK 0x3f 223 + #define ARMCR4_BSZ_MULT 8192 149 224 150 225 struct brcmf_core_priv { 151 226 struct brcmf_core pub; ··· 488 419 return &core->pub; 489 420 } 490 421 491 - #ifdef DEBUG 492 422 /* safety check for chipinfo */ 493 423 static int brcmf_chip_cores_check(struct brcmf_chip_priv *ci) 494 424 { 495 425 struct brcmf_core_priv *core; 496 426 bool need_socram = false; 497 427 bool has_socram = false; 428 + bool cpu_found = false; 498 429 int idx = 1; 499 430 500 431 list_for_each_entry(core, &ci->cores, list) { ··· 504 435 505 436 switch (core->pub.id) { 506 437 case BCMA_CORE_ARM_CM3: 438 + cpu_found = true; 507 439 need_socram = true; 508 440 break; 509 441 case BCMA_CORE_INTERNAL_MEM: 510 442 has_socram = true; 511 443 break; 512 444 case BCMA_CORE_ARM_CR4: 513 - if (ci->pub.rambase == 0) { 514 - brcmf_err("RAM base not provided with ARM CR4 core\n"); 515 - return -ENOMEM; 516 - } 445 + cpu_found = true; 517 446 break; 518 447 default: 519 448 break; 520 449 } 521 450 } 522 451 452 + if (!cpu_found) { 453 + brcmf_err("CPU core not detected\n"); 454 + return -ENXIO; 455 + } 523 456 /* check RAM core presence for ARM CM3 core */ 524 457 if (need_socram && !has_socram) { 525 458 brcmf_err("RAM core not provided with ARM CM3 core\n"); ··· 529 458 } 530 459 return 0; 531 460 } 532 - #else /* DEBUG */ 533 - static inline int brcmf_chip_cores_check(struct brcmf_chip_priv *ci) 534 - { 535 - return 0; 536 - } 537 - #endif 538 461 539 - static void 
brcmf_chip_get_raminfo(struct brcmf_chip_priv *ci) 462 + static u32 brcmf_chip_core_read32(struct brcmf_core_priv *core, u16 reg) 463 + { 464 + return core->chip->ops->read32(core->chip->ctx, core->pub.base + reg); 465 + } 466 + 467 + static void brcmf_chip_core_write32(struct brcmf_core_priv *core, 468 + u16 reg, u32 val) 469 + { 470 + core->chip->ops->write32(core->chip->ctx, core->pub.base + reg, val); 471 + } 472 + 473 + static bool brcmf_chip_socram_banksize(struct brcmf_core_priv *core, u8 idx, 474 + u32 *banksize) 475 + { 476 + u32 bankinfo; 477 + u32 bankidx = (SOCRAM_MEMTYPE_RAM << SOCRAM_BANKIDX_MEMTYPE_SHIFT); 478 + 479 + bankidx |= idx; 480 + brcmf_chip_core_write32(core, SOCRAMREGOFFS(bankidx), bankidx); 481 + bankinfo = brcmf_chip_core_read32(core, SOCRAMREGOFFS(bankinfo)); 482 + *banksize = (bankinfo & SOCRAM_BANKINFO_SZMASK) + 1; 483 + *banksize *= SOCRAM_BANKINFO_SZBASE; 484 + return !!(bankinfo & SOCRAM_BANKINFO_RETNTRAM_MASK); 485 + } 486 + 487 + static void brcmf_chip_socram_ramsize(struct brcmf_core_priv *sr, u32 *ramsize, 488 + u32 *srsize) 489 + { 490 + u32 coreinfo; 491 + uint nb, banksize, lss; 492 + bool retent; 493 + int i; 494 + 495 + *ramsize = 0; 496 + *srsize = 0; 497 + 498 + if (WARN_ON(sr->pub.rev < 4)) 499 + return; 500 + 501 + if (!brcmf_chip_iscoreup(&sr->pub)) 502 + brcmf_chip_resetcore(&sr->pub, 0, 0, 0); 503 + 504 + /* Get info for determining size */ 505 + coreinfo = brcmf_chip_core_read32(sr, SOCRAMREGOFFS(coreinfo)); 506 + nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT; 507 + 508 + if ((sr->pub.rev <= 7) || (sr->pub.rev == 12)) { 509 + banksize = (coreinfo & SRCI_SRBSZ_MASK); 510 + lss = (coreinfo & SRCI_LSS_MASK) >> SRCI_LSS_SHIFT; 511 + if (lss != 0) 512 + nb--; 513 + *ramsize = nb * (1 << (banksize + SR_BSZ_BASE)); 514 + if (lss != 0) 515 + *ramsize += (1 << ((lss - 1) + SR_BSZ_BASE)); 516 + } else { 517 + nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT; 518 + for (i = 0; i < nb; i++) { 519 + retent = 
brcmf_chip_socram_banksize(sr, i, &banksize); 520 + *ramsize += banksize; 521 + if (retent) 522 + *srsize += banksize; 523 + } 524 + } 525 + 526 + /* hardcoded save&restore memory sizes */ 527 + switch (sr->chip->pub.chip) { 528 + case BRCM_CC_4334_CHIP_ID: 529 + if (sr->chip->pub.chiprev < 2) 530 + *srsize = (32 * 1024); 531 + break; 532 + case BRCM_CC_43430_CHIP_ID: 533 + /* assume sr for now as we can not check 534 + * firmware sr capability at this point. 535 + */ 536 + *srsize = (64 * 1024); 537 + break; 538 + default: 539 + break; 540 + } 541 + } 542 + 543 + /** Return the TCM-RAM size of the ARMCR4 core. */ 544 + static u32 brcmf_chip_tcm_ramsize(struct brcmf_core_priv *cr4) 545 + { 546 + u32 corecap; 547 + u32 memsize = 0; 548 + u32 nab; 549 + u32 nbb; 550 + u32 totb; 551 + u32 bxinfo; 552 + u32 idx; 553 + 554 + corecap = brcmf_chip_core_read32(cr4, ARMCR4_CAP); 555 + 556 + nab = (corecap & ARMCR4_TCBANB_MASK) >> ARMCR4_TCBANB_SHIFT; 557 + nbb = (corecap & ARMCR4_TCBBNB_MASK) >> ARMCR4_TCBBNB_SHIFT; 558 + totb = nab + nbb; 559 + 560 + for (idx = 0; idx < totb; idx++) { 561 + brcmf_chip_core_write32(cr4, ARMCR4_BANKIDX, idx); 562 + bxinfo = brcmf_chip_core_read32(cr4, ARMCR4_BANKINFO); 563 + memsize += ((bxinfo & ARMCR4_BSZ_MASK) + 1) * ARMCR4_BSZ_MULT; 564 + } 565 + 566 + return memsize; 567 + } 568 + 569 + static u32 brcmf_chip_tcm_rambase(struct brcmf_chip_priv *ci) 540 570 { 541 571 switch (ci->pub.chip) { 542 - case BRCM_CC_4329_CHIP_ID: 543 - ci->pub.ramsize = BCM4329_RAMSIZE; 544 - break; 545 - case BRCM_CC_43143_CHIP_ID: 546 - ci->pub.ramsize = BCM43143_RAMSIZE; 547 - break; 548 - case BRCM_CC_43241_CHIP_ID: 549 - ci->pub.ramsize = 0x90000; 550 - break; 551 - case BRCM_CC_4330_CHIP_ID: 552 - ci->pub.ramsize = 0x48000; 553 - break; 554 - case BRCM_CC_4334_CHIP_ID: 555 - case BRCM_CC_43340_CHIP_ID: 556 - ci->pub.ramsize = 0x80000; 557 - break; 572 + case BRCM_CC_4345_CHIP_ID: 573 + return 0x198000; 558 574 case BRCM_CC_4335_CHIP_ID: 559 - 
ci->pub.ramsize = 0xc0000; 560 - ci->pub.rambase = 0x180000; 561 - break; 562 - case BRCM_CC_43362_CHIP_ID: 563 - ci->pub.ramsize = 0x3c000; 564 - break; 565 575 case BRCM_CC_4339_CHIP_ID: 566 576 case BRCM_CC_4354_CHIP_ID: 567 577 case BRCM_CC_4356_CHIP_ID: 568 578 case BRCM_CC_43567_CHIP_ID: 569 579 case BRCM_CC_43569_CHIP_ID: 570 580 case BRCM_CC_43570_CHIP_ID: 571 - ci->pub.ramsize = 0xc0000; 572 - ci->pub.rambase = 0x180000; 573 - break; 574 581 case BRCM_CC_43602_CHIP_ID: 575 - ci->pub.ramsize = 0xf0000; 576 - ci->pub.rambase = 0x180000; 577 - break; 582 + return 0x180000; 578 583 default: 579 584 brcmf_err("unknown chip: %s\n", ci->pub.name); 580 585 break; 581 586 } 587 + return 0; 588 + } 589 + 590 + static int brcmf_chip_get_raminfo(struct brcmf_chip_priv *ci) 591 + { 592 + struct brcmf_core_priv *mem_core; 593 + struct brcmf_core *mem; 594 + 595 + mem = brcmf_chip_get_core(&ci->pub, BCMA_CORE_ARM_CR4); 596 + if (mem) { 597 + mem_core = container_of(mem, struct brcmf_core_priv, pub); 598 + ci->pub.ramsize = brcmf_chip_tcm_ramsize(mem_core); 599 + ci->pub.rambase = brcmf_chip_tcm_rambase(ci); 600 + if (!ci->pub.rambase) { 601 + brcmf_err("RAM base not provided with ARM CR4 core\n"); 602 + return -EINVAL; 603 + } 604 + } else { 605 + mem = brcmf_chip_get_core(&ci->pub, BCMA_CORE_INTERNAL_MEM); 606 + mem_core = container_of(mem, struct brcmf_core_priv, pub); 607 + brcmf_chip_socram_ramsize(mem_core, &ci->pub.ramsize, 608 + &ci->pub.srsize); 609 + } 610 + brcmf_dbg(INFO, "RAM: base=0x%x size=%d (0x%x) sr=%d (0x%x)\n", 611 + ci->pub.rambase, ci->pub.ramsize, ci->pub.ramsize, 612 + ci->pub.srsize, ci->pub.srsize); 613 + 614 + if (!ci->pub.ramsize) { 615 + brcmf_err("RAM size is undetermined\n"); 616 + return -ENOMEM; 617 + } 618 + return 0; 582 619 } 583 620 584 621 static u32 brcmf_chip_dmp_get_desc(struct brcmf_chip_priv *ci, u32 *eromaddr, ··· 839 660 struct brcmf_core *core; 840 661 u32 regdata; 841 662 u32 socitype; 663 + int ret; 842 664 843 665 /* Get CC 
core rev 844 666 * Chipid is assume to be at offset 0 from SI_ENUM_BASE ··· 892 712 return -ENODEV; 893 713 } 894 714 895 - brcmf_chip_get_raminfo(ci); 715 + ret = brcmf_chip_cores_check(ci); 716 + if (ret) 717 + return ret; 896 718 897 - return brcmf_chip_cores_check(ci); 719 + /* assure chip is passive for core access */ 720 + brcmf_chip_set_passive(&ci->pub); 721 + return brcmf_chip_get_raminfo(ci); 898 722 } 899 723 900 724 static void brcmf_chip_disable_arm(struct brcmf_chip_priv *chip, u16 id) ··· 962 778 if (chip->ops->setup) 963 779 ret = chip->ops->setup(chip->ctx, pub); 964 780 965 - /* 966 - * Make sure any on-chip ARM is off (in case strapping is wrong), 967 - * or downloaded code was already running. 968 - */ 969 - brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CM3); 970 - brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CR4); 971 781 return ret; 972 782 } 973 783 ··· 977 799 err = -EINVAL; 978 800 if (WARN_ON(!ops->prepare)) 979 801 err = -EINVAL; 980 - if (WARN_ON(!ops->exit_dl)) 802 + if (WARN_ON(!ops->activate)) 981 803 err = -EINVAL; 982 804 if (err < 0) 983 805 return ERR_PTR(-EINVAL); ··· 1075 897 } 1076 898 1077 899 static void 1078 - brcmf_chip_cm3_enterdl(struct brcmf_chip_priv *chip) 900 + brcmf_chip_cm3_set_passive(struct brcmf_chip_priv *chip) 1079 901 { 1080 902 struct brcmf_core *core; 903 + struct brcmf_core_priv *sr; 1081 904 1082 905 brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CM3); 1083 906 core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211); ··· 1088 909 D11_BCMA_IOCTL_PHYCLOCKEN); 1089 910 core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_INTERNAL_MEM); 1090 911 brcmf_chip_resetcore(core, 0, 0, 0); 912 + 913 + /* disable bank #3 remap for this device */ 914 + if (chip->pub.chip == BRCM_CC_43430_CHIP_ID) { 915 + sr = container_of(core, struct brcmf_core_priv, pub); 916 + brcmf_chip_core_write32(sr, SOCRAMREGOFFS(bankidx), 3); 917 + brcmf_chip_core_write32(sr, SOCRAMREGOFFS(bankpda), 0); 918 + } 1091 919 } 1092 920 1093 - static bool 
brcmf_chip_cm3_exitdl(struct brcmf_chip_priv *chip) 921 + static bool brcmf_chip_cm3_set_active(struct brcmf_chip_priv *chip) 1094 922 { 1095 923 struct brcmf_core *core; 1096 924 ··· 1107 921 return false; 1108 922 } 1109 923 1110 - chip->ops->exit_dl(chip->ctx, &chip->pub, 0); 924 + chip->ops->activate(chip->ctx, &chip->pub, 0); 1111 925 1112 926 core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CM3); 1113 927 brcmf_chip_resetcore(core, 0, 0, 0); ··· 1116 930 } 1117 931 1118 932 static inline void 1119 - brcmf_chip_cr4_enterdl(struct brcmf_chip_priv *chip) 933 + brcmf_chip_cr4_set_passive(struct brcmf_chip_priv *chip) 1120 934 { 1121 935 struct brcmf_core *core; 1122 936 ··· 1129 943 D11_BCMA_IOCTL_PHYCLOCKEN); 1130 944 } 1131 945 1132 - static bool brcmf_chip_cr4_exitdl(struct brcmf_chip_priv *chip, u32 rstvec) 946 + static bool brcmf_chip_cr4_set_active(struct brcmf_chip_priv *chip, u32 rstvec) 1133 947 { 1134 948 struct brcmf_core *core; 1135 949 1136 - chip->ops->exit_dl(chip->ctx, &chip->pub, rstvec); 950 + chip->ops->activate(chip->ctx, &chip->pub, rstvec); 1137 951 1138 952 /* restore ARM */ 1139 953 core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CR4); ··· 1142 956 return true; 1143 957 } 1144 958 1145 - void brcmf_chip_enter_download(struct brcmf_chip *pub) 959 + void brcmf_chip_set_passive(struct brcmf_chip *pub) 1146 960 { 1147 961 struct brcmf_chip_priv *chip; 1148 962 struct brcmf_core *arm; ··· 1152 966 chip = container_of(pub, struct brcmf_chip_priv, pub); 1153 967 arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4); 1154 968 if (arm) { 1155 - brcmf_chip_cr4_enterdl(chip); 969 + brcmf_chip_cr4_set_passive(chip); 1156 970 return; 1157 971 } 1158 972 1159 - brcmf_chip_cm3_enterdl(chip); 973 + brcmf_chip_cm3_set_passive(chip); 1160 974 } 1161 975 1162 - bool brcmf_chip_exit_download(struct brcmf_chip *pub, u32 rstvec) 976 + bool brcmf_chip_set_active(struct brcmf_chip *pub, u32 rstvec) 1163 977 { 1164 978 struct brcmf_chip_priv *chip; 1165 979 
struct brcmf_core *arm; ··· 1169 983 chip = container_of(pub, struct brcmf_chip_priv, pub); 1170 984 arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4); 1171 985 if (arm) 1172 - return brcmf_chip_cr4_exitdl(chip, rstvec); 986 + return brcmf_chip_cr4_set_active(chip, rstvec); 1173 987 1174 - return brcmf_chip_cm3_exitdl(chip); 988 + return brcmf_chip_cm3_set_active(chip); 1175 989 } 1176 990 1177 991 bool brcmf_chip_sr_capable(struct brcmf_chip *pub) ··· 1202 1016 addr = CORE_CC_REG(base, chipcontrol_data); 1203 1017 reg = chip->ops->read32(chip->ctx, addr); 1204 1018 return (reg & pmu_cc3_mask) != 0; 1019 + case BRCM_CC_43430_CHIP_ID: 1020 + addr = CORE_CC_REG(base, sr_control1); 1021 + reg = chip->ops->read32(chip->ctx, addr); 1022 + return reg != 0; 1205 1023 default: 1206 1024 addr = CORE_CC_REG(base, pmucapabilities_ext); 1207 1025 reg = chip->ops->read32(chip->ctx, addr);
+7 -5
drivers/net/wireless/brcm80211/brcmfmac/chip.h
··· 30 30 * @pmucaps: PMU capabilities. 31 31 * @pmurev: PMU revision. 32 32 * @rambase: RAM base address (only applicable for ARM CR4 chips). 33 - * @ramsize: amount of RAM on chip. 33 + * @ramsize: amount of RAM on chip including retention. 34 + * @srsize: amount of retention RAM on chip. 34 35 * @name: string representation of the chip identifier. 35 36 */ 36 37 struct brcmf_chip { ··· 42 41 u32 pmurev; 43 42 u32 rambase; 44 43 u32 ramsize; 44 + u32 srsize; 45 45 char name[8]; 46 46 }; 47 47 ··· 66 64 * @write32: write 32-bit value over bus. 67 65 * @prepare: prepare bus for core configuration. 68 66 * @setup: bus-specific core setup. 69 - * @exit_dl: exit download state. 67 + * @active: chip becomes active. 70 68 * The callback should use the provided @rstvec when non-zero. 71 69 */ 72 70 struct brcmf_buscore_ops { ··· 74 72 void (*write32)(void *ctx, u32 addr, u32 value); 75 73 int (*prepare)(void *ctx); 76 74 int (*setup)(void *ctx, struct brcmf_chip *chip); 77 - void (*exit_dl)(void *ctx, struct brcmf_chip *chip, u32 rstvec); 75 + void (*activate)(void *ctx, struct brcmf_chip *chip, u32 rstvec); 78 76 }; 79 77 80 78 struct brcmf_chip *brcmf_chip_attach(void *ctx, ··· 86 84 void brcmf_chip_coredisable(struct brcmf_core *core, u32 prereset, u32 reset); 87 85 void brcmf_chip_resetcore(struct brcmf_core *core, u32 prereset, u32 reset, 88 86 u32 postreset); 89 - void brcmf_chip_enter_download(struct brcmf_chip *ci); 90 - bool brcmf_chip_exit_download(struct brcmf_chip *ci, u32 rstvec); 87 + void brcmf_chip_set_passive(struct brcmf_chip *ci); 88 + bool brcmf_chip_set_active(struct brcmf_chip *ci, u32 rstvec); 91 89 bool brcmf_chip_sr_capable(struct brcmf_chip *pub); 92 90 93 91 #endif /* BRCMF_AXIDMP_H */
+2 -3
drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
··· 481 481 482 482 static void brcmf_msgbuf_ioctl_resp_wake(struct brcmf_msgbuf *msgbuf) 483 483 { 484 - if (waitqueue_active(&msgbuf->ioctl_resp_wait)) { 485 - msgbuf->ctl_completed = true; 484 + msgbuf->ctl_completed = true; 485 + if (waitqueue_active(&msgbuf->ioctl_resp_wait)) 486 486 wake_up(&msgbuf->ioctl_resp_wait); 487 - } 488 487 } 489 488 490 489
+4 -4
drivers/net/wireless/brcm80211/brcmfmac/msgbuf.h
··· 17 17 18 18 #ifdef CONFIG_BRCMFMAC_PROTO_MSGBUF 19 19 20 - #define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM 20 21 - #define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM 256 22 - #define BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM 20 20 + #define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_MAX_ITEM 64 21 + #define BRCMF_H2D_MSGRING_RXPOST_SUBMIT_MAX_ITEM 512 22 + #define BRCMF_D2H_MSGRING_CONTROL_COMPLETE_MAX_ITEM 64 23 23 #define BRCMF_D2H_MSGRING_TX_COMPLETE_MAX_ITEM 1024 24 - #define BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM 256 24 + #define BRCMF_D2H_MSGRING_RX_COMPLETE_MAX_ITEM 512 25 25 #define BRCMF_H2D_TXFLOWRING_MAX_ITEM 512 26 26 27 27 #define BRCMF_H2D_MSGRING_CONTROL_SUBMIT_ITEMSIZE 40
+7 -17
drivers/net/wireless/brcm80211/brcmfmac/pcie.c
··· 47 47 48 48 #define BRCMF_PCIE_43602_FW_NAME "brcm/brcmfmac43602-pcie.bin" 49 49 #define BRCMF_PCIE_43602_NVRAM_NAME "brcm/brcmfmac43602-pcie.txt" 50 - #define BRCMF_PCIE_4354_FW_NAME "brcm/brcmfmac4354-pcie.bin" 51 - #define BRCMF_PCIE_4354_NVRAM_NAME "brcm/brcmfmac4354-pcie.txt" 52 50 #define BRCMF_PCIE_4356_FW_NAME "brcm/brcmfmac4356-pcie.bin" 53 51 #define BRCMF_PCIE_4356_NVRAM_NAME "brcm/brcmfmac4356-pcie.txt" 54 52 #define BRCMF_PCIE_43570_FW_NAME "brcm/brcmfmac43570-pcie.bin" ··· 185 187 186 188 MODULE_FIRMWARE(BRCMF_PCIE_43602_FW_NAME); 187 189 MODULE_FIRMWARE(BRCMF_PCIE_43602_NVRAM_NAME); 188 - MODULE_FIRMWARE(BRCMF_PCIE_4354_FW_NAME); 189 - MODULE_FIRMWARE(BRCMF_PCIE_4354_NVRAM_NAME); 190 + MODULE_FIRMWARE(BRCMF_PCIE_4356_FW_NAME); 191 + MODULE_FIRMWARE(BRCMF_PCIE_4356_NVRAM_NAME); 190 192 MODULE_FIRMWARE(BRCMF_PCIE_43570_FW_NAME); 191 193 MODULE_FIRMWARE(BRCMF_PCIE_43570_NVRAM_NAME); 192 194 ··· 507 509 508 510 static int brcmf_pcie_enter_download_state(struct brcmf_pciedev_info *devinfo) 509 511 { 510 - brcmf_chip_enter_download(devinfo->ci); 511 - 512 512 if (devinfo->ci->chip == BRCM_CC_43602_CHIP_ID) { 513 513 brcmf_pcie_select_core(devinfo, BCMA_CORE_ARM_CR4); 514 514 brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_ARMCR4REG_BANKIDX, ··· 532 536 brcmf_chip_resetcore(core, 0, 0, 0); 533 537 } 534 538 535 - return !brcmf_chip_exit_download(devinfo->ci, resetintr); 539 + return !brcmf_chip_set_active(devinfo->ci, resetintr); 536 540 } 537 541 538 542 ··· 649 653 console->log_str[console->log_idx] = ch; 650 654 console->log_idx++; 651 655 } 652 - 653 656 if (ch == '\n') { 654 657 console->log_str[console->log_idx] = 0; 655 - brcmf_dbg(PCIE, "CONSOLE: %s\n", console->log_str); 658 + brcmf_dbg(PCIE, "CONSOLE: %s", console->log_str); 656 659 console->log_idx = 0; 657 660 } 658 661 } ··· 1323 1328 fw_name = BRCMF_PCIE_43602_FW_NAME; 1324 1329 nvram_name = BRCMF_PCIE_43602_NVRAM_NAME; 1325 1330 break; 1326 - case BRCM_CC_4354_CHIP_ID: 1327 - fw_name = 
BRCMF_PCIE_4354_FW_NAME; 1328 - nvram_name = BRCMF_PCIE_4354_NVRAM_NAME; 1329 - break; 1330 1331 case BRCM_CC_4356_CHIP_ID: 1331 1332 fw_name = BRCMF_PCIE_4356_FW_NAME; 1332 1333 nvram_name = BRCMF_PCIE_4356_NVRAM_NAME; ··· 1557 1566 } 1558 1567 1559 1568 1560 - static void brcmf_pcie_buscore_exitdl(void *ctx, struct brcmf_chip *chip, 1561 - u32 rstvec) 1569 + static void brcmf_pcie_buscore_activate(void *ctx, struct brcmf_chip *chip, 1570 + u32 rstvec) 1562 1571 { 1563 1572 struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)ctx; 1564 1573 ··· 1568 1577 1569 1578 static const struct brcmf_buscore_ops brcmf_pcie_buscore_ops = { 1570 1579 .prepare = brcmf_pcie_buscoreprep, 1571 - .exit_dl = brcmf_pcie_buscore_exitdl, 1580 + .activate = brcmf_pcie_buscore_activate, 1572 1581 .read32 = brcmf_pcie_buscore_read32, 1573 1582 .write32 = brcmf_pcie_buscore_write32, 1574 1583 }; ··· 1847 1856 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 } 1848 1857 1849 1858 static struct pci_device_id brcmf_pcie_devid_table[] = { 1850 - BRCMF_PCIE_DEVICE(BRCM_PCIE_4354_DEVICE_ID), 1851 1859 BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID), 1852 1860 BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID), 1853 1861 BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID),
+105 -94
drivers/net/wireless/brcm80211/brcmfmac/sdio.c
··· 432 432 struct brcmf_sdio_dev *sdiodev; /* sdio device handler */ 433 433 struct brcmf_chip *ci; /* Chip info struct */ 434 434 435 - u32 ramsize; /* Size of RAM in SOCRAM (bytes) */ 436 - 437 435 u32 hostintmask; /* Copy of Host Interrupt Mask */ 438 436 atomic_t intstatus; /* Intstatus bits (events) pending */ 439 437 atomic_t fcstate; /* State of dongle flow-control */ ··· 483 485 #endif /* DEBUG */ 484 486 485 487 uint clkstate; /* State of sd and backplane clock(s) */ 486 - bool activity; /* Activity flag for clock down */ 487 488 s32 idletime; /* Control for activity timeout */ 488 - s32 idlecount; /* Activity timeout counter */ 489 - s32 idleclock; /* How to set bus driver when idle */ 489 + s32 idlecount; /* Activity timeout counter */ 490 + s32 idleclock; /* How to set bus driver when idle */ 490 491 bool rxflow_mode; /* Rx flow control mode */ 491 492 bool rxflow; /* Is rx flow control on */ 492 493 bool alp_only; /* Don't use HT clock (ALP only) */ ··· 507 510 508 511 struct workqueue_struct *brcmf_wq; 509 512 struct work_struct datawork; 510 - atomic_t dpc_tskcnt; 513 + bool dpc_triggered; 514 + bool dpc_running; 511 515 512 516 bool txoff; /* Transmit flow-controlled */ 513 517 struct brcmf_sdio_count sdcnt; ··· 615 617 #define BCM43362_NVRAM_NAME "brcm/brcmfmac43362-sdio.txt" 616 618 #define BCM4339_FIRMWARE_NAME "brcm/brcmfmac4339-sdio.bin" 617 619 #define BCM4339_NVRAM_NAME "brcm/brcmfmac4339-sdio.txt" 620 + #define BCM43430_FIRMWARE_NAME "brcm/brcmfmac43430-sdio.bin" 621 + #define BCM43430_NVRAM_NAME "brcm/brcmfmac43430-sdio.txt" 622 + #define BCM43455_FIRMWARE_NAME "brcm/brcmfmac43455-sdio.bin" 623 + #define BCM43455_NVRAM_NAME "brcm/brcmfmac43455-sdio.txt" 618 624 #define BCM4354_FIRMWARE_NAME "brcm/brcmfmac4354-sdio.bin" 619 625 #define BCM4354_NVRAM_NAME "brcm/brcmfmac4354-sdio.txt" 620 626 ··· 642 640 MODULE_FIRMWARE(BCM43362_NVRAM_NAME); 643 641 MODULE_FIRMWARE(BCM4339_FIRMWARE_NAME); 644 642 MODULE_FIRMWARE(BCM4339_NVRAM_NAME); 643 + 
MODULE_FIRMWARE(BCM43430_FIRMWARE_NAME); 644 + MODULE_FIRMWARE(BCM43430_NVRAM_NAME); 645 + MODULE_FIRMWARE(BCM43455_FIRMWARE_NAME); 646 + MODULE_FIRMWARE(BCM43455_NVRAM_NAME); 645 647 MODULE_FIRMWARE(BCM4354_FIRMWARE_NAME); 646 648 MODULE_FIRMWARE(BCM4354_NVRAM_NAME); 647 649 ··· 675 669 { BRCM_CC_4335_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4335) }, 676 670 { BRCM_CC_43362_CHIP_ID, 0xFFFFFFFE, BRCMF_FIRMWARE_NVRAM(BCM43362) }, 677 671 { BRCM_CC_4339_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4339) }, 672 + { BRCM_CC_43430_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM43430) }, 673 + { BRCM_CC_4345_CHIP_ID, 0xFFFFFFC0, BRCMF_FIRMWARE_NVRAM(BCM43455) }, 678 674 { BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, BRCMF_FIRMWARE_NVRAM(BCM4354) } 679 675 }; 680 676 ··· 967 959 brcmf_dbg(SDIO, "Enter\n"); 968 960 969 961 /* Early exit if we're already there */ 970 - if (bus->clkstate == target) { 971 - if (target == CLK_AVAIL) { 972 - brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS); 973 - bus->activity = true; 974 - } 962 + if (bus->clkstate == target) 975 963 return 0; 976 - } 977 964 978 965 switch (target) { 979 966 case CLK_AVAIL: ··· 977 974 brcmf_sdio_sdclk(bus, true); 978 975 /* Now request HT Avail on the backplane */ 979 976 brcmf_sdio_htclk(bus, true, pendok); 980 - brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS); 981 - bus->activity = true; 982 977 break; 983 978 984 979 case CLK_SDONLY: ··· 988 987 else 989 988 brcmf_err("request for %d -> %d\n", 990 989 bus->clkstate, target); 991 - brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS); 992 990 break; 993 991 994 992 case CLK_NONE: ··· 996 996 brcmf_sdio_htclk(bus, false, false); 997 997 /* Now remove the SD clock */ 998 998 brcmf_sdio_sdclk(bus, false); 999 - brcmf_sdio_wd_timer(bus, 0); 1000 999 break; 1001 1000 } 1002 1001 #ifdef DEBUG ··· 1023 1024 1024 1025 /* Going to sleep */ 1025 1026 if (sleep) { 1026 - /* Don't sleep if something is pending */ 1027 - if (atomic_read(&bus->intstatus) || 1028 - atomic_read(&bus->ipend) > 0 || 
1029 - bus->ctrl_frame_stat || 1030 - (!atomic_read(&bus->fcstate) && 1031 - brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && 1032 - data_ok(bus))) { 1033 - err = -EBUSY; 1034 - goto done; 1035 - } 1036 - 1037 1027 clkcsr = brcmf_sdiod_regrb(bus->sdiodev, 1038 1028 SBSDIO_FUNC1_CHIPCLKCSR, 1039 1029 &err); ··· 1033 1045 SBSDIO_ALP_AVAIL_REQ, &err); 1034 1046 } 1035 1047 err = brcmf_sdio_kso_control(bus, false); 1036 - /* disable watchdog */ 1037 - if (!err) 1038 - brcmf_sdio_wd_timer(bus, 0); 1039 1048 } else { 1040 - bus->idlecount = 0; 1041 1049 err = brcmf_sdio_kso_control(bus, true); 1042 1050 } 1043 1051 if (err) { ··· 1050 1066 brcmf_sdio_clkctl(bus, CLK_NONE, pendok); 1051 1067 } else { 1052 1068 brcmf_sdio_clkctl(bus, CLK_AVAIL, pendok); 1069 + brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS); 1053 1070 } 1054 1071 bus->sleeping = sleep; 1055 1072 brcmf_dbg(SDIO, "new state %s\n", ··· 1070 1085 static int brcmf_sdio_readshared(struct brcmf_sdio *bus, 1071 1086 struct sdpcm_shared *sh) 1072 1087 { 1073 - u32 addr; 1088 + u32 addr = 0; 1074 1089 int rv; 1075 1090 u32 shaddr = 0; 1076 1091 struct sdpcm_shared_le sh_le; 1077 1092 __le32 addr_le; 1078 1093 1079 - shaddr = bus->ci->rambase + bus->ramsize - 4; 1094 + sdio_claim_host(bus->sdiodev->func[1]); 1095 + brcmf_sdio_bus_sleep(bus, false, false); 1080 1096 1081 1097 /* 1082 1098 * Read last word in socram to determine 1083 1099 * address of sdpcm_shared structure 1084 1100 */ 1085 - sdio_claim_host(bus->sdiodev->func[1]); 1086 - brcmf_sdio_bus_sleep(bus, false, false); 1087 - rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4); 1088 - sdio_release_host(bus->sdiodev->func[1]); 1101 + shaddr = bus->ci->rambase + bus->ci->ramsize - 4; 1102 + if (!bus->ci->rambase && brcmf_chip_sr_capable(bus->ci)) 1103 + shaddr -= bus->ci->srsize; 1104 + rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr, 1105 + (u8 *)&addr_le, 4); 1089 1106 if (rv < 0) 1090 - return rv; 1091 - 1092 - addr = le32_to_cpu(addr_le); 
1093 - 1094 - brcmf_dbg(SDIO, "sdpcm_shared address 0x%08X\n", addr); 1107 + goto fail; 1095 1108 1096 1109 /* 1097 1110 * Check if addr is valid. 1098 1111 * NVRAM length at the end of memory should have been overwritten. 1099 1112 */ 1113 + addr = le32_to_cpu(addr_le); 1100 1114 if (!brcmf_sdio_valid_shared_address(addr)) { 1101 - brcmf_err("invalid sdpcm_shared address 0x%08X\n", 1102 - addr); 1103 - return -EINVAL; 1115 + brcmf_err("invalid sdpcm_shared address 0x%08X\n", addr); 1116 + rv = -EINVAL; 1117 + goto fail; 1104 1118 } 1119 + 1120 + brcmf_dbg(INFO, "sdpcm_shared address 0x%08X\n", addr); 1105 1121 1106 1122 /* Read hndrte_shared structure */ 1107 1123 rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le, 1108 1124 sizeof(struct sdpcm_shared_le)); 1109 1125 if (rv < 0) 1110 - return rv; 1126 + goto fail; 1127 + 1128 + sdio_release_host(bus->sdiodev->func[1]); 1111 1129 1112 1130 /* Endianness */ 1113 1131 sh->flags = le32_to_cpu(sh_le.flags); ··· 1127 1139 sh->flags & SDPCM_SHARED_VERSION_MASK); 1128 1140 return -EPROTO; 1129 1141 } 1130 - 1131 1142 return 0; 1143 + 1144 + fail: 1145 + brcmf_err("unable to obtain sdpcm_shared info: rv=%d (addr=0x%x)\n", 1146 + rv, addr); 1147 + sdio_release_host(bus->sdiodev->func[1]); 1148 + return rv; 1132 1149 } 1133 1150 1134 1151 static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus) ··· 2714 2721 if (bus->ctrl_frame_stat && (bus->clkstate == CLK_AVAIL) && 2715 2722 data_ok(bus)) { 2716 2723 sdio_claim_host(bus->sdiodev->func[1]); 2717 - err = brcmf_sdio_tx_ctrlframe(bus, bus->ctrl_frame_buf, 2718 - bus->ctrl_frame_len); 2724 + if (bus->ctrl_frame_stat) { 2725 + err = brcmf_sdio_tx_ctrlframe(bus, bus->ctrl_frame_buf, 2726 + bus->ctrl_frame_len); 2727 + bus->ctrl_frame_err = err; 2728 + wmb(); 2729 + bus->ctrl_frame_stat = false; 2730 + } 2719 2731 sdio_release_host(bus->sdiodev->func[1]); 2720 - bus->ctrl_frame_err = err; 2721 - bus->ctrl_frame_stat = false; 2722 2732 
brcmf_sdio_wait_event_wakeup(bus); 2723 2733 } 2724 2734 /* Send queued frames (limit 1 if rx may still be pending) */ ··· 2736 2740 if ((bus->sdiodev->state != BRCMF_SDIOD_DATA) || (err != 0)) { 2737 2741 brcmf_err("failed backplane access over SDIO, halting operation\n"); 2738 2742 atomic_set(&bus->intstatus, 0); 2743 + if (bus->ctrl_frame_stat) { 2744 + sdio_claim_host(bus->sdiodev->func[1]); 2745 + if (bus->ctrl_frame_stat) { 2746 + bus->ctrl_frame_err = -ENODEV; 2747 + wmb(); 2748 + bus->ctrl_frame_stat = false; 2749 + brcmf_sdio_wait_event_wakeup(bus); 2750 + } 2751 + sdio_release_host(bus->sdiodev->func[1]); 2752 + } 2739 2753 } else if (atomic_read(&bus->intstatus) || 2740 2754 atomic_read(&bus->ipend) > 0 || 2741 2755 (!atomic_read(&bus->fcstate) && 2742 2756 brcmu_pktq_mlen(&bus->txq, ~bus->flowcontrol) && 2743 2757 data_ok(bus))) { 2744 - atomic_inc(&bus->dpc_tskcnt); 2758 + bus->dpc_triggered = true; 2745 2759 } 2746 2760 } 2747 2761 ··· 2947 2941 /* Send from dpc */ 2948 2942 bus->ctrl_frame_buf = msg; 2949 2943 bus->ctrl_frame_len = msglen; 2944 + wmb(); 2950 2945 bus->ctrl_frame_stat = true; 2951 2946 2952 2947 brcmf_sdio_trigger_dpc(bus); 2953 2948 wait_event_interruptible_timeout(bus->ctrl_wait, !bus->ctrl_frame_stat, 2954 2949 msecs_to_jiffies(CTL_DONE_TIMEOUT)); 2955 - 2956 - if (!bus->ctrl_frame_stat) { 2950 + ret = 0; 2951 + if (bus->ctrl_frame_stat) { 2952 + sdio_claim_host(bus->sdiodev->func[1]); 2953 + if (bus->ctrl_frame_stat) { 2954 + brcmf_dbg(SDIO, "ctrl_frame timeout\n"); 2955 + bus->ctrl_frame_stat = false; 2956 + ret = -ETIMEDOUT; 2957 + } 2958 + sdio_release_host(bus->sdiodev->func[1]); 2959 + } 2960 + if (!ret) { 2957 2961 brcmf_dbg(SDIO, "ctrl_frame complete, err=%d\n", 2958 2962 bus->ctrl_frame_err); 2963 + rmb(); 2959 2964 ret = bus->ctrl_frame_err; 2960 - } else { 2961 - brcmf_dbg(SDIO, "ctrl_frame timeout\n"); 2962 - bus->ctrl_frame_stat = false; 2963 - ret = -ETIMEDOUT; 2964 2965 } 2965 2966 2966 2967 if (ret) ··· 3371 3358 
sdio_claim_host(bus->sdiodev->func[1]); 3372 3359 brcmf_sdio_clkctl(bus, CLK_AVAIL, false); 3373 3360 3374 - /* Keep arm in reset */ 3375 - brcmf_chip_enter_download(bus->ci); 3376 - 3377 3361 rstvec = get_unaligned_le32(fw->data); 3378 3362 brcmf_dbg(SDIO, "firmware rstvec: %x\n", rstvec); 3379 3363 ··· 3390 3380 } 3391 3381 3392 3382 /* Take arm out of reset */ 3393 - if (!brcmf_chip_exit_download(bus->ci, rstvec)) { 3383 + if (!brcmf_chip_set_active(bus->ci, rstvec)) { 3394 3384 brcmf_err("error getting out of ARM core reset\n"); 3395 3385 goto err; 3396 3386 } ··· 3535 3525 3536 3526 void brcmf_sdio_trigger_dpc(struct brcmf_sdio *bus) 3537 3527 { 3538 - if (atomic_read(&bus->dpc_tskcnt) == 0) { 3539 - atomic_inc(&bus->dpc_tskcnt); 3528 + if (!bus->dpc_triggered) { 3529 + bus->dpc_triggered = true; 3540 3530 queue_work(bus->brcmf_wq, &bus->datawork); 3541 3531 } 3542 3532 } ··· 3567 3557 if (!bus->intr) 3568 3558 brcmf_err("isr w/o interrupt configured!\n"); 3569 3559 3570 - atomic_inc(&bus->dpc_tskcnt); 3560 + bus->dpc_triggered = true; 3571 3561 queue_work(bus->brcmf_wq, &bus->datawork); 3572 3562 } 3573 3563 3574 - static bool brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus) 3564 + static void brcmf_sdio_bus_watchdog(struct brcmf_sdio *bus) 3575 3565 { 3576 3566 brcmf_dbg(TIMER, "Enter\n"); 3577 3567 ··· 3587 3577 if (!bus->intr || 3588 3578 (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) { 3589 3579 3590 - if (atomic_read(&bus->dpc_tskcnt) == 0) { 3580 + if (!bus->dpc_triggered) { 3591 3581 u8 devpend; 3592 3582 3593 3583 sdio_claim_host(bus->sdiodev->func[1]); ··· 3605 3595 bus->sdcnt.pollcnt++; 3606 3596 atomic_set(&bus->ipend, 1); 3607 3597 3608 - atomic_inc(&bus->dpc_tskcnt); 3598 + bus->dpc_triggered = true; 3609 3599 queue_work(bus->brcmf_wq, &bus->datawork); 3610 3600 } 3611 3601 } ··· 3632 3622 #endif /* DEBUG */ 3633 3623 3634 3624 /* On idle timeout clear activity flag and/or turn off clock */ 3635 - if ((bus->idletime > 0) && (bus->clkstate == 
CLK_AVAIL)) { 3636 - if (++bus->idlecount >= bus->idletime) { 3637 - bus->idlecount = 0; 3638 - if (bus->activity) { 3639 - bus->activity = false; 3640 - brcmf_sdio_wd_timer(bus, BRCMF_WD_POLL_MS); 3641 - } else { 3625 + if (!bus->dpc_triggered) { 3626 + rmb(); 3627 + if ((!bus->dpc_running) && (bus->idletime > 0) && 3628 + (bus->clkstate == CLK_AVAIL)) { 3629 + bus->idlecount++; 3630 + if (bus->idlecount > bus->idletime) { 3642 3631 brcmf_dbg(SDIO, "idle\n"); 3643 3632 sdio_claim_host(bus->sdiodev->func[1]); 3633 + brcmf_sdio_wd_timer(bus, 0); 3634 + bus->idlecount = 0; 3644 3635 brcmf_sdio_bus_sleep(bus, true, false); 3645 3636 sdio_release_host(bus->sdiodev->func[1]); 3646 3637 } 3638 + } else { 3639 + bus->idlecount = 0; 3647 3640 } 3641 + } else { 3642 + bus->idlecount = 0; 3648 3643 } 3649 - 3650 - return (atomic_read(&bus->ipend) > 0); 3651 3644 } 3652 3645 3653 3646 static void brcmf_sdio_dataworker(struct work_struct *work) ··· 3658 3645 struct brcmf_sdio *bus = container_of(work, struct brcmf_sdio, 3659 3646 datawork); 3660 3647 3661 - while (atomic_read(&bus->dpc_tskcnt)) { 3662 - atomic_set(&bus->dpc_tskcnt, 0); 3648 + bus->dpc_running = true; 3649 + wmb(); 3650 + while (ACCESS_ONCE(bus->dpc_triggered)) { 3651 + bus->dpc_triggered = false; 3663 3652 brcmf_sdio_dpc(bus); 3653 + bus->idlecount = 0; 3664 3654 } 3655 + bus->dpc_running = false; 3665 3656 if (brcmf_sdiod_freezing(bus->sdiodev)) { 3666 3657 brcmf_sdiod_change_state(bus->sdiodev, BRCMF_SDIOD_DOWN); 3667 3658 brcmf_sdiod_try_freeze(bus->sdiodev); ··· 3788 3771 return 0; 3789 3772 } 3790 3773 3791 - static void brcmf_sdio_buscore_exitdl(void *ctx, struct brcmf_chip *chip, 3792 - u32 rstvec) 3774 + static void brcmf_sdio_buscore_activate(void *ctx, struct brcmf_chip *chip, 3775 + u32 rstvec) 3793 3776 { 3794 3777 struct brcmf_sdio_dev *sdiodev = ctx; 3795 3778 struct brcmf_core *core; ··· 3832 3815 3833 3816 static const struct brcmf_buscore_ops brcmf_sdio_buscore_ops = { 3834 3817 .prepare = 
brcmf_sdio_buscoreprep, 3835 - .exit_dl = brcmf_sdio_buscore_exitdl, 3818 + .activate = brcmf_sdio_buscore_activate, 3836 3819 .read32 = brcmf_sdio_buscore_read32, 3837 3820 .write32 = brcmf_sdio_buscore_write32, 3838 3821 }; ··· 3885 3868 else 3886 3869 drivestrength = DEFAULT_SDIO_DRIVE_STRENGTH; 3887 3870 brcmf_sdio_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength); 3888 - 3889 - /* Get info on the SOCRAM cores... */ 3890 - bus->ramsize = bus->ci->ramsize; 3891 - if (!(bus->ramsize)) { 3892 - brcmf_err("failed to find SOCRAM memory!\n"); 3893 - goto fail; 3894 - } 3895 3871 3896 3872 /* Set card control so an SDIO card reset does a WLAN backplane reset */ 3897 3873 reg_val = brcmf_sdiod_regrb(bus->sdiodev, ··· 4158 4148 bus->watchdog_tsk = NULL; 4159 4149 } 4160 4150 /* Initialize DPC thread */ 4161 - atomic_set(&bus->dpc_tskcnt, 0); 4151 + bus->dpc_triggered = false; 4152 + bus->dpc_running = false; 4162 4153 4163 4154 /* Assign bus interface call back */ 4164 4155 bus->sdiodev->bus_if->dev = bus->sdiodev->dev; ··· 4254 4243 if (bus->ci) { 4255 4244 if (bus->sdiodev->state != BRCMF_SDIOD_NOMEDIUM) { 4256 4245 sdio_claim_host(bus->sdiodev->func[1]); 4246 + brcmf_sdio_wd_timer(bus, 0); 4257 4247 brcmf_sdio_clkctl(bus, CLK_AVAIL, false); 4258 4248 /* Leave the device in state where it is 4259 - * 'quiet'. This is done by putting it in 4260 - * download_state which essentially resets 4261 - * all necessary cores. 4249 + * 'passive'. This is done by resetting all 4250 + * necessary cores. 4262 4251 */ 4263 4252 msleep(20); 4264 - brcmf_chip_enter_download(bus->ci); 4253 + brcmf_chip_set_passive(bus->ci); 4265 4254 brcmf_sdio_clkctl(bus, CLK_NONE, false); 4266 4255 sdio_release_host(bus->sdiodev->func[1]); 4267 4256 }
+1 -1
drivers/net/wireless/brcm80211/brcmsmac/main.c
··· 4959 4959 * Configure pci/pcmcia here instead of in brcms_c_attach() 4960 4960 * to allow mfg hotswap: down, hotswap (chip power cycle), up. 4961 4961 */ 4962 - bcma_core_pci_irq_ctl(wlc_hw->d11core->bus, wlc_hw->d11core, 4962 + bcma_host_pci_irq_ctl(wlc_hw->d11core->bus, wlc_hw->d11core, 4963 4963 true); 4964 4964 4965 4965 /*
+2
drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
··· 37 37 #define BRCM_CC_43362_CHIP_ID 43362 38 38 #define BRCM_CC_4335_CHIP_ID 0x4335 39 39 #define BRCM_CC_4339_CHIP_ID 0x4339 40 + #define BRCM_CC_43430_CHIP_ID 43430 41 + #define BRCM_CC_4345_CHIP_ID 0x4345 40 42 #define BRCM_CC_4354_CHIP_ID 0x4354 41 43 #define BRCM_CC_4356_CHIP_ID 0x4356 42 44 #define BRCM_CC_43566_CHIP_ID 43566
+8 -1
drivers/net/wireless/brcm80211/include/chipcommon.h
··· 183 183 u8 uart1lsr; 184 184 u8 uart1msr; 185 185 u8 uart1scratch; 186 - u32 PAD[126]; 186 + u32 PAD[62]; 187 + 188 + /* save/restore, corerev >= 48 */ 189 + u32 sr_capability; /* 0x500 */ 190 + u32 sr_control0; /* 0x504 */ 191 + u32 sr_control1; /* 0x508 */ 192 + u32 gpio_control; /* 0x50C */ 193 + u32 PAD[60]; 187 194 188 195 /* PMU registers (corerev >= 20) */ 189 196 u32 pmucontrol; /* 0x600 */
+4 -7
drivers/net/wireless/cw1200/cw1200_spi.c
··· 447 447 } 448 448 449 449 #ifdef CONFIG_PM 450 - static int cw1200_spi_suspend(struct device *dev, pm_message_t state) 450 + static int cw1200_spi_suspend(struct device *dev) 451 451 { 452 452 struct hwbus_priv *self = spi_get_drvdata(to_spi_device(dev)); 453 453 ··· 458 458 return 0; 459 459 } 460 460 461 - static int cw1200_spi_resume(struct device *dev) 462 - { 463 - return 0; 464 - } 461 + static SIMPLE_DEV_PM_OPS(cw1200_pm_ops, cw1200_spi_suspend, NULL); 462 + 465 463 #endif 466 464 467 465 static struct spi_driver spi_driver = { ··· 470 472 .bus = &spi_bus_type, 471 473 .owner = THIS_MODULE, 472 474 #ifdef CONFIG_PM 473 - .suspend = cw1200_spi_suspend, 474 - .resume = cw1200_spi_resume, 475 + .pm = &cw1200_pm_ops, 475 476 #endif 476 477 }, 477 478 };
+9 -8
drivers/net/wireless/iwlwifi/dvm/mac80211.c
··· 1114 1114 scd_queues &= ~(BIT(IWL_IPAN_CMD_QUEUE_NUM) | 1115 1115 BIT(IWL_DEFAULT_CMD_QUEUE_NUM)); 1116 1116 1117 - if (vif) 1118 - scd_queues &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]); 1119 - 1120 - IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n", scd_queues); 1121 - if (iwlagn_txfifo_flush(priv, scd_queues)) { 1122 - IWL_ERR(priv, "flush request fail\n"); 1123 - goto done; 1117 + if (drop) { 1118 + IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n", 1119 + scd_queues); 1120 + if (iwlagn_txfifo_flush(priv, scd_queues)) { 1121 + IWL_ERR(priv, "flush request fail\n"); 1122 + goto done; 1123 + } 1124 1124 } 1125 + 1125 1126 IWL_DEBUG_TX_QUEUES(priv, "wait transmit/flush all frames\n"); 1126 - iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff); 1127 + iwl_trans_wait_tx_queue_empty(priv->trans, scd_queues); 1127 1128 done: 1128 1129 mutex_unlock(&priv->mutex); 1129 1130 IWL_DEBUG_MAC80211(priv, "leave\n");
+4 -3
drivers/net/wireless/iwlwifi/dvm/rs.c
··· 3153 3153 desc += sprintf(buff+desc, "lq type %s\n", 3154 3154 (is_legacy(tbl->lq_type)) ? "legacy" : "HT"); 3155 3155 if (is_Ht(tbl->lq_type)) { 3156 - desc += sprintf(buff+desc, " %s", 3156 + desc += sprintf(buff + desc, " %s", 3157 3157 (is_siso(tbl->lq_type)) ? "SISO" : 3158 3158 ((is_mimo2(tbl->lq_type)) ? "MIMO2" : "MIMO3")); 3159 - desc += sprintf(buff+desc, " %s", 3159 + desc += sprintf(buff + desc, " %s", 3160 3160 (tbl->is_ht40) ? "40MHz" : "20MHz"); 3161 - desc += sprintf(buff+desc, " %s %s %s\n", (tbl->is_SGI) ? "SGI" : "", 3161 + desc += sprintf(buff + desc, " %s %s %s\n", 3162 + (tbl->is_SGI) ? "SGI" : "", 3162 3163 (lq_sta->is_green) ? "GF enabled" : "", 3163 3164 (lq_sta->is_agg) ? "AGG on" : ""); 3164 3165 }
+3 -3
drivers/net/wireless/iwlwifi/dvm/tx.c
··· 189 189 rate_flags |= RATE_MCS_CCK_MSK; 190 190 191 191 /* Set up antennas */ 192 - if (priv->lib->bt_params && 193 - priv->lib->bt_params->advanced_bt_coexist && 194 - priv->bt_full_concurrent) { 192 + if (priv->lib->bt_params && 193 + priv->lib->bt_params->advanced_bt_coexist && 194 + priv->bt_full_concurrent) { 195 195 /* operated as 1x1 in full concurrency mode */ 196 196 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, 197 197 first_antenna(priv->nvm_data->valid_tx_ant));
+4 -4
drivers/net/wireless/iwlwifi/iwl-7000.c
··· 69 69 #include "iwl-agn-hw.h" 70 70 71 71 /* Highest firmware API version supported */ 72 - #define IWL7260_UCODE_API_MAX 12 73 - #define IWL3160_UCODE_API_MAX 12 72 + #define IWL7260_UCODE_API_MAX 13 73 + #define IWL3160_UCODE_API_MAX 13 74 74 75 75 /* Oldest version we won't warn about */ 76 - #define IWL7260_UCODE_API_OK 10 77 - #define IWL3160_UCODE_API_OK 10 76 + #define IWL7260_UCODE_API_OK 12 77 + #define IWL3160_UCODE_API_OK 12 78 78 79 79 /* Lowest firmware API version supported */ 80 80 #define IWL7260_UCODE_API_MIN 10
+2 -2
drivers/net/wireless/iwlwifi/iwl-8000.c
··· 69 69 #include "iwl-agn-hw.h" 70 70 71 71 /* Highest firmware API version supported */ 72 - #define IWL8000_UCODE_API_MAX 12 72 + #define IWL8000_UCODE_API_MAX 13 73 73 74 74 /* Oldest version we won't warn about */ 75 - #define IWL8000_UCODE_API_OK 10 75 + #define IWL8000_UCODE_API_OK 12 76 76 77 77 /* Lowest firmware API version supported */ 78 78 #define IWL8000_UCODE_API_MIN 10
+2
drivers/net/wireless/iwlwifi/iwl-debug.h
··· 157 157 /* 0x0000F000 - 0x00001000 */ 158 158 #define IWL_DL_ASSOC 0x00001000 159 159 #define IWL_DL_DROP 0x00002000 160 + #define IWL_DL_LAR 0x00004000 160 161 #define IWL_DL_COEX 0x00008000 161 162 /* 0x000F0000 - 0x00010000 */ 162 163 #define IWL_DL_FW 0x00010000 ··· 220 219 #define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a) 221 220 #define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a) 222 221 #define IWL_DEBUG_RPM(p, f, a...) IWL_DEBUG(p, IWL_DL_RPM, f, ## a) 222 + #define IWL_DEBUG_LAR(p, f, a...) IWL_DEBUG(p, IWL_DL_LAR, f, ## a) 223 223 224 224 #endif
+13 -9
drivers/net/wireless/iwlwifi/iwl-drv.c
··· 1014 1014 1015 1015 /* Verify that uCode images will fit in card's SRAM. */ 1016 1016 if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) > 1017 - cfg->max_inst_size) { 1017 + cfg->max_inst_size) { 1018 1018 IWL_ERR(drv, "uCode instr len %Zd too large to fit in\n", 1019 1019 get_sec_size(pieces, IWL_UCODE_REGULAR, 1020 - IWL_UCODE_SECTION_INST)); 1020 + IWL_UCODE_SECTION_INST)); 1021 1021 return -1; 1022 1022 } 1023 1023 1024 1024 if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) > 1025 - cfg->max_data_size) { 1025 + cfg->max_data_size) { 1026 1026 IWL_ERR(drv, "uCode data len %Zd too large to fit in\n", 1027 1027 get_sec_size(pieces, IWL_UCODE_REGULAR, 1028 - IWL_UCODE_SECTION_DATA)); 1028 + IWL_UCODE_SECTION_DATA)); 1029 1029 return -1; 1030 1030 } 1031 1031 1032 - if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) > 1033 - cfg->max_inst_size) { 1032 + if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) > 1033 + cfg->max_inst_size) { 1034 1034 IWL_ERR(drv, "uCode init instr len %Zd too large to fit in\n", 1035 1035 get_sec_size(pieces, IWL_UCODE_INIT, 1036 - IWL_UCODE_SECTION_INST)); 1036 + IWL_UCODE_SECTION_INST)); 1037 1037 return -1; 1038 1038 } 1039 1039 1040 1040 if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA) > 1041 - cfg->max_data_size) { 1041 + cfg->max_data_size) { 1042 1042 IWL_ERR(drv, "uCode init data len %Zd too large to fit in\n", 1043 1043 get_sec_size(pieces, IWL_UCODE_REGULAR, 1044 - IWL_UCODE_SECTION_DATA)); 1044 + IWL_UCODE_SECTION_DATA)); 1045 1045 return -1; 1046 1046 } 1047 1047 return 0; ··· 1545 1545 module_param_named(d0i3_disable, iwlwifi_mod_params.d0i3_disable, 1546 1546 bool, S_IRUGO); 1547 1547 MODULE_PARM_DESC(d0i3_disable, "disable d0i3 functionality (default: Y)"); 1548 + 1549 + module_param_named(lar_disable, iwlwifi_mod_params.lar_disable, 1550 + bool, S_IRUGO); 1551 + MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)"); 1548 
1552 1549 1553 module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, 1550 1554 bool, S_IRUGO | S_IWUSR);
+1 -1
drivers/net/wireless/iwlwifi/iwl-drv.h
··· 68 68 69 69 /* for all modules */ 70 70 #define DRV_NAME "iwlwifi" 71 - #define DRV_COPYRIGHT "Copyright(c) 2003- 2014 Intel Corporation" 71 + #define DRV_COPYRIGHT "Copyright(c) 2003- 2015 Intel Corporation" 72 72 #define DRV_AUTHOR "<ilw@linux.intel.com>" 73 73 74 74 /* radio config bits (actual values from NVM definition) */
+1
drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
··· 94 94 u32 nvm_version; 95 95 s8 max_tx_pwr_half_dbm; 96 96 97 + bool lar_enabled; 97 98 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS]; 98 99 struct ieee80211_channel channels[]; 99 100 };
+4 -4
drivers/net/wireless/iwlwifi/iwl-fw-file.h
··· 240 240 /** 241 241 * enum iwl_ucode_tlv_api - ucode api 242 242 * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex 243 - * @IWL_UCODE_TLV_API_DISABLE_STA_TX: ucode supports tx_disable bit. 244 - * @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif. 245 243 * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time 246 244 * longer than the passive one, which is essential for fragmented scan. 245 + * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source. 247 246 * IWL_UCODE_TLV_API_HDC_PHASE_0: ucode supports finer configuration of LTR 248 247 * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command, 249 248 * regardless of the band or the number of the probes. FW will calculate ··· 257 258 */ 258 259 enum iwl_ucode_tlv_api { 259 260 IWL_UCODE_TLV_API_BT_COEX_SPLIT = BIT(3), 260 - IWL_UCODE_TLV_API_DISABLE_STA_TX = BIT(5), 261 - IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7), 262 261 IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8), 262 + IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = BIT(9), 263 263 IWL_UCODE_TLV_API_HDC_PHASE_0 = BIT(10), 264 264 IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13), 265 265 IWL_UCODE_TLV_API_SCD_CFG = BIT(15), ··· 290 292 * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command 291 293 * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics 292 294 * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running 295 + * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC 293 296 */ 294 297 enum iwl_ucode_tlv_capa { 295 298 IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0), ··· 307 308 IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = BIT(18), 308 309 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = BIT(22), 309 310 IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = BIT(28), 311 + IWL_UCODE_TLV_CAPA_BT_COEX_RRC = BIT(30), 310 312 }; 311 313 312 314 /* The default calibrate table size if not specified by firmware file */
+2
drivers/net/wireless/iwlwifi/iwl-io.c
··· 201 201 } else { 202 202 iwl_write_prph(trans, DEVICE_SET_NMI_8000B_REG, 203 203 DEVICE_SET_NMI_8000B_VAL); 204 + iwl_write_prph(trans, DEVICE_SET_NMI_REG, 205 + DEVICE_SET_NMI_VAL_DRV); 204 206 } 205 207 } 206 208 IWL_EXPORT_SYMBOL(iwl_force_nmi);
+2
drivers/net/wireless/iwlwifi/iwl-modparams.h
··· 103 103 * @debug_level: levels are IWL_DL_* 104 104 * @ant_coupling: antenna coupling in dB, default = 0 105 105 * @d0i3_disable: disable d0i3, default = 1, 106 + * @lar_disable: disable LAR (regulatory), default = 0 106 107 * @fw_monitor: allow to use firmware monitor 107 108 */ 108 109 struct iwl_mod_params { ··· 122 121 char *nvm_file; 123 122 bool uapsd_disable; 124 123 bool d0i3_disable; 124 + bool lar_disable; 125 125 bool fw_monitor; 126 126 }; 127 127
+302 -105
drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
··· 103 103 SKU_FAMILY_8000 = 4, 104 104 N_HW_ADDRS_FAMILY_8000 = 5, 105 105 106 + /* NVM PHY-SKU-Section offset (in words) for B0 */ 107 + RADIO_CFG_FAMILY_8000_B0 = 0, 108 + SKU_FAMILY_8000_B0 = 2, 109 + N_HW_ADDRS_FAMILY_8000_B0 = 3, 110 + 106 111 /* NVM REGULATORY -Section offset (in words) definitions */ 107 112 NVM_CHANNELS_FAMILY_8000 = 0, 113 + NVM_LAR_OFFSET_FAMILY_8000_OLD = 0x4C7, 114 + NVM_LAR_OFFSET_FAMILY_8000 = 0x507, 115 + NVM_LAR_ENABLED_FAMILY_8000 = 0x7, 108 116 109 117 /* NVM calibration section offset (in words) definitions */ 110 118 NVM_CALIB_SECTION_FAMILY_8000 = 0x2B8, ··· 154 146 #define NUM_2GHZ_CHANNELS_FAMILY_8000 14 155 147 #define FIRST_2GHZ_HT_MINUS 5 156 148 #define LAST_2GHZ_HT_PLUS 9 157 - #define LAST_5GHZ_HT 161 149 + #define LAST_5GHZ_HT 165 150 + #define LAST_5GHZ_HT_FAMILY_8000 181 151 + #define N_HW_ADDR_MASK 0xF 158 152 159 153 /* rate data (static) */ 160 154 static struct ieee80211_rate iwl_cfg80211_rates[] = { ··· 211 201 #define CHECK_AND_PRINT_I(x) \ 212 202 ((ch_flags & NVM_CHANNEL_##x) ? 
# x " " : "") 213 203 204 + static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz, 205 + u16 nvm_flags, const struct iwl_cfg *cfg) 206 + { 207 + u32 flags = IEEE80211_CHAN_NO_HT40; 208 + u32 last_5ghz_ht = LAST_5GHZ_HT; 209 + 210 + if (cfg->device_family == IWL_DEVICE_FAMILY_8000) 211 + last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000; 212 + 213 + if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) { 214 + if (ch_num <= LAST_2GHZ_HT_PLUS) 215 + flags &= ~IEEE80211_CHAN_NO_HT40PLUS; 216 + if (ch_num >= FIRST_2GHZ_HT_MINUS) 217 + flags &= ~IEEE80211_CHAN_NO_HT40MINUS; 218 + } else if (ch_num <= last_5ghz_ht && (nvm_flags & NVM_CHANNEL_40MHZ)) { 219 + if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0) 220 + flags &= ~IEEE80211_CHAN_NO_HT40PLUS; 221 + else 222 + flags &= ~IEEE80211_CHAN_NO_HT40MINUS; 223 + } 224 + if (!(nvm_flags & NVM_CHANNEL_80MHZ)) 225 + flags |= IEEE80211_CHAN_NO_80MHZ; 226 + if (!(nvm_flags & NVM_CHANNEL_160MHZ)) 227 + flags |= IEEE80211_CHAN_NO_160MHZ; 228 + 229 + if (!(nvm_flags & NVM_CHANNEL_IBSS)) 230 + flags |= IEEE80211_CHAN_NO_IR; 231 + 232 + if (!(nvm_flags & NVM_CHANNEL_ACTIVE)) 233 + flags |= IEEE80211_CHAN_NO_IR; 234 + 235 + if (nvm_flags & NVM_CHANNEL_RADAR) 236 + flags |= IEEE80211_CHAN_RADAR; 237 + 238 + if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY) 239 + flags |= IEEE80211_CHAN_INDOOR_ONLY; 240 + 241 + /* Set the GO concurrent flag only in case that NO_IR is set. 
242 + * Otherwise it is meaningless 243 + */ 244 + if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) && 245 + (flags & IEEE80211_CHAN_NO_IR)) 246 + flags |= IEEE80211_CHAN_GO_CONCURRENT; 247 + 248 + return flags; 249 + } 250 + 214 251 static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, 215 252 struct iwl_nvm_data *data, 216 - const __le16 * const nvm_ch_flags) 253 + const __le16 * const nvm_ch_flags, 254 + bool lar_supported) 217 255 { 218 256 int ch_idx; 219 257 int n_channels = 0; ··· 286 228 287 229 if (ch_idx >= num_2ghz_channels && 288 230 !data->sku_cap_band_52GHz_enable) 289 - ch_flags &= ~NVM_CHANNEL_VALID; 231 + continue; 290 232 291 - if (!(ch_flags & NVM_CHANNEL_VALID)) { 233 + if (!lar_supported && !(ch_flags & NVM_CHANNEL_VALID)) { 234 + /* 235 + * Channels might become valid later if lar is 236 + * supported, hence we still want to add them to 237 + * the list of supported channels to cfg80211. 238 + */ 292 239 IWL_DEBUG_EEPROM(dev, 293 240 "Ch. %d Flags %x [%sGHz] - No traffic\n", 294 241 nvm_chan[ch_idx], ··· 313 250 ieee80211_channel_to_frequency( 314 251 channel->hw_value, channel->band); 315 252 316 - /* TODO: Need to be dependent to the NVM */ 317 - channel->flags = IEEE80211_CHAN_NO_HT40; 318 - if (ch_idx < num_2ghz_channels && 319 - (ch_flags & NVM_CHANNEL_40MHZ)) { 320 - if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS) 321 - channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS; 322 - if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS) 323 - channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS; 324 - } else if (nvm_chan[ch_idx] <= LAST_5GHZ_HT && 325 - (ch_flags & NVM_CHANNEL_40MHZ)) { 326 - if ((ch_idx - num_2ghz_channels) % 2 == 0) 327 - channel->flags &= ~IEEE80211_CHAN_NO_HT40PLUS; 328 - else 329 - channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS; 330 - } 331 - if (!(ch_flags & NVM_CHANNEL_80MHZ)) 332 - channel->flags |= IEEE80211_CHAN_NO_80MHZ; 333 - if (!(ch_flags & NVM_CHANNEL_160MHZ)) 334 - channel->flags |= IEEE80211_CHAN_NO_160MHZ; 335 - 
336 - if (!(ch_flags & NVM_CHANNEL_IBSS)) 337 - channel->flags |= IEEE80211_CHAN_NO_IR; 338 - 339 - if (!(ch_flags & NVM_CHANNEL_ACTIVE)) 340 - channel->flags |= IEEE80211_CHAN_NO_IR; 341 - 342 - if (ch_flags & NVM_CHANNEL_RADAR) 343 - channel->flags |= IEEE80211_CHAN_RADAR; 344 - 345 - if (ch_flags & NVM_CHANNEL_INDOOR_ONLY) 346 - channel->flags |= IEEE80211_CHAN_INDOOR_ONLY; 347 - 348 - /* Set the GO concurrent flag only in case that NO_IR is set. 349 - * Otherwise it is meaningless 350 - */ 351 - if ((ch_flags & NVM_CHANNEL_GO_CONCURRENT) && 352 - (channel->flags & IEEE80211_CHAN_NO_IR)) 353 - channel->flags |= IEEE80211_CHAN_GO_CONCURRENT; 354 - 355 253 /* Initialize regulatory-based run-time data */ 356 254 357 255 /* ··· 321 297 */ 322 298 channel->max_power = IWL_DEFAULT_MAX_TX_POWER; 323 299 is_5ghz = channel->band == IEEE80211_BAND_5GHZ; 300 + 301 + /* don't put limitations in case we're using LAR */ 302 + if (!lar_supported) 303 + channel->flags = iwl_get_channel_flags(nvm_chan[ch_idx], 304 + ch_idx, is_5ghz, 305 + ch_flags, cfg); 306 + else 307 + channel->flags = 0; 308 + 324 309 IWL_DEBUG_EEPROM(dev, 325 310 "Ch. 
%d [%sGHz] %s%s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n", 326 311 channel->hw_value, ··· 403 370 404 371 static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, 405 372 struct iwl_nvm_data *data, 406 - const __le16 *ch_section, bool enable_vht, 407 - u8 tx_chains, u8 rx_chains) 373 + const __le16 *ch_section, 374 + u8 tx_chains, u8 rx_chains, bool lar_supported) 408 375 { 409 376 int n_channels; 410 377 int n_used = 0; ··· 413 380 if (cfg->device_family != IWL_DEVICE_FAMILY_8000) 414 381 n_channels = iwl_init_channel_map( 415 382 dev, cfg, data, 416 - &ch_section[NVM_CHANNELS]); 383 + &ch_section[NVM_CHANNELS], lar_supported); 417 384 else 418 385 n_channels = iwl_init_channel_map( 419 386 dev, cfg, data, 420 - &ch_section[NVM_CHANNELS_FAMILY_8000]); 387 + &ch_section[NVM_CHANNELS_FAMILY_8000], 388 + lar_supported); 421 389 422 390 sband = &data->bands[IEEE80211_BAND_2GHZ]; 423 391 sband->band = IEEE80211_BAND_2GHZ; ··· 437 403 IEEE80211_BAND_5GHZ); 438 404 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ, 439 405 tx_chains, rx_chains); 440 - if (enable_vht) 406 + if (data->sku_cap_11ac_enable) 441 407 iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap, 442 408 tx_chains, rx_chains); 443 409 ··· 447 413 } 448 414 449 415 static int iwl_get_sku(const struct iwl_cfg *cfg, 450 - const __le16 *nvm_sw) 416 + const __le16 *nvm_sw, const __le16 *phy_sku, 417 + bool is_family_8000_a_step) 451 418 { 452 419 if (cfg->device_family != IWL_DEVICE_FAMILY_8000) 453 420 return le16_to_cpup(nvm_sw + SKU); 421 + 422 + if (!is_family_8000_a_step) 423 + return le32_to_cpup((__le32 *)(phy_sku + 424 + SKU_FAMILY_8000_B0)); 454 425 else 455 426 return le32_to_cpup((__le32 *)(nvm_sw + SKU_FAMILY_8000)); 456 427 } ··· 471 432 } 472 433 473 434 static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, 474 - const __le16 *nvm_sw) 435 + const __le16 *nvm_sw, const __le16 *phy_sku, 436 + bool is_family_8000_a_step) 475 437 { 476 438 if 
(cfg->device_family != IWL_DEVICE_FAMILY_8000) 477 439 return le16_to_cpup(nvm_sw + RADIO_CFG); 440 + 441 + if (!is_family_8000_a_step) 442 + return le32_to_cpup((__le32 *)(phy_sku + 443 + RADIO_CFG_FAMILY_8000_B0)); 478 444 else 479 445 return le32_to_cpup((__le32 *)(nvm_sw + RADIO_CFG_FAMILY_8000)); 446 + 480 447 } 481 448 482 - #define N_HW_ADDRS_MASK_FAMILY_8000 0xF 483 449 static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, 484 - const __le16 *nvm_sw) 450 + const __le16 *nvm_sw, bool is_family_8000_a_step) 485 451 { 452 + int n_hw_addr; 453 + 486 454 if (cfg->device_family != IWL_DEVICE_FAMILY_8000) 487 455 return le16_to_cpup(nvm_sw + N_HW_ADDRS); 456 + 457 + if (!is_family_8000_a_step) 458 + n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw + 459 + N_HW_ADDRS_FAMILY_8000_B0)); 488 460 else 489 - return le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000)) 490 - & N_HW_ADDRS_MASK_FAMILY_8000; 461 + n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw + 462 + N_HW_ADDRS_FAMILY_8000)); 463 + 464 + return n_hw_addr & N_HW_ADDR_MASK; 491 465 } 492 466 493 467 static void iwl_set_radio_cfg(const struct iwl_cfg *cfg, ··· 543 491 const struct iwl_cfg *cfg, 544 492 struct iwl_nvm_data *data, 545 493 const __le16 *mac_override, 546 - const __le16 *nvm_hw) 494 + const __le16 *nvm_hw, 495 + u32 mac_addr0, u32 mac_addr1) 547 496 { 548 497 const u8 *hw_addr; 549 498 ··· 568 515 } 569 516 570 517 if (nvm_hw) { 571 - /* read the MAC address from OTP */ 572 - if (!dev_is_pci(dev) || (data->nvm_version < 0xE08)) { 573 - /* read the mac address from the WFPM location */ 574 - hw_addr = (const u8 *)(nvm_hw + 575 - HW_ADDR0_WFPM_FAMILY_8000); 576 - data->hw_addr[0] = hw_addr[3]; 577 - data->hw_addr[1] = hw_addr[2]; 578 - data->hw_addr[2] = hw_addr[1]; 579 - data->hw_addr[3] = hw_addr[0]; 518 + /* read the MAC address from HW resisters */ 519 + hw_addr = (const u8 *)&mac_addr0; 520 + data->hw_addr[0] = hw_addr[3]; 521 + data->hw_addr[1] = hw_addr[2]; 522 + data->hw_addr[2] = hw_addr[1]; 
523 + data->hw_addr[3] = hw_addr[0]; 580 524 581 - hw_addr = (const u8 *)(nvm_hw + 582 - HW_ADDR1_WFPM_FAMILY_8000); 583 - data->hw_addr[4] = hw_addr[1]; 584 - data->hw_addr[5] = hw_addr[0]; 585 - } else if ((data->nvm_version >= 0xE08) && 586 - (data->nvm_version < 0xE0B)) { 587 - /* read "reverse order" from the PCIe location */ 588 - hw_addr = (const u8 *)(nvm_hw + 589 - HW_ADDR0_PCIE_FAMILY_8000); 590 - data->hw_addr[5] = hw_addr[2]; 591 - data->hw_addr[4] = hw_addr[1]; 592 - data->hw_addr[3] = hw_addr[0]; 525 + hw_addr = (const u8 *)&mac_addr1; 526 + data->hw_addr[4] = hw_addr[1]; 527 + data->hw_addr[5] = hw_addr[0]; 593 528 594 - hw_addr = (const u8 *)(nvm_hw + 595 - HW_ADDR1_PCIE_FAMILY_8000); 596 - data->hw_addr[2] = hw_addr[3]; 597 - data->hw_addr[1] = hw_addr[2]; 598 - data->hw_addr[0] = hw_addr[1]; 599 - } else { 600 - /* read from the PCIe location */ 601 - hw_addr = (const u8 *)(nvm_hw + 602 - HW_ADDR0_PCIE_FAMILY_8000); 603 - data->hw_addr[5] = hw_addr[0]; 604 - data->hw_addr[4] = hw_addr[1]; 605 - data->hw_addr[3] = hw_addr[2]; 606 - 607 - hw_addr = (const u8 *)(nvm_hw + 608 - HW_ADDR1_PCIE_FAMILY_8000); 609 - data->hw_addr[2] = hw_addr[1]; 610 - data->hw_addr[1] = hw_addr[2]; 611 - data->hw_addr[0] = hw_addr[3]; 612 - } 613 529 if (!is_valid_ether_addr(data->hw_addr)) 614 530 IWL_ERR_DEV(dev, 615 531 "mac address from hw section is not valid\n"); ··· 593 571 iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, 594 572 const __le16 *nvm_hw, const __le16 *nvm_sw, 595 573 const __le16 *nvm_calib, const __le16 *regulatory, 596 - const __le16 *mac_override, u8 tx_chains, u8 rx_chains) 574 + const __le16 *mac_override, const __le16 *phy_sku, 575 + u8 tx_chains, u8 rx_chains, 576 + bool lar_fw_supported, bool is_family_8000_a_step, 577 + u32 mac_addr0, u32 mac_addr1) 597 578 { 598 579 struct iwl_nvm_data *data; 599 580 u32 sku; 600 581 u32 radio_cfg; 582 + u16 lar_config; 601 583 602 584 if (cfg->device_family != IWL_DEVICE_FAMILY_8000) 603 
585 data = kzalloc(sizeof(*data) + ··· 618 592 619 593 data->nvm_version = iwl_get_nvm_version(cfg, nvm_sw); 620 594 621 - radio_cfg = iwl_get_radio_cfg(cfg, nvm_sw); 595 + radio_cfg = 596 + iwl_get_radio_cfg(cfg, nvm_sw, phy_sku, is_family_8000_a_step); 622 597 iwl_set_radio_cfg(cfg, data, radio_cfg); 623 598 if (data->valid_tx_ant) 624 599 tx_chains &= data->valid_tx_ant; 625 600 if (data->valid_rx_ant) 626 601 rx_chains &= data->valid_rx_ant; 627 602 628 - sku = iwl_get_sku(cfg, nvm_sw); 603 + sku = iwl_get_sku(cfg, nvm_sw, phy_sku, is_family_8000_a_step); 629 604 data->sku_cap_band_24GHz_enable = sku & NVM_SKU_CAP_BAND_24GHZ; 630 605 data->sku_cap_band_52GHz_enable = sku & NVM_SKU_CAP_BAND_52GHZ; 631 606 data->sku_cap_11n_enable = sku & NVM_SKU_CAP_11N_ENABLE; 632 - data->sku_cap_11ac_enable = sku & NVM_SKU_CAP_11AC_ENABLE; 633 607 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) 634 608 data->sku_cap_11n_enable = false; 609 + data->sku_cap_11ac_enable = data->sku_cap_11n_enable && 610 + (sku & NVM_SKU_CAP_11AC_ENABLE); 635 611 636 - data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw); 612 + data->n_hw_addrs = 613 + iwl_get_n_hw_addrs(cfg, nvm_sw, is_family_8000_a_step); 637 614 638 615 if (cfg->device_family != IWL_DEVICE_FAMILY_8000) { 639 616 /* Checking for required sections */ ··· 655 626 iwl_set_hw_address(cfg, data, nvm_hw); 656 627 657 628 iwl_init_sbands(dev, cfg, data, nvm_sw, 658 - sku & NVM_SKU_CAP_11AC_ENABLE, tx_chains, 659 - rx_chains); 629 + tx_chains, rx_chains, lar_fw_supported); 660 630 } else { 631 + u16 lar_offset = data->nvm_version < 0xE39 ? 
632 + NVM_LAR_OFFSET_FAMILY_8000_OLD : 633 + NVM_LAR_OFFSET_FAMILY_8000; 634 + 635 + lar_config = le16_to_cpup(regulatory + lar_offset); 636 + data->lar_enabled = !!(lar_config & 637 + NVM_LAR_ENABLED_FAMILY_8000); 638 + 661 639 /* MAC address in family 8000 */ 662 640 iwl_set_hw_address_family_8000(dev, cfg, data, mac_override, 663 - nvm_hw); 641 + nvm_hw, mac_addr0, mac_addr1); 664 642 665 643 iwl_init_sbands(dev, cfg, data, regulatory, 666 - sku & NVM_SKU_CAP_11AC_ENABLE, tx_chains, 667 - rx_chains); 644 + tx_chains, rx_chains, 645 + lar_fw_supported && data->lar_enabled); 668 646 } 669 647 670 648 data->calib_version = 255; ··· 679 643 return data; 680 644 } 681 645 IWL_EXPORT_SYMBOL(iwl_parse_nvm_data); 646 + 647 + static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan, 648 + int ch_idx, u16 nvm_flags, 649 + const struct iwl_cfg *cfg) 650 + { 651 + u32 flags = NL80211_RRF_NO_HT40; 652 + u32 last_5ghz_ht = LAST_5GHZ_HT; 653 + 654 + if (cfg->device_family == IWL_DEVICE_FAMILY_8000) 655 + last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000; 656 + 657 + if (ch_idx < NUM_2GHZ_CHANNELS && 658 + (nvm_flags & NVM_CHANNEL_40MHZ)) { 659 + if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS) 660 + flags &= ~NL80211_RRF_NO_HT40PLUS; 661 + if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS) 662 + flags &= ~NL80211_RRF_NO_HT40MINUS; 663 + } else if (nvm_chan[ch_idx] <= last_5ghz_ht && 664 + (nvm_flags & NVM_CHANNEL_40MHZ)) { 665 + if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0) 666 + flags &= ~NL80211_RRF_NO_HT40PLUS; 667 + else 668 + flags &= ~NL80211_RRF_NO_HT40MINUS; 669 + } 670 + 671 + if (!(nvm_flags & NVM_CHANNEL_80MHZ)) 672 + flags |= NL80211_RRF_NO_80MHZ; 673 + if (!(nvm_flags & NVM_CHANNEL_160MHZ)) 674 + flags |= NL80211_RRF_NO_160MHZ; 675 + 676 + if (!(nvm_flags & NVM_CHANNEL_ACTIVE)) 677 + flags |= NL80211_RRF_NO_IR; 678 + 679 + if (nvm_flags & NVM_CHANNEL_RADAR) 680 + flags |= NL80211_RRF_DFS; 681 + 682 + if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY) 683 + flags |= NL80211_RRF_NO_OUTDOOR; 684 + 
685 + /* Set the GO concurrent flag only in case that NO_IR is set. 686 + * Otherwise it is meaningless 687 + */ 688 + if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) && 689 + (flags & NL80211_RRF_NO_IR)) 690 + flags |= NL80211_RRF_GO_CONCURRENT; 691 + 692 + return flags; 693 + } 694 + 695 + struct ieee80211_regdomain * 696 + iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, 697 + int num_of_ch, __le32 *channels, u16 fw_mcc) 698 + { 699 + int ch_idx; 700 + u16 ch_flags, prev_ch_flags = 0; 701 + const u8 *nvm_chan = cfg->device_family == IWL_DEVICE_FAMILY_8000 ? 702 + iwl_nvm_channels_family_8000 : iwl_nvm_channels; 703 + struct ieee80211_regdomain *regd; 704 + int size_of_regd; 705 + struct ieee80211_reg_rule *rule; 706 + enum ieee80211_band band; 707 + int center_freq, prev_center_freq = 0; 708 + int valid_rules = 0; 709 + bool new_rule; 710 + int max_num_ch = cfg->device_family == IWL_DEVICE_FAMILY_8000 ? 711 + IWL_NUM_CHANNELS_FAMILY_8000 : IWL_NUM_CHANNELS; 712 + 713 + if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES)) 714 + return ERR_PTR(-EINVAL); 715 + 716 + if (WARN_ON(num_of_ch > max_num_ch)) 717 + num_of_ch = max_num_ch; 718 + 719 + IWL_DEBUG_DEV(dev, IWL_DL_LAR, "building regdom for %d channels\n", 720 + num_of_ch); 721 + 722 + /* build a regdomain rule for every valid channel */ 723 + size_of_regd = 724 + sizeof(struct ieee80211_regdomain) + 725 + num_of_ch * sizeof(struct ieee80211_reg_rule); 726 + 727 + regd = kzalloc(size_of_regd, GFP_KERNEL); 728 + if (!regd) 729 + return ERR_PTR(-ENOMEM); 730 + 731 + for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { 732 + ch_flags = (u16)__le32_to_cpup(channels + ch_idx); 733 + band = (ch_idx < NUM_2GHZ_CHANNELS) ? 734 + IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; 735 + center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx], 736 + band); 737 + new_rule = false; 738 + 739 + if (!(ch_flags & NVM_CHANNEL_VALID)) { 740 + IWL_DEBUG_DEV(dev, IWL_DL_LAR, 741 + "Ch. 
%d Flags %x [%sGHz] - No traffic\n", 742 + nvm_chan[ch_idx], 743 + ch_flags, 744 + (ch_idx >= NUM_2GHZ_CHANNELS) ? 745 + "5.2" : "2.4"); 746 + continue; 747 + } 748 + 749 + /* we can't continue the same rule */ 750 + if (ch_idx == 0 || prev_ch_flags != ch_flags || 751 + center_freq - prev_center_freq > 20) { 752 + valid_rules++; 753 + new_rule = true; 754 + } 755 + 756 + rule = &regd->reg_rules[valid_rules - 1]; 757 + 758 + if (new_rule) 759 + rule->freq_range.start_freq_khz = 760 + MHZ_TO_KHZ(center_freq - 10); 761 + 762 + rule->freq_range.end_freq_khz = MHZ_TO_KHZ(center_freq + 10); 763 + 764 + /* this doesn't matter - not used by FW */ 765 + rule->power_rule.max_antenna_gain = DBI_TO_MBI(6); 766 + rule->power_rule.max_eirp = 767 + DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER); 768 + 769 + rule->flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx, 770 + ch_flags, cfg); 771 + 772 + /* rely on auto-calculation to merge BW of contiguous chans */ 773 + rule->flags |= NL80211_RRF_AUTO_BW; 774 + rule->freq_range.max_bandwidth_khz = 0; 775 + 776 + prev_ch_flags = ch_flags; 777 + prev_center_freq = center_freq; 778 + 779 + IWL_DEBUG_DEV(dev, IWL_DL_LAR, 780 + "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x): Ad-Hoc %ssupported\n", 781 + center_freq, 782 + band == IEEE80211_BAND_5GHZ ? "5.2" : "2.4", 783 + CHECK_AND_PRINT_I(VALID), 784 + CHECK_AND_PRINT_I(ACTIVE), 785 + CHECK_AND_PRINT_I(RADAR), 786 + CHECK_AND_PRINT_I(WIDE), 787 + CHECK_AND_PRINT_I(40MHZ), 788 + CHECK_AND_PRINT_I(80MHZ), 789 + CHECK_AND_PRINT_I(160MHZ), 790 + CHECK_AND_PRINT_I(INDOOR_ONLY), 791 + CHECK_AND_PRINT_I(GO_CONCURRENT), 792 + ch_flags, 793 + ((ch_flags & NVM_CHANNEL_ACTIVE) && 794 + !(ch_flags & NVM_CHANNEL_RADAR)) 795 + ? "" : "not "); 796 + } 797 + 798 + regd->n_reg_rules = valid_rules; 799 + 800 + /* set alpha2 from FW. */ 801 + regd->alpha2[0] = fw_mcc >> 8; 802 + regd->alpha2[1] = fw_mcc & 0xff; 803 + 804 + return regd; 805 + } 806 + IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info);
+18 -1
drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
··· 62 62 #ifndef __iwl_nvm_parse_h__ 63 63 #define __iwl_nvm_parse_h__ 64 64 65 + #include <net/cfg80211.h> 65 66 #include "iwl-eeprom-parse.h" 66 67 67 68 /** ··· 77 76 iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, 78 77 const __le16 *nvm_hw, const __le16 *nvm_sw, 79 78 const __le16 *nvm_calib, const __le16 *regulatory, 80 - const __le16 *mac_override, u8 tx_chains, u8 rx_chains); 79 + const __le16 *mac_override, const __le16 *phy_sku, 80 + u8 tx_chains, u8 rx_chains, 81 + bool lar_fw_supported, bool is_family_8000_a_step, 82 + u32 mac_addr0, u32 mac_addr1); 83 + 84 + /** 85 + * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW 86 + * 87 + * This function parses the regulatory channel data received as a 88 + * MCC_UPDATE_CMD command. It returns a newly allocation regulatory domain, 89 + * to be fed into the regulatory core. An ERR_PTR is returned on error. 90 + * If not given to the regulatory core, the user is responsible for freeing 91 + * the regdomain returned here with kfree. 92 + */ 93 + struct ieee80211_regdomain * 94 + iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, 95 + int num_of_ch, __le32 *channels, u16 fw_mcc); 81 96 82 97 #endif /* __iwl_nvm_parse_h__ */
+27
drivers/net/wireless/iwlwifi/iwl-prph.h
··· 371 371 372 372 #define DBGC_IN_SAMPLE (0xa03c00) 373 373 374 + /* enable the ID buf for read */ 375 + #define WFPM_PS_CTL_CLR 0xA0300C 376 + #define WFMP_MAC_ADDR_0 0xA03080 377 + #define WFMP_MAC_ADDR_1 0xA03084 378 + #define LMPM_PMG_EN 0xA01CEC 379 + #define RADIO_REG_SYS_MANUAL_DFT_0 0xAD4078 380 + #define RFIC_REG_RD 0xAD0470 381 + #define WFPM_CTRL_REG 0xA03030 382 + enum { 383 + ENABLE_WFPM = BIT(31), 384 + WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK = 0x80000000, 385 + }; 386 + 387 + #define AUX_MISC_REG 0xA200B0 388 + enum { 389 + HW_STEP_LOCATION_BITS = 24, 390 + }; 391 + 392 + #define AUX_MISC_MASTER1_EN 0xA20818 393 + enum aux_misc_master1_en { 394 + AUX_MISC_MASTER1_EN_SBE_MSK = 0x1, 395 + }; 396 + 397 + #define AUX_MISC_MASTER1_SMPHR_STATUS 0xA20800 398 + #define RSA_ENABLE 0xA24B08 399 + #define PREG_AUX_BUS_WPROT_0 0xA04CC0 400 + 374 401 /* FW chicken bits */ 375 402 #define LMPM_CHICK 0xA01FF8 376 403 enum {
+15
drivers/net/wireless/iwlwifi/iwl-trans.h
··· 458 458 * @txq_disable: de-configure a Tx queue to send AMPDUs 459 459 * Must be atomic 460 460 * @wait_tx_queue_empty: wait until tx queues are empty. May sleep. 461 + * @freeze_txq_timer: prevents the timer of the queue from firing until the 462 + * queue is set to awake. Must be atomic. 461 463 * @dbgfs_register: add the dbgfs files under this directory. Files will be 462 464 * automatically deleted. 463 465 * @write8: write a u8 to a register at offset ofs from the BAR ··· 519 517 520 518 int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir); 521 519 int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm); 520 + void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs, 521 + bool freeze); 522 522 523 523 void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val); 524 524 void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val); ··· 875 871 }; 876 872 877 873 iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout); 874 + } 875 + 876 + static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans, 877 + unsigned long txqs, 878 + bool freeze) 879 + { 880 + if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) 881 + IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); 882 + 883 + if (trans->ops->freeze_txq_timer) 884 + trans->ops->freeze_txq_timer(trans, txqs, freeze); 878 885 } 879 886 880 887 static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans,
-220
drivers/net/wireless/iwlwifi/mvm/coex.c
··· 72 72 #include "mvm.h" 73 73 #include "iwl-debug.h" 74 74 75 - const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX] = { 76 - [BT_KILL_MSK_DEFAULT] = 0xfffffc00, 77 - [BT_KILL_MSK_NEVER] = 0xffffffff, 78 - [BT_KILL_MSK_ALWAYS] = 0, 79 - }; 80 - 81 - const u8 iwl_bt_cts_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = { 82 - { 83 - BT_KILL_MSK_ALWAYS, 84 - BT_KILL_MSK_ALWAYS, 85 - BT_KILL_MSK_ALWAYS, 86 - }, 87 - { 88 - BT_KILL_MSK_NEVER, 89 - BT_KILL_MSK_NEVER, 90 - BT_KILL_MSK_NEVER, 91 - }, 92 - { 93 - BT_KILL_MSK_NEVER, 94 - BT_KILL_MSK_NEVER, 95 - BT_KILL_MSK_NEVER, 96 - }, 97 - { 98 - BT_KILL_MSK_DEFAULT, 99 - BT_KILL_MSK_NEVER, 100 - BT_KILL_MSK_DEFAULT, 101 - }, 102 - }; 103 - 104 - const u8 iwl_bt_ack_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = { 105 - { 106 - BT_KILL_MSK_ALWAYS, 107 - BT_KILL_MSK_ALWAYS, 108 - BT_KILL_MSK_ALWAYS, 109 - }, 110 - { 111 - BT_KILL_MSK_ALWAYS, 112 - BT_KILL_MSK_ALWAYS, 113 - BT_KILL_MSK_ALWAYS, 114 - }, 115 - { 116 - BT_KILL_MSK_ALWAYS, 117 - BT_KILL_MSK_ALWAYS, 118 - BT_KILL_MSK_ALWAYS, 119 - }, 120 - { 121 - BT_KILL_MSK_DEFAULT, 122 - BT_KILL_MSK_ALWAYS, 123 - BT_KILL_MSK_DEFAULT, 124 - }, 125 - }; 126 - 127 - static const __le32 iwl_bt_prio_boost[BT_COEX_BOOST_SIZE] = { 128 - cpu_to_le32(0xf0f0f0f0), /* 50% */ 129 - cpu_to_le32(0xc0c0c0c0), /* 25% */ 130 - cpu_to_le32(0xfcfcfcfc), /* 75% */ 131 - cpu_to_le32(0xfefefefe), /* 87.5% */ 132 - }; 133 - 134 - static const __le32 iwl_single_shared_ant[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = { 135 - { 136 - cpu_to_le32(0x40000000), 137 - cpu_to_le32(0x00000000), 138 - cpu_to_le32(0x44000000), 139 - cpu_to_le32(0x00000000), 140 - cpu_to_le32(0x40000000), 141 - cpu_to_le32(0x00000000), 142 - cpu_to_le32(0x44000000), 143 - cpu_to_le32(0x00000000), 144 - cpu_to_le32(0xc0004000), 145 - cpu_to_le32(0xf0005000), 146 - cpu_to_le32(0xc0004000), 147 - cpu_to_le32(0xf0005000), 148 - }, 149 - { 150 - cpu_to_le32(0x40000000), 151 - cpu_to_le32(0x00000000), 152 - cpu_to_le32(0x44000000), 153 - 
cpu_to_le32(0x00000000), 154 - cpu_to_le32(0x40000000), 155 - cpu_to_le32(0x00000000), 156 - cpu_to_le32(0x44000000), 157 - cpu_to_le32(0x00000000), 158 - cpu_to_le32(0xc0004000), 159 - cpu_to_le32(0xf0005000), 160 - cpu_to_le32(0xc0004000), 161 - cpu_to_le32(0xf0005000), 162 - }, 163 - { 164 - cpu_to_le32(0x40000000), 165 - cpu_to_le32(0x00000000), 166 - cpu_to_le32(0x44000000), 167 - cpu_to_le32(0x00000000), 168 - cpu_to_le32(0x40000000), 169 - cpu_to_le32(0x00000000), 170 - cpu_to_le32(0x44000000), 171 - cpu_to_le32(0x00000000), 172 - cpu_to_le32(0xc0004000), 173 - cpu_to_le32(0xf0005000), 174 - cpu_to_le32(0xc0004000), 175 - cpu_to_le32(0xf0005000), 176 - }, 177 - }; 178 - 179 - static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = { 180 - { 181 - /* Tight */ 182 - cpu_to_le32(0xaaaaaaaa), 183 - cpu_to_le32(0xaaaaaaaa), 184 - cpu_to_le32(0xaeaaaaaa), 185 - cpu_to_le32(0xaaaaaaaa), 186 - cpu_to_le32(0xcc00ff28), 187 - cpu_to_le32(0x0000aaaa), 188 - cpu_to_le32(0xcc00aaaa), 189 - cpu_to_le32(0x0000aaaa), 190 - cpu_to_le32(0xc0004000), 191 - cpu_to_le32(0x00004000), 192 - cpu_to_le32(0xf0005000), 193 - cpu_to_le32(0xf0005000), 194 - }, 195 - { 196 - /* Loose */ 197 - cpu_to_le32(0xaaaaaaaa), 198 - cpu_to_le32(0xaaaaaaaa), 199 - cpu_to_le32(0xaaaaaaaa), 200 - cpu_to_le32(0xaaaaaaaa), 201 - cpu_to_le32(0xcc00ff28), 202 - cpu_to_le32(0x0000aaaa), 203 - cpu_to_le32(0xcc00aaaa), 204 - cpu_to_le32(0x0000aaaa), 205 - cpu_to_le32(0x00000000), 206 - cpu_to_le32(0x00000000), 207 - cpu_to_le32(0xf0005000), 208 - cpu_to_le32(0xf0005000), 209 - }, 210 - { 211 - /* Tx Tx disabled */ 212 - cpu_to_le32(0xaaaaaaaa), 213 - cpu_to_le32(0xaaaaaaaa), 214 - cpu_to_le32(0xeeaaaaaa), 215 - cpu_to_le32(0xaaaaaaaa), 216 - cpu_to_le32(0xcc00ff28), 217 - cpu_to_le32(0x0000aaaa), 218 - cpu_to_le32(0xcc00aaaa), 219 - cpu_to_le32(0x0000aaaa), 220 - cpu_to_le32(0xc0004000), 221 - cpu_to_le32(0xc0004000), 222 - cpu_to_le32(0xf0005000), 223 - cpu_to_le32(0xf0005000), 224 - 
}, 225 - }; 226 - 227 75 /* 20MHz / 40MHz below / 40Mhz above*/ 228 76 static const __le64 iwl_ci_mask[][3] = { 229 77 /* dummy entry for channel 0 */ ··· 444 596 goto send_cmd; 445 597 } 446 598 447 - bt_cmd->max_kill = cpu_to_le32(5); 448 - bt_cmd->bt4_antenna_isolation_thr = 449 - cpu_to_le32(IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS); 450 - bt_cmd->bt4_tx_tx_delta_freq_thr = cpu_to_le32(15); 451 - bt_cmd->bt4_tx_rx_max_freq0 = cpu_to_le32(15); 452 - bt_cmd->override_primary_lut = cpu_to_le32(BT_COEX_INVALID_LUT); 453 - bt_cmd->override_secondary_lut = cpu_to_le32(BT_COEX_INVALID_LUT); 454 - 455 599 mode = iwlwifi_mod_params.bt_coex_active ? BT_COEX_NW : BT_COEX_DISABLE; 456 600 bt_cmd->mode = cpu_to_le32(mode); 457 601 ··· 462 622 463 623 bt_cmd->enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET); 464 624 465 - if (mvm->cfg->bt_shared_single_ant) 466 - memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant, 467 - sizeof(iwl_single_shared_ant)); 468 - else 469 - memcpy(&bt_cmd->decision_lut, iwl_combined_lookup, 470 - sizeof(iwl_combined_lookup)); 471 - 472 - memcpy(&bt_cmd->mplut_prio_boost, iwl_bt_prio_boost, 473 - sizeof(iwl_bt_prio_boost)); 474 - bt_cmd->multiprio_lut[0] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG0); 475 - bt_cmd->multiprio_lut[1] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG1); 476 - 477 625 send_cmd: 478 626 memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); 479 627 memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd)); ··· 470 642 471 643 kfree(bt_cmd); 472 644 return ret; 473 - } 474 - 475 - static int iwl_mvm_bt_udpate_sw_boost(struct iwl_mvm *mvm) 476 - { 477 - struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif; 478 - u32 primary_lut = le32_to_cpu(notif->primary_ch_lut); 479 - u32 secondary_lut = le32_to_cpu(notif->secondary_ch_lut); 480 - u32 ag = le32_to_cpu(notif->bt_activity_grading); 481 - struct iwl_bt_coex_sw_boost_update_cmd cmd = {}; 482 - u8 ack_kill_msk[NUM_PHY_CTX] = {}; 483 - u8 cts_kill_msk[NUM_PHY_CTX] = {}; 
484 - int i; 485 - 486 - lockdep_assert_held(&mvm->mutex); 487 - 488 - ack_kill_msk[0] = iwl_bt_ack_kill_msk[ag][primary_lut]; 489 - cts_kill_msk[0] = iwl_bt_cts_kill_msk[ag][primary_lut]; 490 - 491 - ack_kill_msk[1] = iwl_bt_ack_kill_msk[ag][secondary_lut]; 492 - cts_kill_msk[1] = iwl_bt_cts_kill_msk[ag][secondary_lut]; 493 - 494 - /* Don't send HCMD if there is no update */ 495 - if (!memcmp(ack_kill_msk, mvm->bt_ack_kill_msk, sizeof(ack_kill_msk)) || 496 - !memcmp(cts_kill_msk, mvm->bt_cts_kill_msk, sizeof(cts_kill_msk))) 497 - return 0; 498 - 499 - memcpy(mvm->bt_ack_kill_msk, ack_kill_msk, 500 - sizeof(mvm->bt_ack_kill_msk)); 501 - memcpy(mvm->bt_cts_kill_msk, cts_kill_msk, 502 - sizeof(mvm->bt_cts_kill_msk)); 503 - 504 - BUILD_BUG_ON(ARRAY_SIZE(ack_kill_msk) < ARRAY_SIZE(cmd.boost_values)); 505 - 506 - for (i = 0; i < ARRAY_SIZE(cmd.boost_values); i++) { 507 - cmd.boost_values[i].kill_ack_msk = 508 - cpu_to_le32(iwl_bt_ctl_kill_msk[ack_kill_msk[i]]); 509 - cmd.boost_values[i].kill_cts_msk = 510 - cpu_to_le32(iwl_bt_ctl_kill_msk[cts_kill_msk[i]]); 511 - } 512 - 513 - return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_SW_BOOST, 0, 514 - sizeof(cmd), &cmd); 515 645 } 516 646 517 647 static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, ··· 737 951 IWL_ERR(mvm, "Failed to send BT_CI cmd\n"); 738 952 memcpy(&mvm->last_bt_ci_cmd, &cmd, sizeof(cmd)); 739 953 } 740 - 741 - if (iwl_mvm_bt_udpate_sw_boost(mvm)) 742 - IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n"); 743 954 } 744 955 745 956 int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm, ··· 857 1074 ieee80211_iterate_active_interfaces_atomic( 858 1075 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, 859 1076 iwl_mvm_bt_rssi_iterator, &data); 860 - 861 - if (iwl_mvm_bt_udpate_sw_boost(mvm)) 862 - IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n"); 863 1077 } 864 1078 865 1079 #define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000)
+60 -1
drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
··· 288 288 }, 289 289 }; 290 290 291 + enum iwl_bt_kill_msk { 292 + BT_KILL_MSK_DEFAULT, 293 + BT_KILL_MSK_NEVER, 294 + BT_KILL_MSK_ALWAYS, 295 + BT_KILL_MSK_MAX, 296 + }; 297 + 298 + static const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX] = { 299 + [BT_KILL_MSK_DEFAULT] = 0xfffffc00, 300 + [BT_KILL_MSK_NEVER] = 0xffffffff, 301 + [BT_KILL_MSK_ALWAYS] = 0, 302 + }; 303 + 304 + static const u8 iwl_bt_cts_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = { 305 + { 306 + BT_KILL_MSK_ALWAYS, 307 + BT_KILL_MSK_ALWAYS, 308 + BT_KILL_MSK_ALWAYS, 309 + }, 310 + { 311 + BT_KILL_MSK_NEVER, 312 + BT_KILL_MSK_NEVER, 313 + BT_KILL_MSK_NEVER, 314 + }, 315 + { 316 + BT_KILL_MSK_NEVER, 317 + BT_KILL_MSK_NEVER, 318 + BT_KILL_MSK_NEVER, 319 + }, 320 + { 321 + BT_KILL_MSK_DEFAULT, 322 + BT_KILL_MSK_NEVER, 323 + BT_KILL_MSK_DEFAULT, 324 + }, 325 + }; 326 + 327 + static const u8 iwl_bt_ack_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = { 328 + { 329 + BT_KILL_MSK_ALWAYS, 330 + BT_KILL_MSK_ALWAYS, 331 + BT_KILL_MSK_ALWAYS, 332 + }, 333 + { 334 + BT_KILL_MSK_ALWAYS, 335 + BT_KILL_MSK_ALWAYS, 336 + BT_KILL_MSK_ALWAYS, 337 + }, 338 + { 339 + BT_KILL_MSK_ALWAYS, 340 + BT_KILL_MSK_ALWAYS, 341 + BT_KILL_MSK_ALWAYS, 342 + }, 343 + { 344 + BT_KILL_MSK_DEFAULT, 345 + BT_KILL_MSK_ALWAYS, 346 + BT_KILL_MSK_DEFAULT, 347 + }, 348 + }; 349 + 291 350 struct corunning_block_luts { 292 351 u8 range; 293 352 __le32 lut20[BT_COEX_CORUN_LUT_SIZE]; ··· 692 633 if (IWL_MVM_BT_COEX_TTC) 693 634 bt_cmd->flags |= cpu_to_le32(BT_COEX_TTC); 694 635 695 - if (IWL_MVM_BT_COEX_RRC) 636 + if (iwl_mvm_bt_is_rrc_supported(mvm)) 696 637 bt_cmd->flags |= cpu_to_le32(BT_COEX_RRC); 697 638 698 639 if (mvm->cfg->bt_shared_single_ant)
+14 -5
drivers/net/wireless/iwlwifi/mvm/d3.c
··· 694 694 if (ret) 695 695 IWL_ERR(mvm, "Failed to send quota: %d\n", ret); 696 696 697 + if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm)) 698 + IWL_ERR(mvm, "Failed to initialize D3 LAR information\n"); 699 + 697 700 return 0; 698 701 } 699 702 ··· 1599 1596 1600 1597 /* RF-kill already asserted again... */ 1601 1598 if (!cmd.resp_pkt) { 1602 - ret = -ERFKILL; 1599 + fw_status = ERR_PTR(-ERFKILL); 1603 1600 goto out_free_resp; 1604 1601 } 1605 1602 ··· 1608 1605 len = iwl_rx_packet_payload_len(cmd.resp_pkt); 1609 1606 if (len < status_size) { 1610 1607 IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); 1611 - ret = -EIO; 1608 + fw_status = ERR_PTR(-EIO); 1612 1609 goto out_free_resp; 1613 1610 } 1614 1611 ··· 1616 1613 if (len != (status_size + 1617 1614 ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4))) { 1618 1615 IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); 1619 - ret = -EIO; 1616 + fw_status = ERR_PTR(-EIO); 1620 1617 goto out_free_resp; 1621 1618 } 1622 1619 ··· 1624 1621 1625 1622 out_free_resp: 1626 1623 iwl_free_resp(&cmd); 1627 - return ret ? ERR_PTR(ret) : fw_status; 1624 + return fw_status; 1628 1625 } 1629 1626 1630 1627 /* releases the MVM mutex */ ··· 1877 1874 /* query SRAM first in case we want event logging */ 1878 1875 iwl_mvm_read_d3_sram(mvm); 1879 1876 1877 + /* 1878 + * Query the current location and source from the D3 firmware so we 1879 + * can play it back when we re-intiailize the D0 firmware 1880 + */ 1881 + iwl_mvm_update_changed_regdom(mvm); 1882 + 1880 1883 if (mvm->net_detect) { 1881 1884 iwl_mvm_query_netdetect_reasons(mvm, vif); 1882 1885 /* has unlocked the mutex, so skip that */ ··· 1892 1883 #ifdef CONFIG_IWLWIFI_DEBUGFS 1893 1884 if (keep) 1894 1885 mvm->keep_vif = vif; 1886 + #endif 1895 1887 /* has unlocked the mutex, so skip that */ 1896 1888 goto out_iterate; 1897 - #endif 1898 1889 } 1899 1890 1900 1891 out_unlock:
+6 -20
drivers/net/wireless/iwlwifi/mvm/debugfs.c
··· 562 562 "\tSecondary Channel Bitmap 0x%016llx\n", 563 563 le64_to_cpu(cmd->bt_secondary_ci)); 564 564 565 - pos += scnprintf(buf+pos, bufsz-pos, "BT Configuration CMD\n"); 566 - pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill Mask 0x%08x\n", 567 - iwl_bt_ctl_kill_msk[mvm->bt_ack_kill_msk[0]]); 568 - pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill Mask 0x%08x\n", 569 - iwl_bt_ctl_kill_msk[mvm->bt_cts_kill_msk[0]]); 565 + pos += scnprintf(buf+pos, bufsz-pos, 566 + "BT Configuration CMD - 0=default, 1=never, 2=always\n"); 567 + pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill msk idx %d\n", 568 + mvm->bt_ack_kill_msk[0]); 569 + pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill msk idx %d\n", 570 + mvm->bt_cts_kill_msk[0]); 570 571 571 572 } else { 572 573 struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd; ··· 580 579 pos += scnprintf(buf+pos, bufsz-pos, 581 580 "\tSecondary Channel Bitmap 0x%016llx\n", 582 581 le64_to_cpu(cmd->bt_secondary_ci)); 583 - 584 - pos += scnprintf(buf+pos, bufsz-pos, "BT Configuration CMD\n"); 585 - pos += scnprintf(buf+pos, bufsz-pos, 586 - "\tPrimary: ACK Kill Mask 0x%08x\n", 587 - iwl_bt_ctl_kill_msk[mvm->bt_ack_kill_msk[0]]); 588 - pos += scnprintf(buf+pos, bufsz-pos, 589 - "\tPrimary: CTS Kill Mask 0x%08x\n", 590 - iwl_bt_ctl_kill_msk[mvm->bt_cts_kill_msk[0]]); 591 - pos += scnprintf(buf+pos, bufsz-pos, 592 - "\tSecondary: ACK Kill Mask 0x%08x\n", 593 - iwl_bt_ctl_kill_msk[mvm->bt_ack_kill_msk[1]]); 594 - pos += scnprintf(buf+pos, bufsz-pos, 595 - "\tSecondary: CTS Kill Mask 0x%08x\n", 596 - iwl_bt_ctl_kill_msk[mvm->bt_cts_kill_msk[1]]); 597 - 598 582 } 599 583 600 584 mutex_unlock(&mvm->mutex);
-47
drivers/net/wireless/iwlwifi/mvm/fw-api-coex.h
··· 235 235 * struct iwl_bt_coex_cmd - bt coex configuration command 236 236 * @mode: enum %iwl_bt_coex_mode 237 237 * @enabled_modules: enum %iwl_bt_coex_enabled_modules 238 - * @max_kill: max count of Tx retries due to kill from PTA 239 - * @override_primary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT 240 - * should be set by default 241 - * @override_secondary_lut: enum %iwl_bt_coex_lut_type: BT_COEX_INVALID_LUT 242 - * should be set by default 243 - * @bt4_antenna_isolation_thr: antenna threshold value 244 - * @bt4_tx_tx_delta_freq_thr: TxTx delta frequency 245 - * @bt4_tx_rx_max_freq0: TxRx max frequency 246 - * @multiprio_lut: multi priority LUT configuration 247 - * @mplut_prio_boost: BT priority boost registers 248 - * @decision_lut: PTA decision LUT, per Prio-Ch 249 238 * 250 239 * The structure is used for the BT_COEX command. 251 240 */ 252 241 struct iwl_bt_coex_cmd { 253 242 __le32 mode; 254 243 __le32 enabled_modules; 255 - 256 - __le32 max_kill; 257 - __le32 override_primary_lut; 258 - __le32 override_secondary_lut; 259 - __le32 bt4_antenna_isolation_thr; 260 - 261 - __le32 bt4_tx_tx_delta_freq_thr; 262 - __le32 bt4_tx_rx_max_freq0; 263 - 264 - __le32 multiprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE]; 265 - __le32 mplut_prio_boost[BT_COEX_BOOST_SIZE]; 266 - 267 - __le32 decision_lut[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE]; 268 244 } __packed; /* BT_COEX_CMD_API_S_VER_6 */ 269 245 270 246 /** ··· 254 278 __le32 corun_lut20[BT_COEX_CORUN_LUT_SIZE]; 255 279 __le32 corun_lut40[BT_COEX_CORUN_LUT_SIZE]; 256 280 } __packed; /* BT_COEX_UPDATE_CORUN_LUT_API_S_VER_1 */ 257 - 258 - /** 259 - * struct iwl_bt_coex_sw_boost - SW boost values 260 - * @wifi_tx_prio_boost: SW boost of wifi tx priority 261 - * @wifi_rx_prio_boost: SW boost of wifi rx priority 262 - * @kill_ack_msk: kill ACK mask. 1 - Tx ACK, 0 - kill Tx of ACK. 263 - * @kill_cts_msk: kill CTS mask. 1 - Tx CTS, 0 - kill Tx of CTS. 
264 - */ 265 - struct iwl_bt_coex_sw_boost { 266 - __le32 wifi_tx_prio_boost; 267 - __le32 wifi_rx_prio_boost; 268 - __le32 kill_ack_msk; 269 - __le32 kill_cts_msk; 270 - }; 271 - 272 - /** 273 - * struct iwl_bt_coex_sw_boost_update_cmd - command to update the SW boost 274 - * @boost_values: check struct %iwl_bt_coex_sw_boost - one for each channel 275 - * primary / secondary / low priority 276 - */ 277 - struct iwl_bt_coex_sw_boost_update_cmd { 278 - struct iwl_bt_coex_sw_boost boost_values[3]; 279 - } __packed; /* BT_COEX_UPDATE_SW_BOOST_S_VER_1 */ 280 281 281 282 /** 282 283 * struct iwl_bt_coex_reduced_txp_update_cmd
+105 -2
drivers/net/wireless/iwlwifi/mvm/fw-api.h
··· 212 212 REPLY_RX_MPDU_CMD = 0xc1, 213 213 BA_NOTIF = 0xc5, 214 214 215 + /* Location Aware Regulatory */ 216 + MCC_UPDATE_CMD = 0xc8, 217 + MCC_CHUB_UPDATE_CMD = 0xc9, 218 + 215 219 MARKER_CMD = 0xcb, 216 220 217 221 /* BT Coex */ ··· 366 362 NVM_SECTION_TYPE_CALIBRATION = 4, 367 363 NVM_SECTION_TYPE_PRODUCTION = 5, 368 364 NVM_SECTION_TYPE_MAC_OVERRIDE = 11, 369 - NVM_MAX_NUM_SECTIONS = 12, 365 + NVM_SECTION_TYPE_PHY_SKU = 12, 366 + NVM_MAX_NUM_SECTIONS = 13, 370 367 }; 371 368 372 369 /** ··· 1447 1442 #define SF_W_MARK_LEGACY 4096 1448 1443 #define SF_W_MARK_SCAN 4096 1449 1444 1450 - /* SF Scenarios timers for FULL_ON state (aligned to 32 uSec) */ 1445 + /* SF Scenarios timers for default configuration (aligned to 32 uSec) */ 1446 + #define SF_SINGLE_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */ 1447 + #define SF_SINGLE_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */ 1448 + #define SF_AGG_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */ 1449 + #define SF_AGG_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */ 1450 + #define SF_MCAST_IDLE_TIMER_DEF 160 /* 150 mSec */ 1451 + #define SF_MCAST_AGING_TIMER_DEF 400 /* 0.4 mSec */ 1452 + #define SF_BA_IDLE_TIMER_DEF 160 /* 150 uSec */ 1453 + #define SF_BA_AGING_TIMER_DEF 400 /* 0.4 mSec */ 1454 + #define SF_TX_RE_IDLE_TIMER_DEF 160 /* 150 uSec */ 1455 + #define SF_TX_RE_AGING_TIMER_DEF 400 /* 0.4 mSec */ 1456 + 1457 + /* SF Scenarios timers for BSS MAC configuration (aligned to 32 uSec) */ 1451 1458 #define SF_SINGLE_UNICAST_IDLE_TIMER 320 /* 300 uSec */ 1452 1459 #define SF_SINGLE_UNICAST_AGING_TIMER 2016 /* 2 mSec */ 1453 1460 #define SF_AGG_UNICAST_IDLE_TIMER 320 /* 300 uSec */ ··· 1489 1472 __le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; 1490 1473 __le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES]; 1491 1474 } __packed; /* SF_CFG_API_S_VER_2 */ 1475 + 1476 + /*********************************** 1477 + * Location Aware Regulatory (LAR) API - MCC updates 1478 + ***********************************/ 1479 + 
1480 + /** 1481 + * struct iwl_mcc_update_cmd - Request the device to update geographic 1482 + * regulatory profile according to the given MCC (Mobile Country Code). 1483 + * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain. 1484 + * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the 1485 + * MCC in the cmd response will be the relevant MCC in the NVM. 1486 + * @mcc: given mobile country code 1487 + * @source_id: the source from where we got the MCC, see iwl_mcc_source 1488 + * @reserved: reserved for alignment 1489 + */ 1490 + struct iwl_mcc_update_cmd { 1491 + __le16 mcc; 1492 + u8 source_id; 1493 + u8 reserved; 1494 + } __packed; /* LAR_UPDATE_MCC_CMD_API_S */ 1495 + 1496 + /** 1497 + * iwl_mcc_update_resp - response to MCC_UPDATE_CMD. 1498 + * Contains the new channel control profile map, if changed, and the new MCC 1499 + * (mobile country code). 1500 + * The new MCC may be different than what was requested in MCC_UPDATE_CMD. 1501 + * @status: see &enum iwl_mcc_update_status 1502 + * @mcc: the new applied MCC 1503 + * @cap: capabilities for all channels which matches the MCC 1504 + * @source_id: the MCC source, see iwl_mcc_source 1505 + * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51 1506 + * channels, depending on platform) 1507 + * @channels: channel control data map, DWORD for each channel. Only the first 1508 + * 16bits are used. 
1509 + */ 1510 + struct iwl_mcc_update_resp { 1511 + __le32 status; 1512 + __le16 mcc; 1513 + u8 cap; 1514 + u8 source_id; 1515 + __le32 n_channels; 1516 + __le32 channels[0]; 1517 + } __packed; /* LAR_UPDATE_MCC_CMD_RESP_S */ 1518 + 1519 + /** 1520 + * struct iwl_mcc_chub_notif - chub notifies of mcc change 1521 + * (MCC_CHUB_UPDATE_CMD = 0xc9) 1522 + * The Chub (Communication Hub, CommsHUB) is a HW component that connects to 1523 + * the cellular and connectivity cores that gets updates of the mcc, and 1524 + * notifies the ucode directly of any mcc change. 1525 + * The ucode requests the driver to request the device to update geographic 1526 + * regulatory profile according to the given MCC (Mobile Country Code). 1527 + * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain. 1528 + * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the 1529 + * MCC in the cmd response will be the relevant MCC in the NVM. 1530 + * @mcc: given mobile country code 1531 + * @source_id: identity of the change originator, see iwl_mcc_source 1532 + * @reserved1: reserved for alignment 1533 + */ 1534 + struct iwl_mcc_chub_notif { 1535 + u16 mcc; 1536 + u8 source_id; 1537 + u8 reserved1; 1538 + } __packed; /* LAR_MCC_NOTIFY_S */ 1539 + 1540 + enum iwl_mcc_update_status { 1541 + MCC_RESP_NEW_CHAN_PROFILE, 1542 + MCC_RESP_SAME_CHAN_PROFILE, 1543 + MCC_RESP_INVALID, 1544 + MCC_RESP_NVM_DISABLED, 1545 + MCC_RESP_ILLEGAL, 1546 + MCC_RESP_LOW_PRIORITY, 1547 + }; 1548 + 1549 + enum iwl_mcc_source { 1550 + MCC_SOURCE_OLD_FW = 0, 1551 + MCC_SOURCE_ME = 1, 1552 + MCC_SOURCE_BIOS = 2, 1553 + MCC_SOURCE_3G_LTE_HOST = 3, 1554 + MCC_SOURCE_3G_LTE_DEVICE = 4, 1555 + MCC_SOURCE_WIFI = 5, 1556 + MCC_SOURCE_RESERVED = 6, 1557 + MCC_SOURCE_DEFAULT = 7, 1558 + MCC_SOURCE_UNINITIALIZED = 8, 1559 + MCC_SOURCE_GET_CURRENT = 0x10 1560 + }; 1492 1561 1493 1562 /* DTS measurements */ 1494 1563
+10
drivers/net/wireless/iwlwifi/mvm/fw.c
··· 739 739 if (ret) 740 740 goto error; 741 741 742 + /* 743 + * RTNL is not taken during Ct-kill, but we don't need to scan/Tx 744 + * anyway, so don't init MCC. 745 + */ 746 + if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) { 747 + ret = iwl_mvm_init_mcc(mvm); 748 + if (ret) 749 + goto error; 750 + } 751 + 742 752 if (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_UMAC_SCAN) { 743 753 ret = iwl_mvm_config_scan(mvm); 744 754 if (ret)
+157 -23
drivers/net/wireless/iwlwifi/mvm/mac80211.c
··· 86 86 #include "iwl-fw-error-dump.h" 87 87 #include "iwl-prph.h" 88 88 #include "iwl-csr.h" 89 + #include "iwl-nvm-parse.h" 89 90 90 91 static const struct ieee80211_iface_limit iwl_mvm_limits[] = { 91 92 { ··· 302 301 } 303 302 } 304 303 304 + struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, 305 + const char *alpha2, 306 + enum iwl_mcc_source src_id, 307 + bool *changed) 308 + { 309 + struct ieee80211_regdomain *regd = NULL; 310 + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); 311 + struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 312 + struct iwl_mcc_update_resp *resp; 313 + 314 + IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2); 315 + 316 + lockdep_assert_held(&mvm->mutex); 317 + 318 + resp = iwl_mvm_update_mcc(mvm, alpha2, src_id); 319 + if (IS_ERR_OR_NULL(resp)) { 320 + IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n", 321 + PTR_RET(resp)); 322 + goto out; 323 + } 324 + 325 + if (changed) 326 + *changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE); 327 + 328 + regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg, 329 + __le32_to_cpu(resp->n_channels), 330 + resp->channels, 331 + __le16_to_cpu(resp->mcc)); 332 + /* Store the return source id */ 333 + src_id = resp->source_id; 334 + kfree(resp); 335 + if (IS_ERR_OR_NULL(regd)) { 336 + IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n", 337 + PTR_RET(regd)); 338 + goto out; 339 + } 340 + 341 + IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n", 342 + regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id); 343 + mvm->lar_regdom_set = true; 344 + mvm->mcc_src = src_id; 345 + 346 + out: 347 + return regd; 348 + } 349 + 350 + void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm) 351 + { 352 + bool changed; 353 + struct ieee80211_regdomain *regd; 354 + 355 + if (!iwl_mvm_is_lar_supported(mvm)) 356 + return; 357 + 358 + regd = iwl_mvm_get_current_regdomain(mvm, &changed); 359 + if (!IS_ERR_OR_NULL(regd)) { 360 + /* only 
update the regulatory core if changed */ 361 + if (changed) 362 + regulatory_set_wiphy_regd(mvm->hw->wiphy, regd); 363 + 364 + kfree(regd); 365 + } 366 + } 367 + 368 + struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm, 369 + bool *changed) 370 + { 371 + return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ", 372 + iwl_mvm_is_wifi_mcc_supported(mvm) ? 373 + MCC_SOURCE_GET_CURRENT : 374 + MCC_SOURCE_OLD_FW, changed); 375 + } 376 + 377 + int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm) 378 + { 379 + enum iwl_mcc_source used_src; 380 + struct ieee80211_regdomain *regd; 381 + const struct ieee80211_regdomain *r = 382 + rtnl_dereference(mvm->hw->wiphy->regd); 383 + 384 + if (!r) 385 + return 0; 386 + 387 + /* save the last source in case we overwrite it below */ 388 + used_src = mvm->mcc_src; 389 + if (iwl_mvm_is_wifi_mcc_supported(mvm)) { 390 + /* Notify the firmware we support wifi location updates */ 391 + regd = iwl_mvm_get_current_regdomain(mvm, NULL); 392 + if (!IS_ERR_OR_NULL(regd)) 393 + kfree(regd); 394 + } 395 + 396 + /* Now set our last stored MCC and source */ 397 + regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src, NULL); 398 + if (IS_ERR_OR_NULL(regd)) 399 + return -EIO; 400 + 401 + regulatory_set_wiphy_regd(mvm->hw->wiphy, regd); 402 + kfree(regd); 403 + 404 + return 0; 405 + } 406 + 305 407 int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) 306 408 { 307 409 struct ieee80211_hw *hw = mvm->hw; ··· 460 356 BIT(NL80211_IFTYPE_ADHOC); 461 357 462 358 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 463 - hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG | 464 - REGULATORY_DISABLE_BEACON_HINTS; 359 + hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR; 360 + if (iwl_mvm_is_lar_supported(mvm)) 361 + hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED; 362 + else 363 + hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG | 364 + REGULATORY_DISABLE_BEACON_HINTS; 465 365 466 366 if (mvm->fw->ucode_capa.flags & 
IWL_UCODE_TLV_FLAGS_GO_UAPSD) 467 367 hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; ··· 1301 1193 1302 1194 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 1303 1195 iwl_mvm_d0i3_enable_tx(mvm, NULL); 1304 - ret = iwl_mvm_update_quotas(mvm, NULL); 1196 + ret = iwl_mvm_update_quotas(mvm, false, NULL); 1305 1197 if (ret) 1306 1198 IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n", 1307 1199 ret); ··· 1980 1872 sizeof(mvmvif->beacon_stats)); 1981 1873 1982 1874 /* add quota for this interface */ 1983 - ret = iwl_mvm_update_quotas(mvm, NULL); 1875 + ret = iwl_mvm_update_quotas(mvm, true, NULL); 1984 1876 if (ret) { 1985 1877 IWL_ERR(mvm, "failed to update quotas\n"); 1986 1878 return; ··· 2032 1924 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT; 2033 1925 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; 2034 1926 /* remove quota for this interface */ 2035 - ret = iwl_mvm_update_quotas(mvm, NULL); 1927 + ret = iwl_mvm_update_quotas(mvm, false, NULL); 2036 1928 if (ret) 2037 1929 IWL_ERR(mvm, "failed to update quotas\n"); 2038 1930 ··· 2151 2043 /* power updated needs to be done before quotas */ 2152 2044 iwl_mvm_power_update_mac(mvm); 2153 2045 2154 - ret = iwl_mvm_update_quotas(mvm, NULL); 2046 + ret = iwl_mvm_update_quotas(mvm, false, NULL); 2155 2047 if (ret) 2156 2048 goto out_quota_failed; 2157 2049 ··· 2217 2109 if (vif->p2p && mvm->p2p_device_vif) 2218 2110 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); 2219 2111 2220 - iwl_mvm_update_quotas(mvm, NULL); 2112 + iwl_mvm_update_quotas(mvm, false, NULL); 2221 2113 iwl_mvm_send_rm_bcast_sta(mvm, vif); 2222 2114 iwl_mvm_binding_remove_vif(mvm, vif); 2223 2115 ··· 2356 2248 2357 2249 mutex_lock(&mvm->mutex); 2358 2250 2251 + if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) { 2252 + IWL_ERR(mvm, "scan while LAR regdomain is not set\n"); 2253 + ret = -EBUSY; 2254 + goto out; 2255 + } 2256 + 2359 2257 if (mvm->scan_status != IWL_MVM_SCAN_NONE) { 2360 2258 ret = -EBUSY; 2361 2259 goto out; ··· 
2442 2328 { 2443 2329 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2444 2330 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 2331 + unsigned long txqs = 0, tids = 0; 2445 2332 int tid; 2333 + 2334 + spin_lock_bh(&mvmsta->lock); 2335 + for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { 2336 + struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 2337 + 2338 + if (tid_data->state != IWL_AGG_ON && 2339 + tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA) 2340 + continue; 2341 + 2342 + __set_bit(tid_data->txq_id, &txqs); 2343 + 2344 + if (iwl_mvm_tid_queued(tid_data) == 0) 2345 + continue; 2346 + 2347 + __set_bit(tid, &tids); 2348 + } 2446 2349 2447 2350 switch (cmd) { 2448 2351 case STA_NOTIFY_SLEEP: 2449 2352 if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0) 2450 2353 ieee80211_sta_block_awake(hw, sta, true); 2451 - spin_lock_bh(&mvmsta->lock); 2452 - for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { 2453 - struct iwl_mvm_tid_data *tid_data; 2454 2354 2455 - tid_data = &mvmsta->tid_data[tid]; 2456 - if (tid_data->state != IWL_AGG_ON && 2457 - tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA) 2458 - continue; 2459 - if (iwl_mvm_tid_queued(tid_data) == 0) 2460 - continue; 2355 + for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT) 2461 2356 ieee80211_sta_set_buffered(sta, tid, true); 2462 - } 2463 - spin_unlock_bh(&mvmsta->lock); 2357 + 2358 + if (txqs) 2359 + iwl_trans_freeze_txq_timer(mvm->trans, txqs, true); 2464 2360 /* 2465 2361 * The fw updates the STA to be asleep. Tx packets on the Tx 2466 2362 * queues to this station will not be transmitted. 
The fw will ··· 2480 2356 case STA_NOTIFY_AWAKE: 2481 2357 if (WARN_ON(mvmsta->sta_id == IWL_MVM_STATION_COUNT)) 2482 2358 break; 2359 + 2360 + if (txqs) 2361 + iwl_trans_freeze_txq_timer(mvm->trans, txqs, false); 2483 2362 iwl_mvm_sta_modify_ps_wake(mvm, sta); 2484 2363 break; 2485 2364 default: 2486 2365 break; 2487 2366 } 2367 + spin_unlock_bh(&mvmsta->lock); 2488 2368 } 2489 2369 2490 2370 static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw, ··· 2725 2597 } 2726 2598 2727 2599 mutex_lock(&mvm->mutex); 2600 + 2601 + if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) { 2602 + IWL_ERR(mvm, "sched-scan while LAR regdomain is not set\n"); 2603 + ret = -EBUSY; 2604 + goto out; 2605 + } 2728 2606 2729 2607 if (!vif->bss_conf.idle) { 2730 2608 ret = -EBUSY; ··· 3293 3159 */ 3294 3160 if (vif->type == NL80211_IFTYPE_MONITOR) { 3295 3161 mvmvif->monitor_active = true; 3296 - ret = iwl_mvm_update_quotas(mvm, NULL); 3162 + ret = iwl_mvm_update_quotas(mvm, false, NULL); 3297 3163 if (ret) 3298 3164 goto out_remove_binding; 3299 3165 } 3300 3166 3301 3167 /* Handle binding during CSA */ 3302 3168 if (vif->type == NL80211_IFTYPE_AP) { 3303 - iwl_mvm_update_quotas(mvm, NULL); 3169 + iwl_mvm_update_quotas(mvm, false, NULL); 3304 3170 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 3305 3171 } 3306 3172 ··· 3324 3190 3325 3191 iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA); 3326 3192 3327 - iwl_mvm_update_quotas(mvm, NULL); 3193 + iwl_mvm_update_quotas(mvm, false, NULL); 3328 3194 } 3329 3195 3330 3196 goto out; ··· 3397 3263 break; 3398 3264 } 3399 3265 3400 - iwl_mvm_update_quotas(mvm, disabled_vif); 3266 + iwl_mvm_update_quotas(mvm, false, disabled_vif); 3401 3267 iwl_mvm_binding_remove_vif(mvm, vif); 3402 3268 3403 3269 out: ··· 3589 3455 mvm->noa_duration = noa_duration; 3590 3456 mvm->noa_vif = vif; 3591 3457 3592 - return iwl_mvm_update_quotas(mvm, NULL); 3458 + return iwl_mvm_update_quotas(mvm, false, NULL); 3593 3459 case 
IWL_MVM_TM_CMD_SET_BEACON_FILTER: 3594 3460 /* must be associated client vif - ignore authorized */ 3595 3461 if (!vif || vif->type != NL80211_IFTYPE_STATION ||
+51 -12
drivers/net/wireless/iwlwifi/mvm/mvm.h
··· 810 810 /* system time of last beacon (for AP/GO interface) */ 811 811 u32 ap_last_beacon_gp2; 812 812 813 + bool lar_regdom_set; 814 + enum iwl_mcc_source mcc_src; 815 + 813 816 u8 low_latency_agg_frame_limit; 814 817 815 818 /* TDLS channel switch data */ ··· 913 910 (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_D0I3_SUPPORT); 914 911 } 915 912 913 + static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm) 914 + { 915 + bool nvm_lar = mvm->nvm_data->lar_enabled; 916 + bool tlv_lar = mvm->fw->ucode_capa.capa[0] & 917 + IWL_UCODE_TLV_CAPA_LAR_SUPPORT; 918 + 919 + if (iwlwifi_mod_params.lar_disable) 920 + return false; 921 + 922 + /* 923 + * Enable LAR only if it is supported by the FW (TLV) && 924 + * enabled in the NVM 925 + */ 926 + if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000) 927 + return nvm_lar && tlv_lar; 928 + else 929 + return tlv_lar; 930 + } 931 + 932 + static inline bool iwl_mvm_is_wifi_mcc_supported(struct iwl_mvm *mvm) 933 + { 934 + return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_WIFI_MCC_UPDATE; 935 + } 936 + 916 937 static inline bool iwl_mvm_is_scd_cfg_supported(struct iwl_mvm *mvm) 917 938 { 918 939 return mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SCD_CFG; ··· 946 919 { 947 920 return (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BT_COEX_PLCR) && 948 921 IWL_MVM_BT_COEX_CORUNNING; 922 + } 923 + 924 + static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm) 925 + { 926 + return (mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_BT_COEX_RRC) && 927 + IWL_MVM_BT_COEX_RRC; 949 928 } 950 929 951 930 extern const u8 iwl_mvm_ac_to_tx_fifo[]; ··· 1139 1106 int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif); 1140 1107 1141 1108 /* Quota management */ 1142 - int iwl_mvm_update_quotas(struct iwl_mvm *mvm, 1109 + int iwl_mvm_update_quotas(struct iwl_mvm *mvm, bool force_upload, 1143 1110 struct ieee80211_vif *disabled_vif); 1144 1111 1145 1112 /* Scanning */ ··· 1315 1282 struct 
iwl_rx_cmd_buffer *rxb, 1316 1283 struct iwl_device_cmd *cmd); 1317 1284 1318 - enum iwl_bt_kill_msk { 1319 - BT_KILL_MSK_DEFAULT, 1320 - BT_KILL_MSK_NEVER, 1321 - BT_KILL_MSK_ALWAYS, 1322 - BT_KILL_MSK_MAX, 1323 - }; 1324 - 1325 - extern const u8 iwl_bt_ack_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT]; 1326 - extern const u8 iwl_bt_cts_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT]; 1327 - extern const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX]; 1328 - 1329 1285 /* beacon filtering */ 1330 1286 #ifdef CONFIG_IWLWIFI_DEBUGFS 1331 1287 void ··· 1410 1388 void iwl_mvm_tt_exit(struct iwl_mvm *mvm); 1411 1389 void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state); 1412 1390 int iwl_mvm_get_temp(struct iwl_mvm *mvm); 1391 + 1392 + /* Location Aware Regulatory */ 1393 + struct iwl_mcc_update_resp * 1394 + iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, 1395 + enum iwl_mcc_source src_id); 1396 + int iwl_mvm_init_mcc(struct iwl_mvm *mvm); 1397 + int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, 1398 + struct iwl_rx_cmd_buffer *rxb, 1399 + struct iwl_device_cmd *cmd); 1400 + struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, 1401 + const char *alpha2, 1402 + enum iwl_mcc_source src_id, 1403 + bool *changed); 1404 + struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm, 1405 + bool *changed); 1406 + int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm); 1407 + void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm); 1413 1408 1414 1409 /* smart fifo */ 1415 1410 int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+286 -4
drivers/net/wireless/iwlwifi/mvm/nvm.c
··· 63 63 * 64 64 *****************************************************************************/ 65 65 #include <linux/firmware.h> 66 + #include <linux/rtnetlink.h> 67 + #include <linux/pci.h> 68 + #include <linux/acpi.h> 66 69 #include "iwl-trans.h" 67 70 #include "iwl-csr.h" 68 71 #include "mvm.h" 69 72 #include "iwl-eeprom-parse.h" 70 73 #include "iwl-eeprom-read.h" 71 74 #include "iwl-nvm-parse.h" 75 + #include "iwl-prph.h" 72 76 73 77 /* Default NVM size to read */ 74 78 #define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024) ··· 266 262 iwl_parse_nvm_sections(struct iwl_mvm *mvm) 267 263 { 268 264 struct iwl_nvm_section *sections = mvm->nvm_sections; 269 - const __le16 *hw, *sw, *calib, *regulatory, *mac_override; 265 + const __le16 *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku; 266 + bool is_family_8000_a_step = false, lar_enabled; 267 + u32 mac_addr0, mac_addr1; 270 268 271 269 /* Checking for required sections */ 272 270 if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) { ··· 292 286 "Can't parse mac_address, empty sections\n"); 293 287 return NULL; 294 288 } 289 + 290 + if (CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_A_STEP) 291 + is_family_8000_a_step = true; 292 + 293 + /* PHY_SKU section is mandatory in B0 */ 294 + if (!is_family_8000_a_step && 295 + !mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) { 296 + IWL_ERR(mvm, 297 + "Can't parse phy_sku in B0, empty sections\n"); 298 + return NULL; 299 + } 295 300 } 296 301 297 302 if (WARN_ON(!mvm->cfg)) 298 303 return NULL; 304 + 305 + /* read the mac address from WFMP registers */ 306 + mac_addr0 = iwl_trans_read_prph(mvm->trans, WFMP_MAC_ADDR_0); 307 + mac_addr1 = iwl_trans_read_prph(mvm->trans, WFMP_MAC_ADDR_1); 299 308 300 309 hw = (const __le16 *)sections[mvm->cfg->nvm_hw_section_num].data; 301 310 sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data; ··· 318 297 regulatory = (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data; 319 298 mac_override = 320 299 (const __le16 
*)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data; 300 + phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data; 301 + 302 + lar_enabled = !iwlwifi_mod_params.lar_disable && 303 + (mvm->fw->ucode_capa.capa[0] & 304 + IWL_UCODE_TLV_CAPA_LAR_SUPPORT); 321 305 322 306 return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib, 323 - regulatory, mac_override, 324 - mvm->fw->valid_tx_ant, 325 - mvm->fw->valid_rx_ant); 307 + regulatory, mac_override, phy_sku, 308 + mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant, 309 + lar_enabled, is_family_8000_a_step, 310 + mac_addr0, mac_addr1); 326 311 } 327 312 328 313 #define MAX_NVM_FILE_LEN 16384 ··· 594 567 return -ENODATA; 595 568 IWL_DEBUG_EEPROM(mvm->trans->dev, "nvm version = %x\n", 596 569 mvm->nvm_data->nvm_version); 570 + 571 + return 0; 572 + } 573 + 574 + struct iwl_mcc_update_resp * 575 + iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, 576 + enum iwl_mcc_source src_id) 577 + { 578 + struct iwl_mcc_update_cmd mcc_update_cmd = { 579 + .mcc = cpu_to_le16(alpha2[0] << 8 | alpha2[1]), 580 + .source_id = (u8)src_id, 581 + }; 582 + struct iwl_mcc_update_resp *mcc_resp, *resp_cp = NULL; 583 + struct iwl_rx_packet *pkt; 584 + struct iwl_host_cmd cmd = { 585 + .id = MCC_UPDATE_CMD, 586 + .flags = CMD_WANT_SKB, 587 + .data = { &mcc_update_cmd }, 588 + }; 589 + 590 + int ret; 591 + u32 status; 592 + int resp_len, n_channels; 593 + u16 mcc; 594 + 595 + if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm))) 596 + return ERR_PTR(-EOPNOTSUPP); 597 + 598 + cmd.len[0] = sizeof(struct iwl_mcc_update_cmd); 599 + 600 + IWL_DEBUG_LAR(mvm, "send MCC update to FW with '%c%c' src = %d\n", 601 + alpha2[0], alpha2[1], src_id); 602 + 603 + ret = iwl_mvm_send_cmd(mvm, &cmd); 604 + if (ret) 605 + return ERR_PTR(ret); 606 + 607 + pkt = cmd.resp_pkt; 608 + if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { 609 + IWL_ERR(mvm, "Bad return from MCC_UPDATE_COMMAND (0x%08X)\n", 610 + pkt->hdr.flags); 611 + ret = -EIO; 612 + goto exit; 613 + } 
614 + 615 + /* Extract MCC response */ 616 + mcc_resp = (void *)pkt->data; 617 + status = le32_to_cpu(mcc_resp->status); 618 + 619 + mcc = le16_to_cpu(mcc_resp->mcc); 620 + 621 + /* W/A for a FW/NVM issue - returns 0x00 for the world domain */ 622 + if (mcc == 0) { 623 + mcc = 0x3030; /* "00" - world */ 624 + mcc_resp->mcc = cpu_to_le16(mcc); 625 + } 626 + 627 + n_channels = __le32_to_cpu(mcc_resp->n_channels); 628 + IWL_DEBUG_LAR(mvm, 629 + "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') change: %d n_chans: %d\n", 630 + status, mcc, mcc >> 8, mcc & 0xff, 631 + !!(status == MCC_RESP_NEW_CHAN_PROFILE), n_channels); 632 + 633 + resp_len = sizeof(*mcc_resp) + n_channels * sizeof(__le32); 634 + resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL); 635 + if (!resp_cp) { 636 + ret = -ENOMEM; 637 + goto exit; 638 + } 639 + 640 + ret = 0; 641 + exit: 642 + iwl_free_resp(&cmd); 643 + if (ret) 644 + return ERR_PTR(ret); 645 + return resp_cp; 646 + } 647 + 648 + #ifdef CONFIG_ACPI 649 + #define WRD_METHOD "WRDD" 650 + #define WRDD_WIFI (0x07) 651 + #define WRDD_WIGIG (0x10) 652 + 653 + static u32 iwl_mvm_wrdd_get_mcc(struct iwl_mvm *mvm, union acpi_object *wrdd) 654 + { 655 + union acpi_object *mcc_pkg, *domain_type, *mcc_value; 656 + u32 i; 657 + 658 + if (wrdd->type != ACPI_TYPE_PACKAGE || 659 + wrdd->package.count < 2 || 660 + wrdd->package.elements[0].type != ACPI_TYPE_INTEGER || 661 + wrdd->package.elements[0].integer.value != 0) { 662 + IWL_DEBUG_LAR(mvm, "Unsupported wrdd structure\n"); 663 + return 0; 664 + } 665 + 666 + for (i = 1 ; i < wrdd->package.count ; ++i) { 667 + mcc_pkg = &wrdd->package.elements[i]; 668 + 669 + if (mcc_pkg->type != ACPI_TYPE_PACKAGE || 670 + mcc_pkg->package.count < 2 || 671 + mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER || 672 + mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) { 673 + mcc_pkg = NULL; 674 + continue; 675 + } 676 + 677 + domain_type = &mcc_pkg->package.elements[0]; 678 + if (domain_type->integer.value == 
WRDD_WIFI) 679 + break; 680 + 681 + mcc_pkg = NULL; 682 + } 683 + 684 + if (mcc_pkg) { 685 + mcc_value = &mcc_pkg->package.elements[1]; 686 + return mcc_value->integer.value; 687 + } 688 + 689 + return 0; 690 + } 691 + 692 + static int iwl_mvm_get_bios_mcc(struct iwl_mvm *mvm, char *mcc) 693 + { 694 + acpi_handle root_handle; 695 + acpi_handle handle; 696 + struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL}; 697 + acpi_status status; 698 + u32 mcc_val; 699 + struct pci_dev *pdev = to_pci_dev(mvm->dev); 700 + 701 + root_handle = ACPI_HANDLE(&pdev->dev); 702 + if (!root_handle) { 703 + IWL_DEBUG_LAR(mvm, 704 + "Could not retrieve root port ACPI handle\n"); 705 + return -ENOENT; 706 + } 707 + 708 + /* Get the method's handle */ 709 + status = acpi_get_handle(root_handle, (acpi_string)WRD_METHOD, &handle); 710 + if (ACPI_FAILURE(status)) { 711 + IWL_DEBUG_LAR(mvm, "WRD method not found\n"); 712 + return -ENOENT; 713 + } 714 + 715 + /* Call WRDD with no arguments */ 716 + status = acpi_evaluate_object(handle, NULL, NULL, &wrdd); 717 + if (ACPI_FAILURE(status)) { 718 + IWL_DEBUG_LAR(mvm, "WRDC invocation failed (0x%x)\n", status); 719 + return -ENOENT; 720 + } 721 + 722 + mcc_val = iwl_mvm_wrdd_get_mcc(mvm, wrdd.pointer); 723 + kfree(wrdd.pointer); 724 + if (!mcc_val) 725 + return -ENOENT; 726 + 727 + mcc[0] = (mcc_val >> 8) & 0xff; 728 + mcc[1] = mcc_val & 0xff; 729 + mcc[2] = '\0'; 730 + return 0; 731 + } 732 + #else /* CONFIG_ACPI */ 733 + static int iwl_mvm_get_bios_mcc(struct iwl_mvm *mvm, char *mcc) 734 + { 735 + return -ENOENT; 736 + } 737 + #endif 738 + 739 + int iwl_mvm_init_mcc(struct iwl_mvm *mvm) 740 + { 741 + bool tlv_lar; 742 + bool nvm_lar; 743 + int retval; 744 + struct ieee80211_regdomain *regd; 745 + char mcc[3]; 746 + 747 + if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000) { 748 + tlv_lar = mvm->fw->ucode_capa.capa[0] & 749 + IWL_UCODE_TLV_CAPA_LAR_SUPPORT; 750 + nvm_lar = mvm->nvm_data->lar_enabled; 751 + if (tlv_lar != nvm_lar) 752 + 
IWL_INFO(mvm, 753 + "Conflict between TLV & NVM regarding enabling LAR (TLV = %s NVM =%s)\n", 754 + tlv_lar ? "enabled" : "disabled", 755 + nvm_lar ? "enabled" : "disabled"); 756 + } 757 + 758 + if (!iwl_mvm_is_lar_supported(mvm)) 759 + return 0; 760 + 761 + /* 762 + * During HW restart, only replay the last set MCC to FW. Otherwise, 763 + * queue an update to cfg80211 to retrieve the default alpha2 from FW. 764 + */ 765 + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { 766 + /* This should only be called during vif up and hold RTNL */ 767 + return iwl_mvm_init_fw_regd(mvm); 768 + } 769 + 770 + /* 771 + * Driver regulatory hint for initial update, this also informs the 772 + * firmware we support wifi location updates. 773 + * Disallow scans that might crash the FW while the LAR regdomain 774 + * is not set. 775 + */ 776 + mvm->lar_regdom_set = false; 777 + 778 + regd = iwl_mvm_get_current_regdomain(mvm, NULL); 779 + if (IS_ERR_OR_NULL(regd)) 780 + return -EIO; 781 + 782 + if (iwl_mvm_is_wifi_mcc_supported(mvm) && 783 + !iwl_mvm_get_bios_mcc(mvm, mcc)) { 784 + kfree(regd); 785 + regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, 786 + MCC_SOURCE_BIOS, NULL); 787 + if (IS_ERR_OR_NULL(regd)) 788 + return -EIO; 789 + } 790 + 791 + retval = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd); 792 + kfree(regd); 793 + return retval; 794 + } 795 + 796 + int iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm, 797 + struct iwl_rx_cmd_buffer *rxb, 798 + struct iwl_device_cmd *cmd) 799 + { 800 + struct iwl_rx_packet *pkt = rxb_addr(rxb); 801 + struct iwl_mcc_chub_notif *notif = (void *)pkt->data; 802 + enum iwl_mcc_source src; 803 + char mcc[3]; 804 + struct ieee80211_regdomain *regd; 805 + 806 + lockdep_assert_held(&mvm->mutex); 807 + 808 + if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm))) 809 + return 0; 810 + 811 + mcc[0] = notif->mcc >> 8; 812 + mcc[1] = notif->mcc & 0xff; 813 + mcc[2] = '\0'; 814 + src = notif->source_id; 815 + 816 + IWL_DEBUG_LAR(mvm, 817 
+ "RX: received chub update mcc cmd (mcc '%s' src %d)\n", 818 + mcc, src); 819 + regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, NULL); 820 + if (IS_ERR_OR_NULL(regd)) 821 + return 0; 822 + 823 + regulatory_set_wiphy_regd(mvm->hw->wiphy, regd); 824 + kfree(regd); 597 825 598 826 return 0; 599 827 }
+8 -3
drivers/net/wireless/iwlwifi/mvm/ops.c
··· 82 82 #include "rs.h" 83 83 #include "fw-api-scan.h" 84 84 #include "time-event.h" 85 - #include "iwl-fw-error-dump.h" 86 85 87 86 #define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux" 88 87 MODULE_DESCRIPTION(DRV_DESCRIPTION); ··· 233 234 iwl_mvm_rx_ant_coupling_notif, true), 234 235 235 236 RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false), 237 + RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc, true), 236 238 237 239 RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false), 238 240 ··· 358 358 CMD(TDLS_CHANNEL_SWITCH_CMD), 359 359 CMD(TDLS_CHANNEL_SWITCH_NOTIFICATION), 360 360 CMD(TDLS_CONFIG_CMD), 361 + CMD(MCC_UPDATE_CMD), 361 362 }; 362 363 #undef CMD 363 364 ··· 872 871 873 872 /* start recording again if the firmware is not crashed */ 874 873 WARN_ON_ONCE((!test_bit(STATUS_FW_ERROR, &mvm->trans->status)) && 875 - mvm->fw->dbg_dest_tlv && 876 - iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf)); 874 + mvm->fw->dbg_dest_tlv && 875 + iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf)); 877 876 878 877 mutex_unlock(&mvm->mutex); 879 878 ··· 1271 1270 iwl_free_resp(&get_status_cmd); 1272 1271 out: 1273 1272 iwl_mvm_d0i3_enable_tx(mvm, qos_seq); 1273 + 1274 + /* the FW might have updated the regdomain */ 1275 + iwl_mvm_update_changed_regdom(mvm); 1276 + 1274 1277 iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK); 1275 1278 mutex_unlock(&mvm->mutex); 1276 1279 }
+5 -1
drivers/net/wireless/iwlwifi/mvm/power.c
··· 358 358 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK); 359 359 360 360 if (!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif) || 361 - !mvmvif->pm_enabled || iwl_mvm_tdls_sta_count(mvm, vif)) 361 + !mvmvif->pm_enabled) 362 362 return; 363 363 364 364 cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK); ··· 638 638 639 639 if (vifs->ap_vif) 640 640 ap_mvmvif = iwl_mvm_vif_from_mac80211(vifs->ap_vif); 641 + 642 + /* don't allow PM if any TDLS stations exist */ 643 + if (iwl_mvm_tdls_sta_count(mvm, NULL)) 644 + return; 641 645 642 646 /* enable PM on bss if bss stand alone */ 643 647 if (vifs->bss_active && !vifs->p2p_active && !vifs->ap_active) {
+2 -1
drivers/net/wireless/iwlwifi/mvm/quota.c
··· 172 172 } 173 173 174 174 int iwl_mvm_update_quotas(struct iwl_mvm *mvm, 175 + bool force_update, 175 176 struct ieee80211_vif *disabled_vif) 176 177 { 177 178 struct iwl_time_quota_cmd cmd = {}; ··· 310 309 "zero quota on binding %d\n", i); 311 310 } 312 311 313 - if (!send) { 312 + if (!send && !force_update) { 314 313 /* don't send a practically unchanged command, the firmware has 315 314 * to re-initialize a lot of state and that can have an adverse 316 315 * impact on it
+77 -21
drivers/net/wireless/iwlwifi/mvm/rs.c
··· 1065 1065 && ant_match; 1066 1066 } 1067 1067 1068 + static inline enum rs_column rs_get_column_from_rate(struct rs_rate *rate) 1069 + { 1070 + if (is_legacy(rate)) { 1071 + if (rate->ant == ANT_A) 1072 + return RS_COLUMN_LEGACY_ANT_A; 1073 + 1074 + if (rate->ant == ANT_B) 1075 + return RS_COLUMN_LEGACY_ANT_B; 1076 + 1077 + goto err; 1078 + } 1079 + 1080 + if (is_siso(rate)) { 1081 + if (rate->ant == ANT_A || rate->stbc || rate->bfer) 1082 + return rate->sgi ? RS_COLUMN_SISO_ANT_A_SGI : 1083 + RS_COLUMN_SISO_ANT_A; 1084 + 1085 + if (rate->ant == ANT_B) 1086 + return rate->sgi ? RS_COLUMN_SISO_ANT_B_SGI : 1087 + RS_COLUMN_SISO_ANT_B; 1088 + 1089 + goto err; 1090 + } 1091 + 1092 + if (is_mimo(rate)) 1093 + return rate->sgi ? RS_COLUMN_MIMO2_SGI : RS_COLUMN_MIMO2; 1094 + 1095 + err: 1096 + return RS_COLUMN_INVALID; 1097 + } 1098 + 1068 1099 static u8 rs_get_tid(struct ieee80211_hdr *hdr) 1069 1100 { 1070 1101 u8 tid = IWL_MAX_TID_COUNT; ··· 1137 1106 return; 1138 1107 } 1139 1108 1140 - #ifdef CONFIG_MAC80211_DEBUGFS 1141 - /* Disable last tx check if we are debugging with fixed rate */ 1142 - if (lq_sta->pers.dbg_fixed_rate) { 1143 - IWL_DEBUG_RATE(mvm, "Fixed rate. 
avoid rate scaling\n"); 1144 - return; 1145 - } 1146 - #endif 1147 1109 /* This packet was aggregated but doesn't carry status info */ 1148 1110 if ((info->flags & IEEE80211_TX_CTL_AMPDU) && 1149 1111 !(info->flags & IEEE80211_TX_STAT_AMPDU)) 1150 1112 return; 1113 + 1114 + rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, &tx_resp_rate); 1115 + 1116 + #ifdef CONFIG_MAC80211_DEBUGFS 1117 + /* Disable last tx check if we are debugging with fixed rate but 1118 + * update tx stats */ 1119 + if (lq_sta->pers.dbg_fixed_rate) { 1120 + int index = tx_resp_rate.index; 1121 + enum rs_column column; 1122 + int attempts, success; 1123 + 1124 + column = rs_get_column_from_rate(&tx_resp_rate); 1125 + if (WARN_ONCE(column == RS_COLUMN_INVALID, 1126 + "Can't map rate 0x%x to column", 1127 + tx_resp_hwrate)) 1128 + return; 1129 + 1130 + if (info->flags & IEEE80211_TX_STAT_AMPDU) { 1131 + attempts = info->status.ampdu_len; 1132 + success = info->status.ampdu_ack_len; 1133 + } else { 1134 + attempts = info->status.rates[0].count; 1135 + success = !!(info->flags & IEEE80211_TX_STAT_ACK); 1136 + } 1137 + 1138 + lq_sta->pers.tx_stats[column][index].total += attempts; 1139 + lq_sta->pers.tx_stats[column][index].success += success; 1140 + 1141 + IWL_DEBUG_RATE(mvm, "Fixed rate 0x%x success %d attempts %d\n", 1142 + tx_resp_hwrate, success, attempts); 1143 + return; 1144 + } 1145 + #endif 1151 1146 1152 1147 if (time_after(jiffies, 1153 1148 (unsigned long)(lq_sta->last_tx + ··· 1199 1142 table = &lq_sta->lq; 1200 1143 lq_hwrate = le32_to_cpu(table->rs_table[0]); 1201 1144 rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate); 1202 - rs_rate_from_ucode_rate(tx_resp_hwrate, info->band, &tx_resp_rate); 1203 1145 1204 1146 /* Here we actually compare this rate to the latest LQ command */ 1205 1147 if (!rs_rate_equal(&tx_resp_rate, &lq_rate, allow_ant_mismatch)) { ··· 3399 3343 (is_legacy(rate)) ? "legacy" : 3400 3344 is_vht(rate) ? 
"VHT" : "HT"); 3401 3345 if (!is_legacy(rate)) { 3402 - desc += sprintf(buff+desc, " %s", 3346 + desc += sprintf(buff + desc, " %s", 3403 3347 (is_siso(rate)) ? "SISO" : "MIMO2"); 3404 - desc += sprintf(buff+desc, " %s", 3405 - (is_ht20(rate)) ? "20MHz" : 3406 - (is_ht40(rate)) ? "40MHz" : 3407 - (is_ht80(rate)) ? "80Mhz" : "BAD BW"); 3408 - desc += sprintf(buff+desc, " %s %s %s\n", 3409 - (rate->sgi) ? "SGI" : "NGI", 3410 - (rate->ldpc) ? "LDPC" : "BCC", 3411 - (lq_sta->is_agg) ? "AGG on" : ""); 3348 + desc += sprintf(buff + desc, " %s", 3349 + (is_ht20(rate)) ? "20MHz" : 3350 + (is_ht40(rate)) ? "40MHz" : 3351 + (is_ht80(rate)) ? "80Mhz" : "BAD BW"); 3352 + desc += sprintf(buff + desc, " %s %s %s\n", 3353 + (rate->sgi) ? "SGI" : "NGI", 3354 + (rate->ldpc) ? "LDPC" : "BCC", 3355 + (lq_sta->is_agg) ? "AGG on" : ""); 3412 3356 } 3413 3357 desc += sprintf(buff+desc, "last tx rate=0x%X\n", 3414 3358 lq_sta->last_rate_n_flags); ··· 3429 3373 ss_params = le32_to_cpu(lq_sta->lq.ss_params); 3430 3374 desc += sprintf(buff+desc, "single stream params: %s%s%s%s\n", 3431 3375 (ss_params & LQ_SS_PARAMS_VALID) ? 3432 - "VALID," : "INVALID", 3376 + "VALID" : "INVALID", 3433 3377 (ss_params & LQ_SS_BFER_ALLOWED) ? 3434 - "BFER," : "", 3378 + ", BFER" : "", 3435 3379 (ss_params & LQ_SS_STBC_1SS_ALLOWED) ? 3436 - "STBC," : "", 3380 + ", STBC" : "", 3437 3381 (ss_params & LQ_SS_FORCE) ? 3438 - "FORCE" : ""); 3382 + ", FORCE" : ""); 3439 3383 desc += sprintf(buff+desc, 3440 3384 "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n", 3441 3385 lq_sta->lq.initial_rate_index[0],
+56 -11
drivers/net/wireless/iwlwifi/mvm/sf.c
··· 99 99 100 100 /* 101 101 * Aging and idle timeouts for the different possible scenarios 102 - * in SF_FULL_ON state. 102 + * in default configuration 103 + */ 104 + static const 105 + __le32 sf_full_timeout_def[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = { 106 + { 107 + cpu_to_le32(SF_SINGLE_UNICAST_AGING_TIMER_DEF), 108 + cpu_to_le32(SF_SINGLE_UNICAST_IDLE_TIMER_DEF) 109 + }, 110 + { 111 + cpu_to_le32(SF_AGG_UNICAST_AGING_TIMER_DEF), 112 + cpu_to_le32(SF_AGG_UNICAST_IDLE_TIMER_DEF) 113 + }, 114 + { 115 + cpu_to_le32(SF_MCAST_AGING_TIMER_DEF), 116 + cpu_to_le32(SF_MCAST_IDLE_TIMER_DEF) 117 + }, 118 + { 119 + cpu_to_le32(SF_BA_AGING_TIMER_DEF), 120 + cpu_to_le32(SF_BA_IDLE_TIMER_DEF) 121 + }, 122 + { 123 + cpu_to_le32(SF_TX_RE_AGING_TIMER_DEF), 124 + cpu_to_le32(SF_TX_RE_IDLE_TIMER_DEF) 125 + }, 126 + }; 127 + 128 + /* 129 + * Aging and idle timeouts for the different possible scenarios 130 + * in single BSS MAC configuration. 103 131 */ 104 132 static const __le32 sf_full_timeout[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = { 105 133 { ··· 152 124 }, 153 125 }; 154 126 155 - static void iwl_mvm_fill_sf_command(struct iwl_sf_cfg_cmd *sf_cmd, 127 + static void iwl_mvm_fill_sf_command(struct iwl_mvm *mvm, 128 + struct iwl_sf_cfg_cmd *sf_cmd, 156 129 struct ieee80211_sta *sta) 157 130 { 158 131 int i, j, watermark; ··· 192 163 cpu_to_le32(SF_LONG_DELAY_AGING_TIMER); 193 164 } 194 165 } 195 - BUILD_BUG_ON(sizeof(sf_full_timeout) != 196 - sizeof(__le32) * SF_NUM_SCENARIO * SF_NUM_TIMEOUT_TYPES); 197 166 198 - memcpy(sf_cmd->full_on_timeouts, sf_full_timeout, 199 - sizeof(sf_full_timeout)); 167 + if (sta || IWL_UCODE_API(mvm->fw->ucode_ver) < 13) { 168 + BUILD_BUG_ON(sizeof(sf_full_timeout) != 169 + sizeof(__le32) * SF_NUM_SCENARIO * 170 + SF_NUM_TIMEOUT_TYPES); 171 + 172 + memcpy(sf_cmd->full_on_timeouts, sf_full_timeout, 173 + sizeof(sf_full_timeout)); 174 + } else { 175 + BUILD_BUG_ON(sizeof(sf_full_timeout_def) != 176 + sizeof(__le32) * SF_NUM_SCENARIO * 177 + 
SF_NUM_TIMEOUT_TYPES); 178 + 179 + memcpy(sf_cmd->full_on_timeouts, sf_full_timeout_def, 180 + sizeof(sf_full_timeout_def)); 181 + } 182 + 200 183 } 201 184 202 185 static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id, 203 186 enum iwl_sf_state new_state) 204 187 { 205 188 struct iwl_sf_cfg_cmd sf_cmd = { 206 - .state = cpu_to_le32(new_state), 189 + .state = cpu_to_le32(SF_FULL_ON), 207 190 }; 208 191 struct ieee80211_sta *sta; 209 192 int ret = 0; 210 193 211 - if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF && 212 - mvm->cfg->disable_dummy_notification) 194 + if (IWL_UCODE_API(mvm->fw->ucode_ver) < 13) 195 + sf_cmd.state = cpu_to_le32(new_state); 196 + 197 + if (mvm->cfg->disable_dummy_notification) 213 198 sf_cmd.state |= cpu_to_le32(SF_CFG_DUMMY_NOTIF_OFF); 214 199 215 200 /* ··· 235 192 236 193 switch (new_state) { 237 194 case SF_UNINIT: 195 + if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 13) 196 + iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL); 238 197 break; 239 198 case SF_FULL_ON: 240 199 if (sta_id == IWL_MVM_STATION_COUNT) { ··· 251 206 rcu_read_unlock(); 252 207 return -EINVAL; 253 208 } 254 - iwl_mvm_fill_sf_command(&sf_cmd, sta); 209 + iwl_mvm_fill_sf_command(mvm, &sf_cmd, sta); 255 210 rcu_read_unlock(); 256 211 break; 257 212 case SF_INIT_OFF: 258 - iwl_mvm_fill_sf_command(&sf_cmd, NULL); 213 + iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL); 259 214 break; 260 215 default: 261 216 WARN_ONCE(1, "Invalid state: %d. not sending Smart Fifo cmd\n",
+1 -4
drivers/net/wireless/iwlwifi/mvm/sta.c
··· 273 273 else 274 274 sta_id = mvm_sta->sta_id; 275 275 276 - if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT)) 276 + if (sta_id == IWL_MVM_STATION_COUNT) 277 277 return -ENOSPC; 278 278 279 279 spin_lock_init(&mvm_sta->lock); ··· 1680 1680 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), 1681 1681 }; 1682 1682 int ret; 1683 - 1684 - if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_DISABLE_STA_TX)) 1685 - return; 1686 1683 1687 1684 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd); 1688 1685 if (ret)
+15 -7
drivers/net/wireless/iwlwifi/mvm/time-event.c
··· 197 197 struct iwl_time_event_notif *notif) 198 198 { 199 199 if (!le32_to_cpu(notif->status)) { 200 + if (te_data->vif->type == NL80211_IFTYPE_STATION) 201 + ieee80211_connection_loss(te_data->vif); 200 202 IWL_DEBUG_TE(mvm, "CSA time event failed to start\n"); 201 203 iwl_mvm_te_clear_data(mvm, te_data); 202 204 return; ··· 263 261 "TE ended - current time %lu, estimated end %lu\n", 264 262 jiffies, te_data->end_jiffies); 265 263 266 - if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { 264 + switch (te_data->vif->type) { 265 + case NL80211_IFTYPE_P2P_DEVICE: 267 266 ieee80211_remain_on_channel_expired(mvm->hw); 268 267 iwl_mvm_roc_finished(mvm); 268 + break; 269 + case NL80211_IFTYPE_STATION: 270 + /* 271 + * By now, we should have finished association 272 + * and know the dtim period. 273 + */ 274 + iwl_mvm_te_check_disconnect(mvm, te_data->vif, 275 + "No association and the time event is over already..."); 276 + break; 277 + default: 278 + break; 269 279 } 270 280 271 - /* 272 - * By now, we should have finished association 273 - * and know the dtim period. 274 - */ 275 - iwl_mvm_te_check_disconnect(mvm, te_data->vif, 276 - "No association and the time event is over already..."); 277 281 iwl_mvm_te_clear_data(mvm, te_data); 278 282 } else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) { 279 283 te_data->running = true;
+1 -1
drivers/net/wireless/iwlwifi/mvm/utils.c
··· 857 857 858 858 mvmvif->low_latency = value; 859 859 860 - res = iwl_mvm_update_quotas(mvm, NULL); 860 + res = iwl_mvm_update_quotas(mvm, false, NULL); 861 861 if (res) 862 862 return res; 863 863
+26 -1
drivers/net/wireless/iwlwifi/pcie/drv.c
··· 413 413 414 414 /* 8000 Series */ 415 415 {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)}, 416 - {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)}, 416 + {IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)}, 417 + {IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8260_2ac_cfg)}, 418 + {IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8260_2ac_cfg)}, 419 + {IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8260_2ac_cfg)}, 420 + {IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8260_2ac_cfg)}, 421 + {IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8260_2ac_cfg)}, 422 + {IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8260_2ac_cfg)}, 417 423 {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)}, 424 + {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)}, 425 + {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)}, 426 + {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)}, 427 + {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)}, 428 + {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)}, 429 + {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)}, 430 + {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)}, 431 + {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)}, 432 + {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)}, 433 + {IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)}, 434 + {IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)}, 435 + {IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)}, 436 + {IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8260_2ac_cfg)}, 437 + {IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8260_2ac_cfg)}, 438 + {IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)}, 418 439 {IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)}, 419 440 {IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)}, 441 + {IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8260_2ac_cfg)}, 442 + {IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8260_2ac_cfg)}, 443 + {IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)}, 444 + {IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)}, 420 445 #endif /* CONFIG_IWLMVM */ 421 446 422 447 {0}
+4
drivers/net/wireless/iwlwifi/pcie/internal.h
··· 217 217 * @active: stores if queue is active 218 218 * @ampdu: true if this queue is an ampdu queue for an specific RA/TID 219 219 * @wd_timeout: queue watchdog timeout (jiffies) - per queue 220 + * @frozen: tx stuck queue timer is frozen 221 + * @frozen_expiry_remainder: remember how long until the timer fires 220 222 * 221 223 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame 222 224 * descriptors) and required locking structures. ··· 230 228 dma_addr_t scratchbufs_dma; 231 229 struct iwl_pcie_txq_entry *entries; 232 230 spinlock_t lock; 231 + unsigned long frozen_expiry_remainder; 233 232 struct timer_list stuck_timer; 234 233 struct iwl_trans_pcie *trans_pcie; 235 234 bool need_update; 235 + bool frozen; 236 236 u8 active; 237 237 bool ampdu; 238 238 unsigned long wd_timeout;
+145 -12
drivers/net/wireless/iwlwifi/pcie/trans.c
··· 682 682 return ret; 683 683 } 684 684 685 + /* 686 + * Driver Takes the ownership on secure machine before FW load 687 + * and prevent race with the BT load. 688 + * W/A for ROM bug. (should be remove in the next Si step) 689 + */ 690 + static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans) 691 + { 692 + u32 val, loop = 1000; 693 + 694 + /* Check the RSA semaphore is accessible - if not, we are in trouble */ 695 + val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0); 696 + if (val & (BIT(1) | BIT(17))) { 697 + IWL_ERR(trans, 698 + "can't access the RSA semaphore it is write protected\n"); 699 + return 0; 700 + } 701 + 702 + /* take ownership on the AUX IF */ 703 + iwl_write_prph(trans, WFPM_CTRL_REG, WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK); 704 + iwl_write_prph(trans, AUX_MISC_MASTER1_EN, AUX_MISC_MASTER1_EN_SBE_MSK); 705 + 706 + do { 707 + iwl_write_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS, 0x1); 708 + val = iwl_read_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS); 709 + if (val == 0x1) { 710 + iwl_write_prph(trans, RSA_ENABLE, 0); 711 + return 0; 712 + } 713 + 714 + udelay(10); 715 + loop--; 716 + } while (loop > 0); 717 + 718 + IWL_ERR(trans, "Failed to take ownership on secure machine\n"); 719 + return -EIO; 720 + } 721 + 685 722 static int iwl_pcie_load_cpu_sections_8000b(struct iwl_trans *trans, 686 723 const struct fw_img *image, 687 724 int cpu, ··· 937 900 938 901 if (trans->dbg_dest_tlv) 939 902 iwl_pcie_apply_destination(trans); 903 + 904 + /* TODO: remove in the next Si step */ 905 + ret = iwl_pcie_rsa_race_bug_wa(trans); 906 + if (ret) 907 + return ret; 940 908 941 909 /* configure the ucode to be ready to get the secured image */ 942 910 /* release CPU reset */ ··· 1504 1462 return ret; 1505 1463 } 1506 1464 1465 + static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans, 1466 + unsigned long txqs, 1467 + bool freeze) 1468 + { 1469 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1470 + int queue; 1471 + 1472 + 
for_each_set_bit(queue, &txqs, BITS_PER_LONG) { 1473 + struct iwl_txq *txq = &trans_pcie->txq[queue]; 1474 + unsigned long now; 1475 + 1476 + spin_lock_bh(&txq->lock); 1477 + 1478 + now = jiffies; 1479 + 1480 + if (txq->frozen == freeze) 1481 + goto next_queue; 1482 + 1483 + IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n", 1484 + freeze ? "Freezing" : "Waking", queue); 1485 + 1486 + txq->frozen = freeze; 1487 + 1488 + if (txq->q.read_ptr == txq->q.write_ptr) 1489 + goto next_queue; 1490 + 1491 + if (freeze) { 1492 + if (unlikely(time_after(now, 1493 + txq->stuck_timer.expires))) { 1494 + /* 1495 + * The timer should have fired, maybe it is 1496 + * spinning right now on the lock. 1497 + */ 1498 + goto next_queue; 1499 + } 1500 + /* remember how long until the timer fires */ 1501 + txq->frozen_expiry_remainder = 1502 + txq->stuck_timer.expires - now; 1503 + del_timer(&txq->stuck_timer); 1504 + goto next_queue; 1505 + } 1506 + 1507 + /* 1508 + * Wake a non-empty queue -> arm timer with the 1509 + * remainder before it froze 1510 + */ 1511 + mod_timer(&txq->stuck_timer, 1512 + now + txq->frozen_expiry_remainder); 1513 + 1514 + next_queue: 1515 + spin_unlock_bh(&txq->lock); 1516 + } 1517 + } 1518 + 1507 1519 #define IWL_FLUSH_WAIT_MS 2000 1508 1520 1509 1521 static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm) ··· 1809 1713 int ret; 1810 1714 size_t bufsz; 1811 1715 1812 - bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues; 1716 + bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues; 1813 1717 1814 1718 if (!trans_pcie->txq) 1815 1719 return -EAGAIN; ··· 1822 1726 txq = &trans_pcie->txq[cnt]; 1823 1727 q = &txq->q; 1824 1728 pos += scnprintf(buf + pos, bufsz - pos, 1825 - "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d%s\n", 1729 + "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n", 1826 1730 cnt, q->read_ptr, q->write_ptr, 1827 1731 !!test_bit(cnt, trans_pcie->queue_used), 1828 1732 
!!test_bit(cnt, trans_pcie->queue_stopped), 1829 - txq->need_update, 1733 + txq->need_update, txq->frozen, 1830 1734 (cnt == trans_pcie->cmd_queue ? " HCMD" : "")); 1831 1735 } 1832 1736 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); ··· 2057 1961 { .start = 0x00a01c7c, .end = 0x00a01c7c }, 2058 1962 { .start = 0x00a01c28, .end = 0x00a01c54 }, 2059 1963 { .start = 0x00a01c5c, .end = 0x00a01c5c }, 2060 - { .start = 0x00a01c84, .end = 0x00a01c84 }, 1964 + { .start = 0x00a01c60, .end = 0x00a01cdc }, 2061 1965 { .start = 0x00a01ce0, .end = 0x00a01d0c }, 2062 1966 { .start = 0x00a01d18, .end = 0x00a01d20 }, 2063 1967 { .start = 0x00a01d2c, .end = 0x00a01d30 }, 2064 1968 { .start = 0x00a01d40, .end = 0x00a01d5c }, 2065 1969 { .start = 0x00a01d80, .end = 0x00a01d80 }, 2066 - { .start = 0x00a01d98, .end = 0x00a01d98 }, 1970 + { .start = 0x00a01d98, .end = 0x00a01d9c }, 1971 + { .start = 0x00a01da8, .end = 0x00a01da8 }, 1972 + { .start = 0x00a01db8, .end = 0x00a01df4 }, 2067 1973 { .start = 0x00a01dc0, .end = 0x00a01dfc }, 2068 1974 { .start = 0x00a01e00, .end = 0x00a01e2c }, 2069 1975 { .start = 0x00a01e40, .end = 0x00a01e60 }, 1976 + { .start = 0x00a01e68, .end = 0x00a01e6c }, 1977 + { .start = 0x00a01e74, .end = 0x00a01e74 }, 2070 1978 { .start = 0x00a01e84, .end = 0x00a01e90 }, 2071 1979 { .start = 0x00a01e9c, .end = 0x00a01ec4 }, 2072 - { .start = 0x00a01ed0, .end = 0x00a01ed0 }, 2073 - { .start = 0x00a01f00, .end = 0x00a01f14 }, 2074 - { .start = 0x00a01f44, .end = 0x00a01f58 }, 2075 - { .start = 0x00a01f80, .end = 0x00a01fa8 }, 2076 - { .start = 0x00a01fb0, .end = 0x00a01fbc }, 2077 - { .start = 0x00a01ff8, .end = 0x00a01ffc }, 1980 + { .start = 0x00a01ed0, .end = 0x00a01ee0 }, 1981 + { .start = 0x00a01f00, .end = 0x00a01f1c }, 1982 + { .start = 0x00a01f44, .end = 0x00a01ffc }, 2078 1983 { .start = 0x00a02000, .end = 0x00a02048 }, 2079 1984 { .start = 0x00a02068, .end = 0x00a020f0 }, 2080 1985 { .start = 0x00a02100, .end = 0x00a02118 }, ··· 2402 2305 
.dbgfs_register = iwl_trans_pcie_dbgfs_register, 2403 2306 2404 2307 .wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty, 2308 + .freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer, 2405 2309 2406 2310 .write8 = iwl_trans_pcie_write8, 2407 2311 .write32 = iwl_trans_pcie_write32, ··· 2521 2423 * "dash" value). To keep hw_rev backwards compatible - we'll store it 2522 2424 * in the old format. 2523 2425 */ 2524 - if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) 2426 + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { 2427 + unsigned long flags; 2428 + int ret; 2429 + 2525 2430 trans->hw_rev = (trans->hw_rev & 0xfff0) | 2526 2431 (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2); 2432 + 2433 + /* 2434 + * in-order to recognize C step driver should read chip version 2435 + * id located at the AUX bus MISC address space. 2436 + */ 2437 + iwl_set_bit(trans, CSR_GP_CNTRL, 2438 + CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 2439 + udelay(2); 2440 + 2441 + ret = iwl_poll_bit(trans, CSR_GP_CNTRL, 2442 + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 2443 + CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 2444 + 25000); 2445 + if (ret < 0) { 2446 + IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n"); 2447 + goto out_pci_disable_msi; 2448 + } 2449 + 2450 + if (iwl_trans_grab_nic_access(trans, false, &flags)) { 2451 + u32 hw_step; 2452 + 2453 + hw_step = __iwl_read_prph(trans, WFPM_CTRL_REG); 2454 + hw_step |= ENABLE_WFPM; 2455 + __iwl_write_prph(trans, WFPM_CTRL_REG, hw_step); 2456 + hw_step = __iwl_read_prph(trans, AUX_MISC_REG); 2457 + hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF; 2458 + if (hw_step == 0x3) 2459 + trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) | 2460 + (SILICON_C_STEP << 2); 2461 + iwl_trans_release_nic_access(trans, &flags); 2462 + } 2463 + } 2527 2464 2528 2465 trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; 2529 2466 snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
+46 -17
drivers/net/wireless/iwlwifi/pcie/tx.c
··· 725 725 iwl_pcie_tx_start(trans, 0); 726 726 } 727 727 728 + static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans) 729 + { 730 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 731 + unsigned long flags; 732 + int ch, ret; 733 + u32 mask = 0; 734 + 735 + spin_lock(&trans_pcie->irq_lock); 736 + 737 + if (!iwl_trans_grab_nic_access(trans, false, &flags)) 738 + goto out; 739 + 740 + /* Stop each Tx DMA channel */ 741 + for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) { 742 + iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); 743 + mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch); 744 + } 745 + 746 + /* Wait for DMA channels to be idle */ 747 + ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000); 748 + if (ret < 0) 749 + IWL_ERR(trans, 750 + "Failing on timeout while stopping DMA channel %d [0x%08x]\n", 751 + ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG)); 752 + 753 + iwl_trans_release_nic_access(trans, &flags); 754 + 755 + out: 756 + spin_unlock(&trans_pcie->irq_lock); 757 + } 758 + 728 759 /* 729 760 * iwl_pcie_tx_stop - Stop all Tx DMA channels 730 761 */ 731 762 int iwl_pcie_tx_stop(struct iwl_trans *trans) 732 763 { 733 764 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 734 - int ch, txq_id, ret; 765 + int txq_id; 735 766 736 767 /* Turn off all Tx DMA fifos */ 737 - spin_lock(&trans_pcie->irq_lock); 738 - 739 768 iwl_scd_deactivate_fifos(trans); 740 769 741 - /* Stop each Tx DMA channel, and wait for it to be idle */ 742 - for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) { 743 - iwl_write_direct32(trans, 744 - FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0); 745 - ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG, 746 - FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000); 747 - if (ret < 0) 748 - IWL_ERR(trans, 749 - "Failing on timeout while stopping DMA channel %d [0x%08x]\n", 750 - ch, 751 - iwl_read_direct32(trans, 752 - FH_TSSR_TX_STATUS_REG)); 753 - } 754 - spin_unlock(&trans_pcie->irq_lock); 770 + /* Turn off all 
Tx DMA channels */ 771 + iwl_pcie_tx_stop_fh(trans); 755 772 756 773 /* 757 774 * This function can be called before the op_mode disabled the ··· 929 912 930 913 static inline void iwl_pcie_txq_progress(struct iwl_txq *txq) 931 914 { 915 + lockdep_assert_held(&txq->lock); 916 + 932 917 if (!txq->wd_timeout) 918 + return; 919 + 920 + /* 921 + * station is asleep and we send data - that must 922 + * be uAPSD or PS-Poll. Don't rearm the timer. 923 + */ 924 + if (txq->frozen) 933 925 return; 934 926 935 927 /* ··· 1273 1247 u32 stts_addr = trans_pcie->scd_base_addr + 1274 1248 SCD_TX_STTS_QUEUE_OFFSET(txq_id); 1275 1249 static const u32 zero_val[4] = {}; 1250 + 1251 + trans_pcie->txq[txq_id].frozen_expiry_remainder = 0; 1252 + trans_pcie->txq[txq_id].frozen = false; 1276 1253 1277 1254 /* 1278 1255 * Upon HW Rfkill - we stop the device, and then stop the queues
-2
drivers/net/wireless/libertas_tf/if_usb.c
··· 365 365 366 366 return ret; 367 367 } 368 - EXPORT_SYMBOL_GPL(if_usb_reset_device); 369 368 370 369 /** 371 370 * usb_tx_block - transfer data to the device ··· 906 907 lbtf_deb_leave_args(LBTF_DEB_USB, "ret %d", ret); 907 908 return ret; 908 909 } 909 - EXPORT_SYMBOL_GPL(if_usb_prog_firmware); 910 910 911 911 912 912 #define if_usb_suspend NULL
+17 -1
drivers/net/wireless/mwifiex/11n.c
··· 159 159 int tid; 160 160 struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &resp->params.add_ba_rsp; 161 161 struct mwifiex_tx_ba_stream_tbl *tx_ba_tbl; 162 + struct mwifiex_ra_list_tbl *ra_list; 162 163 u16 block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set); 163 164 164 165 add_ba_rsp->ssn = cpu_to_le16((le16_to_cpu(add_ba_rsp->ssn)) ··· 167 166 168 167 tid = (block_ack_param_set & IEEE80211_ADDBA_PARAM_TID_MASK) 169 168 >> BLOCKACKPARAM_TID_POS; 169 + ra_list = mwifiex_wmm_get_ralist_node(priv, tid, add_ba_rsp-> 170 + peer_mac_addr); 170 171 if (le16_to_cpu(add_ba_rsp->status_code) != BA_RESULT_SUCCESS) { 172 + if (ra_list) { 173 + ra_list->ba_status = BA_SETUP_NONE; 174 + ra_list->amsdu_in_ampdu = false; 175 + } 171 176 mwifiex_del_ba_tbl(priv, tid, add_ba_rsp->peer_mac_addr, 172 177 TYPE_DELBA_SENT, true); 173 178 if (add_ba_rsp->add_rsp_result != BA_RESULT_TIMEOUT) ··· 192 185 tx_ba_tbl->amsdu = true; 193 186 else 194 187 tx_ba_tbl->amsdu = false; 188 + if (ra_list) { 189 + ra_list->amsdu_in_ampdu = tx_ba_tbl->amsdu; 190 + ra_list->ba_status = BA_SETUP_COMPLETE; 191 + } 195 192 } else { 196 193 dev_err(priv->adapter->dev, "BA stream not created\n"); 197 194 } ··· 526 515 enum mwifiex_ba_status ba_status) 527 516 { 528 517 struct mwifiex_tx_ba_stream_tbl *new_node; 518 + struct mwifiex_ra_list_tbl *ra_list; 529 519 unsigned long flags; 530 520 531 521 if (!mwifiex_get_ba_tbl(priv, tid, ra)) { ··· 534 522 GFP_ATOMIC); 535 523 if (!new_node) 536 524 return; 537 - 525 + ra_list = mwifiex_wmm_get_ralist_node(priv, tid, ra); 526 + if (ra_list) { 527 + ra_list->ba_status = ba_status; 528 + ra_list->amsdu_in_ampdu = false; 529 + } 538 530 INIT_LIST_HEAD(&new_node->list); 539 531 540 532 new_node->tid = tid;
-32
drivers/net/wireless/mwifiex/11n.h
··· 77 77 return (node->ampdu_sta[tid] != BA_STREAM_NOT_ALLOWED) ? true : false; 78 78 } 79 79 80 - /* This function checks whether AMSDU is allowed for BA stream. */ 81 - static inline u8 82 - mwifiex_is_amsdu_in_ampdu_allowed(struct mwifiex_private *priv, 83 - struct mwifiex_ra_list_tbl *ptr, int tid) 84 - { 85 - struct mwifiex_tx_ba_stream_tbl *tx_tbl; 86 - 87 - if (is_broadcast_ether_addr(ptr->ra)) 88 - return false; 89 - tx_tbl = mwifiex_get_ba_tbl(priv, tid, ptr->ra); 90 - if (tx_tbl) 91 - return tx_tbl->amsdu; 92 - 93 - return false; 94 - } 95 - 96 80 /* This function checks whether AMPDU is allowed or not for a particular TID. */ 97 81 static inline u8 98 82 mwifiex_is_ampdu_allowed(struct mwifiex_private *priv, ··· 163 179 spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags); 164 180 165 181 return ret; 166 - } 167 - 168 - /* 169 - * This function checks whether BA stream is set up or not. 170 - */ 171 - static inline int 172 - mwifiex_is_ba_stream_setup(struct mwifiex_private *priv, 173 - struct mwifiex_ra_list_tbl *ptr, int tid) 174 - { 175 - struct mwifiex_tx_ba_stream_tbl *tx_tbl; 176 - 177 - tx_tbl = mwifiex_get_ba_tbl(priv, tid, ptr->ra); 178 - if (tx_tbl && IS_BASTREAM_SETUP(tx_tbl)) 179 - return true; 180 - 181 - return false; 182 182 } 183 183 184 184 /*
+11 -5
drivers/net/wireless/mwifiex/11n_aggr.c
··· 170 170 struct mwifiex_adapter *adapter = priv->adapter; 171 171 struct sk_buff *skb_aggr, *skb_src; 172 172 struct mwifiex_txinfo *tx_info_aggr, *tx_info_src; 173 - int pad = 0, ret; 173 + int pad = 0, aggr_num = 0, ret; 174 174 struct mwifiex_tx_param tx_param; 175 175 struct txpd *ptx_pd = NULL; 176 176 struct timeval tv; ··· 184 184 } 185 185 186 186 tx_info_src = MWIFIEX_SKB_TXCB(skb_src); 187 - skb_aggr = dev_alloc_skb(adapter->tx_buf_size); 187 + skb_aggr = mwifiex_alloc_dma_align_buf(adapter->tx_buf_size, 188 + GFP_ATOMIC | GFP_DMA); 188 189 if (!skb_aggr) { 189 190 dev_err(adapter->dev, "%s: alloc skb_aggr\n", __func__); 190 191 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ··· 201 200 202 201 if (tx_info_src->flags & MWIFIEX_BUF_FLAG_TDLS_PKT) 203 202 tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT; 203 + tx_info_aggr->flags |= MWIFIEX_BUF_FLAG_AGGR_PKT; 204 204 skb_aggr->priority = skb_src->priority; 205 205 206 206 do_gettimeofday(&tv); ··· 213 211 break; 214 212 215 213 skb_src = skb_dequeue(&pra_list->skb_head); 216 - 217 214 pra_list->total_pkt_count--; 218 - 219 215 atomic_dec(&priv->wmm.tx_pkts_queued); 220 - 216 + aggr_num++; 221 217 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, 222 218 ra_list_flags); 223 219 mwifiex_11n_form_amsdu_pkt(skb_aggr, skb_src, &pad); ··· 251 251 ptx_pd = (struct txpd *)skb_aggr->data; 252 252 253 253 skb_push(skb_aggr, headroom); 254 + tx_info_aggr->aggr_num = aggr_num * 2; 255 + if (adapter->data_sent || adapter->tx_lock_flag) { 256 + atomic_add(aggr_num * 2, &adapter->tx_queued); 257 + skb_queue_tail(&adapter->tx_data_q, skb_aggr); 258 + return 0; 259 + } 254 260 255 261 if (adapter->iface_type == MWIFIEX_USB) { 256 262 adapter->data_sent = true;
+6 -1
drivers/net/wireless/mwifiex/11n_rxreorder.c
··· 659 659 { 660 660 struct mwifiex_rx_reorder_tbl *tbl; 661 661 struct mwifiex_tx_ba_stream_tbl *ptx_tbl; 662 + struct mwifiex_ra_list_tbl *ra_list; 662 663 u8 cleanup_rx_reorder_tbl; 663 664 unsigned long flags; 664 665 ··· 687 686 "event: TID, RA not found in table\n"); 688 687 return; 689 688 } 690 - 689 + ra_list = mwifiex_wmm_get_ralist_node(priv, tid, peer_mac); 690 + if (ra_list) { 691 + ra_list->amsdu_in_ampdu = false; 692 + ra_list->ba_status = BA_SETUP_NONE; 693 + } 691 694 spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags); 692 695 mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, ptx_tbl); 693 696 spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
+140 -27
drivers/net/wireless/mwifiex/cfg80211.c
··· 717 717 718 718 static int mwifiex_deinit_priv_params(struct mwifiex_private *priv) 719 719 { 720 + struct mwifiex_adapter *adapter = priv->adapter; 721 + unsigned long flags; 722 + 720 723 priv->mgmt_frame_mask = 0; 721 724 if (mwifiex_send_cmd(priv, HostCmd_CMD_MGMT_FRAME_REG, 722 725 HostCmd_ACT_GEN_SET, 0, ··· 730 727 } 731 728 732 729 mwifiex_deauthenticate(priv, NULL); 730 + 731 + spin_lock_irqsave(&adapter->main_proc_lock, flags); 732 + adapter->main_locked = true; 733 + if (adapter->mwifiex_processing) { 734 + spin_unlock_irqrestore(&adapter->main_proc_lock, flags); 735 + flush_workqueue(adapter->workqueue); 736 + } else { 737 + spin_unlock_irqrestore(&adapter->main_proc_lock, flags); 738 + } 739 + 740 + spin_lock_irqsave(&adapter->rx_proc_lock, flags); 741 + adapter->rx_locked = true; 742 + if (adapter->rx_processing) { 743 + spin_unlock_irqrestore(&adapter->rx_proc_lock, flags); 744 + flush_workqueue(adapter->rx_workqueue); 745 + } else { 746 + spin_unlock_irqrestore(&adapter->rx_proc_lock, flags); 747 + } 748 + 733 749 mwifiex_free_priv(priv); 734 750 priv->wdev.iftype = NL80211_IFTYPE_UNSPECIFIED; 735 751 priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; ··· 762 740 struct net_device *dev, 763 741 enum nl80211_iftype type) 764 742 { 743 + struct mwifiex_adapter *adapter = priv->adapter; 744 + unsigned long flags; 745 + 765 746 mwifiex_init_priv(priv); 766 747 767 748 priv->bss_mode = type; ··· 794 769 dev->name, type); 795 770 return -EOPNOTSUPP; 796 771 } 772 + 773 + spin_lock_irqsave(&adapter->main_proc_lock, flags); 774 + adapter->main_locked = false; 775 + spin_unlock_irqrestore(&adapter->main_proc_lock, flags); 776 + 777 + spin_lock_irqsave(&adapter->rx_proc_lock, flags); 778 + adapter->rx_locked = false; 779 + spin_unlock_irqrestore(&adapter->rx_proc_lock, flags); 797 780 798 781 return 0; 799 782 } ··· 2766 2733 } 2767 2734 2768 2735 #ifdef CONFIG_PM 2769 - static int mwifiex_set_mef_filter(struct mwifiex_private *priv, 2770 - struct 
cfg80211_wowlan *wowlan) 2736 + static void mwifiex_set_auto_arp_mef_entry(struct mwifiex_private *priv, 2737 + struct mwifiex_mef_entry *mef_entry) 2738 + { 2739 + int i, filt_num = 0, num_ipv4 = 0; 2740 + struct in_device *in_dev; 2741 + struct in_ifaddr *ifa; 2742 + __be32 ips[MWIFIEX_MAX_SUPPORTED_IPADDR]; 2743 + struct mwifiex_adapter *adapter = priv->adapter; 2744 + 2745 + mef_entry->mode = MEF_MODE_HOST_SLEEP; 2746 + mef_entry->action = MEF_ACTION_AUTO_ARP; 2747 + 2748 + /* Enable ARP offload feature */ 2749 + memset(ips, 0, sizeof(ips)); 2750 + for (i = 0; i < MWIFIEX_MAX_BSS_NUM; i++) { 2751 + if (adapter->priv[i]->netdev) { 2752 + in_dev = __in_dev_get_rtnl(adapter->priv[i]->netdev); 2753 + if (!in_dev) 2754 + continue; 2755 + ifa = in_dev->ifa_list; 2756 + if (!ifa || !ifa->ifa_local) 2757 + continue; 2758 + ips[i] = ifa->ifa_local; 2759 + num_ipv4++; 2760 + } 2761 + } 2762 + 2763 + for (i = 0; i < num_ipv4; i++) { 2764 + if (!ips[i]) 2765 + continue; 2766 + mef_entry->filter[filt_num].repeat = 1; 2767 + memcpy(mef_entry->filter[filt_num].byte_seq, 2768 + (u8 *)&ips[i], sizeof(ips[i])); 2769 + mef_entry->filter[filt_num]. 
2770 + byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] = 2771 + sizeof(ips[i]); 2772 + mef_entry->filter[filt_num].offset = 46; 2773 + mef_entry->filter[filt_num].filt_type = TYPE_EQ; 2774 + if (filt_num) { 2775 + mef_entry->filter[filt_num].filt_action = 2776 + TYPE_OR; 2777 + } 2778 + filt_num++; 2779 + } 2780 + 2781 + mef_entry->filter[filt_num].repeat = 1; 2782 + mef_entry->filter[filt_num].byte_seq[0] = 0x08; 2783 + mef_entry->filter[filt_num].byte_seq[1] = 0x06; 2784 + mef_entry->filter[filt_num].byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] = 2; 2785 + mef_entry->filter[filt_num].offset = 20; 2786 + mef_entry->filter[filt_num].filt_type = TYPE_EQ; 2787 + mef_entry->filter[filt_num].filt_action = TYPE_AND; 2788 + } 2789 + 2790 + static int mwifiex_set_wowlan_mef_entry(struct mwifiex_private *priv, 2791 + struct mwifiex_ds_mef_cfg *mef_cfg, 2792 + struct mwifiex_mef_entry *mef_entry, 2793 + struct cfg80211_wowlan *wowlan) 2771 2794 { 2772 2795 int i, filt_num = 0, ret = 0; 2773 2796 bool first_pat = true; 2774 2797 u8 byte_seq[MWIFIEX_MEF_MAX_BYTESEQ + 1]; 2775 2798 const u8 ipv4_mc_mac[] = {0x33, 0x33}; 2776 2799 const u8 ipv6_mc_mac[] = {0x01, 0x00, 0x5e}; 2777 - struct mwifiex_ds_mef_cfg mef_cfg; 2778 - struct mwifiex_mef_entry *mef_entry; 2779 2800 2780 - mef_entry = kzalloc(sizeof(*mef_entry), GFP_KERNEL); 2781 - if (!mef_entry) 2782 - return -ENOMEM; 2783 - 2784 - memset(&mef_cfg, 0, sizeof(mef_cfg)); 2785 - mef_cfg.num_entries = 1; 2786 - mef_cfg.mef_entry = mef_entry; 2787 2801 mef_entry->mode = MEF_MODE_HOST_SLEEP; 2788 2802 mef_entry->action = MEF_ACTION_ALLOW_AND_WAKEUP_HOST; 2789 2803 ··· 2847 2767 if (!wowlan->patterns[i].pkt_offset) { 2848 2768 if (!(byte_seq[0] & 0x01) && 2849 2769 (byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 1)) { 2850 - mef_cfg.criteria |= MWIFIEX_CRITERIA_UNICAST; 2770 + mef_cfg->criteria |= MWIFIEX_CRITERIA_UNICAST; 2851 2771 continue; 2852 2772 } else if (is_broadcast_ether_addr(byte_seq)) { 2853 - mef_cfg.criteria |= MWIFIEX_CRITERIA_BROADCAST; 2773 + 
mef_cfg->criteria |= MWIFIEX_CRITERIA_BROADCAST; 2854 2774 continue; 2855 2775 } else if ((!memcmp(byte_seq, ipv4_mc_mac, 2) && 2856 2776 (byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 2)) || 2857 2777 (!memcmp(byte_seq, ipv6_mc_mac, 3) && 2858 2778 (byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 3))) { 2859 - mef_cfg.criteria |= MWIFIEX_CRITERIA_MULTICAST; 2779 + mef_cfg->criteria |= MWIFIEX_CRITERIA_MULTICAST; 2860 2780 continue; 2861 2781 } 2862 2782 } 2863 - 2864 2783 mef_entry->filter[filt_num].repeat = 1; 2865 2784 mef_entry->filter[filt_num].offset = 2866 2785 wowlan->patterns[i].pkt_offset; ··· 2876 2797 } 2877 2798 2878 2799 if (wowlan->magic_pkt) { 2879 - mef_cfg.criteria |= MWIFIEX_CRITERIA_UNICAST; 2800 + mef_cfg->criteria |= MWIFIEX_CRITERIA_UNICAST; 2880 2801 mef_entry->filter[filt_num].repeat = 16; 2881 2802 memcpy(mef_entry->filter[filt_num].byte_seq, priv->curr_addr, 2882 2803 ETH_ALEN); ··· 2897 2818 mef_entry->filter[filt_num].filt_type = TYPE_EQ; 2898 2819 mef_entry->filter[filt_num].filt_action = TYPE_OR; 2899 2820 } 2821 + return ret; 2822 + } 2823 + 2824 + static int mwifiex_set_mef_filter(struct mwifiex_private *priv, 2825 + struct cfg80211_wowlan *wowlan) 2826 + { 2827 + int ret = 0, num_entries = 1; 2828 + struct mwifiex_ds_mef_cfg mef_cfg; 2829 + struct mwifiex_mef_entry *mef_entry; 2830 + 2831 + if (wowlan->n_patterns || wowlan->magic_pkt) 2832 + num_entries++; 2833 + 2834 + mef_entry = kcalloc(num_entries, sizeof(*mef_entry), GFP_KERNEL); 2835 + if (!mef_entry) 2836 + return -ENOMEM; 2837 + 2838 + memset(&mef_cfg, 0, sizeof(mef_cfg)); 2839 + mef_cfg.criteria |= MWIFIEX_CRITERIA_BROADCAST | 2840 + MWIFIEX_CRITERIA_UNICAST; 2841 + mef_cfg.num_entries = num_entries; 2842 + mef_cfg.mef_entry = mef_entry; 2843 + 2844 + mwifiex_set_auto_arp_mef_entry(priv, &mef_entry[0]); 2845 + 2846 + if (wowlan->n_patterns || wowlan->magic_pkt) 2847 + ret = mwifiex_set_wowlan_mef_entry(priv, &mef_cfg, 2848 + &mef_entry[1], wowlan); 2900 2849 2901 2850 if (!mef_cfg.criteria) 
2902 2851 mef_cfg.criteria = MWIFIEX_CRITERIA_BROADCAST | ··· 2932 2825 MWIFIEX_CRITERIA_MULTICAST; 2933 2826 2934 2827 ret = mwifiex_send_cmd(priv, HostCmd_CMD_MEF_CFG, 2935 - HostCmd_ACT_GEN_SET, 0, &mef_cfg, true); 2936 - 2828 + HostCmd_ACT_GEN_SET, 0, 2829 + &mef_cfg, true); 2937 2830 kfree(mef_entry); 2938 2831 return ret; 2939 2832 } ··· 2943 2836 { 2944 2837 struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy); 2945 2838 struct mwifiex_ds_hs_cfg hs_cfg; 2946 - int ret = 0; 2947 - struct mwifiex_private *priv = 2948 - mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA); 2839 + int i, ret = 0; 2840 + struct mwifiex_private *priv; 2841 + 2842 + for (i = 0; i < adapter->priv_num; i++) { 2843 + priv = adapter->priv[i]; 2844 + mwifiex_abort_cac(priv); 2845 + } 2846 + 2847 + mwifiex_cancel_all_pending_cmd(adapter); 2949 2848 2950 2849 if (!wowlan) { 2951 2850 dev_warn(adapter->dev, "None of the WOWLAN triggers enabled\n"); 2952 2851 return 0; 2953 2852 } 2853 + 2854 + priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA); 2954 2855 2955 2856 if (!priv->media_connected) { 2956 2857 dev_warn(adapter->dev, ··· 2966 2851 return 0; 2967 2852 } 2968 2853 2969 - if (wowlan->n_patterns || wowlan->magic_pkt) { 2970 - ret = mwifiex_set_mef_filter(priv, wowlan); 2971 - if (ret) { 2972 - dev_err(adapter->dev, "Failed to set MEF filter\n"); 2973 - return ret; 2974 - } 2854 + ret = mwifiex_set_mef_filter(priv, wowlan); 2855 + if (ret) { 2856 + dev_err(adapter->dev, "Failed to set MEF filter\n"); 2857 + return ret; 2975 2858 } 2976 2859 2977 2860 if (wowlan->disconnect) {
+9 -1
drivers/net/wireless/mwifiex/decl.h
··· 83 83 #define MWIFIEX_BUF_FLAG_TDLS_PKT BIT(2) 84 84 #define MWIFIEX_BUF_FLAG_EAPOL_TX_STATUS BIT(3) 85 85 #define MWIFIEX_BUF_FLAG_ACTION_TX_STATUS BIT(4) 86 + #define MWIFIEX_BUF_FLAG_AGGR_PKT BIT(5) 86 87 87 88 #define MWIFIEX_BRIDGED_PKTS_THR_HIGH 1024 88 89 #define MWIFIEX_BRIDGED_PKTS_THR_LOW 128 ··· 111 110 #define MWIFIEX_MAX_P2P_NUM 1 112 111 113 112 #define MWIFIEX_A_BAND_START_FREQ 5000 113 + 114 + /* SDIO Aggr data packet special info */ 115 + #define SDIO_MAX_AGGR_BUF_SIZE (256 * 255) 116 + #define BLOCK_NUMBER_OFFSET 15 117 + #define SDIO_HEADER_OFFSET 28 114 118 115 119 enum mwifiex_bss_type { 116 120 MWIFIEX_BSS_TYPE_STA = 0, ··· 174 168 }; 175 169 176 170 struct mwifiex_rxinfo { 171 + struct sk_buff *parent; 177 172 u8 bss_num; 178 173 u8 bss_type; 179 - struct sk_buff *parent; 180 174 u8 use_count; 175 + u8 buf_type; 181 176 }; 182 177 183 178 struct mwifiex_txinfo { ··· 186 179 u8 flags; 187 180 u8 bss_num; 188 181 u8 bss_type; 182 + u8 aggr_num; 189 183 u32 pkt_len; 190 184 u8 ack_frame_id; 191 185 u64 cookie;
+11
drivers/net/wireless/mwifiex/fw.h
··· 197 197 198 198 #define ISSUPP_11NENABLED(FwCapInfo) (FwCapInfo & BIT(11)) 199 199 #define ISSUPP_TDLS_ENABLED(FwCapInfo) (FwCapInfo & BIT(14)) 200 + #define ISSUPP_SDIO_SPA_ENABLED(FwCapInfo) (FwCapInfo & BIT(16)) 200 201 201 202 #define MWIFIEX_DEF_HT_CAP (IEEE80211_HT_CAP_DSSSCCK40 | \ 202 203 (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) | \ ··· 354 353 #define HostCmd_CMD_REMAIN_ON_CHAN 0x010d 355 354 #define HostCmd_CMD_11AC_CFG 0x0112 356 355 #define HostCmd_CMD_TDLS_OPER 0x0122 356 + #define HostCmd_CMD_SDIO_SP_RX_AGGR_CFG 0x0223 357 357 358 358 #define PROTOCOL_NO_SECURITY 0x01 359 359 #define PROTOCOL_STATIC_WEP 0x02 ··· 525 523 #define TYPE_OR (MAX_OPERAND+5) 526 524 #define MEF_MODE_HOST_SLEEP 1 527 525 #define MEF_ACTION_ALLOW_AND_WAKEUP_HOST 3 526 + #define MEF_ACTION_AUTO_ARP 0x10 528 527 #define MWIFIEX_CRITERIA_BROADCAST BIT(0) 529 528 #define MWIFIEX_CRITERIA_UNICAST BIT(1) 530 529 #define MWIFIEX_CRITERIA_MULTICAST BIT(3) 530 + #define MWIFIEX_MAX_SUPPORTED_IPADDR 4 531 531 532 532 #define ACT_TDLS_DELETE 0x00 533 533 #define ACT_TDLS_CREATE 0x01 ··· 1242 1238 __le64 start_tsf; 1243 1239 __le32 duration; 1244 1240 u8 tlvbuf[0]; 1241 + } __packed; 1242 + 1243 + struct host_cmd_sdio_sp_rx_aggr_cfg { 1244 + u8 action; 1245 + u8 enable; 1246 + __le16 block_size; 1245 1247 } __packed; 1246 1248 1247 1249 struct mwifiex_fixed_bcn_param { ··· 1972 1962 struct host_cmd_ds_coalesce_cfg coalesce_cfg; 1973 1963 struct host_cmd_ds_tdls_oper tdls_oper; 1974 1964 struct host_cmd_ds_chan_rpt_req chan_rpt_req; 1965 + struct host_cmd_sdio_sp_rx_aggr_cfg sdio_rx_aggr_cfg; 1975 1966 } params; 1976 1967 } __packed; 1977 1968
+14 -12
drivers/net/wireless/mwifiex/init.c
··· 266 266 267 267 mwifiex_wmm_init(adapter); 268 268 269 - if (adapter->sleep_cfm) { 270 - sleep_cfm_buf = (struct mwifiex_opt_sleep_confirm *) 271 - adapter->sleep_cfm->data; 272 - memset(sleep_cfm_buf, 0, adapter->sleep_cfm->len); 273 - sleep_cfm_buf->command = 274 - cpu_to_le16(HostCmd_CMD_802_11_PS_MODE_ENH); 275 - sleep_cfm_buf->size = 276 - cpu_to_le16(adapter->sleep_cfm->len); 277 - sleep_cfm_buf->result = 0; 278 - sleep_cfm_buf->action = cpu_to_le16(SLEEP_CONFIRM); 279 - sleep_cfm_buf->resp_ctrl = cpu_to_le16(RESP_NEEDED); 280 - } 269 + sleep_cfm_buf = (struct mwifiex_opt_sleep_confirm *) 270 + adapter->sleep_cfm->data; 271 + memset(sleep_cfm_buf, 0, adapter->sleep_cfm->len); 272 + sleep_cfm_buf->command = cpu_to_le16(HostCmd_CMD_802_11_PS_MODE_ENH); 273 + sleep_cfm_buf->size = cpu_to_le16(adapter->sleep_cfm->len); 274 + sleep_cfm_buf->result = 0; 275 + sleep_cfm_buf->action = cpu_to_le16(SLEEP_CONFIRM); 276 + sleep_cfm_buf->resp_ctrl = cpu_to_le16(RESP_NEEDED); 277 + 281 278 memset(&adapter->sleep_params, 0, sizeof(adapter->sleep_params)); 282 279 memset(&adapter->sleep_period, 0, sizeof(adapter->sleep_period)); 283 280 adapter->tx_lock_flag = false; ··· 478 481 spin_lock_init(&adapter->rx_proc_lock); 479 482 480 483 skb_queue_head_init(&adapter->rx_data_q); 484 + skb_queue_head_init(&adapter->tx_data_q); 481 485 482 486 for (i = 0; i < adapter->priv_num; ++i) { 483 487 INIT_LIST_HEAD(&adapter->bss_prio_tbl[i].bss_prio_head); ··· 685 687 mwifiex_delete_bss_prio_tbl(priv); 686 688 } 687 689 } 690 + 691 + atomic_set(&adapter->tx_queued, 0); 692 + while ((skb = skb_dequeue(&adapter->tx_data_q))) 693 + mwifiex_write_data_complete(adapter, skb, 0, 0); 688 694 689 695 spin_lock_irqsave(&adapter->rx_proc_lock, flags); 690 696
+62 -14
drivers/net/wireless/mwifiex/main.c
··· 131 131 return 0; 132 132 } 133 133 134 + void mwifiex_queue_main_work(struct mwifiex_adapter *adapter) 135 + { 136 + unsigned long flags; 137 + 138 + spin_lock_irqsave(&adapter->main_proc_lock, flags); 139 + if (adapter->mwifiex_processing) { 140 + adapter->more_task_flag = true; 141 + spin_unlock_irqrestore(&adapter->main_proc_lock, flags); 142 + } else { 143 + spin_unlock_irqrestore(&adapter->main_proc_lock, flags); 144 + queue_work(adapter->workqueue, &adapter->main_work); 145 + } 146 + } 147 + EXPORT_SYMBOL_GPL(mwifiex_queue_main_work); 148 + 149 + static void mwifiex_queue_rx_work(struct mwifiex_adapter *adapter) 150 + { 151 + unsigned long flags; 152 + 153 + spin_lock_irqsave(&adapter->rx_proc_lock, flags); 154 + if (adapter->rx_processing) { 155 + spin_unlock_irqrestore(&adapter->rx_proc_lock, flags); 156 + } else { 157 + spin_unlock_irqrestore(&adapter->rx_proc_lock, flags); 158 + queue_work(adapter->rx_workqueue, &adapter->rx_work); 159 + } 160 + } 161 + 134 162 static int mwifiex_process_rx(struct mwifiex_adapter *adapter) 135 163 { 136 164 unsigned long flags; 137 165 struct sk_buff *skb; 166 + struct mwifiex_rxinfo *rx_info; 138 167 139 168 spin_lock_irqsave(&adapter->rx_proc_lock, flags); 140 169 if (adapter->rx_processing || adapter->rx_locked) { ··· 183 154 if (adapter->if_ops.submit_rem_rx_urbs) 184 155 adapter->if_ops.submit_rem_rx_urbs(adapter); 185 156 adapter->delay_main_work = false; 186 - queue_work(adapter->workqueue, &adapter->main_work); 157 + mwifiex_queue_main_work(adapter); 187 158 } 188 - mwifiex_handle_rx_packet(adapter, skb); 159 + rx_info = MWIFIEX_SKB_RXCB(skb); 160 + if (rx_info->buf_type == MWIFIEX_TYPE_AGGR_DATA) { 161 + if (adapter->if_ops.deaggr_pkt) 162 + adapter->if_ops.deaggr_pkt(adapter, skb); 163 + dev_kfree_skb_any(skb); 164 + } else { 165 + mwifiex_handle_rx_packet(adapter, skb); 166 + } 189 167 } 190 168 spin_lock_irqsave(&adapter->rx_proc_lock, flags); 191 169 adapter->rx_processing = false; ··· 225 189 
spin_lock_irqsave(&adapter->main_proc_lock, flags); 226 190 227 191 /* Check if already processing */ 228 - if (adapter->mwifiex_processing) { 192 + if (adapter->mwifiex_processing || adapter->main_locked) { 229 193 adapter->more_task_flag = true; 230 194 spin_unlock_irqrestore(&adapter->main_proc_lock, flags); 231 195 goto exit_main_proc; ··· 250 214 if (atomic_read(&adapter->rx_pending) >= HIGH_RX_PENDING && 251 215 adapter->iface_type != MWIFIEX_USB) { 252 216 adapter->delay_main_work = true; 253 - if (!adapter->rx_processing) 254 - queue_work(adapter->rx_workqueue, 255 - &adapter->rx_work); 217 + mwifiex_queue_rx_work(adapter); 256 218 break; 257 219 } 258 220 ··· 263 229 } 264 230 265 231 if (adapter->rx_work_enabled && adapter->data_received) 266 - queue_work(adapter->rx_workqueue, &adapter->rx_work); 232 + mwifiex_queue_rx_work(adapter); 267 233 268 234 /* Need to wake up the card ? */ 269 235 if ((adapter->ps_state == PS_STATE_SLEEP) && 270 236 (adapter->pm_wakeup_card_req && 271 237 !adapter->pm_wakeup_fw_try) && 272 238 (is_command_pending(adapter) || 239 + !skb_queue_empty(&adapter->tx_data_q) || 273 240 !mwifiex_wmm_lists_empty(adapter))) { 274 241 adapter->pm_wakeup_fw_try = true; 275 242 mod_timer(&adapter->wakeup_timer, jiffies + (HZ*3)); ··· 282 247 if (IS_CARD_RX_RCVD(adapter)) { 283 248 adapter->data_received = false; 284 249 adapter->pm_wakeup_fw_try = false; 285 - del_timer_sync(&adapter->wakeup_timer); 250 + del_timer(&adapter->wakeup_timer); 286 251 if (adapter->ps_state == PS_STATE_SLEEP) 287 252 adapter->ps_state = PS_STATE_AWAKE; 288 253 } else { ··· 295 260 296 261 if ((!adapter->scan_chan_gap_enabled && 297 262 adapter->scan_processing) || adapter->data_sent || 298 - mwifiex_wmm_lists_empty(adapter)) { 263 + (mwifiex_wmm_lists_empty(adapter) && 264 + skb_queue_empty(&adapter->tx_data_q))) { 299 265 if (adapter->cmd_sent || adapter->curr_cmd || 300 266 (!is_command_pending(adapter))) 301 267 break; ··· 348 312 349 313 if 
((adapter->scan_chan_gap_enabled || 350 314 !adapter->scan_processing) && 315 + !adapter->data_sent && 316 + !skb_queue_empty(&adapter->tx_data_q)) { 317 + mwifiex_process_tx_queue(adapter); 318 + if (adapter->hs_activated) { 319 + adapter->is_hs_configured = false; 320 + mwifiex_hs_activated_event 321 + (mwifiex_get_priv 322 + (adapter, MWIFIEX_BSS_ROLE_ANY), 323 + false); 324 + } 325 + } 326 + 327 + if ((adapter->scan_chan_gap_enabled || 328 + !adapter->scan_processing) && 351 329 !adapter->data_sent && !mwifiex_wmm_lists_empty(adapter)) { 352 330 mwifiex_wmm_process_tx(adapter); 353 331 if (adapter->hs_activated) { ··· 375 325 376 326 if (adapter->delay_null_pkt && !adapter->cmd_sent && 377 327 !adapter->curr_cmd && !is_command_pending(adapter) && 378 - mwifiex_wmm_lists_empty(adapter)) { 328 + (mwifiex_wmm_lists_empty(adapter) && 329 + skb_queue_empty(&adapter->tx_data_q))) { 379 330 if (!mwifiex_send_null_packet 380 331 (mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA), 381 332 MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET | ··· 657 606 atomic_inc(&priv->adapter->tx_pending); 658 607 mwifiex_wmm_add_buf_txqueue(priv, skb); 659 608 660 - queue_work(priv->adapter->workqueue, &priv->adapter->main_work); 609 + mwifiex_queue_main_work(priv->adapter); 661 610 662 611 return 0; 663 612 } ··· 1148 1097 1149 1098 INIT_WORK(&adapter->rx_work, mwifiex_rx_work_queue); 1150 1099 } 1151 - 1152 - if (adapter->if_ops.iface_work) 1153 - INIT_WORK(&adapter->iface_work, adapter->if_ops.iface_work); 1154 1100 1155 1101 /* Register the device. Fill up the private data structure with relevant 1156 1102 information from the card. */
+22 -8
drivers/net/wireless/mwifiex/main.h
··· 35 35 #include <linux/ctype.h> 36 36 #include <linux/of.h> 37 37 #include <linux/idr.h> 38 + #include <linux/inetdevice.h> 38 39 39 40 #include "decl.h" 40 41 #include "ioctl.h" ··· 58 57 #define MWIFIEX_DRIVER_MODE_BITMASK (BIT(0) | BIT(1) | BIT(2)) 59 58 60 59 #define MWIFIEX_MAX_AP 64 60 + 61 + #define MWIFIEX_MAX_PKTS_TXQ 16 61 62 62 63 #define MWIFIEX_DEFAULT_WATCHDOG_TIMEOUT (5 * HZ) 63 64 ··· 121 118 122 119 #define MWIFIEX_TYPE_CMD 1 123 120 #define MWIFIEX_TYPE_DATA 0 121 + #define MWIFIEX_TYPE_AGGR_DATA 10 124 122 #define MWIFIEX_TYPE_EVENT 3 125 123 126 124 #define MAX_BITMAP_RATES_SIZE 18 ··· 214 210 u8 amsdu; 215 211 }; 216 212 213 + enum mwifiex_ba_status { 214 + BA_SETUP_NONE = 0, 215 + BA_SETUP_INPROGRESS, 216 + BA_SETUP_COMPLETE 217 + }; 218 + 217 219 struct mwifiex_ra_list_tbl { 218 220 struct list_head list; 219 221 struct sk_buff_head skb_head; ··· 228 218 u16 max_amsdu; 229 219 u16 ba_pkt_count; 230 220 u8 ba_packet_thr; 221 + enum mwifiex_ba_status ba_status; 222 + u8 amsdu_in_ampdu; 231 223 u16 total_pkt_count; 232 224 bool tdls_link; 233 225 }; ··· 613 601 struct mwifiex_11h_intf_state state_11h; 614 602 }; 615 603 616 - enum mwifiex_ba_status { 617 - BA_SETUP_NONE = 0, 618 - BA_SETUP_INPROGRESS, 619 - BA_SETUP_COMPLETE 620 - }; 621 604 622 605 struct mwifiex_tx_ba_stream_tbl { 623 606 struct list_head list; ··· 745 738 int (*clean_pcie_ring) (struct mwifiex_adapter *adapter); 746 739 void (*iface_work)(struct work_struct *work); 747 740 void (*submit_rem_rx_urbs)(struct mwifiex_adapter *adapter); 741 + void (*deaggr_pkt)(struct mwifiex_adapter *, struct sk_buff *); 748 742 }; 749 743 750 744 struct mwifiex_adapter { ··· 779 771 bool rx_processing; 780 772 bool delay_main_work; 781 773 bool rx_locked; 774 + bool main_locked; 782 775 struct mwifiex_bss_prio_tbl bss_prio_tbl[MWIFIEX_MAX_BSS_NUM]; 783 776 /* spin lock for init/shutdown */ 784 777 spinlock_t mwifiex_lock; ··· 789 780 u8 more_task_flag; 790 781 u16 tx_buf_size; 791 782 u16 
curr_tx_buf_size; 783 + bool sdio_rx_aggr_enable; 784 + u16 sdio_rx_block_size; 792 785 u32 ioport; 793 786 enum MWIFIEX_HARDWARE_STATUS hw_status; 794 787 u16 number_of_antenna; ··· 825 814 spinlock_t scan_pending_q_lock; 826 815 /* spin lock for RX processing routine */ 827 816 spinlock_t rx_proc_lock; 817 + struct sk_buff_head tx_data_q; 818 + atomic_t tx_queued; 828 819 u32 scan_processing; 829 820 u16 region_code; 830 821 struct mwifiex_802_11d_domain_reg domain_reg; ··· 898 885 bool ext_scan; 899 886 u8 fw_api_ver; 900 887 u8 key_api_major_ver, key_api_minor_ver; 901 - struct work_struct iface_work; 902 - unsigned long iface_work_flags; 903 888 struct memory_type_mapping *mem_type_mapping_tbl; 904 889 u8 num_mem_types; 905 890 u8 curr_mem_idx; ··· 910 899 int survey_idx; 911 900 bool auto_tdls; 912 901 }; 902 + 903 + void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter); 913 904 914 905 int mwifiex_init_lock_list(struct mwifiex_adapter *adapter); 915 906 ··· 1435 1422 u8 rx_rate, u8 ht_info); 1436 1423 1437 1424 void mwifiex_dump_drv_info(struct mwifiex_adapter *adapter); 1438 - void *mwifiex_alloc_rx_buf(int rx_len, gfp_t flags); 1425 + void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags); 1426 + void mwifiex_queue_main_work(struct mwifiex_adapter *adapter); 1439 1427 1440 1428 #ifdef CONFIG_DEBUG_FS 1441 1429 void mwifiex_debugfs_init(void);
+15 -16
drivers/net/wireless/mwifiex/pcie.c
··· 234 234 if (!adapter || !adapter->priv_num) 235 235 return; 236 236 237 - cancel_work_sync(&adapter->iface_work); 238 - 239 237 if (user_rmmod) { 240 238 #ifdef CONFIG_PM_SLEEP 241 239 if (adapter->is_suspended) ··· 496 498 497 499 for (i = 0; i < MWIFIEX_MAX_TXRX_BD; i++) { 498 500 /* Allocate skb here so that firmware can DMA data from it */ 499 - skb = mwifiex_alloc_rx_buf(MWIFIEX_RX_DATA_BUF_SIZE, 500 - GFP_KERNEL | GFP_DMA); 501 + skb = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE, 502 + GFP_KERNEL | GFP_DMA); 501 503 if (!skb) { 502 504 dev_err(adapter->dev, 503 505 "Unable to allocate skb for RX ring.\n"); ··· 1296 1298 } 1297 1299 } 1298 1300 1299 - skb_tmp = mwifiex_alloc_rx_buf(MWIFIEX_RX_DATA_BUF_SIZE, 1300 - GFP_KERNEL | GFP_DMA); 1301 + skb_tmp = mwifiex_alloc_dma_align_buf(MWIFIEX_RX_DATA_BUF_SIZE, 1302 + GFP_KERNEL | GFP_DMA); 1301 1303 if (!skb_tmp) { 1302 1304 dev_err(adapter->dev, 1303 1305 "Unable to allocate skb.\n"); ··· 2099 2101 goto exit; 2100 2102 2101 2103 mwifiex_interrupt_status(adapter); 2102 - queue_work(adapter->workqueue, &adapter->main_work); 2104 + mwifiex_queue_main_work(adapter); 2103 2105 2104 2106 exit: 2105 2107 return IRQ_HANDLED; ··· 2371 2373 adapter->curr_mem_idx = 0; 2372 2374 } 2373 2375 2376 + static unsigned long iface_work_flags; 2377 + static struct mwifiex_adapter *save_adapter; 2374 2378 static void mwifiex_pcie_work(struct work_struct *work) 2375 2379 { 2376 - struct mwifiex_adapter *adapter = 2377 - container_of(work, struct mwifiex_adapter, iface_work); 2378 - 2379 2380 if (test_and_clear_bit(MWIFIEX_IFACE_WORK_FW_DUMP, 2380 - &adapter->iface_work_flags)) 2381 - mwifiex_pcie_fw_dump_work(adapter); 2381 + &iface_work_flags)) 2382 + mwifiex_pcie_fw_dump_work(save_adapter); 2382 2383 } 2383 2384 2385 + static DECLARE_WORK(pcie_work, mwifiex_pcie_work); 2384 2386 /* This function dumps FW information */ 2385 2387 static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) 2386 2388 { 2387 - if 
(test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &adapter->iface_work_flags)) 2389 + save_adapter = adapter; 2390 + if (test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags)) 2388 2391 return; 2389 2392 2390 - set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &adapter->iface_work_flags); 2393 + set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags); 2391 2394 2392 - schedule_work(&adapter->iface_work); 2395 + schedule_work(&pcie_work); 2393 2396 } 2394 2397 2395 2398 /* ··· 2618 2619 .init_fw_port = mwifiex_pcie_init_fw_port, 2619 2620 .clean_pcie_ring = mwifiex_clean_pcie_ring_buf, 2620 2621 .fw_dump = mwifiex_pcie_fw_dump, 2621 - .iface_work = mwifiex_pcie_work, 2622 2622 }; 2623 2623 2624 2624 /* ··· 2663 2665 /* Set the flag as user is removing this module. */ 2664 2666 user_rmmod = 1; 2665 2667 2668 + cancel_work_sync(&pcie_work); 2666 2669 pci_unregister_driver(&mwifiex_pcie); 2667 2670 } 2668 2671
+163 -69
drivers/net/wireless/mwifiex/sdio.c
··· 47 47 static u8 user_rmmod; 48 48 49 49 static struct mwifiex_if_ops sdio_ops; 50 + static unsigned long iface_work_flags; 50 51 51 52 static struct semaphore add_remove_card_sem; 52 53 ··· 200 199 adapter = card->adapter; 201 200 if (!adapter || !adapter->priv_num) 202 201 return; 203 - 204 - cancel_work_sync(&adapter->iface_work); 205 202 206 203 if (user_rmmod) { 207 204 if (adapter->is_suspended) ··· 1042 1043 } 1043 1044 1044 1045 /* 1046 + * This function decode sdio aggreation pkt. 1047 + * 1048 + * Based on the the data block size and pkt_len, 1049 + * skb data will be decoded to few packets. 1050 + */ 1051 + static void mwifiex_deaggr_sdio_pkt(struct mwifiex_adapter *adapter, 1052 + struct sk_buff *skb) 1053 + { 1054 + u32 total_pkt_len, pkt_len; 1055 + struct sk_buff *skb_deaggr; 1056 + u32 pkt_type; 1057 + u16 blk_size; 1058 + u8 blk_num; 1059 + u8 *data; 1060 + 1061 + data = skb->data; 1062 + total_pkt_len = skb->len; 1063 + 1064 + while (total_pkt_len >= (SDIO_HEADER_OFFSET + INTF_HEADER_LEN)) { 1065 + if (total_pkt_len < adapter->sdio_rx_block_size) 1066 + break; 1067 + blk_num = *(data + BLOCK_NUMBER_OFFSET); 1068 + blk_size = adapter->sdio_rx_block_size * blk_num; 1069 + if (blk_size > total_pkt_len) { 1070 + dev_err(adapter->dev, "%s: error in pkt,\t" 1071 + "blk_num=%d, blk_size=%d, total_pkt_len=%d\n", 1072 + __func__, blk_num, blk_size, total_pkt_len); 1073 + break; 1074 + } 1075 + pkt_len = le16_to_cpu(*(__le16 *)(data + SDIO_HEADER_OFFSET)); 1076 + pkt_type = le16_to_cpu(*(__le16 *)(data + SDIO_HEADER_OFFSET + 1077 + 2)); 1078 + if ((pkt_len + SDIO_HEADER_OFFSET) > blk_size) { 1079 + dev_err(adapter->dev, "%s: error in pkt,\t" 1080 + "pkt_len=%d, blk_size=%d\n", 1081 + __func__, pkt_len, blk_size); 1082 + break; 1083 + } 1084 + skb_deaggr = mwifiex_alloc_dma_align_buf(pkt_len, 1085 + GFP_KERNEL | GFP_DMA); 1086 + if (!skb_deaggr) 1087 + break; 1088 + skb_put(skb_deaggr, pkt_len); 1089 + memcpy(skb_deaggr->data, data + SDIO_HEADER_OFFSET, 
pkt_len); 1090 + skb_pull(skb_deaggr, INTF_HEADER_LEN); 1091 + 1092 + mwifiex_handle_rx_packet(adapter, skb_deaggr); 1093 + data += blk_size; 1094 + total_pkt_len -= blk_size; 1095 + } 1096 + } 1097 + 1098 + /* 1045 1099 * This function decodes a received packet. 1046 1100 * 1047 1101 * Based on the type, the packet is treated as either a data, or ··· 1107 1055 u8 *cmd_buf; 1108 1056 __le16 *curr_ptr = (__le16 *)skb->data; 1109 1057 u16 pkt_len = le16_to_cpu(*curr_ptr); 1058 + struct mwifiex_rxinfo *rx_info; 1110 1059 1111 - skb_trim(skb, pkt_len); 1112 - skb_pull(skb, INTF_HEADER_LEN); 1060 + if (upld_typ != MWIFIEX_TYPE_AGGR_DATA) { 1061 + skb_trim(skb, pkt_len); 1062 + skb_pull(skb, INTF_HEADER_LEN); 1063 + } 1113 1064 1114 1065 switch (upld_typ) { 1066 + case MWIFIEX_TYPE_AGGR_DATA: 1067 + dev_dbg(adapter->dev, "info: --- Rx: Aggr Data packet ---\n"); 1068 + rx_info = MWIFIEX_SKB_RXCB(skb); 1069 + rx_info->buf_type = MWIFIEX_TYPE_AGGR_DATA; 1070 + if (adapter->rx_work_enabled) { 1071 + skb_queue_tail(&adapter->rx_data_q, skb); 1072 + atomic_inc(&adapter->rx_pending); 1073 + adapter->data_received = true; 1074 + } else { 1075 + mwifiex_deaggr_sdio_pkt(adapter, skb); 1076 + dev_kfree_skb_any(skb); 1077 + } 1078 + break; 1079 + 1115 1080 case MWIFIEX_TYPE_DATA: 1116 1081 dev_dbg(adapter->dev, "info: --- Rx: Data packet ---\n"); 1117 1082 if (adapter->rx_work_enabled) { ··· 1196 1127 * provided there is space left, processed and finally uploaded. 
1197 1128 */ 1198 1129 static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, 1199 - struct sk_buff *skb, u8 port) 1130 + u16 rx_len, u8 port) 1200 1131 { 1201 1132 struct sdio_mmc_card *card = adapter->card; 1202 1133 s32 f_do_rx_aggr = 0; 1203 1134 s32 f_do_rx_cur = 0; 1204 1135 s32 f_aggr_cur = 0; 1136 + s32 f_post_aggr_cur = 0; 1205 1137 struct sk_buff *skb_deaggr; 1206 - u32 pind; 1207 - u32 pkt_len, pkt_type, mport; 1138 + struct sk_buff *skb = NULL; 1139 + u32 pkt_len, pkt_type, mport, pind; 1208 1140 u8 *curr_ptr; 1209 - u32 rx_len = skb->len; 1210 1141 1211 1142 if ((card->has_control_mask) && (port == CTRL_PORT)) { 1212 1143 /* Read the command Resp without aggr */ ··· 1233 1164 dev_dbg(adapter->dev, "info: %s: not last packet\n", __func__); 1234 1165 1235 1166 if (MP_RX_AGGR_IN_PROGRESS(card)) { 1236 - if (MP_RX_AGGR_BUF_HAS_ROOM(card, skb->len)) { 1167 + if (MP_RX_AGGR_BUF_HAS_ROOM(card, rx_len)) { 1237 1168 f_aggr_cur = 1; 1238 1169 } else { 1239 1170 /* No room in Aggr buf, do rx aggr now */ 1240 1171 f_do_rx_aggr = 1; 1241 - f_do_rx_cur = 1; 1172 + f_post_aggr_cur = 1; 1242 1173 } 1243 1174 } else { 1244 1175 /* Rx aggr not in progress */ ··· 1251 1182 1252 1183 if (MP_RX_AGGR_IN_PROGRESS(card)) { 1253 1184 f_do_rx_aggr = 1; 1254 - if (MP_RX_AGGR_BUF_HAS_ROOM(card, skb->len)) 1185 + if (MP_RX_AGGR_BUF_HAS_ROOM(card, rx_len)) 1255 1186 f_aggr_cur = 1; 1256 1187 else 1257 1188 /* No room in Aggr buf, do rx aggr now */ ··· 1264 1195 if (f_aggr_cur) { 1265 1196 dev_dbg(adapter->dev, "info: current packet aggregation\n"); 1266 1197 /* Curr pkt can be aggregated */ 1267 - mp_rx_aggr_setup(card, skb, port); 1198 + mp_rx_aggr_setup(card, rx_len, port); 1268 1199 1269 1200 if (MP_RX_AGGR_PKT_LIMIT_REACHED(card) || 1270 1201 mp_rx_aggr_port_limit_reached(card)) { ··· 1307 1238 curr_ptr = card->mpa_rx.buf; 1308 1239 1309 1240 for (pind = 0; pind < card->mpa_rx.pkt_cnt; pind++) { 1241 + u32 *len_arr = card->mpa_rx.len_arr; 1310 1242 1311 
1243 /* get curr PKT len & type */ 1312 1244 pkt_len = le16_to_cpu(*(__le16 *) &curr_ptr[0]); 1313 1245 pkt_type = le16_to_cpu(*(__le16 *) &curr_ptr[2]); 1314 1246 1315 1247 /* copy pkt to deaggr buf */ 1316 - skb_deaggr = card->mpa_rx.skb_arr[pind]; 1248 + skb_deaggr = mwifiex_alloc_dma_align_buf(len_arr[pind], 1249 + GFP_KERNEL | 1250 + GFP_DMA); 1251 + if (!skb_deaggr) { 1252 + dev_err(adapter->dev, "skb allocation failure drop pkt len=%d type=%d\n", 1253 + pkt_len, pkt_type); 1254 + curr_ptr += len_arr[pind]; 1255 + continue; 1256 + } 1317 1257 1318 - if ((pkt_type == MWIFIEX_TYPE_DATA) && (pkt_len <= 1319 - card->mpa_rx.len_arr[pind])) { 1258 + skb_put(skb_deaggr, len_arr[pind]); 1259 + 1260 + if ((pkt_type == MWIFIEX_TYPE_DATA || 1261 + (pkt_type == MWIFIEX_TYPE_AGGR_DATA && 1262 + adapter->sdio_rx_aggr_enable)) && 1263 + (pkt_len <= len_arr[pind])) { 1320 1264 1321 1265 memcpy(skb_deaggr->data, curr_ptr, pkt_len); 1322 1266 ··· 1339 1257 mwifiex_decode_rx_packet(adapter, skb_deaggr, 1340 1258 pkt_type); 1341 1259 } else { 1342 - dev_err(adapter->dev, "wrong aggr pkt:" 1343 - " type=%d len=%d max_len=%d\n", 1260 + dev_err(adapter->dev, " drop wrong aggr pkt:\t" 1261 + "sdio_single_port_rx_aggr=%d\t" 1262 + "type=%d len=%d max_len=%d\n", 1263 + adapter->sdio_rx_aggr_enable, 1344 1264 pkt_type, pkt_len, 1345 - card->mpa_rx.len_arr[pind]); 1265 + len_arr[pind]); 1346 1266 dev_kfree_skb_any(skb_deaggr); 1347 1267 } 1348 - curr_ptr += card->mpa_rx.len_arr[pind]; 1268 + curr_ptr += len_arr[pind]; 1349 1269 } 1350 1270 MP_RX_AGGR_BUF_RESET(card); 1351 1271 } ··· 1357 1273 dev_dbg(adapter->dev, "info: RX: port: %d, rx_len: %d\n", 1358 1274 port, rx_len); 1359 1275 1276 + skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL | GFP_DMA); 1277 + if (!skb) { 1278 + dev_err(adapter->dev, "single skb allocated fail,\t" 1279 + "drop pkt port=%d len=%d\n", port, rx_len); 1280 + if (mwifiex_sdio_card_to_host(adapter, &pkt_type, 1281 + card->mpa_rx.buf, rx_len, 1282 + 
adapter->ioport + port)) 1283 + goto error; 1284 + return 0; 1285 + } 1286 + 1287 + skb_put(skb, rx_len); 1288 + 1360 1289 if (mwifiex_sdio_card_to_host(adapter, &pkt_type, 1361 1290 skb->data, skb->len, 1362 1291 adapter->ioport + port)) 1363 1292 goto error; 1293 + if (!adapter->sdio_rx_aggr_enable && 1294 + pkt_type == MWIFIEX_TYPE_AGGR_DATA) { 1295 + dev_err(adapter->dev, "drop wrong pkt type %d\t" 1296 + "current SDIO RX Aggr not enabled\n", 1297 + pkt_type); 1298 + dev_kfree_skb_any(skb); 1299 + return 0; 1300 + } 1364 1301 1365 1302 mwifiex_decode_rx_packet(adapter, skb, pkt_type); 1366 1303 } 1367 - 1368 - return 0; 1369 - 1370 - error: 1371 - if (MP_RX_AGGR_IN_PROGRESS(card)) { 1372 - /* Multiport-aggregation transfer failed - cleanup */ 1373 - for (pind = 0; pind < card->mpa_rx.pkt_cnt; pind++) { 1374 - /* copy pkt to deaggr buf */ 1375 - skb_deaggr = card->mpa_rx.skb_arr[pind]; 1376 - dev_kfree_skb_any(skb_deaggr); 1377 - } 1378 - MP_RX_AGGR_BUF_RESET(card); 1304 + if (f_post_aggr_cur) { 1305 + dev_dbg(adapter->dev, "info: current packet aggregation\n"); 1306 + /* Curr pkt can be aggregated */ 1307 + mp_rx_aggr_setup(card, rx_len, port); 1379 1308 } 1380 1309 1381 - if (f_do_rx_cur) 1310 + return 0; 1311 + error: 1312 + if (MP_RX_AGGR_IN_PROGRESS(card)) 1313 + MP_RX_AGGR_BUF_RESET(card); 1314 + 1315 + if (f_do_rx_cur && skb) 1382 1316 /* Single transfer pending. 
Free curr buff also */ 1383 1317 dev_kfree_skb_any(skb); 1384 1318 ··· 1458 1356 MWIFIEX_RX_DATA_BUF_SIZE) 1459 1357 return -1; 1460 1358 rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE); 1359 + dev_dbg(adapter->dev, "info: rx_len = %d\n", rx_len); 1461 1360 1462 - skb = mwifiex_alloc_rx_buf(rx_len, GFP_KERNEL | GFP_DMA); 1361 + skb = mwifiex_alloc_dma_align_buf(rx_len, GFP_KERNEL | GFP_DMA); 1463 1362 if (!skb) 1464 1363 return -1; 1465 1364 ··· 1550 1447 1) / MWIFIEX_SDIO_BLOCK_SIZE; 1551 1448 if (rx_len <= INTF_HEADER_LEN || 1552 1449 (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE) > 1553 - MWIFIEX_RX_DATA_BUF_SIZE) { 1450 + card->mpa_rx.buf_size) { 1554 1451 dev_err(adapter->dev, "invalid rx_len=%d\n", 1555 1452 rx_len); 1556 1453 return -1; 1557 1454 } 1455 + 1558 1456 rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE); 1457 + dev_dbg(adapter->dev, "info: rx_len = %d\n", rx_len); 1559 1458 1560 - skb = mwifiex_alloc_rx_buf(rx_len, 1561 - GFP_KERNEL | GFP_DMA); 1562 - 1563 - if (!skb) { 1564 - dev_err(adapter->dev, "%s: failed to alloc skb", 1565 - __func__); 1566 - return -1; 1567 - } 1568 - 1569 - skb_put(skb, rx_len); 1570 - 1571 - dev_dbg(adapter->dev, "info: rx_len = %d skb->len = %d\n", 1572 - rx_len, skb->len); 1573 - 1574 - if (mwifiex_sdio_card_to_host_mp_aggr(adapter, skb, 1459 + if (mwifiex_sdio_card_to_host_mp_aggr(adapter, rx_len, 1575 1460 port)) { 1576 1461 dev_err(adapter->dev, "card_to_host_mpa failed:" 1577 1462 " int status=%#x\n", sdio_ireg); ··· 1827 1736 u32 mpa_tx_buf_size, u32 mpa_rx_buf_size) 1828 1737 { 1829 1738 struct sdio_mmc_card *card = adapter->card; 1739 + u32 rx_buf_size; 1830 1740 int ret = 0; 1831 1741 1832 1742 card->mpa_tx.buf = kzalloc(mpa_tx_buf_size, GFP_KERNEL); ··· 1838 1746 1839 1747 card->mpa_tx.buf_size = mpa_tx_buf_size; 1840 1748 1841 - card->mpa_rx.buf = kzalloc(mpa_rx_buf_size, GFP_KERNEL); 1749 + rx_buf_size = max_t(u32, mpa_rx_buf_size, 1750 + (u32)SDIO_MAX_AGGR_BUF_SIZE); 1751 + card->mpa_rx.buf = 
kzalloc(rx_buf_size, GFP_KERNEL); 1842 1752 if (!card->mpa_rx.buf) { 1843 1753 ret = -1; 1844 1754 goto error; 1845 1755 } 1846 1756 1847 - card->mpa_rx.buf_size = mpa_rx_buf_size; 1757 + card->mpa_rx.buf_size = rx_buf_size; 1848 1758 1849 1759 error: 1850 1760 if (ret) { ··· 2045 1951 port, card->mp_data_port_mask); 2046 1952 } 2047 1953 1954 + static struct mwifiex_adapter *save_adapter; 2048 1955 static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter) 2049 1956 { 2050 1957 struct sdio_mmc_card *card = adapter->card; ··· 2114 2019 } 2115 2020 2116 2021 /* This function dump firmware memory to file */ 2117 - static void mwifiex_sdio_fw_dump_work(struct work_struct *work) 2022 + static void mwifiex_sdio_fw_dump_work(struct mwifiex_adapter *adapter) 2118 2023 { 2119 - struct mwifiex_adapter *adapter = 2120 - container_of(work, struct mwifiex_adapter, iface_work); 2121 2024 struct sdio_mmc_card *card = adapter->card; 2122 2025 int ret = 0; 2123 2026 unsigned int reg, reg_start, reg_end; ··· 2237 2144 2238 2145 static void mwifiex_sdio_work(struct work_struct *work) 2239 2146 { 2240 - struct mwifiex_adapter *adapter = 2241 - container_of(work, struct mwifiex_adapter, iface_work); 2242 - 2243 - if (test_and_clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, 2244 - &adapter->iface_work_flags)) 2245 - mwifiex_sdio_card_reset_work(adapter); 2246 2147 if (test_and_clear_bit(MWIFIEX_IFACE_WORK_FW_DUMP, 2247 - &adapter->iface_work_flags)) 2248 - mwifiex_sdio_fw_dump_work(work); 2148 + &iface_work_flags)) 2149 + mwifiex_sdio_fw_dump_work(save_adapter); 2150 + if (test_and_clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, 2151 + &iface_work_flags)) 2152 + mwifiex_sdio_card_reset_work(save_adapter); 2249 2153 } 2250 2154 2155 + static DECLARE_WORK(sdio_work, mwifiex_sdio_work); 2251 2156 /* This function resets the card */ 2252 2157 static void mwifiex_sdio_card_reset(struct mwifiex_adapter *adapter) 2253 2158 { 2254 - if (test_bit(MWIFIEX_IFACE_WORK_CARD_RESET, 
&adapter->iface_work_flags)) 2159 + save_adapter = adapter; 2160 + if (test_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &iface_work_flags)) 2255 2161 return; 2256 2162 2257 - set_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &adapter->iface_work_flags); 2163 + set_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &iface_work_flags); 2258 2164 2259 - schedule_work(&adapter->iface_work); 2165 + schedule_work(&sdio_work); 2260 2166 } 2261 2167 2262 2168 /* This function dumps FW information */ 2263 2169 static void mwifiex_sdio_fw_dump(struct mwifiex_adapter *adapter) 2264 2170 { 2265 - if (test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &adapter->iface_work_flags)) 2171 + save_adapter = adapter; 2172 + if (test_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags)) 2266 2173 return; 2267 2174 2268 - set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &adapter->iface_work_flags); 2269 - schedule_work(&adapter->iface_work); 2175 + set_bit(MWIFIEX_IFACE_WORK_FW_DUMP, &iface_work_flags); 2176 + schedule_work(&sdio_work); 2270 2177 } 2271 2178 2272 2179 /* Function to dump SDIO function registers and SDIO scratch registers in case ··· 2382 2289 .cmdrsp_complete = mwifiex_sdio_cmdrsp_complete, 2383 2290 .event_complete = mwifiex_sdio_event_complete, 2384 2291 .card_reset = mwifiex_sdio_card_reset, 2385 - .iface_work = mwifiex_sdio_work, 2386 2292 .fw_dump = mwifiex_sdio_fw_dump, 2387 2293 .reg_dump = mwifiex_sdio_reg_dump, 2294 + .deaggr_pkt = mwifiex_deaggr_sdio_pkt, 2388 2295 }; 2389 2296 2390 2297 /* ··· 2421 2328 2422 2329 /* Set the flag as user is removing this module. */ 2423 2330 user_rmmod = 1; 2331 + cancel_work_sync(&sdio_work); 2424 2332 2425 2333 sdio_unregister_driver(&mwifiex_sdio); 2426 2334 }
+8 -6
drivers/net/wireless/mwifiex/sdio.h
··· 67 67 68 68 #define MWIFIEX_MP_AGGR_BUF_SIZE_16K (16384) 69 69 #define MWIFIEX_MP_AGGR_BUF_SIZE_32K (32768) 70 + /* we leave one block of 256 bytes for DMA alignment*/ 71 + #define MWIFIEX_MP_AGGR_BUF_SIZE_MAX (65280) 70 72 71 73 /* Misc. Config Register : Auto Re-enable interrupts */ 72 74 #define AUTO_RE_ENABLE_INT BIT(4) ··· 460 458 .max_ports = 32, 461 459 .mp_agg_pkt_limit = 16, 462 460 .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K, 463 - .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K, 464 - .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K, 461 + .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX, 462 + .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX, 465 463 .supports_sdio_new_mode = true, 466 464 .has_control_mask = false, 467 465 .can_dump_fw = true, ··· 573 571 574 572 /* Prepare to copy current packet from card to SDIO Rx aggregation buffer */ 575 573 static inline void mp_rx_aggr_setup(struct sdio_mmc_card *card, 576 - struct sk_buff *skb, u8 port) 574 + u16 rx_len, u8 port) 577 575 { 578 - card->mpa_rx.buf_len += skb->len; 576 + card->mpa_rx.buf_len += rx_len; 579 577 580 578 if (!card->mpa_rx.pkt_cnt) 581 579 card->mpa_rx.start_port = port; ··· 588 586 else 589 587 card->mpa_rx.ports |= 1 << (card->mpa_rx.pkt_cnt + 1); 590 588 } 591 - card->mpa_rx.skb_arr[card->mpa_rx.pkt_cnt] = skb; 592 - card->mpa_rx.len_arr[card->mpa_rx.pkt_cnt] = skb->len; 589 + card->mpa_rx.skb_arr[card->mpa_rx.pkt_cnt] = NULL; 590 + card->mpa_rx.len_arr[card->mpa_rx.pkt_cnt] = rx_len; 593 591 card->mpa_rx.pkt_cnt++; 594 592 } 595 593 #endif /* _MWIFIEX_SDIO_H */
+54 -7
drivers/net/wireless/mwifiex/sta_cmd.c
··· 1370 1370 struct mwifiex_ds_mef_cfg *mef) 1371 1371 { 1372 1372 struct host_cmd_ds_mef_cfg *mef_cfg = &cmd->params.mef_cfg; 1373 + struct mwifiex_fw_mef_entry *mef_entry = NULL; 1373 1374 u8 *pos = (u8 *)mef_cfg; 1375 + u16 i; 1374 1376 1375 1377 cmd->command = cpu_to_le16(HostCmd_CMD_MEF_CFG); 1376 1378 1377 1379 mef_cfg->criteria = cpu_to_le32(mef->criteria); 1378 1380 mef_cfg->num_entries = cpu_to_le16(mef->num_entries); 1379 1381 pos += sizeof(*mef_cfg); 1380 - mef_cfg->mef_entry->mode = mef->mef_entry->mode; 1381 - mef_cfg->mef_entry->action = mef->mef_entry->action; 1382 - pos += sizeof(*(mef_cfg->mef_entry)); 1383 1382 1384 - if (mwifiex_cmd_append_rpn_expression(priv, mef->mef_entry, &pos)) 1385 - return -1; 1383 + for (i = 0; i < mef->num_entries; i++) { 1384 + mef_entry = (struct mwifiex_fw_mef_entry *)pos; 1385 + mef_entry->mode = mef->mef_entry[i].mode; 1386 + mef_entry->action = mef->mef_entry[i].action; 1387 + pos += sizeof(*mef_cfg->mef_entry); 1386 1388 1387 - mef_cfg->mef_entry->exprsize = 1388 - cpu_to_le16(pos - mef_cfg->mef_entry->expr); 1389 + if (mwifiex_cmd_append_rpn_expression(priv, 1390 + &mef->mef_entry[i], &pos)) 1391 + return -1; 1392 + 1393 + mef_entry->exprsize = 1394 + cpu_to_le16(pos - mef_entry->expr); 1395 + } 1389 1396 cmd->size = cpu_to_le16((u16) (pos - (u8 *)mef_cfg) + S_DS_GEN); 1390 1397 1391 1398 return 0; ··· 1671 1664 1672 1665 return 0; 1673 1666 } 1667 + 1668 + /* This function prepares command of sdio rx aggr info. 
*/ 1669 + static int mwifiex_cmd_sdio_rx_aggr_cfg(struct host_cmd_ds_command *cmd, 1670 + u16 cmd_action, void *data_buf) 1671 + { 1672 + struct host_cmd_sdio_sp_rx_aggr_cfg *cfg = 1673 + &cmd->params.sdio_rx_aggr_cfg; 1674 + 1675 + cmd->command = cpu_to_le16(HostCmd_CMD_SDIO_SP_RX_AGGR_CFG); 1676 + cmd->size = 1677 + cpu_to_le16(sizeof(struct host_cmd_sdio_sp_rx_aggr_cfg) + 1678 + S_DS_GEN); 1679 + cfg->action = cmd_action; 1680 + if (cmd_action == HostCmd_ACT_GEN_SET) 1681 + cfg->enable = *(u8 *)data_buf; 1682 + 1683 + return 0; 1684 + } 1685 + 1674 1686 /* 1675 1687 * This function prepares the commands before sending them to the firmware. 1676 1688 * ··· 1927 1901 ret = mwifiex_cmd_issue_chan_report_request(priv, cmd_ptr, 1928 1902 data_buf); 1929 1903 break; 1904 + case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG: 1905 + ret = mwifiex_cmd_sdio_rx_aggr_cfg(cmd_ptr, cmd_action, 1906 + data_buf); 1907 + break; 1930 1908 default: 1931 1909 dev_err(priv->adapter->dev, 1932 1910 "PREP_CMD: unknown cmd- %#x\n", cmd_no); ··· 1970 1940 struct mwifiex_ds_auto_ds auto_ds; 1971 1941 enum state_11d_t state_11d; 1972 1942 struct mwifiex_ds_11n_tx_cfg tx_cfg; 1943 + u8 sdio_sp_rx_aggr_enable; 1973 1944 1974 1945 if (first_sta) { 1975 1946 if (priv->adapter->iface_type == MWIFIEX_PCIE) { ··· 2013 1982 HostCmd_ACT_GEN_GET, 0, NULL, true); 2014 1983 if (ret) 2015 1984 return -1; 1985 + 1986 + /** Set SDIO Single Port RX Aggr Info */ 1987 + if (priv->adapter->iface_type == MWIFIEX_SDIO && 1988 + ISSUPP_SDIO_SPA_ENABLED(priv->adapter->fw_cap_info)) { 1989 + sdio_sp_rx_aggr_enable = true; 1990 + ret = mwifiex_send_cmd(priv, 1991 + HostCmd_CMD_SDIO_SP_RX_AGGR_CFG, 1992 + HostCmd_ACT_GEN_SET, 0, 1993 + &sdio_sp_rx_aggr_enable, 1994 + true); 1995 + if (ret) { 1996 + dev_err(priv->adapter->dev, 1997 + "error while enabling SP aggregation..disable it"); 1998 + adapter->sdio_rx_aggr_enable = false; 1999 + } 2000 + } 2016 2001 2017 2002 /* Reconfigure tx buf size */ 2018 2003 ret = 
mwifiex_send_cmd(priv, HostCmd_CMD_RECONFIGURE_TX_BUFF,
+21
drivers/net/wireless/mwifiex/sta_cmdresp.c
··· 90 90 case HostCmd_CMD_MAC_CONTROL: 91 91 break; 92 92 93 + case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG: 94 + dev_err(priv->adapter->dev, "SDIO RX single-port aggregation Not support\n"); 95 + break; 96 + 93 97 default: 94 98 break; 95 99 } ··· 947 943 return 0; 948 944 } 949 945 946 + /** This Function handles the command response of sdio rx aggr */ 947 + static int mwifiex_ret_sdio_rx_aggr_cfg(struct mwifiex_private *priv, 948 + struct host_cmd_ds_command *resp) 949 + { 950 + struct mwifiex_adapter *adapter = priv->adapter; 951 + struct host_cmd_sdio_sp_rx_aggr_cfg *cfg = 952 + &resp->params.sdio_rx_aggr_cfg; 953 + 954 + adapter->sdio_rx_aggr_enable = cfg->enable; 955 + adapter->sdio_rx_block_size = le16_to_cpu(cfg->block_size); 956 + 957 + return 0; 958 + } 959 + 950 960 /* 951 961 * This function handles the command responses. 952 962 * ··· 1141 1123 ret = mwifiex_ret_tdls_oper(priv, resp); 1142 1124 break; 1143 1125 case HostCmd_CMD_CHAN_REPORT_REQUEST: 1126 + break; 1127 + case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG: 1128 + ret = mwifiex_ret_sdio_rx_aggr_cfg(priv, resp); 1144 1129 break; 1145 1130 default: 1146 1131 dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n",
+2 -2
drivers/net/wireless/mwifiex/sta_event.c
··· 312 312 adapter->ps_state = PS_STATE_AWAKE; 313 313 adapter->pm_wakeup_card_req = false; 314 314 adapter->pm_wakeup_fw_try = false; 315 - del_timer_sync(&adapter->wakeup_timer); 315 + del_timer(&adapter->wakeup_timer); 316 316 break; 317 317 } 318 318 if (!mwifiex_send_null_packet ··· 327 327 adapter->ps_state = PS_STATE_AWAKE; 328 328 adapter->pm_wakeup_card_req = false; 329 329 adapter->pm_wakeup_fw_try = false; 330 - del_timer_sync(&adapter->wakeup_timer); 330 + del_timer(&adapter->wakeup_timer); 331 331 332 332 break; 333 333
+127 -1
drivers/net/wireless/mwifiex/txrx.c
··· 92 92 else 93 93 head_ptr = mwifiex_process_sta_txpd(priv, skb); 94 94 95 + if ((adapter->data_sent || adapter->tx_lock_flag) && head_ptr) { 96 + skb_queue_tail(&adapter->tx_data_q, skb); 97 + atomic_inc(&adapter->tx_queued); 98 + return 0; 99 + } 100 + 95 101 if (head_ptr) { 96 102 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) 97 103 local_tx_pd = (struct txpd *)(head_ptr + hroom); ··· 148 142 return ret; 149 143 } 150 144 145 + static int mwifiex_host_to_card(struct mwifiex_adapter *adapter, 146 + struct sk_buff *skb, 147 + struct mwifiex_tx_param *tx_param) 148 + { 149 + struct txpd *local_tx_pd = NULL; 150 + u8 *head_ptr = skb->data; 151 + int ret = 0; 152 + struct mwifiex_private *priv; 153 + struct mwifiex_txinfo *tx_info; 154 + 155 + tx_info = MWIFIEX_SKB_TXCB(skb); 156 + priv = mwifiex_get_priv_by_id(adapter, tx_info->bss_num, 157 + tx_info->bss_type); 158 + if (!priv) { 159 + dev_err(adapter->dev, "data: priv not found. Drop TX packet\n"); 160 + adapter->dbg.num_tx_host_to_card_failure++; 161 + mwifiex_write_data_complete(adapter, skb, 0, 0); 162 + return ret; 163 + } 164 + if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) { 165 + if (adapter->iface_type == MWIFIEX_USB) 166 + local_tx_pd = (struct txpd *)head_ptr; 167 + else 168 + local_tx_pd = (struct txpd *) (head_ptr + 169 + INTF_HEADER_LEN); 170 + } 171 + 172 + if (adapter->iface_type == MWIFIEX_USB) { 173 + adapter->data_sent = true; 174 + ret = adapter->if_ops.host_to_card(adapter, 175 + MWIFIEX_USB_EP_DATA, 176 + skb, NULL); 177 + } else { 178 + ret = adapter->if_ops.host_to_card(adapter, 179 + MWIFIEX_TYPE_DATA, 180 + skb, tx_param); 181 + } 182 + switch (ret) { 183 + case -ENOSR: 184 + dev_err(adapter->dev, "data: -ENOSR is returned\n"); 185 + break; 186 + case -EBUSY: 187 + if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) && 188 + (adapter->pps_uapsd_mode) && 189 + (adapter->tx_lock_flag)) { 190 + priv->adapter->tx_lock_flag = false; 191 + if (local_tx_pd) 192 + local_tx_pd->flags = 0; 193 + 
} 194 + skb_queue_head(&adapter->tx_data_q, skb); 195 + if (tx_info->flags & MWIFIEX_BUF_FLAG_AGGR_PKT) 196 + atomic_add(tx_info->aggr_num, &adapter->tx_queued); 197 + else 198 + atomic_inc(&adapter->tx_queued); 199 + dev_dbg(adapter->dev, "data: -EBUSY is returned\n"); 200 + break; 201 + case -1: 202 + if (adapter->iface_type != MWIFIEX_PCIE) 203 + adapter->data_sent = false; 204 + dev_err(adapter->dev, "mwifiex_write_data_async failed: 0x%X\n", 205 + ret); 206 + adapter->dbg.num_tx_host_to_card_failure++; 207 + mwifiex_write_data_complete(adapter, skb, 0, ret); 208 + break; 209 + case -EINPROGRESS: 210 + if (adapter->iface_type != MWIFIEX_PCIE) 211 + adapter->data_sent = false; 212 + break; 213 + case 0: 214 + mwifiex_write_data_complete(adapter, skb, 0, ret); 215 + break; 216 + default: 217 + break; 218 + } 219 + return ret; 220 + } 221 + 222 + static int 223 + mwifiex_dequeue_tx_queue(struct mwifiex_adapter *adapter) 224 + { 225 + struct sk_buff *skb, *skb_next; 226 + struct mwifiex_txinfo *tx_info; 227 + struct mwifiex_tx_param tx_param; 228 + 229 + skb = skb_dequeue(&adapter->tx_data_q); 230 + if (!skb) 231 + return -1; 232 + 233 + tx_info = MWIFIEX_SKB_TXCB(skb); 234 + if (tx_info->flags & MWIFIEX_BUF_FLAG_AGGR_PKT) 235 + atomic_sub(tx_info->aggr_num, &adapter->tx_queued); 236 + else 237 + atomic_dec(&adapter->tx_queued); 238 + 239 + if (!skb_queue_empty(&adapter->tx_data_q)) 240 + skb_next = skb_peek(&adapter->tx_data_q); 241 + else 242 + skb_next = NULL; 243 + tx_param.next_pkt_len = ((skb_next) ? 
skb_next->len : 0); 244 + if (!tx_param.next_pkt_len) { 245 + if (!mwifiex_wmm_lists_empty(adapter)) 246 + tx_param.next_pkt_len = 1; 247 + } 248 + return mwifiex_host_to_card(adapter, skb, &tx_param); 249 + } 250 + 251 + void 252 + mwifiex_process_tx_queue(struct mwifiex_adapter *adapter) 253 + { 254 + do { 255 + if (adapter->data_sent || adapter->tx_lock_flag) 256 + break; 257 + if (mwifiex_dequeue_tx_queue(adapter)) 258 + break; 259 + } while (!skb_queue_empty(&adapter->tx_data_q)); 260 + } 261 + 151 262 /* 152 263 * Packet send completion callback handler. 153 264 * ··· 302 179 priv->stats.tx_errors++; 303 180 } 304 181 305 - if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT) 182 + if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT) { 306 183 atomic_dec_return(&adapter->pending_bridged_pkts); 184 + if (tx_info->flags & MWIFIEX_BUF_FLAG_AGGR_PKT) 185 + goto done; 186 + } 307 187 308 188 if (aggr) 309 189 /* For skb_aggr, do not wake up tx queue */
+3 -3
drivers/net/wireless/mwifiex/usb.c
··· 193 193 dev_dbg(adapter->dev, "info: recv_length=%d, status=%d\n", 194 194 recv_length, status); 195 195 if (status == -EINPROGRESS) { 196 - queue_work(adapter->workqueue, &adapter->main_work); 196 + mwifiex_queue_main_work(adapter); 197 197 198 198 /* urb for data_ep is re-submitted now; 199 199 * urb for cmd_ep will be re-submitted in callback ··· 262 262 urb->status ? -1 : 0); 263 263 } 264 264 265 - queue_work(adapter->workqueue, &adapter->main_work); 265 + mwifiex_queue_main_work(adapter); 266 266 267 267 return; 268 268 } ··· 1006 1006 { 1007 1007 /* Simulation of HS_AWAKE event */ 1008 1008 adapter->pm_wakeup_fw_try = false; 1009 - del_timer_sync(&adapter->wakeup_timer); 1009 + del_timer(&adapter->wakeup_timer); 1010 1010 adapter->pm_wakeup_card_req = false; 1011 1011 adapter->ps_state = PS_STATE_AWAKE; 1012 1012
+2 -2
drivers/net/wireless/mwifiex/util.c
··· 632 632 atomic_set(&phist_data->sig_str[ix], 0); 633 633 } 634 634 635 - void *mwifiex_alloc_rx_buf(int rx_len, gfp_t flags) 635 + void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags) 636 636 { 637 637 struct sk_buff *skb; 638 638 int buf_len, pad; ··· 653 653 654 654 return skb; 655 655 } 656 - EXPORT_SYMBOL_GPL(mwifiex_alloc_rx_buf); 656 + EXPORT_SYMBOL_GPL(mwifiex_alloc_dma_align_buf);
+28 -20
drivers/net/wireless/mwifiex/wmm.c
··· 157 157 158 158 ra_list->is_11n_enabled = 0; 159 159 ra_list->tdls_link = false; 160 + ra_list->ba_status = BA_SETUP_NONE; 161 + ra_list->amsdu_in_ampdu = false; 160 162 if (!mwifiex_queuing_ra_based(priv)) { 161 163 if (mwifiex_get_tdls_link_status(priv, ra) == 162 164 TDLS_SETUP_COMPLETE) { ··· 576 574 * This function retrieves a particular RA list node, matching with the 577 575 * given TID and RA address. 578 576 */ 579 - static struct mwifiex_ra_list_tbl * 577 + struct mwifiex_ra_list_tbl * 580 578 mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid, 581 579 const u8 *ra_addr) 582 580 { ··· 944 942 struct mwifiex_ra_list_tbl *ptr; 945 943 struct mwifiex_tid_tbl *tid_ptr; 946 944 atomic_t *hqp; 947 - unsigned long flags_bss, flags_ra; 945 + unsigned long flags_ra; 948 946 int i, j; 949 947 950 948 /* check the BSS with highest priority first */ 951 949 for (j = adapter->priv_num - 1; j >= 0; --j) { 952 - spin_lock_irqsave(&adapter->bss_prio_tbl[j].bss_prio_lock, 953 - flags_bss); 954 - 955 950 /* iterate over BSS with the equal priority */ 956 951 list_for_each_entry(adapter->bss_prio_tbl[j].bss_prio_cur, 957 952 &adapter->bss_prio_tbl[j].bss_prio_head, ··· 984 985 } 985 986 } 986 987 987 - spin_unlock_irqrestore(&adapter->bss_prio_tbl[j].bss_prio_lock, 988 - flags_bss); 989 988 } 990 989 991 990 return NULL; 992 991 993 992 found: 994 - /* holds bss_prio_lock / ra_list_spinlock */ 993 + /* holds ra_list_spinlock */ 995 994 if (atomic_read(hqp) > i) 996 995 atomic_set(hqp, i); 997 996 spin_unlock_irqrestore(&priv_tmp->wmm.ra_list_spinlock, flags_ra); 998 - spin_unlock_irqrestore(&adapter->bss_prio_tbl[j].bss_prio_lock, 999 - flags_bss); 1000 997 1001 998 *priv = priv_tmp; 1002 999 *tid = tos_to_tid[i]; ··· 1174 1179 1175 1180 skb = skb_dequeue(&ptr->skb_head); 1176 1181 1182 + if (adapter->data_sent || adapter->tx_lock_flag) { 1183 + spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, 1184 + ra_list_flags); 1185 + 
skb_queue_tail(&adapter->tx_data_q, skb); 1186 + atomic_inc(&adapter->tx_queued); 1187 + return; 1188 + } 1189 + 1177 1190 if (!skb_queue_empty(&ptr->skb_head)) 1178 1191 skb_next = skb_peek(&ptr->skb_head); 1179 1192 else ··· 1279 1276 } 1280 1277 1281 1278 if (!ptr->is_11n_enabled || 1282 - mwifiex_is_ba_stream_setup(priv, ptr, tid) || 1283 - priv->wps.session_enable) { 1279 + ptr->ba_status || 1280 + priv->wps.session_enable) { 1284 1281 if (ptr->is_11n_enabled && 1285 - mwifiex_is_ba_stream_setup(priv, ptr, tid) && 1286 - mwifiex_is_amsdu_in_ampdu_allowed(priv, ptr, tid) && 1287 - mwifiex_is_amsdu_allowed(priv, tid) && 1288 - mwifiex_is_11n_aggragation_possible(priv, ptr, 1282 + ptr->ba_status && 1283 + ptr->amsdu_in_ampdu && 1284 + mwifiex_is_amsdu_allowed(priv, tid) && 1285 + mwifiex_is_11n_aggragation_possible(priv, ptr, 1289 1286 adapter->tx_buf_size)) 1290 1287 mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index, flags); 1291 1288 /* ra_list_spinlock has been freed in ··· 1332 1329 mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter) 1333 1330 { 1334 1331 do { 1335 - /* Check if busy */ 1336 - if (adapter->data_sent || adapter->tx_lock_flag) 1337 - break; 1338 - 1339 1332 if (mwifiex_dequeue_tx_packet(adapter)) 1340 1333 break; 1334 + if (adapter->iface_type != MWIFIEX_SDIO) { 1335 + if (adapter->data_sent || 1336 + adapter->tx_lock_flag) 1337 + break; 1338 + } else { 1339 + if (atomic_read(&adapter->tx_queued) >= 1340 + MWIFIEX_MAX_PKTS_TXQ) 1341 + break; 1342 + } 1341 1343 } while (!mwifiex_wmm_lists_empty(adapter)); 1342 1344 }
+2
drivers/net/wireless/mwifiex/wmm.h
··· 127 127 const u8 *ra_addr); 128 128 u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid); 129 129 130 + struct mwifiex_ra_list_tbl *mwifiex_wmm_get_ralist_node(struct mwifiex_private 131 + *priv, u8 tid, const u8 *ra_addr); 130 132 #endif /* !_MWIFIEX_WMM_H_ */
+9 -4
drivers/net/wireless/rt2x00/rt2800usb.c
··· 233 233 { 234 234 __le32 *reg; 235 235 u32 fw_mode; 236 + int ret; 236 237 237 238 reg = kmalloc(sizeof(*reg), GFP_KERNEL); 238 239 if (reg == NULL) ··· 243 242 * magic value USB_MODE_AUTORUN (0x11) to the device, thus the 244 243 * returned value would be invalid. 245 244 */ 246 - rt2x00usb_vendor_request(rt2x00dev, USB_DEVICE_MODE, 247 - USB_VENDOR_REQUEST_IN, 0, USB_MODE_AUTORUN, 248 - reg, sizeof(*reg), REGISTER_TIMEOUT_FIRMWARE); 245 + ret = rt2x00usb_vendor_request(rt2x00dev, USB_DEVICE_MODE, 246 + USB_VENDOR_REQUEST_IN, 0, 247 + USB_MODE_AUTORUN, reg, sizeof(*reg), 248 + REGISTER_TIMEOUT_FIRMWARE); 249 249 fw_mode = le32_to_cpu(*reg); 250 250 kfree(reg); 251 + if (ret < 0) 252 + return ret; 251 253 252 254 if ((fw_mode & 0x00000003) == 2) 253 255 return 1; ··· 293 289 if (retval) { 294 290 rt2x00_info(rt2x00dev, 295 291 "Firmware loading not required - NIC in AutoRun mode\n"); 292 + __clear_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags); 296 293 } else { 297 294 rt2x00usb_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE, 298 295 data + offset, length); ··· 379 374 static void rt2800usb_disable_radio(struct rt2x00_dev *rt2x00dev) 380 375 { 381 376 rt2800_disable_radio(rt2x00dev); 382 - rt2x00usb_disable_radio(rt2x00dev); 383 377 } 384 378 385 379 static int rt2800usb_set_state(struct rt2x00_dev *rt2x00dev, ··· 1044 1040 { USB_DEVICE(0x07d1, 0x3c17) }, 1045 1041 { USB_DEVICE(0x2001, 0x3317) }, 1046 1042 { USB_DEVICE(0x2001, 0x3c1b) }, 1043 + { USB_DEVICE(0x2001, 0x3c25) }, 1047 1044 /* Draytek */ 1048 1045 { USB_DEVICE(0x07fa, 0x7712) }, 1049 1046 /* DVICO */
+2 -2
drivers/net/wireless/rt2x00/rt2x00usb.h
··· 199 199 const unsigned int offset, 200 200 u32 *value) 201 201 { 202 - __le32 reg; 202 + __le32 reg = 0; 203 203 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ, 204 204 USB_VENDOR_REQUEST_IN, offset, 205 205 &reg, sizeof(reg)); ··· 219 219 const unsigned int offset, 220 220 u32 *value) 221 221 { 222 - __le32 reg; 222 + __le32 reg = 0; 223 223 rt2x00usb_vendor_req_buff_lock(rt2x00dev, USB_MULTI_READ, 224 224 USB_VENDOR_REQUEST_IN, offset, 225 225 &reg, sizeof(reg), REGISTER_TIMEOUT);
-1
drivers/net/wireless/rtlwifi/base.h
··· 123 123 u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx); 124 124 125 125 void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb); 126 - void rtl_watch_dog_timer_callback(unsigned long data); 127 126 int rtl_tx_agg_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 128 127 struct ieee80211_sta *sta, u16 tid, u16 *ssn); 129 128 int rtl_tx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+3 -2
drivers/net/wireless/rtlwifi/rtl8188ee/hw.c
··· 30 30 #include "../cam.h" 31 31 #include "../ps.h" 32 32 #include "../pci.h" 33 + #include "../pwrseqcmd.h" 33 34 #include "reg.h" 34 35 #include "def.h" 35 36 #include "phy.h" ··· 886 885 887 886 rtl_write_word(rtlpriv, REG_CR, 0x2ff); 888 887 rtl_write_byte(rtlpriv, REG_CR+1, 0x06); 889 - rtl_write_byte(rtlpriv, REG_CR+2, 0x00); 888 + rtl_write_byte(rtlpriv, MSR, 0x00); 890 889 891 890 if (!rtlhal->mac_func_enable) { 892 891 if (_rtl88ee_llt_table_init(hw) == false) { ··· 1278 1277 mode); 1279 1278 } 1280 1279 1281 - rtl_write_byte(rtlpriv, (MSR), bt_msr | mode); 1280 + rtl_write_byte(rtlpriv, MSR, bt_msr | mode); 1282 1281 rtlpriv->cfg->ops->led_control(hw, ledaction); 1283 1282 if (mode == MSR_AP) 1284 1283 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
+3 -4
drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
··· 1364 1364 "Network type %d not supported!\n", type); 1365 1365 goto error_out; 1366 1366 } 1367 - rtl_write_byte(rtlpriv, (MSR), bt_msr); 1367 + rtl_write_byte(rtlpriv, MSR, bt_msr); 1368 1368 rtlpriv->cfg->ops->led_control(hw, ledaction); 1369 1369 if ((bt_msr & MSR_MASK) == MSR_AP) 1370 1370 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00); ··· 1471 1471 rtl_write_word(rtlpriv, REG_BCNTCFG, 0x66FF); 1472 1472 } 1473 1473 1474 - static void _beacon_function_enable(struct ieee80211_hw *hw, bool Enable, 1475 - bool Linked) 1474 + static void _beacon_function_enable(struct ieee80211_hw *hw) 1476 1475 { 1477 1476 struct rtl_priv *rtlpriv = rtl_priv(hw); 1478 1477 ··· 1516 1517 rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x50); 1517 1518 rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x50); 1518 1519 } 1519 - _beacon_function_enable(hw, true, true); 1520 + _beacon_function_enable(hw); 1520 1521 } 1521 1522 1522 1523 void rtl92cu_set_beacon_interval(struct ieee80211_hw *hw)
+1 -1
drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
··· 497 497 "Network type %d not supported!\n", type); 498 498 return -EOPNOTSUPP; 499 499 } 500 - rtl_write_byte(rtlpriv, (REG_CR + 2), value); 500 + rtl_write_byte(rtlpriv, MSR, value); 501 501 return 0; 502 502 } 503 503
+2
drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
··· 321 321 {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/ 322 322 {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/ 323 323 {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/ 324 + {RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/ 324 325 {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/ 325 326 {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ 326 327 {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ ··· 378 377 {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/ 379 378 {RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/ 380 379 {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/ 380 + {RTL_USB_DEVICE(0x2001, 0x330d, rtl92cu_hal_cfg)}, /*D-Link DWA-131 */ 381 381 {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/ 382 382 {RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/ 383 383 {RTL_USB_DEVICE(0x2357, 0x0100, rtl92cu_hal_cfg)}, /*TP-Link WN8200ND*/
+1 -1
drivers/net/wireless/rtlwifi/rtl8192de/hw.c
··· 1126 1126 break; 1127 1127 1128 1128 } 1129 - rtl_write_byte(rtlpriv, REG_CR + 2, bt_msr); 1129 + rtl_write_byte(rtlpriv, MSR, bt_msr); 1130 1130 rtlpriv->cfg->ops->led_control(hw, ledaction); 1131 1131 if ((bt_msr & MSR_MASK) == MSR_AP) 1132 1132 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
+1 -1
drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
··· 1510 1510 mode); 1511 1511 } 1512 1512 1513 - rtl_write_byte(rtlpriv, (MSR), bt_msr | mode); 1513 + rtl_write_byte(rtlpriv, MSR, bt_msr | mode); 1514 1514 rtlpriv->cfg->ops->led_control(hw, ledaction); 1515 1515 if (mode == MSR_AP) 1516 1516 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
+1 -1
drivers/net/wireless/rtlwifi/rtl8192se/hw.c
··· 1204 1204 if (type != NL80211_IFTYPE_AP && 1205 1205 rtlpriv->mac80211.link_state < MAC80211_LINKED) 1206 1206 bt_msr = rtl_read_byte(rtlpriv, MSR) & ~MSR_LINK_MASK; 1207 - rtl_write_byte(rtlpriv, (MSR), bt_msr); 1207 + rtl_write_byte(rtlpriv, MSR, bt_msr); 1208 1208 1209 1209 temp = rtl_read_dword(rtlpriv, TCR); 1210 1210 rtl_write_dword(rtlpriv, TCR, temp & (~BIT(8)));
+1 -1
drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
··· 1183 1183 mode); 1184 1184 } 1185 1185 1186 - rtl_write_byte(rtlpriv, (MSR), bt_msr | mode); 1186 + rtl_write_byte(rtlpriv, MSR, bt_msr | mode); 1187 1187 rtlpriv->cfg->ops->led_control(hw, ledaction); 1188 1188 if (mode == MSR_AP) 1189 1189 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
+1 -1
drivers/net/wireless/rtlwifi/rtl8723be/hw.c
··· 1558 1558 mode); 1559 1559 } 1560 1560 1561 - rtl_write_byte(rtlpriv, (MSR), bt_msr | mode); 1561 + rtl_write_byte(rtlpriv, MSR, bt_msr | mode); 1562 1562 rtlpriv->cfg->ops->led_control(hw, ledaction); 1563 1563 if (mode == MSR_AP) 1564 1564 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
+2 -2
drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
··· 423 423 *((u16 *)(val+4)) = rtl_read_word(rtlpriv, REG_BSSID+4); 424 424 break; 425 425 case HW_VAR_MEDIA_STATUS: 426 - val[0] = rtl_read_byte(rtlpriv, REG_CR+2) & 0x3; 426 + val[0] = rtl_read_byte(rtlpriv, MSR) & 0x3; 427 427 break; 428 428 case HW_VAR_SLOT_TIME: 429 429 *((u8 *)(val)) = mac->slot_time; ··· 2178 2178 return 1; 2179 2179 } 2180 2180 2181 - rtl_write_byte(rtlpriv, (MSR), bt_msr); 2181 + rtl_write_byte(rtlpriv, MSR, bt_msr); 2182 2182 rtlpriv->cfg->ops->led_control(hw, ledaction); 2183 2183 if ((bt_msr & 0xfc) == MSR_AP) 2184 2184 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
+15 -1
drivers/net/wireless/rtlwifi/rtl8821ae/trx.c
··· 64 64 return ret_val; 65 65 } 66 66 67 + static u8 _rtl8821ae_evm_dbm_jaguar(char value) 68 + { 69 + char ret_val = value; 70 + 71 + /* -33dB~0dB to 33dB ~ 0dB*/ 72 + if (ret_val == -128) 73 + ret_val = 127; 74 + else if (ret_val < 0) 75 + ret_val = 0 - ret_val; 76 + 77 + ret_val = ret_val >> 1; 78 + return ret_val; 79 + } 80 + 67 81 static void query_rxphystatus(struct ieee80211_hw *hw, 68 82 struct rtl_stats *pstatus, u8 *pdesc, 69 83 struct rx_fwinfo_8821ae *p_drvinfo, ··· 260 246 261 247 for (i = 0; i < max_spatial_stream; i++) { 262 248 evm = rtl_evm_db_to_percentage(p_phystrpt->rxevm[i]); 263 - evmdbm = rtl_evm_dbm_jaguar(p_phystrpt->rxevm[i]); 249 + evmdbm = _rtl8821ae_evm_dbm_jaguar(p_phystrpt->rxevm[i]); 264 250 265 251 if (bpacket_match_bssid) { 266 252 /* Fill value in RFD, Get the first
+1 -23
drivers/net/wireless/rtlwifi/stats.c
··· 39 39 40 40 u8 rtl_evm_db_to_percentage(char value) 41 41 { 42 - char ret_val; 43 - ret_val = value; 42 + char ret_val = clamp(-value, 0, 33) * 3; 44 43 45 - if (ret_val >= 0) 46 - ret_val = 0; 47 - if (ret_val <= -33) 48 - ret_val = -33; 49 - ret_val = 0 - ret_val; 50 - ret_val *= 3; 51 44 if (ret_val == 99) 52 45 ret_val = 100; 53 46 54 47 return ret_val; 55 48 } 56 49 EXPORT_SYMBOL(rtl_evm_db_to_percentage); 57 - 58 - u8 rtl_evm_dbm_jaguar(char value) 59 - { 60 - char ret_val = value; 61 - 62 - /* -33dB~0dB to 33dB ~ 0dB*/ 63 - if (ret_val == -128) 64 - ret_val = 127; 65 - else if (ret_val < 0) 66 - ret_val = 0 - ret_val; 67 - 68 - ret_val = ret_val >> 1; 69 - return ret_val; 70 - } 71 - EXPORT_SYMBOL(rtl_evm_dbm_jaguar); 72 50 73 51 static long rtl_translate_todbm(struct ieee80211_hw *hw, 74 52 u8 signal_strength_index)
-1
drivers/net/wireless/rtlwifi/stats.h
··· 35 35 36 36 u8 rtl_query_rxpwrpercentage(char antpower); 37 37 u8 rtl_evm_db_to_percentage(char value); 38 - u8 rtl_evm_dbm_jaguar(char value); 39 38 long rtl_signal_scale_mapping(struct ieee80211_hw *hw, long currsig); 40 39 void rtl_process_phyinfo(struct ieee80211_hw *hw, u8 *buffer, 41 40 struct rtl_stats *pstatus);
+1 -1
drivers/net/wireless/ti/wl18xx/debugfs.c
··· 139 139 WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, accum_arp_pend_requests, "%u"); 140 140 WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, max_arp_queue_dep, "%u"); 141 141 142 - WL18XX_DEBUGFS_FWSTATS_FILE(rx_rate, rx_frames_per_rates, "%u"); 142 + WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(rx_rate, rx_frames_per_rates, 50); 143 143 144 144 WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_vs_rate, 145 145 AGGR_STATS_TX_AGG*AGGR_STATS_TX_RATE);
+2 -2
drivers/net/wireless/ti/wlcore/debugfs.h
··· 26 26 27 27 #include "wlcore.h" 28 28 29 - int wl1271_format_buffer(char __user *userbuf, size_t count, 30 - loff_t *ppos, char *fmt, ...); 29 + __printf(4, 5) int wl1271_format_buffer(char __user *userbuf, size_t count, 30 + loff_t *ppos, char *fmt, ...); 31 31 32 32 int wl1271_debugfs_init(struct wl1271 *wl); 33 33 void wl1271_debugfs_exit(struct wl1271 *wl);
+9
include/linux/bcma/bcma.h
··· 437 437 #ifdef CONFIG_BCMA_HOST_PCI 438 438 extern void bcma_host_pci_up(struct bcma_bus *bus); 439 439 extern void bcma_host_pci_down(struct bcma_bus *bus); 440 + extern int bcma_host_pci_irq_ctl(struct bcma_bus *bus, 441 + struct bcma_device *core, bool enable); 440 442 #else 441 443 static inline void bcma_host_pci_up(struct bcma_bus *bus) 442 444 { 443 445 } 444 446 static inline void bcma_host_pci_down(struct bcma_bus *bus) 445 447 { 448 + } 449 + static inline int bcma_host_pci_irq_ctl(struct bcma_bus *bus, 450 + struct bcma_device *core, bool enable) 451 + { 452 + if (bus->hosttype == BCMA_HOSTTYPE_PCI) 453 + return -ENOTSUPP; 454 + return 0; 446 455 } 447 456 #endif 448 457
+6 -2
include/linux/bcma/bcma_driver_pci.h
··· 238 238 #define pcicore_write16(pc, offset, val) bcma_write16((pc)->core, offset, val) 239 239 #define pcicore_write32(pc, offset, val) bcma_write32((pc)->core, offset, val) 240 240 241 - extern int bcma_core_pci_irq_ctl(struct bcma_bus *bus, 242 - struct bcma_device *core, bool enable); 241 + #ifdef CONFIG_BCMA_DRIVER_PCI 243 242 extern void bcma_core_pci_power_save(struct bcma_bus *bus, bool up); 243 + #else 244 + static inline void bcma_core_pci_power_save(struct bcma_bus *bus, bool up) 245 + { 246 + } 247 + #endif 244 248 245 249 extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev); 246 250 extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev);
+2
include/linux/mmc/sdio_ids.h
··· 33 33 #define SDIO_DEVICE_ID_BROADCOM_43341 0xa94d 34 34 #define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335 35 35 #define SDIO_DEVICE_ID_BROADCOM_43362 0xa962 36 + #define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6 37 + #define SDIO_DEVICE_ID_BROADCOM_4345 0x4345 36 38 #define SDIO_DEVICE_ID_BROADCOM_4354 0x4354 37 39 38 40 #define SDIO_VENDOR_ID_INTEL 0x0089