Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'wireless-drivers-next-for-davem-2016-02-12' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
Major changes:

wl12xx

* add device tree support for SPI

mwifiex

* add debugfs file to read chip information
* add MSI-X support for newer pcie chipsets (8997 onwards)
* add scheduled scan support
* add WoWLAN net-detect support
* firmware dump support for w8997 chipset

iwlwifi

* continue the work on multiple Rx queues
* add support for beacon storing used in low power states
* use the regular firmware image for WoWLAN
* fix 8000 devices for Big Endian machines
* more firmware debug hooks
* add support for P2P Client snoozing
* make the beacon filtering for AP mode configurable
* fix transmit queues overflow with LSO

libertas

* add support for setting power save via cfg80211
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+3046 -728
+36
Documentation/devicetree/bindings/net/wireless/ti,wlcore,spi.txt
··· 1 + * Texas Instruments wl1271 wireless lan controller 2 + 3 + The wl1271 chip can be connected via SPI or via SDIO. This 4 + document describes the binding for the SPI connected chip. 5 + 6 + Required properties: 7 + - compatible : Should be "ti,wl1271" 8 + - reg : Chip select address of device 9 + - spi-max-frequency : Maximum SPI clocking speed of device in Hz 10 + - ref-clock-frequency : Reference clock frequency 11 + - interrupt-parent, interrupts : 12 + Should contain parameters for 1 interrupt line. 13 + Interrupt parameters: parent, line number, type. 14 + - vwlan-supply : Point the node of the regulator that powers/enable the wl1271 chip 15 + 16 + Optional properties: 17 + - clock-xtal : boolean, clock is generated from XTAL 18 + 19 + - Please consult Documentation/devicetree/bindings/spi/spi-bus.txt 20 + for optional SPI connection related properties, 21 + 22 + Examples: 23 + 24 + &spi1 { 25 + wl1271@1 { 26 + compatible = "ti,wl1271"; 27 + 28 + reg = <1>; 29 + spi-max-frequency = <48000000>; 30 + clock-xtal; 31 + ref-clock-frequency = <38400000>; 32 + interrupt-parent = <&gpio3>; 33 + interrupts = <8 IRQ_TYPE_LEVEL_HIGH>; 34 + vwlan-supply = <&vwlan_fixed>; 35 + }; 36 + };
-1
drivers/bcma/bcma_private.h
··· 48 48 void bcma_core_chipcommon_init(struct bcma_drv_cc *cc); 49 49 void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable); 50 50 #ifdef CONFIG_BCMA_DRIVER_MIPS 51 - void bcma_chipco_serial_init(struct bcma_drv_cc *cc); 52 51 extern struct platform_device bcma_pflash_dev; 53 52 #endif /* CONFIG_BCMA_DRIVER_MIPS */ 54 53
+12 -4
drivers/bcma/driver_chipcommon.c
··· 15 15 #include <linux/platform_device.h> 16 16 #include <linux/bcma/bcma.h> 17 17 18 + static void bcma_chipco_serial_init(struct bcma_drv_cc *cc); 19 + 18 20 static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset, 19 21 u32 mask, u32 value) 20 22 { ··· 117 115 118 116 void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc) 119 117 { 118 + struct bcma_bus *bus = cc->core->bus; 119 + 120 120 if (cc->early_setup_done) 121 121 return; 122 122 ··· 132 128 133 129 if (cc->capabilities & BCMA_CC_CAP_PMU) 134 130 bcma_pmu_early_init(cc); 131 + 132 + if (IS_BUILTIN(CONFIG_BCM47XX) && bus->hosttype == BCMA_HOSTTYPE_SOC) 133 + bcma_chipco_serial_init(cc); 135 134 136 135 cc->early_setup_done = true; 137 136 } ··· 192 185 ticks = 2; 193 186 else if (ticks > maxt) 194 187 ticks = maxt; 195 - bcma_cc_write32(cc, BCMA_CC_PMU_WATCHDOG, ticks); 188 + bcma_pmu_write32(cc, BCMA_CC_PMU_WATCHDOG, ticks); 196 189 } else { 197 190 struct bcma_bus *bus = cc->core->bus; 198 191 199 192 if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4707 && 193 + bus->chipinfo.id != BCMA_CHIP_ID_BCM47094 && 200 194 bus->chipinfo.id != BCMA_CHIP_ID_BCM53018) 201 195 bcma_core_set_clockmode(cc->core, 202 196 ticks ? BCMA_CLKMODE_FAST : BCMA_CLKMODE_DYNAMIC); ··· 322 314 return res; 323 315 } 324 316 325 - #ifdef CONFIG_BCMA_DRIVER_MIPS 326 - void bcma_chipco_serial_init(struct bcma_drv_cc *cc) 317 + static void bcma_chipco_serial_init(struct bcma_drv_cc *cc) 327 318 { 319 + #if IS_BUILTIN(CONFIG_BCM47XX) 328 320 unsigned int irq; 329 321 u32 baud_base; 330 322 u32 i; ··· 366 358 ports[i].baud_base = baud_base; 367 359 ports[i].reg_shift = 0; 368 360 } 361 + #endif /* CONFIG_BCM47XX */ 369 362 } 370 - #endif /* CONFIG_BCMA_DRIVER_MIPS */
+52 -42
drivers/bcma/driver_chipcommon_pmu.c
··· 15 15 16 16 u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset) 17 17 { 18 - bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset); 19 - bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR); 20 - return bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA); 18 + bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset); 19 + bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_ADDR); 20 + return bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_DATA); 21 21 } 22 22 EXPORT_SYMBOL_GPL(bcma_chipco_pll_read); 23 23 24 24 void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset, u32 value) 25 25 { 26 - bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset); 27 - bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR); 28 - bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, value); 26 + bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset); 27 + bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_ADDR); 28 + bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, value); 29 29 } 30 30 EXPORT_SYMBOL_GPL(bcma_chipco_pll_write); 31 31 32 32 void bcma_chipco_pll_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask, 33 33 u32 set) 34 34 { 35 - bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset); 36 - bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR); 37 - bcma_cc_maskset32(cc, BCMA_CC_PLLCTL_DATA, mask, set); 35 + bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset); 36 + bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_ADDR); 37 + bcma_pmu_maskset32(cc, BCMA_CC_PMU_PLLCTL_DATA, mask, set); 38 38 } 39 39 EXPORT_SYMBOL_GPL(bcma_chipco_pll_maskset); 40 40 41 41 void bcma_chipco_chipctl_maskset(struct bcma_drv_cc *cc, 42 42 u32 offset, u32 mask, u32 set) 43 43 { 44 - bcma_cc_write32(cc, BCMA_CC_CHIPCTL_ADDR, offset); 45 - bcma_cc_read32(cc, BCMA_CC_CHIPCTL_ADDR); 46 - bcma_cc_maskset32(cc, BCMA_CC_CHIPCTL_DATA, mask, set); 44 + bcma_pmu_write32(cc, BCMA_CC_PMU_CHIPCTL_ADDR, offset); 45 + bcma_pmu_read32(cc, BCMA_CC_PMU_CHIPCTL_ADDR); 46 + bcma_pmu_maskset32(cc, BCMA_CC_PMU_CHIPCTL_DATA, mask, set); 47 47 } 48 48 EXPORT_SYMBOL_GPL(bcma_chipco_chipctl_maskset); 49 49 50 50 void 
bcma_chipco_regctl_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask, 51 51 u32 set) 52 52 { 53 - bcma_cc_write32(cc, BCMA_CC_REGCTL_ADDR, offset); 54 - bcma_cc_read32(cc, BCMA_CC_REGCTL_ADDR); 55 - bcma_cc_maskset32(cc, BCMA_CC_REGCTL_DATA, mask, set); 53 + bcma_pmu_write32(cc, BCMA_CC_PMU_REGCTL_ADDR, offset); 54 + bcma_pmu_read32(cc, BCMA_CC_PMU_REGCTL_ADDR); 55 + bcma_pmu_maskset32(cc, BCMA_CC_PMU_REGCTL_DATA, mask, set); 56 56 } 57 57 EXPORT_SYMBOL_GPL(bcma_chipco_regctl_maskset); 58 58 ··· 60 60 { 61 61 u32 ilp_ctl, alp_hz; 62 62 63 - if (!(bcma_cc_read32(cc, BCMA_CC_PMU_STAT) & 63 + if (!(bcma_pmu_read32(cc, BCMA_CC_PMU_STAT) & 64 64 BCMA_CC_PMU_STAT_EXT_LPO_AVAIL)) 65 65 return 0; 66 66 67 - bcma_cc_write32(cc, BCMA_CC_PMU_XTAL_FREQ, 68 - BIT(BCMA_CC_PMU_XTAL_FREQ_MEASURE_SHIFT)); 67 + bcma_pmu_write32(cc, BCMA_CC_PMU_XTAL_FREQ, 68 + BIT(BCMA_CC_PMU_XTAL_FREQ_MEASURE_SHIFT)); 69 69 usleep_range(1000, 2000); 70 70 71 - ilp_ctl = bcma_cc_read32(cc, BCMA_CC_PMU_XTAL_FREQ); 71 + ilp_ctl = bcma_pmu_read32(cc, BCMA_CC_PMU_XTAL_FREQ); 72 72 ilp_ctl &= BCMA_CC_PMU_XTAL_FREQ_ILPCTL_MASK; 73 73 74 - bcma_cc_write32(cc, BCMA_CC_PMU_XTAL_FREQ, 0); 74 + bcma_pmu_write32(cc, BCMA_CC_PMU_XTAL_FREQ, 0); 75 75 76 76 alp_hz = ilp_ctl * 32768 / 4; 77 77 return (alp_hz + 50000) / 100000 * 100; ··· 127 127 mask = (u32)~(BCMA_RES_4314_HT_AVAIL | 128 128 BCMA_RES_4314_MACPHY_CLK_AVAIL); 129 129 130 - bcma_cc_mask32(cc, BCMA_CC_PMU_MINRES_MSK, mask); 131 - bcma_cc_mask32(cc, BCMA_CC_PMU_MAXRES_MSK, mask); 130 + bcma_pmu_mask32(cc, BCMA_CC_PMU_MINRES_MSK, mask); 131 + bcma_pmu_mask32(cc, BCMA_CC_PMU_MAXRES_MSK, mask); 132 132 bcma_wait_value(cc->core, BCMA_CLKCTLST, 133 133 BCMA_CLKCTLST_HAVEHT, 0, 20000); 134 134 break; ··· 140 140 141 141 /* Flush */ 142 142 if (cc->pmu.rev >= 2) 143 - bcma_cc_set32(cc, BCMA_CC_PMU_CTL, BCMA_CC_PMU_CTL_PLL_UPD); 143 + bcma_pmu_set32(cc, BCMA_CC_PMU_CTL, BCMA_CC_PMU_CTL_PLL_UPD); 144 144 145 145 /* TODO: Do we need to update OTP? 
*/ 146 146 } ··· 195 195 196 196 /* Set the resource masks. */ 197 197 if (min_msk) 198 - bcma_cc_write32(cc, BCMA_CC_PMU_MINRES_MSK, min_msk); 198 + bcma_pmu_write32(cc, BCMA_CC_PMU_MINRES_MSK, min_msk); 199 199 if (max_msk) 200 - bcma_cc_write32(cc, BCMA_CC_PMU_MAXRES_MSK, max_msk); 200 + bcma_pmu_write32(cc, BCMA_CC_PMU_MAXRES_MSK, max_msk); 201 201 202 202 /* 203 203 * Add some delay; allow resources to come up and settle. ··· 269 269 270 270 void bcma_pmu_early_init(struct bcma_drv_cc *cc) 271 271 { 272 + struct bcma_bus *bus = cc->core->bus; 272 273 u32 pmucap; 273 274 274 - pmucap = bcma_cc_read32(cc, BCMA_CC_PMU_CAP); 275 + if (cc->core->id.rev >= 35 && 276 + cc->capabilities_ext & BCMA_CC_CAP_EXT_AOB_PRESENT) { 277 + cc->pmu.core = bcma_find_core(bus, BCMA_CORE_PMU); 278 + if (!cc->pmu.core) 279 + bcma_warn(bus, "Couldn't find expected PMU core"); 280 + } 281 + if (!cc->pmu.core) 282 + cc->pmu.core = cc->core; 283 + 284 + pmucap = bcma_pmu_read32(cc, BCMA_CC_PMU_CAP); 275 285 cc->pmu.rev = (pmucap & BCMA_CC_PMU_CAP_REVISION); 276 286 277 - bcma_debug(cc->core->bus, "Found rev %u PMU (capabilities 0x%08X)\n", 278 - cc->pmu.rev, pmucap); 287 + bcma_debug(bus, "Found rev %u PMU (capabilities 0x%08X)\n", cc->pmu.rev, 288 + pmucap); 279 289 } 280 290 281 291 void bcma_pmu_init(struct bcma_drv_cc *cc) 282 292 { 283 293 if (cc->pmu.rev == 1) 284 - bcma_cc_mask32(cc, BCMA_CC_PMU_CTL, 285 - ~BCMA_CC_PMU_CTL_NOILPONW); 294 + bcma_pmu_mask32(cc, BCMA_CC_PMU_CTL, 295 + ~BCMA_CC_PMU_CTL_NOILPONW); 286 296 else 287 - bcma_cc_set32(cc, BCMA_CC_PMU_CTL, 288 - BCMA_CC_PMU_CTL_NOILPONW); 297 + bcma_pmu_set32(cc, BCMA_CC_PMU_CTL, 298 + BCMA_CC_PMU_CTL_NOILPONW); 289 299 290 300 bcma_pmu_pll_init(cc); 291 301 bcma_pmu_resources_init(cc); ··· 482 472 static void bcma_pmu_spuravoid_pll_write(struct bcma_drv_cc *cc, u32 offset, 483 473 u32 value) 484 474 { 485 - bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset); 486 - bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, value); 475 + 
bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset); 476 + bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, value); 487 477 } 488 478 489 479 void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid) ··· 507 497 bus->chipinfo.id == BCMA_CHIP_ID_BCM53572) ? 6 : 0; 508 498 509 499 /* RMW only the P1 divider */ 510 - bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, 500 + bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, 511 501 BCMA_CC_PMU_PLL_CTL0 + phypll_offset); 512 - tmp = bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA); 502 + tmp = bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_DATA); 513 503 tmp &= (~(BCMA_CC_PMU1_PLL0_PC0_P1DIV_MASK)); 514 504 tmp |= (bcm5357_bcm43236_p1div[spuravoid] << BCMA_CC_PMU1_PLL0_PC0_P1DIV_SHIFT); 515 - bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, tmp); 505 + bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, tmp); 516 506 517 507 /* RMW only the int feedback divider */ 518 - bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, 508 + bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, 519 509 BCMA_CC_PMU_PLL_CTL2 + phypll_offset); 520 - tmp = bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA); 510 + tmp = bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_DATA); 521 511 tmp &= ~(BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_MASK); 522 512 tmp |= (bcm5357_bcm43236_ndiv[spuravoid]) << BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_SHIFT; 523 - bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, tmp); 513 + bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, tmp); 524 514 525 515 tmp = BCMA_CC_PMU_CTL_PLL_UPD; 526 516 break; ··· 656 646 break; 657 647 } 658 648 659 - tmp |= bcma_cc_read32(cc, BCMA_CC_PMU_CTL); 660 - bcma_cc_write32(cc, BCMA_CC_PMU_CTL, tmp); 649 + tmp |= bcma_pmu_read32(cc, BCMA_CC_PMU_CTL); 650 + bcma_pmu_write32(cc, BCMA_CC_PMU_CTL, tmp); 661 651 } 662 652 EXPORT_SYMBOL_GPL(bcma_pmu_spuravoid_pllupdate);
+1
drivers/bcma/driver_chipcommon_sflash.c
··· 38 38 { "M25P32", 0x15, 0x10000, 64, }, 39 39 { "M25P64", 0x16, 0x10000, 128, }, 40 40 { "M25FL128", 0x17, 0x10000, 256, }, 41 + { "MX25L25635F", 0x18, 0x10000, 512, }, 41 42 { NULL }, 42 43 }; 43 44
+1
drivers/bcma/driver_gpio.c
··· 192 192 case BCMA_CHIP_ID_BCM4707: 193 193 case BCMA_CHIP_ID_BCM5357: 194 194 case BCMA_CHIP_ID_BCM53572: 195 + case BCMA_CHIP_ID_BCM47094: 195 196 chip->ngpio = 32; 196 197 break; 197 198 default:
-3
drivers/bcma/driver_mips.c
··· 337 337 338 338 void bcma_core_mips_early_init(struct bcma_drv_mips *mcore) 339 339 { 340 - struct bcma_bus *bus = mcore->core->bus; 341 - 342 340 if (mcore->early_setup_done) 343 341 return; 344 342 345 - bcma_chipco_serial_init(&bus->drv_cc); 346 343 bcma_core_mips_flash_detect(mcore); 347 344 348 345 mcore->early_setup_done = true;
+1 -1
drivers/bcma/host_pci.c
··· 294 294 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4358) }, 295 295 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) }, 296 296 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4360) }, 297 - { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4365) }, 297 + { PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_DELL, 0x0016) }, 298 298 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a0) }, 299 299 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) }, 300 300 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) },
+5
drivers/bcma/scan.c
··· 98 98 { BCMA_CORE_SHIM, "SHIM" }, 99 99 { BCMA_CORE_PCIE2, "PCIe Gen2" }, 100 100 { BCMA_CORE_ARM_CR4, "ARM CR4" }, 101 + { BCMA_CORE_GCI, "GCI" }, 102 + { BCMA_CORE_CMEM, "CNDS DDR2/3 memory controller" }, 103 + { BCMA_CORE_ARM_CA7, "ARM CA7" }, 101 104 { BCMA_CORE_DEFAULT, "Default" }, 102 105 }; 103 106 ··· 318 315 switch (core->id.id) { 319 316 case BCMA_CORE_4706_MAC_GBIT_COMMON: 320 317 case BCMA_CORE_NS_CHIPCOMMON_B: 318 + case BCMA_CORE_PMU: 319 + case BCMA_CORE_GCI: 321 320 /* Not used yet: case BCMA_CORE_OOB_ROUTER: */ 322 321 break; 323 322 default:
+4 -4
drivers/net/wireless/broadcom/b43/main.c
··· 1215 1215 case B43_BUS_BCMA: 1216 1216 bcma_cc = &dev->dev->bdev->bus->drv_cc; 1217 1217 1218 - bcma_cc_write32(bcma_cc, BCMA_CC_CHIPCTL_ADDR, 0); 1219 - bcma_cc_mask32(bcma_cc, BCMA_CC_CHIPCTL_DATA, ~0x4); 1220 - bcma_cc_set32(bcma_cc, BCMA_CC_CHIPCTL_DATA, 0x4); 1221 - bcma_cc_mask32(bcma_cc, BCMA_CC_CHIPCTL_DATA, ~0x4); 1218 + bcma_cc_write32(bcma_cc, BCMA_CC_PMU_CHIPCTL_ADDR, 0); 1219 + bcma_cc_mask32(bcma_cc, BCMA_CC_PMU_CHIPCTL_DATA, ~0x4); 1220 + bcma_cc_set32(bcma_cc, BCMA_CC_PMU_CHIPCTL_DATA, 0x4); 1221 + bcma_cc_mask32(bcma_cc, BCMA_CC_PMU_CHIPCTL_DATA, ~0x4); 1222 1222 break; 1223 1223 #endif 1224 1224 #ifdef CONFIG_B43_SSB
+10 -13
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
··· 247 247 brcmf_dbg(TRACE, "chandef: control %d center %d width %d\n", 248 248 ch->chan->center_freq, ch->center_freq1, ch->width); 249 249 ch_inf.chnum = ieee80211_frequency_to_channel(ch->center_freq1); 250 - primary_offset = ch->center_freq1 - ch->chan->center_freq; 250 + primary_offset = ch->chan->center_freq - ch->center_freq1; 251 251 switch (ch->width) { 252 252 case NL80211_CHAN_WIDTH_20: 253 253 case NL80211_CHAN_WIDTH_20_NOHT: ··· 256 256 break; 257 257 case NL80211_CHAN_WIDTH_40: 258 258 ch_inf.bw = BRCMU_CHAN_BW_40; 259 - if (primary_offset < 0) 259 + if (primary_offset > 0) 260 260 ch_inf.sb = BRCMU_CHAN_SB_U; 261 261 else 262 262 ch_inf.sb = BRCMU_CHAN_SB_L; 263 263 break; 264 264 case NL80211_CHAN_WIDTH_80: 265 265 ch_inf.bw = BRCMU_CHAN_BW_80; 266 - if (primary_offset < 0) { 267 - if (primary_offset < -CH_10MHZ_APART) 268 - ch_inf.sb = BRCMU_CHAN_SB_UU; 269 - else 270 - ch_inf.sb = BRCMU_CHAN_SB_UL; 271 - } else { 272 - if (primary_offset > CH_10MHZ_APART) 273 - ch_inf.sb = BRCMU_CHAN_SB_LL; 274 - else 275 - ch_inf.sb = BRCMU_CHAN_SB_LU; 276 - } 266 + if (primary_offset == -30) 267 + ch_inf.sb = BRCMU_CHAN_SB_LL; 268 + else if (primary_offset == -10) 269 + ch_inf.sb = BRCMU_CHAN_SB_LU; 270 + else if (primary_offset == 10) 271 + ch_inf.sb = BRCMU_CHAN_SB_UL; 272 + else 273 + ch_inf.sb = BRCMU_CHAN_SB_UU; 277 274 break; 278 275 case NL80211_CHAN_WIDTH_80P80: 279 276 case NL80211_CHAN_WIDTH_160:
+38 -7
drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
··· 803 803 *eromaddr -= 4; 804 804 return -EFAULT; 805 805 } 806 - } while (desc != DMP_DESC_ADDRESS); 806 + } while (desc != DMP_DESC_ADDRESS && 807 + desc != DMP_DESC_COMPONENT); 808 + 809 + /* stop if we crossed current component border */ 810 + if (desc == DMP_DESC_COMPONENT) { 811 + *eromaddr -= 4; 812 + return 0; 813 + } 807 814 808 815 /* skip upper 32-bit address descriptor */ 809 816 if (val & DMP_DESC_ADDRSIZE_GT32) ··· 883 876 rev = (val & DMP_COMP_REVISION) >> DMP_COMP_REVISION_S; 884 877 885 878 /* need core with ports */ 886 - if (nmw + nsw == 0) 879 + if (nmw + nsw == 0 && 880 + id != BCMA_CORE_PMU) 887 881 continue; 888 882 889 883 /* try to obtain register address info */ ··· 1014 1006 { 1015 1007 struct brcmf_chip *pub; 1016 1008 struct brcmf_core_priv *cc; 1009 + struct brcmf_core *pmu; 1017 1010 u32 base; 1018 1011 u32 val; 1019 1012 int ret = 0; ··· 1026 1017 /* get chipcommon capabilites */ 1027 1018 pub->cc_caps = chip->ops->read32(chip->ctx, 1028 1019 CORE_CC_REG(base, capabilities)); 1020 + pub->cc_caps_ext = chip->ops->read32(chip->ctx, 1021 + CORE_CC_REG(base, 1022 + capabilities_ext)); 1029 1023 1030 1024 /* get pmu caps & rev */ 1025 + pmu = brcmf_chip_get_pmu(pub); /* after reading cc_caps_ext */ 1031 1026 if (pub->cc_caps & CC_CAP_PMU) { 1032 1027 val = chip->ops->read32(chip->ctx, 1033 - CORE_CC_REG(base, pmucapabilities)); 1028 + CORE_CC_REG(pmu->base, pmucapabilities)); 1034 1029 pub->pmurev = val & PCAP_REV_MASK; 1035 1030 pub->pmucaps = val; 1036 1031 } ··· 1131 1118 if (WARN_ON(!cc || cc->pub.id != BCMA_CORE_CHIPCOMMON)) 1132 1119 return brcmf_chip_get_core(pub, BCMA_CORE_CHIPCOMMON); 1133 1120 return &cc->pub; 1121 + } 1122 + 1123 + struct brcmf_core *brcmf_chip_get_pmu(struct brcmf_chip *pub) 1124 + { 1125 + struct brcmf_core *cc = brcmf_chip_get_chipcommon(pub); 1126 + struct brcmf_core *pmu; 1127 + 1128 + /* See if there is separated PMU core available */ 1129 + if (cc->rev >= 35 && 1130 + pub->cc_caps_ext & 
BCMA_CC_CAP_EXT_AOB_PRESENT) { 1131 + pmu = brcmf_chip_get_core(pub, BCMA_CORE_PMU); 1132 + if (pmu) 1133 + return pmu; 1134 + } 1135 + 1136 + /* Fallback to ChipCommon core for older hardware */ 1137 + return cc; 1134 1138 } 1135 1139 1136 1140 bool brcmf_chip_iscoreup(struct brcmf_core *pub) ··· 1320 1290 { 1321 1291 u32 base, addr, reg, pmu_cc3_mask = ~0; 1322 1292 struct brcmf_chip_priv *chip; 1293 + struct brcmf_core *pmu = brcmf_chip_get_pmu(pub); 1323 1294 1324 1295 brcmf_dbg(TRACE, "Enter\n"); 1325 1296 ··· 1340 1309 case BRCM_CC_4335_CHIP_ID: 1341 1310 case BRCM_CC_4339_CHIP_ID: 1342 1311 /* read PMU chipcontrol register 3 */ 1343 - addr = CORE_CC_REG(base, chipcontrol_addr); 1312 + addr = CORE_CC_REG(pmu->base, chipcontrol_addr); 1344 1313 chip->ops->write32(chip->ctx, addr, 3); 1345 - addr = CORE_CC_REG(base, chipcontrol_data); 1314 + addr = CORE_CC_REG(pmu->base, chipcontrol_data); 1346 1315 reg = chip->ops->read32(chip->ctx, addr); 1347 1316 return (reg & pmu_cc3_mask) != 0; 1348 1317 case BRCM_CC_43430_CHIP_ID: ··· 1350 1319 reg = chip->ops->read32(chip->ctx, addr); 1351 1320 return reg != 0; 1352 1321 default: 1353 - addr = CORE_CC_REG(base, pmucapabilities_ext); 1322 + addr = CORE_CC_REG(pmu->base, pmucapabilities_ext); 1354 1323 reg = chip->ops->read32(chip->ctx, addr); 1355 1324 if ((reg & PCAPEXT_SR_SUPPORTED_MASK) == 0) 1356 1325 return false; 1357 1326 1358 - addr = CORE_CC_REG(base, retention_ctl); 1327 + addr = CORE_CC_REG(pmu->base, retention_ctl); 1359 1328 reg = chip->ops->read32(chip->ctx, addr); 1360 1329 return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK | 1361 1330 PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
+3
drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h
··· 27 27 * @chip: chip identifier. 28 28 * @chiprev: chip revision. 29 29 * @cc_caps: chipcommon core capabilities. 30 + * @cc_caps_ext: chipcommon core extended capabilities. 30 31 * @pmucaps: PMU capabilities. 31 32 * @pmurev: PMU revision. 32 33 * @rambase: RAM base address (only applicable for ARM CR4 chips). ··· 39 38 u32 chip; 40 39 u32 chiprev; 41 40 u32 cc_caps; 41 + u32 cc_caps_ext; 42 42 u32 pmucaps; 43 43 u32 pmurev; 44 44 u32 rambase; ··· 85 83 void brcmf_chip_detach(struct brcmf_chip *chip); 86 84 struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *chip, u16 coreid); 87 85 struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *chip); 86 + struct brcmf_core *brcmf_chip_get_pmu(struct brcmf_chip *pub); 88 87 bool brcmf_chip_iscoreup(struct brcmf_core *core); 89 88 void brcmf_chip_coredisable(struct brcmf_core *core, u32 prereset, u32 reset); 90 89 void brcmf_chip_resetcore(struct brcmf_core *core, u32 prereset, u32 reset,
+1 -1
drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
··· 93 93 c = nvp->data[nvp->pos]; 94 94 if (c == '\n') 95 95 return COMMENT; 96 - if (is_whitespace(c)) 96 + if (is_whitespace(c) || c == '\0') 97 97 goto proceed; 98 98 if (c == '#') 99 99 return COMMENT;
+4
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
··· 1951 1951 1952 1952 #define BRCMF_PCIE_DEVICE(dev_id) { BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\ 1953 1953 PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 } 1954 + #define BRCMF_PCIE_DEVICE_SUB(dev_id, subvend, subdev) { \ 1955 + BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\ 1956 + subvend, subdev, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 } 1954 1957 1955 1958 static struct pci_device_id brcmf_pcie_devid_table[] = { 1956 1959 BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID), ··· 1969 1966 BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID), 1970 1967 BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID), 1971 1968 BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID), 1969 + BRCMF_PCIE_DEVICE_SUB(0x4365, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4365), 1972 1970 BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_DEVICE_ID), 1973 1971 BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID), 1974 1972 BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID),
+6 -7
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
··· 45 45 #include "chip.h" 46 46 #include "firmware.h" 47 47 48 - #define DCMD_RESP_TIMEOUT msecs_to_jiffies(2000) 49 - #define CTL_DONE_TIMEOUT msecs_to_jiffies(2000) 48 + #define DCMD_RESP_TIMEOUT msecs_to_jiffies(2500) 49 + #define CTL_DONE_TIMEOUT msecs_to_jiffies(2500) 50 50 51 51 #ifdef DEBUG 52 52 ··· 3615 3615 const struct sdiod_drive_str *str_tab = NULL; 3616 3616 u32 str_mask; 3617 3617 u32 str_shift; 3618 - u32 base; 3619 3618 u32 i; 3620 3619 u32 drivestrength_sel = 0; 3621 3620 u32 cc_data_temp; ··· 3657 3658 } 3658 3659 3659 3660 if (str_tab != NULL) { 3661 + struct brcmf_core *pmu = brcmf_chip_get_pmu(ci); 3662 + 3660 3663 for (i = 0; str_tab[i].strength != 0; i++) { 3661 3664 if (drivestrength >= str_tab[i].strength) { 3662 3665 drivestrength_sel = str_tab[i].sel; 3663 3666 break; 3664 3667 } 3665 3668 } 3666 - base = brcmf_chip_get_chipcommon(ci)->base; 3667 - addr = CORE_CC_REG(base, chipcontrol_addr); 3669 + addr = CORE_CC_REG(pmu->base, chipcontrol_addr); 3668 3670 brcmf_sdiod_regwl(sdiodev, addr, 1, NULL); 3669 3671 cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL); 3670 3672 cc_data_temp &= ~str_mask; ··· 3835 3835 goto fail; 3836 3836 3837 3837 /* set PMUControl so a backplane reset does PMU state reload */ 3838 - reg_addr = CORE_CC_REG(brcmf_chip_get_chipcommon(bus->ci)->base, 3839 - pmucontrol); 3838 + reg_addr = CORE_CC_REG(brcmf_chip_get_pmu(bus->ci)->base, pmucontrol); 3840 3839 reg_val = brcmf_sdiod_regrl(bus->sdiodev, reg_addr, &err); 3841 3840 if (err) 3842 3841 goto fail;
+12
drivers/net/wireless/intel/iwlwifi/Kconfig
··· 99 99 100 100 If unsure, say N. 101 101 102 + config IWLWIFI_PCIE_RTPM 103 + bool "Enable runtime power management mode for PCIe devices" 104 + depends on IWLMVM && PM 105 + default false 106 + help 107 + Say Y here to enable runtime power management for PCIe 108 + devices. If enabled, the device will go into low power mode 109 + when idle for a short period of time, allowing for improved 110 + power saving during runtime. 111 + 112 + If unsure, say N. 113 + 102 114 menu "Debugging Options" 103 115 104 116 config IWLWIFI_DEBUG
+4 -1
drivers/net/wireless/intel/iwlwifi/dvm/led.c
··· 152 152 { 153 153 struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led); 154 154 unsigned long on = 0; 155 + unsigned long off = 0; 155 156 156 157 if (brightness > 0) 157 158 on = IWL_LED_SOLID; 159 + else 160 + off = IWL_LED_SOLID; 158 161 159 - iwl_led_cmd(priv, on, 0); 162 + iwl_led_cmd(priv, on, off); 160 163 } 161 164 162 165 static int iwl_led_blink_set(struct led_classdev *led_cdev,
+2 -2
drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
··· 396 396 iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET, 397 397 CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); 398 398 399 - iwl_trans_d3_suspend(priv->trans, false); 399 + iwl_trans_d3_suspend(priv->trans, false, true); 400 400 401 401 goto out; 402 402 ··· 469 469 /* we'll clear ctx->vif during iwlagn_prepare_restart() */ 470 470 vif = ctx->vif; 471 471 472 - ret = iwl_trans_d3_resume(priv->trans, &d3_status, false); 472 + ret = iwl_trans_d3_resume(priv->trans, &d3_status, false, true); 473 473 if (ret) 474 474 goto out_unlock; 475 475
+2 -1
drivers/net/wireless/intel/iwlwifi/iwl-9000.c
··· 138 138 .smem_offset = IWL9000_SMEM_OFFSET, \ 139 139 .smem_len = IWL9000_SMEM_LEN, \ 140 140 .thermal_params = &iwl9000_tt_params, \ 141 - .apmg_not_supported = true 141 + .apmg_not_supported = true, \ 142 + .mq_rx_supported = true 142 143 143 144 const struct iwl_cfg iwl9260_2ac_cfg = { 144 145 .name = "Intel(R) Dual Band Wireless AC 9260",
+2
drivers/net/wireless/intel/iwlwifi/iwl-config.h
··· 311 311 * @dccm2_len: length of the second DCCM 312 312 * @smem_offset: offset from which the SMEM begins 313 313 * @smem_len: the length of SMEM 314 + * @mq_rx_supported: multi-queue rx support 314 315 * 315 316 * We enable the driver to be backward compatible wrt. hardware features. 316 317 * API differences in uCode shouldn't be handled here but through TLVs ··· 363 362 const u32 smem_len; 364 363 const struct iwl_tt_params *thermal_params; 365 364 bool apmg_not_supported; 365 + bool mq_rx_supported; 366 366 }; 367 367 368 368 /*
+77
drivers/net/wireless/intel/iwlwifi/iwl-fh.h
··· 6 6 * GPL LICENSE SUMMARY 7 7 * 8 8 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 9 + * Copyright(c) 2015 Intel Deutschland GmbH 9 10 * 10 11 * This program is free software; you can redistribute it and/or modify 11 12 * it under the terms of version 2 of the GNU General Public License as ··· 32 31 * BSD LICENSE 33 32 * 34 33 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 34 + * Copyright(c) 2015 Intel Deutschland GmbH 35 35 * All rights reserved. 36 36 * 37 37 * Redistribution and use in source and binary forms, with or without ··· 314 312 #define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28 315 313 #define FH_MEM_TB_MAX_LENGTH (0x00020000) 316 314 315 + /* 9000 rx series registers */ 316 + 317 + #define RFH_Q0_FRBDCB_BA_LSB 0xA08000 /* 64 bit address */ 318 + #define RFH_Q_FRBDCB_BA_LSB(q) (RFH_Q0_FRBDCB_BA_LSB + (q) * 8) 319 + /* Write index table */ 320 + #define RFH_Q0_FRBDCB_WIDX 0xA08080 321 + #define RFH_Q_FRBDCB_WIDX(q) (RFH_Q0_FRBDCB_WIDX + (q) * 4) 322 + /* Read index table */ 323 + #define RFH_Q0_FRBDCB_RIDX 0xA080C0 324 + #define RFH_Q_FRBDCB_RIDX(q) (RFH_Q0_FRBDCB_RIDX + (q) * 4) 325 + /* Used list table */ 326 + #define RFH_Q0_URBDCB_BA_LSB 0xA08100 /* 64 bit address */ 327 + #define RFH_Q_URBDCB_BA_LSB(q) (RFH_Q0_URBDCB_BA_LSB + (q) * 8) 328 + /* Write index table */ 329 + #define RFH_Q0_URBDCB_WIDX 0xA08180 330 + #define RFH_Q_URBDCB_WIDX(q) (RFH_Q0_URBDCB_WIDX + (q) * 4) 331 + #define RFH_Q0_URBDCB_VAID 0xA081C0 332 + #define RFH_Q_URBDCB_VAID(q) (RFH_Q0_URBDCB_VAID + (q) * 4) 333 + /* stts */ 334 + #define RFH_Q0_URBD_STTS_WPTR_LSB 0xA08200 /*64 bits address */ 335 + #define RFH_Q_URBD_STTS_WPTR_LSB(q) (RFH_Q0_URBD_STTS_WPTR_LSB + (q) * 8) 336 + 337 + #define RFH_Q0_ORB_WPTR_LSB 0xA08280 338 + #define RFH_Q_ORB_WPTR_LSB(q) (RFH_Q0_ORB_WPTR_LSB + (q) * 8) 339 + #define RFH_RBDBUF_RBD0_LSB 0xA08300 340 + #define RFH_RBDBUF_RBD_LSB(q) (RFH_RBDBUF_RBD0_LSB + (q) * 8) 341 + 342 + /* DMA configuration */ 343 + #define 
RFH_RXF_DMA_CFG 0xA09820 344 + /* RB size */ 345 + #define RFH_RXF_DMA_RB_SIZE_MASK (0x000F0000) /* bits 16-19 */ 346 + #define RFH_RXF_DMA_RB_SIZE_POS 16 347 + #define RFH_RXF_DMA_RB_SIZE_1K (0x1 << RFH_RXF_DMA_RB_SIZE_POS) 348 + #define RFH_RXF_DMA_RB_SIZE_2K (0x2 << RFH_RXF_DMA_RB_SIZE_POS) 349 + #define RFH_RXF_DMA_RB_SIZE_4K (0x4 << RFH_RXF_DMA_RB_SIZE_POS) 350 + #define RFH_RXF_DMA_RB_SIZE_8K (0x8 << RFH_RXF_DMA_RB_SIZE_POS) 351 + #define RFH_RXF_DMA_RB_SIZE_12K (0x9 << RFH_RXF_DMA_RB_SIZE_POS) 352 + #define RFH_RXF_DMA_RB_SIZE_16K (0xA << RFH_RXF_DMA_RB_SIZE_POS) 353 + #define RFH_RXF_DMA_RB_SIZE_20K (0xB << RFH_RXF_DMA_RB_SIZE_POS) 354 + #define RFH_RXF_DMA_RB_SIZE_24K (0xC << RFH_RXF_DMA_RB_SIZE_POS) 355 + #define RFH_RXF_DMA_RB_SIZE_28K (0xD << RFH_RXF_DMA_RB_SIZE_POS) 356 + #define RFH_RXF_DMA_RB_SIZE_32K (0xE << RFH_RXF_DMA_RB_SIZE_POS) 357 + /* RB Circular Buffer size:defines the table sizes in RBD units */ 358 + #define RFH_RXF_DMA_RBDCB_SIZE_MASK (0x00F00000) /* bits 20-23 */ 359 + #define RFH_RXF_DMA_RBDCB_SIZE_POS 20 360 + #define RFH_RXF_DMA_RBDCB_SIZE_8 (0x3 << RFH_RXF_DMA_RBDCB_SIZE_POS) 361 + #define RFH_RXF_DMA_RBDCB_SIZE_16 (0x4 << RFH_RXF_DMA_RBDCB_SIZE_POS) 362 + #define RFH_RXF_DMA_RBDCB_SIZE_32 (0x5 << RFH_RXF_DMA_RBDCB_SIZE_POS) 363 + #define RFH_RXF_DMA_RBDCB_SIZE_64 (0x7 << RFH_RXF_DMA_RBDCB_SIZE_POS) 364 + #define RFH_RXF_DMA_RBDCB_SIZE_128 (0x7 << RFH_RXF_DMA_RBDCB_SIZE_POS) 365 + #define RFH_RXF_DMA_RBDCB_SIZE_256 (0x8 << RFH_RXF_DMA_RBDCB_SIZE_POS) 366 + #define RFH_RXF_DMA_RBDCB_SIZE_512 (0x9 << RFH_RXF_DMA_RBDCB_SIZE_POS) 367 + #define RFH_RXF_DMA_RBDCB_SIZE_1024 (0xA << RFH_RXF_DMA_RBDCB_SIZE_POS) 368 + #define RFH_RXF_DMA_RBDCB_SIZE_2048 (0xB << RFH_RXF_DMA_RBDCB_SIZE_POS) 369 + #define RFH_RXF_DMA_MIN_RB_SIZE_MASK (0x03000000) /* bit 24-25 */ 370 + #define RFH_RXF_DMA_MIN_RB_SIZE_POS 24 371 + #define RFH_RXF_DMA_MIN_RB_4_8 (3 << RFH_RXF_DMA_MIN_RB_SIZE_POS) 372 + #define RFH_RXF_DMA_SINGLE_FRAME_MASK (0x20000000) /* bit 29 */ 
373 + #define RFH_DMA_EN_MASK (0xC0000000) /* bits 30-31*/ 374 + #define RFH_DMA_EN_ENABLE_VAL BIT(31) 375 + 376 + #define RFH_RXF_RXQ_ACTIVE 0xA0980C 377 + 378 + #define RFH_GEN_CFG 0xA09800 379 + #define RFH_GEN_CFG_DEFAULT_RXQ_NUM_MASK 0xF00 380 + #define RFH_GEN_CFG_SERVICE_DMA_SNOOP BIT(0) 381 + #define RFH_GEN_CFG_RFH_DMA_SNOOP BIT(1) 382 + #define DEFAULT_RXQ_NUM 8 383 + 384 + /* end of 9000 rx series registers */ 385 + 317 386 /* TFDB Area - TFDs buffer table */ 318 387 #define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF) 319 388 #define FH_TFDIB_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x900) ··· 506 433 * it is brought from the memory to TX-FIFO 507 434 */ 508 435 #define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002) 436 + 437 + #define MQ_RX_TABLE_SIZE 512 438 + #define MQ_RX_TABLE_MASK (MQ_RX_TABLE_SIZE - 1) 439 + #define MQ_RX_POOL_SIZE MQ_RX_TABLE_MASK 509 440 510 441 #define RX_QUEUE_SIZE 256 511 442 #define RX_QUEUE_MASK 255
+3
drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h
··· 293 293 * @FW_DBG_TX_LATENCY: trigger log collection when the tx latency goes above a 294 294 * threshold. 295 295 * @FW_DBG_TDLS: trigger log collection upon TDLS related events. 296 + * @FW_DBG_TRIGGER_TX_STATUS: trigger log collection upon tx status when 297 + * the firmware sends a tx reply. 296 298 */ 297 299 enum iwl_fw_dbg_trigger { 298 300 FW_DBG_TRIGGER_INVALID = 0, ··· 311 309 FW_DBG_TRIGGER_BA, 312 310 FW_DBG_TRIGGER_TX_LATENCY, 313 311 FW_DBG_TRIGGER_TDLS, 312 + FW_DBG_TRIGGER_TX_STATUS, 314 313 315 314 /* must be last */ 316 315 FW_DBG_TRIGGER_MAX,
+21
drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
··· 297 297 * @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA), 298 298 * which also implies support for the scheduler configuration command 299 299 * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching 300 + * @IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG: Consolidated D3-D0 image 300 301 * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command 301 302 * @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command 302 303 * @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload 303 304 * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics 305 + * @IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD: support p2p standalone U-APSD 304 306 * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running 305 307 * @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different 306 308 * sources for the MCC. This TLV bit is a future replacement to ··· 315 313 * @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT 316 314 * @IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION: firmware will decide on what 317 315 * antenna the beacon should be transmitted 316 + * @IWL_UCODE_TLV_CAPA_BEACON_STORING: firmware will store the latest beacon 317 + * from AP and will send it upon d0i3 exit. 
318 318 * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2: support LAR API V2 319 319 * 320 320 * @NUM_IWL_UCODE_TLV_CAPA: number of bits used ··· 334 330 IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = (__force iwl_ucode_tlv_capa_t)11, 335 331 IWL_UCODE_TLV_CAPA_DQA_SUPPORT = (__force iwl_ucode_tlv_capa_t)12, 336 332 IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = (__force iwl_ucode_tlv_capa_t)13, 333 + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG = (__force iwl_ucode_tlv_capa_t)17, 337 334 IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = (__force iwl_ucode_tlv_capa_t)18, 338 335 IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT = (__force iwl_ucode_tlv_capa_t)19, 339 336 IWL_UCODE_TLV_CAPA_CSUM_SUPPORT = (__force iwl_ucode_tlv_capa_t)21, 340 337 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = (__force iwl_ucode_tlv_capa_t)22, 338 + IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD = (__force iwl_ucode_tlv_capa_t)26, 341 339 IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = (__force iwl_ucode_tlv_capa_t)28, 342 340 IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = (__force iwl_ucode_tlv_capa_t)29, 343 341 IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30, ··· 347 341 IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64, 348 342 IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65, 349 343 IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = (__force iwl_ucode_tlv_capa_t)67, 344 + IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT = (__force iwl_ucode_tlv_capa_t)68, 350 345 IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION = (__force iwl_ucode_tlv_capa_t)71, 346 + IWL_UCODE_TLV_CAPA_BEACON_STORING = (__force iwl_ucode_tlv_capa_t)72, 351 347 IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2 = (__force iwl_ucode_tlv_capa_t)73, 352 348 353 349 NUM_IWL_UCODE_TLV_CAPA ··· 753 745 u8 peer_mode; 754 746 u8 peer[ETH_ALEN]; 755 747 u8 reserved[4]; 748 + } __packed; 749 + 750 + /** 751 + * struct iwl_fw_dbg_trigger_tx_status - configures trigger for tx response 752 + * status. 
753 + * @statuses: the list of statuses to trigger the collection on 754 + */ 755 + struct iwl_fw_dbg_trigger_tx_status { 756 + struct tx_status { 757 + u8 status; 758 + u8 reserved[3]; 759 + } __packed statuses[16]; 760 + __le32 reserved[2]; 756 761 } __packed; 757 762 758 763 /**
+2
drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
··· 108 108 * @power_level: power level, default = 1 109 109 * @debug_level: levels are IWL_DL_* 110 110 * @ant_coupling: antenna coupling in dB, default = 0 111 + * @nvm_file: specifies a external NVM file 112 + * @uapsd_disable: disable U-APSD, default = 1 111 113 * @d0i3_disable: disable d0i3, default = 1, 112 114 * @d0i3_entry_delay: time to wait after no refs are taken before 113 115 * entering D0i3 (in msecs)
+4 -3
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
··· 539 539 struct iwl_nvm_data *data, 540 540 const __le16 *mac_override, 541 541 const __le16 *nvm_hw, 542 - u32 mac_addr0, u32 mac_addr1) 542 + __le32 mac_addr0, __le32 mac_addr1) 543 543 { 544 544 const u8 *hw_addr; 545 545 ··· 583 583 584 584 if (!is_valid_ether_addr(data->hw_addr)) 585 585 IWL_ERR_DEV(dev, 586 - "mac address from hw section is not valid\n"); 586 + "mac address (%pM) from hw section is not valid\n", 587 + data->hw_addr); 587 588 588 589 return; 589 590 } ··· 598 597 const __le16 *nvm_calib, const __le16 *regulatory, 599 598 const __le16 *mac_override, const __le16 *phy_sku, 600 599 u8 tx_chains, u8 rx_chains, bool lar_fw_supported, 601 - u32 mac_addr0, u32 mac_addr1) 600 + __le32 mac_addr0, __le32 mac_addr1) 602 601 { 603 602 struct iwl_nvm_data *data; 604 603 u32 sku;
+1 -1
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
··· 79 79 const __le16 *nvm_calib, const __le16 *regulatory, 80 80 const __le16 *mac_override, const __le16 *phy_sku, 81 81 u8 tx_chains, u8 rx_chains, bool lar_fw_supported, 82 - u32 mac_addr0, u32 mac_addr1); 82 + __le32 mac_addr0, __le32 mac_addr1); 83 83 84 84 /** 85 85 * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW
+13 -7
drivers/net/wireless/intel/iwlwifi/iwl-trans.h
··· 506 506 bool sw_csum_tx; 507 507 const struct iwl_hcmd_arr *command_groups; 508 508 int command_groups_size; 509 - 509 + 510 510 u32 sdio_adma_addr; 511 511 }; 512 512 ··· 618 618 void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr); 619 619 void (*stop_device)(struct iwl_trans *trans, bool low_power); 620 620 621 - void (*d3_suspend)(struct iwl_trans *trans, bool test); 621 + void (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset); 622 622 int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status, 623 - bool test); 623 + bool test, bool reset); 624 624 625 625 int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd); 626 626 ··· 735 735 IWL_PLAT_PM_MODE_D3, 736 736 IWL_PLAT_PM_MODE_D0I3, 737 737 }; 738 + 739 + /* Max time to wait for trans to become idle/non-idle on d0i3 740 + * enter/exit (in msecs). 741 + */ 742 + #define IWL_TRANS_IDLE_TIMEOUT 2000 738 743 739 744 /** 740 745 * struct iwl_trans - transport common data ··· 925 920 _iwl_trans_stop_device(trans, true); 926 921 } 927 922 928 - static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test) 923 + static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, 924 + bool reset) 929 925 { 930 926 might_sleep(); 931 927 if (trans->ops->d3_suspend) 932 - trans->ops->d3_suspend(trans, test); 928 + trans->ops->d3_suspend(trans, test, reset); 933 929 } 934 930 935 931 static inline int iwl_trans_d3_resume(struct iwl_trans *trans, 936 932 enum iwl_d3_status *status, 937 - bool test) 933 + bool test, bool reset) 938 934 { 939 935 might_sleep(); 940 936 if (!trans->ops->d3_resume) 941 937 return 0; 942 938 943 - return trans->ops->d3_resume(trans, status, test); 939 + return trans->ops->d3_resume(trans, status, test, reset); 944 940 } 945 941 946 942 static inline void iwl_trans_ref(struct iwl_trans *trans)
+6 -2
drivers/net/wireless/intel/iwlwifi/mvm/constants.h
··· 6 6 * GPL LICENSE SUMMARY 7 7 * 8 8 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. 9 - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 9 + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 10 + * Copyright(c) 2015 Intel Deutschland GmbH 10 11 * 11 12 * This program is free software; you can redistribute it and/or modify 12 13 * it under the terms of version 2 of the GNU General Public License as ··· 33 32 * BSD LICENSE 34 33 * 35 34 * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. 36 - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 35 + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 36 + * Copyright(c) 2015 Intel Deutschland GmbH 37 37 * All rights reserved. 38 38 * 39 39 * Redistribution and use in source and binary forms, with or without ··· 75 73 #define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC) 76 74 #define IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT (2 * 1024) /* defined in TU */ 77 75 #define IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT (40 * 1024) /* defined in TU */ 76 + #define IWL_MVM_P2P_UAPSD_STANDALONE 0 78 77 #define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE 0 79 78 #define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC) 80 79 #define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC) ··· 110 107 #define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK 1 111 108 #define IWL_MVM_TOF_IS_RESPONDER 0 112 109 #define IWL_MVM_SW_TX_CSUM_OFFLOAD 0 110 + #define IWL_MVM_COLLECT_FW_ERR_DUMP 1 113 111 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1 114 112 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2 115 113 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1
+51 -22
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
··· 7 7 * 8 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 9 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 10 + * Copyright(c) 2016 Intel Deutschland GmbH 10 11 * 11 12 * This program is free software; you can redistribute it and/or modify 12 13 * it under the terms of version 2 of the GNU General Public License as ··· 34 33 * 35 34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 36 35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 36 + * Copyright(c) 2016 Intel Deutschland GmbH 37 37 * All rights reserved. 38 38 * 39 39 * Redistribution and use in source and binary forms, with or without ··· 853 851 wowlan_config_cmd->is_11n_connection = 854 852 ap_sta->ht_cap.ht_supported; 855 853 wowlan_config_cmd->flags = ENABLE_L3_FILTERING | 856 - ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING; 854 + ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING | 855 + ENABLE_STORE_BEACON; 857 856 858 857 /* Query the last used seqno and set it */ 859 858 ret = iwl_mvm_get_last_nonqos_seq(mvm, vif); ··· 1026 1023 struct ieee80211_sta *ap_sta) 1027 1024 { 1028 1025 int ret; 1026 + bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, 1027 + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); 1029 1028 1030 - ret = iwl_mvm_switch_to_d3(mvm); 1031 - if (ret) 1032 - return ret; 1029 + if (!unified_image) { 1030 + ret = iwl_mvm_switch_to_d3(mvm); 1031 + if (ret) 1032 + return ret; 1033 1033 1034 - ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta); 1035 - if (ret) 1036 - return ret; 1034 + ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta); 1035 + if (ret) 1036 + return ret; 1037 + } 1037 1038 1038 1039 if (!iwlwifi_mod_params.sw_crypto) { 1039 1040 /* ··· 1079 1072 { 1080 1073 struct iwl_wowlan_config_cmd wowlan_config_cmd = {}; 1081 1074 int ret; 1075 + bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, 1076 + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); 1082 1077 1083 - ret = iwl_mvm_switch_to_d3(mvm); 1084 - if (ret) 1085 - return ret; 1078 + if 
(!unified_image) { 1079 + ret = iwl_mvm_switch_to_d3(mvm); 1080 + if (ret) 1081 + return ret; 1082 + } 1086 1083 1087 1084 /* rfkill release can be either for wowlan or netdetect */ 1088 1085 if (wowlan->rfkill_release) ··· 1162 1151 }; 1163 1152 int ret; 1164 1153 int len __maybe_unused; 1154 + bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, 1155 + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); 1165 1156 1166 1157 if (!wowlan) { 1167 1158 /* ··· 1249 1236 1250 1237 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 1251 1238 1252 - iwl_trans_d3_suspend(mvm->trans, test); 1239 + iwl_trans_d3_suspend(mvm->trans, test, !unified_image); 1253 1240 out: 1254 1241 if (ret < 0) { 1255 1242 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); ··· 1312 1299 __set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags); 1313 1300 mutex_unlock(&mvm->d0i3_suspend_mutex); 1314 1301 1315 - iwl_trans_d3_suspend(trans, false); 1302 + iwl_trans_d3_suspend(trans, false, false); 1316 1303 1317 1304 return 0; 1318 1305 } ··· 2054 2041 static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) 2055 2042 { 2056 2043 struct ieee80211_vif *vif = NULL; 2057 - int ret; 2044 + int ret = 1; 2058 2045 enum iwl_d3_status d3_status; 2059 2046 bool keep = false; 2047 + bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, 2048 + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); 2049 + 2050 + u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE | 2051 + CMD_WAKE_UP_TRANS; 2060 2052 2061 2053 mutex_lock(&mvm->mutex); 2062 2054 ··· 2070 2052 if (IS_ERR_OR_NULL(vif)) 2071 2053 goto err; 2072 2054 2073 - ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test); 2055 + ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image); 2074 2056 if (ret) 2075 2057 goto err; 2076 2058 ··· 2113 2095 iwl_mvm_d3_disconnect_iter, keep ? 
vif : NULL); 2114 2096 2115 2097 out: 2116 - /* return 1 to reconfigure the device */ 2098 + if (unified_image && !ret) { 2099 + ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL); 2100 + if (!ret) /* D3 ended successfully - no need to reset device */ 2101 + return 0; 2102 + } 2103 + 2104 + /* 2105 + * Reconfigure the device in one of the following cases: 2106 + * 1. We are not using a unified image 2107 + * 2. We are using a unified image but had an error while exiting D3 2108 + */ 2117 2109 set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 2118 2110 set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status); 2119 - 2120 - /* We always return 1, which causes mac80211 to do a reconfig 2121 - * with IEEE80211_RECONFIG_TYPE_RESTART. This type of 2122 - * reconfig calls iwl_mvm_restart_complete(), where we unref 2123 - * the IWL_MVM_REF_UCODE_DOWN, so we need to take the 2124 - * reference here. 2111 + /* 2112 + * When switching images we return 1, which causes mac80211 2113 + * to do a reconfig with IEEE80211_RECONFIG_TYPE_RESTART. 2114 + * This type of reconfig calls iwl_mvm_restart_complete(), 2115 + * where we unref the IWL_MVM_REF_UCODE_DOWN, so we need 2116 + * to take the reference here. 2125 2117 */ 2126 2118 iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); 2119 + 2127 2120 return 1; 2128 2121 } 2129 2122 ··· 2151 2122 enum iwl_d3_status d3_status; 2152 2123 struct iwl_trans *trans = mvm->trans; 2153 2124 2154 - iwl_trans_d3_resume(trans, &d3_status, false); 2125 + iwl_trans_d3_resume(trans, &d3_status, false, false); 2155 2126 2156 2127 /* 2157 2128 * make sure to clear D0I3_DEFER_WAKEUP before
+70 -5
drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
··· 7 7 * 8 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 9 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 10 + * Copyright(c) 2016 Intel Deutschland GmbH 10 11 * 11 12 * This program is free software; you can redistribute it and/or modify 12 13 * it under the terms of version 2 of the GNU General Public License as ··· 34 33 * 35 34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 36 35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 36 + * Copyright(c) 2016 Intel Deutschland GmbH 37 37 * All rights reserved. 38 38 * 39 39 * Redistribution and use in source and binary forms, with or without ··· 1257 1255 { 1258 1256 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1259 1257 struct iwl_mvm *mvm = mvmvif->mvm; 1258 + bool prev; 1260 1259 u8 value; 1261 1260 int ret; 1262 1261 ··· 1268 1265 return -EINVAL; 1269 1266 1270 1267 mutex_lock(&mvm->mutex); 1271 - iwl_mvm_update_low_latency(mvm, vif, value); 1268 + prev = iwl_mvm_vif_low_latency(mvmvif); 1269 + mvmvif->low_latency_dbgfs = value; 1270 + iwl_mvm_update_low_latency(mvm, vif, prev); 1272 1271 mutex_unlock(&mvm->mutex); 1273 1272 1274 1273 return count; ··· 1282 1277 { 1283 1278 struct ieee80211_vif *vif = file->private_data; 1284 1279 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1285 - char buf[2]; 1280 + char buf[30] = {}; 1281 + int len; 1286 1282 1287 - buf[0] = mvmvif->low_latency ? 
'1' : '0'; 1288 - buf[1] = '\n'; 1289 - return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf)); 1283 + len = snprintf(buf, sizeof(buf) - 1, 1284 + "traffic=%d\ndbgfs=%d\nvcmd=%d\n", 1285 + mvmvif->low_latency_traffic, 1286 + mvmvif->low_latency_dbgfs, 1287 + mvmvif->low_latency_vcmd); 1288 + return simple_read_from_buffer(user_buf, count, ppos, buf, len); 1290 1289 } 1291 1290 1292 1291 static ssize_t iwl_dbgfs_uapsd_misbehaving_read(struct file *file, ··· 1372 1363 return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf)); 1373 1364 } 1374 1365 1366 + static void iwl_dbgfs_quota_check(void *data, u8 *mac, 1367 + struct ieee80211_vif *vif) 1368 + { 1369 + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1370 + int *ret = data; 1371 + 1372 + if (mvmvif->dbgfs_quota_min) 1373 + *ret = -EINVAL; 1374 + } 1375 + 1376 + static ssize_t iwl_dbgfs_quota_min_write(struct ieee80211_vif *vif, char *buf, 1377 + size_t count, loff_t *ppos) 1378 + { 1379 + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1380 + struct iwl_mvm *mvm = mvmvif->mvm; 1381 + u16 value; 1382 + int ret; 1383 + 1384 + ret = kstrtou16(buf, 0, &value); 1385 + if (ret) 1386 + return ret; 1387 + 1388 + if (value > 95) 1389 + return -EINVAL; 1390 + 1391 + mutex_lock(&mvm->mutex); 1392 + 1393 + mvmvif->dbgfs_quota_min = 0; 1394 + ieee80211_iterate_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, 1395 + iwl_dbgfs_quota_check, &ret); 1396 + if (ret == 0) { 1397 + mvmvif->dbgfs_quota_min = value; 1398 + iwl_mvm_update_quotas(mvm, false, NULL); 1399 + } 1400 + mutex_unlock(&mvm->mutex); 1401 + 1402 + return ret ?: count; 1403 + } 1404 + 1405 + static ssize_t iwl_dbgfs_quota_min_read(struct file *file, 1406 + char __user *user_buf, 1407 + size_t count, loff_t *ppos) 1408 + { 1409 + struct ieee80211_vif *vif = file->private_data; 1410 + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1411 + char buf[10]; 1412 + int len; 1413 + 1414 + len = 
snprintf(buf, sizeof(buf), "%d\n", mvmvif->dbgfs_quota_min); 1415 + 1416 + return simple_read_from_buffer(user_buf, count, ppos, buf, len); 1417 + } 1418 + 1375 1419 #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ 1376 1420 _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif) 1377 1421 #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ ··· 1448 1386 MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32); 1449 1387 MVM_DEBUGFS_READ_FILE_OPS(tof_range_response); 1450 1388 MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32); 1389 + MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32); 1451 1390 1452 1391 void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 1453 1392 { ··· 1485 1422 MVM_DEBUGFS_ADD_FILE_VIF(uapsd_misbehaving, mvmvif->dbgfs_dir, 1486 1423 S_IRUSR | S_IWUSR); 1487 1424 MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir, 1425 + S_IRUSR | S_IWUSR); 1426 + MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir, 1488 1427 S_IRUSR | S_IWUSR); 1489 1428 1490 1429 if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
+53 -5
drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
··· 7 7 * 8 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 9 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 10 + * Copyright(c) 2016 Intel Deutschland GmbH 10 11 * 11 12 * This program is free software; you can redistribute it and/or modify 12 13 * it under the terms of version 2 of the GNU General Public License as ··· 262 261 { 263 262 struct iwl_mvm *mvm = file->private_data; 264 263 char buf[16]; 265 - int pos, temp; 264 + int pos, ret; 265 + s32 temp; 266 266 267 267 if (!mvm->ucode_loaded) 268 268 return -EIO; 269 269 270 270 mutex_lock(&mvm->mutex); 271 - temp = iwl_mvm_get_temp(mvm); 271 + ret = iwl_mvm_get_temp(mvm, &temp); 272 272 mutex_unlock(&mvm->mutex); 273 273 274 - if (temp < 0) 275 - return temp; 274 + if (ret) 275 + return -EIO; 276 276 277 277 pos = scnprintf(buf , sizeof(buf), "%d\n", temp); 278 278 ··· 944 942 return count; 945 943 } 946 944 945 + static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm, 946 + char *buf, size_t count, 947 + loff_t *ppos) 948 + { 949 + struct iwl_rss_config_cmd cmd = { 950 + .flags = cpu_to_le32(IWL_RSS_ENABLE), 951 + .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP | 952 + IWL_RSS_HASH_TYPE_IPV4_PAYLOAD | 953 + IWL_RSS_HASH_TYPE_IPV6_TCP | 954 + IWL_RSS_HASH_TYPE_IPV6_PAYLOAD, 955 + }; 956 + int ret, i, num_repeats, nbytes = count / 2; 957 + 958 + ret = hex2bin(cmd.indirection_table, buf, nbytes); 959 + if (ret) 960 + return ret; 961 + 962 + /* 963 + * The input is the redirection table, partial or full. 964 + * Repeat the pattern if needed. 965 + * For example, input of 01020F will be repeated 42 times, 966 + * indirecting RSS hash results to queues 1, 2, 15 (skipping 967 + * queues 3 - 14). 
968 + */ 969 + num_repeats = ARRAY_SIZE(cmd.indirection_table) / nbytes; 970 + for (i = 1; i < num_repeats; i++) 971 + memcpy(&cmd.indirection_table[i * nbytes], 972 + cmd.indirection_table, nbytes); 973 + /* handle cut in the middle pattern for the last places */ 974 + memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table, 975 + ARRAY_SIZE(cmd.indirection_table) % nbytes); 976 + 977 + memcpy(cmd.secret_key, mvm->secret_key, ARRAY_SIZE(cmd.secret_key)); 978 + 979 + mutex_lock(&mvm->mutex); 980 + ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); 981 + mutex_unlock(&mvm->mutex); 982 + 983 + return ret ?: count; 984 + } 985 + 947 986 static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file, 948 987 char __user *user_buf, 949 988 size_t count, loff_t *ppos) ··· 1026 983 trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) 1027 984 return -EOPNOTSUPP; 1028 985 1029 - ret = kstrtouint(buf, 0, &rec_mode); 986 + ret = kstrtoint(buf, 0, &rec_mode); 1030 987 if (ret) 1031 988 return ret; 1032 989 ··· 1497 1454 MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8); 1498 1455 MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64); 1499 1456 MVM_DEBUGFS_WRITE_FILE_OPS(cont_recording, 8); 1457 + MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl, 16); 1500 1458 1501 1459 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING 1502 1460 MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256); ··· 1542 1498 MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, S_IWUSR); 1543 1499 MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, S_IWUSR); 1544 1500 MVM_DEBUGFS_ADD_FILE(cont_recording, mvm->debugfs_dir, S_IWUSR); 1501 + MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, S_IWUSR); 1545 1502 if (!debugfs_create_bool("enable_scan_iteration_notif", 1546 1503 S_IRUSR | S_IWUSR, 1547 1504 mvm->debugfs_dir, 1548 1505 &mvm->scan_iter_notif_enabled)) 1506 + goto err; 1507 + if (!debugfs_create_bool("drop_bcn_ap_mode", S_IRUSR | S_IWUSR, 1508 + mvm->debugfs_dir, &mvm->drop_bcn_ap_mode)) 
1549 1509 goto err; 1550 1510 1551 1511 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+1
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h
··· 251 251 ENABLE_L3_FILTERING = BIT(1), 252 252 ENABLE_NBNS_FILTERING = BIT(2), 253 253 ENABLE_DHCP_FILTERING = BIT(3), 254 + ENABLE_STORE_BEACON = BIT(4), 254 255 }; 255 256 256 257 struct iwl_wowlan_config_cmd {
+34 -8
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
··· 7 7 * 8 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 9 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 10 - * Copyright(c) 2015 Intel Deutschland GmbH 10 + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH 11 11 * 12 12 * This program is free software; you can redistribute it and/or modify 13 13 * it under the terms of version 2 of the GNU General Public License as ··· 34 34 * 35 35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 36 36 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 37 - * Copyright(c) 2015 Intel Deutschland GmbH 37 + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH 38 38 * All rights reserved. 39 39 * 40 40 * Redistribution and use in source and binary forms, with or without ··· 287 287 IWL_RX_MPDU_STATUS_KEY_ERROR = BIT(4), 288 288 IWL_RX_MPDU_STATUS_ICV_OK = BIT(5), 289 289 IWL_RX_MPDU_STATUS_MIC_OK = BIT(6), 290 - /* TODO - verify this is the correct value */ 291 290 IWL_RX_MPDU_RES_STATUS_TTAK_OK = BIT(7), 292 291 IWL_RX_MPDU_STATUS_SEC_MASK = 0x7 << 8, 293 292 IWL_RX_MPDU_STATUS_SEC_NONE = 0x0 << 8, 294 293 IWL_RX_MPDU_STATUS_SEC_WEP = 0x1 << 8, 295 294 IWL_RX_MPDU_STATUS_SEC_CCM = 0x2 << 8, 296 295 IWL_RX_MPDU_STATUS_SEC_TKIP = 0x3 << 8, 297 - /* TODO - define IWL_RX_MPDU_STATUS_SEC_EXT_ENC - this is a stub */ 298 296 IWL_RX_MPDU_STATUS_SEC_EXT_ENC = 0x4 << 8, 299 - /* TODO - define IWL_RX_MPDU_STATUS_SEC_GCM - this is a stub */ 300 297 IWL_RX_MPDU_STATUS_SEC_GCM = 0x5 << 8, 301 298 IWL_RX_MPDU_STATUS_DECRYPTED = BIT(11), 302 299 IWL_RX_MPDU_STATUS_WEP_MATCH = BIT(12), ··· 347 350 /* DW8 */ 348 351 __le32 filter_match; 349 352 /* DW9 */ 350 - __le32 gp2_on_air_rise; 351 - /* DW10 */ 352 353 __le32 rate_n_flags; 354 + /* DW10 */ 355 + u8 energy_a, energy_b, channel, reserved; 353 356 /* DW11 */ 354 - u8 energy_a, energy_b, energy_c, channel; 357 + __le32 gp2_on_air_rise; 355 358 /* DW12 & DW13 */ 356 359 __le64 tsf_on_air_rise; 357 360 } __packed; ··· 361 364 u8 
reserved; 362 365 __le16 nssn; 363 366 }; 367 + 368 + enum iwl_rss_hash_func_en { 369 + IWL_RSS_HASH_TYPE_IPV4_TCP, 370 + IWL_RSS_HASH_TYPE_IPV4_UDP, 371 + IWL_RSS_HASH_TYPE_IPV4_PAYLOAD, 372 + IWL_RSS_HASH_TYPE_IPV6_TCP, 373 + IWL_RSS_HASH_TYPE_IPV6_UDP, 374 + IWL_RSS_HASH_TYPE_IPV6_PAYLOAD, 375 + }; 376 + 377 + #define IWL_RSS_HASH_KEY_CNT 10 378 + #define IWL_RSS_INDIRECTION_TABLE_SIZE 128 379 + #define IWL_RSS_ENABLE 1 380 + 381 + /** 382 + * struct iwl_rss_config_cmd - RSS (Receive Side Scaling) configuration 383 + * 384 + * @flags: 1 - enable, 0 - disable 385 + * @hash_mask: Type of RSS to use. Values are from %iwl_rss_hash_func_en 386 + * @secret_key: 320 bit input of random key configuration from driver 387 + * @indirection_table: indirection table 388 + */ 389 + struct iwl_rss_config_cmd { 390 + __le32 flags; 391 + u8 hash_mask; 392 + u8 reserved[3]; 393 + __le32 secret_key[IWL_RSS_HASH_KEY_CNT]; 394 + u8 indirection_table[IWL_RSS_INDIRECTION_TABLE_SIZE]; 395 + } __packed; /* RSS_CONFIG_CMD_API_S_VER_1 */ 364 396 365 397 #endif /* __fw_api_rx_h__ */
+69 -2
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
··· 7 7 * 8 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 9 9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 10 + * Copyright(c) 2016 Intel Deutschland GmbH 10 11 * 11 12 * This program is free software; you can redistribute it and/or modify 12 13 * it under the terms of version 2 of the GNU General Public License as ··· 34 33 * 35 34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 36 35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 36 + * Copyright(c) 2016 Intel Deutschland GmbH 37 37 * All rights reserved. 38 38 * 39 39 * Redistribution and use in source and binary forms, with or without ··· 255 253 __le64 hw_tkip_mic_tx_key; 256 254 } __packed; 257 255 256 + #define IWL_ADD_STA_STATUS_MASK 0xFF 257 + #define IWL_ADD_STA_BAID_MASK 0xFF00 258 + 258 259 /** 259 - * struct iwl_mvm_add_sta_cmd - Add/modify a station in the fw's sta table. 260 + * struct iwl_mvm_add_sta_cmd_v7 - Add/modify a station in the fw's sta table. 260 261 * ( REPLY_ADD_STA = 0x18 ) 261 262 * @add_modify: 1: modify existing, 0: add new station 262 263 * @awake_acs: ··· 295 290 * ADD_STA sets up the table entry for one station, either creating a new 296 291 * entry, or modifying a pre-existing one. 297 292 */ 298 - struct iwl_mvm_add_sta_cmd { 293 + struct iwl_mvm_add_sta_cmd_v7 { 299 294 u8 add_modify; 300 295 u8 awake_acs; 301 296 __le16 tid_disable_tx; ··· 316 311 __le16 beamform_flags; 317 312 __le32 tfd_queue_msk; 318 313 } __packed; /* ADD_STA_CMD_API_S_VER_7 */ 314 + 315 + /** 316 + * struct iwl_mvm_add_sta_cmd - Add/modify a station in the fw's sta table. 317 + * ( REPLY_ADD_STA = 0x18 ) 318 + * @add_modify: 1: modify existing, 0: add new station 319 + * @awake_acs: 320 + * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable 321 + * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field. 
322 + * @mac_id_n_color: the Mac context this station belongs to 323 + * @addr[ETH_ALEN]: station's MAC address 324 + * @sta_id: index of station in uCode's station table 325 + * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave 326 + * alone. 1 - modify, 0 - don't change. 327 + * @station_flags: look at %iwl_sta_flags 328 + * @station_flags_msk: what of %station_flags have changed 329 + * @add_immediate_ba_tid: tid for which to add block-ack support (Rx) 330 + * Set %STA_MODIFY_ADD_BA_TID to use this field, and also set 331 + * add_immediate_ba_ssn. 332 + * @remove_immediate_ba_tid: tid for which to remove block-ack support (Rx) 333 + * Set %STA_MODIFY_REMOVE_BA_TID to use this field 334 + * @add_immediate_ba_ssn: ssn for the Rx block-ack session. Used together with 335 + * add_immediate_ba_tid. 336 + * @sleep_tx_count: number of packets to transmit to station even though it is 337 + * asleep. Used to synchronise PS-poll and u-APSD responses while ucode 338 + * keeps track of STA sleep state. 339 + * @sleep_state_flags: Look at %iwl_sta_sleep_flag. 340 + * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP 341 + * mac-addr. 342 + * @beamform_flags: beam forming controls 343 + * @tfd_queue_msk: tfd queues used by this station 344 + * @rx_ba_window: aggregation window size 345 + * 346 + * The device contains an internal table of per-station information, with info 347 + * on security keys, aggregation parameters, and Tx rates for initial Tx 348 + * attempt and any retries (set by REPLY_TX_LINK_QUALITY_CMD). 349 + * 350 + * ADD_STA sets up the table entry for one station, either creating a new 351 + * entry, or modifying a pre-existing one. 
352 + */ 353 + struct iwl_mvm_add_sta_cmd { 354 + u8 add_modify; 355 + u8 awake_acs; 356 + __le16 tid_disable_tx; 357 + __le32 mac_id_n_color; 358 + u8 addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */ 359 + __le16 reserved2; 360 + u8 sta_id; 361 + u8 modify_mask; 362 + __le16 reserved3; 363 + __le32 station_flags; 364 + __le32 station_flags_msk; 365 + u8 add_immediate_ba_tid; 366 + u8 remove_immediate_ba_tid; 367 + __le16 add_immediate_ba_ssn; 368 + __le16 sleep_tx_count; 369 + __le16 sleep_state_flags; 370 + __le16 assoc_id; 371 + __le16 beamform_flags; 372 + __le32 tfd_queue_msk; 373 + __le16 rx_ba_window; 374 + __le16 reserved; 375 + } __packed; /* ADD_STA_CMD_API_S_VER_8 */ 319 376 320 377 /** 321 378 * struct iwl_mvm_add_sta_key_cmd - add/modify sta key
+31
drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
··· 213 213 214 214 MFUART_LOAD_NOTIFICATION = 0xb1, 215 215 216 + RSS_CONFIG_CMD = 0xb3, 217 + 216 218 REPLY_RX_PHY_CMD = 0xc0, 217 219 REPLY_RX_MPDU_CMD = 0xc1, 218 220 FRAME_RELEASE = 0xc3, ··· 282 280 DTS_MEASUREMENT_NOTIF_WIDE = 0xFF, 283 281 }; 284 282 283 + enum iwl_prot_offload_subcmd_ids { 284 + STORED_BEACON_NTF = 0xFF, 285 + }; 286 + 285 287 /* command groups */ 286 288 enum { 287 289 LEGACY_GROUP = 0x0, 288 290 LONG_GROUP = 0x1, 289 291 PHY_OPS_GROUP = 0x4, 292 + PROT_OFFLOAD_GROUP = 0xb, 290 293 }; 291 294 292 295 /** ··· 1857 1850 __le32 page_buff_addr; 1858 1851 __le32 page_buff_size; 1859 1852 } __packed; /* SHARED_MEM_ALLOC_API_S_VER_1 */ 1853 + 1854 + #define MAX_STORED_BEACON_SIZE 600 1855 + 1856 + /** 1857 + * Stored beacon notification 1858 + * 1859 + * @system_time: system time on air rise 1860 + * @tsf: TSF on air rise 1861 + * @beacon_timestamp: beacon on air rise 1862 + * @phy_flags: general phy flags: band, modulation, etc. 1863 + * @channel: channel this beacon was received on 1864 + * @rates: rate in ucode internal format 1865 + * @byte_count: frame's byte count 1866 + */ 1867 + struct iwl_stored_beacon_notif { 1868 + __le32 system_time; 1869 + __le64 tsf; 1870 + __le32 beacon_timestamp; 1871 + __le16 phy_flags; 1872 + __le16 channel; 1873 + __le32 rates; 1874 + __le32 byte_count; 1875 + u8 data[MAX_STORED_BEACON_SIZE]; 1876 + } __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_1 */ 1860 1877 1861 1878 #endif /* __fw_api_h__ */
+4 -2
drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
··· 435 435 bool monitor_dump_only = false; 436 436 int i; 437 437 438 + if (!IWL_MVM_COLLECT_FW_ERR_DUMP && 439 + !mvm->trans->dbg_dest_tlv) 440 + return; 441 + 438 442 lockdep_assert_held(&mvm->mutex); 439 443 440 444 /* there's no point in fw dump if the bus is dead */ ··· 644 640 645 641 /* Dump fw's virtual image */ 646 642 if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) { 647 - u32 i; 648 - 649 643 for (i = 1; i < mvm->num_of_paging_blk + 1; i++) { 650 644 struct iwl_fw_error_dump_paging *paging; 651 645 struct page *pages =
+29
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
··· 7 7 * 8 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 9 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 10 + * Copyright(c) 2016 Intel Deutschland GmbH 10 11 * 11 12 * This program is free software; you can redistribute it and/or modify 12 13 * it under the terms of version 2 of the GNU General Public License as ··· 106 105 IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant); 107 106 return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0, 108 107 sizeof(tx_ant_cmd), &tx_ant_cmd); 108 + } 109 + 110 + static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm) 111 + { 112 + int i; 113 + struct iwl_rss_config_cmd cmd = { 114 + .flags = cpu_to_le32(IWL_RSS_ENABLE), 115 + .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP | 116 + IWL_RSS_HASH_TYPE_IPV4_PAYLOAD | 117 + IWL_RSS_HASH_TYPE_IPV6_TCP | 118 + IWL_RSS_HASH_TYPE_IPV6_PAYLOAD, 119 + }; 120 + 121 + for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++) 122 + cmd.indirection_table[i] = i % mvm->trans->num_rx_queues; 123 + memcpy(cmd.secret_key, mvm->secret_key, ARRAY_SIZE(cmd.secret_key)); 124 + 125 + return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); 109 126 } 110 127 111 128 static void iwl_free_fw_paging(struct iwl_mvm *mvm) ··· 912 893 ret = iwl_send_phy_cfg_cmd(mvm); 913 894 if (ret) 914 895 goto error; 896 + 897 + /* Init RSS configuration */ 898 + if (iwl_mvm_has_new_rx_api(mvm)) { 899 + ret = iwl_send_rss_cfg_cmd(mvm); 900 + if (ret) { 901 + IWL_ERR(mvm, "Failed to configure RSS queues: %d\n", 902 + ret); 903 + goto error; 904 + } 905 + } 915 906 916 907 /* init the fw <-> mac80211 STA mapping */ 917 908 for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
+40 -3
drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
··· 7 7 * 8 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 9 9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 10 - * Copyright(c) 2015 Intel Deutschland GmbH 10 + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH 11 11 * 12 12 * This program is free software; you can redistribute it and/or modify 13 13 * it under the terms of version 2 of the GNU General Public License as ··· 34 34 * 35 35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 36 36 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 37 - * Copyright(c) 2015 Intel Deutschland GmbH 37 + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH 38 38 * All rights reserved. 39 39 * 40 40 * Redistribution and use in source and binary forms, with or without ··· 744 744 * wake-ups. 745 745 */ 746 746 cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); 747 - if (mvmvif->ap_assoc_sta_count) { 747 + if (mvmvif->ap_assoc_sta_count || !mvm->drop_bcn_ap_mode) { 748 748 cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON); 749 749 IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n"); 750 750 } else { ··· 1461 1461 IEEE80211_IFACE_ITER_NORMAL, 1462 1462 iwl_mvm_beacon_loss_iterator, 1463 1463 mb); 1464 + } 1465 + 1466 + void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, 1467 + struct iwl_rx_cmd_buffer *rxb) 1468 + { 1469 + struct iwl_rx_packet *pkt = rxb_addr(rxb); 1470 + struct iwl_stored_beacon_notif *sb = (void *)pkt->data; 1471 + struct ieee80211_rx_status rx_status; 1472 + struct sk_buff *skb; 1473 + u32 size = le32_to_cpu(sb->byte_count); 1474 + 1475 + if (size == 0) 1476 + return; 1477 + 1478 + skb = alloc_skb(size, GFP_ATOMIC); 1479 + if (!skb) { 1480 + IWL_ERR(mvm, "alloc_skb failed\n"); 1481 + return; 1482 + } 1483 + 1484 + /* update rx_status according to the notification's metadata */ 1485 + memset(&rx_status, 0, sizeof(rx_status)); 1486 + rx_status.mactime = le64_to_cpu(sb->tsf); 1487 + rx_status.device_timestamp = 
le32_to_cpu(sb->system_time); 1488 + rx_status.band = 1489 + (sb->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ? 1490 + IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; 1491 + rx_status.freq = 1492 + ieee80211_channel_to_frequency(le16_to_cpu(sb->channel), 1493 + rx_status.band); 1494 + 1495 + /* copy the data */ 1496 + memcpy(skb_put(skb, size), sb->data, size); 1497 + memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); 1498 + 1499 + /* pass it as regular rx to mac80211 */ 1500 + ieee80211_rx_napi(mvm->hw, skb, NULL); 1464 1501 }
+2 -2
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
··· 884 884 ret = -EINVAL; 885 885 break; 886 886 } 887 - ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true); 887 + ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size); 888 888 break; 889 889 case IEEE80211_AMPDU_RX_STOP: 890 - ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false); 890 + ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size); 891 891 break; 892 892 case IEEE80211_AMPDU_TX_START: 893 893 if (!iwl_enable_tx_ampdu(mvm->cfg)) {
+28 -8
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
··· 346 346 * @pm_enabled - Indicate if MAC power management is allowed 347 347 * @monitor_active: indicates that monitor context is configured, and that the 348 348 * interface should get quota etc. 349 - * @low_latency: indicates that this interface is in low-latency mode 350 - * (VMACLowLatencyMode) 349 + * @low_latency_traffic: indicates low latency traffic was detected 350 + * @low_latency_dbgfs: low latency mode set from debugfs 351 + * @low_latency_vcmd: low latency mode set from vendor command 351 352 * @ps_disabled: indicates that this interface requires PS to be disabled 352 353 * @queue_params: QoS params for this MAC 353 354 * @bcast_sta: station used for broadcast packets. Used by the following ··· 376 375 bool ap_ibss_active; 377 376 bool pm_enabled; 378 377 bool monitor_active; 379 - bool low_latency; 378 + bool low_latency_traffic, low_latency_dbgfs, low_latency_vcmd; 380 379 bool ps_disabled; 381 380 struct iwl_mvm_vif_bf_data bf_data; 382 381 ··· 433 432 struct iwl_dbgfs_pm dbgfs_pm; 434 433 struct iwl_dbgfs_bf dbgfs_bf; 435 434 struct iwl_mac_power_cmd mac_pwr_cmd; 435 + int dbgfs_quota_min; 436 436 #endif 437 437 438 438 enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ]; ··· 647 645 atomic_t pending_frames[IWL_MVM_STATION_COUNT]; 648 646 u32 tfd_drained[IWL_MVM_STATION_COUNT]; 649 647 u8 rx_ba_sessions; 648 + u32 secret_key[IWL_RSS_HASH_KEY_CNT]; 650 649 651 650 /* configured by mac80211 */ 652 651 u32 rts_threshold; ··· 859 856 860 857 u32 ciphers[6]; 861 858 struct iwl_mvm_tof_data tof_data; 859 + 860 + /* 861 + * Drop beacons from other APs in AP mode when there are no connected 862 + * clients. 
863 + */ 864 + bool drop_bcn_ap_mode; 862 865 }; 863 866 864 867 /* Extract MVM priv from op_mode and _hw */ ··· 1014 1005 IWL_MVM_BT_COEX_MPLUT; 1015 1006 } 1016 1007 1008 + static inline 1009 + bool iwl_mvm_is_p2p_standalone_uapsd_supported(struct iwl_mvm *mvm) 1010 + { 1011 + return fw_has_capa(&mvm->fw->ucode_capa, 1012 + IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD) && 1013 + IWL_MVM_P2P_UAPSD_STANDALONE; 1014 + } 1015 + 1017 1016 static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm) 1018 1017 { 1019 - /* firmware flag isn't defined yet */ 1020 - return false; 1018 + return fw_has_capa(&mvm->fw->ucode_capa, 1019 + IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT); 1021 1020 } 1022 1021 1023 1022 extern const u8 iwl_mvm_ac_to_tx_fifo[]; ··· 1201 1184 struct iwl_rx_cmd_buffer *rxb); 1202 1185 void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, 1203 1186 struct iwl_rx_cmd_buffer *rxb); 1187 + void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, 1188 + struct iwl_rx_cmd_buffer *rxb); 1204 1189 void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm, 1205 1190 struct ieee80211_vif *vif); 1206 1191 unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm, ··· 1436 1417 * binding, so this has no real impact. For now, just return 1437 1418 * the current desired low-latency state. 1438 1419 */ 1439 - 1440 - return mvmvif->low_latency; 1420 + return mvmvif->low_latency_dbgfs || 1421 + mvmvif->low_latency_traffic || 1422 + mvmvif->low_latency_vcmd; 1441 1423 } 1442 1424 1443 1425 /* hw scheduler queue config */ ··· 1501 1481 void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff); 1502 1482 void iwl_mvm_tt_exit(struct iwl_mvm *mvm); 1503 1483 void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state); 1504 - int iwl_mvm_get_temp(struct iwl_mvm *mvm); 1484 + int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp); 1505 1485 1506 1486 /* Location Aware Regulatory */ 1507 1487 struct iwl_mcc_update_resp *
+5 -3
drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
··· 300 300 struct iwl_nvm_section *sections = mvm->nvm_sections; 301 301 const __le16 *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku; 302 302 bool lar_enabled; 303 - u32 mac_addr0, mac_addr1; 303 + __le32 mac_addr0, mac_addr1; 304 304 305 305 /* Checking for required sections */ 306 306 if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) { ··· 337 337 return NULL; 338 338 339 339 /* read the mac address from WFMP registers */ 340 - mac_addr0 = iwl_trans_read_prph(mvm->trans, WFMP_MAC_ADDR_0); 341 - mac_addr1 = iwl_trans_read_prph(mvm->trans, WFMP_MAC_ADDR_1); 340 + mac_addr0 = cpu_to_le32(iwl_trans_read_prph(mvm->trans, 341 + WFMP_MAC_ADDR_0)); 342 + mac_addr1 = cpu_to_le32(iwl_trans_read_prph(mvm->trans, 343 + WFMP_MAC_ADDR_1)); 342 344 343 345 hw = (const __le16 *)sections[mvm->cfg->nvm_hw_section_num].data; 344 346 sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
+24 -4
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
··· 33 33 * 34 34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 35 35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 36 + * Copyright(c) 2016 Intel Deutschland GmbH 36 37 * All rights reserved. 37 38 * 38 39 * Redistribution and use in source and binary forms, with or without ··· 268 267 true), 269 268 RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, false), 270 269 RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, true), 270 + RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF, 271 + iwl_mvm_rx_stored_beacon_notif, false), 271 272 272 273 }; 273 274 #undef RX_HANDLER ··· 347 344 HCMD_NAME(MAC_PM_POWER_TABLE), 348 345 HCMD_NAME(TDLS_CHANNEL_SWITCH_NOTIFICATION), 349 346 HCMD_NAME(MFUART_LOAD_NOTIFICATION), 347 + HCMD_NAME(RSS_CONFIG_CMD), 350 348 HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC), 351 349 HCMD_NAME(REPLY_RX_PHY_CMD), 352 350 HCMD_NAME(REPLY_RX_MPDU_CMD), ··· 390 386 HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE), 391 387 }; 392 388 389 + /* Please keep this array *SORTED* by hex value. 
390 + * Access is done through binary search 391 + */ 392 + static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = { 393 + HCMD_NAME(STORED_BEACON_NTF), 394 + }; 395 + 393 396 static const struct iwl_hcmd_arr iwl_mvm_groups[] = { 394 397 [LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), 395 398 [LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), 396 399 [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names), 400 + [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names), 397 401 }; 398 - 399 402 400 403 /* this forward declaration can avoid to export the function */ 401 404 static void iwl_mvm_async_handlers_wk(struct work_struct *wk); ··· 492 481 } 493 482 mvm->sf_state = SF_UNINIT; 494 483 mvm->cur_ucode = IWL_UCODE_INIT; 484 + mvm->drop_bcn_ap_mode = true; 495 485 496 486 mutex_init(&mvm->mutex); 497 487 mutex_init(&mvm->d0i3_suspend_mutex); ··· 652 640 iwl_trans_unref(mvm->trans); 653 641 654 642 iwl_mvm_tof_init(mvm); 643 + 644 + /* init RSS hash key */ 645 + get_random_bytes(mvm->secret_key, ARRAY_SIZE(mvm->secret_key)); 655 646 656 647 return op_mode; 657 648 ··· 1211 1196 cmd->is_11n_connection = ap_sta->ht_cap.ht_supported; 1212 1197 cmd->offloading_tid = iter_data->offloading_tid; 1213 1198 cmd->flags = ENABLE_L3_FILTERING | ENABLE_NBNS_FILTERING | 1214 - ENABLE_DHCP_FILTERING; 1199 + ENABLE_DHCP_FILTERING | ENABLE_STORE_BEACON; 1215 1200 /* 1216 1201 * The d0i3 uCode takes care of the nonqos counters, 1217 1202 * so configure only the qos seq ones. 
··· 1232 1217 struct iwl_wowlan_config_cmd wowlan_config_cmd = { 1233 1218 .wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME | 1234 1219 IWL_WOWLAN_WAKEUP_BEACON_MISS | 1235 - IWL_WOWLAN_WAKEUP_LINK_CHANGE | 1236 - IWL_WOWLAN_WAKEUP_BCN_FILTERING), 1220 + IWL_WOWLAN_WAKEUP_LINK_CHANGE), 1237 1221 }; 1238 1222 struct iwl_d3_manager_config d3_cfg_cmd = { 1239 1223 .min_sleep_time = cpu_to_le32(1000), ··· 1282 1268 1283 1269 /* configure wowlan configuration only if needed */ 1284 1270 if (mvm->d0i3_ap_sta_id != IWL_MVM_STATION_COUNT) { 1271 + /* wake on beacons only if beacon storing isn't supported */ 1272 + if (!fw_has_capa(&mvm->fw->ucode_capa, 1273 + IWL_UCODE_TLV_CAPA_BEACON_STORING)) 1274 + wowlan_config_cmd.wakeup_filter |= 1275 + cpu_to_le32(IWL_WOWLAN_WAKEUP_BCN_FILTERING); 1276 + 1285 1277 iwl_mvm_wowlan_config_key_params(mvm, 1286 1278 d0i3_iter_data.connected_vif, 1287 1279 true, flags);
+83 -48
drivers/net/wireless/intel/iwlwifi/mvm/power.c
··· 7 7 * 8 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 9 9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 10 - * Copyright(c) 2015 Intel Deutschland GmbH 10 + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH 11 11 * 12 12 * This program is free software; you can redistribute it and/or modify 13 13 * it under the terms of version 2 of the GNU General Public License as ··· 34 34 * 35 35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 36 36 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 37 - * Copyright(c) 2015 Intel Deutschland GmbH 37 + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH 38 38 * All rights reserved. 39 39 * 40 40 * Redistribution and use in source and binary forms, with or without ··· 259 259 IWL_MVM_PS_HEAVY_RX_THLD_PERCENT; 260 260 } 261 261 262 + static void iwl_mvm_p2p_standalone_iterator(void *_data, u8 *mac, 263 + struct ieee80211_vif *vif) 264 + { 265 + bool *is_p2p_standalone = _data; 266 + 267 + switch (ieee80211_vif_type_p2p(vif)) { 268 + case NL80211_IFTYPE_P2P_GO: 269 + case NL80211_IFTYPE_AP: 270 + *is_p2p_standalone = false; 271 + break; 272 + case NL80211_IFTYPE_STATION: 273 + if (vif->bss_conf.assoc) 274 + *is_p2p_standalone = false; 275 + break; 276 + 277 + default: 278 + break; 279 + } 280 + } 281 + 262 282 static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm, 263 283 struct ieee80211_vif *vif) 264 284 { ··· 288 268 ETH_ALEN)) 289 269 return false; 290 270 291 - if (vif->p2p && 292 - !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD)) 293 - return false; 294 271 /* 295 272 * Avoid using uAPSD if P2P client is associated to GO that uses 296 273 * opportunistic power save. This is due to current FW limitation. 
··· 303 286 */ 304 287 if (iwl_mvm_phy_ctx_count(mvm) >= 2) 305 288 return false; 289 + 290 + if (vif->p2p) { 291 + /* Allow U-APSD only if p2p is stand alone */ 292 + bool is_p2p_standalone = true; 293 + 294 + if (!iwl_mvm_is_p2p_standalone_uapsd_supported(mvm)) 295 + return false; 296 + 297 + ieee80211_iterate_active_interfaces_atomic(mvm->hw, 298 + IEEE80211_IFACE_ITER_NORMAL, 299 + iwl_mvm_p2p_standalone_iterator, 300 + &is_p2p_standalone); 301 + 302 + if (!is_p2p_standalone) 303 + return false; 304 + } 306 305 307 306 return true; 308 307 } ··· 577 544 578 545 struct iwl_power_vifs { 579 546 struct iwl_mvm *mvm; 580 - struct ieee80211_vif *bf_vif; 581 547 struct ieee80211_vif *bss_vif; 582 548 struct ieee80211_vif *p2p_vif; 583 549 struct ieee80211_vif *ap_vif; ··· 649 617 if (mvmvif->phy_ctxt) 650 618 if (mvmvif->phy_ctxt->id < MAX_PHYS) 651 619 power_iterator->bss_active = true; 652 - 653 - if (mvmvif->bf_data.bf_enabled && 654 - !WARN_ON(power_iterator->bf_vif)) 655 - power_iterator->bf_vif = vif; 656 - 657 620 break; 658 621 659 622 default: ··· 877 850 return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags, false); 878 851 } 879 852 880 - static int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm, 881 - struct ieee80211_vif *vif, 882 - bool enable) 883 - { 884 - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 885 - struct iwl_beacon_filter_cmd cmd = { 886 - IWL_BF_CMD_CONFIG_DEFAULTS, 887 - .bf_enable_beacon_filter = cpu_to_le32(1), 888 - }; 889 - 890 - if (!mvmvif->bf_data.bf_enabled) 891 - return 0; 892 - 893 - if (mvm->cur_ucode == IWL_UCODE_WOWLAN) 894 - cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3); 895 - 896 - mvmvif->bf_data.ba_enabled = enable; 897 - return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0, false); 898 - } 899 - 900 - int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, 901 - struct ieee80211_vif *vif, 902 - u32 flags) 853 + static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, 854 + struct 
ieee80211_vif *vif, 855 + u32 flags, bool d0i3) 903 856 { 904 857 struct iwl_beacon_filter_cmd cmd = {}; 905 858 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); ··· 890 883 891 884 ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags); 892 885 893 - if (!ret) 886 + /* don't change bf_enabled in case of temporary d0i3 configuration */ 887 + if (!ret && !d0i3) 894 888 mvmvif->bf_data.bf_enabled = false; 895 889 896 890 return ret; 891 + } 892 + 893 + int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, 894 + struct ieee80211_vif *vif, 895 + u32 flags) 896 + { 897 + return _iwl_mvm_disable_beacon_filter(mvm, vif, flags, false); 897 898 } 898 899 899 900 static int iwl_mvm_power_set_ps(struct iwl_mvm *mvm) ··· 933 918 } 934 919 935 920 static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm, 936 - struct iwl_power_vifs *vifs) 921 + struct ieee80211_vif *vif) 937 922 { 938 - struct iwl_mvm_vif *mvmvif; 939 - bool ba_enable; 923 + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 924 + struct iwl_beacon_filter_cmd cmd = { 925 + IWL_BF_CMD_CONFIG_DEFAULTS, 926 + .bf_enable_beacon_filter = cpu_to_le32(1), 927 + }; 940 928 941 - if (!vifs->bf_vif) 929 + if (!mvmvif->bf_data.bf_enabled) 942 930 return 0; 943 931 944 - mvmvif = iwl_mvm_vif_from_mac80211(vifs->bf_vif); 932 + if (mvm->cur_ucode == IWL_UCODE_WOWLAN) 933 + cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3); 945 934 946 - ba_enable = !(!mvmvif->pm_enabled || mvm->ps_disabled || 947 - !vifs->bf_vif->bss_conf.ps || 948 - iwl_mvm_vif_low_latency(mvmvif)); 935 + mvmvif->bf_data.ba_enabled = !(!mvmvif->pm_enabled || 936 + mvm->ps_disabled || 937 + !vif->bss_conf.ps || 938 + iwl_mvm_vif_low_latency(mvmvif)); 949 939 950 - return iwl_mvm_update_beacon_abort(mvm, vifs->bf_vif, ba_enable); 940 + return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0, false); 951 941 } 952 942 953 943 int iwl_mvm_power_update_ps(struct iwl_mvm *mvm) ··· 973 953 if (ret) 974 954 return ret; 975 955 976 - return 
iwl_mvm_power_set_ba(mvm, &vifs); 956 + if (vifs.bss_vif) 957 + return iwl_mvm_power_set_ba(mvm, vifs.bss_vif); 958 + 959 + return 0; 977 960 } 978 961 979 962 int iwl_mvm_power_update_mac(struct iwl_mvm *mvm) ··· 1011 988 return ret; 1012 989 } 1013 990 1014 - return iwl_mvm_power_set_ba(mvm, &vifs); 991 + if (vifs.bss_vif) 992 + return iwl_mvm_power_set_ba(mvm, vifs.bss_vif); 993 + 994 + return 0; 1015 995 } 1016 996 1017 997 int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm, ··· 1051 1025 IWL_BF_CMD_CONFIG_D0I3, 1052 1026 .bf_enable_beacon_filter = cpu_to_le32(1), 1053 1027 }; 1054 - ret = _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd_bf, 1055 - flags, true); 1028 + /* 1029 + * When beacon storing is supported - disable beacon filtering 1030 + * altogether - the latest beacon will be sent when exiting d0i3 1031 + */ 1032 + if (fw_has_capa(&mvm->fw->ucode_capa, 1033 + IWL_UCODE_TLV_CAPA_BEACON_STORING)) 1034 + ret = _iwl_mvm_disable_beacon_filter(mvm, vif, flags, 1035 + true); 1036 + else 1037 + ret = _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd_bf, 1038 + flags, true); 1056 1039 } else { 1057 1040 if (mvmvif->bf_data.bf_enabled) 1058 1041 ret = iwl_mvm_enable_beacon_filter(mvm, vif, flags);
+16
drivers/net/wireless/intel/iwlwifi/mvm/quota.c
··· 7 7 * 8 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 9 9 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 10 + * Copyright(c) 2016 Intel Deutschland GmbH 10 11 * 11 12 * This program is free software; you can redistribute it and/or modify 12 13 * it under the terms of version 2 of the GNU General Public License as ··· 34 33 * 35 34 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 36 35 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 36 + * Copyright(c) 2016 Intel Deutschland GmbH 37 37 * All rights reserved. 38 38 * 39 39 * Redistribution and use in source and binary forms, with or without ··· 76 74 int n_interfaces[MAX_BINDINGS]; 77 75 int colors[MAX_BINDINGS]; 78 76 int low_latency[MAX_BINDINGS]; 77 + #ifdef CONFIG_IWLWIFI_DEBUGFS 78 + int dbgfs_min[MAX_BINDINGS]; 79 + #endif 79 80 int n_low_latency_bindings; 80 81 struct ieee80211_vif *disabled_vif; 81 82 }; ··· 133 128 WARN_ON_ONCE(data->colors[id] != mvmvif->phy_ctxt->color); 134 129 135 130 data->n_interfaces[id]++; 131 + 132 + #ifdef CONFIG_IWLWIFI_DEBUGFS 133 + if (mvmvif->dbgfs_quota_min) 134 + data->dbgfs_min[id] = max(data->dbgfs_min[id], 135 + mvmvif->dbgfs_quota_min); 136 + #endif 136 137 137 138 if (iwl_mvm_vif_low_latency(mvmvif) && !data->low_latency[id]) { 138 139 data->n_low_latency_bindings++; ··· 270 259 271 260 if (data.n_interfaces[i] <= 0) 272 261 cmd.quotas[idx].quota = cpu_to_le32(0); 262 + #ifdef CONFIG_IWLWIFI_DEBUGFS 263 + else if (data.dbgfs_min[i]) 264 + cmd.quotas[idx].quota = 265 + cpu_to_le32(data.dbgfs_min[i] * QUOTA_100 / 100); 266 + #endif 273 267 else if (data.n_low_latency_bindings == 1 && n_non_lowlat && 274 268 data.low_latency[i]) 275 269 /*
+2 -1
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
··· 2062 2062 } 2063 2063 2064 2064 /* try decreasing first if applicable */ 2065 - if (weak != TPC_INVALID) { 2065 + if (sr >= RS_PERCENT(IWL_MVM_RS_TPC_SR_NO_INCREASE) && 2066 + weak != TPC_INVALID) { 2066 2067 if (weak_tpt == IWL_INVALID_VALUE && 2067 2068 (strong_tpt == IWL_INVALID_VALUE || 2068 2069 current_tpt >= strong_tpt)) {
+5 -8
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
··· 201 201 struct iwl_rx_mpdu_desc *desc, 202 202 struct ieee80211_rx_status *rx_status) 203 203 { 204 - int energy_a, energy_b, energy_c, max_energy; 204 + int energy_a, energy_b, max_energy; 205 205 206 206 energy_a = desc->energy_a; 207 207 energy_a = energy_a ? -energy_a : S8_MIN; 208 208 energy_b = desc->energy_b; 209 209 energy_b = energy_b ? -energy_b : S8_MIN; 210 - energy_c = desc->energy_c; 211 - energy_c = energy_c ? -energy_c : S8_MIN; 212 210 max_energy = max(energy_a, energy_b); 213 - max_energy = max(max_energy, energy_c); 214 211 215 - IWL_DEBUG_STATS(mvm, "energy In A %d B %d C %d , and max %d\n", 216 - energy_a, energy_b, energy_c, max_energy); 212 + IWL_DEBUG_STATS(mvm, "energy In A %d B %d, and max %d\n", 213 + energy_a, energy_b, max_energy); 217 214 218 215 rx_status->signal = max_energy; 219 216 rx_status->chains = 0; /* TODO: phy info */ 220 217 rx_status->chain_signal[0] = energy_a; 221 218 rx_status->chain_signal[1] = energy_b; 222 - rx_status->chain_signal[2] = energy_c; 219 + rx_status->chain_signal[2] = S8_MIN; 223 220 } 224 221 225 222 static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, ··· 291 294 struct ieee80211_rx_status *rx_status; 292 295 struct iwl_rx_packet *pkt = rxb_addr(rxb); 293 296 struct iwl_rx_mpdu_desc *desc = (void *)pkt->data; 294 - struct ieee80211_hdr *hdr = (void *)(desc + 1); 297 + struct ieee80211_hdr *hdr = (void *)(pkt->data + sizeof(*desc)); 295 298 u32 len = le16_to_cpu(desc->mpdu_len); 296 299 u32 rate_n_flags = le32_to_cpu(desc->rate_n_flags); 297 300 struct ieee80211_sta *sta = NULL;
+6 -3
drivers/net/wireless/intel/iwlwifi/mvm/scan.c
··· 930 930 if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels)) 931 931 return -ENOBUFS; 932 932 933 - if (type == mvm->scan_type) 933 + if (type == mvm->scan_type) { 934 + IWL_DEBUG_SCAN(mvm, 935 + "Ignoring UMAC scan config of the same type\n"); 934 936 return 0; 937 + } 935 938 936 939 cmd_size = sizeof(*scan_config) + mvm->fw->ucode_capa.n_scan_channels; 937 940 ··· 1112 1109 cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params, 1113 1110 vif)); 1114 1111 1115 - if (type == IWL_MVM_SCAN_SCHED) 1112 + if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT) 1116 1113 cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE); 1117 1114 1118 1115 if (iwl_mvm_scan_use_ebs(mvm, vif)) ··· 1354 1351 1355 1352 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { 1356 1353 hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0); 1357 - ret = iwl_mvm_scan_umac(mvm, vif, &params, IWL_MVM_SCAN_SCHED); 1354 + ret = iwl_mvm_scan_umac(mvm, vif, &params, type); 1358 1355 } else { 1359 1356 hcmd.id = SCAN_OFFLOAD_REQUEST_CMD; 1360 1357 ret = iwl_mvm_scan_lmac(mvm, vif, &params);
+36 -14
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
··· 7 7 * 8 8 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. 9 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 10 + * Copyright(c) 2016 Intel Deutschland GmbH 10 11 * 11 12 * This program is free software; you can redistribute it and/or modify 12 13 * it under the terms of version 2 of the GNU General Public License as ··· 34 33 * 35 34 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. 36 35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 36 + * Copyright(c) 2016 Intel Deutschland GmbH 37 37 * All rights reserved. 38 38 * 39 39 * Redistribution and use in source and binary forms, with or without ··· 69 67 #include "mvm.h" 70 68 #include "sta.h" 71 69 #include "rs.h" 70 + 71 + /* 72 + * New version of ADD_STA_sta command added new fields at the end of the 73 + * structure, so sending the size of the relevant API's structure is enough to 74 + * support both API versions. 75 + */ 76 + static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm) 77 + { 78 + return iwl_mvm_has_new_rx_api(mvm) ? 
79 + sizeof(struct iwl_mvm_add_sta_cmd) : 80 + sizeof(struct iwl_mvm_add_sta_cmd_v7); 81 + } 72 82 73 83 static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, 74 84 enum nl80211_iftype iftype) ··· 201 187 cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT); 202 188 203 189 status = ADD_STA_SUCCESS; 204 - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(add_sta_cmd), 190 + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, 191 + iwl_mvm_add_sta_cmd_size(mvm), 205 192 &add_sta_cmd, &status); 206 193 if (ret) 207 194 return ret; 208 195 209 - switch (status) { 196 + switch (status & IWL_ADD_STA_STATUS_MASK) { 210 197 case ADD_STA_SUCCESS: 211 198 IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n"); 212 199 break; ··· 372 357 cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW); 373 358 374 359 status = ADD_STA_SUCCESS; 375 - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), 360 + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, 361 + iwl_mvm_add_sta_cmd_size(mvm), 376 362 &cmd, &status); 377 363 if (ret) 378 364 return ret; 379 365 380 - switch (status) { 366 + switch (status & IWL_ADD_STA_STATUS_MASK) { 381 367 case ADD_STA_SUCCESS: 382 368 IWL_DEBUG_INFO(mvm, "Frames for staid %d will drained in fw\n", 383 369 mvmsta->sta_id); ··· 639 623 if (addr) 640 624 memcpy(cmd.addr, addr, ETH_ALEN); 641 625 642 - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), 626 + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, 627 + iwl_mvm_add_sta_cmd_size(mvm), 643 628 &cmd, &status); 644 629 if (ret) 645 630 return ret; 646 631 647 - switch (status) { 632 + switch (status & IWL_ADD_STA_STATUS_MASK) { 648 633 case ADD_STA_SUCCESS: 649 634 IWL_DEBUG_INFO(mvm, "Internal station added.\n"); 650 635 return 0; ··· 836 819 #define IWL_MAX_RX_BA_SESSIONS 16 837 820 838 821 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 839 - int tid, u16 ssn, bool start) 822 + int tid, u16 ssn, bool start, u8 buf_size) 840 823 { 841 824 struct iwl_mvm_sta *mvm_sta 
= iwl_mvm_sta_from_mac80211(sta); 842 825 struct iwl_mvm_add_sta_cmd cmd = {}; ··· 856 839 if (start) { 857 840 cmd.add_immediate_ba_tid = (u8) tid; 858 841 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn); 842 + cmd.rx_ba_window = cpu_to_le16((u16)buf_size); 859 843 } else { 860 844 cmd.remove_immediate_ba_tid = (u8) tid; 861 845 } ··· 864 846 STA_MODIFY_REMOVE_BA_TID; 865 847 866 848 status = ADD_STA_SUCCESS; 867 - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), 849 + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, 850 + iwl_mvm_add_sta_cmd_size(mvm), 868 851 &cmd, &status); 869 852 if (ret) 870 853 return ret; 871 854 872 - switch (status) { 855 + switch (status & IWL_ADD_STA_STATUS_MASK) { 873 856 case ADD_STA_SUCCESS: 874 857 IWL_DEBUG_INFO(mvm, "RX BA Session %sed in fw\n", 875 858 start ? "start" : "stopp"); ··· 923 904 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg); 924 905 925 906 status = ADD_STA_SUCCESS; 926 - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), 907 + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, 908 + iwl_mvm_add_sta_cmd_size(mvm), 927 909 &cmd, &status); 928 910 if (ret) 929 911 return ret; 930 912 931 - switch (status) { 913 + switch (status & IWL_ADD_STA_STATUS_MASK) { 932 914 case ADD_STA_SUCCESS: 933 915 break; 934 916 default: ··· 1660 1640 }; 1661 1641 int ret; 1662 1642 1663 - ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd); 1643 + ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, 1644 + iwl_mvm_add_sta_cmd_size(mvm), &cmd); 1664 1645 if (ret) 1665 1646 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); 1666 1647 } ··· 1752 1731 1753 1732 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 1754 1733 CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK, 1755 - sizeof(cmd), &cmd); 1734 + iwl_mvm_add_sta_cmd_size(mvm), &cmd); 1756 1735 if (ret) 1757 1736 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); 1758 1737 } ··· 1787 1766 }; 1788 1767 int ret; 1789 1768 1790 - ret = 
iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd); 1769 + ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, 1770 + iwl_mvm_add_sta_cmd_size(mvm), &cmd); 1791 1771 if (ret) 1792 1772 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); 1793 1773 }
+1 -1
drivers/net/wireless/intel/iwlwifi/mvm/sta.h
··· 401 401 402 402 /* AMPDU */ 403 403 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 404 - int tid, u16 ssn, bool start); 404 + int tid, u16 ssn, bool start, u8 buf_size); 405 405 int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 406 406 struct ieee80211_sta *sta, u16 tid, u16 *ssn); 407 407 int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+8 -9
drivers/net/wireless/intel/iwlwifi/mvm/tt.c
··· 194 194 return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0, sizeof(extcmd), &extcmd); 195 195 } 196 196 197 - int iwl_mvm_get_temp(struct iwl_mvm *mvm) 197 + int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp) 198 198 { 199 199 struct iwl_notification_wait wait_temp_notif; 200 200 static u16 temp_notif[] = { WIDE_ID(PHY_OPS_GROUP, 201 201 DTS_MEASUREMENT_NOTIF_WIDE) }; 202 - int ret, temp; 202 + int ret; 203 203 204 204 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR)) 205 205 temp_notif[0] = DTS_MEASUREMENT_NOTIFICATION; ··· 208 208 209 209 iwl_init_notification_wait(&mvm->notif_wait, &wait_temp_notif, 210 210 temp_notif, ARRAY_SIZE(temp_notif), 211 - iwl_mvm_temp_notif_wait, &temp); 211 + iwl_mvm_temp_notif_wait, temp); 212 212 213 213 ret = iwl_mvm_get_temp_cmd(mvm); 214 214 if (ret) { ··· 219 219 220 220 ret = iwl_wait_notification(&mvm->notif_wait, &wait_temp_notif, 221 221 IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT); 222 - if (ret) { 222 + if (ret) 223 223 IWL_ERR(mvm, "Getting the temperature timed out\n"); 224 - return ret; 225 - } 226 224 227 - return temp; 225 + return ret; 228 226 } 229 227 230 228 static void check_exit_ctkill(struct work_struct *work) ··· 231 233 struct iwl_mvm *mvm; 232 234 u32 duration; 233 235 s32 temp; 236 + int ret; 234 237 235 238 tt = container_of(work, struct iwl_mvm_tt_mgmt, ct_kill_exit.work); 236 239 mvm = container_of(tt, struct iwl_mvm, thermal_throttle); ··· 249 250 goto reschedule; 250 251 } 251 252 252 - temp = iwl_mvm_get_temp(mvm); 253 + ret = iwl_mvm_get_temp(mvm, &temp); 253 254 254 255 iwl_mvm_unref(mvm, IWL_MVM_REF_CHECK_CTKILL); 255 256 256 257 __iwl_mvm_mac_stop(mvm); 257 258 258 - if (temp < 0) 259 + if (ret) 259 260 goto reschedule; 260 261 261 262 IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", temp);
+33
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
··· 736 736 iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r); 737 737 } 738 738 739 + static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm, 740 + u32 status) 741 + { 742 + struct iwl_fw_dbg_trigger_tlv *trig; 743 + struct iwl_fw_dbg_trigger_tx_status *status_trig; 744 + int i; 745 + 746 + if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TX_STATUS)) 747 + return; 748 + 749 + trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TX_STATUS); 750 + status_trig = (void *)trig->data; 751 + 752 + if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig)) 753 + return; 754 + 755 + for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) { 756 + /* don't collect on status 0 */ 757 + if (!status_trig->statuses[i].status) 758 + break; 759 + 760 + if (status_trig->statuses[i].status != (status & TX_STATUS_MSK)) 761 + continue; 762 + 763 + iwl_mvm_fw_dbg_collect_trig(mvm, trig, 764 + "Tx status %d was received", 765 + status & TX_STATUS_MSK); 766 + break; 767 + } 768 + } 769 + 739 770 static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, 740 771 struct iwl_rx_packet *pkt) 741 772 { ··· 814 783 default: 815 784 break; 816 785 } 786 + 787 + iwl_mvm_tx_status_check_trigger(mvm, status); 817 788 818 789 info->status.rates[0].count = tx_resp->failure_frame + 1; 819 790 iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate),
+2 -4
drivers/net/wireless/intel/iwlwifi/mvm/utils.c
··· 937 937 } 938 938 939 939 int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 940 - bool value) 940 + bool prev) 941 941 { 942 942 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 943 943 int res; 944 944 945 945 lockdep_assert_held(&mvm->mutex); 946 946 947 - if (mvmvif->low_latency == value) 947 + if (iwl_mvm_vif_low_latency(mvmvif) == prev) 948 948 return 0; 949 - 950 - mvmvif->low_latency = value; 951 949 952 950 res = iwl_mvm_update_quotas(mvm, false, NULL); 953 951 if (res)
+131 -3
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
··· 7 7 * 8 8 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. 9 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 10 + * Copyright(c) 2016 Intel Deutschland GmbH 10 11 * 11 12 * This program is free software; you can redistribute it and/or modify 12 13 * it under the terms of version 2 of the GNU General Public License as ··· 67 66 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 68 67 69 68 #include <linux/module.h> 69 + #include <linux/pm_runtime.h> 70 70 #include <linux/pci.h> 71 71 #include <linux/pci-aspm.h> 72 72 #include <linux/acpi.h> ··· 629 627 if (ret) 630 628 goto out_free_drv; 631 629 630 + /* if RTPM is in use, enable it in our device */ 631 + if (iwl_trans->runtime_pm_mode != IWL_PLAT_PM_MODE_DISABLED) { 632 + pm_runtime_set_active(&pdev->dev); 633 + pm_runtime_set_autosuspend_delay(&pdev->dev, 634 + iwlwifi_mod_params.d0i3_entry_delay); 635 + pm_runtime_use_autosuspend(&pdev->dev); 636 + pm_runtime_allow(&pdev->dev); 637 + } 638 + 632 639 return 0; 633 640 634 641 out_free_drv: ··· 704 693 return 0; 705 694 } 706 695 707 - static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume); 696 + int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans) 697 + { 698 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 699 + int ret; 700 + 701 + if (test_bit(STATUS_FW_ERROR, &trans->status)) 702 + return 0; 703 + 704 + set_bit(STATUS_TRANS_GOING_IDLE, &trans->status); 705 + 706 + /* config the fw */ 707 + ret = iwl_op_mode_enter_d0i3(trans->op_mode); 708 + if (ret == 1) { 709 + IWL_DEBUG_RPM(trans, "aborting d0i3 entrance\n"); 710 + clear_bit(STATUS_TRANS_GOING_IDLE, &trans->status); 711 + return -EBUSY; 712 + } 713 + if (ret) 714 + goto err; 715 + 716 + ret = wait_event_timeout(trans_pcie->d0i3_waitq, 717 + test_bit(STATUS_TRANS_IDLE, &trans->status), 718 + msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT)); 719 + if (!ret) { 720 + IWL_ERR(trans, "Timeout entering D0i3\n"); 721 + ret = -ETIMEDOUT; 722 + goto err; 723 
+ } 724 + 725 + clear_bit(STATUS_TRANS_GOING_IDLE, &trans->status); 726 + 727 + return 0; 728 + err: 729 + clear_bit(STATUS_TRANS_GOING_IDLE, &trans->status); 730 + iwl_trans_fw_error(trans); 731 + return ret; 732 + } 733 + 734 + int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans) 735 + { 736 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 737 + int ret; 738 + 739 + /* sometimes a D0i3 entry is not followed through */ 740 + if (!test_bit(STATUS_TRANS_IDLE, &trans->status)) 741 + return 0; 742 + 743 + /* config the fw */ 744 + ret = iwl_op_mode_exit_d0i3(trans->op_mode); 745 + if (ret) 746 + goto err; 747 + 748 + /* we clear STATUS_TRANS_IDLE only when D0I3_END command is completed */ 749 + 750 + ret = wait_event_timeout(trans_pcie->d0i3_waitq, 751 + !test_bit(STATUS_TRANS_IDLE, &trans->status), 752 + msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT)); 753 + if (!ret) { 754 + IWL_ERR(trans, "Timeout exiting D0i3\n"); 755 + ret = -ETIMEDOUT; 756 + goto err; 757 + } 758 + 759 + return 0; 760 + err: 761 + clear_bit(STATUS_TRANS_IDLE, &trans->status); 762 + iwl_trans_fw_error(trans); 763 + return ret; 764 + } 765 + 766 + #ifdef CONFIG_IWLWIFI_PCIE_RTPM 767 + static int iwl_pci_runtime_suspend(struct device *device) 768 + { 769 + struct pci_dev *pdev = to_pci_dev(device); 770 + struct iwl_trans *trans = pci_get_drvdata(pdev); 771 + int ret; 772 + 773 + IWL_DEBUG_RPM(trans, "entering runtime suspend\n"); 774 + 775 + if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) { 776 + ret = iwl_pci_fw_enter_d0i3(trans); 777 + if (ret < 0) 778 + return ret; 779 + } 780 + 781 + trans->system_pm_mode = IWL_PLAT_PM_MODE_D0I3; 782 + 783 + iwl_trans_d3_suspend(trans, false, false); 784 + 785 + return 0; 786 + } 787 + 788 + static int iwl_pci_runtime_resume(struct device *device) 789 + { 790 + struct pci_dev *pdev = to_pci_dev(device); 791 + struct iwl_trans *trans = pci_get_drvdata(pdev); 792 + enum iwl_d3_status d3_status; 793 + 794 + IWL_DEBUG_RPM(trans, "exiting runtime 
suspend (resume)\n"); 795 + 796 + iwl_trans_d3_resume(trans, &d3_status, false, false); 797 + 798 + if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) 799 + return iwl_pci_fw_exit_d0i3(trans); 800 + 801 + return 0; 802 + } 803 + #endif /* CONFIG_IWLWIFI_PCIE_RTPM */ 804 + 805 + static const struct dev_pm_ops iwl_dev_pm_ops = { 806 + SET_SYSTEM_SLEEP_PM_OPS(iwl_pci_suspend, 807 + iwl_pci_resume) 808 + #ifdef CONFIG_IWLWIFI_PCIE_RTPM 809 + SET_RUNTIME_PM_OPS(iwl_pci_runtime_suspend, 810 + iwl_pci_runtime_resume, 811 + NULL) 812 + #endif /* CONFIG_IWLWIFI_PCIE_RTPM */ 813 + }; 708 814 709 815 #define IWL_PM_OPS (&iwl_dev_pm_ops) 710 816 711 - #else 817 + #else /* CONFIG_PM_SLEEP */ 712 818 713 819 #define IWL_PM_OPS NULL 714 820 715 - #endif 821 + #endif /* CONFIG_PM_SLEEP */ 716 822 717 823 static struct pci_driver iwl_pci_driver = { 718 824 .name = DRV_NAME,
+31 -11
drivers/net/wireless/intel/iwlwifi/pcie/internal.h
··· 2 2 * 3 3 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved. 4 4 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 5 + * Copyright(c) 2016 Intel Deutschland GmbH 5 6 * 6 7 * Portions of this file are derived from the ipw3945 project, as well 7 8 * as portions of the ieee80211 subsystem header files. ··· 57 56 #define RX_NUM_QUEUES 1 58 57 #define RX_POST_REQ_ALLOC 2 59 58 #define RX_CLAIM_REQ_ALLOC 8 60 - #define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES) 61 - #define RX_LOW_WATERMARK 8 59 + #define RX_PENDING_WATERMARK 16 62 60 63 61 struct iwl_host_cmd; 64 62 65 63 /*This file includes the declaration that are internal to the 66 64 * trans_pcie layer */ 67 65 66 + /** 67 + * struct iwl_rx_mem_buffer 68 + * @page_dma: bus address of rxb page 69 + * @page: driver's pointer to the rxb page 70 + * @vid: index of this rxb in the global table 71 + */ 68 72 struct iwl_rx_mem_buffer { 69 73 dma_addr_t page_dma; 70 74 struct page *page; 75 + u16 vid; 71 76 struct list_head list; 72 77 }; 73 78 ··· 97 90 98 91 /** 99 92 * struct iwl_rxq - Rx queue 100 - * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) 93 + * @id: queue index 94 + * @bd: driver's pointer to buffer of receive buffer descriptors (rbd). 95 + * Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices. 
101 96 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) 97 + * @ubd: driver's pointer to buffer of used receive buffer descriptors (rbd) 98 + * @ubd_dma: physical address of buffer of used receive buffer descriptors (rbd) 102 99 * @read: Shared index to newest available Rx buffer 103 100 * @write: Shared index to oldest written Rx packet 104 101 * @free_count: Number of pre-allocated buffers in rx_free ··· 114 103 * @rb_stts: driver's pointer to receive buffer status 115 104 * @rb_stts_dma: bus address of receive buffer status 116 105 * @lock: 117 - * @pool: initial pool of iwl_rx_mem_buffer for the queue 118 - * @queue: actual rx queue 106 + * @queue: actual rx queue. Not used for multi-rx queue. 119 107 * 120 108 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers 121 109 */ 122 110 struct iwl_rxq { 123 - __le32 *bd; 111 + int id; 112 + void *bd; 124 113 dma_addr_t bd_dma; 114 + __le32 *used_bd; 115 + dma_addr_t used_bd_dma; 125 116 u32 read; 126 117 u32 write; 127 118 u32 free_count; 128 119 u32 used_count; 129 120 u32 write_actual; 121 + u32 queue_size; 130 122 struct list_head rx_free; 131 123 struct list_head rx_used; 132 124 bool need_update; 133 125 struct iwl_rb_status *rb_stts; 134 126 dma_addr_t rb_stts_dma; 135 127 spinlock_t lock; 136 - struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE]; 128 + struct napi_struct napi; 137 129 struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; 138 130 }; 139 131 140 132 /** 141 133 * struct iwl_rb_allocator - Rx allocator 142 - * @pool: initial pool of allocator 143 134 * @req_pending: number of requests the allcator had not processed yet 144 135 * @req_ready: number of requests honored and ready for claiming 145 136 * @rbd_allocated: RBDs with pages allocated and ready to be handled to ··· 153 140 * @rx_alloc: work struct for background calls 154 141 */ 155 142 struct iwl_rb_allocator { 156 - struct iwl_rx_mem_buffer pool[RX_POOL_SIZE]; 157 143 atomic_t req_pending; 158 144 atomic_t 
req_ready; 159 145 struct list_head rbd_allocated; ··· 292 280 bool ampdu; 293 281 bool block; 294 282 unsigned long wd_timeout; 283 + struct sk_buff_head overflow_q; 295 284 }; 296 285 297 286 static inline dma_addr_t ··· 310 297 /** 311 298 * struct iwl_trans_pcie - PCIe transport specific data 312 299 * @rxq: all the RX queue data 300 + * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues 301 + * @global_table: table mapping received VID from hw to rxb 313 302 * @rba: allocator for RX replenishing 314 303 * @drv - pointer to iwl_drv 315 304 * @trans: pointer to the generic transport area ··· 338 323 * @fw_mon_size: size of the buffer for the firmware monitor 339 324 */ 340 325 struct iwl_trans_pcie { 341 - struct iwl_rxq rxq; 326 + struct iwl_rxq *rxq; 327 + struct iwl_rx_mem_buffer rx_pool[MQ_RX_POOL_SIZE]; 328 + struct iwl_rx_mem_buffer *global_table[MQ_RX_TABLE_SIZE]; 342 329 struct iwl_rb_allocator rba; 343 330 struct iwl_trans *trans; 344 331 struct iwl_drv *drv; 345 332 346 333 struct net_device napi_dev; 347 - struct napi_struct napi; 348 334 349 335 struct __percpu iwl_tso_hdr_page *tso_hdr_page; 350 336 ··· 375 359 bool ucode_write_complete; 376 360 wait_queue_head_t ucode_write_waitq; 377 361 wait_queue_head_t wait_command_queue; 362 + wait_queue_head_t d0i3_waitq; 378 363 379 364 u8 cmd_queue; 380 365 u8 cmd_fifo; ··· 595 578 return 0; 596 579 } 597 580 #endif 581 + 582 + int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans); 583 + int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans); 598 584 599 585 #endif /* __iwl_trans_int_pcie_h__ */
+356 -180
drivers/net/wireless/intel/iwlwifi/pcie/rx.c
··· 2 2 * 3 3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. 4 4 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 5 + * Copyright(c) 2016 Intel Deutschland GmbH 5 6 * 6 7 * Portions of this file are derived from the ipw3945 project, as well 7 8 * as portions of the ieee80211 subsystem header files. ··· 141 140 */ 142 141 static int iwl_rxq_space(const struct iwl_rxq *rxq) 143 142 { 144 - /* Make sure RX_QUEUE_SIZE is a power of 2 */ 145 - BUILD_BUG_ON(RX_QUEUE_SIZE & (RX_QUEUE_SIZE - 1)); 143 + /* Make sure rx queue size is a power of 2 */ 144 + WARN_ON(rxq->queue_size & (rxq->queue_size - 1)); 146 145 147 146 /* 148 147 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity ··· 150 149 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well 151 150 * defined for negative dividends. 152 151 */ 153 - return (rxq->read - rxq->write - 1) & (RX_QUEUE_SIZE - 1); 152 + return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1); 154 153 } 155 154 156 155 /* ··· 159 158 static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr) 160 159 { 161 160 return cpu_to_le32((u32)(dma_addr >> 8)); 161 + } 162 + 163 + static void iwl_pcie_write_prph_64(struct iwl_trans *trans, u64 ofs, u64 val) 164 + { 165 + iwl_write_prph(trans, ofs, val & 0xffffffff); 166 + iwl_write_prph(trans, ofs + 4, val >> 32); 162 167 } 163 168 164 169 /* ··· 180 173 /* 181 174 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue 182 175 */ 183 - static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans) 176 + static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, 177 + struct iwl_rxq *rxq) 184 178 { 185 - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 186 - struct iwl_rxq *rxq = &trans_pcie->rxq; 187 179 u32 reg; 188 180 189 181 lockdep_assert_held(&rxq->lock); ··· 207 201 } 208 202 209 203 rxq->write_actual = round_down(rxq->write, 8); 210 - iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, 
rxq->write_actual); 204 + if (trans->cfg->mq_rx_supported) 205 + iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(rxq->id), 206 + rxq->write_actual); 207 + else 208 + iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); 211 209 } 212 210 213 211 static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans) 214 212 { 215 213 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 216 - struct iwl_rxq *rxq = &trans_pcie->rxq; 214 + int i; 215 + 216 + for (i = 0; i < trans->num_rx_queues; i++) { 217 + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 218 + 219 + if (!rxq->need_update) 220 + continue; 221 + spin_lock(&rxq->lock); 222 + iwl_pcie_rxq_inc_wr_ptr(trans, rxq); 223 + rxq->need_update = false; 224 + spin_unlock(&rxq->lock); 225 + } 226 + } 227 + 228 + static void iwl_pcie_rxq_mq_restock(struct iwl_trans *trans, 229 + struct iwl_rxq *rxq) 230 + { 231 + struct iwl_rx_mem_buffer *rxb; 232 + 233 + /* 234 + * If the device isn't enabled - no need to try to add buffers... 235 + * This can happen when we stop the device and still have an interrupt 236 + * pending. We stop the APM before we sync the interrupts because we 237 + * have to (see comment there). On the other hand, since the APM is 238 + * stopped, we cannot access the HW (in particular not prph). 239 + * So don't try to restock if the APM has been already stopped. 
240 + */ 241 + if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) 242 + return; 217 243 218 244 spin_lock(&rxq->lock); 245 + while (rxq->free_count) { 246 + __le64 *bd = (__le64 *)rxq->bd; 219 247 220 - if (!rxq->need_update) 221 - goto exit_unlock; 248 + /* Get next free Rx buffer, remove from free list */ 249 + rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, 250 + list); 251 + list_del(&rxb->list); 222 252 223 - iwl_pcie_rxq_inc_wr_ptr(trans); 224 - rxq->need_update = false; 225 - 226 - exit_unlock: 253 + /* 12 first bits are expected to be empty */ 254 + WARN_ON(rxb->page_dma & DMA_BIT_MASK(12)); 255 + /* Point to Rx buffer via next RBD in circular buffer */ 256 + bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid); 257 + rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK; 258 + rxq->free_count--; 259 + } 227 260 spin_unlock(&rxq->lock); 261 + 262 + /* 263 + * If we've added more space for the firmware to place data, tell it. 264 + * Increment device's write pointer in multiples of 8. 265 + */ 266 + if (rxq->write_actual != (rxq->write & ~0x7)) { 267 + spin_lock(&rxq->lock); 268 + iwl_pcie_rxq_inc_wr_ptr(trans, rxq); 269 + spin_unlock(&rxq->lock); 270 + } 228 271 } 229 272 230 273 /* ··· 287 232 * also updates the memory address in the firmware to reference the new 288 233 * target buffer. 
289 234 */ 290 - static void iwl_pcie_rxq_restock(struct iwl_trans *trans) 235 + static void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq) 291 236 { 292 - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 293 - struct iwl_rxq *rxq = &trans_pcie->rxq; 294 237 struct iwl_rx_mem_buffer *rxb; 295 238 296 239 /* ··· 304 251 305 252 spin_lock(&rxq->lock); 306 253 while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) { 254 + __le32 *bd = (__le32 *)rxq->bd; 307 255 /* The overwritten rxb must be a used one */ 308 256 rxb = rxq->queue[rxq->write]; 309 257 BUG_ON(rxb && rxb->page); ··· 315 261 list_del(&rxb->list); 316 262 317 263 /* Point to Rx buffer via next RBD in circular buffer */ 318 - rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma); 264 + bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma); 319 265 rxq->queue[rxq->write] = rxb; 320 266 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; 321 267 rxq->free_count--; ··· 326 272 * Increment device's write pointer in multiples of 8. */ 327 273 if (rxq->write_actual != (rxq->write & ~0x7)) { 328 274 spin_lock(&rxq->lock); 329 - iwl_pcie_rxq_inc_wr_ptr(trans); 275 + iwl_pcie_rxq_inc_wr_ptr(trans, rxq); 330 276 spin_unlock(&rxq->lock); 331 277 } 332 278 } ··· 339 285 gfp_t priority) 340 286 { 341 287 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 342 - struct iwl_rxq *rxq = &trans_pcie->rxq; 343 288 struct page *page; 344 289 gfp_t gfp_mask = priority; 345 - 346 - if (rxq->free_count > RX_LOW_WATERMARK) 347 - gfp_mask |= __GFP_NOWARN; 348 290 349 291 if (trans_pcie->rx_page_order > 0) 350 292 gfp_mask |= __GFP_COMP; ··· 351 301 if (net_ratelimit()) 352 302 IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n", 353 303 trans_pcie->rx_page_order); 354 - /* Issue an error if the hardware has consumed more than half 355 - * of its free buffer list and we don't have enough 356 - * pre-allocated buffers. 
304 + /* 305 + * Issue an error if we don't have enough pre-allocated 306 + * buffers. 357 307 ` */ 358 - if (rxq->free_count <= RX_LOW_WATERMARK && 359 - iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) && 360 - net_ratelimit()) 308 + if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit()) 361 309 IWL_CRIT(trans, 362 - "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n", 363 - rxq->free_count); 310 + "Failed to alloc_pages\n"); 364 311 return NULL; 365 312 } 366 313 return page; ··· 372 325 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly 373 326 * allocated buffers. 374 327 */ 375 - static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority) 328 + static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority, 329 + struct iwl_rxq *rxq) 376 330 { 377 331 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 378 - struct iwl_rxq *rxq = &trans_pcie->rxq; 379 332 struct iwl_rx_mem_buffer *rxb; 380 333 struct page *page; 381 334 ··· 419 372 __free_pages(page, trans_pcie->rx_page_order); 420 373 return; 421 374 } 422 - /* dma address must be no more than 36 bits */ 423 - BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); 424 - /* and also 256 byte aligned! 
*/ 425 - BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); 426 375 427 376 spin_lock(&rxq->lock); 428 377 ··· 429 386 } 430 387 } 431 388 432 - static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans) 389 + static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans) 433 390 { 434 391 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 435 - struct iwl_rxq *rxq = &trans_pcie->rxq; 436 392 int i; 437 393 438 - lockdep_assert_held(&rxq->lock); 439 - 440 - for (i = 0; i < RX_QUEUE_SIZE; i++) { 441 - if (!rxq->pool[i].page) 394 + for (i = 0; i < MQ_RX_POOL_SIZE; i++) { 395 + if (!trans_pcie->rx_pool[i].page) 442 396 continue; 443 - dma_unmap_page(trans->dev, rxq->pool[i].page_dma, 397 + dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma, 444 398 PAGE_SIZE << trans_pcie->rx_page_order, 445 399 DMA_FROM_DEVICE); 446 - __free_pages(rxq->pool[i].page, trans_pcie->rx_page_order); 447 - rxq->pool[i].page = NULL; 400 + __free_pages(trans_pcie->rx_pool[i].page, 401 + trans_pcie->rx_page_order); 402 + trans_pcie->rx_pool[i].page = NULL; 448 403 } 449 - } 450 - 451 - /* 452 - * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free 453 - * 454 - * When moving to rx_free an page is allocated for the slot. 455 - * 456 - * Also restock the Rx queue via iwl_pcie_rxq_restock. 
457 - * This is called only during initialization 458 - */ 459 - static void iwl_pcie_rx_replenish(struct iwl_trans *trans) 460 - { 461 - iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL); 462 - 463 - iwl_pcie_rxq_restock(trans); 464 404 } 465 405 466 406 /* ··· 470 444 while (pending) { 471 445 int i; 472 446 struct list_head local_allocated; 447 + gfp_t gfp_mask = GFP_KERNEL; 448 + 449 + /* Do not post a warning if there are only a few requests */ 450 + if (pending < RX_PENDING_WATERMARK) 451 + gfp_mask |= __GFP_NOWARN; 473 452 474 453 INIT_LIST_HEAD(&local_allocated); 475 454 ··· 494 463 BUG_ON(rxb->page); 495 464 496 465 /* Alloc a new receive buffer */ 497 - page = iwl_pcie_rx_alloc_page(trans, GFP_KERNEL); 466 + page = iwl_pcie_rx_alloc_page(trans, gfp_mask); 498 467 if (!page) 499 468 continue; 500 469 rxb->page = page; ··· 508 477 __free_pages(page, trans_pcie->rx_page_order); 509 478 continue; 510 479 } 511 - /* dma address must be no more than 36 bits */ 512 - BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); 513 - /* and also 256 byte aligned! */ 514 - BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); 515 480 516 481 /* move the allocated entry to the out list */ 517 482 list_move(&rxb->list, &local_allocated); ··· 588 561 static int iwl_pcie_rx_alloc(struct iwl_trans *trans) 589 562 { 590 563 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 591 - struct iwl_rxq *rxq = &trans_pcie->rxq; 592 564 struct iwl_rb_allocator *rba = &trans_pcie->rba; 593 565 struct device *dev = trans->dev; 566 + int i; 567 + int free_size = trans->cfg->mq_rx_supported ? 
sizeof(__le64) : 568 + sizeof(__le32); 594 569 595 - memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq)); 596 - 597 - spin_lock_init(&rxq->lock); 598 - spin_lock_init(&rba->lock); 599 - 600 - if (WARN_ON(rxq->bd || rxq->rb_stts)) 570 + if (WARN_ON(trans_pcie->rxq)) 601 571 return -EINVAL; 602 572 603 - /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */ 604 - rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, 605 - &rxq->bd_dma, GFP_KERNEL); 606 - if (!rxq->bd) 607 - goto err_bd; 573 + trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq), 574 + GFP_KERNEL); 575 + if (!trans_pcie->rxq) 576 + return -EINVAL; 608 577 609 - /*Allocate the driver's pointer to receive buffer status */ 610 - rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts), 611 - &rxq->rb_stts_dma, GFP_KERNEL); 612 - if (!rxq->rb_stts) 613 - goto err_rb_stts; 578 + spin_lock_init(&rba->lock); 614 579 580 + for (i = 0; i < trans->num_rx_queues; i++) { 581 + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 582 + 583 + spin_lock_init(&rxq->lock); 584 + if (trans->cfg->mq_rx_supported) 585 + rxq->queue_size = MQ_RX_TABLE_SIZE; 586 + else 587 + rxq->queue_size = RX_QUEUE_SIZE; 588 + 589 + /* 590 + * Allocate the circular buffer of Read Buffer Descriptors 591 + * (RBDs) 592 + */ 593 + rxq->bd = dma_zalloc_coherent(dev, 594 + free_size * rxq->queue_size, 595 + &rxq->bd_dma, GFP_KERNEL); 596 + if (!rxq->bd) 597 + goto err; 598 + 599 + if (trans->cfg->mq_rx_supported) { 600 + rxq->used_bd = dma_zalloc_coherent(dev, 601 + sizeof(__le32) * 602 + rxq->queue_size, 603 + &rxq->used_bd_dma, 604 + GFP_KERNEL); 605 + if (!rxq->used_bd) 606 + goto err; 607 + } 608 + 609 + /*Allocate the driver's pointer to receive buffer status */ 610 + rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts), 611 + &rxq->rb_stts_dma, 612 + GFP_KERNEL); 613 + if (!rxq->rb_stts) 614 + goto err; 615 + } 615 616 return 0; 616 617 617 - err_rb_stts: 618 - dma_free_coherent(dev, 
sizeof(__le32) * RX_QUEUE_SIZE, 619 - rxq->bd, rxq->bd_dma); 620 - rxq->bd_dma = 0; 621 - rxq->bd = NULL; 622 - err_bd: 618 + err: 619 + for (i = 0; i < trans->num_rx_queues; i++) { 620 + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 621 + 622 + if (rxq->bd) 623 + dma_free_coherent(dev, free_size * rxq->queue_size, 624 + rxq->bd, rxq->bd_dma); 625 + rxq->bd_dma = 0; 626 + rxq->bd = NULL; 627 + 628 + if (rxq->rb_stts) 629 + dma_free_coherent(trans->dev, 630 + sizeof(struct iwl_rb_status), 631 + rxq->rb_stts, rxq->rb_stts_dma); 632 + 633 + if (rxq->used_bd) 634 + dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size, 635 + rxq->used_bd, rxq->used_bd_dma); 636 + rxq->used_bd_dma = 0; 637 + rxq->used_bd = NULL; 638 + } 639 + kfree(trans_pcie->rxq); 640 + 623 641 return -ENOMEM; 624 642 } 625 643 ··· 731 659 iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE); 732 660 } 733 661 734 - static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) 662 + static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans) 735 663 { 664 + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 665 + u32 rb_size, enabled = 0; 736 666 int i; 737 667 668 + switch (trans_pcie->rx_buf_size) { 669 + case IWL_AMSDU_4K: 670 + rb_size = RFH_RXF_DMA_RB_SIZE_4K; 671 + break; 672 + case IWL_AMSDU_8K: 673 + rb_size = RFH_RXF_DMA_RB_SIZE_8K; 674 + break; 675 + case IWL_AMSDU_12K: 676 + rb_size = RFH_RXF_DMA_RB_SIZE_12K; 677 + break; 678 + default: 679 + WARN_ON(1); 680 + rb_size = RFH_RXF_DMA_RB_SIZE_4K; 681 + } 682 + 683 + /* Stop Rx DMA */ 684 + iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0); 685 + /* disable free amd used rx queue operation */ 686 + iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, 0); 687 + 688 + for (i = 0; i < trans->num_rx_queues; i++) { 689 + /* Tell device where to find RBD free table in DRAM */ 690 + iwl_pcie_write_prph_64(trans, RFH_Q_FRBDCB_BA_LSB(i), 691 + (u64)(trans_pcie->rxq[i].bd_dma)); 692 + /* Tell device where to find RBD used table in DRAM */ 693 + 
iwl_pcie_write_prph_64(trans, RFH_Q_URBDCB_BA_LSB(i), 694 + (u64)(trans_pcie->rxq[i].used_bd_dma)); 695 + /* Tell device where in DRAM to update its Rx status */ 696 + iwl_pcie_write_prph_64(trans, RFH_Q_URBD_STTS_WPTR_LSB(i), 697 + trans_pcie->rxq[i].rb_stts_dma); 698 + /* Reset device indice tables */ 699 + iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(i), 0); 700 + iwl_write_prph(trans, RFH_Q_FRBDCB_RIDX(i), 0); 701 + iwl_write_prph(trans, RFH_Q_URBDCB_WIDX(i), 0); 702 + 703 + enabled |= BIT(i) | BIT(i + 16); 704 + } 705 + 706 + /* restock default queue */ 707 + iwl_pcie_rxq_mq_restock(trans, &trans_pcie->rxq[0]); 708 + 709 + /* 710 + * Enable Rx DMA 711 + * Single frame mode 712 + * Rx buffer size 4 or 8k or 12k 713 + * Min RB size 4 or 8 714 + * 512 RBDs 715 + */ 716 + iwl_write_prph(trans, RFH_RXF_DMA_CFG, 717 + RFH_DMA_EN_ENABLE_VAL | 718 + rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK | 719 + RFH_RXF_DMA_MIN_RB_4_8 | 720 + RFH_RXF_DMA_RBDCB_SIZE_512); 721 + 722 + iwl_write_prph(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP | 723 + RFH_GEN_CFG_SERVICE_DMA_SNOOP); 724 + iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, enabled); 725 + 726 + /* Set interrupt coalescing timer to default (2048 usecs) */ 727 + iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); 728 + } 729 + 730 + static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) 731 + { 738 732 lockdep_assert_held(&rxq->lock); 739 733 740 734 INIT_LIST_HEAD(&rxq->rx_free); 741 735 INIT_LIST_HEAD(&rxq->rx_used); 742 736 rxq->free_count = 0; 743 737 rxq->used_count = 0; 744 - 745 - for (i = 0; i < RX_QUEUE_SIZE; i++) 746 - list_add(&rxq->pool[i].list, &rxq->rx_used); 747 738 } 748 739 749 - static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba) 740 + static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget) 750 741 { 751 - int i; 752 - 753 - lockdep_assert_held(&rba->lock); 754 - 755 - INIT_LIST_HEAD(&rba->rbd_allocated); 756 - INIT_LIST_HEAD(&rba->rbd_empty); 757 - 758 - for (i = 0; i < 
RX_POOL_SIZE; i++) 759 - list_add(&rba->pool[i].list, &rba->rbd_empty); 760 - } 761 - 762 - static void iwl_pcie_rx_free_rba(struct iwl_trans *trans) 763 - { 764 - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 765 - struct iwl_rb_allocator *rba = &trans_pcie->rba; 766 - int i; 767 - 768 - lockdep_assert_held(&rba->lock); 769 - 770 - for (i = 0; i < RX_POOL_SIZE; i++) { 771 - if (!rba->pool[i].page) 772 - continue; 773 - dma_unmap_page(trans->dev, rba->pool[i].page_dma, 774 - PAGE_SIZE << trans_pcie->rx_page_order, 775 - DMA_FROM_DEVICE); 776 - __free_pages(rba->pool[i].page, trans_pcie->rx_page_order); 777 - rba->pool[i].page = NULL; 778 - } 742 + WARN_ON(1); 743 + return 0; 779 744 } 780 745 781 746 int iwl_pcie_rx_init(struct iwl_trans *trans) 782 747 { 783 748 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 784 - struct iwl_rxq *rxq = &trans_pcie->rxq; 749 + struct iwl_rxq *def_rxq; 785 750 struct iwl_rb_allocator *rba = &trans_pcie->rba; 786 - int i, err; 751 + int i, err, num_rbds, allocator_pool_size; 787 752 788 - if (!rxq->bd) { 753 + if (!trans_pcie->rxq) { 789 754 err = iwl_pcie_rx_alloc(trans); 790 755 if (err) 791 756 return err; 792 757 } 758 + def_rxq = trans_pcie->rxq; 793 759 if (!rba->alloc_wq) 794 760 rba->alloc_wq = alloc_workqueue("rb_allocator", 795 761 WQ_HIGHPRI | WQ_UNBOUND, 1); ··· 836 726 spin_lock(&rba->lock); 837 727 atomic_set(&rba->req_pending, 0); 838 728 atomic_set(&rba->req_ready, 0); 839 - /* free all first - we might be reconfigured for a different size */ 840 - iwl_pcie_rx_free_rba(trans); 841 - iwl_pcie_rx_init_rba(rba); 729 + INIT_LIST_HEAD(&rba->rbd_allocated); 730 + INIT_LIST_HEAD(&rba->rbd_empty); 842 731 spin_unlock(&rba->lock); 843 732 844 - spin_lock(&rxq->lock); 845 - 846 733 /* free all first - we might be reconfigured for a different size */ 847 - iwl_pcie_rxq_free_rbs(trans); 848 - iwl_pcie_rx_init_rxb_lists(rxq); 734 + iwl_pcie_free_rbs_pool(trans); 849 735 850 736 for (i = 
0; i < RX_QUEUE_SIZE; i++) 851 - rxq->queue[i] = NULL; 737 + def_rxq->queue[i] = NULL; 852 738 853 - /* Set us so that we have processed and used all buffers, but have 854 - * not restocked the Rx queue with fresh buffers */ 855 - rxq->read = rxq->write = 0; 856 - rxq->write_actual = 0; 857 - memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); 858 - spin_unlock(&rxq->lock); 739 + for (i = 0; i < trans->num_rx_queues; i++) { 740 + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 859 741 860 - iwl_pcie_rx_replenish(trans); 742 + rxq->id = i; 861 743 862 - iwl_pcie_rx_hw_init(trans, rxq); 744 + spin_lock(&rxq->lock); 745 + /* 746 + * Set read write pointer to reflect that we have processed 747 + * and used all buffers, but have not restocked the Rx queue 748 + * with fresh buffers 749 + */ 750 + rxq->read = 0; 751 + rxq->write = 0; 752 + rxq->write_actual = 0; 753 + memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); 863 754 864 - spin_lock(&rxq->lock); 865 - iwl_pcie_rxq_inc_wr_ptr(trans); 866 - spin_unlock(&rxq->lock); 755 + iwl_pcie_rx_init_rxb_lists(rxq); 756 + 757 + if (!rxq->napi.poll) 758 + netif_napi_add(&trans_pcie->napi_dev, &rxq->napi, 759 + iwl_pcie_dummy_napi_poll, 64); 760 + 761 + spin_unlock(&rxq->lock); 762 + } 763 + 764 + /* move the pool to the default queue and allocator ownerships */ 765 + num_rbds = trans->cfg->mq_rx_supported ? 
766 + MQ_RX_POOL_SIZE : RX_QUEUE_SIZE; 767 + allocator_pool_size = trans->num_rx_queues * 768 + (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC); 769 + for (i = 0; i < num_rbds; i++) { 770 + struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i]; 771 + 772 + if (i < allocator_pool_size) 773 + list_add(&rxb->list, &rba->rbd_empty); 774 + else 775 + list_add(&rxb->list, &def_rxq->rx_used); 776 + trans_pcie->global_table[i] = rxb; 777 + rxb->vid = (u16)i; 778 + } 779 + 780 + iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq); 781 + if (trans->cfg->mq_rx_supported) { 782 + iwl_pcie_rx_mq_hw_init(trans); 783 + } else { 784 + iwl_pcie_rxq_restock(trans, def_rxq); 785 + iwl_pcie_rx_hw_init(trans, def_rxq); 786 + } 787 + 788 + spin_lock(&def_rxq->lock); 789 + iwl_pcie_rxq_inc_wr_ptr(trans, def_rxq); 790 + spin_unlock(&def_rxq->lock); 867 791 868 792 return 0; 869 793 } ··· 905 761 void iwl_pcie_rx_free(struct iwl_trans *trans) 906 762 { 907 763 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 908 - struct iwl_rxq *rxq = &trans_pcie->rxq; 909 764 struct iwl_rb_allocator *rba = &trans_pcie->rba; 765 + int free_size = trans->cfg->mq_rx_supported ? 
sizeof(__le64) : 766 + sizeof(__le32); 767 + int i; 910 768 911 - /*if rxq->bd is NULL, it means that nothing has been allocated, 912 - * exit now */ 913 - if (!rxq->bd) { 769 + /* 770 + * if rxq is NULL, it means that nothing has been allocated, 771 + * exit now 772 + */ 773 + if (!trans_pcie->rxq) { 914 774 IWL_DEBUG_INFO(trans, "Free NULL rx context\n"); 915 775 return; 916 776 } ··· 925 777 rba->alloc_wq = NULL; 926 778 } 927 779 928 - spin_lock(&rba->lock); 929 - iwl_pcie_rx_free_rba(trans); 930 - spin_unlock(&rba->lock); 780 + iwl_pcie_free_rbs_pool(trans); 931 781 932 - spin_lock(&rxq->lock); 933 - iwl_pcie_rxq_free_rbs(trans); 934 - spin_unlock(&rxq->lock); 782 + for (i = 0; i < trans->num_rx_queues; i++) { 783 + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 935 784 936 - dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE, 937 - rxq->bd, rxq->bd_dma); 938 - rxq->bd_dma = 0; 939 - rxq->bd = NULL; 785 + if (rxq->bd) 786 + dma_free_coherent(trans->dev, 787 + free_size * rxq->queue_size, 788 + rxq->bd, rxq->bd_dma); 789 + rxq->bd_dma = 0; 790 + rxq->bd = NULL; 940 791 941 - if (rxq->rb_stts) 942 - dma_free_coherent(trans->dev, 943 - sizeof(struct iwl_rb_status), 944 - rxq->rb_stts, rxq->rb_stts_dma); 945 - else 946 - IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n"); 947 - rxq->rb_stts_dma = 0; 948 - rxq->rb_stts = NULL; 792 + if (rxq->rb_stts) 793 + dma_free_coherent(trans->dev, 794 + sizeof(struct iwl_rb_status), 795 + rxq->rb_stts, rxq->rb_stts_dma); 796 + else 797 + IWL_DEBUG_INFO(trans, 798 + "Free rxq->rb_stts which is NULL\n"); 799 + 800 + if (rxq->used_bd) 801 + dma_free_coherent(trans->dev, 802 + sizeof(__le32) * rxq->queue_size, 803 + rxq->used_bd, rxq->used_bd_dma); 804 + rxq->used_bd_dma = 0; 805 + rxq->used_bd = NULL; 806 + 807 + if (rxq->napi.poll) 808 + netif_napi_del(&rxq->napi); 809 + } 810 + kfree(trans_pcie->rxq); 949 811 } 950 812 951 813 /* ··· 999 841 } 1000 842 1001 843 static void iwl_pcie_rx_handle_rb(struct iwl_trans 
*trans, 844 + struct iwl_rxq *rxq, 1002 845 struct iwl_rx_mem_buffer *rxb, 1003 846 bool emergency) 1004 847 { 1005 848 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1006 - struct iwl_rxq *rxq = &trans_pcie->rxq; 1007 849 struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; 1008 850 bool page_stolen = false; 1009 851 int max_len = PAGE_SIZE << trans_pcie->rx_page_order; ··· 1069 911 index = SEQ_TO_INDEX(sequence); 1070 912 cmd_index = get_cmd_index(&txq->q, index); 1071 913 1072 - iwl_op_mode_rx(trans->op_mode, &trans_pcie->napi, &rxcb); 914 + if (rxq->id == 0) 915 + iwl_op_mode_rx(trans->op_mode, &rxq->napi, 916 + &rxcb); 917 + else 918 + iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi, 919 + &rxcb, rxq->id); 1073 920 1074 921 if (reclaim) { 1075 922 kzfree(txq->entries[cmd_index].free_buf); ··· 1138 975 static void iwl_pcie_rx_handle(struct iwl_trans *trans) 1139 976 { 1140 977 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1141 - struct iwl_rxq *rxq = &trans_pcie->rxq; 978 + struct iwl_rxq *rxq = &trans_pcie->rxq[0]; 1142 979 u32 r, i, j, count = 0; 1143 980 bool emergency = false; 1144 981 ··· 1156 993 while (i != r) { 1157 994 struct iwl_rx_mem_buffer *rxb; 1158 995 1159 - if (unlikely(rxq->used_count == RX_QUEUE_SIZE / 2)) 996 + if (unlikely(rxq->used_count == rxq->queue_size / 2)) 1160 997 emergency = true; 1161 998 1162 - rxb = rxq->queue[i]; 1163 - rxq->queue[i] = NULL; 999 + if (trans->cfg->mq_rx_supported) { 1000 + /* 1001 + * used_bd is a 32 bit but only 12 are used to retrieve 1002 + * the vid 1003 + */ 1004 + u16 vid = (u16)le32_to_cpu(rxq->used_bd[i]); 1005 + 1006 + rxb = trans_pcie->global_table[vid]; 1007 + } else { 1008 + rxb = rxq->queue[i]; 1009 + rxq->queue[i] = NULL; 1010 + } 1164 1011 1165 1012 IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d\n", r, i); 1166 - iwl_pcie_rx_handle_rb(trans, rxb, emergency); 1013 + iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency); 1167 1014 1168 - i = (i + 1) & 
RX_QUEUE_MASK; 1015 + i = (i + 1) & (rxq->queue_size - 1); 1169 1016 1170 1017 /* If we have RX_CLAIM_REQ_ALLOC released rx buffers - 1171 1018 * try to claim the pre-allocated buffers from the allocator */ ··· 1213 1040 count++; 1214 1041 if (count == 8) { 1215 1042 count = 0; 1216 - if (rxq->used_count < RX_QUEUE_SIZE / 3) 1043 + if (rxq->used_count < rxq->queue_size / 3) 1217 1044 emergency = false; 1218 1045 spin_unlock(&rxq->lock); 1219 - iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC); 1046 + iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); 1220 1047 spin_lock(&rxq->lock); 1221 1048 } 1222 1049 } ··· 1228 1055 if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) { 1229 1056 rxq->read = i; 1230 1057 spin_unlock(&rxq->lock); 1231 - iwl_pcie_rxq_restock(trans); 1058 + if (trans->cfg->mq_rx_supported) 1059 + iwl_pcie_rxq_mq_restock(trans, rxq); 1060 + else 1061 + iwl_pcie_rxq_restock(trans, rxq); 1232 1062 goto restart; 1233 1063 } 1234 1064 } ··· 1253 1077 * will be restocked by the next call of iwl_pcie_rxq_restock. 1254 1078 */ 1255 1079 if (unlikely(emergency && count)) 1256 - iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC); 1080 + iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); 1257 1081 1258 - if (trans_pcie->napi.poll) 1259 - napi_gro_flush(&trans_pcie->napi, false); 1082 + if (rxq->napi.poll) 1083 + napi_gro_flush(&rxq->napi, false); 1260 1084 } 1261 1085 1262 1086 /*
+106 -47
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
··· 72 72 #include <linux/bitops.h> 73 73 #include <linux/gfp.h> 74 74 #include <linux/vmalloc.h> 75 + #include <linux/pm_runtime.h> 75 76 76 77 #include "iwl-drv.h" 77 78 #include "iwl-trans.h" ··· 1219 1218 _iwl_trans_pcie_stop_device(trans, true); 1220 1219 } 1221 1220 1222 - static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test) 1221 + static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, 1222 + bool reset) 1223 1223 { 1224 1224 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1225 1225 1226 - if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3) { 1226 + if (!reset) { 1227 1227 /* Enable persistence mode to avoid reset */ 1228 1228 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 1229 1229 CSR_HW_IF_CONFIG_REG_PERSIST_MODE); ··· 1248 1246 iwl_clear_bit(trans, CSR_GP_CNTRL, 1249 1247 CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 1250 1248 1251 - if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D3) { 1249 + if (reset) { 1252 1250 /* 1253 1251 * reset TX queues -- some of their registers reset during S3 1254 1252 * so if we don't reset everything here the D3 image would try ··· 1262 1260 1263 1261 static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, 1264 1262 enum iwl_d3_status *status, 1265 - bool test) 1263 + bool test, bool reset) 1266 1264 { 1267 1265 u32 val; 1268 1266 int ret; ··· 1297 1295 1298 1296 iwl_pcie_set_pwr(trans, false); 1299 1297 1300 - if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3) { 1298 + if (!reset) { 1301 1299 iwl_clear_bit(trans, CSR_GP_CNTRL, 1302 1300 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 1303 1301 } else { ··· 1354 1352 clear_bit(STATUS_RFKILL, &trans->status); 1355 1353 /* ... 
rfkill can call stop_device and set it false if needed */ 1356 1354 iwl_trans_pcie_rf_kill(trans, hw_rfkill); 1355 + 1356 + /* Make sure we sync here, because we'll need full access later */ 1357 + if (low_power) 1358 + pm_runtime_resume(trans->dev); 1357 1359 1358 1360 return 0; 1359 1361 } ··· 1428 1422 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val); 1429 1423 } 1430 1424 1431 - static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget) 1432 - { 1433 - WARN_ON(1); 1434 - return 0; 1435 - } 1436 - 1437 1425 static void iwl_trans_pcie_configure(struct iwl_trans *trans, 1438 1426 const struct iwl_trans_config *trans_cfg) 1439 1427 { ··· 1464 1464 * As this function may be called again in some corner cases don't 1465 1465 * do anything if NAPI was already initialized. 1466 1466 */ 1467 - if (!trans_pcie->napi.poll) { 1467 + if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY) 1468 1468 init_dummy_netdev(&trans_pcie->napi_dev); 1469 - netif_napi_add(&trans_pcie->napi_dev, &trans_pcie->napi, 1470 - iwl_pcie_dummy_napi_poll, 64); 1471 - } 1472 1469 } 1473 1470 1474 1471 void iwl_trans_pcie_free(struct iwl_trans *trans) 1475 1472 { 1476 1473 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1477 1474 int i; 1475 + 1476 + /* TODO: check if this is really needed */ 1477 + pm_runtime_disable(trans->dev); 1478 1478 1479 1479 synchronize_irq(trans_pcie->pci_dev->irq); 1480 1480 ··· 1488 1488 iounmap(trans_pcie->hw_base); 1489 1489 pci_release_regions(trans_pcie->pci_dev); 1490 1490 pci_disable_device(trans_pcie->pci_dev); 1491 - 1492 - if (trans_pcie->napi.poll) 1493 - netif_napi_del(&trans_pcie->napi); 1494 1491 1495 1492 iwl_pcie_free_fw_monitor(trans); 1496 1493 ··· 1828 1831 spin_lock_irqsave(&trans_pcie->ref_lock, flags); 1829 1832 IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count); 1830 1833 trans_pcie->ref_count++; 1834 + pm_runtime_get(&trans_pcie->pci_dev->dev); 1831 1835 
spin_unlock_irqrestore(&trans_pcie->ref_lock, flags); 1832 1836 } 1833 1837 ··· 1847 1849 return; 1848 1850 } 1849 1851 trans_pcie->ref_count--; 1852 + 1853 + pm_runtime_mark_last_busy(&trans_pcie->pci_dev->dev); 1854 + pm_runtime_put_autosuspend(&trans_pcie->pci_dev->dev); 1855 + 1850 1856 spin_unlock_irqrestore(&trans_pcie->ref_lock, flags); 1851 1857 } 1852 1858 ··· 2003 2001 { 2004 2002 struct iwl_trans *trans = file->private_data; 2005 2003 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2006 - struct iwl_rxq *rxq = &trans_pcie->rxq; 2007 - char buf[256]; 2008 - int pos = 0; 2009 - const size_t bufsz = sizeof(buf); 2004 + char *buf; 2005 + int pos = 0, i, ret; 2006 + size_t bufsz = sizeof(buf); 2010 2007 2011 - pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", 2012 - rxq->read); 2013 - pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", 2014 - rxq->write); 2015 - pos += scnprintf(buf + pos, bufsz - pos, "write_actual: %u\n", 2016 - rxq->write_actual); 2017 - pos += scnprintf(buf + pos, bufsz - pos, "need_update: %d\n", 2018 - rxq->need_update); 2019 - pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n", 2020 - rxq->free_count); 2021 - if (rxq->rb_stts) { 2022 - pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n", 2023 - le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF); 2024 - } else { 2025 - pos += scnprintf(buf + pos, bufsz - pos, 2026 - "closed_rb_num: Not Allocated\n"); 2008 + bufsz = sizeof(char) * 121 * trans->num_rx_queues; 2009 + 2010 + if (!trans_pcie->rxq) 2011 + return -EAGAIN; 2012 + 2013 + buf = kzalloc(bufsz, GFP_KERNEL); 2014 + if (!buf) 2015 + return -ENOMEM; 2016 + 2017 + for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) { 2018 + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; 2019 + 2020 + pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n", 2021 + i); 2022 + pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n", 2023 + rxq->read); 2024 + pos += scnprintf(buf + pos, bufsz - pos, 
"\twrite: %u\n", 2025 + rxq->write); 2026 + pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n", 2027 + rxq->write_actual); 2028 + pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n", 2029 + rxq->need_update); 2030 + pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n", 2031 + rxq->free_count); 2032 + if (rxq->rb_stts) { 2033 + pos += scnprintf(buf + pos, bufsz - pos, 2034 + "\tclosed_rb_num: %u\n", 2035 + le16_to_cpu(rxq->rb_stts->closed_rb_num) & 2036 + 0x0FFF); 2037 + } else { 2038 + pos += scnprintf(buf + pos, bufsz - pos, 2039 + "\tclosed_rb_num: Not Allocated\n"); 2027 2040 } 2028 - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 2041 + } 2042 + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 2043 + kfree(buf); 2044 + 2045 + return ret; 2029 2046 } 2030 2047 2031 2048 static ssize_t iwl_dbgfs_interrupt_read(struct file *file, ··· 2209 2188 { 2210 2189 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2211 2190 int max_len = PAGE_SIZE << trans_pcie->rx_page_order; 2212 - struct iwl_rxq *rxq = &trans_pcie->rxq; 2191 + /* Dump RBs is supported only for pre-9000 devices (1 queue) */ 2192 + struct iwl_rxq *rxq = &trans_pcie->rxq[0]; 2213 2193 u32 i, r, j, rb_len = 0; 2214 2194 2215 2195 spin_lock(&rxq->lock); ··· 2405 2383 u32 len, num_rbs; 2406 2384 u32 monitor_len; 2407 2385 int i, ptr; 2408 - bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status); 2386 + bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) && 2387 + !trans->cfg->mq_rx_supported; 2409 2388 2410 2389 /* transport dump header */ 2411 2390 len = sizeof(*dump_data); ··· 2461 2438 len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND); 2462 2439 2463 2440 if (dump_rbs) { 2441 + /* Dump RBs is supported only for pre-9000 devices (1 queue) */ 2442 + struct iwl_rxq *rxq = &trans_pcie->rxq[0]; 2464 2443 /* RBs */ 2465 - num_rbs = le16_to_cpu(ACCESS_ONCE( 2466 - trans_pcie->rxq.rb_stts->closed_rb_num)) 
2444 + num_rbs = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) 2467 2445 & 0x0FFF; 2468 - num_rbs = (num_rbs - trans_pcie->rxq.read) & RX_QUEUE_MASK; 2446 + num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK; 2469 2447 len += num_rbs * (sizeof(*data) + 2470 2448 sizeof(struct iwl_fw_error_dump_rb) + 2471 2449 (PAGE_SIZE << trans_pcie->rx_page_order)); ··· 2517 2493 return dump_data; 2518 2494 } 2519 2495 2496 + #ifdef CONFIG_PM_SLEEP 2497 + static int iwl_trans_pcie_suspend(struct iwl_trans *trans) 2498 + { 2499 + if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3) 2500 + return iwl_pci_fw_enter_d0i3(trans); 2501 + 2502 + return 0; 2503 + } 2504 + 2505 + static void iwl_trans_pcie_resume(struct iwl_trans *trans) 2506 + { 2507 + if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3) 2508 + iwl_pci_fw_exit_d0i3(trans); 2509 + } 2510 + #endif /* CONFIG_PM_SLEEP */ 2511 + 2520 2512 static const struct iwl_trans_ops trans_ops_pcie = { 2521 2513 .start_hw = iwl_trans_pcie_start_hw, 2522 2514 .op_mode_leave = iwl_trans_pcie_op_mode_leave, ··· 2542 2502 2543 2503 .d3_suspend = iwl_trans_pcie_d3_suspend, 2544 2504 .d3_resume = iwl_trans_pcie_d3_resume, 2505 + 2506 + #ifdef CONFIG_PM_SLEEP 2507 + .suspend = iwl_trans_pcie_suspend, 2508 + .resume = iwl_trans_pcie_resume, 2509 + #endif /* CONFIG_PM_SLEEP */ 2545 2510 2546 2511 .send_cmd = iwl_trans_pcie_send_hcmd, 2547 2512 ··· 2586 2541 struct iwl_trans_pcie *trans_pcie; 2587 2542 struct iwl_trans *trans; 2588 2543 u16 pci_cmd; 2589 - int ret; 2544 + int ret, addr_size; 2590 2545 2591 2546 trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), 2592 2547 &pdev->dev, cfg, &trans_ops_pcie, 0); ··· 2624 2579 PCIE_LINK_STATE_CLKPM); 2625 2580 } 2626 2581 2582 + if (cfg->mq_rx_supported) 2583 + addr_size = 64; 2584 + else 2585 + addr_size = 36; 2586 + 2627 2587 pci_set_master(pdev); 2628 2588 2629 - ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); 2589 + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size)); 2630 2590 if (!ret) 
2631 - ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); 2591 + ret = pci_set_consistent_dma_mask(pdev, 2592 + DMA_BIT_MASK(addr_size)); 2632 2593 if (ret) { 2633 2594 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2634 2595 if (!ret) ··· 2737 2686 /* Initialize the wait queue for commands */ 2738 2687 init_waitqueue_head(&trans_pcie->wait_command_queue); 2739 2688 2689 + init_waitqueue_head(&trans_pcie->d0i3_waitq); 2690 + 2740 2691 ret = iwl_pcie_alloc_ict(trans); 2741 2692 if (ret) 2742 2693 goto out_pci_disable_msi; ··· 2752 2699 } 2753 2700 2754 2701 trans_pcie->inta_mask = CSR_INI_SET_MASK; 2702 + 2703 + #ifdef CONFIG_IWLWIFI_PCIE_RTPM 2704 + trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3; 2705 + #else 2706 + trans->runtime_pm_mode = IWL_PLAT_PM_MODE_DISABLED; 2707 + #endif /* CONFIG_IWLWIFI_PCIE_RTPM */ 2755 2708 2756 2709 return trans; 2757 2710
+77 -10
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
··· 1 1 /****************************************************************************** 2 2 * 3 3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. 4 - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH 4 + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 5 + * Copyright(c) 2016 Intel Deutschland GmbH 5 6 * 6 7 * Portions of this file are derived from the ipw3945 project, as well 7 8 * as portions of the ieee80211 subsystem header files. ··· 34 33 #include <linux/sched.h> 35 34 #include <net/ip6_checksum.h> 36 35 #include <net/tso.h> 37 - #include <net/ip6_checksum.h> 38 36 39 37 #include "iwl-debug.h" 40 38 #include "iwl-csr.h" ··· 571 571 return ret; 572 572 573 573 spin_lock_init(&txq->lock); 574 + __skb_queue_head_init(&txq->overflow_q); 574 575 575 576 /* 576 577 * Tell nic where to find circular buffer of Tx Frame Descriptors for ··· 622 621 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr); 623 622 } 624 623 txq->active = false; 624 + 625 + while (!skb_queue_empty(&txq->overflow_q)) { 626 + struct sk_buff *skb = __skb_dequeue(&txq->overflow_q); 627 + 628 + iwl_op_mode_free_skb(trans->op_mode, skb); 629 + } 630 + 625 631 spin_unlock_bh(&txq->lock); 626 632 627 633 /* just in case - this queue may have been stopped */ ··· 1060 1052 1061 1053 iwl_pcie_txq_progress(txq); 1062 1054 1063 - if (iwl_queue_space(&txq->q) > txq->q.low_mark) 1064 - iwl_wake_queue(trans, txq); 1055 + if (iwl_queue_space(&txq->q) > txq->q.low_mark && 1056 + test_bit(txq_id, trans_pcie->queue_stopped)) { 1057 + struct sk_buff_head skbs; 1058 + 1059 + __skb_queue_head_init(&skbs); 1060 + skb_queue_splice_init(&txq->overflow_q, &skbs); 1061 + 1062 + /* 1063 + * This is tricky: we are in reclaim path which is non 1064 + * re-entrant, so noone will try to take the access the 1065 + * txq data from that path. We stopped tx, so we can't 1066 + * have tx as well. Bottom line, we can unlock and re-lock 1067 + * later. 
1068 + */ 1069 + spin_unlock_bh(&txq->lock); 1070 + 1071 + while (!skb_queue_empty(&skbs)) { 1072 + struct sk_buff *skb = __skb_dequeue(&skbs); 1073 + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1074 + u8 dev_cmd_idx = IWL_TRANS_FIRST_DRIVER_DATA + 1; 1075 + struct iwl_device_cmd *dev_cmd = 1076 + info->driver_data[dev_cmd_idx]; 1077 + 1078 + /* 1079 + * Note that we can very well be overflowing again. 1080 + * In that case, iwl_queue_space will be small again 1081 + * and we won't wake mac80211's queue. 1082 + */ 1083 + iwl_trans_pcie_tx(trans, skb, dev_cmd, txq_id); 1084 + } 1085 + spin_lock_bh(&txq->lock); 1086 + 1087 + if (iwl_queue_space(&txq->q) > txq->q.low_mark) 1088 + iwl_wake_queue(trans, txq); 1089 + } 1065 1090 1066 1091 if (q->read_ptr == q->write_ptr) { 1067 1092 IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", q->id); ··· 1727 1686 wake_up(&trans_pcie->wait_command_queue); 1728 1687 } 1729 1688 1689 + if (meta->flags & CMD_MAKE_TRANS_IDLE) { 1690 + IWL_DEBUG_INFO(trans, "complete %s - mark trans as idle\n", 1691 + iwl_get_cmd_string(trans, cmd->hdr.cmd)); 1692 + set_bit(STATUS_TRANS_IDLE, &trans->status); 1693 + wake_up(&trans_pcie->d0i3_waitq); 1694 + } 1695 + 1696 + if (meta->flags & CMD_WAKE_UP_TRANS) { 1697 + IWL_DEBUG_INFO(trans, "complete %s - clear trans idle flag\n", 1698 + iwl_get_cmd_string(trans, cmd->hdr.cmd)); 1699 + clear_bit(STATUS_TRANS_IDLE, &trans->status); 1700 + wake_up(&trans_pcie->d0i3_waitq); 1701 + } 1702 + 1730 1703 meta->flags = 0; 1731 1704 1732 1705 spin_unlock_bh(&txq->lock); ··· 2216 2161 2217 2162 csum = skb_checksum(skb, offs, skb->len - offs, 0); 2218 2163 *(__sum16 *)(skb->data + csum_offs) = csum_fold(csum); 2164 + 2165 + skb->ip_summed = CHECKSUM_UNNECESSARY; 2219 2166 } 2220 2167 2221 2168 if (skb_is_nonlinear(skb) && ··· 2233 2176 hdr_len = ieee80211_hdrlen(fc); 2234 2177 2235 2178 spin_lock(&txq->lock); 2179 + 2180 + if (iwl_queue_space(q) < q->high_mark) { 2181 + iwl_stop_queue(trans, txq); 2182 + 
2183 + /* don't put the packet on the ring, if there is no room */ 2184 + if (unlikely(iwl_queue_space(q) < 3)) { 2185 + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 2186 + 2187 + info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA + 1] = 2188 + dev_cmd; 2189 + __skb_queue_tail(&txq->overflow_q, skb); 2190 + 2191 + spin_unlock(&txq->lock); 2192 + return 0; 2193 + } 2194 + } 2236 2195 2237 2196 /* In AGG mode, the index in the ring must correspond to the WiFi 2238 2197 * sequence number. This is a HW requirements to help the SCD to parse ··· 2354 2281 * At this point the frame is "transmitted" successfully 2355 2282 * and we will get a TX status notification eventually. 2356 2283 */ 2357 - if (iwl_queue_space(q) < q->high_mark) { 2358 - if (wait_write_ptr) 2359 - iwl_pcie_txq_inc_wr_ptr(trans, txq); 2360 - else 2361 - iwl_stop_queue(trans, txq); 2362 - } 2363 2284 spin_unlock(&txq->lock); 2364 2285 return 0; 2365 2286 out_err:
+10 -5
drivers/net/wireless/intersil/hostap/hostap_hw.c
··· 836 836 spin_lock_bh(&local->baplock); 837 837 838 838 res = hfa384x_setup_bap(dev, BAP0, rid, 0); 839 - if (!res) 840 - res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec)); 839 + if (res) 840 + goto unlock; 841 + 842 + res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec)); 843 + if (res) 844 + goto unlock; 841 845 842 846 if (le16_to_cpu(rec.len) == 0) { 843 847 /* RID not available */ 844 848 res = -ENODATA; 849 + goto unlock; 845 850 } 846 851 847 852 rlen = (le16_to_cpu(rec.len) - 1) * 2; 848 - if (!res && exact_len && rlen != len) { 853 + if (exact_len && rlen != len) { 849 854 printk(KERN_DEBUG "%s: hfa384x_get_rid - RID len mismatch: " 850 855 "rid=0x%04x, len=%d (expected %d)\n", 851 856 dev->name, rid, rlen, len); 852 857 res = -ENODATA; 853 858 } 854 859 855 - if (!res) 856 - res = hfa384x_from_bap(dev, BAP0, buf, len); 860 + res = hfa384x_from_bap(dev, BAP0, buf, len); 857 861 862 + unlock: 858 863 spin_unlock_bh(&local->baplock); 859 864 mutex_unlock(&local->rid_bap_mtx); 860 865
+38
drivers/net/wireless/marvell/libertas/cfg.c
··· 2039 2039 2040 2040 2041 2041 2042 + int lbs_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, 2043 + bool enabled, int timeout) 2044 + { 2045 + struct lbs_private *priv = wiphy_priv(wiphy); 2046 + 2047 + if (!(priv->fwcapinfo & FW_CAPINFO_PS)) { 2048 + if (!enabled) 2049 + return 0; 2050 + else 2051 + return -EINVAL; 2052 + } 2053 + /* firmware does not work well with too long latency with power saving 2054 + * enabled, so do not enable it if there is only polling, no 2055 + * interrupts (like in some sdio hosts which can only 2056 + * poll for sdio irqs) 2057 + */ 2058 + if (priv->is_polling) { 2059 + if (!enabled) 2060 + return 0; 2061 + else 2062 + return -EINVAL; 2063 + } 2064 + if (!enabled) { 2065 + priv->psmode = LBS802_11POWERMODECAM; 2066 + if (priv->psstate != PS_STATE_FULL_POWER) 2067 + lbs_set_ps_mode(priv, 2068 + PS_MODE_ACTION_EXIT_PS, 2069 + true); 2070 + return 0; 2071 + } 2072 + if (priv->psmode != LBS802_11POWERMODECAM) 2073 + return 0; 2074 + priv->psmode = LBS802_11POWERMODEMAX_PSP; 2075 + if (priv->connect_status == LBS_CONNECTED) 2076 + lbs_set_ps_mode(priv, PS_MODE_ACTION_ENTER_PS, true); 2077 + return 0; 2078 + } 2042 2079 2043 2080 /* 2044 2081 * Initialization ··· 2094 2057 .change_virtual_intf = lbs_change_intf, 2095 2058 .join_ibss = lbs_join_ibss, 2096 2059 .leave_ibss = lbs_leave_ibss, 2060 + .set_power_mgmt = lbs_set_power_mgmt, 2097 2061 }; 2098 2062 2099 2063
+7 -33
drivers/net/wireless/marvell/libertas/cmd.c
··· 957 957 958 958 /* Exit_PS command needs to be queued in the header always. */ 959 959 if (le16_to_cpu(cmdnode->cmdbuf->command) == CMD_802_11_PS_MODE) { 960 - struct cmd_ds_802_11_ps_mode *psm = (void *) &cmdnode->cmdbuf; 960 + struct cmd_ds_802_11_ps_mode *psm = (void *)cmdnode->cmdbuf; 961 961 962 962 if (psm->action == cpu_to_le16(PS_MODE_ACTION_EXIT_PS)) { 963 963 if (priv->psstate != PS_STATE_FULL_POWER) ··· 1387 1387 * PS command. Ignore it if it is not Exit_PS. 1388 1388 * otherwise send it down immediately. 1389 1389 */ 1390 - struct cmd_ds_802_11_ps_mode *psm = (void *)&cmd[1]; 1390 + struct cmd_ds_802_11_ps_mode *psm = (void *)cmd; 1391 1391 1392 1392 lbs_deb_host( 1393 1393 "EXEC_NEXT_CMD: PS cmd, action 0x%02x\n", ··· 1428 1428 * check if in power save mode, if yes, put the device back 1429 1429 * to PS mode 1430 1430 */ 1431 - #ifdef TODO 1432 - /* 1433 - * This was the old code for libertas+wext. Someone that 1434 - * understands this beast should re-code it in a sane way. 1435 - * 1436 - * I actually don't understand why this is related to WPA 1437 - * and to connection status, shouldn't powering should be 1438 - * independ of such things? 
1439 - */ 1440 1431 if ((priv->psmode != LBS802_11POWERMODECAM) && 1441 1432 (priv->psstate == PS_STATE_FULL_POWER) && 1442 - ((priv->connect_status == LBS_CONNECTED) || 1443 - lbs_mesh_connected(priv))) { 1444 - if (priv->secinfo.WPAenabled || 1445 - priv->secinfo.WPA2enabled) { 1446 - /* check for valid WPA group keys */ 1447 - if (priv->wpa_mcast_key.len || 1448 - priv->wpa_unicast_key.len) { 1449 - lbs_deb_host( 1450 - "EXEC_NEXT_CMD: WPA enabled and GTK_SET" 1451 - " go back to PS_SLEEP"); 1452 - lbs_set_ps_mode(priv, 1453 - PS_MODE_ACTION_ENTER_PS, 1454 - false); 1455 - } 1456 - } else { 1457 - lbs_deb_host( 1458 - "EXEC_NEXT_CMD: cmdpendingq empty, " 1459 - "go back to PS_SLEEP"); 1460 - lbs_set_ps_mode(priv, PS_MODE_ACTION_ENTER_PS, 1461 - false); 1462 - } 1433 + (priv->connect_status == LBS_CONNECTED)) { 1434 + lbs_deb_host( 1435 + "EXEC_NEXT_CMD: cmdpendingq empty, go back to PS_SLEEP"); 1436 + lbs_set_ps_mode(priv, PS_MODE_ACTION_ENTER_PS, 1437 + false); 1463 1438 } 1464 - #endif 1465 1439 } 1466 1440 1467 1441 ret = 0;
+8 -1
drivers/net/wireless/marvell/libertas/cmdresp.c
··· 123 123 priv->cmd_timed_out = 0; 124 124 125 125 if (respcmd == CMD_RET(CMD_802_11_PS_MODE)) { 126 - struct cmd_ds_802_11_ps_mode *psmode = (void *) &resp[1]; 126 + /* struct cmd_ds_802_11_ps_mode also contains 127 + * the header 128 + */ 129 + struct cmd_ds_802_11_ps_mode *psmode = (void *)resp; 127 130 u16 action = le16_to_cpu(psmode->action); 128 131 129 132 lbs_deb_host( ··· 255 252 if (priv->psstate == PS_STATE_FULL_POWER) { 256 253 lbs_deb_cmd( 257 254 "EVENT: in FULL POWER mode, ignoring PS_SLEEP\n"); 255 + break; 256 + } 257 + if (!list_empty(&priv->cmdpendingq)) { 258 + lbs_deb_cmd("EVENT: commands in queue, do not sleep\n"); 258 259 break; 259 260 } 260 261 priv->psstate = PS_STATE_PRE_SLEEP;
+1
drivers/net/wireless/marvell/libertas/dev.h
··· 99 99 /* Hardware access */ 100 100 void *card; 101 101 bool iface_running; 102 + u8 is_polling; /* host has to poll the card irq */ 102 103 u8 fw_ready; 103 104 u8 surpriseremoved; 104 105 u8 setup_fw_on_resume;
+1 -1
drivers/net/wireless/marvell/libertas/if_sdio.c
··· 1267 1267 priv->reset_card = if_sdio_reset_card; 1268 1268 priv->power_save = if_sdio_power_save; 1269 1269 priv->power_restore = if_sdio_power_restore; 1270 - 1270 + priv->is_polling = !(func->card->host->caps & MMC_CAP_SDIO_IRQ); 1271 1271 ret = if_sdio_power_on(card); 1272 1272 if (ret) 1273 1273 goto err_activate_card;
+1
drivers/net/wireless/marvell/libertas/if_usb.c
··· 267 267 priv->enter_deep_sleep = NULL; 268 268 priv->exit_deep_sleep = NULL; 269 269 priv->reset_deep_sleep_wakeup = NULL; 270 + priv->is_polling = false; 270 271 #ifdef CONFIG_OLPC 271 272 if (machine_is_olpc()) 272 273 priv->reset_card = if_usb_reset_olpc_card;
+6 -1
drivers/net/wireless/marvell/libertas/main.c
··· 1060 1060 1061 1061 if (priv->psmode == LBS802_11POWERMODEMAX_PSP) { 1062 1062 priv->psmode = LBS802_11POWERMODECAM; 1063 - lbs_set_ps_mode(priv, PS_MODE_ACTION_EXIT_PS, true); 1063 + /* no need to wakeup if already woken up, 1064 + * on suspend, this exit ps command is not processed 1065 + * the driver hangs 1066 + */ 1067 + if (priv->psstate != PS_STATE_FULL_POWER) 1068 + lbs_set_ps_mode(priv, PS_MODE_ACTION_EXIT_PS, true); 1064 1069 } 1065 1070 1066 1071 if (priv->is_deep_sleep) {
+10
drivers/net/wireless/marvell/mwifiex/README
··· 237 237 238 238 cat fw_dump 239 239 240 + verext 241 + This command is used to get extended firmware version string using 242 + different configuration parameters. 243 + 244 + Usage: 245 + echo "[version_str_sel]" > verext 246 + cat verext 247 + 248 + [version_str_sel]: firmware support several extend version 249 + string cases, include 0/1/10/20/21/99 240 250 ===============================================================================
+230 -18
drivers/net/wireless/marvell/mwifiex/cfg80211.c
··· 1962 1962 { 1963 1963 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 1964 1964 1965 + if (!mwifiex_stop_bg_scan(priv)) 1966 + cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy); 1967 + 1965 1968 if (mwifiex_deauthenticate(priv, NULL)) 1966 1969 return -EFAULT; 1967 1970 ··· 2220 2217 "info: Trying to associate to %s and bssid %pM\n", 2221 2218 (char *)sme->ssid, sme->bssid); 2222 2219 2220 + if (!mwifiex_stop_bg_scan(priv)) 2221 + cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy); 2222 + 2223 2223 ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid, 2224 2224 priv->bss_mode, sme->channel, sme, 0); 2225 2225 if (!ret) { ··· 2426 2420 return -EBUSY; 2427 2421 } 2428 2422 2423 + if (!mwifiex_stop_bg_scan(priv)) 2424 + cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy); 2425 + 2429 2426 user_scan_cfg = kzalloc(sizeof(*user_scan_cfg), GFP_KERNEL); 2430 2427 if (!user_scan_cfg) 2431 2428 return -ENOMEM; ··· 2493 2484 } 2494 2485 } 2495 2486 } 2487 + return 0; 2488 + } 2489 + 2490 + /* CFG802.11 operation handler for sched_scan_start. 2491 + * 2492 + * This function issues a bgscan config request to the firmware based upon 2493 + * the user specified sched_scan configuration. On successful completion, 2494 + * firmware will generate BGSCAN_REPORT event, driver should issue bgscan 2495 + * query command to get sched_scan results from firmware. 
2496 + */ 2497 + static int 2498 + mwifiex_cfg80211_sched_scan_start(struct wiphy *wiphy, 2499 + struct net_device *dev, 2500 + struct cfg80211_sched_scan_request *request) 2501 + { 2502 + struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 2503 + int i, offset; 2504 + struct ieee80211_channel *chan; 2505 + struct mwifiex_bg_scan_cfg *bgscan_cfg; 2506 + struct ieee_types_header *ie; 2507 + 2508 + if (!request || (!request->n_ssids && !request->n_match_sets)) { 2509 + wiphy_err(wiphy, "%s : Invalid Sched_scan parameters", 2510 + __func__); 2511 + return -EINVAL; 2512 + } 2513 + 2514 + wiphy_info(wiphy, "sched_scan start : n_ssids=%d n_match_sets=%d ", 2515 + request->n_ssids, request->n_match_sets); 2516 + wiphy_info(wiphy, "n_channels=%d interval=%d ie_len=%d\n", 2517 + request->n_channels, request->scan_plans->interval, 2518 + (int)request->ie_len); 2519 + 2520 + bgscan_cfg = kzalloc(sizeof(*bgscan_cfg), GFP_KERNEL); 2521 + if (!bgscan_cfg) 2522 + return -ENOMEM; 2523 + 2524 + if (priv->scan_request || priv->scan_aborting) 2525 + bgscan_cfg->start_later = true; 2526 + 2527 + bgscan_cfg->num_ssids = request->n_match_sets; 2528 + bgscan_cfg->ssid_list = request->match_sets; 2529 + 2530 + if (request->ie && request->ie_len) { 2531 + offset = 0; 2532 + for (i = 0; i < MWIFIEX_MAX_VSIE_NUM; i++) { 2533 + if (priv->vs_ie[i].mask != MWIFIEX_VSIE_MASK_CLEAR) 2534 + continue; 2535 + priv->vs_ie[i].mask = MWIFIEX_VSIE_MASK_BGSCAN; 2536 + ie = (struct ieee_types_header *)(request->ie + offset); 2537 + memcpy(&priv->vs_ie[i].ie, ie, sizeof(*ie) + ie->len); 2538 + offset += sizeof(*ie) + ie->len; 2539 + 2540 + if (offset >= request->ie_len) 2541 + break; 2542 + } 2543 + } 2544 + 2545 + for (i = 0; i < min_t(u32, request->n_channels, 2546 + MWIFIEX_BG_SCAN_CHAN_MAX); i++) { 2547 + chan = request->channels[i]; 2548 + bgscan_cfg->chan_list[i].chan_number = chan->hw_value; 2549 + bgscan_cfg->chan_list[i].radio_type = chan->band; 2550 + 2551 + if ((chan->flags & 
IEEE80211_CHAN_NO_IR) || !request->n_ssids) 2552 + bgscan_cfg->chan_list[i].scan_type = 2553 + MWIFIEX_SCAN_TYPE_PASSIVE; 2554 + else 2555 + bgscan_cfg->chan_list[i].scan_type = 2556 + MWIFIEX_SCAN_TYPE_ACTIVE; 2557 + 2558 + bgscan_cfg->chan_list[i].scan_time = 0; 2559 + } 2560 + 2561 + bgscan_cfg->chan_per_scan = min_t(u32, request->n_channels, 2562 + MWIFIEX_BG_SCAN_CHAN_MAX); 2563 + 2564 + /* Use at least 15 second for per scan cycle */ 2565 + bgscan_cfg->scan_interval = (request->scan_plans->interval > 2566 + MWIFIEX_BGSCAN_INTERVAL) ? 2567 + request->scan_plans->interval : 2568 + MWIFIEX_BGSCAN_INTERVAL; 2569 + 2570 + bgscan_cfg->repeat_count = MWIFIEX_BGSCAN_REPEAT_COUNT; 2571 + bgscan_cfg->report_condition = MWIFIEX_BGSCAN_SSID_MATCH | 2572 + MWIFIEX_BGSCAN_WAIT_ALL_CHAN_DONE; 2573 + bgscan_cfg->bss_type = MWIFIEX_BSS_MODE_INFRA; 2574 + bgscan_cfg->action = MWIFIEX_BGSCAN_ACT_SET; 2575 + bgscan_cfg->enable = true; 2576 + if (request->min_rssi_thold != NL80211_SCAN_RSSI_THOLD_OFF) { 2577 + bgscan_cfg->report_condition |= MWIFIEX_BGSCAN_SSID_RSSI_MATCH; 2578 + bgscan_cfg->rssi_threshold = request->min_rssi_thold; 2579 + } 2580 + 2581 + if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11_BG_SCAN_CONFIG, 2582 + HostCmd_ACT_GEN_SET, 0, bgscan_cfg, true)) { 2583 + kfree(bgscan_cfg); 2584 + return -EFAULT; 2585 + } 2586 + 2587 + priv->sched_scanning = true; 2588 + 2589 + kfree(bgscan_cfg); 2590 + return 0; 2591 + } 2592 + 2593 + /* CFG802.11 operation handler for sched_scan_stop. 
2594 + * 2595 + * This function issues a bgscan config command to disable 2596 + * previous bgscan configuration in the firmware 2597 + */ 2598 + static int mwifiex_cfg80211_sched_scan_stop(struct wiphy *wiphy, 2599 + struct net_device *dev) 2600 + { 2601 + struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 2602 + 2603 + wiphy_info(wiphy, "sched scan stop!"); 2604 + mwifiex_stop_bg_scan(priv); 2605 + 2496 2606 return 0; 2497 2607 } 2498 2608 ··· 2976 2848 mwifiex_dev_debugfs_remove(priv); 2977 2849 #endif 2978 2850 2851 + if (priv->sched_scanning) 2852 + priv->sched_scanning = false; 2853 + 2979 2854 mwifiex_stop_net_dev_queue(priv->netdev, adapter); 2980 2855 2981 2856 skb_queue_walk_safe(&priv->bypass_txq, skb, tmp) ··· 3175 3044 sizeof(byte_seq)); 3176 3045 mef_entry->filter[filt_num].filt_type = TYPE_EQ; 3177 3046 3178 - if (first_pat) 3047 + if (first_pat) { 3179 3048 first_pat = false; 3180 - else 3049 + mwifiex_dbg(priv->adapter, INFO, "Wake on patterns\n"); 3050 + } else { 3181 3051 mef_entry->filter[filt_num].filt_action = TYPE_AND; 3052 + } 3182 3053 3183 3054 filt_num++; 3184 3055 } ··· 3206 3073 mef_entry->filter[filt_num].offset = 56; 3207 3074 mef_entry->filter[filt_num].filt_type = TYPE_EQ; 3208 3075 mef_entry->filter[filt_num].filt_action = TYPE_OR; 3076 + mwifiex_dbg(priv->adapter, INFO, "Wake on magic packet\n"); 3209 3077 } 3210 3078 return ret; 3211 3079 } ··· 3277 3143 3278 3144 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA); 3279 3145 3280 - if (!priv->media_connected) { 3146 + if (!priv->media_connected && !wowlan->nd_config) { 3281 3147 mwifiex_dbg(adapter, ERROR, 3282 3148 "Can not configure WOWLAN in disconnected state\n"); 3283 3149 return 0; ··· 3289 3155 return ret; 3290 3156 } 3291 3157 3158 + memset(&hs_cfg, 0, sizeof(hs_cfg)); 3159 + hs_cfg.conditions = le32_to_cpu(adapter->hs_cfg.conditions); 3160 + 3161 + if (wowlan->nd_config) { 3162 + mwifiex_dbg(adapter, INFO, "Wake on net detect\n"); 3163 + hs_cfg.conditions 
|= HS_CFG_COND_MAC_EVENT; 3164 + mwifiex_cfg80211_sched_scan_start(wiphy, priv->netdev, 3165 + wowlan->nd_config); 3166 + } 3167 + 3292 3168 if (wowlan->disconnect) { 3293 - memset(&hs_cfg, 0, sizeof(hs_cfg)); 3294 - hs_cfg.is_invoke_hostcmd = false; 3295 - hs_cfg.conditions = HS_CFG_COND_MAC_EVENT; 3296 - hs_cfg.gpio = adapter->hs_cfg.gpio; 3297 - hs_cfg.gap = adapter->hs_cfg.gap; 3298 - ret = mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET, 3299 - MWIFIEX_SYNC_CMD, &hs_cfg); 3300 - if (ret) { 3301 - mwifiex_dbg(adapter, ERROR, 3302 - "Failed to set HS params\n"); 3303 - return ret; 3304 - } 3169 + hs_cfg.conditions |= HS_CFG_COND_MAC_EVENT; 3170 + mwifiex_dbg(priv->adapter, INFO, "Wake on device disconnect\n"); 3171 + } 3172 + 3173 + hs_cfg.is_invoke_hostcmd = false; 3174 + hs_cfg.gpio = adapter->hs_cfg.gpio; 3175 + hs_cfg.gap = adapter->hs_cfg.gap; 3176 + ret = mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET, 3177 + MWIFIEX_SYNC_CMD, &hs_cfg); 3178 + if (ret) { 3179 + mwifiex_dbg(adapter, ERROR, 3180 + "Failed to set HS params\n"); 3181 + return ret; 3305 3182 } 3306 3183 3307 3184 return ret; ··· 3320 3175 3321 3176 static int mwifiex_cfg80211_resume(struct wiphy *wiphy) 3322 3177 { 3178 + struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy); 3179 + struct mwifiex_private *priv = 3180 + mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA); 3181 + struct mwifiex_ds_wakeup_reason wakeup_reason; 3182 + struct cfg80211_wowlan_wakeup wakeup_report; 3183 + int i; 3184 + 3185 + mwifiex_get_wakeup_reason(priv, HostCmd_ACT_GEN_GET, MWIFIEX_SYNC_CMD, 3186 + &wakeup_reason); 3187 + memset(&wakeup_report, 0, sizeof(struct cfg80211_wowlan_wakeup)); 3188 + 3189 + wakeup_report.pattern_idx = -1; 3190 + 3191 + switch (wakeup_reason.hs_wakeup_reason) { 3192 + case NO_HSWAKEUP_REASON: 3193 + break; 3194 + case BCAST_DATA_MATCHED: 3195 + break; 3196 + case MCAST_DATA_MATCHED: 3197 + break; 3198 + case UCAST_DATA_MATCHED: 3199 + break; 3200 + case 
MASKTABLE_EVENT_MATCHED: 3201 + break; 3202 + case NON_MASKABLE_EVENT_MATCHED: 3203 + if (wiphy->wowlan_config->disconnect) 3204 + wakeup_report.disconnect = true; 3205 + if (wiphy->wowlan_config->nd_config) 3206 + wakeup_report.net_detect = adapter->nd_info; 3207 + break; 3208 + case NON_MASKABLE_CONDITION_MATCHED: 3209 + break; 3210 + case MAGIC_PATTERN_MATCHED: 3211 + if (wiphy->wowlan_config->magic_pkt) 3212 + wakeup_report.magic_pkt = true; 3213 + if (wiphy->wowlan_config->n_patterns) 3214 + wakeup_report.pattern_idx = 1; 3215 + break; 3216 + case CONTROL_FRAME_MATCHED: 3217 + break; 3218 + case MANAGEMENT_FRAME_MATCHED: 3219 + break; 3220 + default: 3221 + break; 3222 + } 3223 + 3224 + if ((wakeup_reason.hs_wakeup_reason > 0) && 3225 + (wakeup_reason.hs_wakeup_reason <= 7)) 3226 + cfg80211_report_wowlan_wakeup(&priv->wdev, &wakeup_report, 3227 + GFP_KERNEL); 3228 + 3229 + if (adapter->nd_info) { 3230 + for (i = 0 ; i < adapter->nd_info->n_matches ; i++) 3231 + kfree(adapter->nd_info->matches[i]); 3232 + kfree(adapter->nd_info); 3233 + adapter->nd_info = NULL; 3234 + } 3235 + 3323 3236 return 0; 3324 3237 } 3325 3238 ··· 3793 3590 freq = ieee80211_channel_to_frequency(curr_bss->channel, band); 3794 3591 chan = ieee80211_get_channel(wiphy, freq); 3795 3592 3796 - if (curr_bss->bcn_ht_oper) { 3797 - second_chan_offset = curr_bss->bcn_ht_oper->ht_param & 3593 + if (priv->ht_param_present) { 3594 + second_chan_offset = priv->assoc_resp_ht_param & 3798 3595 IEEE80211_HT_PARAM_CHA_SEC_OFFSET; 3799 3596 chan_type = mwifiex_sec_chan_offset_to_chan_type 3800 3597 (second_chan_offset); ··· 3904 3701 .set_cqm_rssi_config = mwifiex_cfg80211_set_cqm_rssi_config, 3905 3702 .set_antenna = mwifiex_cfg80211_set_antenna, 3906 3703 .del_station = mwifiex_cfg80211_del_station, 3704 + .sched_scan_start = mwifiex_cfg80211_sched_scan_start, 3705 + .sched_scan_stop = mwifiex_cfg80211_sched_scan_stop, 3907 3706 #ifdef CONFIG_PM 3908 3707 .suspend = mwifiex_cfg80211_suspend, 3909 3708 
.resume = mwifiex_cfg80211_resume, ··· 3925 3720 3926 3721 #ifdef CONFIG_PM 3927 3722 static const struct wiphy_wowlan_support mwifiex_wowlan_support = { 3928 - .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT, 3723 + .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT | 3724 + WIPHY_WOWLAN_NET_DETECT, 3929 3725 .n_patterns = MWIFIEX_MEF_MAX_FILTERS, 3930 3726 .pattern_min_len = 1, 3931 3727 .pattern_max_len = MWIFIEX_MAX_PATTERN_LEN, 3932 3728 .max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN, 3729 + .max_nd_match_sets = MWIFIEX_MAX_ND_MATCH_SETS, 3933 3730 }; 3934 3731 #endif 3935 3732 ··· 4036 3829 wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME | 4037 3830 WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD | 4038 3831 WIPHY_FLAG_AP_UAPSD | 3832 + WIPHY_FLAG_SUPPORTS_SCHED_SCAN | 4039 3833 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | 4040 3834 WIPHY_FLAG_HAS_CHANNEL_SWITCH | 4041 3835 WIPHY_FLAG_PS_ON_BY_DEFAULT; ··· 4054 3846 wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | 4055 3847 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | 4056 3848 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P; 3849 + 3850 + wiphy->max_sched_scan_ssids = MWIFIEX_MAX_SSID_LIST_LENGTH; 3851 + wiphy->max_sched_scan_ie_len = MWIFIEX_MAX_VSIE_LEN; 3852 + wiphy->max_match_sets = MWIFIEX_MAX_SSID_LIST_LENGTH; 4057 3853 4058 3854 wiphy->available_antennas_tx = BIT(adapter->number_of_antenna) - 1; 4059 3855 wiphy->available_antennas_rx = BIT(adapter->number_of_antenna) - 1;
+13
drivers/net/wireless/marvell/mwifiex/cmdevt.c
··· 1657 1657 1658 1658 return 0; 1659 1659 } 1660 + 1661 + /* This function handles the command response of hs wakeup reason 1662 + * command. 1663 + */ 1664 + int mwifiex_ret_wakeup_reason(struct mwifiex_private *priv, 1665 + struct host_cmd_ds_command *resp, 1666 + struct host_cmd_ds_wakeup_reason *wakeup_reason) 1667 + { 1668 + wakeup_reason->wakeup_reason = 1669 + resp->params.hs_wakeup_reason.wakeup_reason; 1670 + 1671 + return 0; 1672 + }
+49 -2
drivers/net/wireless/marvell/mwifiex/debugfs.c
··· 95 95 96 96 mwifiex_drv_get_driver_version(priv->adapter, fmt, sizeof(fmt) - 1); 97 97 98 - if (!priv->version_str[0]) 99 - mwifiex_get_ver_ext(priv); 98 + mwifiex_get_ver_ext(priv, 0); 100 99 101 100 p += sprintf(p, "driver_name = " "\"mwifiex\"\n"); 102 101 p += sprintf(p, "driver_version = %s", fmt); ··· 582 583 return ret; 583 584 } 584 585 586 + /* debugfs verext file write handler. 587 + * This function is called when the 'verext' file is opened for write 588 + */ 589 + static ssize_t 590 + mwifiex_verext_write(struct file *file, const char __user *ubuf, 591 + size_t count, loff_t *ppos) 592 + { 593 + int ret; 594 + u32 versionstrsel; 595 + struct mwifiex_private *priv = (void *)file->private_data; 596 + char buf[16]; 597 + 598 + memset(buf, 0, sizeof(buf)); 599 + 600 + if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) 601 + return -EFAULT; 602 + 603 + ret = kstrtou32(buf, 10, &versionstrsel); 604 + if (ret) 605 + return ret; 606 + 607 + priv->versionstrsel = versionstrsel; 608 + 609 + return count; 610 + } 611 + 612 + /* Proc verext file read handler. 613 + * This function is called when the 'verext' file is opened for reading 614 + * This function can be used read driver exteneed verion string. 615 + */ 616 + static ssize_t 617 + mwifiex_verext_read(struct file *file, char __user *ubuf, 618 + size_t count, loff_t *ppos) 619 + { 620 + struct mwifiex_private *priv = 621 + (struct mwifiex_private *)file->private_data; 622 + char buf[256]; 623 + int ret; 624 + 625 + mwifiex_get_ver_ext(priv, priv->versionstrsel); 626 + ret = snprintf(buf, sizeof(buf), "version string: %s\n", 627 + priv->version_str); 628 + 629 + return simple_read_from_buffer(ubuf, count, ppos, buf, ret); 630 + } 631 + 585 632 /* Proc memrw file write handler. 586 633 * This function is called when the 'memrw' file is opened for writing 587 634 * This function can be used to write to a memory location. 
··· 985 940 MWIFIEX_DFS_FILE_OPS(debug_mask); 986 941 MWIFIEX_DFS_FILE_OPS(timeshare_coex); 987 942 MWIFIEX_DFS_FILE_WRITE_OPS(reset); 943 + MWIFIEX_DFS_FILE_OPS(verext); 988 944 989 945 /* 990 946 * This function creates the debug FS directory structure and the files. ··· 1014 968 MWIFIEX_DFS_ADD_FILE(debug_mask); 1015 969 MWIFIEX_DFS_ADD_FILE(timeshare_coex); 1016 970 MWIFIEX_DFS_ADD_FILE(reset); 971 + MWIFIEX_DFS_ADD_FILE(verext); 1017 972 } 1018 973 1019 974 /*
+24
drivers/net/wireless/marvell/mwifiex/decl.h
··· 122 122 #define BLOCK_NUMBER_OFFSET 15 123 123 #define SDIO_HEADER_OFFSET 28 124 124 125 + #define MWIFIEX_SIZE_4K 0x4000 126 + 125 127 enum mwifiex_bss_type { 126 128 MWIFIEX_BSS_TYPE_STA = 0, 127 129 MWIFIEX_BSS_TYPE_UAP = 1, ··· 272 270 bool is_11h_enabled; 273 271 bool is_11h_active; 274 272 } __packed; 273 + 274 + #define MWIFIEX_FW_DUMP_IDX 0xff 275 + #define MWIFIEX_FW_DUMP_MAX_MEMSIZE 0x160000 276 + #define MWIFIEX_DRV_INFO_IDX 20 277 + #define FW_DUMP_MAX_NAME_LEN 8 278 + #define FW_DUMP_HOST_READY 0xEE 279 + #define FW_DUMP_DONE 0xFF 280 + #define FW_DUMP_READ_DONE 0xFE 281 + 282 + struct memory_type_mapping { 283 + u8 mem_name[FW_DUMP_MAX_NAME_LEN]; 284 + u8 *mem_ptr; 285 + u32 mem_size; 286 + u8 done_flag; 287 + }; 288 + 289 + enum rdwr_status { 290 + RDWR_STATUS_SUCCESS = 0, 291 + RDWR_STATUS_FAILURE = 1, 292 + RDWR_STATUS_DONE = 2 293 + }; 294 + 275 295 #endif /* !_MWIFIEX_DECL_H_ */
+88 -2
drivers/net/wireless/marvell/mwifiex/fw.h
··· 96 96 #define WAPI_KEY_LEN (WLAN_KEY_LEN_SMS4 + PN_LEN + 2) 97 97 98 98 #define MAX_POLL_TRIES 100 99 - #define MAX_FIRMWARE_POLL_TRIES 100 99 + #define MAX_FIRMWARE_POLL_TRIES 150 100 100 101 101 #define FIRMWARE_READY_SDIO 0xfedc 102 102 #define FIRMWARE_READY_PCIE 0xfedcba00 ··· 144 144 #define TLV_TYPE_WILDCARDSSID (PROPRIETARY_TLV_BASE_ID + 18) 145 145 #define TLV_TYPE_TSFTIMESTAMP (PROPRIETARY_TLV_BASE_ID + 19) 146 146 #define TLV_TYPE_RSSI_HIGH (PROPRIETARY_TLV_BASE_ID + 22) 147 + #define TLV_TYPE_BGSCAN_START_LATER (PROPRIETARY_TLV_BASE_ID + 30) 147 148 #define TLV_TYPE_AUTH_TYPE (PROPRIETARY_TLV_BASE_ID + 31) 148 149 #define TLV_TYPE_STA_MAC_ADDR (PROPRIETARY_TLV_BASE_ID + 32) 149 150 #define TLV_TYPE_BSSID (PROPRIETARY_TLV_BASE_ID + 35) ··· 178 177 #define TLV_TYPE_TX_PAUSE (PROPRIETARY_TLV_BASE_ID + 148) 179 178 #define TLV_TYPE_COALESCE_RULE (PROPRIETARY_TLV_BASE_ID + 154) 180 179 #define TLV_TYPE_KEY_PARAM_V2 (PROPRIETARY_TLV_BASE_ID + 156) 180 + #define TLV_TYPE_REPEAT_COUNT (PROPRIETARY_TLV_BASE_ID + 176) 181 181 #define TLV_TYPE_MULTI_CHAN_INFO (PROPRIETARY_TLV_BASE_ID + 183) 182 182 #define TLV_TYPE_MC_GROUP_INFO (PROPRIETARY_TLV_BASE_ID + 184) 183 183 #define TLV_TYPE_TDLS_IDLE_TIMEOUT (PROPRIETARY_TLV_BASE_ID + 194) ··· 333 331 #define HostCmd_CMD_802_11_MAC_ADDRESS 0x004D 334 332 #define HostCmd_CMD_802_11D_DOMAIN_INFO 0x005b 335 333 #define HostCmd_CMD_802_11_KEY_MATERIAL 0x005e 334 + #define HostCmd_CMD_802_11_BG_SCAN_CONFIG 0x006b 336 335 #define HostCmd_CMD_802_11_BG_SCAN_QUERY 0x006c 337 336 #define HostCmd_CMD_WMM_GET_STATUS 0x0071 338 337 #define HostCmd_CMD_802_11_SUBSCRIBE_EVENT 0x0075 ··· 373 370 #define HostCmd_CMD_MGMT_FRAME_REG 0x010c 374 371 #define HostCmd_CMD_REMAIN_ON_CHAN 0x010d 375 372 #define HostCmd_CMD_11AC_CFG 0x0112 373 + #define HostCmd_CMD_HS_WAKEUP_REASON 0x0116 376 374 #define HostCmd_CMD_TDLS_CONFIG 0x0100 377 375 #define HostCmd_CMD_MC_POLICY 0x0121 378 376 #define HostCmd_CMD_TDLS_OPER 0x0122 ··· 527 523 
#define EVENT_CHANNEL_REPORT_RDY 0x00000054 528 524 #define EVENT_TX_DATA_PAUSE 0x00000055 529 525 #define EVENT_EXT_SCAN_REPORT 0x00000058 526 + #define EVENT_BG_SCAN_STOPPED 0x00000065 530 527 #define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f 531 528 #define EVENT_MULTI_CHAN_INFO 0x0000006a 532 529 #define EVENT_TX_STATUS_REPORT 0x00000074 ··· 544 539 545 540 #define MWIFIEX_MAX_PATTERN_LEN 40 546 541 #define MWIFIEX_MAX_OFFSET_LEN 100 542 + #define MWIFIEX_MAX_ND_MATCH_SETS 10 543 + 547 544 #define STACK_NBYTES 100 548 545 #define TYPE_DNUM 1 549 546 #define TYPE_BYTESEQ 2 ··· 607 600 #define MWIFIEX_TXPD_FLAGS_TDLS_PACKET 0x10 608 601 #define MWIFIEX_RXPD_FLAGS_TDLS_PACKET 0x01 609 602 #define MWIFIEX_TXPD_FLAGS_REQ_TX_STATUS 0x20 603 + 604 + enum HS_WAKEUP_REASON { 605 + NO_HSWAKEUP_REASON = 0, 606 + BCAST_DATA_MATCHED, 607 + MCAST_DATA_MATCHED, 608 + UCAST_DATA_MATCHED, 609 + MASKTABLE_EVENT_MATCHED, 610 + NON_MASKABLE_EVENT_MATCHED, 611 + NON_MASKABLE_CONDITION_MATCHED, 612 + MAGIC_PATTERN_MATCHED, 613 + CONTROL_FRAME_MATCHED, 614 + MANAGEMENT_FRAME_MATCHED, 615 + RESERVED 616 + }; 610 617 611 618 struct txpd { 612 619 u8 bss_type; ··· 752 731 struct mwifiex_ie_types_num_probes { 753 732 struct mwifiex_ie_types_header header; 754 733 __le16 num_probes; 734 + } __packed; 735 + 736 + struct mwifiex_ie_types_repeat_count { 737 + struct mwifiex_ie_types_header header; 738 + __le16 repeat_count; 739 + } __packed; 740 + 741 + struct mwifiex_ie_types_min_rssi_threshold { 742 + struct mwifiex_ie_types_header header; 743 + __le16 rssi_threshold; 744 + } __packed; 745 + 746 + struct mwifiex_ie_types_bgscan_start_later { 747 + struct mwifiex_ie_types_header header; 748 + __le16 start_later; 755 749 } __packed; 756 750 757 751 struct mwifiex_ie_types_scan_chan_gap { ··· 1063 1027 __le16 cap_info_bitmap; 1064 1028 __le16 status_code; 1065 1029 __le16 a_id; 1066 - u8 ie_buffer[1]; 1030 + u8 ie_buffer[0]; 1067 1031 } __packed; 1068 1032 1069 1033 struct 
host_cmd_ds_802_11_associate_rsp { ··· 1461 1425 u16 scan_chan_gap; 1462 1426 } __packed; 1463 1427 1428 + #define MWIFIEX_BG_SCAN_CHAN_MAX 38 1429 + #define MWIFIEX_BSS_MODE_INFRA 1 1430 + #define MWIFIEX_BGSCAN_ACT_GET 0x0000 1431 + #define MWIFIEX_BGSCAN_ACT_SET 0x0001 1432 + #define MWIFIEX_BGSCAN_ACT_SET_ALL 0xff01 1433 + /** ssid match */ 1434 + #define MWIFIEX_BGSCAN_SSID_MATCH 0x0001 1435 + /** ssid match and RSSI exceeded */ 1436 + #define MWIFIEX_BGSCAN_SSID_RSSI_MATCH 0x0004 1437 + /**wait for all channel scan to complete to report scan result*/ 1438 + #define MWIFIEX_BGSCAN_WAIT_ALL_CHAN_DONE 0x80000000 1439 + 1440 + struct mwifiex_bg_scan_cfg { 1441 + u16 action; 1442 + u8 enable; 1443 + u8 bss_type; 1444 + u8 chan_per_scan; 1445 + u32 scan_interval; 1446 + u32 report_condition; 1447 + u8 num_probes; 1448 + u8 rssi_threshold; 1449 + u8 snr_threshold; 1450 + u16 repeat_count; 1451 + u16 start_later; 1452 + struct cfg80211_match_set *ssid_list; 1453 + u8 num_ssids; 1454 + struct mwifiex_user_scan_chan chan_list[MWIFIEX_BG_SCAN_CHAN_MAX]; 1455 + u16 scan_chan_gap; 1456 + } __packed; 1457 + 1464 1458 struct ie_body { 1465 1459 u8 grp_key_oui[4]; 1466 1460 u8 ptk_cnt[2]; ··· 1534 1468 u8 channel; 1535 1469 u8 reserved; 1536 1470 __le64 tsf; 1471 + } __packed; 1472 + 1473 + struct host_cmd_ds_802_11_bg_scan_config { 1474 + __le16 action; 1475 + u8 enable; 1476 + u8 bss_type; 1477 + u8 chan_per_scan; 1478 + u8 reserved; 1479 + __le16 reserved1; 1480 + __le32 scan_interval; 1481 + __le32 reserved2; 1482 + __le32 report_condition; 1483 + __le16 reserved3; 1484 + u8 tlv[0]; 1537 1485 } __packed; 1538 1486 1539 1487 struct host_cmd_ds_802_11_bg_scan_query { ··· 2179 2099 __le16 reserved; 2180 2100 } __packed; 2181 2101 2102 + struct host_cmd_ds_wakeup_reason { 2103 + u16 wakeup_reason; 2104 + } __packed; 2105 + 2182 2106 struct host_cmd_ds_command { 2183 2107 __le16 command; 2184 2108 __le16 size; ··· 2208 2124 struct host_cmd_ds_802_11_scan scan; 2209 2125 
struct host_cmd_ds_802_11_scan_ext ext_scan; 2210 2126 struct host_cmd_ds_802_11_scan_rsp scan_resp; 2127 + struct host_cmd_ds_802_11_bg_scan_config bg_scan_config; 2211 2128 struct host_cmd_ds_802_11_bg_scan_query bg_scan_query; 2212 2129 struct host_cmd_ds_802_11_bg_scan_query_rsp bg_scan_query_resp; 2213 2130 struct host_cmd_ds_802_11_associate associate; ··· 2255 2170 struct host_cmd_sdio_sp_rx_aggr_cfg sdio_rx_aggr_cfg; 2256 2171 struct host_cmd_ds_multi_chan_policy mc_policy; 2257 2172 struct host_cmd_ds_robust_coex coex; 2173 + struct host_cmd_ds_wakeup_reason hs_wakeup_reason; 2258 2174 } params; 2259 2175 } __packed; 2260 2176
+12 -4
drivers/net/wireless/marvell/mwifiex/init.c
··· 741 741 u32 poll_num = 1; 742 742 743 743 if (adapter->if_ops.check_fw_status) { 744 - adapter->winner = 0; 745 - 746 744 /* check if firmware is already running */ 747 745 ret = adapter->if_ops.check_fw_status(adapter, poll_num); 748 746 if (!ret) { ··· 748 750 "WLAN FW already running! Skip FW dnld\n"); 749 751 return 0; 750 752 } 753 + } 754 + 755 + /* check if we are the winner for downloading FW */ 756 + if (adapter->if_ops.check_winner_status) { 757 + adapter->winner = 0; 758 + ret = adapter->if_ops.check_winner_status(adapter); 751 759 752 760 poll_num = MAX_FIRMWARE_POLL_TRIES; 761 + if (ret) { 762 + mwifiex_dbg(adapter, MSG, 763 + "WLAN read winner status failed!\n"); 764 + return ret; 765 + } 753 766 754 - /* check if we are the winner for downloading FW */ 755 767 if (!adapter->winner) { 756 768 mwifiex_dbg(adapter, MSG, 757 - "FW already running! Skip FW dnld\n"); 769 + "WLAN is not the winner! Skip FW dnld\n"); 758 770 goto poll_fw; 759 771 } 760 772 }
+5
drivers/net/wireless/marvell/mwifiex/ioctl.h
··· 271 271 u32 gap; 272 272 }; 273 273 274 + struct mwifiex_ds_wakeup_reason { 275 + u16 hs_wakeup_reason; 276 + }; 277 + 274 278 #define DEEP_SLEEP_ON 1 275 279 #define DEEP_SLEEP_OFF 0 276 280 #define DEEP_SLEEP_IDLE_TIME 100 ··· 418 414 #define MWIFIEX_VSIE_MASK_SCAN 0x01 419 415 #define MWIFIEX_VSIE_MASK_ASSOC 0x02 420 416 #define MWIFIEX_VSIE_MASK_ADHOC 0x04 417 + #define MWIFIEX_VSIE_MASK_BGSCAN 0x08 421 418 422 419 enum { 423 420 MWIFIEX_FUNC_INIT = 1,
+15
drivers/net/wireless/marvell/mwifiex/join.c
··· 644 644 struct mwifiex_bssdescriptor *bss_desc; 645 645 bool enable_data = true; 646 646 u16 cap_info, status_code, aid; 647 + const u8 *ie_ptr; 648 + struct ieee80211_ht_operation *assoc_resp_ht_oper; 647 649 648 650 assoc_rsp = (struct ieee_types_assoc_rsp *) &resp->params; 649 651 ··· 734 732 priv->curr_bss_params.wmm_uapsd_enabled 735 733 = ((bss_desc->wmm_ie.qos_info_bitmap & 736 734 IEEE80211_WMM_IE_AP_QOSINFO_UAPSD) ? 1 : 0); 735 + 736 + /* Store the bandwidth information from assoc response */ 737 + ie_ptr = cfg80211_find_ie(WLAN_EID_HT_OPERATION, assoc_rsp->ie_buffer, 738 + priv->assoc_rsp_size 739 + - sizeof(struct ieee_types_assoc_rsp)); 740 + if (ie_ptr) { 741 + assoc_resp_ht_oper = (struct ieee80211_ht_operation *)(ie_ptr 742 + + sizeof(struct ieee_types_header)); 743 + priv->assoc_resp_ht_param = assoc_resp_ht_oper->ht_param; 744 + priv->ht_param_present = true; 745 + } else { 746 + priv->ht_param_present = false; 747 + } 737 748 738 749 mwifiex_dbg(priv->adapter, INFO, 739 750 "info: ASSOC_RESP: curr_pkt_filter is %#x\n",
+14
drivers/net/wireless/marvell/mwifiex/main.c
··· 132 132 } 133 133 } 134 134 135 + if (adapter->nd_info) { 136 + for (i = 0 ; i < adapter->nd_info->n_matches ; i++) 137 + kfree(adapter->nd_info->matches[i]); 138 + kfree(adapter->nd_info); 139 + adapter->nd_info = NULL; 140 + } 141 + 135 142 vfree(adapter->chan_stats); 136 143 kfree(adapter); 137 144 return 0; ··· 752 745 } 753 746 754 747 mwifiex_queue_main_work(priv->adapter); 748 + 749 + if (priv->sched_scanning) { 750 + mwifiex_dbg(priv->adapter, INFO, 751 + "aborting bgscan on ndo_stop\n"); 752 + mwifiex_stop_bg_scan(priv); 753 + cfg80211_sched_scan_stopped(priv->wdev.wiphy); 754 + } 755 755 756 756 return 0; 757 757 }
+23 -21
drivers/net/wireless/marvell/mwifiex/main.h
··· 198 198 buf, len, false); \ 199 199 } while (0) 200 200 201 + /** Min BGSCAN interval 15 second */ 202 + #define MWIFIEX_BGSCAN_INTERVAL 15000 203 + /** default repeat count */ 204 + #define MWIFIEX_BGSCAN_REPEAT_COUNT 6 205 + 201 206 struct mwifiex_dbg { 202 207 u32 num_cmd_host_to_card_failure; 203 208 u32 num_cmd_sleep_cfm_host_to_card_failure; ··· 298 293 #define WMM_HIGHEST_PRIORITY 7 299 294 #define HIGH_PRIO_TID 7 300 295 #define LOW_PRIO_TID 0 296 + #define MWIFIEX_WMM_DRV_DELAY_MAX 510 301 297 302 298 struct mwifiex_wmm_desc { 303 299 struct mwifiex_tid_tbl tid_tbl_ptr[MAX_NUM_TID]; ··· 489 483 struct ieee80211_channel chan; 490 484 }; 491 485 492 - #define MWIFIEX_FW_DUMP_IDX 0xff 493 - #define MWIFIEX_DRV_INFO_IDX 20 494 - #define FW_DUMP_MAX_NAME_LEN 8 495 - #define FW_DUMP_HOST_READY 0xEE 496 - #define FW_DUMP_DONE 0xFF 497 - #define FW_DUMP_READ_DONE 0xFE 498 - 499 - struct memory_type_mapping { 500 - u8 mem_name[FW_DUMP_MAX_NAME_LEN]; 501 - u8 *mem_ptr; 502 - u32 mem_size; 503 - u8 done_flag; 504 - }; 505 - 506 - enum rdwr_status { 507 - RDWR_STATUS_SUCCESS = 0, 508 - RDWR_STATUS_FAILURE = 1, 509 - RDWR_STATUS_DONE = 2 510 - }; 511 - 512 486 enum mwifiex_iface_work_flags { 513 487 MWIFIEX_IFACE_WORK_DEVICE_DUMP, 514 488 MWIFIEX_IFACE_WORK_CARD_RESET, ··· 602 616 spinlock_t curr_bcn_buf_lock; 603 617 struct wireless_dev wdev; 604 618 struct mwifiex_chan_freq_power cfp; 619 + u32 versionstrsel; 605 620 char version_str[128]; 606 621 #ifdef CONFIG_DEBUG_FS 607 622 struct dentry *dfs_dev_dir; ··· 627 640 u32 mgmt_frame_mask; 628 641 struct mwifiex_roc_cfg roc_cfg; 629 642 bool scan_aborting; 643 + u8 sched_scanning; 630 644 u8 csa_chan; 631 645 unsigned long csa_expire_time; 632 646 u8 del_list_idx; ··· 655 667 struct mwifiex_ds_mem_rw mem_rw; 656 668 struct sk_buff_head bypass_txq; 657 669 struct mwifiex_user_scan_chan hidden_chan[MWIFIEX_USER_SCAN_CHAN_MAX]; 670 + u8 assoc_resp_ht_param; 671 + bool ht_param_present; 658 672 }; 659 673 660 674 ··· 
781 791 int (*init_if) (struct mwifiex_adapter *); 782 792 void (*cleanup_if) (struct mwifiex_adapter *); 783 793 int (*check_fw_status) (struct mwifiex_adapter *, u32); 794 + int (*check_winner_status)(struct mwifiex_adapter *); 784 795 int (*prog_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *); 785 796 int (*register_dev) (struct mwifiex_adapter *); 786 797 void (*unregister_dev) (struct mwifiex_adapter *); ··· 985 994 u8 active_scan_triggered; 986 995 bool usb_mc_status; 987 996 bool usb_mc_setup; 997 + struct cfg80211_wowlan_nd_info *nd_info; 988 998 }; 989 999 990 1000 void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter); ··· 1188 1196 struct host_cmd_ds_command *resp); 1189 1197 int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv, 1190 1198 void *buf); 1199 + int mwifiex_cmd_802_11_bg_scan_config(struct mwifiex_private *priv, 1200 + struct host_cmd_ds_command *cmd, 1201 + void *data_buf); 1202 + int mwifiex_stop_bg_scan(struct mwifiex_private *priv); 1191 1203 1192 1204 /* 1193 1205 * This function checks if the queuing is RA based or not. 
··· 1413 1417 1414 1418 int mwifiex_set_gen_ie(struct mwifiex_private *priv, const u8 *ie, int ie_len); 1415 1419 1416 - int mwifiex_get_ver_ext(struct mwifiex_private *priv); 1420 + int mwifiex_get_ver_ext(struct mwifiex_private *priv, u32 version_str_sel); 1417 1421 1418 1422 int mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action, 1419 1423 struct ieee80211_channel *chan, ··· 1582 1586 void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter); 1583 1587 void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags); 1584 1588 void mwifiex_queue_main_work(struct mwifiex_adapter *adapter); 1589 + int mwifiex_get_wakeup_reason(struct mwifiex_private *priv, u16 action, 1590 + int cmd_type, 1591 + struct mwifiex_ds_wakeup_reason *wakeup_reason); 1592 + int mwifiex_ret_wakeup_reason(struct mwifiex_private *priv, 1593 + struct host_cmd_ds_command *resp, 1594 + struct host_cmd_ds_wakeup_reason *wakeup_reason); 1585 1595 void mwifiex_coex_ampdu_rxwinsize(struct mwifiex_adapter *adapter); 1586 1596 void mwifiex_11n_delba(struct mwifiex_private *priv, int tid); 1587 1597 int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy);
+240 -81
drivers/net/wireless/marvell/mwifiex/pcie.c
··· 37 37 38 38 static struct semaphore add_remove_card_sem; 39 39 40 - static struct memory_type_mapping mem_type_mapping_tbl[] = { 41 - {"ITCM", NULL, 0, 0xF0}, 42 - {"DTCM", NULL, 0, 0xF1}, 43 - {"SQRAM", NULL, 0, 0xF2}, 44 - {"IRAM", NULL, 0, 0xF3}, 45 - {"APU", NULL, 0, 0xF4}, 46 - {"CIU", NULL, 0, 0xF5}, 47 - {"ICU", NULL, 0, 0xF6}, 48 - {"MAC", NULL, 0, 0xF7}, 49 - }; 50 - 51 40 static int 52 41 mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb, 53 42 size_t size, int flags) ··· 195 206 card->pcie.blksz_fw_dl = data->blksz_fw_dl; 196 207 card->pcie.tx_buf_size = data->tx_buf_size; 197 208 card->pcie.can_dump_fw = data->can_dump_fw; 209 + card->pcie.mem_type_mapping_tbl = data->mem_type_mapping_tbl; 210 + card->pcie.num_mem_types = data->num_mem_types; 198 211 card->pcie.can_ext_scan = data->can_ext_scan; 199 212 } 200 213 ··· 314 323 struct pcie_service_card *card = adapter->card; 315 324 316 325 *data = ioread32(card->pci_mmap1 + reg); 326 + if (*data == 0xffffffff) 327 + return 0xffffffff; 317 328 318 329 return 0; 319 330 } ··· 2000 2007 2001 2008 /* 2002 2009 * This function checks the firmware status in card. 2003 - * 2004 - * The winner interface is also determined by this function. 
2005 2010 */ 2006 2011 static int 2007 2012 mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num) 2008 2013 { 2009 2014 int ret = 0; 2010 - u32 firmware_stat, winner_status; 2015 + u32 firmware_stat; 2011 2016 struct pcie_service_card *card = adapter->card; 2012 2017 const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; 2013 2018 u32 tries; ··· 2045 2054 } 2046 2055 } 2047 2056 2048 - if (ret) { 2049 - if (mwifiex_read_reg(adapter, reg->fw_status, 2050 - &winner_status)) 2051 - ret = -1; 2052 - else if (!winner_status) { 2053 - mwifiex_dbg(adapter, INFO, 2054 - "PCI-E is the winner\n"); 2055 - adapter->winner = 1; 2056 - } else { 2057 - mwifiex_dbg(adapter, ERROR, 2058 - "PCI-E is not the winner <%#x,%d>, exit dnld\n", 2059 - ret, adapter->winner); 2060 - } 2057 + return ret; 2058 + } 2059 + 2060 + /* This function checks if WLAN is the winner. 2061 + */ 2062 + static int 2063 + mwifiex_check_winner_status(struct mwifiex_adapter *adapter) 2064 + { 2065 + u32 winner = 0; 2066 + int ret = 0; 2067 + struct pcie_service_card *card = adapter->card; 2068 + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; 2069 + 2070 + if (mwifiex_read_reg(adapter, reg->fw_status, &winner)) { 2071 + ret = -1; 2072 + } else if (!winner) { 2073 + mwifiex_dbg(adapter, INFO, "PCI-E is the winner\n"); 2074 + adapter->winner = 1; 2075 + } else { 2076 + mwifiex_dbg(adapter, ERROR, 2077 + "PCI-E is not the winner <%#x,%d>, exit dnld\n", 2078 + ret, adapter->winner); 2061 2079 } 2062 2080 2063 2081 return ret; ··· 2075 2075 /* 2076 2076 * This function reads the interrupt status from card. 
2077 2077 */ 2078 - static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter) 2078 + static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter, 2079 + int msg_id) 2079 2080 { 2080 2081 u32 pcie_ireg; 2081 2082 unsigned long flags; 2083 + struct pcie_service_card *card = adapter->card; 2082 2084 2083 2085 if (!mwifiex_pcie_ok_to_access_hw(adapter)) 2084 2086 return; 2085 2087 2086 - if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS, &pcie_ireg)) { 2087 - mwifiex_dbg(adapter, ERROR, "Read register failed\n"); 2088 - return; 2089 - } 2088 + if (card->msix_enable && msg_id >= 0) { 2089 + pcie_ireg = BIT(msg_id); 2090 + } else { 2091 + if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS, 2092 + &pcie_ireg)) { 2093 + mwifiex_dbg(adapter, ERROR, "Read register failed\n"); 2094 + return; 2095 + } 2090 2096 2091 - if ((pcie_ireg != 0xFFFFFFFF) && (pcie_ireg)) { 2097 + if ((pcie_ireg == 0xFFFFFFFF) || !pcie_ireg) 2098 + return; 2099 + 2092 2100 2093 2101 mwifiex_pcie_disable_host_int(adapter); 2094 2102 ··· 2107 2099 "Write register failed\n"); 2108 2100 return; 2109 2101 } 2110 - spin_lock_irqsave(&adapter->int_lock, flags); 2111 - adapter->int_status |= pcie_ireg; 2112 - spin_unlock_irqrestore(&adapter->int_lock, flags); 2113 - 2114 - if (!adapter->pps_uapsd_mode && 2115 - adapter->ps_state == PS_STATE_SLEEP && 2116 - mwifiex_pcie_ok_to_access_hw(adapter)) { 2117 - /* Potentially for PCIe we could get other 2118 - * interrupts like shared. Don't change power 2119 - * state until cookie is set */ 2120 - adapter->ps_state = PS_STATE_AWAKE; 2121 - adapter->pm_wakeup_fw_try = false; 2122 - del_timer(&adapter->wakeup_timer); 2123 - } 2124 2102 } 2103 + 2104 + if (!adapter->pps_uapsd_mode && 2105 + adapter->ps_state == PS_STATE_SLEEP && 2106 + mwifiex_pcie_ok_to_access_hw(adapter)) { 2107 + /* Potentially for PCIe we could get other 2108 + * interrupts like shared. 
Don't change power 2109 + * state until cookie is set 2110 + */ 2111 + adapter->ps_state = PS_STATE_AWAKE; 2112 + adapter->pm_wakeup_fw_try = false; 2113 + del_timer(&adapter->wakeup_timer); 2114 + } 2115 + 2116 + spin_lock_irqsave(&adapter->int_lock, flags); 2117 + adapter->int_status |= pcie_ireg; 2118 + spin_unlock_irqrestore(&adapter->int_lock, flags); 2119 + mwifiex_dbg(adapter, INTR, "ireg: 0x%08x\n", pcie_ireg); 2125 2120 } 2126 2121 2127 2122 /* ··· 2135 2124 */ 2136 2125 static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context) 2137 2126 { 2138 - struct pci_dev *pdev = (struct pci_dev *)context; 2127 + struct mwifiex_msix_context *ctx = context; 2128 + struct pci_dev *pdev = ctx->dev; 2139 2129 struct pcie_service_card *card; 2140 2130 struct mwifiex_adapter *adapter; 2141 2131 ··· 2156 2144 if (adapter->surprise_removed) 2157 2145 goto exit; 2158 2146 2159 - mwifiex_interrupt_status(adapter); 2147 + if (card->msix_enable) 2148 + mwifiex_interrupt_status(adapter, ctx->msg_id); 2149 + else 2150 + mwifiex_interrupt_status(adapter, -1); 2151 + 2160 2152 mwifiex_queue_main_work(adapter); 2161 2153 2162 2154 exit: ··· 2180 2164 * In case of Rx packets received, the packets are uploaded from card to 2181 2165 * host and processed accordingly. 
2182 2166 */ 2183 - static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) 2167 + static int mwifiex_process_pcie_int(struct mwifiex_adapter *adapter) 2184 2168 { 2185 2169 int ret; 2186 2170 u32 pcie_ireg; ··· 2260 2244 return 0; 2261 2245 } 2262 2246 2247 + static int mwifiex_process_msix_int(struct mwifiex_adapter *adapter) 2248 + { 2249 + int ret; 2250 + u32 pcie_ireg; 2251 + unsigned long flags; 2252 + 2253 + spin_lock_irqsave(&adapter->int_lock, flags); 2254 + /* Clear out unused interrupts */ 2255 + pcie_ireg = adapter->int_status; 2256 + adapter->int_status = 0; 2257 + spin_unlock_irqrestore(&adapter->int_lock, flags); 2258 + 2259 + if (pcie_ireg & HOST_INTR_DNLD_DONE) { 2260 + mwifiex_dbg(adapter, INTR, 2261 + "info: TX DNLD Done\n"); 2262 + ret = mwifiex_pcie_send_data_complete(adapter); 2263 + if (ret) 2264 + return ret; 2265 + } 2266 + if (pcie_ireg & HOST_INTR_UPLD_RDY) { 2267 + mwifiex_dbg(adapter, INTR, 2268 + "info: Rx DATA\n"); 2269 + ret = mwifiex_pcie_process_recv_data(adapter); 2270 + if (ret) 2271 + return ret; 2272 + } 2273 + if (pcie_ireg & HOST_INTR_EVENT_RDY) { 2274 + mwifiex_dbg(adapter, INTR, 2275 + "info: Rx EVENT\n"); 2276 + ret = mwifiex_pcie_process_event_ready(adapter); 2277 + if (ret) 2278 + return ret; 2279 + } 2280 + 2281 + if (pcie_ireg & HOST_INTR_CMD_DONE) { 2282 + if (adapter->cmd_sent) { 2283 + mwifiex_dbg(adapter, INTR, 2284 + "info: CMD sent Interrupt\n"); 2285 + adapter->cmd_sent = false; 2286 + } 2287 + /* Handle command response */ 2288 + ret = mwifiex_pcie_process_cmd_complete(adapter); 2289 + if (ret) 2290 + return ret; 2291 + } 2292 + 2293 + mwifiex_dbg(adapter, INTR, 2294 + "info: cmd_sent=%d data_sent=%d\n", 2295 + adapter->cmd_sent, adapter->data_sent); 2296 + 2297 + return 0; 2298 + } 2299 + 2300 + static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) 2301 + { 2302 + struct pcie_service_card *card = adapter->card; 2303 + 2304 + if (card->msix_enable) 2305 + return 
mwifiex_process_msix_int(adapter); 2306 + else 2307 + return mwifiex_process_pcie_int(adapter); 2308 + } 2309 + 2263 2310 /* 2264 2311 * This function downloads data from driver to card. 2265 2312 * ··· 2357 2278 { 2358 2279 int ret, tries; 2359 2280 u8 ctrl_data; 2281 + u32 fw_status; 2360 2282 struct pcie_service_card *card = adapter->card; 2361 2283 const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; 2362 2284 2363 - ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl, FW_DUMP_HOST_READY); 2285 + if (mwifiex_read_reg(adapter, reg->fw_status, &fw_status)) 2286 + return RDWR_STATUS_FAILURE; 2287 + 2288 + ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl, 2289 + reg->fw_dump_host_ready); 2364 2290 if (ret) { 2365 2291 mwifiex_dbg(adapter, ERROR, 2366 2292 "PCIE write err\n"); ··· 2378 2294 return RDWR_STATUS_SUCCESS; 2379 2295 if (doneflag && ctrl_data == doneflag) 2380 2296 return RDWR_STATUS_DONE; 2381 - if (ctrl_data != FW_DUMP_HOST_READY) { 2297 + if (ctrl_data != reg->fw_dump_host_ready) { 2382 2298 mwifiex_dbg(adapter, WARN, 2383 2299 "The ctrl reg was changed, re-try again!\n"); 2384 2300 ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl, 2385 - FW_DUMP_HOST_READY); 2301 + reg->fw_dump_host_ready); 2386 2302 if (ret) { 2387 2303 mwifiex_dbg(adapter, ERROR, 2388 2304 "PCIE write err\n"); ··· 2402 2318 struct pcie_service_card *card = adapter->card; 2403 2319 const struct mwifiex_pcie_card_reg *creg = card->pcie.reg; 2404 2320 unsigned int reg, reg_start, reg_end; 2405 - u8 *dbg_ptr, *end_ptr, dump_num, idx, i, read_reg, doneflag = 0; 2321 + u8 *dbg_ptr, *end_ptr, *tmp_ptr, fw_dump_num, dump_num; 2322 + u8 idx, i, read_reg, doneflag = 0; 2406 2323 enum rdwr_status stat; 2407 2324 u32 memory_size; 2408 2325 int ret; ··· 2411 2326 if (!card->pcie.can_dump_fw) 2412 2327 return; 2413 2328 2414 - for (idx = 0; idx < ARRAY_SIZE(mem_type_mapping_tbl); idx++) { 2415 - struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx]; 2329 + for (idx = 0; idx < 
adapter->num_mem_types; idx++) { 2330 + struct memory_type_mapping *entry = 2331 + &adapter->mem_type_mapping_tbl[idx]; 2416 2332 2417 2333 if (entry->mem_ptr) { 2418 2334 vfree(entry->mem_ptr); ··· 2422 2336 entry->mem_size = 0; 2423 2337 } 2424 2338 2425 - mwifiex_dbg(adapter, DUMP, "== mwifiex firmware dump start ==\n"); 2339 + mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump start ==\n"); 2426 2340 2427 2341 /* Read the number of the memories which will dump */ 2428 2342 stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag); ··· 2430 2344 return; 2431 2345 2432 2346 reg = creg->fw_dump_start; 2433 - mwifiex_read_reg_byte(adapter, reg, &dump_num); 2347 + mwifiex_read_reg_byte(adapter, reg, &fw_dump_num); 2348 + 2349 + /* W8997 chipset firmware dump will be restore in single region*/ 2350 + if (fw_dump_num == 0) 2351 + dump_num = 1; 2352 + else 2353 + dump_num = fw_dump_num; 2434 2354 2435 2355 /* Read the length of every memory which will dump */ 2436 2356 for (idx = 0; idx < dump_num; idx++) { 2437 - struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx]; 2438 - 2439 - stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag); 2440 - if (stat == RDWR_STATUS_FAILURE) 2441 - return; 2442 - 2357 + struct memory_type_mapping *entry = 2358 + &adapter->mem_type_mapping_tbl[idx]; 2443 2359 memory_size = 0; 2444 - reg = creg->fw_dump_start; 2445 - for (i = 0; i < 4; i++) { 2446 - mwifiex_read_reg_byte(adapter, reg, &read_reg); 2447 - memory_size |= (read_reg << (i * 8)); 2360 + if (fw_dump_num != 0) { 2361 + stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag); 2362 + if (stat == RDWR_STATUS_FAILURE) 2363 + return; 2364 + 2365 + reg = creg->fw_dump_start; 2366 + for (i = 0; i < 4; i++) { 2367 + mwifiex_read_reg_byte(adapter, reg, &read_reg); 2368 + memory_size |= (read_reg << (i * 8)); 2448 2369 reg++; 2370 + } 2371 + } else { 2372 + memory_size = MWIFIEX_FW_DUMP_MAX_MEMSIZE; 2449 2373 } 2450 2374 2451 2375 if (memory_size == 0) { 2452 2376 mwifiex_dbg(adapter, 
MSG, "Firmware dump Finished!\n"); 2453 2377 ret = mwifiex_write_reg(adapter, creg->fw_dump_ctrl, 2454 - FW_DUMP_READ_DONE); 2378 + creg->fw_dump_read_done); 2455 2379 if (ret) { 2456 2380 mwifiex_dbg(adapter, ERROR, "PCIE write err\n"); 2457 2381 return; ··· 2496 2400 mwifiex_read_reg_byte(adapter, reg, dbg_ptr); 2497 2401 if (dbg_ptr < end_ptr) { 2498 2402 dbg_ptr++; 2499 - } else { 2500 - mwifiex_dbg(adapter, ERROR, 2501 - "Allocated buf not enough\n"); 2502 - return; 2403 + continue; 2503 2404 } 2405 + mwifiex_dbg(adapter, ERROR, 2406 + "pre-allocated buf not enough\n"); 2407 + tmp_ptr = 2408 + vzalloc(memory_size + MWIFIEX_SIZE_4K); 2409 + if (!tmp_ptr) 2410 + return; 2411 + memcpy(tmp_ptr, entry->mem_ptr, memory_size); 2412 + vfree(entry->mem_ptr); 2413 + entry->mem_ptr = tmp_ptr; 2414 + tmp_ptr = NULL; 2415 + dbg_ptr = entry->mem_ptr + memory_size; 2416 + memory_size += MWIFIEX_SIZE_4K; 2417 + end_ptr = entry->mem_ptr + memory_size; 2504 2418 } 2505 2419 2506 2420 if (stat != RDWR_STATUS_DONE) ··· 2522 2416 break; 2523 2417 } while (true); 2524 2418 } 2525 - mwifiex_dbg(adapter, DUMP, "== mwifiex firmware dump end ==\n"); 2419 + mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump end ==\n"); 2526 2420 } 2527 2421 2528 2422 static void mwifiex_pcie_device_dump_work(struct mwifiex_adapter *adapter) ··· 2701 2595 2702 2596 static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter) 2703 2597 { 2704 - int ret; 2598 + int ret, i, j; 2705 2599 struct pcie_service_card *card = adapter->card; 2706 2600 struct pci_dev *pdev = card->dev; 2601 + 2602 + if (card->pcie.reg->msix_support) { 2603 + for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++) 2604 + card->msix_entries[i].entry = i; 2605 + ret = pci_enable_msix_exact(pdev, card->msix_entries, 2606 + MWIFIEX_NUM_MSIX_VECTORS); 2607 + if (!ret) { 2608 + for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++) { 2609 + card->msix_ctx[i].dev = pdev; 2610 + card->msix_ctx[i].msg_id = i; 2611 + 2612 + ret = 
request_irq(card->msix_entries[i].vector, 2613 + mwifiex_pcie_interrupt, 0, 2614 + "MWIFIEX_PCIE_MSIX", 2615 + &card->msix_ctx[i]); 2616 + if (ret) 2617 + break; 2618 + } 2619 + 2620 + if (ret) { 2621 + mwifiex_dbg(adapter, INFO, "request_irq fail: %d\n", 2622 + ret); 2623 + for (j = 0; j < i; j++) 2624 + free_irq(card->msix_entries[j].vector, 2625 + &card->msix_ctx[i]); 2626 + pci_disable_msix(pdev); 2627 + } else { 2628 + mwifiex_dbg(adapter, MSG, "MSIx enabled!"); 2629 + card->msix_enable = 1; 2630 + return 0; 2631 + } 2632 + } 2633 + } 2707 2634 2708 2635 if (pci_enable_msi(pdev) != 0) 2709 2636 pci_disable_msi(pdev); ··· 2745 2606 2746 2607 mwifiex_dbg(adapter, INFO, "msi_enable = %d\n", card->msi_enable); 2747 2608 2609 + card->share_irq_ctx.dev = pdev; 2610 + card->share_irq_ctx.msg_id = -1; 2748 2611 ret = request_irq(pdev->irq, mwifiex_pcie_interrupt, IRQF_SHARED, 2749 - "MRVL_PCIE", pdev); 2612 + "MRVL_PCIE", &card->share_irq_ctx); 2750 2613 if (ret) { 2751 2614 pr_err("request_irq failed: ret=%d\n", ret); 2752 2615 adapter->card = NULL; ··· 2776 2635 return -1; 2777 2636 2778 2637 adapter->tx_buf_size = card->pcie.tx_buf_size; 2779 - adapter->mem_type_mapping_tbl = mem_type_mapping_tbl; 2780 - adapter->num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl); 2638 + adapter->mem_type_mapping_tbl = card->pcie.mem_type_mapping_tbl; 2639 + adapter->num_mem_types = card->pcie.num_mem_types; 2781 2640 strcpy(adapter->fw_name, card->pcie.firmware); 2782 2641 adapter->ext_scan = card->pcie.can_ext_scan; 2783 2642 ··· 2794 2653 { 2795 2654 struct pcie_service_card *card = adapter->card; 2796 2655 const struct mwifiex_pcie_card_reg *reg; 2656 + struct pci_dev *pdev = card->dev; 2657 + int i; 2797 2658 2798 2659 if (card) { 2799 - mwifiex_dbg(adapter, INFO, 2800 - "%s(): calling free_irq()\n", __func__); 2801 - free_irq(card->dev->irq, card->dev); 2660 + if (card->msix_enable) { 2661 + for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++) 2662 + 
synchronize_irq(card->msix_entries[i].vector); 2663 + 2664 + for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++) 2665 + free_irq(card->msix_entries[i].vector, 2666 + &card->msix_ctx[i]); 2667 + 2668 + card->msix_enable = 0; 2669 + pci_disable_msix(pdev); 2670 + } else { 2671 + mwifiex_dbg(adapter, INFO, 2672 + "%s(): calling free_irq()\n", __func__); 2673 + free_irq(card->dev->irq, &card->share_irq_ctx); 2674 + 2675 + if (card->msi_enable) 2676 + pci_disable_msi(pdev); 2677 + } 2802 2678 2803 2679 reg = card->pcie.reg; 2804 2680 if (reg->sleep_cookie) ··· 2833 2675 .init_if = mwifiex_pcie_init, 2834 2676 .cleanup_if = mwifiex_pcie_cleanup, 2835 2677 .check_fw_status = mwifiex_check_fw_status, 2678 + .check_winner_status = mwifiex_check_winner_status, 2836 2679 .prog_fw = mwifiex_prog_fw_w_helper, 2837 2680 .register_dev = mwifiex_register_dev, 2838 2681 .unregister_dev = mwifiex_unregister_dev,
+49 -1
drivers/net/wireless/marvell/mwifiex/pcie.h
··· 26 26 #include <linux/pcieport_if.h> 27 27 #include <linux/interrupt.h> 28 28 29 + #include "decl.h" 29 30 #include "main.h" 30 31 31 32 #define PCIE8766_DEFAULT_FW_NAME "mrvl/pcie8766_uapsta.bin" ··· 136 135 u16 fw_dump_ctrl; 137 136 u16 fw_dump_start; 138 137 u16 fw_dump_end; 138 + u8 fw_dump_host_ready; 139 + u8 fw_dump_read_done; 140 + u8 msix_support; 139 141 }; 140 142 141 143 static const struct mwifiex_pcie_card_reg mwifiex_reg_8766 = { ··· 170 166 .ring_tx_start_ptr = 0, 171 167 .pfu_enabled = 0, 172 168 .sleep_cookie = 1, 169 + .msix_support = 0, 173 170 }; 174 171 175 172 static const struct mwifiex_pcie_card_reg mwifiex_reg_8897 = { ··· 205 200 .fw_dump_ctrl = 0xcf4, 206 201 .fw_dump_start = 0xcf8, 207 202 .fw_dump_end = 0xcff, 203 + .fw_dump_host_ready = 0xee, 204 + .fw_dump_read_done = 0xfe, 205 + .msix_support = 0, 208 206 }; 209 207 210 208 static const struct mwifiex_pcie_card_reg mwifiex_reg_8997 = { ··· 239 231 .ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR, 240 232 .pfu_enabled = 1, 241 233 .sleep_cookie = 0, 234 + .fw_dump_ctrl = 0xcf4, 235 + .fw_dump_start = 0xcf8, 236 + .fw_dump_end = 0xcff, 237 + .fw_dump_host_ready = 0xcc, 238 + .fw_dump_read_done = 0xdd, 239 + .msix_support = 1, 240 + }; 241 + 242 + static struct memory_type_mapping mem_type_mapping_tbl_w8897[] = { 243 + {"ITCM", NULL, 0, 0xF0}, 244 + {"DTCM", NULL, 0, 0xF1}, 245 + {"SQRAM", NULL, 0, 0xF2}, 246 + {"IRAM", NULL, 0, 0xF3}, 247 + {"APU", NULL, 0, 0xF4}, 248 + {"CIU", NULL, 0, 0xF5}, 249 + {"ICU", NULL, 0, 0xF6}, 250 + {"MAC", NULL, 0, 0xF7}, 251 + }; 252 + 253 + static struct memory_type_mapping mem_type_mapping_tbl_w8997[] = { 254 + {"DUMP", NULL, 0, 0xDD}, 242 255 }; 243 256 244 257 struct mwifiex_pcie_device { ··· 268 239 u16 blksz_fw_dl; 269 240 u16 tx_buf_size; 270 241 bool can_dump_fw; 242 + struct memory_type_mapping *mem_type_mapping_tbl; 243 + u8 num_mem_types; 271 244 bool can_ext_scan; 272 245 }; 273 246 ··· 288 257 .blksz_fw_dl = 
MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD, 289 258 .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K, 290 259 .can_dump_fw = true, 260 + .mem_type_mapping_tbl = mem_type_mapping_tbl_w8897, 261 + .num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl_w8897), 291 262 .can_ext_scan = true, 292 263 }; 293 264 ··· 298 265 .reg = &mwifiex_reg_8997, 299 266 .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD, 300 267 .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K, 301 - .can_dump_fw = false, 268 + .can_dump_fw = true, 269 + .mem_type_mapping_tbl = mem_type_mapping_tbl_w8997, 270 + .num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl_w8997), 302 271 .can_ext_scan = true, 303 272 }; 304 273 ··· 324 289 u64 paddr; 325 290 u32 reserved; 326 291 } __packed; 292 + 293 + #define MWIFIEX_NUM_MSIX_VECTORS 4 294 + 295 + struct mwifiex_msix_context { 296 + struct pci_dev *dev; 297 + u16 msg_id; 298 + }; 327 299 328 300 struct pcie_service_card { 329 301 struct pci_dev *dev; ··· 369 327 void __iomem *pci_mmap; 370 328 void __iomem *pci_mmap1; 371 329 int msi_enable; 330 + int msix_enable; 331 + #ifdef CONFIG_PCI 332 + struct msix_entry msix_entries[MWIFIEX_NUM_MSIX_VECTORS]; 333 + #endif 334 + struct mwifiex_msix_context msix_ctx[MWIFIEX_NUM_MSIX_VECTORS]; 335 + struct mwifiex_msix_context share_irq_ctx; 372 336 }; 373 337 374 338 static inline int
+310
drivers/net/wireless/marvell/mwifiex/scan.c
··· 547 547 return chan_idx; 548 548 } 549 549 550 + /* This function creates a channel list tlv for bgscan config, based 551 + * on region/band information. 552 + */ 553 + static int 554 + mwifiex_bgscan_create_channel_list(struct mwifiex_private *priv, 555 + const struct mwifiex_bg_scan_cfg 556 + *bgscan_cfg_in, 557 + struct mwifiex_chan_scan_param_set 558 + *scan_chan_list) 559 + { 560 + enum ieee80211_band band; 561 + struct ieee80211_supported_band *sband; 562 + struct ieee80211_channel *ch; 563 + struct mwifiex_adapter *adapter = priv->adapter; 564 + int chan_idx = 0, i; 565 + 566 + for (band = 0; (band < IEEE80211_NUM_BANDS); band++) { 567 + if (!priv->wdev.wiphy->bands[band]) 568 + continue; 569 + 570 + sband = priv->wdev.wiphy->bands[band]; 571 + 572 + for (i = 0; (i < sband->n_channels) ; i++) { 573 + ch = &sband->channels[i]; 574 + if (ch->flags & IEEE80211_CHAN_DISABLED) 575 + continue; 576 + scan_chan_list[chan_idx].radio_type = band; 577 + 578 + if (bgscan_cfg_in->chan_list[0].scan_time) 579 + scan_chan_list[chan_idx].max_scan_time = 580 + cpu_to_le16((u16)bgscan_cfg_in-> 581 + chan_list[0].scan_time); 582 + else if (ch->flags & IEEE80211_CHAN_NO_IR) 583 + scan_chan_list[chan_idx].max_scan_time = 584 + cpu_to_le16(adapter->passive_scan_time); 585 + else 586 + scan_chan_list[chan_idx].max_scan_time = 587 + cpu_to_le16(adapter-> 588 + specific_scan_time); 589 + 590 + if (ch->flags & IEEE80211_CHAN_NO_IR) 591 + scan_chan_list[chan_idx].chan_scan_mode_bitmap 592 + |= MWIFIEX_PASSIVE_SCAN; 593 + else 594 + scan_chan_list[chan_idx].chan_scan_mode_bitmap 595 + &= ~MWIFIEX_PASSIVE_SCAN; 596 + 597 + scan_chan_list[chan_idx].chan_number = 598 + (u32)ch->hw_value; 599 + chan_idx++; 600 + } 601 + } 602 + return chan_idx; 603 + } 604 + 550 605 /* This function appends rate TLV to scan config command. 
*/ 551 606 static int 552 607 mwifiex_append_rate_tlv(struct mwifiex_private *priv, ··· 2092 2037 u8 is_bgscan_resp; 2093 2038 __le64 fw_tsf = 0; 2094 2039 u8 *radio_type; 2040 + struct cfg80211_wowlan_nd_match *pmatch; 2041 + struct cfg80211_sched_scan_request *nd_config = NULL; 2095 2042 2096 2043 is_bgscan_resp = (le16_to_cpu(resp->command) 2097 2044 == HostCmd_CMD_802_11_BG_SCAN_QUERY); ··· 2156 2099 (struct mwifiex_ie_types_data **) 2157 2100 &chan_band_tlv); 2158 2101 2102 + #ifdef CONFIG_PM 2103 + if (priv->wdev.wiphy->wowlan_config) 2104 + nd_config = priv->wdev.wiphy->wowlan_config->nd_config; 2105 + #endif 2106 + 2107 + if (nd_config) { 2108 + adapter->nd_info = 2109 + kzalloc(sizeof(struct cfg80211_wowlan_nd_match) + 2110 + sizeof(struct cfg80211_wowlan_nd_match *) * 2111 + scan_rsp->number_of_sets, GFP_ATOMIC); 2112 + 2113 + if (adapter->nd_info) 2114 + adapter->nd_info->n_matches = scan_rsp->number_of_sets; 2115 + } 2116 + 2159 2117 for (idx = 0; idx < scan_rsp->number_of_sets && bytes_left; idx++) { 2160 2118 /* 2161 2119 * If the TSF TLV was appended to the scan results, save this ··· 2187 2115 radio_type = &chan_band->radio_type; 2188 2116 } else { 2189 2117 radio_type = NULL; 2118 + } 2119 + 2120 + if (chan_band_tlv && adapter->nd_info) { 2121 + adapter->nd_info->matches[idx] = 2122 + kzalloc(sizeof(*pmatch) + 2123 + sizeof(u32), GFP_ATOMIC); 2124 + 2125 + pmatch = adapter->nd_info->matches[idx]; 2126 + 2127 + if (pmatch) { 2128 + memset(pmatch, 0, sizeof(*pmatch)); 2129 + if (chan_band_tlv) { 2130 + pmatch->n_channels = 1; 2131 + pmatch->channels[0] = 2132 + chan_band->chan_number; 2133 + } 2134 + } 2190 2135 } 2191 2136 2192 2137 ret = mwifiex_parse_single_response_buf(priv, &bss_info, ··· 2240 2151 /* Size is equal to the sizeof(fixed portions) + the TLV len + header */ 2241 2152 cmd->size = cpu_to_le16((u16)(sizeof(ext_scan->reserved) 2242 2153 + scan_cfg->tlv_buf_len + S_DS_GEN)); 2154 + 2155 + return 0; 2156 + } 2157 + 2158 + /* This function 
prepares an background scan config command to be sent 2159 + * to the firmware 2160 + */ 2161 + int mwifiex_cmd_802_11_bg_scan_config(struct mwifiex_private *priv, 2162 + struct host_cmd_ds_command *cmd, 2163 + void *data_buf) 2164 + { 2165 + struct host_cmd_ds_802_11_bg_scan_config *bgscan_config = 2166 + &cmd->params.bg_scan_config; 2167 + struct mwifiex_bg_scan_cfg *bgscan_cfg_in = data_buf; 2168 + u8 *tlv_pos = bgscan_config->tlv; 2169 + u8 num_probes; 2170 + u32 ssid_len, chan_idx, scan_type, scan_dur, chan_num; 2171 + int i; 2172 + struct mwifiex_ie_types_num_probes *num_probes_tlv; 2173 + struct mwifiex_ie_types_repeat_count *repeat_count_tlv; 2174 + struct mwifiex_ie_types_min_rssi_threshold *rssi_threshold_tlv; 2175 + struct mwifiex_ie_types_bgscan_start_later *start_later_tlv; 2176 + struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv; 2177 + struct mwifiex_ie_types_chan_list_param_set *chan_list_tlv; 2178 + struct mwifiex_chan_scan_param_set *temp_chan; 2179 + 2180 + cmd->command = cpu_to_le16(HostCmd_CMD_802_11_BG_SCAN_CONFIG); 2181 + cmd->size = cpu_to_le16(sizeof(*bgscan_config) + S_DS_GEN); 2182 + 2183 + bgscan_config->action = cpu_to_le16(bgscan_cfg_in->action); 2184 + bgscan_config->enable = bgscan_cfg_in->enable; 2185 + bgscan_config->bss_type = bgscan_cfg_in->bss_type; 2186 + bgscan_config->scan_interval = 2187 + cpu_to_le32(bgscan_cfg_in->scan_interval); 2188 + bgscan_config->report_condition = 2189 + cpu_to_le32(bgscan_cfg_in->report_condition); 2190 + 2191 + /* stop sched scan */ 2192 + if (!bgscan_config->enable) 2193 + return 0; 2194 + 2195 + bgscan_config->chan_per_scan = bgscan_cfg_in->chan_per_scan; 2196 + 2197 + num_probes = (bgscan_cfg_in->num_probes ? 
bgscan_cfg_in-> 2198 + num_probes : priv->adapter->scan_probes); 2199 + 2200 + if (num_probes) { 2201 + num_probes_tlv = (struct mwifiex_ie_types_num_probes *)tlv_pos; 2202 + num_probes_tlv->header.type = cpu_to_le16(TLV_TYPE_NUMPROBES); 2203 + num_probes_tlv->header.len = 2204 + cpu_to_le16(sizeof(num_probes_tlv->num_probes)); 2205 + num_probes_tlv->num_probes = cpu_to_le16((u16)num_probes); 2206 + 2207 + tlv_pos += sizeof(num_probes_tlv->header) + 2208 + le16_to_cpu(num_probes_tlv->header.len); 2209 + } 2210 + 2211 + if (bgscan_cfg_in->repeat_count) { 2212 + repeat_count_tlv = 2213 + (struct mwifiex_ie_types_repeat_count *)tlv_pos; 2214 + repeat_count_tlv->header.type = 2215 + cpu_to_le16(TLV_TYPE_REPEAT_COUNT); 2216 + repeat_count_tlv->header.len = 2217 + cpu_to_le16(sizeof(repeat_count_tlv->repeat_count)); 2218 + repeat_count_tlv->repeat_count = 2219 + cpu_to_le16(bgscan_cfg_in->repeat_count); 2220 + 2221 + tlv_pos += sizeof(repeat_count_tlv->header) + 2222 + le16_to_cpu(repeat_count_tlv->header.len); 2223 + } 2224 + 2225 + if (bgscan_cfg_in->rssi_threshold) { 2226 + rssi_threshold_tlv = 2227 + (struct mwifiex_ie_types_min_rssi_threshold *)tlv_pos; 2228 + rssi_threshold_tlv->header.type = 2229 + cpu_to_le16(TLV_TYPE_RSSI_LOW); 2230 + rssi_threshold_tlv->header.len = 2231 + cpu_to_le16(sizeof(rssi_threshold_tlv->rssi_threshold)); 2232 + rssi_threshold_tlv->rssi_threshold = 2233 + cpu_to_le16(bgscan_cfg_in->rssi_threshold); 2234 + 2235 + tlv_pos += sizeof(rssi_threshold_tlv->header) + 2236 + le16_to_cpu(rssi_threshold_tlv->header.len); 2237 + } 2238 + 2239 + for (i = 0; i < bgscan_cfg_in->num_ssids; i++) { 2240 + ssid_len = bgscan_cfg_in->ssid_list[i].ssid.ssid_len; 2241 + 2242 + wildcard_ssid_tlv = 2243 + (struct mwifiex_ie_types_wildcard_ssid_params *)tlv_pos; 2244 + wildcard_ssid_tlv->header.type = 2245 + cpu_to_le16(TLV_TYPE_WILDCARDSSID); 2246 + wildcard_ssid_tlv->header.len = cpu_to_le16( 2247 + (u16)(ssid_len + sizeof(wildcard_ssid_tlv-> 2248 + 
max_ssid_length))); 2249 + 2250 + /* max_ssid_length = 0 tells firmware to perform 2251 + * specific scan for the SSID filled, whereas 2252 + * max_ssid_length = IEEE80211_MAX_SSID_LEN is for 2253 + * wildcard scan. 2254 + */ 2255 + if (ssid_len) 2256 + wildcard_ssid_tlv->max_ssid_length = 0; 2257 + else 2258 + wildcard_ssid_tlv->max_ssid_length = 2259 + IEEE80211_MAX_SSID_LEN; 2260 + 2261 + memcpy(wildcard_ssid_tlv->ssid, 2262 + bgscan_cfg_in->ssid_list[i].ssid.ssid, ssid_len); 2263 + 2264 + tlv_pos += (sizeof(wildcard_ssid_tlv->header) 2265 + + le16_to_cpu(wildcard_ssid_tlv->header.len)); 2266 + } 2267 + 2268 + chan_list_tlv = (struct mwifiex_ie_types_chan_list_param_set *)tlv_pos; 2269 + 2270 + if (bgscan_cfg_in->chan_list[0].chan_number) { 2271 + dev_dbg(priv->adapter->dev, "info: bgscan: Using supplied channel list\n"); 2272 + 2273 + chan_list_tlv->header.type = cpu_to_le16(TLV_TYPE_CHANLIST); 2274 + 2275 + for (chan_idx = 0; 2276 + chan_idx < MWIFIEX_BG_SCAN_CHAN_MAX && 2277 + bgscan_cfg_in->chan_list[chan_idx].chan_number; 2278 + chan_idx++) { 2279 + temp_chan = chan_list_tlv->chan_scan_param + chan_idx; 2280 + 2281 + /* Increment the TLV header length by size appended */ 2282 + le16_add_cpu(&chan_list_tlv->header.len, 2283 + sizeof(chan_list_tlv->chan_scan_param)); 2284 + 2285 + temp_chan->chan_number = 2286 + bgscan_cfg_in->chan_list[chan_idx].chan_number; 2287 + temp_chan->radio_type = 2288 + bgscan_cfg_in->chan_list[chan_idx].radio_type; 2289 + 2290 + scan_type = 2291 + bgscan_cfg_in->chan_list[chan_idx].scan_type; 2292 + 2293 + if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE) 2294 + temp_chan->chan_scan_mode_bitmap 2295 + |= MWIFIEX_PASSIVE_SCAN; 2296 + else 2297 + temp_chan->chan_scan_mode_bitmap 2298 + &= ~MWIFIEX_PASSIVE_SCAN; 2299 + 2300 + if (bgscan_cfg_in->chan_list[chan_idx].scan_time) { 2301 + scan_dur = (u16)bgscan_cfg_in-> 2302 + chan_list[chan_idx].scan_time; 2303 + } else { 2304 + scan_dur = (scan_type == 2305 + MWIFIEX_SCAN_TYPE_PASSIVE) ? 
2306 + priv->adapter->passive_scan_time : 2307 + priv->adapter->specific_scan_time; 2308 + } 2309 + 2310 + temp_chan->min_scan_time = cpu_to_le16(scan_dur); 2311 + temp_chan->max_scan_time = cpu_to_le16(scan_dur); 2312 + } 2313 + } else { 2314 + dev_dbg(priv->adapter->dev, 2315 + "info: bgscan: Creating full region channel list\n"); 2316 + chan_num = 2317 + mwifiex_bgscan_create_channel_list(priv, bgscan_cfg_in, 2318 + chan_list_tlv-> 2319 + chan_scan_param); 2320 + le16_add_cpu(&chan_list_tlv->header.len, 2321 + chan_num * 2322 + sizeof(chan_list_tlv->chan_scan_param[0])); 2323 + } 2324 + 2325 + tlv_pos += (sizeof(chan_list_tlv->header) 2326 + + le16_to_cpu(chan_list_tlv->header.len)); 2327 + 2328 + if (bgscan_cfg_in->start_later) { 2329 + start_later_tlv = 2330 + (struct mwifiex_ie_types_bgscan_start_later *)tlv_pos; 2331 + start_later_tlv->header.type = 2332 + cpu_to_le16(TLV_TYPE_BGSCAN_START_LATER); 2333 + start_later_tlv->header.len = 2334 + cpu_to_le16(sizeof(start_later_tlv->start_later)); 2335 + start_later_tlv->start_later = 2336 + cpu_to_le16(bgscan_cfg_in->start_later); 2337 + 2338 + tlv_pos += sizeof(start_later_tlv->header) + 2339 + le16_to_cpu(start_later_tlv->header.len); 2340 + } 2341 + 2342 + /* Append vendor specific IE TLV */ 2343 + mwifiex_cmd_append_vsie_tlv(priv, MWIFIEX_VSIE_MASK_BGSCAN, &tlv_pos); 2344 + 2345 + le16_add_cpu(&cmd->size, tlv_pos - bgscan_config->tlv); 2346 + 2347 + return 0; 2348 + } 2349 + 2350 + int mwifiex_stop_bg_scan(struct mwifiex_private *priv) 2351 + { 2352 + struct mwifiex_bg_scan_cfg *bgscan_cfg; 2353 + 2354 + if (!priv->sched_scanning) { 2355 + dev_dbg(priv->adapter->dev, "bgscan already stopped!\n"); 2356 + return 0; 2357 + } 2358 + 2359 + bgscan_cfg = kzalloc(sizeof(*bgscan_cfg), GFP_KERNEL); 2360 + if (!bgscan_cfg) 2361 + return -ENOMEM; 2362 + 2363 + bgscan_cfg->bss_type = MWIFIEX_BSS_MODE_INFRA; 2364 + bgscan_cfg->action = MWIFIEX_BGSCAN_ACT_SET; 2365 + bgscan_cfg->enable = false; 2366 + 2367 + if 
(mwifiex_send_cmd(priv, HostCmd_CMD_802_11_BG_SCAN_CONFIG, 2368 + HostCmd_ACT_GEN_SET, 0, bgscan_cfg, true)) { 2369 + kfree(bgscan_cfg); 2370 + return -EFAULT; 2371 + } 2372 + 2373 + kfree(bgscan_cfg); 2374 + priv->sched_scanning = false; 2243 2375 2244 2376 return 0; 2245 2377 }
+20 -15
drivers/net/wireless/marvell/mwifiex/sdio.c
··· 181 181 182 182 /* Disable Host Sleep */ 183 183 mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA), 184 - MWIFIEX_ASYNC_CMD); 184 + MWIFIEX_SYNC_CMD); 185 185 186 186 return 0; 187 187 } ··· 1039 1039 1040 1040 /* 1041 1041 * This function checks the firmware status in card. 1042 - * 1043 - * The winner interface is also determined by this function. 1044 1042 */ 1045 1043 static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter, 1046 1044 u32 poll_num) 1047 1045 { 1048 - struct sdio_mmc_card *card = adapter->card; 1049 1046 int ret = 0; 1050 1047 u16 firmware_stat; 1051 1048 u32 tries; 1052 - u8 winner_status; 1053 1049 1054 - /* Wait for firmware initialization event */ 1055 1050 for (tries = 0; tries < poll_num; tries++) { 1056 1051 ret = mwifiex_sdio_read_fw_status(adapter, &firmware_stat); 1057 1052 if (ret) ··· 1060 1065 } 1061 1066 } 1062 1067 1063 - if (ret) { 1064 - if (mwifiex_read_reg 1065 - (adapter, card->reg->status_reg_0, &winner_status)) 1066 - winner_status = 0; 1068 + return ret; 1069 + } 1067 1070 1068 - if (winner_status) 1069 - adapter->winner = 0; 1070 - else 1071 - adapter->winner = 1; 1072 - } 1071 + /* This function checks if WLAN is the winner. 1072 + */ 1073 + static int mwifiex_check_winner_status(struct mwifiex_adapter *adapter) 1074 + { 1075 + int ret = 0; 1076 + u8 winner = 0; 1077 + struct sdio_mmc_card *card = adapter->card; 1078 + 1079 + if (mwifiex_read_reg(adapter, card->reg->status_reg_0, &winner)) 1080 + return -1; 1081 + 1082 + if (winner) 1083 + adapter->winner = 0; 1084 + else 1085 + adapter->winner = 1; 1086 + 1073 1087 return ret; 1074 1088 } 1075 1089 ··· 2624 2620 .init_if = mwifiex_init_sdio, 2625 2621 .cleanup_if = mwifiex_cleanup_sdio, 2626 2622 .check_fw_status = mwifiex_check_fw_status, 2623 + .check_winner_status = mwifiex_check_winner_status, 2627 2624 .prog_fw = mwifiex_prog_fw_w_helper, 2628 2625 .register_dev = mwifiex_register_dev, 2629 2626 .unregister_dev = mwifiex_unregister_dev,
+23
drivers/net/wireless/marvell/mwifiex/sta_cmd.c
··· 1813 1813 return 0; 1814 1814 } 1815 1815 1816 + /* This function prepares command to get HS wakeup reason. 1817 + * 1818 + * Preparation includes - 1819 + * - Setting command ID, action and proper size 1820 + * - Ensuring correct endian-ness 1821 + */ 1822 + static int mwifiex_cmd_get_wakeup_reason(struct mwifiex_private *priv, 1823 + struct host_cmd_ds_command *cmd) 1824 + { 1825 + cmd->command = cpu_to_le16(HostCmd_CMD_HS_WAKEUP_REASON); 1826 + cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_wakeup_reason) + 1827 + S_DS_GEN); 1828 + 1829 + return 0; 1830 + } 1831 + 1816 1832 /* 1817 1833 * This function prepares the commands before sending them to the firmware. 1818 1834 * ··· 1888 1872 break; 1889 1873 case HostCmd_CMD_802_11_SCAN: 1890 1874 ret = mwifiex_cmd_802_11_scan(cmd_ptr, data_buf); 1875 + break; 1876 + case HostCmd_CMD_802_11_BG_SCAN_CONFIG: 1877 + ret = mwifiex_cmd_802_11_bg_scan_config(priv, cmd_ptr, 1878 + data_buf); 1891 1879 break; 1892 1880 case HostCmd_CMD_802_11_BG_SCAN_QUERY: 1893 1881 ret = mwifiex_cmd_802_11_bg_scan_query(cmd_ptr); ··· 2082 2062 case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG: 2083 2063 ret = mwifiex_cmd_sdio_rx_aggr_cfg(cmd_ptr, cmd_action, 2084 2064 data_buf); 2065 + break; 2066 + case HostCmd_CMD_HS_WAKEUP_REASON: 2067 + ret = mwifiex_cmd_get_wakeup_reason(priv, cmd_ptr); 2085 2068 break; 2086 2069 case HostCmd_CMD_MC_POLICY: 2087 2070 ret = mwifiex_cmd_set_mc_policy(priv, cmd_ptr, cmd_action,
+6
drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
··· 1076 1076 break; 1077 1077 case HostCmd_CMD_802_11_BG_SCAN_QUERY: 1078 1078 ret = mwifiex_ret_802_11_scan(priv, resp); 1079 + cfg80211_sched_scan_results(priv->wdev.wiphy); 1079 1080 mwifiex_dbg(adapter, CMD, 1080 1081 "info: CMD_RESP: BG_SCAN result is ready!\n"); 1082 + break; 1083 + case HostCmd_CMD_802_11_BG_SCAN_CONFIG: 1081 1084 break; 1082 1085 case HostCmd_CMD_TXPWR_CFG: 1083 1086 ret = mwifiex_ret_tx_power_cfg(priv, resp); ··· 1235 1232 break; 1236 1233 case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG: 1237 1234 ret = mwifiex_ret_sdio_rx_aggr_cfg(priv, resp); 1235 + break; 1236 + case HostCmd_CMD_HS_WAKEUP_REASON: 1237 + ret = mwifiex_ret_wakeup_reason(priv, resp, data_buf); 1238 1238 break; 1239 1239 case HostCmd_CMD_TDLS_CONFIG: 1240 1240 break;
+16 -4
drivers/net/wireless/marvell/mwifiex/sta_event.c
··· 92 92 priv->is_data_rate_auto = true; 93 93 priv->data_rate = 0; 94 94 95 + priv->assoc_resp_ht_param = 0; 96 + priv->ht_param_present = false; 97 + 95 98 if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA || 96 99 GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) && priv->hist_data) 97 100 mwifiex_hist_data_reset(priv); ··· 610 607 611 608 case EVENT_PS_AWAKE: 612 609 mwifiex_dbg(adapter, EVENT, "info: EVENT: AWAKE\n"); 613 - if (!adapter->pps_uapsd_mode && priv->port_open && 610 + if (!adapter->pps_uapsd_mode && 611 + (priv->port_open || 612 + (priv->bss_mode == NL80211_IFTYPE_ADHOC)) && 614 613 priv->media_connected && adapter->sleep_period.period) { 615 - adapter->pps_uapsd_mode = true; 616 - mwifiex_dbg(adapter, EVENT, 617 - "event: PPS/UAPSD mode activated\n"); 614 + adapter->pps_uapsd_mode = true; 615 + mwifiex_dbg(adapter, EVENT, 616 + "event: PPS/UAPSD mode activated\n"); 618 617 } 619 618 adapter->tx_lock_flag = false; 620 619 if (adapter->pps_uapsd_mode && adapter->gen_null_pkt) { ··· 689 684 mwifiex_dbg(adapter, EVENT, "event: BGS_REPORT\n"); 690 685 ret = mwifiex_send_cmd(priv, HostCmd_CMD_802_11_BG_SCAN_QUERY, 691 686 HostCmd_ACT_GEN_GET, 0, NULL, false); 687 + break; 688 + 689 + case EVENT_BG_SCAN_STOPPED: 690 + dev_dbg(adapter->dev, "event: BGS_STOPPED\n"); 691 + cfg80211_sched_scan_stopped(priv->wdev.wiphy); 692 + if (priv->sched_scanning) 693 + priv->sched_scanning = false; 692 694 break; 693 695 694 696 case EVENT_PORT_RELEASE:
+32 -1
drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
··· 504 504 } 505 505 } 506 506 507 + priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA); 508 + 509 + if (priv && priv->sched_scanning) { 510 + #ifdef CONFIG_PM 511 + if (!priv->wdev.wiphy->wowlan_config->nd_config) { 512 + #endif 513 + mwifiex_dbg(adapter, CMD, "aborting bgscan!\n"); 514 + mwifiex_stop_bg_scan(priv); 515 + cfg80211_sched_scan_stopped(priv->wdev.wiphy); 516 + #ifdef CONFIG_PM 517 + } 518 + #endif 519 + } 520 + 507 521 if (adapter->hs_activated) { 508 522 mwifiex_dbg(adapter, CMD, 509 523 "cmd: HS Already activated\n"); ··· 1128 1114 * with requisite parameters and calls the IOCTL handler. 1129 1115 */ 1130 1116 int 1131 - mwifiex_get_ver_ext(struct mwifiex_private *priv) 1117 + mwifiex_get_ver_ext(struct mwifiex_private *priv, u32 version_str_sel) 1132 1118 { 1133 1119 struct mwifiex_ver_ext ver_ext; 1134 1120 1135 1121 memset(&ver_ext, 0, sizeof(struct host_cmd_ds_version_ext)); 1122 + ver_ext.version_str_sel = version_str_sel; 1136 1123 if (mwifiex_send_cmd(priv, HostCmd_CMD_VERSION_EXT, 1137 1124 HostCmd_ACT_GEN_GET, 0, &ver_ext, true)) 1138 1125 return -1; ··· 1464 1449 return -EFAULT; 1465 1450 1466 1451 return 0; 1452 + } 1453 + 1454 + /* This function get Host Sleep wake up reason. 1455 + * 1456 + */ 1457 + int mwifiex_get_wakeup_reason(struct mwifiex_private *priv, u16 action, 1458 + int cmd_type, 1459 + struct mwifiex_ds_wakeup_reason *wakeup_reason) 1460 + { 1461 + int status = 0; 1462 + 1463 + status = mwifiex_send_cmd(priv, HostCmd_CMD_HS_WAKEUP_REASON, 1464 + HostCmd_ACT_GEN_GET, 0, wakeup_reason, 1465 + cmd_type == MWIFIEX_SYNC_CMD); 1466 + 1467 + return status; 1467 1468 }
+5 -2
drivers/net/wireless/marvell/mwifiex/wmm.c
··· 438 438 mwifiex_set_ba_params(priv); 439 439 mwifiex_reset_11n_rx_seq_num(priv); 440 440 441 + priv->wmm.drv_pkt_delay_max = MWIFIEX_WMM_DRV_DELAY_MAX; 441 442 atomic_set(&priv->wmm.tx_pkts_queued, 0); 442 443 atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID); 443 444 } ··· 476 475 priv = adapter->priv[i]; 477 476 if (!priv) 478 477 continue; 479 - if (!priv->port_open) 478 + if (!priv->port_open && 479 + (priv->bss_mode != NL80211_IFTYPE_ADHOC)) 480 480 continue; 481 481 if (adapter->if_ops.is_port_ready && 482 482 !adapter->if_ops.is_port_ready(priv)) ··· 1101 1099 1102 1100 priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv; 1103 1101 1104 - if (!priv_tmp->port_open || 1102 + if (((priv_tmp->bss_mode != NL80211_IFTYPE_ADHOC) && 1103 + !priv_tmp->port_open) || 1105 1104 (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0)) 1106 1105 continue; 1107 1106
+9 -9
drivers/net/wireless/ralink/rt2x00/rt2x00.h
··· 107 107 * amount of bytes needed to move the data. 108 108 */ 109 109 #define ALIGN_SIZE(__skb, __header) \ 110 - ( ((unsigned long)((__skb)->data + (__header))) & 3 ) 110 + (((unsigned long)((__skb)->data + (__header))) & 3) 111 111 112 112 /* 113 113 * Constants for extra TX headroom for alignment purposes. ··· 128 128 #define SLOT_TIME 20 129 129 #define SHORT_SLOT_TIME 9 130 130 #define SIFS 10 131 - #define PIFS ( SIFS + SLOT_TIME ) 132 - #define SHORT_PIFS ( SIFS + SHORT_SLOT_TIME ) 133 - #define DIFS ( PIFS + SLOT_TIME ) 134 - #define SHORT_DIFS ( SHORT_PIFS + SHORT_SLOT_TIME ) 135 - #define EIFS ( SIFS + DIFS + \ 136 - GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10) ) 137 - #define SHORT_EIFS ( SIFS + SHORT_DIFS + \ 138 - GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10) ) 131 + #define PIFS (SIFS + SLOT_TIME) 132 + #define SHORT_PIFS (SIFS + SHORT_SLOT_TIME) 133 + #define DIFS (PIFS + SLOT_TIME) 134 + #define SHORT_DIFS (SHORT_PIFS + SHORT_SLOT_TIME) 135 + #define EIFS (SIFS + DIFS + \ 136 + GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10)) 137 + #define SHORT_EIFS (SIFS + SHORT_DIFS + \ 138 + GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10)) 139 139 140 140 enum rt2x00_chip_intf { 141 141 RT2X00_CHIP_INTF_PCI,
+2 -2
drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
··· 629 629 data += sprintf(data, "register\tbase\twords\twordsize\n"); 630 630 #define RT2X00DEBUGFS_SPRINTF_REGISTER(__name) \ 631 631 { \ 632 - if(debug->__name.read) \ 632 + if (debug->__name.read) \ 633 633 data += sprintf(data, __stringify(__name) \ 634 634 "\t%d\t%d\t%d\n", \ 635 635 debug->__name.word_base, \ ··· 699 699 700 700 #define RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(__intf, __name) \ 701 701 ({ \ 702 - if(debug->__name.read) { \ 702 + if (debug->__name.read) { \ 703 703 (__intf)->__name##_off_entry = \ 704 704 debugfs_create_u32(__stringify(__name) "_offset", \ 705 705 S_IRUSR | S_IWUSR, \
+10 -10
drivers/net/wireless/ralink/rt2x00/rt61pci.h
··· 138 138 #define PAIRWISE_TA_TABLE_BASE 0x1a00 139 139 140 140 #define SHARED_KEY_ENTRY(__idx) \ 141 - ( SHARED_KEY_TABLE_BASE + \ 142 - ((__idx) * sizeof(struct hw_key_entry)) ) 141 + (SHARED_KEY_TABLE_BASE + \ 142 + ((__idx) * sizeof(struct hw_key_entry))) 143 143 #define PAIRWISE_KEY_ENTRY(__idx) \ 144 - ( PAIRWISE_KEY_TABLE_BASE + \ 145 - ((__idx) * sizeof(struct hw_key_entry)) ) 144 + (PAIRWISE_KEY_TABLE_BASE + \ 145 + ((__idx) * sizeof(struct hw_key_entry))) 146 146 #define PAIRWISE_TA_ENTRY(__idx) \ 147 - ( PAIRWISE_TA_TABLE_BASE + \ 148 - ((__idx) * sizeof(struct hw_pairwise_ta_entry)) ) 147 + (PAIRWISE_TA_TABLE_BASE + \ 148 + ((__idx) * sizeof(struct hw_pairwise_ta_entry))) 149 149 150 150 struct hw_key_entry { 151 151 u8 key[16]; ··· 180 180 #define HW_BEACON_BASE3 0x2f00 181 181 182 182 #define HW_BEACON_OFFSET(__index) \ 183 - ( HW_BEACON_BASE0 + (__index * 0x0100) ) 183 + (HW_BEACON_BASE0 + (__index * 0x0100)) 184 184 185 185 /* 186 186 * HOST-MCU shared memory. ··· 1287 1287 /* 1288 1288 * DMA descriptor defines. 1289 1289 */ 1290 - #define TXD_DESC_SIZE ( 16 * sizeof(__le32) ) 1291 - #define TXINFO_SIZE ( 6 * sizeof(__le32) ) 1292 - #define RXD_DESC_SIZE ( 16 * sizeof(__le32) ) 1290 + #define TXD_DESC_SIZE (16 * sizeof(__le32)) 1291 + #define TXINFO_SIZE (6 * sizeof(__le32)) 1292 + #define RXD_DESC_SIZE (16 * sizeof(__le32)) 1293 1293 1294 1294 /* 1295 1295 * TX descriptor format for TX, PRIO and Beacon Ring.
+1 -1
drivers/net/wireless/ti/wlcore/Kconfig
··· 13 13 14 14 config WLCORE_SPI 15 15 tristate "TI wlcore SPI support" 16 - depends on WLCORE && SPI_MASTER 16 + depends on WLCORE && SPI_MASTER && OF 17 17 select CRC7 18 18 ---help--- 19 19 This module adds support for the SPI interface of adapters using
+1 -1
drivers/net/wireless/ti/wlcore/event.c
··· 38 38 39 39 int wlcore_event_fw_logger(struct wl1271 *wl) 40 40 { 41 - u32 ret; 41 + int ret; 42 42 struct fw_logger_information fw_log; 43 43 u8 *buffer; 44 44 u32 internal_fw_addrbase = WL18XX_DATA_RAM_BASE_ADDRESS;
+82 -4
drivers/net/wireless/ti/wlcore/spi.c
··· 30 30 #include <linux/spi/spi.h> 31 31 #include <linux/wl12xx.h> 32 32 #include <linux/platform_device.h> 33 + #include <linux/of_irq.h> 34 + #include <linux/regulator/consumer.h> 33 35 34 36 #include "wlcore.h" 35 37 #include "wl12xx_80211.h" ··· 83 81 struct wl12xx_spi_glue { 84 82 struct device *dev; 85 83 struct platform_device *core; 84 + struct regulator *reg; /* Power regulator */ 86 85 }; 87 86 88 87 static void wl12xx_spi_reset(struct device *child) ··· 321 318 return 0; 322 319 } 323 320 321 + /** 322 + * wl12xx_spi_set_power - power on/off the wl12xx unit 323 + * @child: wl12xx device handle. 324 + * @enable: true/false to power on/off the unit. 325 + * 326 + * use the WiFi enable regulator to enable/disable the WiFi unit. 327 + */ 328 + static int wl12xx_spi_set_power(struct device *child, bool enable) 329 + { 330 + int ret = 0; 331 + struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent); 332 + 333 + WARN_ON(!glue->reg); 334 + 335 + /* Update regulator state */ 336 + if (enable) { 337 + ret = regulator_enable(glue->reg); 338 + if (ret) 339 + dev_err(child, "Power enable failure\n"); 340 + } else { 341 + ret = regulator_disable(glue->reg); 342 + if (ret) 343 + dev_err(child, "Power disable failure\n"); 344 + } 345 + 346 + return ret; 347 + } 348 + 324 349 static struct wl1271_if_operations spi_ops = { 325 350 .read = wl12xx_spi_raw_read, 326 351 .write = wl12xx_spi_raw_write, 327 352 .reset = wl12xx_spi_reset, 328 353 .init = wl12xx_spi_init, 354 + .power = wl12xx_spi_set_power, 329 355 .set_block_size = NULL, 330 356 }; 357 + 358 + static const struct of_device_id wlcore_spi_of_match_table[] = { 359 + { .compatible = "ti,wl1271" }, 360 + { } 361 + }; 362 + MODULE_DEVICE_TABLE(of, wlcore_spi_of_match_table); 363 + 364 + /** 365 + * wlcore_probe_of - DT node parsing. 366 + * @spi: SPI slave device parameters. 367 + * @res: resource parameters. 368 + * @glue: wl12xx SPI bus to slave device glue parameters. 
369 + * @pdev_data: wlcore device parameters 370 + */ 371 + static int wlcore_probe_of(struct spi_device *spi, struct wl12xx_spi_glue *glue, 372 + struct wlcore_platdev_data *pdev_data) 373 + { 374 + struct device_node *dt_node = spi->dev.of_node; 375 + int ret; 376 + 377 + if (of_find_property(dt_node, "clock-xtal", NULL)) 378 + pdev_data->ref_clock_xtal = true; 379 + 380 + ret = of_property_read_u32(dt_node, "ref-clock-frequency", 381 + &pdev_data->ref_clock_freq); 382 + if (IS_ERR_VALUE(ret)) { 383 + dev_err(glue->dev, 384 + "can't get reference clock frequency (%d)\n", ret); 385 + return ret; 386 + } 387 + 388 + return 0; 389 + } 331 390 332 391 static int wl1271_probe(struct spi_device *spi) 333 392 { ··· 399 334 int ret; 400 335 401 336 memset(&pdev_data, 0x00, sizeof(pdev_data)); 402 - 403 - /* TODO: add DT parsing when needed */ 404 337 405 338 pdev_data.if_ops = &spi_ops; 406 339 ··· 415 352 /* This is the only SPI value that we need to set here, the rest 416 353 * comes from the board-peripherals file */ 417 354 spi->bits_per_word = 32; 355 + 356 + glue->reg = devm_regulator_get(&spi->dev, "vwlan"); 357 + if (PTR_ERR(glue->reg) == -EPROBE_DEFER) 358 + return -EPROBE_DEFER; 359 + if (IS_ERR(glue->reg)) { 360 + dev_err(glue->dev, "can't get regulator\n"); 361 + return PTR_ERR(glue->reg); 362 + } 363 + 364 + ret = wlcore_probe_of(spi, glue, &pdev_data); 365 + if (IS_ERR_VALUE(ret)) { 366 + dev_err(glue->dev, 367 + "can't get device tree parameters (%d)\n", ret); 368 + return ret; 369 + } 418 370 419 371 ret = spi_setup(spi); 420 372 if (ret < 0) { ··· 448 370 memset(res, 0x00, sizeof(res)); 449 371 450 372 res[0].start = spi->irq; 451 - res[0].flags = IORESOURCE_IRQ; 373 + res[0].flags = IORESOURCE_IRQ | irq_get_trigger_type(spi->irq); 452 374 res[0].name = "irq"; 453 375 454 376 ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res)); ··· 486 408 return 0; 487 409 } 488 410 489 - 490 411 static struct spi_driver wl1271_spi_driver = { 491 412 
.driver = { 492 413 .name = "wl1271_spi", 414 + .of_match_table = of_match_ptr(wlcore_spi_of_match_table), 493 415 }, 494 416 495 417 .probe = wl1271_probe,
+3
include/linux/bcma/bcma.h
··· 151 151 #define BCMA_CORE_PCIE2 0x83C /* PCI Express Gen2 */ 152 152 #define BCMA_CORE_USB30_DEV 0x83D 153 153 #define BCMA_CORE_ARM_CR4 0x83E 154 + #define BCMA_CORE_GCI 0x840 155 + #define BCMA_CORE_CMEM 0x846 /* CNDS DDR2/3 memory controller */ 154 156 #define BCMA_CORE_ARM_CA7 0x847 155 157 #define BCMA_CORE_SYS_MEM 0x849 156 158 #define BCMA_CORE_DEFAULT 0xFFF ··· 201 199 #define BCMA_PKG_ID_BCM4707 1 202 200 #define BCMA_PKG_ID_BCM4708 2 203 201 #define BCMA_PKG_ID_BCM4709 0 202 + #define BCMA_CHIP_ID_BCM47094 53030 204 203 #define BCMA_CHIP_ID_BCM53018 53018 205 204 206 205 /* Board types (on PCI usually equals to the subsystem dev id) */
+25 -6
include/linux/bcma/bcma_driver_chipcommon.h
··· 217 217 #define BCMA_CC_CLKDIV_JTAG_SHIFT 8 218 218 #define BCMA_CC_CLKDIV_UART 0x000000FF 219 219 #define BCMA_CC_CAP_EXT 0x00AC /* Capabilities */ 220 + #define BCMA_CC_CAP_EXT_SECI_PRESENT 0x00000001 221 + #define BCMA_CC_CAP_EXT_GSIO_PRESENT 0x00000002 222 + #define BCMA_CC_CAP_EXT_GCI_PRESENT 0x00000004 223 + #define BCMA_CC_CAP_EXT_SECI_PUART_PRESENT 0x00000008 /* UART present */ 224 + #define BCMA_CC_CAP_EXT_AOB_PRESENT 0x00000040 220 225 #define BCMA_CC_PLLONDELAY 0x00B0 /* Rev >= 4 only */ 221 226 #define BCMA_CC_FREFSELDELAY 0x00B4 /* Rev >= 4 only */ 222 227 #define BCMA_CC_SLOWCLKCTL 0x00B8 /* 6 <= Rev <= 9 only */ ··· 356 351 #define BCMA_CC_PMU_RES_REQTS 0x0640 /* PMU res req timer sel */ 357 352 #define BCMA_CC_PMU_RES_REQT 0x0644 /* PMU res req timer */ 358 353 #define BCMA_CC_PMU_RES_REQM 0x0648 /* PMU res req mask */ 359 - #define BCMA_CC_CHIPCTL_ADDR 0x0650 360 - #define BCMA_CC_CHIPCTL_DATA 0x0654 361 - #define BCMA_CC_REGCTL_ADDR 0x0658 362 - #define BCMA_CC_REGCTL_DATA 0x065C 363 - #define BCMA_CC_PLLCTL_ADDR 0x0660 364 - #define BCMA_CC_PLLCTL_DATA 0x0664 354 + #define BCMA_CC_PMU_CHIPCTL_ADDR 0x0650 355 + #define BCMA_CC_PMU_CHIPCTL_DATA 0x0654 356 + #define BCMA_CC_PMU_REGCTL_ADDR 0x0658 357 + #define BCMA_CC_PMU_REGCTL_DATA 0x065C 358 + #define BCMA_CC_PMU_PLLCTL_ADDR 0x0660 359 + #define BCMA_CC_PMU_PLLCTL_DATA 0x0664 365 360 #define BCMA_CC_PMU_STRAPOPT 0x0668 /* (corerev >= 28) */ 366 361 #define BCMA_CC_PMU_XTAL_FREQ 0x066C /* (pmurev >= 10) */ 367 362 #define BCMA_CC_PMU_XTAL_FREQ_ILPCTL_MASK 0x00001FFF ··· 571 566 * Check availability with ((struct bcma_chipcommon)->capabilities & BCMA_CC_CAP_PMU) 572 567 */ 573 568 struct bcma_chipcommon_pmu { 569 + struct bcma_device *core; /* Can be separated core or just ChipCommon one */ 574 570 u8 rev; /* PMU revision */ 575 571 u32 crystalfreq; /* The active crystal frequency (in kHz) */ 576 572 }; ··· 665 659 bcma_cc_write32(cc, offset, bcma_cc_read32(cc, offset) | (set)) 666 660 #define 
bcma_cc_maskset32(cc, offset, mask, set) \ 667 661 bcma_cc_write32(cc, offset, (bcma_cc_read32(cc, offset) & (mask)) | (set)) 662 + 663 + /* PMU registers access */ 664 + #define bcma_pmu_read32(cc, offset) \ 665 + bcma_read32((cc)->pmu.core, offset) 666 + #define bcma_pmu_write32(cc, offset, val) \ 667 + bcma_write32((cc)->pmu.core, offset, val) 668 + 669 + #define bcma_pmu_mask32(cc, offset, mask) \ 670 + bcma_pmu_write32(cc, offset, bcma_pmu_read32(cc, offset) & (mask)) 671 + #define bcma_pmu_set32(cc, offset, set) \ 672 + bcma_pmu_write32(cc, offset, bcma_pmu_read32(cc, offset) | (set)) 673 + #define bcma_pmu_maskset32(cc, offset, mask, set) \ 674 + bcma_pmu_write32(cc, offset, (bcma_pmu_read32(cc, offset) & (mask)) | (set)) 668 675 669 676 extern u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks); 670 677