Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'wireless-drivers-next-2020-08-04' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
wireless-drivers-next patches for v5.9

Second set of patches for v5.9. mt76 has most of the patches this time.
Otherwise it's just smaller fixes and cleanups to other drivers.

There was a major conflict in mt76 driver between wireless-drivers and
wireless-drivers-next. I solved that by merging the former to the
latter.

Major changes:

rtw88

* add support for ieee80211_ops::change_interface

* add support for enabling and disabling beacon

* add debugfs file for testing h2c

mt76

* ARP filter offload for 7663

* runtime power management for 7663

* testmode support for mfg calibration

* support for more channels
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+5599 -1281
+1 -1
Documentation/networking/device_drivers/wifi/intel/ipw2100.rst
··· 78 78 solution intended to be used for purposes other than development, please 79 79 obtain a tested driver from Intel Customer Support at: 80 80 81 - http://www.intel.com/support/wireless/sb/CS-006408.htm 81 + https://www.intel.com/support/wireless/sb/CS-006408.htm 82 82 83 83 1. Introduction 84 84 ===============
+11 -12
drivers/bcma/driver_gpio.c
··· 122 122 static int bcma_gpio_irq_init(struct bcma_drv_cc *cc) 123 123 { 124 124 struct gpio_chip *chip = &cc->gpio; 125 + struct gpio_irq_chip *girq = &chip->irq; 125 126 int hwirq, err; 126 127 127 128 if (cc->core->bus->hosttype != BCMA_HOSTTYPE_SOC) ··· 137 136 bcma_chipco_gpio_intmask(cc, ~0, 0); 138 137 bcma_cc_set32(cc, BCMA_CC_IRQMASK, BCMA_CC_IRQ_GPIO); 139 138 140 - err = gpiochip_irqchip_add(chip, 141 - &bcma_gpio_irq_chip, 142 - 0, 143 - handle_simple_irq, 144 - IRQ_TYPE_NONE); 145 - if (err) { 146 - free_irq(hwirq, cc); 147 - return err; 148 - } 139 + girq->chip = &bcma_gpio_irq_chip; 140 + /* This will let us handle the parent IRQ in the driver */ 141 + girq->parent_handler = NULL; 142 + girq->num_parents = 0; 143 + girq->parents = NULL; 144 + girq->default_type = IRQ_TYPE_NONE; 145 + girq->handler = handle_simple_irq; 149 146 150 147 return 0; 151 148 } ··· 211 212 else 212 213 chip->base = -1; 213 214 214 - err = gpiochip_add_data(chip, cc); 215 + err = bcma_gpio_irq_init(cc); 215 216 if (err) 216 217 return err; 217 218 218 - err = bcma_gpio_irq_init(cc); 219 + err = gpiochip_add_data(chip, cc); 219 220 if (err) { 220 - gpiochip_remove(chip); 221 + bcma_gpio_irq_exit(cc); 221 222 return err; 222 223 } 223 224
+3 -5
drivers/bcma/scan.c
··· 219 219 static u32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr, 220 220 u32 type, u8 port) 221 221 { 222 - u32 addrl, addrh, sizeh = 0; 222 + u32 addrl; 223 223 u32 size; 224 224 225 225 u32 ent = bcma_erom_get_ent(bus, eromptr); ··· 233 233 234 234 addrl = ent & SCAN_ADDR_ADDR; 235 235 if (ent & SCAN_ADDR_AG32) 236 - addrh = bcma_erom_get_ent(bus, eromptr); 237 - else 238 - addrh = 0; 236 + bcma_erom_get_ent(bus, eromptr); 239 237 240 238 if ((ent & SCAN_ADDR_SZ) == SCAN_ADDR_SZ_SZD) { 241 239 size = bcma_erom_get_ent(bus, eromptr); 242 240 if (size & SCAN_SIZE_SG32) 243 - sizeh = bcma_erom_get_ent(bus, eromptr); 241 + bcma_erom_get_ent(bus, eromptr); 244 242 } 245 243 246 244 return addrl;
+7 -7
drivers/net/wireless/broadcom/b43/main.c
··· 734 734 } 735 735 736 736 /* DummyTransmission function, as documented on 737 - * http://bcm-v4.sipsolutions.net/802.11/DummyTransmission 737 + * https://bcm-v4.sipsolutions.net/802.11/DummyTransmission 738 738 */ 739 739 void b43_dummy_transmission(struct b43_wldev *dev, bool ofdm, bool pa_on) 740 740 { ··· 1198 1198 } 1199 1199 } 1200 1200 1201 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/BmacCorePllReset */ 1201 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/BmacCorePllReset */ 1202 1202 void b43_wireless_core_phy_pll_reset(struct b43_wldev *dev) 1203 1203 { 1204 1204 struct bcma_drv_cc *bcma_cc __maybe_unused; ··· 2290 2290 return -EPROTO; 2291 2291 } 2292 2292 2293 - /* http://bcm-v4.sipsolutions.net/802.11/Init/Firmware */ 2293 + /* https://bcm-v4.sipsolutions.net/802.11/Init/Firmware */ 2294 2294 static int b43_try_request_fw(struct b43_request_fw_context *ctx) 2295 2295 { 2296 2296 struct b43_wldev *dev = ctx->dev; ··· 2843 2843 } 2844 2844 2845 2845 /* Initialize the GPIOs 2846 - * http://bcm-specs.sipsolutions.net/GPIO 2846 + * https://bcm-specs.sipsolutions.net/GPIO 2847 2847 */ 2848 2848 2849 2849 #ifdef CONFIG_B43_SSB ··· 2971 2971 } 2972 2972 } 2973 2973 2974 - /* http://bcm-specs.sipsolutions.net/SuspendMAC */ 2974 + /* https://bcm-specs.sipsolutions.net/SuspendMAC */ 2975 2975 void b43_mac_suspend(struct b43_wldev *dev) 2976 2976 { 2977 2977 int i; ··· 3004 3004 dev->mac_suspended++; 3005 3005 } 3006 3006 3007 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MacPhyClkSet */ 3007 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/MacPhyClkSet */ 3008 3008 void b43_mac_phy_clock_set(struct b43_wldev *dev, bool on) 3009 3009 { 3010 3010 u32 tmp; ··· 3231 3231 } 3232 3232 3233 3233 /* Initialize the chip 3234 - * http://bcm-specs.sipsolutions.net/ChipInit 3234 + * https://bcm-specs.sipsolutions.net/ChipInit 3235 3235 */ 3236 3236 static int b43_chip_init(struct b43_wldev *dev) 3237 3237 {
+1 -1
drivers/net/wireless/broadcom/b43/phy_common.c
··· 559 559 return dev->phy.chandef->width == NL80211_CHAN_WIDTH_40; 560 560 } 561 561 562 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BmacPhyClkFgc */ 562 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/BmacPhyClkFgc */ 563 563 void b43_phy_force_clock(struct b43_wldev *dev, bool force) 564 564 { 565 565 u32 tmp;
+6 -6
drivers/net/wireless/broadcom/b43/phy_g.c
··· 357 357 b43_dummy_transmission(dev, false, true); 358 358 } 359 359 360 - /* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ 360 + /* https://bcm-specs.sipsolutions.net/NRSSILookupTable */ 361 361 static void b43_nrssi_hw_write(struct b43_wldev *dev, u16 offset, s16 val) 362 362 { 363 363 b43_phy_write(dev, B43_PHY_NRSSILT_CTRL, offset); 364 364 b43_phy_write(dev, B43_PHY_NRSSILT_DATA, (u16) val); 365 365 } 366 366 367 - /* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ 367 + /* https://bcm-specs.sipsolutions.net/NRSSILookupTable */ 368 368 static s16 b43_nrssi_hw_read(struct b43_wldev *dev, u16 offset) 369 369 { 370 370 u16 val; ··· 375 375 return (s16) val; 376 376 } 377 377 378 - /* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ 378 + /* https://bcm-specs.sipsolutions.net/NRSSILookupTable */ 379 379 static void b43_nrssi_hw_update(struct b43_wldev *dev, u16 val) 380 380 { 381 381 u16 i; ··· 389 389 } 390 390 } 391 391 392 - /* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ 392 + /* https://bcm-specs.sipsolutions.net/NRSSILookupTable */ 393 393 static void b43_nrssi_mem_update(struct b43_wldev *dev) 394 394 { 395 395 struct b43_phy_g *gphy = dev->phy.g; ··· 1575 1575 b43_write16(dev, 0x03E4, (b43_read16(dev, 0x03E4) & 0xFFC0) | 0x0004); 1576 1576 } 1577 1577 1578 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/Init/B6 */ 1578 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/Init/B6 */ 1579 1579 static void b43_phy_initb6(struct b43_wldev *dev) 1580 1580 { 1581 1581 struct b43_phy *phy = &dev->phy; ··· 2746 2746 return 0; 2747 2747 } 2748 2748 2749 - /* http://bcm-specs.sipsolutions.net/EstimatePowerOut 2749 + /* https://bcm-specs.sipsolutions.net/EstimatePowerOut 2750 2750 * This function converts a TSSI value to dBm in Q5.2 2751 2751 */ 2752 2752 static s8 b43_gphy_estimate_power_out(struct b43_wldev *dev, s8 tssi)
+1 -1
drivers/net/wireless/broadcom/b43/phy_ht.c
··· 1018 1018 phy->ht = NULL; 1019 1019 } 1020 1020 1021 - /* http://bcm-v4.sipsolutions.net/802.11/Radio/Switch%20Radio */ 1021 + /* https://bcm-v4.sipsolutions.net/802.11/Radio/Switch%20Radio */ 1022 1022 static void b43_phy_ht_op_software_rfkill(struct b43_wldev *dev, 1023 1023 bool blocked) 1024 1024 {
+1 -1
drivers/net/wireless/broadcom/b43/phy_lp.c
··· 70 70 dev->phy.lp = NULL; 71 71 } 72 72 73 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/LP/ReadBandSrom */ 73 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/LP/ReadBandSrom */ 74 74 static void lpphy_read_band_sprom(struct b43_wldev *dev) 75 75 { 76 76 struct ssb_sprom *sprom = dev->dev->bus_sprom;
+75 -75
drivers/net/wireless/broadcom/b43/phy_n.c
··· 98 98 (dev->phy.n->ipa5g_on && band == NL80211_BAND_5GHZ)); 99 99 } 100 100 101 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCoreGetState */ 101 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCoreGetState */ 102 102 static u8 b43_nphy_get_rx_core_state(struct b43_wldev *dev) 103 103 { 104 104 return (b43_phy_read(dev, B43_NPHY_RFSEQCA) & B43_NPHY_RFSEQCA_RXEN) >> ··· 109 109 * RF (just without b43_nphy_rf_ctl_intc_override) 110 110 **************************************************/ 111 111 112 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ForceRFSeq */ 112 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/ForceRFSeq */ 113 113 static void b43_nphy_force_rf_sequence(struct b43_wldev *dev, 114 114 enum b43_nphy_rf_sequence seq) 115 115 { ··· 146 146 /* TODO */ 147 147 } 148 148 149 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverrideRev7 */ 149 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverrideRev7 */ 150 150 static void b43_nphy_rf_ctl_override_rev7(struct b43_wldev *dev, u16 field, 151 151 u16 value, u8 core, bool off, 152 152 u8 override) ··· 193 193 } 194 194 } 195 195 196 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverideOneToMany */ 196 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverideOneToMany */ 197 197 static void b43_nphy_rf_ctl_override_one_to_many(struct b43_wldev *dev, 198 198 enum n_rf_ctl_over_cmd cmd, 199 199 u16 value, u8 core, bool off) ··· 237 237 } 238 238 } 239 239 240 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */ 240 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */ 241 241 static void b43_nphy_rf_ctl_override(struct b43_wldev *dev, u16 field, 242 242 u16 value, u8 core, bool off) 243 243 { ··· 382 382 } 383 383 } 384 384 385 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */ 385 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */ 386 386 static void b43_nphy_rf_ctl_intc_override(struct b43_wldev 
*dev, 387 387 enum n_intc_override intc_override, 388 388 u16 value, u8 core) ··· 490 490 * Various PHY ops 491 491 **************************************************/ 492 492 493 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */ 493 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */ 494 494 static void b43_nphy_write_clip_detection(struct b43_wldev *dev, 495 495 const u16 *clip_st) 496 496 { ··· 498 498 b43_phy_write(dev, B43_NPHY_C2_CLIP1THRES, clip_st[1]); 499 499 } 500 500 501 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */ 501 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */ 502 502 static void b43_nphy_read_clip_detection(struct b43_wldev *dev, u16 *clip_st) 503 503 { 504 504 clip_st[0] = b43_phy_read(dev, B43_NPHY_C1_CLIP1THRES); 505 505 clip_st[1] = b43_phy_read(dev, B43_NPHY_C2_CLIP1THRES); 506 506 } 507 507 508 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */ 508 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */ 509 509 static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val) 510 510 { 511 511 u16 tmp; ··· 526 526 return tmp; 527 527 } 528 528 529 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */ 529 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */ 530 530 static void b43_nphy_reset_cca(struct b43_wldev *dev) 531 531 { 532 532 u16 bbcfg; ··· 540 540 b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX); 541 541 } 542 542 543 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/carriersearch */ 543 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/carriersearch */ 544 544 static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable) 545 545 { 546 546 struct b43_phy *phy = &dev->phy; ··· 564 564 } 565 565 } 566 566 567 - /* http://bcm-v4.sipsolutions.net/PHY/N/Read_Lpf_Bw_Ctl */ 567 + /* https://bcm-v4.sipsolutions.net/PHY/N/Read_Lpf_Bw_Ctl */ 568 568 static u16 b43_nphy_read_lpf_ctl(struct b43_wldev *dev, u16 offset) 
569 569 { 570 570 if (!offset) ··· 572 572 return b43_ntab_read(dev, B43_NTAB16(7, offset)) & 0x7; 573 573 } 574 574 575 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/AdjustLnaGainTbl */ 575 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/AdjustLnaGainTbl */ 576 576 static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev) 577 577 { 578 578 struct b43_phy_n *nphy = dev->phy.n; ··· 628 628 b43_nphy_stay_in_carrier_search(dev, 0); 629 629 } 630 630 631 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRfSeq */ 631 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRfSeq */ 632 632 static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd, 633 633 u8 *events, u8 *delays, u8 length) 634 634 { ··· 805 805 } 806 806 807 807 /* Calibrate resistors in LPF of PLL? 808 - * http://bcm-v4.sipsolutions.net/PHY/radio205x_rcal 808 + * https://bcm-v4.sipsolutions.net/PHY/radio205x_rcal 809 809 */ 810 810 static u8 b43_radio_2057_rcal(struct b43_wldev *dev) 811 811 { ··· 919 919 } 920 920 921 921 /* Calibrate the internal RC oscillator? 
922 - * http://bcm-v4.sipsolutions.net/PHY/radio2057_rccal 922 + * https://bcm-v4.sipsolutions.net/PHY/radio2057_rccal 923 923 */ 924 924 static u16 b43_radio_2057_rccal(struct b43_wldev *dev) 925 925 { ··· 1030 1030 b43_radio_mask(dev, R2057_RFPLL_MASTER, ~0x8); 1031 1031 } 1032 1032 1033 - /* http://bcm-v4.sipsolutions.net/802.11/Radio/2057/Init */ 1033 + /* https://bcm-v4.sipsolutions.net/802.11/Radio/2057/Init */ 1034 1034 static void b43_radio_2057_init(struct b43_wldev *dev) 1035 1035 { 1036 1036 b43_radio_2057_init_pre(dev); ··· 1117 1117 e->radio_tx1_mixg_boost_tune); 1118 1118 } 1119 1119 1120 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2056Setup */ 1120 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2056Setup */ 1121 1121 static void b43_radio_2056_setup(struct b43_wldev *dev, 1122 1122 const struct b43_nphy_channeltab_entry_rev3 *e) 1123 1123 { ··· 1356 1356 1357 1357 /* 1358 1358 * Initialize a Broadcom 2056 N-radio 1359 - * http://bcm-v4.sipsolutions.net/802.11/Radio/2056/Init 1359 + * https://bcm-v4.sipsolutions.net/802.11/Radio/2056/Init 1360 1360 */ 1361 1361 static void b43_radio_init2056(struct b43_wldev *dev) 1362 1362 { ··· 1406 1406 b43_radio_write(dev, B2055_C2_TX_MXBGTRIM, e->radio_c2_tx_mxbgtrim); 1407 1407 } 1408 1408 1409 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2055Setup */ 1409 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2055Setup */ 1410 1410 static void b43_radio_2055_setup(struct b43_wldev *dev, 1411 1411 const struct b43_nphy_channeltab_entry_rev2 *e) 1412 1412 { ··· 1480 1480 1481 1481 /* 1482 1482 * Initialize a Broadcom 2055 N-radio 1483 - * http://bcm-v4.sipsolutions.net/802.11/Radio/2055/Init 1483 + * https://bcm-v4.sipsolutions.net/802.11/Radio/2055/Init 1484 1484 */ 1485 1485 static void b43_radio_init2055(struct b43_wldev *dev) 1486 1486 { ··· 1499 1499 * Samples 1500 1500 **************************************************/ 1501 1501 1502 - /* 
http://bcm-v4.sipsolutions.net/802.11/PHY/N/LoadSampleTable */ 1502 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/LoadSampleTable */ 1503 1503 static int b43_nphy_load_samples(struct b43_wldev *dev, 1504 1504 struct cordic_iq *samples, u16 len) { 1505 1505 struct b43_phy_n *nphy = dev->phy.n; ··· 1526 1526 return 0; 1527 1527 } 1528 1528 1529 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GenLoadSamples */ 1529 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/GenLoadSamples */ 1530 1530 static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max, 1531 1531 bool test) 1532 1532 { ··· 1569 1569 return (i < 0) ? 0 : len; 1570 1570 } 1571 1571 1572 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RunSamples */ 1572 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RunSamples */ 1573 1573 static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops, 1574 1574 u16 wait, bool iqmode, bool dac_test, 1575 1575 bool modify_bbmult) ··· 1650 1650 * RSSI 1651 1651 **************************************************/ 1652 1652 1653 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ScaleOffsetRssi */ 1653 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/ScaleOffsetRssi */ 1654 1654 static void b43_nphy_scale_offset_rssi(struct b43_wldev *dev, u16 scale, 1655 1655 s8 offset, u8 core, 1656 1656 enum n_rail_type rail, ··· 1895 1895 } 1896 1896 } 1897 1897 1898 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSISel */ 1898 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSISel */ 1899 1899 static void b43_nphy_rssi_select(struct b43_wldev *dev, u8 code, 1900 1900 enum n_rssi_type type) 1901 1901 { ··· 1907 1907 b43_nphy_rev2_rssi_select(dev, code, type); 1908 1908 } 1909 1909 1910 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRssi2055Vcm */ 1910 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRssi2055Vcm */ 1911 1911 static void b43_nphy_set_rssi_2055_vcm(struct b43_wldev *dev, 1912 1912 enum n_rssi_type rssi_type, u8 *buf) 1913 
1913 { ··· 1936 1936 } 1937 1937 } 1938 1938 1939 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PollRssi */ 1939 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/PollRssi */ 1940 1940 static int b43_nphy_poll_rssi(struct b43_wldev *dev, enum n_rssi_type rssi_type, 1941 1941 s32 *buf, u8 nsamp) 1942 1942 { ··· 2025 2025 return out; 2026 2026 } 2027 2027 2028 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICalRev3 */ 2028 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICalRev3 */ 2029 2029 static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev) 2030 2030 { 2031 2031 struct b43_phy *phy = &dev->phy; ··· 2287 2287 b43_nphy_write_clip_detection(dev, clip_state); 2288 2288 } 2289 2289 2290 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal */ 2290 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal */ 2291 2291 static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, enum n_rssi_type type) 2292 2292 { 2293 2293 int i, j, vcm; ··· 2453 2453 2454 2454 /* 2455 2455 * RSSI Calibration 2456 - * http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal 2456 + * https://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal 2457 2457 */ 2458 2458 static void b43_nphy_rssi_cal(struct b43_wldev *dev) 2459 2459 { ··· 2680 2680 b43_phy_maskset(dev, B43_PHY_N(0xC5D), 0xFF80, 4); 2681 2681 } 2682 2682 2683 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */ 2683 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */ 2684 2684 static void b43_nphy_gain_ctl_workarounds(struct b43_wldev *dev) 2685 2685 { 2686 2686 if (dev->phy.rev >= 19) ··· 3433 3433 B43_NPHY_FINERX2_CGC_DECGC); 3434 3434 } 3435 3435 3436 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Workarounds */ 3436 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/Workarounds */ 3437 3437 static void b43_nphy_workarounds(struct b43_wldev *dev) 3438 3438 { 3439 3439 struct b43_phy *phy = &dev->phy; ··· 3468 3468 3469 3469 /* 3470 3470 * Transmits a known value for LO 
calibration 3471 - * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TXTone 3471 + * https://bcm-v4.sipsolutions.net/802.11/PHY/N/TXTone 3472 3472 */ 3473 3473 static int b43_nphy_tx_tone(struct b43_wldev *dev, u32 freq, u16 max_val, 3474 3474 bool iqmode, bool dac_test, bool modify_bbmult) ··· 3481 3481 return 0; 3482 3482 } 3483 3483 3484 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Chains */ 3484 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/Chains */ 3485 3485 static void b43_nphy_update_txrx_chain(struct b43_wldev *dev) 3486 3486 { 3487 3487 struct b43_phy_n *nphy = dev->phy.n; ··· 3509 3509 ~B43_NPHY_RFSEQMODE_CAOVER); 3510 3510 } 3511 3511 3512 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/stop-playback */ 3512 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/stop-playback */ 3513 3513 static void b43_nphy_stop_playback(struct b43_wldev *dev) 3514 3514 { 3515 3515 struct b43_phy *phy = &dev->phy; ··· 3546 3546 b43_nphy_stay_in_carrier_search(dev, 0); 3547 3547 } 3548 3548 3549 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IqCalGainParams */ 3549 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/IqCalGainParams */ 3550 3550 static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core, 3551 3551 struct nphy_txgains target, 3552 3552 struct nphy_iqcal_params *params) ··· 3595 3595 * Tx and Rx 3596 3596 **************************************************/ 3597 3597 3598 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlEnable */ 3598 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlEnable */ 3599 3599 static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable) 3600 3600 { 3601 3601 struct b43_phy *phy = &dev->phy; ··· 3732 3732 b43_nphy_stay_in_carrier_search(dev, 0); 3733 3733 } 3734 3734 3735 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrFix */ 3735 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrFix */ 3736 3736 static void b43_nphy_tx_power_fix(struct b43_wldev *dev) 3737 3737 { 3738 3738 
struct b43_phy *phy = &dev->phy; ··· 3926 3926 /* 3927 3927 * Stop radio and transmit known signal. Then check received signal strength to 3928 3928 * get TSSI (Transmit Signal Strength Indicator). 3929 - * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlIdleTssi 3929 + * https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlIdleTssi 3930 3930 */ 3931 3931 static void b43_nphy_tx_power_ctl_idle_tssi(struct b43_wldev *dev) 3932 3932 { ··· 3978 3978 nphy->pwr_ctl_info[1].idle_tssi_2g = (tmp >> 8) & 0xFF; 3979 3979 } 3980 3980 3981 - /* http://bcm-v4.sipsolutions.net/PHY/N/TxPwrLimitToTbl */ 3981 + /* https://bcm-v4.sipsolutions.net/PHY/N/TxPwrLimitToTbl */ 3982 3982 static void b43_nphy_tx_prepare_adjusted_power_table(struct b43_wldev *dev) 3983 3983 { 3984 3984 struct b43_phy_n *nphy = dev->phy.n; ··· 4039 4039 } 4040 4040 } 4041 4041 4042 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlSetup */ 4042 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlSetup */ 4043 4043 static void b43_nphy_tx_power_ctl_setup(struct b43_wldev *dev) 4044 4044 { 4045 4045 struct b43_phy *phy = &dev->phy; ··· 4272 4272 } 4273 4273 } 4274 4274 4275 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PA%20override */ 4275 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/PA%20override */ 4276 4276 static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable) 4277 4277 { 4278 4278 struct b43_phy_n *nphy = dev->phy.n; ··· 4310 4310 4311 4311 /* 4312 4312 * TX low-pass filter bandwidth setup 4313 - * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxLpFbw 4313 + * https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxLpFbw 4314 4314 */ 4315 4315 static void b43_nphy_tx_lpf_bw(struct b43_wldev *dev) 4316 4316 { ··· 4333 4333 } 4334 4334 } 4335 4335 4336 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqEst */ 4336 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqEst */ 4337 4337 static void b43_nphy_rx_iq_est(struct b43_wldev *dev, struct nphy_iq_est *est, 4338 4338 
u16 samps, u8 time, bool wait) 4339 4339 { ··· 4372 4372 memset(est, 0, sizeof(*est)); 4373 4373 } 4374 4374 4375 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqCoeffs */ 4375 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqCoeffs */ 4376 4376 static void b43_nphy_rx_iq_coeffs(struct b43_wldev *dev, bool write, 4377 4377 struct b43_phy_n_iq_comp *pcomp) 4378 4378 { ··· 4391 4391 4392 4392 #if 0 4393 4393 /* Ready but not used anywhere */ 4394 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhyCleanup */ 4394 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhyCleanup */ 4395 4395 static void b43_nphy_rx_cal_phy_cleanup(struct b43_wldev *dev, u8 core) 4396 4396 { 4397 4397 u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs; ··· 4414 4414 b43_phy_write(dev, B43_NPHY_PAPD_EN1, regs[10]); 4415 4415 } 4416 4416 4417 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhySetup */ 4417 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhySetup */ 4418 4418 static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core) 4419 4419 { 4420 4420 u8 rxval, txval; ··· 4476 4476 } 4477 4477 #endif 4478 4478 4479 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalcRxIqComp */ 4479 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/CalcRxIqComp */ 4480 4480 static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask) 4481 4481 { 4482 4482 int i; ··· 4574 4574 b43_nphy_rx_iq_coeffs(dev, true, &new); 4575 4575 } 4576 4576 4577 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxIqWar */ 4577 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxIqWar */ 4578 4578 static void b43_nphy_tx_iq_workaround(struct b43_wldev *dev) 4579 4579 { 4580 4580 u16 array[4]; ··· 4586 4586 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW3, array[3]); 4587 4587 } 4588 4588 4589 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SpurWar */ 4589 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/SpurWar */ 4590 4590 static void b43_nphy_spur_workaround(struct 
b43_wldev *dev) 4591 4591 { 4592 4592 struct b43_phy_n *nphy = dev->phy.n; ··· 4645 4645 b43_nphy_stay_in_carrier_search(dev, 0); 4646 4646 } 4647 4647 4648 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlCoefSetup */ 4648 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlCoefSetup */ 4649 4649 static void b43_nphy_tx_pwr_ctrl_coef_setup(struct b43_wldev *dev) 4650 4650 { 4651 4651 struct b43_phy_n *nphy = dev->phy.n; ··· 4713 4713 4714 4714 /* 4715 4715 * Restore RSSI Calibration 4716 - * http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreRssiCal 4716 + * https://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreRssiCal 4717 4717 */ 4718 4718 static void b43_nphy_restore_rssi_cal(struct b43_wldev *dev) 4719 4719 { ··· 4822 4822 } 4823 4823 } 4824 4824 4825 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalRadioSetup */ 4825 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalRadioSetup */ 4826 4826 static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev) 4827 4827 { 4828 4828 struct b43_phy *phy = &dev->phy; ··· 4921 4921 } 4922 4922 } 4923 4923 4924 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/UpdateTxCalLadder */ 4924 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/UpdateTxCalLadder */ 4925 4925 static void b43_nphy_update_tx_cal_ladder(struct b43_wldev *dev, u16 core) 4926 4926 { 4927 4927 struct b43_phy_n *nphy = dev->phy.n; ··· 4955 4955 b43_phy_write(dev, offset, filter[i]); 4956 4956 } 4957 4957 4958 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ExtPaSetTxDigiFilts */ 4958 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/ExtPaSetTxDigiFilts */ 4959 4959 static void b43_nphy_ext_pa_set_tx_dig_filters(struct b43_wldev *dev) 4960 4960 { 4961 4961 b43_nphy_pa_set_tx_dig_filter(dev, 0x2C5, 4962 4962 tbl_tx_filter_coef_rev4[2]); 4963 4963 } 4964 4964 4965 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IpaSetTxDigiFilts */ 4965 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/IpaSetTxDigiFilts */ 4966 4966 static void 
b43_nphy_int_pa_set_tx_dig_filters(struct b43_wldev *dev) 4967 4967 { 4968 4968 /* B43_NPHY_TXF_20CO_S0A1, B43_NPHY_TXF_40CO_S0A1, unknown */ ··· 5002 5002 } 5003 5003 } 5004 5004 5005 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetTxGain */ 5005 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/GetTxGain */ 5006 5006 static struct nphy_txgains b43_nphy_get_tx_gains(struct b43_wldev *dev) 5007 5007 { 5008 5008 struct b43_phy_n *nphy = dev->phy.n; ··· 5077 5077 return target; 5078 5078 } 5079 5079 5080 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalPhyCleanup */ 5080 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalPhyCleanup */ 5081 5081 static void b43_nphy_tx_cal_phy_cleanup(struct b43_wldev *dev) 5082 5082 { 5083 5083 u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs; ··· 5106 5106 } 5107 5107 } 5108 5108 5109 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalPhySetup */ 5109 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalPhySetup */ 5110 5110 static void b43_nphy_tx_cal_phy_setup(struct b43_wldev *dev) 5111 5111 { 5112 5112 struct b43_phy *phy = &dev->phy; ··· 5207 5207 } 5208 5208 } 5209 5209 5210 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SaveCal */ 5210 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/SaveCal */ 5211 5211 static void b43_nphy_save_cal(struct b43_wldev *dev) 5212 5212 { 5213 5213 struct b43_phy *phy = &dev->phy; ··· 5278 5278 b43_nphy_stay_in_carrier_search(dev, 0); 5279 5279 } 5280 5280 5281 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreCal */ 5281 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreCal */ 5282 5282 static void b43_nphy_restore_cal(struct b43_wldev *dev) 5283 5283 { 5284 5284 struct b43_phy *phy = &dev->phy; ··· 5366 5366 b43_nphy_rx_iq_coeffs(dev, true, rxcal_coeffs); 5367 5367 } 5368 5368 5369 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalTxIqlo */ 5369 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/CalTxIqlo */ 5370 5370 static int b43_nphy_cal_tx_iq_lo(struct 
b43_wldev *dev, 5371 5371 struct nphy_txgains target, 5372 5372 bool full, bool mphase) ··· 5599 5599 return error; 5600 5600 } 5601 5601 5602 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ReapplyTxCalCoeffs */ 5602 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/ReapplyTxCalCoeffs */ 5603 5603 static void b43_nphy_reapply_tx_cal_coeffs(struct b43_wldev *dev) 5604 5604 { 5605 5605 struct b43_phy_n *nphy = dev->phy.n; ··· 5634 5634 } 5635 5635 } 5636 5636 5637 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIqRev2 */ 5637 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIqRev2 */ 5638 5638 static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev, 5639 5639 struct nphy_txgains target, u8 type, bool debug) 5640 5640 { ··· 5821 5821 return -1; 5822 5822 } 5823 5823 5824 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIq */ 5824 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIq */ 5825 5825 static int b43_nphy_cal_rx_iq(struct b43_wldev *dev, 5826 5826 struct nphy_txgains target, u8 type, bool debug) 5827 5827 { ··· 5834 5834 return b43_nphy_rev2_cal_rx_iq(dev, target, type, debug); 5835 5835 } 5836 5836 5837 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCoreSetState */ 5837 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCoreSetState */ 5838 5838 static void b43_nphy_set_rx_core_state(struct b43_wldev *dev, u8 mask) 5839 5839 { 5840 5840 struct b43_phy *phy = &dev->phy; ··· 5939 5939 * N-PHY init 5940 5940 **************************************************/ 5941 5941 5942 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MIMOConfig */ 5942 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/MIMOConfig */ 5943 5943 static void b43_nphy_update_mimo_config(struct b43_wldev *dev, s32 preamble) 5944 5944 { 5945 5945 u16 mimocfg = b43_phy_read(dev, B43_NPHY_MIMOCFG); ··· 5953 5953 b43_phy_write(dev, B43_NPHY_MIMOCFG, mimocfg); 5954 5954 } 5955 5955 5956 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BPHYInit */ 5956 + /* 
https://bcm-v4.sipsolutions.net/802.11/PHY/N/BPHYInit */ 5957 5957 static void b43_nphy_bphy_init(struct b43_wldev *dev) 5958 5958 { 5959 5959 unsigned int i; ··· 5972 5972 b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668); 5973 5973 } 5974 5974 5975 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SuperSwitchInit */ 5975 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/SuperSwitchInit */ 5976 5976 static void b43_nphy_superswitch_init(struct b43_wldev *dev, bool init) 5977 5977 { 5978 5978 if (dev->phy.rev >= 7) ··· 6246 6246 b43_phy_write(dev, B43_NPHY_BW6, e->phy_bw6); 6247 6247 } 6248 6248 6249 - /* http://bcm-v4.sipsolutions.net/802.11/PmuSpurAvoid */ 6249 + /* https://bcm-v4.sipsolutions.net/802.11/PmuSpurAvoid */ 6250 6250 static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid) 6251 6251 { 6252 6252 switch (dev->dev->bus_type) { ··· 6265 6265 } 6266 6266 } 6267 6267 6268 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ChanspecSetup */ 6268 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/ChanspecSetup */ 6269 6269 static void b43_nphy_channel_setup(struct b43_wldev *dev, 6270 6270 const struct b43_phy_n_sfo_cfg *e, 6271 6271 struct ieee80211_channel *new_channel) ··· 6372 6372 b43_nphy_spur_workaround(dev); 6373 6373 } 6374 6374 6375 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetChanspec */ 6375 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/SetChanspec */ 6376 6376 static int b43_nphy_set_channel(struct b43_wldev *dev, 6377 6377 struct ieee80211_channel *channel, 6378 6378 enum nl80211_channel_type channel_type) ··· 6589 6589 b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value); 6590 6590 } 6591 6591 6592 - /* http://bcm-v4.sipsolutions.net/802.11/Radio/Switch%20Radio */ 6592 + /* https://bcm-v4.sipsolutions.net/802.11/Radio/Switch%20Radio */ 6593 6593 static void b43_nphy_op_software_rfkill(struct b43_wldev *dev, 6594 6594 bool blocked) 6595 6595 { ··· 6643 6643 } 6644 6644 } 6645 6645 6646 - /* 
http://bcm-v4.sipsolutions.net/802.11/PHY/Anacore */ 6646 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/Anacore */ 6647 6647 static void b43_nphy_op_switch_analog(struct b43_wldev *dev, bool on) 6648 6648 { 6649 6649 struct b43_phy *phy = &dev->phy;
+1 -1
drivers/net/wireless/broadcom/b43/radio_2056.c
··· 3072 3072 .phy_regs.phy_bw5 = r4, \ 3073 3073 .phy_regs.phy_bw6 = r5 3074 3074 3075 - /* http://bcm-v4.sipsolutions.net/802.11/Radio/2056/ChannelTable */ 3075 + /* https://bcm-v4.sipsolutions.net/802.11/Radio/2056/ChannelTable */ 3076 3076 static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_phy_rev3[] = { 3077 3077 { .freq = 4920, 3078 3078 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
+2 -2
drivers/net/wireless/broadcom/b43/tables_nphy.c
··· 3620 3620 ntab_upload(dev, B43_NTAB_C1_LOFEEDTH, b43_ntab_loftlt1); 3621 3621 } 3622 3622 3623 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables */ 3623 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables */ 3624 3624 void b43_nphy_tables_init(struct b43_wldev *dev) 3625 3625 { 3626 3626 if (dev->phy.rev >= 16) ··· 3633 3633 b43_nphy_tables_init_rev0(dev); 3634 3634 } 3635 3635 3636 - /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetIpaGainTbl */ 3636 + /* https://bcm-v4.sipsolutions.net/802.11/PHY/N/GetIpaGainTbl */ 3637 3637 static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev) 3638 3638 { 3639 3639 struct b43_phy *phy = &dev->phy;
+4 -4
drivers/net/wireless/broadcom/b43legacy/main.c
··· 591 591 } 592 592 593 593 /* DummyTransmission function, as documented on 594 - * http://bcm-specs.sipsolutions.net/DummyTransmission 594 + * https://bcm-specs.sipsolutions.net/DummyTransmission 595 595 */ 596 596 void b43legacy_dummy_transmission(struct b43legacy_wldev *dev) 597 597 { ··· 1870 1870 } 1871 1871 1872 1872 /* Initialize the GPIOs 1873 - * http://bcm-specs.sipsolutions.net/GPIO 1873 + * https://bcm-specs.sipsolutions.net/GPIO 1874 1874 */ 1875 1875 static int b43legacy_gpio_init(struct b43legacy_wldev *dev) 1876 1876 { ··· 1960 1960 } 1961 1961 } 1962 1962 1963 - /* http://bcm-specs.sipsolutions.net/SuspendMAC */ 1963 + /* https://bcm-specs.sipsolutions.net/SuspendMAC */ 1964 1964 void b43legacy_mac_suspend(struct b43legacy_wldev *dev) 1965 1965 { 1966 1966 int i; ··· 2141 2141 } 2142 2142 2143 2143 /* Initialize the chip 2144 - * http://bcm-specs.sipsolutions.net/ChipInit 2144 + * https://bcm-specs.sipsolutions.net/ChipInit 2145 2145 */ 2146 2146 static int b43legacy_chip_init(struct b43legacy_wldev *dev) 2147 2147 {
+4 -4
drivers/net/wireless/broadcom/b43legacy/phy.c
··· 129 129 } 130 130 131 131 /* initialize B PHY power control 132 - * as described in http://bcm-specs.sipsolutions.net/InitPowerControl 132 + * as described in https://bcm-specs.sipsolutions.net/InitPowerControl 133 133 */ 134 134 static void b43legacy_phy_init_pctl(struct b43legacy_wldev *dev) 135 135 { ··· 1461 1461 b43legacy_phy_write(dev, 0x0060, value); 1462 1462 } 1463 1463 1464 - /* http://bcm-specs.sipsolutions.net/LocalOscillator/Measure */ 1464 + /* https://bcm-specs.sipsolutions.net/LocalOscillator/Measure */ 1465 1465 void b43legacy_phy_lo_g_measure(struct b43legacy_wldev *dev) 1466 1466 { 1467 1467 static const u8 pairorder[10] = { 3, 1, 5, 7, 9, 2, 0, 4, 6, 8 }; ··· 1721 1721 } 1722 1722 } 1723 1723 1724 - /* http://bcm-specs.sipsolutions.net/EstimatePowerOut 1724 + /* https://bcm-specs.sipsolutions.net/EstimatePowerOut 1725 1725 * This function converts a TSSI value to dBm in Q5.2 1726 1726 */ 1727 1727 static s8 b43legacy_phy_estimate_power_out(struct b43legacy_wldev *dev, s8 tssi) ··· 1747 1747 return dbm; 1748 1748 } 1749 1749 1750 - /* http://bcm-specs.sipsolutions.net/RecalculateTransmissionPower */ 1750 + /* https://bcm-specs.sipsolutions.net/RecalculateTransmissionPower */ 1751 1751 void b43legacy_phy_xmitpower(struct b43legacy_wldev *dev) 1752 1752 { 1753 1753 struct b43legacy_phy *phy = &dev->phy;
+4 -4
drivers/net/wireless/broadcom/b43legacy/radio.c
··· 313 313 return ret[channel - 1]; 314 314 } 315 315 316 - /* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ 316 + /* https://bcm-specs.sipsolutions.net/NRSSILookupTable */ 317 317 void b43legacy_nrssi_hw_write(struct b43legacy_wldev *dev, u16 offset, s16 val) 318 318 { 319 319 b43legacy_phy_write(dev, B43legacy_PHY_NRSSILT_CTRL, offset); 320 320 b43legacy_phy_write(dev, B43legacy_PHY_NRSSILT_DATA, (u16)val); 321 321 } 322 322 323 - /* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ 323 + /* https://bcm-specs.sipsolutions.net/NRSSILookupTable */ 324 324 s16 b43legacy_nrssi_hw_read(struct b43legacy_wldev *dev, u16 offset) 325 325 { 326 326 u16 val; ··· 331 331 return (s16)val; 332 332 } 333 333 334 - /* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ 334 + /* https://bcm-specs.sipsolutions.net/NRSSILookupTable */ 335 335 void b43legacy_nrssi_hw_update(struct b43legacy_wldev *dev, u16 val) 336 336 { 337 337 u16 i; ··· 345 345 } 346 346 } 347 347 348 - /* http://bcm-specs.sipsolutions.net/NRSSILookupTable */ 348 + /* https://bcm-specs.sipsolutions.net/NRSSILookupTable */ 349 349 void b43legacy_nrssi_mem_update(struct b43legacy_wldev *dev) 350 350 { 351 351 struct b43legacy_phy *phy = &dev->phy;
+8
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
··· 84 84 85 85 #define BRCMF_ND_INFO_TIMEOUT msecs_to_jiffies(2000) 86 86 87 + #define BRCMF_PS_MAX_TIMEOUT_MS 2000 88 + 87 89 #define BRCMF_ASSOC_PARAMS_FIXED_SIZE \ 88 90 (sizeof(struct brcmf_assoc_params_le) - sizeof(u16)) 89 91 ··· 2944 2942 else 2945 2943 bphy_err(drvr, "error (%d)\n", err); 2946 2944 } 2945 + 2946 + err = brcmf_fil_iovar_int_set(ifp, "pm2_sleep_ret", 2947 + min_t(u32, timeout, BRCMF_PS_MAX_TIMEOUT_MS)); 2948 + if (err) 2949 + bphy_err(drvr, "Unable to set pm timeout, (%d)\n", err); 2950 + 2947 2951 done: 2948 2952 brcmf_dbg(TRACE, "Exit\n"); 2949 2953 return err;
+19 -20
drivers/net/wireless/cisco/airo.c
··· 74 74 75 75 static int airo_pci_probe(struct pci_dev *, const struct pci_device_id *); 76 76 static void airo_pci_remove(struct pci_dev *); 77 - static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state); 78 - static int airo_pci_resume(struct pci_dev *pdev); 77 + static int __maybe_unused airo_pci_suspend(struct device *dev); 78 + static int __maybe_unused airo_pci_resume(struct device *dev); 79 + 80 + static SIMPLE_DEV_PM_OPS(airo_pci_pm_ops, 81 + airo_pci_suspend, 82 + airo_pci_resume); 79 83 80 84 static struct pci_driver airo_driver = { 81 - .name = DRV_NAME, 82 - .id_table = card_ids, 83 - .probe = airo_pci_probe, 84 - .remove = airo_pci_remove, 85 - .suspend = airo_pci_suspend, 86 - .resume = airo_pci_resume, 85 + .name = DRV_NAME, 86 + .id_table = card_ids, 87 + .probe = airo_pci_probe, 88 + .remove = airo_pci_remove, 89 + .driver.pm = &airo_pci_pm_ops, 87 90 }; 88 91 #endif /* CONFIG_PCI */ 89 92 ··· 5576 5573 pci_disable_device(pdev); 5577 5574 } 5578 5575 5579 - static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state) 5576 + static int __maybe_unused airo_pci_suspend(struct device *dev_d) 5580 5577 { 5581 - struct net_device *dev = pci_get_drvdata(pdev); 5578 + struct net_device *dev = dev_get_drvdata(dev_d); 5582 5579 struct airo_info *ai = dev->ml_priv; 5583 5580 Cmd cmd; 5584 5581 Resp rsp; ··· 5594 5591 return -EAGAIN; 5595 5592 disable_MAC(ai, 0); 5596 5593 netif_device_detach(dev); 5597 - ai->power = state; 5594 + ai->power = PMSG_SUSPEND; 5598 5595 cmd.cmd = HOSTSLEEP; 5599 5596 issuecommand(ai, &cmd, &rsp); 5600 5597 5601 - pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); 5602 - pci_save_state(pdev); 5603 - pci_set_power_state(pdev, pci_choose_state(pdev, state)); 5598 + device_wakeup_enable(dev_d); 5604 5599 return 0; 5605 5600 } 5606 5601 5607 - static int airo_pci_resume(struct pci_dev *pdev) 5602 + static int __maybe_unused airo_pci_resume(struct device *dev_d) 5608 5603 { 5609 - struct net_device *dev = 
pci_get_drvdata(pdev); 5604 + struct net_device *dev = dev_get_drvdata(dev_d); 5610 5605 struct airo_info *ai = dev->ml_priv; 5611 - pci_power_t prev_state = pdev->current_state; 5606 + pci_power_t prev_state = to_pci_dev(dev_d)->current_state; 5612 5607 5613 - pci_set_power_state(pdev, PCI_D0); 5614 - pci_restore_state(pdev); 5615 - pci_enable_wake(pdev, PCI_D0, 0); 5608 + device_wakeup_disable(dev_d); 5616 5609 5617 5610 if (prev_state != PCI_D1) { 5618 5611 reset_card(dev, 0);
+2 -2
drivers/net/wireless/intel/ipw2x00/Kconfig
··· 28 28 You will also very likely need the Wireless Tools in order to 29 29 configure your card: 30 30 31 - <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>. 31 + <https://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>. 32 32 33 33 It is recommended that you compile this driver as a module (M) 34 34 rather than built-in (Y). This driver requires firmware at device ··· 90 90 You will also very likely need the Wireless Tools in order to 91 91 configure your card: 92 92 93 - <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>. 93 + <https://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>. 94 94 95 95 It is recommended that you compile this driver as a module (M) 96 96 rather than built-in (Y). This driver requires firmware at device
+60 -63
drivers/net/wireless/intel/ipw2x00/ipw2100.c
··· 2295 2295 return -ENOMEM; 2296 2296 2297 2297 packet->rxp = (struct ipw2100_rx *)packet->skb->data; 2298 - packet->dma_addr = pci_map_single(priv->pci_dev, packet->skb->data, 2298 + packet->dma_addr = dma_map_single(&priv->pci_dev->dev, 2299 + packet->skb->data, 2299 2300 sizeof(struct ipw2100_rx), 2300 - PCI_DMA_FROMDEVICE); 2301 - if (pci_dma_mapping_error(priv->pci_dev, packet->dma_addr)) { 2301 + DMA_FROM_DEVICE); 2302 + if (dma_mapping_error(&priv->pci_dev->dev, packet->dma_addr)) { 2302 2303 dev_kfree_skb(packet->skb); 2303 2304 return -ENOMEM; 2304 2305 } ··· 2480 2479 return; 2481 2480 } 2482 2481 2483 - pci_unmap_single(priv->pci_dev, 2484 - packet->dma_addr, 2485 - sizeof(struct ipw2100_rx), PCI_DMA_FROMDEVICE); 2482 + dma_unmap_single(&priv->pci_dev->dev, packet->dma_addr, 2483 + sizeof(struct ipw2100_rx), DMA_FROM_DEVICE); 2486 2484 2487 2485 skb_put(packet->skb, status->frame_size); 2488 2486 ··· 2563 2563 return; 2564 2564 } 2565 2565 2566 - pci_unmap_single(priv->pci_dev, packet->dma_addr, 2567 - sizeof(struct ipw2100_rx), PCI_DMA_FROMDEVICE); 2566 + dma_unmap_single(&priv->pci_dev->dev, packet->dma_addr, 2567 + sizeof(struct ipw2100_rx), DMA_FROM_DEVICE); 2568 2568 memmove(packet->skb->data + sizeof(struct ipw_rt_hdr), 2569 2569 packet->skb->data, status->frame_size); 2570 2570 ··· 2689 2689 2690 2690 /* Sync the DMA for the RX buffer so CPU is sure to get 2691 2691 * the correct values */ 2692 - pci_dma_sync_single_for_cpu(priv->pci_dev, packet->dma_addr, 2693 - sizeof(struct ipw2100_rx), 2694 - PCI_DMA_FROMDEVICE); 2692 + dma_sync_single_for_cpu(&priv->pci_dev->dev, packet->dma_addr, 2693 + sizeof(struct ipw2100_rx), 2694 + DMA_FROM_DEVICE); 2695 2695 2696 2696 if (unlikely(ipw2100_corruption_check(priv, i))) { 2697 2697 ipw2100_corruption_detected(priv, i); ··· 2923 2923 (packet->index + 1 + i) % txq->entries, 2924 2924 tbd->host_addr, tbd->buf_length); 2925 2925 2926 - pci_unmap_single(priv->pci_dev, 2927 - tbd->host_addr, 2928 - 
tbd->buf_length, PCI_DMA_TODEVICE); 2926 + dma_unmap_single(&priv->pci_dev->dev, tbd->host_addr, 2927 + tbd->buf_length, DMA_TO_DEVICE); 2929 2928 } 2930 2929 2931 2930 libipw_txb_free(packet->info.d_struct.txb); ··· 3164 3165 tbd->buf_length = packet->info.d_struct.txb-> 3165 3166 fragments[i]->len - LIBIPW_3ADDR_LEN; 3166 3167 3167 - tbd->host_addr = pci_map_single(priv->pci_dev, 3168 + tbd->host_addr = dma_map_single(&priv->pci_dev->dev, 3168 3169 packet->info.d_struct. 3169 - txb->fragments[i]-> 3170 - data + 3170 + txb->fragments[i]->data + 3171 3171 LIBIPW_3ADDR_LEN, 3172 3172 tbd->buf_length, 3173 - PCI_DMA_TODEVICE); 3174 - if (pci_dma_mapping_error(priv->pci_dev, 3175 - tbd->host_addr)) { 3173 + DMA_TO_DEVICE); 3174 + if (dma_mapping_error(&priv->pci_dev->dev, tbd->host_addr)) { 3176 3175 IPW_DEBUG_TX("dma mapping error\n"); 3177 3176 break; 3178 3177 } ··· 3179 3182 txq->next, tbd->host_addr, 3180 3183 tbd->buf_length); 3181 3184 3182 - pci_dma_sync_single_for_device(priv->pci_dev, 3183 - tbd->host_addr, 3184 - tbd->buf_length, 3185 - PCI_DMA_TODEVICE); 3185 + dma_sync_single_for_device(&priv->pci_dev->dev, 3186 + tbd->host_addr, 3187 + tbd->buf_length, 3188 + DMA_TO_DEVICE); 3186 3189 3187 3190 txq->next++; 3188 3191 txq->next %= txq->entries; ··· 3437 3440 return -ENOMEM; 3438 3441 3439 3442 for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++) { 3440 - v = pci_zalloc_consistent(priv->pci_dev, 3441 - sizeof(struct ipw2100_cmd_header), 3442 - &p); 3443 + v = dma_alloc_coherent(&priv->pci_dev->dev, 3444 + sizeof(struct ipw2100_cmd_header), &p, 3445 + GFP_KERNEL); 3443 3446 if (!v) { 3444 3447 printk(KERN_ERR DRV_NAME ": " 3445 3448 "%s: PCI alloc failed for msg " ··· 3458 3461 return 0; 3459 3462 3460 3463 for (j = 0; j < i; j++) { 3461 - pci_free_consistent(priv->pci_dev, 3462 - sizeof(struct ipw2100_cmd_header), 3463 - priv->msg_buffers[j].info.c_struct.cmd, 3464 - priv->msg_buffers[j].info.c_struct. 
3465 - cmd_phys); 3464 + dma_free_coherent(&priv->pci_dev->dev, 3465 + sizeof(struct ipw2100_cmd_header), 3466 + priv->msg_buffers[j].info.c_struct.cmd, 3467 + priv->msg_buffers[j].info.c_struct.cmd_phys); 3466 3468 } 3467 3469 3468 3470 kfree(priv->msg_buffers); ··· 3492 3496 return; 3493 3497 3494 3498 for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++) { 3495 - pci_free_consistent(priv->pci_dev, 3496 - sizeof(struct ipw2100_cmd_header), 3497 - priv->msg_buffers[i].info.c_struct.cmd, 3498 - priv->msg_buffers[i].info.c_struct. 3499 - cmd_phys); 3499 + dma_free_coherent(&priv->pci_dev->dev, 3500 + sizeof(struct ipw2100_cmd_header), 3501 + priv->msg_buffers[i].info.c_struct.cmd, 3502 + priv->msg_buffers[i].info.c_struct.cmd_phys); 3500 3503 } 3501 3504 3502 3505 kfree(priv->msg_buffers); ··· 4318 4323 IPW_DEBUG_INFO("enter\n"); 4319 4324 4320 4325 q->size = entries * sizeof(struct ipw2100_status); 4321 - q->drv = pci_zalloc_consistent(priv->pci_dev, q->size, &q->nic); 4326 + q->drv = dma_alloc_coherent(&priv->pci_dev->dev, q->size, &q->nic, 4327 + GFP_KERNEL); 4322 4328 if (!q->drv) { 4323 4329 IPW_DEBUG_WARNING("Can not allocate status queue.\n"); 4324 4330 return -ENOMEM; ··· 4335 4339 IPW_DEBUG_INFO("enter\n"); 4336 4340 4337 4341 if (priv->status_queue.drv) { 4338 - pci_free_consistent(priv->pci_dev, priv->status_queue.size, 4339 - priv->status_queue.drv, 4340 - priv->status_queue.nic); 4342 + dma_free_coherent(&priv->pci_dev->dev, 4343 + priv->status_queue.size, 4344 + priv->status_queue.drv, 4345 + priv->status_queue.nic); 4341 4346 priv->status_queue.drv = NULL; 4342 4347 } 4343 4348 ··· 4354 4357 4355 4358 q->entries = entries; 4356 4359 q->size = entries * sizeof(struct ipw2100_bd); 4357 - q->drv = pci_zalloc_consistent(priv->pci_dev, q->size, &q->nic); 4360 + q->drv = dma_alloc_coherent(&priv->pci_dev->dev, q->size, &q->nic, 4361 + GFP_KERNEL); 4358 4362 if (!q->drv) { 4359 4363 IPW_DEBUG_INFO 4360 4364 ("can't allocate shared memory for buffer descriptors\n"); 
··· 4375 4377 return; 4376 4378 4377 4379 if (q->drv) { 4378 - pci_free_consistent(priv->pci_dev, q->size, q->drv, q->nic); 4380 + dma_free_coherent(&priv->pci_dev->dev, q->size, q->drv, 4381 + q->nic); 4379 4382 q->drv = NULL; 4380 4383 } 4381 4384 ··· 4429 4430 4430 4431 priv->tx_buffers = kmalloc_array(TX_PENDED_QUEUE_LENGTH, 4431 4432 sizeof(struct ipw2100_tx_packet), 4432 - GFP_ATOMIC); 4433 + GFP_KERNEL); 4433 4434 if (!priv->tx_buffers) { 4434 4435 bd_queue_free(priv, &priv->tx_queue); 4435 4436 return -ENOMEM; 4436 4437 } 4437 4438 4438 4439 for (i = 0; i < TX_PENDED_QUEUE_LENGTH; i++) { 4439 - v = pci_alloc_consistent(priv->pci_dev, 4440 - sizeof(struct ipw2100_data_header), 4441 - &p); 4440 + v = dma_alloc_coherent(&priv->pci_dev->dev, 4441 + sizeof(struct ipw2100_data_header), &p, 4442 + GFP_KERNEL); 4442 4443 if (!v) { 4443 4444 printk(KERN_ERR DRV_NAME 4444 4445 ": %s: PCI alloc failed for tx " "buffers.\n", ··· 4458 4459 return 0; 4459 4460 4460 4461 for (j = 0; j < i; j++) { 4461 - pci_free_consistent(priv->pci_dev, 4462 - sizeof(struct ipw2100_data_header), 4463 - priv->tx_buffers[j].info.d_struct.data, 4464 - priv->tx_buffers[j].info.d_struct. 4465 - data_phys); 4462 + dma_free_coherent(&priv->pci_dev->dev, 4463 + sizeof(struct ipw2100_data_header), 4464 + priv->tx_buffers[j].info.d_struct.data, 4465 + priv->tx_buffers[j].info.d_struct.data_phys); 4466 4466 } 4467 4467 4468 4468 kfree(priv->tx_buffers); ··· 4538 4540 priv->tx_buffers[i].info.d_struct.txb = NULL; 4539 4541 } 4540 4542 if (priv->tx_buffers[i].info.d_struct.data) 4541 - pci_free_consistent(priv->pci_dev, 4542 - sizeof(struct ipw2100_data_header), 4543 - priv->tx_buffers[i].info.d_struct. 4544 - data, 4545 - priv->tx_buffers[i].info.d_struct. 
4546 - data_phys); 4543 + dma_free_coherent(&priv->pci_dev->dev, 4544 + sizeof(struct ipw2100_data_header), 4545 + priv->tx_buffers[i].info.d_struct.data, 4546 + priv->tx_buffers[i].info.d_struct.data_phys); 4547 4547 } 4548 4548 4549 4549 kfree(priv->tx_buffers); ··· 4604 4608 return 0; 4605 4609 4606 4610 for (j = 0; j < i; j++) { 4607 - pci_unmap_single(priv->pci_dev, priv->rx_buffers[j].dma_addr, 4611 + dma_unmap_single(&priv->pci_dev->dev, 4612 + priv->rx_buffers[j].dma_addr, 4608 4613 sizeof(struct ipw2100_rx_packet), 4609 - PCI_DMA_FROMDEVICE); 4614 + DMA_FROM_DEVICE); 4610 4615 dev_kfree_skb(priv->rx_buffers[j].skb); 4611 4616 } 4612 4617 ··· 4659 4662 4660 4663 for (i = 0; i < RX_QUEUE_LENGTH; i++) { 4661 4664 if (priv->rx_buffers[i].rxp) { 4662 - pci_unmap_single(priv->pci_dev, 4665 + dma_unmap_single(&priv->pci_dev->dev, 4663 4666 priv->rx_buffers[i].dma_addr, 4664 4667 sizeof(struct ipw2100_rx), 4665 - PCI_DMA_FROMDEVICE); 4668 + DMA_FROM_DEVICE); 4666 4669 dev_kfree_skb(priv->rx_buffers[i].skb); 4667 4670 } 4668 4671 } ··· 6193 6196 pci_set_master(pci_dev); 6194 6197 pci_set_drvdata(pci_dev, priv); 6195 6198 6196 - err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)); 6199 + err = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32)); 6197 6200 if (err) { 6198 6201 printk(KERN_WARNING DRV_NAME 6199 6202 "Error calling pci_set_dma_mask.\n");
+29 -27
drivers/net/wireless/intel/ipw2x00/ipw2200.c
··· 3442 3442 /* In the reset function, these buffers may have been allocated 3443 3443 * to an SKB, so we need to unmap and free potential storage */ 3444 3444 if (rxq->pool[i].skb != NULL) { 3445 - pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr, 3446 - IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); 3445 + dma_unmap_single(&priv->pci_dev->dev, 3446 + rxq->pool[i].dma_addr, 3447 + IPW_RX_BUF_SIZE, DMA_FROM_DEVICE); 3447 3448 dev_kfree_skb(rxq->pool[i].skb); 3448 3449 rxq->pool[i].skb = NULL; 3449 3450 } ··· 3775 3774 return -ENOMEM; 3776 3775 3777 3776 q->bd = 3778 - pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr); 3777 + dma_alloc_coherent(&dev->dev, sizeof(q->bd[0]) * count, 3778 + &q->q.dma_addr, GFP_KERNEL); 3779 3779 if (!q->bd) { 3780 3780 IPW_ERROR("pci_alloc_consistent(%zd) failed\n", 3781 3781 sizeof(q->bd[0]) * count); ··· 3818 3816 3819 3817 /* unmap chunks if any */ 3820 3818 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) { 3821 - pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]), 3819 + dma_unmap_single(&dev->dev, 3820 + le32_to_cpu(bd->u.data.chunk_ptr[i]), 3822 3821 le16_to_cpu(bd->u.data.chunk_len[i]), 3823 - PCI_DMA_TODEVICE); 3822 + DMA_TO_DEVICE); 3824 3823 if (txq->txb[txq->q.last_used]) { 3825 3824 libipw_txb_free(txq->txb[txq->q.last_used]); 3826 3825 txq->txb[txq->q.last_used] = NULL; ··· 3853 3850 } 3854 3851 3855 3852 /* free buffers belonging to queue itself */ 3856 - pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd, 3857 - q->dma_addr); 3853 + dma_free_coherent(&dev->dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd, 3854 + q->dma_addr); 3858 3855 kfree(txq->txb); 3859 3856 3860 3857 /* 0 fill whole structure */ ··· 5199 5196 list_del(element); 5200 5197 5201 5198 rxb->dma_addr = 5202 - pci_map_single(priv->pci_dev, rxb->skb->data, 5203 - IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); 5199 + dma_map_single(&priv->pci_dev->dev, rxb->skb->data, 5200 + IPW_RX_BUF_SIZE, DMA_FROM_DEVICE); 5204 5201 5205 5202 
list_add_tail(&rxb->list, &rxq->rx_free); 5206 5203 rxq->free_count++; ··· 5233 5230 5234 5231 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { 5235 5232 if (rxq->pool[i].skb != NULL) { 5236 - pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr, 5237 - IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); 5233 + dma_unmap_single(&priv->pci_dev->dev, 5234 + rxq->pool[i].dma_addr, 5235 + IPW_RX_BUF_SIZE, DMA_FROM_DEVICE); 5238 5236 dev_kfree_skb(rxq->pool[i].skb); 5239 5237 } 5240 5238 } ··· 8267 8263 } 8268 8264 priv->rxq->queue[i] = NULL; 8269 8265 8270 - pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr, 8271 - IPW_RX_BUF_SIZE, 8272 - PCI_DMA_FROMDEVICE); 8266 + dma_sync_single_for_cpu(&priv->pci_dev->dev, rxb->dma_addr, 8267 + IPW_RX_BUF_SIZE, DMA_FROM_DEVICE); 8273 8268 8274 8269 pkt = (struct ipw_rx_packet *)rxb->skb->data; 8275 8270 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n", ··· 8420 8417 rxb->skb = NULL; 8421 8418 } 8422 8419 8423 - pci_unmap_single(priv->pci_dev, rxb->dma_addr, 8424 - IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); 8420 + dma_unmap_single(&priv->pci_dev->dev, rxb->dma_addr, 8421 + IPW_RX_BUF_SIZE, DMA_FROM_DEVICE); 8425 8422 list_add_tail(&rxb->list, &priv->rxq->rx_used); 8426 8423 8427 8424 i = (i + 1) % RX_QUEUE_SIZE; ··· 10220 10217 txb->fragments[i]->len - hdr_len); 10221 10218 10222 10219 tfd->u.data.chunk_ptr[i] = 10223 - cpu_to_le32(pci_map_single 10224 - (priv->pci_dev, 10225 - txb->fragments[i]->data + hdr_len, 10226 - txb->fragments[i]->len - hdr_len, 10227 - PCI_DMA_TODEVICE)); 10220 + cpu_to_le32(dma_map_single(&priv->pci_dev->dev, 10221 + txb->fragments[i]->data + hdr_len, 10222 + txb->fragments[i]->len - hdr_len, 10223 + DMA_TO_DEVICE)); 10228 10224 tfd->u.data.chunk_len[i] = 10229 10225 cpu_to_le16(txb->fragments[i]->len - hdr_len); 10230 10226 } ··· 10253 10251 dev_kfree_skb_any(txb->fragments[i]); 10254 10252 txb->fragments[i] = skb; 10255 10253 tfd->u.data.chunk_ptr[i] = 10256 - cpu_to_le32(pci_map_single 10257 - 
(priv->pci_dev, skb->data, 10258 - remaining_bytes, 10259 - PCI_DMA_TODEVICE)); 10254 + cpu_to_le32(dma_map_single(&priv->pci_dev->dev, 10255 + skb->data, 10256 + remaining_bytes, 10257 + DMA_TO_DEVICE)); 10260 10258 10261 10259 le32_add_cpu(&tfd->u.data.num_chunks, 1); 10262 10260 } ··· 11622 11620 11623 11621 pci_set_master(pdev); 11624 11622 11625 - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 11623 + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 11626 11624 if (!err) 11627 - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 11625 + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 11628 11626 if (err) { 11629 11627 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n"); 11630 11628 goto out_pci_disable_device;
+1 -1
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
··· 480 480 if (!iwlwifi_mod_params.enable_ini) 481 481 return; 482 482 483 - res = request_firmware(&fw, "iwl-debug-yoyo.bin", dev); 483 + res = firmware_request_nowarn(&fw, "iwl-debug-yoyo.bin", dev); 484 484 if (res) 485 485 return; 486 486
+1 -1
drivers/net/wireless/intersil/Kconfig
··· 30 30 31 31 For more information refer to the p54 wiki: 32 32 33 - http://wireless.kernel.org/en/users/Drivers/p54 33 + http://wireless.wiki.kernel.org/en/users/Drivers/p54 34 34 35 35 Note: You need a motherboard with DMA support to use any of these cards 36 36
+3 -3
drivers/net/wireless/intersil/hostap/hostap_hw.c
··· 3366 3366 } 3367 3367 3368 3368 3369 - #if (defined(PRISM2_PCI) && defined(CONFIG_PM)) || defined(PRISM2_PCCARD) 3370 - static void prism2_suspend(struct net_device *dev) 3369 + #if defined(PRISM2_PCI) || defined(PRISM2_PCCARD) 3370 + static void __maybe_unused prism2_suspend(struct net_device *dev) 3371 3371 { 3372 3372 struct hostap_interface *iface; 3373 3373 struct local_info *local; ··· 3385 3385 /* Disable hardware and firmware */ 3386 3386 prism2_hw_shutdown(dev, 0); 3387 3387 } 3388 - #endif /* (PRISM2_PCI && CONFIG_PM) || PRISM2_PCCARD */ 3388 + #endif /* PRISM2_PCI || PRISM2_PCCARD */ 3389 3389 3390 3390 3391 3391 /* These might at some point be compiled separately and used as separate
+9 -23
drivers/net/wireless/intersil/hostap/hostap_pci.c
··· 403 403 pci_disable_device(pdev); 404 404 } 405 405 406 - 407 - #ifdef CONFIG_PM 408 - static int prism2_pci_suspend(struct pci_dev *pdev, pm_message_t state) 406 + static int __maybe_unused prism2_pci_suspend(struct device *dev_d) 409 407 { 410 - struct net_device *dev = pci_get_drvdata(pdev); 408 + struct net_device *dev = dev_get_drvdata(dev_d); 411 409 412 410 if (netif_running(dev)) { 413 411 netif_stop_queue(dev); 414 412 netif_device_detach(dev); 415 413 } 416 414 prism2_suspend(dev); 417 - pci_save_state(pdev); 418 - pci_disable_device(pdev); 419 - pci_set_power_state(pdev, PCI_D3hot); 420 415 421 416 return 0; 422 417 } 423 418 424 - static int prism2_pci_resume(struct pci_dev *pdev) 419 + static int __maybe_unused prism2_pci_resume(struct device *dev_d) 425 420 { 426 - struct net_device *dev = pci_get_drvdata(pdev); 427 - int err; 421 + struct net_device *dev = dev_get_drvdata(dev_d); 428 422 429 - err = pci_enable_device(pdev); 430 - if (err) { 431 - printk(KERN_ERR "%s: pci_enable_device failed on resume\n", 432 - dev->name); 433 - return err; 434 - } 435 - pci_restore_state(pdev); 436 423 prism2_hw_config(dev, 0); 437 424 if (netif_running(dev)) { 438 425 netif_device_attach(dev); ··· 428 441 429 442 return 0; 430 443 } 431 - #endif /* CONFIG_PM */ 432 - 433 444 434 445 MODULE_DEVICE_TABLE(pci, prism2_pci_id_table); 446 + 447 + static SIMPLE_DEV_PM_OPS(prism2_pci_pm_ops, 448 + prism2_pci_suspend, 449 + prism2_pci_resume); 435 450 436 451 static struct pci_driver prism2_pci_driver = { 437 452 .name = "hostap_pci", 438 453 .id_table = prism2_pci_id_table, 439 454 .probe = prism2_pci_probe, 440 455 .remove = prism2_pci_remove, 441 - #ifdef CONFIG_PM 442 - .suspend = prism2_pci_suspend, 443 - .resume = prism2_pci_resume, 444 - #endif /* CONFIG_PM */ 456 + .driver.pm = &prism2_pci_pm_ops, 445 457 }; 446 458 447 459 module_pci_driver(prism2_pci_driver);
+2 -2
drivers/net/wireless/intersil/orinoco/Kconfig
··· 27 27 28 28 You will also very likely also need the Wireless Tools in order to 29 29 configure your card and that /etc/pcmcia/wireless.opts works : 30 - <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html> 30 + <https://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html> 31 31 32 32 config HERMES_PRISM 33 33 bool "Support Prism 2/2.5 chipset" ··· 120 120 121 121 You will very likely need the Wireless Tools in order to 122 122 configure your card and that /etc/pcmcia/wireless.opts works: 123 - <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>. 123 + <https://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>. 124 124 125 125 config PCMCIA_SPECTRUM 126 126 tristate "Symbol Spectrum24 Trilogy PCMCIA card support"
+3 -3
drivers/net/wireless/intersil/p54/Kconfig
··· 10 10 also need to be enabled in order to support any devices. 11 11 12 12 These devices require softmac firmware which can be found at 13 - <http://wireless.kernel.org/en/users/Drivers/p54> 13 + <http://wireless.wiki.kernel.org/en/users/Drivers/p54> 14 14 15 15 If you choose to build a module, it'll be called p54common. 16 16 ··· 22 22 This driver is for USB isl38xx based wireless cards. 23 23 24 24 These devices require softmac firmware which can be found at 25 - <http://wireless.kernel.org/en/users/Drivers/p54> 25 + <http://wireless.wiki.kernel.org/en/users/Drivers/p54> 26 26 27 27 If you choose to build a module, it'll be called p54usb. 28 28 ··· 36 36 supported by the fullmac driver/firmware. 37 37 38 38 This driver requires softmac firmware which can be found at 39 - <http://wireless.kernel.org/en/users/Drivers/p54> 39 + <http://wireless.wiki.kernel.org/en/users/Drivers/p54> 40 40 41 41 If you choose to build a module, it'll be called p54pci. 42 42
+1 -1
drivers/net/wireless/intersil/p54/fwio.c
··· 132 132 if (priv->fw_var < 0x500) 133 133 wiphy_info(priv->hw->wiphy, 134 134 "you are using an obsolete firmware. " 135 - "visit http://wireless.kernel.org/en/users/Drivers/p54 " 135 + "visit http://wireless.wiki.kernel.org/en/users/Drivers/p54 " 136 136 "and grab one for \"kernel >= 2.6.28\"!\n"); 137 137 138 138 if (priv->fw_var >= 0x300) {
+35 -30
drivers/net/wireless/intersil/p54/p54pci.c
··· 153 153 if (!skb) 154 154 break; 155 155 156 - mapping = pci_map_single(priv->pdev, 156 + mapping = dma_map_single(&priv->pdev->dev, 157 157 skb_tail_pointer(skb), 158 158 priv->common.rx_mtu + 32, 159 - PCI_DMA_FROMDEVICE); 159 + DMA_FROM_DEVICE); 160 160 161 - if (pci_dma_mapping_error(priv->pdev, mapping)) { 161 + if (dma_mapping_error(&priv->pdev->dev, mapping)) { 162 162 dev_kfree_skb_any(skb); 163 163 dev_err(&priv->pdev->dev, 164 164 "RX DMA Mapping error\n"); ··· 215 215 len = priv->common.rx_mtu; 216 216 } 217 217 dma_addr = le32_to_cpu(desc->host_addr); 218 - pci_dma_sync_single_for_cpu(priv->pdev, dma_addr, 219 - priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); 218 + dma_sync_single_for_cpu(&priv->pdev->dev, dma_addr, 219 + priv->common.rx_mtu + 32, 220 + DMA_FROM_DEVICE); 220 221 skb_put(skb, len); 221 222 222 223 if (p54_rx(dev, skb)) { 223 - pci_unmap_single(priv->pdev, dma_addr, 224 - priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); 224 + dma_unmap_single(&priv->pdev->dev, dma_addr, 225 + priv->common.rx_mtu + 32, 226 + DMA_FROM_DEVICE); 225 227 rx_buf[i] = NULL; 226 228 desc->host_addr = cpu_to_le32(0); 227 229 } else { 228 230 skb_trim(skb, 0); 229 - pci_dma_sync_single_for_device(priv->pdev, dma_addr, 230 - priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); 231 + dma_sync_single_for_device(&priv->pdev->dev, dma_addr, 232 + priv->common.rx_mtu + 32, 233 + DMA_FROM_DEVICE); 231 234 desc->len = cpu_to_le16(priv->common.rx_mtu + 32); 232 235 } 233 236 ··· 261 258 skb = tx_buf[i]; 262 259 tx_buf[i] = NULL; 263 260 264 - pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr), 265 - le16_to_cpu(desc->len), PCI_DMA_TODEVICE); 261 + dma_unmap_single(&priv->pdev->dev, 262 + le32_to_cpu(desc->host_addr), 263 + le16_to_cpu(desc->len), DMA_TO_DEVICE); 266 264 267 265 desc->host_addr = 0; 268 266 desc->device_addr = 0; ··· 338 334 idx = le32_to_cpu(ring_control->host_idx[1]); 339 335 i = idx % ARRAY_SIZE(ring_control->tx_data); 340 336 341 - mapping = 
pci_map_single(priv->pdev, skb->data, skb->len, 342 - PCI_DMA_TODEVICE); 343 - if (pci_dma_mapping_error(priv->pdev, mapping)) { 337 + mapping = dma_map_single(&priv->pdev->dev, skb->data, skb->len, 338 + DMA_TO_DEVICE); 339 + if (dma_mapping_error(&priv->pdev->dev, mapping)) { 344 340 spin_unlock_irqrestore(&priv->lock, flags); 345 341 p54_free_skb(dev, skb); 346 342 dev_err(&priv->pdev->dev, "TX DMA mapping error\n"); ··· 382 378 for (i = 0; i < ARRAY_SIZE(priv->rx_buf_data); i++) { 383 379 desc = &ring_control->rx_data[i]; 384 380 if (desc->host_addr) 385 - pci_unmap_single(priv->pdev, 381 + dma_unmap_single(&priv->pdev->dev, 386 382 le32_to_cpu(desc->host_addr), 387 383 priv->common.rx_mtu + 32, 388 - PCI_DMA_FROMDEVICE); 384 + DMA_FROM_DEVICE); 389 385 kfree_skb(priv->rx_buf_data[i]); 390 386 priv->rx_buf_data[i] = NULL; 391 387 } ··· 393 389 for (i = 0; i < ARRAY_SIZE(priv->rx_buf_mgmt); i++) { 394 390 desc = &ring_control->rx_mgmt[i]; 395 391 if (desc->host_addr) 396 - pci_unmap_single(priv->pdev, 392 + dma_unmap_single(&priv->pdev->dev, 397 393 le32_to_cpu(desc->host_addr), 398 394 priv->common.rx_mtu + 32, 399 - PCI_DMA_FROMDEVICE); 395 + DMA_FROM_DEVICE); 400 396 kfree_skb(priv->rx_buf_mgmt[i]); 401 397 priv->rx_buf_mgmt[i] = NULL; 402 398 } ··· 404 400 for (i = 0; i < ARRAY_SIZE(priv->tx_buf_data); i++) { 405 401 desc = &ring_control->tx_data[i]; 406 402 if (desc->host_addr) 407 - pci_unmap_single(priv->pdev, 403 + dma_unmap_single(&priv->pdev->dev, 408 404 le32_to_cpu(desc->host_addr), 409 405 le16_to_cpu(desc->len), 410 - PCI_DMA_TODEVICE); 406 + DMA_TO_DEVICE); 411 407 412 408 p54_free_skb(dev, priv->tx_buf_data[i]); 413 409 priv->tx_buf_data[i] = NULL; ··· 416 412 for (i = 0; i < ARRAY_SIZE(priv->tx_buf_mgmt); i++) { 417 413 desc = &ring_control->tx_mgmt[i]; 418 414 if (desc->host_addr) 419 - pci_unmap_single(priv->pdev, 415 + dma_unmap_single(&priv->pdev->dev, 420 416 le32_to_cpu(desc->host_addr), 421 417 le16_to_cpu(desc->len), 422 - 
PCI_DMA_TODEVICE); 418 + DMA_TO_DEVICE); 423 419 424 420 p54_free_skb(dev, priv->tx_buf_mgmt[i]); 425 421 priv->tx_buf_mgmt[i] = NULL; ··· 572 568 goto err_disable_dev; 573 569 } 574 570 575 - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 571 + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 576 572 if (!err) 577 - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 573 + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 578 574 if (err) { 579 575 dev_err(&pdev->dev, "No suitable DMA available\n"); 580 576 goto err_free_reg; ··· 607 603 goto err_free_dev; 608 604 } 609 605 610 - priv->ring_control = pci_alloc_consistent(pdev, sizeof(*priv->ring_control), 611 - &priv->ring_control_dma); 606 + priv->ring_control = dma_alloc_coherent(&pdev->dev, 607 + sizeof(*priv->ring_control), 608 + &priv->ring_control_dma, GFP_KERNEL); 612 609 if (!priv->ring_control) { 613 610 dev_err(&pdev->dev, "Cannot allocate rings\n"); 614 611 err = -ENOMEM; ··· 628 623 if (!err) 629 624 return 0; 630 625 631 - pci_free_consistent(pdev, sizeof(*priv->ring_control), 632 - priv->ring_control, priv->ring_control_dma); 626 + dma_free_coherent(&pdev->dev, sizeof(*priv->ring_control), 627 + priv->ring_control, priv->ring_control_dma); 633 628 634 629 err_iounmap: 635 630 iounmap(priv->map); ··· 658 653 wait_for_completion(&priv->fw_loaded); 659 654 p54_unregister_common(dev); 660 655 release_firmware(priv->firmware); 661 - pci_free_consistent(pdev, sizeof(*priv->ring_control), 662 - priv->ring_control, priv->ring_control_dma); 656 + dma_free_coherent(&pdev->dev, sizeof(*priv->ring_control), 657 + priv->ring_control, priv->ring_control_dma); 663 658 iounmap(priv->map); 664 659 pci_release_regions(pdev); 665 660 pci_disable_device(pdev);
+1 -1
drivers/net/wireless/intersil/p54/p54usb.c
··· 36 36 * Note: 37 37 * 38 38 * Always update our wiki's device list (located at: 39 - * http://wireless.kernel.org/en/users/Drivers/p54/devices ), 39 + * http://wireless.wiki.kernel.org/en/users/Drivers/p54/devices ), 40 40 * whenever you add a new device. 41 41 */ 42 42
+1 -1
drivers/net/wireless/intersil/prism54/isl_oid.h
··· 143 143 * together with a CSMA contention. Without this all frames are 144 144 * sent with a CSMA contention. 145 145 * Bibliography: 146 - * http://www.hpl.hp.com/personal/Jean_Tourrilhes/Papers/Packet.Frame.Grouping.html 146 + * https://www.hpl.hp.com/personal/Jean_Tourrilhes/Papers/Packet.Frame.Grouping.html 147 147 */ 148 148 enum dot11_maxframeburst_t { 149 149 /* Values for DOT11_OID_MAXFRAMEBURST */
+14 -16
drivers/net/wireless/intersil/prism54/islpci_dev.c
··· 636 636 */ 637 637 638 638 /* perform the allocation */ 639 - priv->driver_mem_address = pci_alloc_consistent(priv->pdev, 640 - HOST_MEM_BLOCK, 641 - &priv-> 642 - device_host_address); 639 + priv->driver_mem_address = dma_alloc_coherent(&priv->pdev->dev, 640 + HOST_MEM_BLOCK, 641 + &priv->device_host_address, 642 + GFP_KERNEL); 643 643 644 644 if (!priv->driver_mem_address) { 645 645 /* error allocating the block of PCI memory */ ··· 692 692 693 693 /* map the allocated skb data area to pci */ 694 694 priv->pci_map_rx_address[counter] = 695 - pci_map_single(priv->pdev, (void *) skb->data, 696 - MAX_FRAGMENT_SIZE_RX + 2, 697 - PCI_DMA_FROMDEVICE); 698 - if (pci_dma_mapping_error(priv->pdev, 699 - priv->pci_map_rx_address[counter])) { 695 + dma_map_single(&priv->pdev->dev, (void *)skb->data, 696 + MAX_FRAGMENT_SIZE_RX + 2, DMA_FROM_DEVICE); 697 + if (dma_mapping_error(&priv->pdev->dev, priv->pci_map_rx_address[counter])) { 700 698 priv->pci_map_rx_address[counter] = 0; 701 699 /* error mapping the buffer to device 702 700 accessible memory address */ ··· 725 727 726 728 /* free consistent DMA area... 
*/ 727 729 if (priv->driver_mem_address) 728 - pci_free_consistent(priv->pdev, HOST_MEM_BLOCK, 729 - priv->driver_mem_address, 730 - priv->device_host_address); 730 + dma_free_coherent(&priv->pdev->dev, HOST_MEM_BLOCK, 731 + priv->driver_mem_address, 732 + priv->device_host_address); 731 733 732 734 /* clear some dangling pointers */ 733 735 priv->driver_mem_address = NULL; ··· 739 741 for (counter = 0; counter < ISL38XX_CB_MGMT_QSIZE; counter++) { 740 742 struct islpci_membuf *buf = &priv->mgmt_rx[counter]; 741 743 if (buf->pci_addr) 742 - pci_unmap_single(priv->pdev, buf->pci_addr, 743 - buf->size, PCI_DMA_FROMDEVICE); 744 + dma_unmap_single(&priv->pdev->dev, buf->pci_addr, 745 + buf->size, DMA_FROM_DEVICE); 744 746 buf->pci_addr = 0; 745 747 kfree(buf->mem); 746 748 buf->size = 0; ··· 750 752 /* clean up data rx buffers */ 751 753 for (counter = 0; counter < ISL38XX_CB_RX_QSIZE; counter++) { 752 754 if (priv->pci_map_rx_address[counter]) 753 - pci_unmap_single(priv->pdev, 755 + dma_unmap_single(&priv->pdev->dev, 754 756 priv->pci_map_rx_address[counter], 755 757 MAX_FRAGMENT_SIZE_RX + 2, 756 - PCI_DMA_FROMDEVICE); 758 + DMA_FROM_DEVICE); 757 759 priv->pci_map_rx_address[counter] = 0; 758 760 759 761 if (priv->data_low_rx[counter])
+10 -14
drivers/net/wireless/intersil/prism54/islpci_eth.c
··· 50 50 skb, skb->data, skb->len, skb->truesize); 51 51 #endif 52 52 53 - pci_unmap_single(priv->pdev, 53 + dma_unmap_single(&priv->pdev->dev, 54 54 priv->pci_map_tx_address[index], 55 - skb->len, PCI_DMA_TODEVICE); 55 + skb->len, DMA_TO_DEVICE); 56 56 dev_kfree_skb_irq(skb); 57 57 skb = NULL; 58 58 } ··· 176 176 #endif 177 177 178 178 /* map the skb buffer to pci memory for DMA operation */ 179 - pci_map_address = pci_map_single(priv->pdev, 180 - (void *) skb->data, skb->len, 181 - PCI_DMA_TODEVICE); 182 - if (pci_dma_mapping_error(priv->pdev, pci_map_address)) { 179 + pci_map_address = dma_map_single(&priv->pdev->dev, (void *)skb->data, 180 + skb->len, DMA_TO_DEVICE); 181 + if (dma_mapping_error(&priv->pdev->dev, pci_map_address)) { 183 182 printk(KERN_WARNING "%s: cannot map buffer to PCI\n", 184 183 ndev->name); 185 184 goto drop_free; ··· 322 323 #endif 323 324 324 325 /* delete the streaming DMA mapping before processing the skb */ 325 - pci_unmap_single(priv->pdev, 326 - priv->pci_map_rx_address[index], 327 - MAX_FRAGMENT_SIZE_RX + 2, PCI_DMA_FROMDEVICE); 326 + dma_unmap_single(&priv->pdev->dev, priv->pci_map_rx_address[index], 327 + MAX_FRAGMENT_SIZE_RX + 2, DMA_FROM_DEVICE); 328 328 329 329 /* update the skb structure and align the buffer */ 330 330 skb_put(skb, size); ··· 429 431 430 432 /* set the streaming DMA mapping for proper PCI bus operation */ 431 433 priv->pci_map_rx_address[index] = 432 - pci_map_single(priv->pdev, (void *) skb->data, 433 - MAX_FRAGMENT_SIZE_RX + 2, 434 - PCI_DMA_FROMDEVICE); 435 - if (pci_dma_mapping_error(priv->pdev, 436 - priv->pci_map_rx_address[index])) { 434 + dma_map_single(&priv->pdev->dev, (void *)skb->data, 435 + MAX_FRAGMENT_SIZE_RX + 2, DMA_FROM_DEVICE); 436 + if (dma_mapping_error(&priv->pdev->dev, priv->pci_map_rx_address[index])) { 437 437 /* error mapping the buffer to device accessible memory address */ 438 438 DEBUG(SHOW_ERROR_MESSAGES, 439 439 "Error mapping DMA address\n");
+14 -25
drivers/net/wireless/intersil/prism54/islpci_hotplug.c
··· 26 26 /* In this order: vendor, device, subvendor, subdevice, class, class_mask, 27 27 * driver_data 28 28 * If you have an update for this please contact prism54-devel@prism54.org 29 - * The latest list can be found at http://wireless.kernel.org/en/users/Drivers/p54 */ 29 + * The latest list can be found at http://wireless.wiki.kernel.org/en/users/Drivers/p54 30 + */ 30 31 static const struct pci_device_id prism54_id_tbl[] = { 31 32 /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */ 32 33 { ··· 64 63 65 64 static int prism54_probe(struct pci_dev *, const struct pci_device_id *); 66 65 static void prism54_remove(struct pci_dev *); 67 - static int prism54_suspend(struct pci_dev *, pm_message_t state); 68 - static int prism54_resume(struct pci_dev *); 66 + static int __maybe_unused prism54_suspend(struct device *); 67 + static int __maybe_unused prism54_resume(struct device *); 68 + 69 + static SIMPLE_DEV_PM_OPS(prism54_pm_ops, prism54_suspend, prism54_resume); 69 70 70 71 static struct pci_driver prism54_driver = { 71 72 .name = DRV_NAME, 72 73 .id_table = prism54_id_tbl, 73 74 .probe = prism54_probe, 74 75 .remove = prism54_remove, 75 - .suspend = prism54_suspend, 76 - .resume = prism54_resume, 76 + .driver.pm = &prism54_pm_ops, 77 77 }; 78 78 79 79 /****************************************************************************** ··· 108 106 } 109 107 110 108 /* enable PCI DMA */ 111 - if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { 109 + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) { 112 110 printk(KERN_ERR "%s: 32-bit PCI DMA not supported", DRV_NAME); 113 111 goto do_pci_disable_device; 114 112 } ··· 245 243 pci_disable_device(pdev); 246 244 } 247 245 248 - static int 249 - prism54_suspend(struct pci_dev *pdev, pm_message_t state) 246 + static int __maybe_unused 247 + prism54_suspend(struct device *dev) 250 248 { 251 - struct net_device *ndev = pci_get_drvdata(pdev); 249 + struct net_device *ndev = dev_get_drvdata(dev); 252 250 islpci_private *priv = 
ndev ? netdev_priv(ndev) : NULL; 253 251 BUG_ON(!priv); 254 - 255 - 256 - pci_save_state(pdev); 257 252 258 253 /* tell the device not to trigger interrupts for now... */ 259 254 isl38xx_disable_interrupts(priv->device_base); ··· 265 266 return 0; 266 267 } 267 268 268 - static int 269 - prism54_resume(struct pci_dev *pdev) 269 + static int __maybe_unused 270 + prism54_resume(struct device *dev) 270 271 { 271 - struct net_device *ndev = pci_get_drvdata(pdev); 272 + struct net_device *ndev = dev_get_drvdata(dev); 272 273 islpci_private *priv = ndev ? netdev_priv(ndev) : NULL; 273 - int err; 274 274 275 275 BUG_ON(!priv); 276 276 277 277 printk(KERN_NOTICE "%s: got resume request\n", ndev->name); 278 - 279 - err = pci_enable_device(pdev); 280 - if (err) { 281 - printk(KERN_ERR "%s: pci_enable_device failed on resume\n", 282 - ndev->name); 283 - return err; 284 - } 285 - 286 - pci_restore_state(pdev); 287 278 288 279 /* alright let's go into the PREBOOT state */ 289 280 islpci_reset(priv, 1);
+11 -10
drivers/net/wireless/intersil/prism54/islpci_mgt.c
··· 115 115 buf->size = MGMT_FRAME_SIZE; 116 116 } 117 117 if (buf->pci_addr == 0) { 118 - buf->pci_addr = pci_map_single(priv->pdev, buf->mem, 118 + buf->pci_addr = dma_map_single(&priv->pdev->dev, 119 + buf->mem, 119 120 MGMT_FRAME_SIZE, 120 - PCI_DMA_FROMDEVICE); 121 - if (pci_dma_mapping_error(priv->pdev, buf->pci_addr)) { 121 + DMA_FROM_DEVICE); 122 + if (dma_mapping_error(&priv->pdev->dev, buf->pci_addr)) { 122 123 printk(KERN_WARNING 123 124 "Failed to make memory DMA'able.\n"); 124 125 return -ENOMEM; ··· 204 203 #endif 205 204 206 205 err = -ENOMEM; 207 - buf.pci_addr = pci_map_single(priv->pdev, buf.mem, frag_len, 208 - PCI_DMA_TODEVICE); 209 - if (pci_dma_mapping_error(priv->pdev, buf.pci_addr)) { 206 + buf.pci_addr = dma_map_single(&priv->pdev->dev, buf.mem, frag_len, 207 + DMA_TO_DEVICE); 208 + if (dma_mapping_error(&priv->pdev->dev, buf.pci_addr)) { 210 209 printk(KERN_WARNING "%s: cannot map PCI memory for mgmt\n", 211 210 ndev->name); 212 211 goto error_free; ··· 303 302 } 304 303 305 304 /* Ensure the results of device DMA are visible to the CPU. */ 306 - pci_dma_sync_single_for_cpu(priv->pdev, buf->pci_addr, 307 - buf->size, PCI_DMA_FROMDEVICE); 305 + dma_sync_single_for_cpu(&priv->pdev->dev, buf->pci_addr, 306 + buf->size, DMA_FROM_DEVICE); 308 307 309 308 /* Perform endianess conversion for PIMFOR header in-place. */ 310 309 header = pimfor_decode_header(buf->mem, frag_len); ··· 415 414 for (; priv->index_mgmt_tx < curr_frag; priv->index_mgmt_tx++) { 416 415 int index = priv->index_mgmt_tx % ISL38XX_CB_MGMT_QSIZE; 417 416 struct islpci_membuf *buf = &priv->mgmt_tx[index]; 418 - pci_unmap_single(priv->pdev, buf->pci_addr, buf->size, 419 - PCI_DMA_TODEVICE); 417 + dma_unmap_single(&priv->pdev->dev, buf->pci_addr, buf->size, 418 + DMA_TO_DEVICE); 420 419 buf->pci_addr = 0; 421 420 kfree(buf->mem); 422 421 buf->mem = NULL;
+1 -1
drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
··· 398 398 new_node->rx_reorder_ptr = kcalloc(win_size, sizeof(void *), 399 399 GFP_KERNEL); 400 400 if (!new_node->rx_reorder_ptr) { 401 - kfree((u8 *) new_node); 401 + kfree(new_node); 402 402 mwifiex_dbg(priv->adapter, ERROR, 403 403 "%s: failed to alloc reorder_ptr\n", __func__); 404 404 return;
+4
drivers/net/wireless/mediatek/mt76/Kconfig
··· 12 12 tristate 13 13 depends on MT76_CORE 14 14 15 + config MT76_SDIO 16 + tristate 17 + depends on MT76_CORE 18 + 15 19 config MT76x02_LIB 16 20 tristate 17 21 select MT76_CORE
+3
drivers/net/wireless/mediatek/mt76/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 2 obj-$(CONFIG_MT76_CORE) += mt76.o 3 3 obj-$(CONFIG_MT76_USB) += mt76-usb.o 4 + obj-$(CONFIG_MT76_SDIO) += mt76-sdio.o 4 5 obj-$(CONFIG_MT76x02_LIB) += mt76x02-lib.o 5 6 obj-$(CONFIG_MT76x02_USB) += mt76x02-usb.o 6 7 ··· 10 9 tx.o agg-rx.o mcu.o 11 10 12 11 mt76-$(CONFIG_PCI) += pci.o 12 + mt76-$(CONFIG_NL80211_TESTMODE) += testmode.o 13 13 14 14 mt76-usb-y := usb.o usb_trace.o 15 + mt76-sdio-y := sdio.o 15 16 16 17 CFLAGS_trace.o := -I$(src) 17 18 CFLAGS_usb_trace.o := -I$(src)
+2 -5
drivers/net/wireless/mediatek/mt76/debugfs.c
··· 9 9 { 10 10 struct mt76_dev *dev = data; 11 11 12 - dev->bus->wr(dev, dev->debugfs_reg, val); 12 + __mt76_wr(dev, dev->debugfs_reg, val); 13 13 return 0; 14 14 } 15 15 ··· 18 18 { 19 19 struct mt76_dev *dev = data; 20 20 21 - *val = dev->bus->rr(dev, dev->debugfs_reg); 21 + *val = __mt76_rr(dev, dev->debugfs_reg); 22 22 return 0; 23 23 } 24 24 ··· 53 53 54 54 mt76_for_each_q_rx(dev, i) { 55 55 struct mt76_queue *q = &dev->q_rx[i]; 56 - 57 - if (!q->ndesc) 58 - continue; 59 56 60 57 queued = mt76_is_usb(dev) ? q->ndesc - q->queued : q->queued; 61 58 seq_printf(s, "%d: queued=%d head=%d tail=%d\n",
+6
drivers/net/wireless/mediatek/mt76/dma.c
··· 370 370 tx_info.buf[n].len, DMA_TO_DEVICE); 371 371 372 372 free: 373 + #ifdef CONFIG_NL80211_TESTMODE 374 + /* fix tx_done accounting on queue overflow */ 375 + if (tx_info.skb == dev->test.tx_skb) 376 + dev->test.tx_done--; 377 + #endif 378 + 373 379 e.skb = tx_info.skb; 374 380 e.txwi = t; 375 381 dev->drv->tx_complete_skb(dev, qid, &e);
+5
drivers/net/wireless/mediatek/mt76/eeprom.c
··· 74 74 &data[i]); 75 75 } 76 76 77 + #ifdef CONFIG_NL80211_TESTMODE 78 + dev->test.mtd_name = devm_kstrdup(dev->dev, part, GFP_KERNEL); 79 + dev->test.mtd_offset = offset; 80 + #endif 81 + 77 82 out_put_node: 78 83 of_node_put(np); 79 84 return ret;
+32 -5
drivers/net/wireless/mediatek/mt76/mac80211.c
··· 58 58 CHAN5G(132, 5660), 59 59 CHAN5G(136, 5680), 60 60 CHAN5G(140, 5700), 61 + CHAN5G(144, 5720), 61 62 62 63 CHAN5G(149, 5745), 63 64 CHAN5G(153, 5765), 64 65 CHAN5G(157, 5785), 65 66 CHAN5G(161, 5805), 66 67 CHAN5G(165, 5825), 68 + CHAN5G(169, 5845), 69 + CHAN5G(173, 5865), 67 70 }; 68 71 69 72 static const struct ieee80211_tpt_blink mt76_tpt_blink[] = { ··· 282 279 283 280 wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR; 284 281 wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH | 285 - WIPHY_FLAG_SUPPORTS_TDLS; 282 + WIPHY_FLAG_SUPPORTS_TDLS | 283 + WIPHY_FLAG_AP_UAPSD; 286 284 287 285 wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); 288 286 wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS); ··· 293 289 wiphy->available_antennas_rx = dev->phy.antenna_mask; 294 290 295 291 hw->txq_data_size = sizeof(struct mt76_txq); 292 + hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL; 296 293 297 294 if (!hw->max_tx_fragments) 298 295 hw->max_tx_fragments = 16; ··· 305 300 ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS); 306 301 ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); 307 302 ieee80211_hw_set(hw, TX_AMSDU); 308 - ieee80211_hw_set(hw, TX_FRAG_LIST); 303 + 304 + /* TODO: avoid linearization for SDIO */ 305 + if (!mt76_is_sdio(dev)) 306 + ieee80211_hw_set(hw, TX_FRAG_LIST); 307 + 309 308 ieee80211_hw_set(hw, MFP_CAPABLE); 310 309 ieee80211_hw_set(hw, AP_LINK_PS); 311 310 ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); ··· 441 432 442 433 tasklet_init(&dev->tx_tasklet, mt76_tx_tasklet, (unsigned long)dev); 443 434 435 + dev->wq = alloc_ordered_workqueue("mt76", 0); 436 + if (!dev->wq) { 437 + ieee80211_free_hw(hw); 438 + return NULL; 439 + } 440 + 444 441 return dev; 445 442 } 446 443 EXPORT_SYMBOL_GPL(mt76_alloc_device); ··· 500 485 501 486 void mt76_free_device(struct mt76_dev *dev) 502 487 { 503 - mt76_tx_free(dev); 488 + if (dev->wq) { 489 + destroy_workqueue(dev->wq); 490 + dev->wq = NULL; 491 + } 492 + if (mt76_is_mmio(dev)) 493 
+ mt76_tx_free(dev); 504 494 ieee80211_free_hw(dev->hw); 505 495 } 506 496 EXPORT_SYMBOL_GPL(mt76_free_device); ··· 520 500 return; 521 501 } 522 502 503 + #ifdef CONFIG_NL80211_TESTMODE 504 + if (dev->test.state == MT76_TM_STATE_RX_FRAMES) { 505 + dev->test.rx_stats.packets[q]++; 506 + if (status->flag & RX_FLAG_FAILED_FCS_CRC) 507 + dev->test.rx_stats.fcs_error[q]++; 508 + } 509 + #endif 523 510 __skb_queue_tail(&dev->rx_skb[q], skb); 524 511 } 525 512 EXPORT_SYMBOL_GPL(mt76_rx); ··· 564 537 return &msband->chan[idx]; 565 538 } 566 539 567 - static void 568 - mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time) 540 + void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time) 569 541 { 570 542 struct mt76_channel_state *state = phy->chan_state; 571 543 ··· 572 546 phy->survey_time)); 573 547 phy->survey_time = time; 574 548 } 549 + EXPORT_SYMBOL_GPL(mt76_update_survey_active_time); 575 550 576 551 void mt76_update_survey(struct mt76_dev *dev) 577 552 {
+112 -3
drivers/net/wireless/mediatek/mt76/mt76.h
··· 15 15 #include <linux/average.h> 16 16 #include <net/mac80211.h> 17 17 #include "util.h" 18 + #include "testmode.h" 18 19 19 20 #define MT_TX_RING_SIZE 256 20 21 #define MT_MCU_RING_SIZE 32 ··· 34 33 enum mt76_bus_type { 35 34 MT76_BUS_MMIO, 36 35 MT76_BUS_USB, 36 + MT76_BUS_SDIO, 37 37 }; 38 38 39 39 struct mt76_bus_ops { ··· 54 52 55 53 #define mt76_is_usb(dev) ((dev)->bus->type == MT76_BUS_USB) 56 54 #define mt76_is_mmio(dev) ((dev)->bus->type == MT76_BUS_MMIO) 55 + #define mt76_is_sdio(dev) ((dev)->bus->type == MT76_BUS_SDIO) 57 56 58 57 enum mt76_txq_id { 59 58 MT_TXQ_VO = IEEE80211_AC_VO, ··· 97 94 union { 98 95 struct mt76_txwi_cache *txwi; 99 96 struct urb *urb; 97 + int buf_sz; 100 98 }; 101 99 enum mt76_txq_id qid; 102 100 bool skip_buf0:1; ··· 150 146 int len, bool wait_resp); 151 147 int (*mcu_skb_send_msg)(struct mt76_dev *dev, struct sk_buff *skb, 152 148 int cmd, bool wait_resp); 149 + u32 (*mcu_rr)(struct mt76_dev *dev, u32 offset); 150 + void (*mcu_wr)(struct mt76_dev *dev, u32 offset, u32 val); 153 151 int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base, 154 152 const struct mt76_reg_pair *rp, int len); 155 153 int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base, ··· 296 290 MT76_STATE_POWER_OFF, 297 291 MT76_STATE_SUSPEND, 298 292 MT76_STATE_ROC, 293 + MT76_STATE_PM, 299 294 }; 300 295 301 296 struct mt76_hw_cap { ··· 429 422 u16 data_len; 430 423 431 424 struct tasklet_struct rx_tasklet; 432 - struct workqueue_struct *wq; 433 425 struct work_struct stat_work; 434 426 435 427 u8 out_ep[__MT_EP_OUT_MAX]; ··· 443 437 u32 base; 444 438 bool burst; 445 439 } mcu; 440 + }; 441 + 442 + struct mt76_sdio { 443 + struct task_struct *tx_kthread; 444 + struct task_struct *kthread; 445 + struct work_struct stat_work; 446 + 447 + unsigned long state; 448 + 449 + struct sdio_func *func; 450 + 451 + struct { 452 + struct mutex lock; 453 + int pse_data_quota; 454 + int ple_data_quota; 455 + int pse_mcu_quota; 456 + int deficit; 457 + } sched; 446 458 }; 447 459 448 
460 struct mt76_mmio { ··· 499 475 s8 chain_signal[IEEE80211_MAX_CHAINS]; 500 476 }; 501 477 478 + struct mt76_testmode_ops { 479 + int (*set_state)(struct mt76_dev *dev, enum mt76_testmode_state state); 480 + int (*set_params)(struct mt76_dev *dev, struct nlattr **tb, 481 + enum mt76_testmode_state new_state); 482 + int (*dump_stats)(struct mt76_dev *dev, struct sk_buff *msg); 483 + }; 484 + 485 + struct mt76_testmode_data { 486 + enum mt76_testmode_state state; 487 + 488 + u32 param_set[DIV_ROUND_UP(NUM_MT76_TM_ATTRS, 32)]; 489 + struct sk_buff *tx_skb; 490 + 491 + u32 tx_count; 492 + u16 tx_msdu_len; 493 + 494 + u8 tx_rate_mode; 495 + u8 tx_rate_idx; 496 + u8 tx_rate_nss; 497 + u8 tx_rate_sgi; 498 + u8 tx_rate_ldpc; 499 + 500 + u8 tx_antenna_mask; 501 + 502 + u32 freq_offset; 503 + 504 + u8 tx_power[4]; 505 + u8 tx_power_control; 506 + 507 + const char *mtd_name; 508 + u32 mtd_offset; 509 + 510 + u32 tx_pending; 511 + u32 tx_queued; 512 + u32 tx_done; 513 + struct { 514 + u64 packets[__MT_RXQ_MAX]; 515 + u64 fcs_error[__MT_RXQ_MAX]; 516 + } rx_stats; 517 + }; 518 + 502 519 struct mt76_phy { 503 520 struct ieee80211_hw *hw; 504 521 struct mt76_dev *dev; ··· 555 490 556 491 struct mt76_sband sband_2g; 557 492 struct mt76_sband sband_5g; 493 + 494 + u32 vif_mask; 558 495 559 496 int txpower_cur; 560 497 u8 antenna_mask; ··· 639 572 640 573 u32 rxfilter; 641 574 575 + #ifdef CONFIG_NL80211_TESTMODE 576 + const struct mt76_testmode_ops *test_ops; 577 + struct mt76_testmode_data test; 578 + #endif 579 + 580 + struct workqueue_struct *wq; 581 + 642 582 union { 643 583 struct mt76_mmio mmio; 644 584 struct mt76_usb usb; 585 + struct mt76_sdio sdio; 645 586 }; 646 587 }; 647 588 ··· 880 805 return nss_delta[nss - 1]; 881 806 } 882 807 808 + static inline bool mt76_testmode_enabled(struct mt76_dev *dev) 809 + { 810 + #ifdef CONFIG_NL80211_TESTMODE 811 + return dev->test.state != MT76_TM_STATE_OFF; 812 + #else 813 + return false; 814 + #endif 815 + } 816 + 883 817 void 
mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb); 884 818 void mt76_tx(struct mt76_phy *dev, struct ieee80211_sta *sta, 885 819 struct mt76_wcid *wcid, struct sk_buff *skb); ··· 908 824 bool mt76_has_tx_pending(struct mt76_phy *phy); 909 825 void mt76_set_channel(struct mt76_phy *phy); 910 826 void mt76_update_survey(struct mt76_dev *dev); 827 + void mt76_update_survey_active_time(struct mt76_phy *phy, ktime_t time); 911 828 int mt76_get_survey(struct ieee80211_hw *hw, int idx, 912 829 struct survey_info *survey); 913 830 void mt76_set_stream_caps(struct mt76_phy *phy, bool vht); ··· 962 877 const u8 *mac); 963 878 void mt76_sw_scan_complete(struct ieee80211_hw *hw, 964 879 struct ieee80211_vif *vif); 880 + int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 881 + void *data, int len); 882 + int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb, 883 + struct netlink_callback *cb, void *data, int len); 884 + int mt76_testmode_set_state(struct mt76_dev *dev, enum mt76_testmode_state state); 885 + 886 + static inline void mt76_testmode_reset(struct mt76_dev *dev, bool disable) 887 + { 888 + #ifdef CONFIG_NL80211_TESTMODE 889 + enum mt76_testmode_state state = MT76_TM_STATE_IDLE; 890 + 891 + if (disable || dev->test.state == MT76_TM_STATE_OFF) 892 + state = MT76_TM_STATE_OFF; 893 + 894 + mt76_testmode_set_state(dev, state); 895 + #endif 896 + } 897 + 965 898 966 899 /* internal */ 967 900 static inline struct ieee80211_hw * ··· 1004 901 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q, 1005 902 struct napi_struct *napi); 1006 903 void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames); 904 + void mt76_testmode_tx_pending(struct mt76_dev *dev); 1007 905 1008 906 /* usb */ 1009 907 static inline bool mt76u_urb_error(struct urb *urb) ··· 1039 935 return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout); 1040 936 } 1041 937 1042 - int mt76u_skb_dma_info(struct sk_buff 
*skb, u32 info); 938 + int mt76_skb_adjust_pad(struct sk_buff *skb); 1043 939 int mt76u_vendor_request(struct mt76_dev *dev, u8 req, 1044 940 u8 req_type, u16 val, u16 offset, 1045 941 void *buf, size_t len); 1046 942 void mt76u_single_wr(struct mt76_dev *dev, const u8 req, 1047 943 const u16 offset, const u32 val); 1048 - void mt76u_deinit(struct mt76_dev *dev); 1049 944 int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf, 1050 945 bool ext); 1051 946 int mt76u_alloc_mcu_queue(struct mt76_dev *dev); ··· 1053 950 void mt76u_stop_rx(struct mt76_dev *dev); 1054 951 int mt76u_resume_rx(struct mt76_dev *dev); 1055 952 void mt76u_queues_deinit(struct mt76_dev *dev); 953 + 954 + int mt76s_init(struct mt76_dev *dev, struct sdio_func *func, 955 + const struct mt76_bus_ops *bus_ops); 956 + int mt76s_alloc_queues(struct mt76_dev *dev); 957 + void mt76s_stop_txrx(struct mt76_dev *dev); 958 + void mt76s_deinit(struct mt76_dev *dev); 1056 959 1057 960 struct sk_buff * 1058 961 mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
+3 -3
drivers/net/wireless/mediatek/mt76/mt7603/main.c
··· 44 44 45 45 mutex_lock(&dev->mt76.mutex); 46 46 47 - mvif->idx = ffs(~dev->vif_mask) - 1; 47 + mvif->idx = ffs(~dev->mphy.vif_mask) - 1; 48 48 if (mvif->idx >= MT7603_MAX_INTERFACES) { 49 49 ret = -ENOSPC; 50 50 goto out; ··· 65 65 } 66 66 67 67 idx = MT7603_WTBL_RESERVED - 1 - mvif->idx; 68 - dev->vif_mask |= BIT(mvif->idx); 68 + dev->mphy.vif_mask |= BIT(mvif->idx); 69 69 INIT_LIST_HEAD(&mvif->sta.poll_list); 70 70 mvif->sta.wcid.idx = idx; 71 71 mvif->sta.wcid.hw_key_idx = -1; ··· 107 107 spin_unlock_bh(&dev->sta_poll_lock); 108 108 109 109 mutex_lock(&dev->mt76.mutex); 110 - dev->vif_mask &= ~BIT(mvif->idx); 110 + dev->mphy.vif_mask &= ~BIT(mvif->idx); 111 111 mutex_unlock(&dev->mt76.mutex); 112 112 } 113 113
-2
drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
··· 108 108 109 109 u32 rxfilter; 110 110 111 - u8 vif_mask; 112 - 113 111 struct list_head sta_poll_list; 114 112 spinlock_t sta_poll_lock; 115 113
+17 -2
drivers/net/wireless/mediatek/mt76/mt7615/Kconfig
··· 28 28 which has the same feature set as a MT7615, but limited to 29 29 2.4 GHz only. 30 30 31 + config MT7663_USB_SDIO_COMMON 32 + tristate 33 + select MT7615_COMMON 34 + 31 35 config MT7663U 32 36 tristate "MediaTek MT7663U (USB) support" 33 37 select MT76_USB 34 - select MT7615_COMMON 38 + select MT7663_USB_SDIO_COMMON 35 39 depends on MAC80211 36 40 depends on USB 37 41 help 38 - This adds support for MT7663U 802.11ax 2x2:2 wireless devices. 42 + This adds support for MT7663U 802.11ac 2x2:2 wireless devices. 43 + 44 + To compile this driver as a module, choose M here. 45 + 46 + config MT7663S 47 + tristate "MediaTek MT7663S (SDIO) support" 48 + select MT76_SDIO 49 + select MT7663_USB_SDIO_COMMON 50 + depends on MAC80211 51 + depends on MMC 52 + help 53 + This adds support for MT7663S 802.11ac 2x2:2 wireless devices. 39 54 40 55 To compile this driver as a module, choose M here.
+6 -1
drivers/net/wireless/mediatek/mt76/mt7615/Makefile
··· 2 2 3 3 obj-$(CONFIG_MT7615_COMMON) += mt7615-common.o 4 4 obj-$(CONFIG_MT7615E) += mt7615e.o 5 + obj-$(CONFIG_MT7663_USB_SDIO_COMMON) += mt7663-usb-sdio-common.o 5 6 obj-$(CONFIG_MT7663U) += mt7663u.o 7 + obj-$(CONFIG_MT7663S) += mt7663s.o 6 8 7 9 CFLAGS_trace.o := -I$(src) 8 10 9 11 mt7615-common-y := main.o init.o mcu.o eeprom.o mac.o \ 10 12 debugfs.o trace.o 13 + mt7615-common-$(CONFIG_NL80211_TESTMODE) += testmode.o 11 14 12 15 mt7615e-y := pci.o pci_init.o dma.o pci_mac.o mmio.o 13 16 mt7615e-$(CONFIG_MT7622_WMAC) += soc.o 14 17 15 - mt7663u-y := usb.o usb_mcu.o usb_init.o 18 + mt7663-usb-sdio-common-y := usb_sdio.o 19 + mt7663u-y := usb.o usb_mcu.o 20 + mt7663s-y := sdio.o sdio_mcu.o sdio_txrx.o
+101 -1
drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
··· 6 6 mt7615_radar_pattern_set(void *data, u64 val) 7 7 { 8 8 struct mt7615_dev *dev = data; 9 + int err; 9 10 10 11 if (!mt7615_wait_for_mcu_init(dev)) 11 12 return 0; 12 13 13 - return mt7615_mcu_rdd_send_pattern(dev); 14 + mt7615_mutex_acquire(dev); 15 + err = mt7615_mcu_rdd_send_pattern(dev); 16 + mt7615_mutex_release(dev); 17 + 18 + return err; 14 19 } 15 20 16 21 DEFINE_DEBUGFS_ATTRIBUTE(fops_radar_pattern, NULL, ··· 50 45 51 46 DEFINE_DEBUGFS_ATTRIBUTE(fops_scs, mt7615_scs_get, 52 47 mt7615_scs_set, "%lld\n"); 48 + 49 + static int 50 + mt7615_pm_set(void *data, u64 val) 51 + { 52 + struct mt7615_dev *dev = data; 53 + 54 + if (!mt7615_wait_for_mcu_init(dev)) 55 + return 0; 56 + 57 + return mt7615_pm_set_enable(dev, val); 58 + } 59 + 60 + static int 61 + mt7615_pm_get(void *data, u64 *val) 62 + { 63 + struct mt7615_dev *dev = data; 64 + 65 + *val = dev->pm.enable; 66 + 67 + return 0; 68 + } 69 + 70 + DEFINE_DEBUGFS_ATTRIBUTE(fops_pm, mt7615_pm_get, mt7615_pm_set, "%lld\n"); 71 + 72 + static int 73 + mt7615_pm_idle_timeout_set(void *data, u64 val) 74 + { 75 + struct mt7615_dev *dev = data; 76 + 77 + dev->pm.idle_timeout = msecs_to_jiffies(val); 78 + 79 + return 0; 80 + } 81 + 82 + static int 83 + mt7615_pm_idle_timeout_get(void *data, u64 *val) 84 + { 85 + struct mt7615_dev *dev = data; 86 + 87 + *val = jiffies_to_msecs(dev->pm.idle_timeout); 88 + 89 + return 0; 90 + } 91 + 92 + DEFINE_DEBUGFS_ATTRIBUTE(fops_pm_idle_timeout, mt7615_pm_idle_timeout_get, 93 + mt7615_pm_idle_timeout_set, "%lld\n"); 53 94 54 95 static int 55 96 mt7615_dbdc_set(void *data, u64 val) ··· 135 84 return 0; 136 85 137 86 dev->fw_debug = val; 87 + 88 + mt7615_mutex_acquire(dev); 138 89 mt7615_mcu_fw_log_2_host(dev, dev->fw_debug ? 
2 : 0); 90 + mt7615_mutex_release(dev); 139 91 140 92 return 0; 141 93 } ··· 165 111 if (!mt7615_wait_for_mcu_init(dev)) 166 112 return 0; 167 113 114 + mt7615_mutex_acquire(dev); 115 + 168 116 skb = alloc_skb(1, GFP_KERNEL); 169 117 if (!skb) 170 118 return -ENOMEM; 171 119 172 120 skb_put(skb, 1); 173 121 mt76_tx_queue_skb_raw(dev, 0, skb, 0); 122 + 123 + mt7615_mutex_release(dev); 174 124 175 125 return 0; 176 126 } ··· 225 167 { 226 168 struct mt7615_dev *dev = file->private; 227 169 170 + mt7615_mutex_acquire(dev); 171 + 228 172 mt7615_ampdu_stat_read_phy(&dev->phy, file); 229 173 mt7615_ampdu_stat_read_phy(mt7615_ext_phy(dev), file); 174 + 175 + mt7615_mutex_release(dev); 230 176 231 177 return 0; 232 178 } ··· 283 221 return 0; 284 222 285 223 /* cpu */ 224 + mt7615_mutex_acquire(dev); 286 225 temp = mt7615_mcu_get_temperature(dev, 0); 226 + mt7615_mutex_release(dev); 227 + 287 228 seq_printf(s, "Temperature: %d\n", temp); 288 229 289 230 return 0; ··· 297 232 { 298 233 struct mt7615_dev *dev = dev_get_drvdata(s->private); 299 234 int i; 235 + 236 + mt7615_mutex_acquire(dev); 300 237 301 238 for (i = 0; i < 16; i++) { 302 239 int j, wmm_idx = i % MT7615_MAX_WMM_SETS; ··· 319 252 } 320 253 seq_printf(s, "AC%d%d: queued=%d\n", wmm_idx, acs, qlen); 321 254 } 255 + 256 + mt7615_mutex_release(dev); 322 257 323 258 return 0; 324 259 } ··· 354 285 return 0; 355 286 } 356 287 288 + static int 289 + mt7615_rf_reg_set(void *data, u64 val) 290 + { 291 + struct mt7615_dev *dev = data; 292 + 293 + mt7615_rf_wr(dev, dev->debugfs_rf_wf, dev->debugfs_rf_reg, val); 294 + 295 + return 0; 296 + } 297 + 298 + static int 299 + mt7615_rf_reg_get(void *data, u64 *val) 300 + { 301 + struct mt7615_dev *dev = data; 302 + 303 + *val = mt7615_rf_rr(dev, dev->debugfs_rf_wf, dev->debugfs_rf_reg); 304 + 305 + return 0; 306 + } 307 + 308 + DEFINE_DEBUGFS_ATTRIBUTE(fops_rf_reg, mt7615_rf_reg_get, mt7615_rf_reg_set, 309 + "0x%08llx\n"); 310 + 357 311 int mt7615_init_debugfs(struct mt7615_dev 
*dev) 358 312 { 359 313 struct dentry *dir; ··· 397 305 debugfs_create_file("scs", 0600, dir, dev, &fops_scs); 398 306 debugfs_create_file("dbdc", 0600, dir, dev, &fops_dbdc); 399 307 debugfs_create_file("fw_debug", 0600, dir, dev, &fops_fw_debug); 308 + debugfs_create_file("runtime-pm", 0600, dir, dev, &fops_pm); 309 + debugfs_create_file("idle-timeout", 0600, dir, dev, 310 + &fops_pm_idle_timeout); 400 311 debugfs_create_devm_seqfile(dev->mt76.dev, "radio", dir, 401 312 mt7615_radio_read); 402 313 debugfs_create_u32("dfs_hw_pattern", 0400, dir, &dev->hw_pattern); ··· 418 323 &fops_reset_test); 419 324 debugfs_create_devm_seqfile(dev->mt76.dev, "temperature", dir, 420 325 mt7615_read_temperature); 326 + 327 + debugfs_create_u32("rf_wfidx", 0600, dir, &dev->debugfs_rf_wf); 328 + debugfs_create_u32("rf_regidx", 0600, dir, &dev->debugfs_rf_reg); 329 + debugfs_create_file_unsafe("rf_regval", 0600, dir, dev, 330 + &fops_rf_reg); 421 331 422 332 return 0; 423 333 }
-4
drivers/net/wireless/mediatek/mt76/mt7615/dma.c
··· 122 122 123 123 mt7615_tx_cleanup(dev); 124 124 125 - rcu_read_lock(); 126 - mt7615_mac_sta_poll(dev); 127 - rcu_read_unlock(); 128 - 129 125 tasklet_schedule(&dev->mt76.tx_tasklet); 130 126 131 127 return 0;
+14 -3
drivers/net/wireless/mediatek/mt76/mt7615/init.c
··· 285 285 if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR)) 286 286 return; 287 287 288 + mt7615_mutex_acquire(dev); 288 289 mt7615_dfs_init_radar_detector(phy); 290 + mt7615_mutex_release(dev); 289 291 } 290 292 291 293 static void ··· 323 321 324 322 ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); 325 323 ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN); 324 + ieee80211_hw_set(hw, WANT_MONITOR_VIF); 326 325 327 326 if (is_mt7615(&phy->dev->mt76)) 328 327 hw->max_tx_fragments = MT_TXP_MAX_BUF_NUM; ··· 408 405 mphy->sband_2g.sband.n_channels = 0; 409 406 mphy->hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL; 410 407 411 - /* The second interface does not get any packets unless it has a vif */ 412 - ieee80211_hw_set(mphy->hw, WANT_MONITOR_VIF); 413 - 414 408 ret = mt76_register_phy(mphy); 415 409 if (ret) 416 410 ieee80211_free_hw(mphy->hw); ··· 437 437 dev->phy.dev = dev; 438 438 dev->phy.mt76 = &dev->mt76.phy; 439 439 dev->mt76.phy.priv = &dev->phy; 440 + 441 + INIT_DELAYED_WORK(&dev->pm.ps_work, mt7615_pm_power_save_work); 442 + INIT_WORK(&dev->pm.wake_work, mt7615_pm_wake_work); 443 + init_completion(&dev->pm.wake_cmpl); 444 + spin_lock_init(&dev->pm.txq_lock); 445 + set_bit(MT76_STATE_PM, &dev->mphy.state); 440 446 INIT_DELAYED_WORK(&dev->phy.mac_work, mt7615_mac_work); 441 447 INIT_DELAYED_WORK(&dev->phy.scan_work, mt7615_scan_work); 442 448 skb_queue_head_init(&dev->phy.scan_event_list); ··· 456 450 timer_setup(&dev->phy.roc_timer, mt7615_roc_timer, 0); 457 451 458 452 mt7615_init_wiphy(hw); 453 + dev->pm.idle_timeout = MT7615_PM_TIMEOUT; 459 454 dev->mphy.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; 460 455 dev->mphy.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; 461 456 dev->mphy.sband_5g.sband.vht_cap.cap |= ··· 464 457 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; 465 458 mt7615_cap_dbdc_disable(dev); 466 459 dev->phy.dfs_state = -1; 460 + 461 + #ifdef CONFIG_NL80211_TESTMODE 462 + dev->mt76.test_ops = &mt7615_testmode_ops; 463 + 
#endif 467 464 } 468 465 EXPORT_SYMBOL_GPL(mt7615_init_device);
+282 -30
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
··· 186 186 status->freq = ieee80211_channel_to_frequency(chfreq, status->band); 187 187 } 188 188 189 + static void mt7615_mac_fill_tm_rx(struct mt7615_dev *dev, __le32 *rxv) 190 + { 191 + #ifdef CONFIG_NL80211_TESTMODE 192 + u32 rxv1 = le32_to_cpu(rxv[0]); 193 + u32 rxv3 = le32_to_cpu(rxv[2]); 194 + u32 rxv4 = le32_to_cpu(rxv[3]); 195 + u32 rxv5 = le32_to_cpu(rxv[4]); 196 + u8 cbw = FIELD_GET(MT_RXV1_FRAME_MODE, rxv1); 197 + u8 mode = FIELD_GET(MT_RXV1_TX_MODE, rxv1); 198 + s16 foe = FIELD_GET(MT_RXV5_FOE, rxv5); 199 + u32 foe_const = (BIT(cbw + 1) & 0xf) * 10000; 200 + 201 + if (!mode) { 202 + /* CCK */ 203 + foe &= ~BIT(11); 204 + foe *= 1000; 205 + foe >>= 11; 206 + } else { 207 + if (foe > 2048) 208 + foe -= 4096; 209 + 210 + foe = (foe * foe_const) >> 15; 211 + } 212 + 213 + dev->test.last_freq_offset = foe; 214 + dev->test.last_rcpi[0] = FIELD_GET(MT_RXV4_RCPI0, rxv4); 215 + dev->test.last_rcpi[1] = FIELD_GET(MT_RXV4_RCPI1, rxv4); 216 + dev->test.last_rcpi[2] = FIELD_GET(MT_RXV4_RCPI2, rxv4); 217 + dev->test.last_rcpi[3] = FIELD_GET(MT_RXV4_RCPI3, rxv4); 218 + dev->test.last_ib_rssi = FIELD_GET(MT_RXV3_IB_RSSI, rxv3); 219 + dev->test.last_wb_rssi = FIELD_GET(MT_RXV3_WB_RSSI, rxv3); 220 + #endif 221 + } 222 + 189 223 static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb) 190 224 { 191 225 struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb; ··· 435 401 status->chain_signal[i]); 436 402 } 437 403 404 + mt7615_mac_fill_tm_rx(dev, rxd); 405 + 438 406 rxd += 6; 439 407 if ((u8 *)rxd - skb->data >= skb->len) 440 408 return -EINVAL; ··· 529 493 struct ieee80211_sta *sta, int pid, 530 494 struct ieee80211_key_conf *key, bool beacon) 531 495 { 496 + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 497 + u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0; 532 498 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 533 499 struct ieee80211_tx_rate *rate = &info->control.rates[0]; 534 - struct ieee80211_hdr 
*hdr = (struct ieee80211_hdr *)skb->data; 500 + bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY; 535 501 bool multicast = is_multicast_ether_addr(hdr->addr1); 536 502 struct ieee80211_vif *vif = info->control.vif; 503 + bool is_mmio = mt76_is_mmio(&dev->mt76); 504 + u32 val, sz_txd = is_mmio ? MT_TXD_SIZE : MT_USB_TXD_SIZE; 537 505 struct mt76_phy *mphy = &dev->mphy; 538 - bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY; 539 - bool is_usb = mt76_is_usb(&dev->mt76); 540 - int tx_count = 8; 541 - u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0; 542 506 __le16 fc = hdr->frame_control; 543 - u32 val, sz_txd = is_usb ? MT_USB_TXD_SIZE : MT_TXD_SIZE; 507 + int tx_count = 8; 544 508 u16 seqno = 0; 545 509 546 510 if (vif) { ··· 566 530 p_fmt = MT_TX_TYPE_FW; 567 531 q_idx = ext_phy ? MT_LMAC_BCN1 : MT_LMAC_BCN0; 568 532 } else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) { 569 - p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT; 533 + p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF; 570 534 q_idx = ext_phy ? MT_LMAC_ALTX1 : MT_LMAC_ALTX0; 571 535 } else { 572 - p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT; 536 + p_fmt = is_mmio ? 
MT_TX_TYPE_CT : MT_TX_TYPE_SF; 573 537 q_idx = wmm_idx * MT7615_MAX_WMM_SETS + 574 538 mt7615_lmac_mapping(dev, skb_get_queue_mapping(skb)); 575 539 } ··· 653 617 } 654 618 655 619 val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count); 656 - if (ieee80211_is_data_qos(hdr->frame_control)) { 657 - seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); 658 - val |= MT_TXD3_SN_VALID; 659 - } else if (ieee80211_is_back_req(hdr->frame_control)) { 660 - struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data; 620 + if (info->flags & IEEE80211_TX_CTL_INJECTED) { 621 + seqno = le16_to_cpu(hdr->seq_ctrl); 661 622 662 - seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num)); 663 - val |= MT_TXD3_SN_VALID; 623 + if (ieee80211_is_back_req(hdr->frame_control)) { 624 + struct ieee80211_bar *bar; 625 + 626 + bar = (struct ieee80211_bar *)skb->data; 627 + seqno = le16_to_cpu(bar->start_seq_num); 628 + } 629 + 630 + val |= MT_TXD3_SN_VALID | 631 + FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno)); 664 632 } 665 - val |= FIELD_PREP(MT_TXD3_SEQ, seqno); 666 633 667 634 txwi[3] |= cpu_to_le32(val); 668 635 ··· 675 636 txwi[7] = FIELD_PREP(MT_TXD7_TYPE, fc_type) | 676 637 FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype) | 677 638 FIELD_PREP(MT_TXD7_SPE_IDX, 0x18); 678 - if (is_usb) 639 + if (!is_mmio) 679 640 txwi[8] = FIELD_PREP(MT_TXD8_L_TYPE, fc_type) | 680 641 FIELD_PREP(MT_TXD8_L_SUB_TYPE, fc_stype); 681 642 ··· 917 878 struct mt7615_dev *dev = phy->dev; 918 879 struct mt7615_wtbl_desc *wd; 919 880 881 + if (work_pending(&dev->wtbl_work)) 882 + return -EBUSY; 883 + 920 884 wd = kzalloc(sizeof(*wd), GFP_ATOMIC); 921 885 if (!wd) 922 886 return -ENOMEM; ··· 930 888 mt7615_mac_update_rate_desc(phy, sta, probe_rate, rates, 931 889 &wd->rate); 932 890 list_add_tail(&wd->node, &dev->wd_head); 933 - queue_work(dev->mt76.usb.wq, &dev->wtbl_work); 891 + queue_work(dev->mt76.wq, &dev->wtbl_work); 934 892 935 893 return 0; 894 + } 895 + 896 + u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev 
*dev, int wcid, u8 tid) 897 + { 898 + u32 addr, val, val2; 899 + u8 offset; 900 + 901 + addr = mt7615_mac_wtbl_addr(dev, wcid) + 11 * 4; 902 + 903 + offset = tid * 12; 904 + addr += 4 * (offset / 32); 905 + offset %= 32; 906 + 907 + val = mt76_rr(dev, addr); 908 + val >>= (tid % 32); 909 + 910 + if (offset > 20) { 911 + addr += 4; 912 + val2 = mt76_rr(dev, addr); 913 + val |= val2 << (32 - offset); 914 + } 915 + 916 + return val & GENMASK(11, 0); 936 917 } 937 918 938 919 void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta, ··· 967 902 struct mt7615_rate_desc rd; 968 903 u32 w5, w27, addr; 969 904 970 - if (mt76_is_usb(&dev->mt76)) { 905 + if (!mt76_is_mmio(&dev->mt76)) { 971 906 mt7615_mac_queue_rate_update(phy, sta, probe_rate, rates); 972 907 return; 973 908 } ··· 1026 961 1027 962 sta->rate_count = 2 * MT7615_RATE_RETRY * n_rates; 1028 963 sta->wcid.tx_info |= MT_WCID_TX_INFO_SET; 964 + sta->rate_probe = !!probe_rate; 1029 965 } 1030 966 EXPORT_SYMBOL_GPL(mt7615_mac_set_rates); 1031 967 ··· 1235 1169 phy = dev->mt76.phy2->priv; 1236 1170 1237 1171 mt7615_mac_set_rates(phy, sta, NULL, sta->rates); 1238 - sta->rate_probe = false; 1239 1172 } 1240 1173 spin_unlock_bh(&dev->mt76.lock); 1241 1174 } else { ··· 1438 1373 } 1439 1374 1440 1375 dev_kfree_skb(skb); 1376 + 1377 + rcu_read_lock(); 1378 + mt7615_mac_sta_poll(dev); 1379 + rcu_read_unlock(); 1380 + 1381 + tasklet_schedule(&dev->mt76.tx_tasklet); 1441 1382 } 1442 1383 1443 1384 void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, ··· 1533 1462 bool ext_phy = phy != &dev->phy; 1534 1463 u32 reg, mask; 1535 1464 1536 - mutex_lock(&dev->mt76.mutex); 1465 + mt7615_mutex_acquire(dev); 1537 1466 1538 1467 if (phy->scs_en == enable) 1539 1468 goto out; ··· 1560 1489 phy->scs_en = enable; 1561 1490 1562 1491 out: 1563 - mutex_unlock(&dev->mt76.mutex); 1492 + mt7615_mutex_release(dev); 1564 1493 } 1565 1494 1566 1495 void mt7615_mac_enable_nf(struct mt7615_dev *dev, bool ext_phy) ··· 
1750 1679 state->noise = -(phy->noise >> 4); 1751 1680 } 1752 1681 1753 - void mt7615_update_channel(struct mt76_dev *mdev) 1682 + static void __mt7615_update_channel(struct mt7615_dev *dev) 1754 1683 { 1755 - struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); 1684 + struct mt76_dev *mdev = &dev->mt76; 1756 1685 1757 1686 mt7615_phy_update_channel(&mdev->phy, 0); 1758 1687 if (mdev->phy2) ··· 1761 1690 /* reset obss airtime */ 1762 1691 mt76_set(dev, MT_WF_RMAC_MIB_TIME0, MT_WF_RMAC_MIB_RXTIME_CLR); 1763 1692 } 1693 + 1694 + void mt7615_update_channel(struct mt76_dev *mdev) 1695 + { 1696 + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); 1697 + 1698 + if (mt7615_pm_wake(dev)) 1699 + return; 1700 + 1701 + __mt7615_update_channel(dev); 1702 + mt7615_pm_power_save_sched(dev); 1703 + } 1764 1704 EXPORT_SYMBOL_GPL(mt7615_update_channel); 1705 + 1706 + static void mt7615_update_survey(struct mt7615_dev *dev) 1707 + { 1708 + struct mt76_dev *mdev = &dev->mt76; 1709 + ktime_t cur_time; 1710 + 1711 + __mt7615_update_channel(dev); 1712 + cur_time = ktime_get_boottime(); 1713 + 1714 + mt76_update_survey_active_time(&mdev->phy, cur_time); 1715 + if (mdev->phy2) 1716 + mt76_update_survey_active_time(mdev->phy2, cur_time); 1717 + } 1765 1718 1766 1719 static void 1767 1720 mt7615_mac_update_mib_stats(struct mt7615_phy *phy) ··· 1835 1740 } 1836 1741 } 1837 1742 1743 + void mt7615_pm_wake_work(struct work_struct *work) 1744 + { 1745 + struct mt7615_dev *dev; 1746 + struct mt76_phy *mphy; 1747 + int i; 1748 + 1749 + dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev, 1750 + pm.wake_work); 1751 + mphy = dev->phy.mt76; 1752 + 1753 + if (mt7615_driver_own(dev)) { 1754 + dev_err(mphy->dev->dev, "failed to wake device\n"); 1755 + goto out; 1756 + } 1757 + 1758 + spin_lock_bh(&dev->pm.txq_lock); 1759 + for (i = 0; i < IEEE80211_NUM_ACS; i++) { 1760 + struct mt7615_sta *msta = dev->pm.tx_q[i].msta; 1761 + struct mt76_wcid *wcid = msta 
? &msta->wcid : NULL; 1762 + struct ieee80211_sta *sta = NULL; 1763 + 1764 + if (!dev->pm.tx_q[i].skb) 1765 + continue; 1766 + 1767 + if (msta && wcid->sta) 1768 + sta = container_of((void *)msta, struct ieee80211_sta, 1769 + drv_priv); 1770 + 1771 + mt76_tx(mphy, sta, wcid, dev->pm.tx_q[i].skb); 1772 + dev->pm.tx_q[i].skb = NULL; 1773 + } 1774 + spin_unlock_bh(&dev->pm.txq_lock); 1775 + 1776 + tasklet_schedule(&dev->mt76.tx_tasklet); 1777 + 1778 + out: 1779 + ieee80211_wake_queues(mphy->hw); 1780 + complete_all(&dev->pm.wake_cmpl); 1781 + } 1782 + 1783 + int mt7615_pm_wake(struct mt7615_dev *dev) 1784 + { 1785 + struct mt76_phy *mphy = dev->phy.mt76; 1786 + 1787 + if (!mt7615_firmware_offload(dev)) 1788 + return 0; 1789 + 1790 + if (!mt76_is_mmio(mphy->dev)) 1791 + return 0; 1792 + 1793 + if (!test_bit(MT76_STATE_PM, &mphy->state)) 1794 + return 0; 1795 + 1796 + if (test_bit(MT76_HW_SCANNING, &mphy->state) || 1797 + test_bit(MT76_HW_SCHED_SCANNING, &mphy->state)) 1798 + return 0; 1799 + 1800 + if (queue_work(dev->mt76.wq, &dev->pm.wake_work)) 1801 + reinit_completion(&dev->pm.wake_cmpl); 1802 + 1803 + if (!wait_for_completion_timeout(&dev->pm.wake_cmpl, 3 * HZ)) { 1804 + ieee80211_wake_queues(mphy->hw); 1805 + return -ETIMEDOUT; 1806 + } 1807 + 1808 + return 0; 1809 + } 1810 + EXPORT_SYMBOL_GPL(mt7615_pm_wake); 1811 + 1812 + void mt7615_pm_power_save_sched(struct mt7615_dev *dev) 1813 + { 1814 + struct mt76_phy *mphy = dev->phy.mt76; 1815 + 1816 + if (!mt7615_firmware_offload(dev)) 1817 + return; 1818 + 1819 + if (!mt76_is_mmio(mphy->dev)) 1820 + return; 1821 + 1822 + if (!dev->pm.enable || !test_bit(MT76_STATE_RUNNING, &mphy->state)) 1823 + return; 1824 + 1825 + dev->pm.last_activity = jiffies; 1826 + 1827 + if (test_bit(MT76_HW_SCANNING, &mphy->state) || 1828 + test_bit(MT76_HW_SCHED_SCANNING, &mphy->state)) 1829 + return; 1830 + 1831 + if (!test_bit(MT76_STATE_PM, &mphy->state)) 1832 + queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, 1833 + 
dev->pm.idle_timeout); 1834 + } 1835 + EXPORT_SYMBOL_GPL(mt7615_pm_power_save_sched); 1836 + 1837 + void mt7615_pm_power_save_work(struct work_struct *work) 1838 + { 1839 + struct mt7615_dev *dev; 1840 + unsigned long delta; 1841 + 1842 + dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev, 1843 + pm.ps_work.work); 1844 + 1845 + delta = dev->pm.idle_timeout; 1846 + if (time_is_after_jiffies(dev->pm.last_activity + delta)) { 1847 + delta = dev->pm.last_activity + delta - jiffies; 1848 + goto out; 1849 + } 1850 + 1851 + if (!mt7615_firmware_own(dev)) 1852 + return; 1853 + out: 1854 + queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta); 1855 + } 1856 + 1857 + static void 1858 + mt7615_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) 1859 + { 1860 + struct mt7615_phy *phy = priv; 1861 + struct mt7615_dev *dev = phy->dev; 1862 + bool ext_phy = phy != &dev->phy; 1863 + 1864 + if (mt7615_mcu_set_bss_pm(dev, vif, dev->pm.enable)) 1865 + return; 1866 + 1867 + if (dev->pm.enable) { 1868 + vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; 1869 + mt76_set(dev, MT_WF_RFCR(ext_phy), 1870 + MT_WF_RFCR_DROP_OTHER_BEACON); 1871 + } else { 1872 + vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER; 1873 + mt76_clear(dev, MT_WF_RFCR(ext_phy), 1874 + MT_WF_RFCR_DROP_OTHER_BEACON); 1875 + } 1876 + } 1877 + 1878 + int mt7615_pm_set_enable(struct mt7615_dev *dev, bool enable) 1879 + { 1880 + struct mt76_phy *mphy = dev->phy.mt76; 1881 + 1882 + if (!mt7615_firmware_offload(dev) || !mt76_is_mmio(&dev->mt76)) 1883 + return -EOPNOTSUPP; 1884 + 1885 + mt7615_mutex_acquire(dev); 1886 + 1887 + if (dev->pm.enable == enable) 1888 + goto out; 1889 + 1890 + dev->pm.enable = enable; 1891 + ieee80211_iterate_active_interfaces(mphy->hw, 1892 + IEEE80211_IFACE_ITER_RESUME_ALL, 1893 + mt7615_pm_interface_iter, mphy->priv); 1894 + out: 1895 + mt7615_mutex_release(dev); 1896 + 1897 + return 0; 1898 + } 1899 + 1838 1900 void mt7615_mac_work(struct work_struct *work) 1839 
1901 { 1840 1902 struct mt7615_phy *phy; ··· 2001 1749 mac_work.work); 2002 1750 mdev = &phy->dev->mt76; 2003 1751 2004 - mutex_lock(&mdev->mutex); 1752 + mt7615_mutex_acquire(phy->dev); 2005 1753 2006 - mt76_update_survey(mdev); 1754 + mt7615_update_survey(phy->dev); 2007 1755 if (++phy->mac_work_count == 5) { 2008 1756 phy->mac_work_count = 0; 2009 1757 ··· 2011 1759 mt7615_mac_scs_check(phy); 2012 1760 } 2013 1761 2014 - mutex_unlock(&mdev->mutex); 1762 + mt7615_mutex_release(phy->dev); 2015 1763 2016 1764 mt76_tx_status_check(mdev, NULL, false); 2017 1765 ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work, ··· 2115 1863 napi_disable(&dev->mt76.napi[1]); 2116 1864 napi_disable(&dev->mt76.tx_napi); 2117 1865 2118 - mutex_lock(&dev->mt76.mutex); 1866 + mt7615_mutex_acquire(dev); 2119 1867 2120 1868 mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_PDMA_STOPPED); 2121 1869 ··· 2148 1896 mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE); 2149 1897 mt7615_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE); 2150 1898 2151 - mutex_unlock(&dev->mt76.mutex); 2152 - 2153 1899 mt7615_update_beacons(dev); 1900 + 1901 + mt7615_mutex_release(dev); 2154 1902 2155 1903 ieee80211_queue_delayed_work(mt76_hw(dev), &dev->phy.mac_work, 2156 1904 MT7615_WATCHDOG_TIME);
+5
drivers/net/wireless/mediatek/mt76/mt7615/mac.h
··· 100 100 #define MT_RXV2_GROUP_ID GENMASK(26, 21) 101 101 #define MT_RXV2_LENGTH GENMASK(20, 0) 102 102 103 + #define MT_RXV3_WB_RSSI GENMASK(31, 24) 104 + #define MT_RXV3_IB_RSSI GENMASK(23, 16) 105 + 103 106 #define MT_RXV4_RCPI3 GENMASK(31, 24) 104 107 #define MT_RXV4_RCPI2 GENMASK(23, 16) 105 108 #define MT_RXV4_RCPI1 GENMASK(15, 8) 106 109 #define MT_RXV4_RCPI0 GENMASK(7, 0) 110 + 111 + #define MT_RXV5_FOE GENMASK(11, 0) 107 112 108 113 #define MT_RXV6_NF3 GENMASK(31, 24) 109 114 #define MT_RXV6_NF2 GENMASK(23, 16)
+269 -63
drivers/net/wireless/mediatek/mt76/mt7615/main.c
··· 24 24 return phy && test_bit(MT76_STATE_RUNNING, &phy->mt76->state); 25 25 } 26 26 27 + static void mt7615_free_pending_tx_skbs(struct mt7615_dev *dev, 28 + struct mt7615_sta *msta) 29 + { 30 + int i; 31 + 32 + spin_lock_bh(&dev->pm.txq_lock); 33 + for (i = 0; i < IEEE80211_NUM_ACS; i++) { 34 + if (msta && dev->pm.tx_q[i].msta != msta) 35 + continue; 36 + 37 + dev_kfree_skb(dev->pm.tx_q[i].skb); 38 + dev->pm.tx_q[i].skb = NULL; 39 + } 40 + spin_unlock_bh(&dev->pm.txq_lock); 41 + } 42 + 27 43 static int mt7615_start(struct ieee80211_hw *hw) 28 44 { 29 45 struct mt7615_dev *dev = mt7615_hw_dev(hw); ··· 49 33 if (!mt7615_wait_for_mcu_init(dev)) 50 34 return -EIO; 51 35 52 - mutex_lock(&dev->mt76.mutex); 36 + mt7615_mutex_acquire(dev); 53 37 54 38 running = mt7615_dev_running(dev); 55 39 ··· 76 60 if (!running) 77 61 mt7615_mac_reset_counters(dev); 78 62 79 - mutex_unlock(&dev->mt76.mutex); 63 + mt7615_mutex_release(dev); 80 64 81 65 return 0; 82 66 } ··· 90 74 del_timer_sync(&phy->roc_timer); 91 75 cancel_work_sync(&phy->roc_work); 92 76 93 - mutex_lock(&dev->mt76.mutex); 77 + cancel_delayed_work_sync(&dev->pm.ps_work); 78 + cancel_work_sync(&dev->pm.wake_work); 79 + 80 + mt7615_free_pending_tx_skbs(dev, NULL); 81 + 82 + mt7615_mutex_acquire(dev); 83 + 84 + mt76_testmode_reset(&dev->mt76, true); 94 85 95 86 clear_bit(MT76_STATE_RUNNING, &phy->mt76->state); 96 87 cancel_delayed_work_sync(&phy->scan_work); ··· 112 89 mt7615_mcu_set_mac_enable(dev, 0, false); 113 90 } 114 91 115 - mutex_unlock(&dev->mt76.mutex); 92 + mt7615_mutex_release(dev); 116 93 } 117 94 118 95 static int get_omac_idx(enum nl80211_iftype type, u32 mask) ··· 158 135 bool ext_phy = phy != &dev->phy; 159 136 int idx, ret = 0; 160 137 161 - mutex_lock(&dev->mt76.mutex); 138 + mt7615_mutex_acquire(dev); 162 139 163 - mvif->idx = ffs(~dev->vif_mask) - 1; 140 + mt76_testmode_reset(&dev->mt76, true); 141 + 142 + if (vif->type == NL80211_IFTYPE_MONITOR && 143 + is_zero_ether_addr(vif->addr)) 144 + 
phy->monitor_vif = vif; 145 + 146 + mvif->idx = ffs(~dev->mphy.vif_mask) - 1; 164 147 if (mvif->idx >= MT7615_MAX_INTERFACES) { 165 148 ret = -ENOSPC; 166 149 goto out; ··· 186 157 else 187 158 mvif->wmm_idx = mvif->idx % MT7615_MAX_WMM_SETS; 188 159 189 - dev->vif_mask |= BIT(mvif->idx); 160 + dev->mphy.vif_mask |= BIT(mvif->idx); 190 161 dev->omac_mask |= BIT(mvif->omac_idx); 191 162 phy->omac_mask |= BIT(mvif->omac_idx); 192 163 ··· 209 180 } 210 181 211 182 ret = mt7615_mcu_add_dev_info(dev, vif, true); 183 + if (ret) 184 + goto out; 185 + 186 + if (dev->pm.enable) { 187 + ret = mt7615_mcu_set_bss_pm(dev, vif, true); 188 + if (ret) 189 + goto out; 190 + 191 + vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; 192 + mt76_set(dev, MT_WF_RFCR(ext_phy), 193 + MT_WF_RFCR_DROP_OTHER_BEACON); 194 + } 212 195 out: 213 - mutex_unlock(&dev->mt76.mutex); 196 + mt7615_mutex_release(dev); 214 197 215 198 return ret; 216 199 } ··· 238 197 239 198 /* TODO: disable beacon for the bss */ 240 199 200 + mt7615_mutex_acquire(dev); 201 + 202 + mt76_testmode_reset(&dev->mt76, true); 203 + if (vif == phy->monitor_vif) 204 + phy->monitor_vif = NULL; 205 + 206 + mt7615_free_pending_tx_skbs(dev, msta); 207 + 208 + if (dev->pm.enable) { 209 + bool ext_phy = phy != &dev->phy; 210 + 211 + mt7615_mcu_set_bss_pm(dev, vif, false); 212 + mt76_clear(dev, MT_WF_RFCR(ext_phy), 213 + MT_WF_RFCR_DROP_OTHER_BEACON); 214 + } 241 215 mt7615_mcu_add_dev_info(dev, vif, false); 242 216 243 217 rcu_assign_pointer(dev->mt76.wcid[idx], NULL); 244 218 if (vif->txq) 245 219 mt76_txq_remove(&dev->mt76, vif->txq); 246 220 247 - mutex_lock(&dev->mt76.mutex); 248 - dev->vif_mask &= ~BIT(mvif->idx); 221 + dev->mphy.vif_mask &= ~BIT(mvif->idx); 249 222 dev->omac_mask &= ~BIT(mvif->omac_idx); 250 223 phy->omac_mask &= ~BIT(mvif->omac_idx); 251 - mutex_unlock(&dev->mt76.mutex); 224 + 225 + mt7615_mutex_release(dev); 252 226 253 227 spin_lock_bh(&dev->sta_poll_lock); 254 228 if (!list_empty(&msta->poll_list)) ··· 290 
234 phy->dfs_state = -1; 291 235 } 292 236 293 - static int mt7615_set_channel(struct mt7615_phy *phy) 237 + int mt7615_set_channel(struct mt7615_phy *phy) 294 238 { 295 239 struct mt7615_dev *dev = phy->dev; 296 240 bool ext_phy = phy != &dev->phy; ··· 298 242 299 243 cancel_delayed_work_sync(&phy->mac_work); 300 244 301 - mutex_lock(&dev->mt76.mutex); 245 + mt7615_mutex_acquire(dev); 246 + 302 247 set_bit(MT76_RESET, &phy->mt76->state); 303 248 304 249 mt7615_init_dfs_state(phy); ··· 317 260 mt7615_mac_set_timing(phy); 318 261 ret = mt7615_dfs_init_radar_detector(phy); 319 262 mt7615_mac_cca_stats_reset(phy); 320 - mt7615_mcu_set_sku_en(phy, true); 263 + mt7615_mcu_set_sku_en(phy, !mt76_testmode_enabled(&dev->mt76)); 321 264 322 265 mt7615_mac_reset_counters(dev); 323 266 phy->noise = 0; ··· 325 268 326 269 out: 327 270 clear_bit(MT76_RESET, &phy->mt76->state); 328 - mutex_unlock(&dev->mt76.mutex); 271 + 272 + mt7615_mutex_release(dev); 329 273 330 274 mt76_txq_schedule_all(phy->mt76); 331 - ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work, 332 - MT7615_WATCHDOG_TIME); 275 + 276 + if (!mt76_testmode_enabled(&dev->mt76)) 277 + ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work, 278 + MT7615_WATCHDOG_TIME); 279 + 333 280 return ret; 334 281 } 335 282 ··· 362 301 wd->key.cmd = cmd; 363 302 364 303 list_add_tail(&wd->node, &dev->wd_head); 365 - queue_work(dev->mt76.usb.wq, &dev->wtbl_work); 304 + queue_work(dev->mt76.wq, &dev->wtbl_work); 366 305 367 306 return 0; 368 307 } ··· 376 315 struct mt7615_sta *msta = sta ? (struct mt7615_sta *)sta->drv_priv : 377 316 &mvif->sta; 378 317 struct mt76_wcid *wcid = &msta->wcid; 379 - int idx = key->keyidx; 318 + int idx = key->keyidx, err; 380 319 381 320 /* The hardware does not support per-STA RX GTK, fallback 382 321 * to software mode for these. 
··· 406 345 return -EOPNOTSUPP; 407 346 } 408 347 348 + mt7615_mutex_acquire(dev); 349 + 409 350 if (cmd == SET_KEY) { 410 351 key->hw_key_idx = wcid->idx; 411 352 wcid->hw_key_idx = idx; ··· 417 354 mt76_wcid_key_setup(&dev->mt76, wcid, 418 355 cmd == SET_KEY ? key : NULL); 419 356 420 - if (mt76_is_usb(&dev->mt76)) 421 - return mt7615_queue_key_update(dev, cmd, msta, key); 357 + if (mt76_is_mmio(&dev->mt76)) 358 + err = mt7615_mac_wtbl_set_key(dev, wcid, key, cmd); 359 + else 360 + err = mt7615_queue_key_update(dev, cmd, msta, key); 422 361 423 - return mt7615_mac_wtbl_set_key(dev, wcid, key, cmd); 362 + mt7615_mutex_release(dev); 363 + 364 + return err; 424 365 } 425 366 426 367 static int mt7615_config(struct ieee80211_hw *hw, u32 changed) ··· 436 369 437 370 if (changed & (IEEE80211_CONF_CHANGE_CHANNEL | 438 371 IEEE80211_CONF_CHANGE_POWER)) { 372 + #ifdef CONFIG_NL80211_TESTMODE 373 + if (dev->mt76.test.state != MT76_TM_STATE_OFF) { 374 + mt7615_mutex_acquire(dev); 375 + mt76_testmode_reset(&dev->mt76, false); 376 + mt7615_mutex_release(dev); 377 + } 378 + #endif 439 379 ieee80211_stop_queues(hw); 440 380 ret = mt7615_set_channel(phy); 441 381 ieee80211_wake_queues(hw); 442 382 } 443 383 444 - mutex_lock(&dev->mt76.mutex); 384 + mt7615_mutex_acquire(dev); 445 385 446 386 if (changed & IEEE80211_CONF_CHANGE_MONITOR) { 387 + mt76_testmode_reset(&dev->mt76, true); 388 + 447 389 if (!(hw->conf.flags & IEEE80211_CONF_MONITOR)) 448 390 phy->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC; 449 391 else ··· 461 385 mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter); 462 386 } 463 387 464 - mutex_unlock(&dev->mt76.mutex); 388 + mt7615_mutex_release(dev); 465 389 466 390 return ret; 467 391 } ··· 472 396 { 473 397 struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; 474 398 struct mt7615_dev *dev = mt7615_hw_dev(hw); 399 + int err; 400 + 401 + mt7615_mutex_acquire(dev); 475 402 476 403 queue = mt7615_lmac_mapping(dev, queue); 477 404 queue += mvif->wmm_idx * 
MT7615_MAX_WMM_SETS; 405 + err = mt7615_mcu_set_wmm(dev, queue, params); 478 406 479 - return mt7615_mcu_set_wmm(dev, queue, params); 407 + mt7615_mutex_release(dev); 408 + 409 + return err; 480 410 } 481 411 482 412 static void mt7615_configure_filter(struct ieee80211_hw *hw, ··· 501 419 MT_WF_RFCR1_DROP_CFACK; 502 420 u32 flags = 0; 503 421 422 + mt7615_mutex_acquire(dev); 423 + 504 424 #define MT76_FILTER(_flag, _hw) do { \ 505 425 flags |= *total_flags & FIF_##_flag; \ 506 426 phy->rxfilter &= ~(_hw); \ 507 - phy->rxfilter |= !(flags & FIF_##_flag) * (_hw); \ 427 + if (!mt76_testmode_enabled(&dev->mt76)) \ 428 + phy->rxfilter |= !(flags & FIF_##_flag) * (_hw);\ 508 429 } while (0) 509 430 510 431 phy->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS | ··· 540 455 mt76_clear(dev, MT_WF_RFCR1(band), ctl_flags); 541 456 else 542 457 mt76_set(dev, MT_WF_RFCR1(band), ctl_flags); 458 + 459 + mt7615_mutex_release(dev); 543 460 } 544 461 545 462 static void mt7615_bss_info_changed(struct ieee80211_hw *hw, ··· 552 465 struct mt7615_dev *dev = mt7615_hw_dev(hw); 553 466 struct mt7615_phy *phy = mt7615_hw_phy(hw); 554 467 555 - mutex_lock(&dev->mt76.mutex); 468 + mt7615_mutex_acquire(dev); 556 469 557 470 if (changed & BSS_CHANGED_ERP_SLOT) { 558 471 int slottime = info->use_short_slot ? 
9 : 20; ··· 578 491 if (changed & BSS_CHANGED_PS) 579 492 mt7615_mcu_set_vif_ps(dev, vif); 580 493 581 - mutex_unlock(&dev->mt76.mutex); 494 + if (changed & BSS_CHANGED_ARP_FILTER) 495 + mt7615_mcu_update_arp_filter(hw, vif, info); 496 + 497 + mt7615_mutex_release(dev); 582 498 } 583 499 584 500 static void ··· 591 501 { 592 502 struct mt7615_dev *dev = mt7615_hw_dev(hw); 593 503 594 - mutex_lock(&dev->mt76.mutex); 504 + mt7615_mutex_acquire(dev); 595 505 mt7615_mcu_add_beacon(dev, hw, vif, true); 596 - mutex_unlock(&dev->mt76.mutex); 506 + mt7615_mutex_release(dev); 597 507 } 598 508 599 509 int mt7615_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, ··· 602 512 struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); 603 513 struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv; 604 514 struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; 605 - int idx; 515 + int idx, err; 606 516 607 517 idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1); 608 518 if (idx < 0) ··· 614 524 msta->wcid.idx = idx; 615 525 msta->wcid.ext_phy = mvif->band_idx; 616 526 527 + err = mt7615_pm_wake(dev); 528 + if (err) 529 + return err; 530 + 617 531 if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) { 618 532 struct mt7615_phy *phy; 619 533 ··· 628 534 MT_WTBL_UPDATE_ADM_COUNT_CLEAR); 629 535 mt7615_mcu_sta_add(dev, vif, sta, true); 630 536 537 + mt7615_pm_power_save_sched(dev); 538 + 631 539 return 0; 632 540 } 633 541 EXPORT_SYMBOL_GPL(mt7615_mac_sta_add); ··· 639 543 { 640 544 struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); 641 545 struct mt7615_sta *msta = (struct mt7615_sta *)sta->drv_priv; 546 + 547 + mt7615_free_pending_tx_skbs(dev, msta); 548 + mt7615_pm_wake(dev); 642 549 643 550 mt7615_mcu_sta_add(dev, vif, sta, false); 644 551 mt7615_mac_wtbl_update(dev, msta->wcid.idx, ··· 658 559 if (!list_empty(&msta->poll_list)) 659 560 list_del_init(&msta->poll_list); 660 561 
spin_unlock_bh(&dev->sta_poll_lock); 562 + 563 + mt7615_pm_power_save_sched(dev); 661 564 } 662 565 EXPORT_SYMBOL_GPL(mt7615_mac_sta_remove); 663 566 ··· 683 582 break; 684 583 } 685 584 msta->n_rates = i; 686 - mt7615_mac_set_rates(phy, msta, NULL, msta->rates); 687 - msta->rate_probe = false; 585 + if (!test_bit(MT76_STATE_PM, &phy->mt76->state)) 586 + mt7615_mac_set_rates(phy, msta, NULL, msta->rates); 688 587 spin_unlock_bh(&dev->mt76.lock); 588 + } 589 + 590 + static void 591 + mt7615_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq) 592 + { 593 + struct mt7615_dev *dev = mt7615_hw_dev(hw); 594 + struct mt7615_phy *phy = mt7615_hw_phy(hw); 595 + struct mt76_phy *mphy = phy->mt76; 596 + 597 + if (!test_bit(MT76_STATE_RUNNING, &mphy->state)) 598 + return; 599 + 600 + if (test_bit(MT76_STATE_PM, &mphy->state)) { 601 + queue_work(dev->mt76.wq, &dev->pm.wake_work); 602 + return; 603 + } 604 + 605 + tasklet_schedule(&dev->mt76.tx_tasklet); 689 606 } 690 607 691 608 static void mt7615_tx(struct ieee80211_hw *hw, ··· 715 596 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 716 597 struct ieee80211_vif *vif = info->control.vif; 717 598 struct mt76_wcid *wcid = &dev->mt76.global_wcid; 599 + struct mt7615_sta *msta = NULL; 600 + int qid; 718 601 719 602 if (control->sta) { 720 - struct mt7615_sta *sta; 721 - 722 - sta = (struct mt7615_sta *)control->sta->drv_priv; 723 - wcid = &sta->wcid; 603 + msta = (struct mt7615_sta *)control->sta->drv_priv; 604 + wcid = &msta->wcid; 724 605 } 725 606 726 607 if (vif && !control->sta) { 727 608 struct mt7615_vif *mvif; 728 609 729 610 mvif = (struct mt7615_vif *)vif->drv_priv; 730 - wcid = &mvif->sta.wcid; 611 + msta = &mvif->sta; 612 + wcid = &msta->wcid; 731 613 } 732 614 733 - mt76_tx(mphy, control->sta, wcid, skb); 615 + if (!test_bit(MT76_STATE_PM, &mphy->state)) { 616 + mt76_tx(mphy, control->sta, wcid, skb); 617 + return; 618 + } 619 + 620 + qid = skb_get_queue_mapping(skb); 621 + if (qid >= MT_TXQ_PSD) 
{ 622 + qid = IEEE80211_AC_BE; 623 + skb_set_queue_mapping(skb, qid); 624 + } 625 + 626 + spin_lock_bh(&dev->pm.txq_lock); 627 + if (!dev->pm.tx_q[qid].skb) { 628 + ieee80211_stop_queues(hw); 629 + dev->pm.tx_q[qid].msta = msta; 630 + dev->pm.tx_q[qid].skb = skb; 631 + queue_work(dev->mt76.wq, &dev->pm.wake_work); 632 + } else { 633 + dev_kfree_skb(skb); 634 + } 635 + spin_unlock_bh(&dev->pm.txq_lock); 734 636 } 735 637 736 638 static int mt7615_set_rts_threshold(struct ieee80211_hw *hw, u32 val) ··· 759 619 struct mt7615_dev *dev = mt7615_hw_dev(hw); 760 620 struct mt7615_phy *phy = mt7615_hw_phy(hw); 761 621 762 - mutex_lock(&dev->mt76.mutex); 622 + mt7615_mutex_acquire(dev); 763 623 mt7615_mcu_set_rts_thresh(phy, val); 764 - mutex_unlock(&dev->mt76.mutex); 624 + mt7615_mutex_release(dev); 765 625 766 626 return 0; 767 627 } ··· 785 645 786 646 mtxq = (struct mt76_txq *)txq->drv_priv; 787 647 788 - mutex_lock(&dev->mt76.mutex); 648 + mt7615_mutex_acquire(dev); 649 + 789 650 switch (action) { 790 651 case IEEE80211_AMPDU_RX_START: 791 652 mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn, ··· 801 660 mtxq->aggr = true; 802 661 mtxq->send_bar = false; 803 662 mt7615_mcu_add_tx_ba(dev, params, true); 663 + ssn = mt7615_mac_get_sta_tid_sn(dev, msta->wcid.idx, tid); 664 + ieee80211_send_bar(vif, sta->addr, tid, 665 + IEEE80211_SN_TO_SEQ(ssn)); 804 666 break; 805 667 case IEEE80211_AMPDU_TX_STOP_FLUSH: 806 668 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: ··· 811 667 mt7615_mcu_add_tx_ba(dev, params, false); 812 668 break; 813 669 case IEEE80211_AMPDU_TX_START: 670 + ssn = mt7615_mac_get_sta_tid_sn(dev, msta->wcid.idx, tid); 671 + params->ssn = ssn; 814 672 mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(ssn); 815 673 ret = IEEE80211_AMPDU_TX_START_IMMEDIATE; 816 674 break; ··· 822 676 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); 823 677 break; 824 678 } 825 - mutex_unlock(&dev->mt76.mutex); 679 + mt7615_mutex_release(dev); 826 680 827 681 return ret; 828 682 } ··· 867 721 
u32 t32[2]; 868 722 } tsf; 869 723 870 - mutex_lock(&dev->mt76.mutex); 724 + mt7615_mutex_acquire(dev); 871 725 872 726 mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */ 873 727 tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0); 874 728 tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1); 875 729 876 - mutex_unlock(&dev->mt76.mutex); 730 + mt7615_mutex_release(dev); 877 731 878 732 return tsf.t64; 733 + } 734 + 735 + static void 736 + mt7615_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 737 + u64 timestamp) 738 + { 739 + struct mt7615_dev *dev = mt7615_hw_dev(hw); 740 + union { 741 + u64 t64; 742 + u32 t32[2]; 743 + } tsf = { .t64 = timestamp, }; 744 + 745 + mt7615_mutex_acquire(dev); 746 + 747 + mt76_wr(dev, MT_LPON_UTTR0, tsf.t32[0]); 748 + mt76_wr(dev, MT_LPON_UTTR1, tsf.t32[1]); 749 + /* TSF software overwrite */ 750 + mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_WRITE); 751 + 752 + mt7615_mutex_release(dev); 879 753 } 880 754 881 755 static void ··· 904 738 struct mt7615_phy *phy = mt7615_hw_phy(hw); 905 739 struct mt7615_dev *dev = phy->dev; 906 740 907 - mutex_lock(&dev->mt76.mutex); 741 + mt7615_mutex_acquire(dev); 908 742 phy->coverage_class = max_t(s16, coverage_class, 0); 909 743 mt7615_mac_set_timing(phy); 910 - mutex_unlock(&dev->mt76.mutex); 744 + mt7615_mutex_release(dev); 911 745 } 912 746 913 747 static int ··· 924 758 if ((BIT(hweight8(tx_ant)) - 1) != tx_ant) 925 759 tx_ant = BIT(ffs(tx_ant) - 1) - 1; 926 760 927 - mutex_lock(&dev->mt76.mutex); 761 + mt7615_mutex_acquire(dev); 928 762 929 763 phy->mt76->antenna_mask = tx_ant; 930 764 if (ext_phy) { ··· 937 771 938 772 mt76_set_stream_caps(phy->mt76, true); 939 773 940 - mutex_unlock(&dev->mt76.mutex); 774 + mt7615_mutex_release(dev); 941 775 942 776 return 0; 943 777 } ··· 960 794 if (!test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state)) 961 795 return; 962 796 797 + mt7615_mutex_acquire(phy->dev); 963 798 ieee80211_iterate_active_interfaces(phy->mt76->hw, 964 799 
IEEE80211_IFACE_ITER_RESUME_ALL, 965 800 mt7615_roc_iter, phy); 801 + mt7615_mutex_release(phy->dev); 966 802 ieee80211_remain_on_channel_expired(phy->mt76->hw); 967 803 } 968 804 ··· 1012 844 mt7615_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 1013 845 struct ieee80211_scan_request *req) 1014 846 { 847 + struct mt7615_dev *dev = mt7615_hw_dev(hw); 1015 848 struct mt76_phy *mphy = hw->priv; 849 + int err; 1016 850 1017 - return mt7615_mcu_hw_scan(mphy->priv, vif, req); 851 + mt7615_mutex_acquire(dev); 852 + err = mt7615_mcu_hw_scan(mphy->priv, vif, req); 853 + mt7615_mutex_release(dev); 854 + 855 + return err; 1018 856 } 1019 857 1020 858 static void 1021 859 mt7615_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) 1022 860 { 861 + struct mt7615_dev *dev = mt7615_hw_dev(hw); 1023 862 struct mt76_phy *mphy = hw->priv; 1024 863 864 + mt7615_mutex_acquire(dev); 1025 865 mt7615_mcu_cancel_hw_scan(mphy->priv, vif); 866 + mt7615_mutex_release(dev); 1026 867 } 1027 868 1028 869 static int ··· 1039 862 struct cfg80211_sched_scan_request *req, 1040 863 struct ieee80211_scan_ies *ies) 1041 864 { 865 + struct mt7615_dev *dev = mt7615_hw_dev(hw); 1042 866 struct mt76_phy *mphy = hw->priv; 1043 867 int err; 1044 868 869 + mt7615_mutex_acquire(dev); 870 + 1045 871 err = mt7615_mcu_sched_scan_req(mphy->priv, vif, req); 1046 872 if (err < 0) 1047 - return err; 873 + goto out; 1048 874 1049 - return mt7615_mcu_sched_scan_enable(mphy->priv, vif, true); 875 + err = mt7615_mcu_sched_scan_enable(mphy->priv, vif, true); 876 + out: 877 + mt7615_mutex_release(dev); 878 + 879 + return err; 1050 880 } 1051 881 1052 882 static int 1053 883 mt7615_stop_sched_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) 1054 884 { 885 + struct mt7615_dev *dev = mt7615_hw_dev(hw); 1055 886 struct mt76_phy *mphy = hw->priv; 887 + int err; 1056 888 1057 - return mt7615_mcu_sched_scan_enable(mphy->priv, vif, false); 889 + mt7615_mutex_acquire(dev); 890 + err = 
mt7615_mcu_sched_scan_enable(mphy->priv, vif, false); 891 + mt7615_mutex_release(dev); 892 + 893 + return err; 1058 894 } 1059 895 1060 896 static int mt7615_remain_on_channel(struct ieee80211_hw *hw, ··· 1082 892 if (test_and_set_bit(MT76_STATE_ROC, &phy->mt76->state)) 1083 893 return 0; 1084 894 895 + mt7615_mutex_acquire(phy->dev); 896 + 1085 897 err = mt7615_mcu_set_roc(phy, vif, chan, duration); 1086 898 if (err < 0) { 1087 899 clear_bit(MT76_STATE_ROC, &phy->mt76->state); 1088 - return err; 900 + goto out; 1089 901 } 1090 902 1091 903 if (!wait_event_timeout(phy->roc_wait, phy->roc_grant, HZ)) { 1092 904 mt7615_mcu_set_roc(phy, vif, NULL, 0); 1093 905 clear_bit(MT76_STATE_ROC, &phy->mt76->state); 1094 - 1095 - return -ETIMEDOUT; 906 + err = -ETIMEDOUT; 1096 907 } 1097 908 1098 - return 0; 909 + out: 910 + mt7615_mutex_release(phy->dev); 911 + 912 + return err; 1099 913 } 1100 914 1101 915 static int mt7615_cancel_remain_on_channel(struct ieee80211_hw *hw, ··· 1113 919 del_timer_sync(&phy->roc_timer); 1114 920 cancel_work_sync(&phy->roc_work); 1115 921 922 + mt7615_mutex_acquire(phy->dev); 1116 923 mt7615_mcu_set_roc(phy, vif, NULL, 0); 924 + mt7615_mutex_release(phy->dev); 1117 925 1118 926 return 0; 1119 927 } ··· 1129 933 bool ext_phy = phy != &dev->phy; 1130 934 int err = 0; 1131 935 1132 - mutex_lock(&dev->mt76.mutex); 936 + cancel_delayed_work_sync(&dev->pm.ps_work); 937 + mt7615_free_pending_tx_skbs(dev, NULL); 938 + 939 + mt7615_mutex_acquire(dev); 1133 940 1134 941 clear_bit(MT76_STATE_RUNNING, &phy->mt76->state); 1135 942 cancel_delayed_work_sync(&phy->scan_work); ··· 1148 949 if (!mt7615_dev_running(dev)) 1149 950 err = mt7615_mcu_set_hif_suspend(dev, true); 1150 951 1151 - mutex_unlock(&dev->mt76.mutex); 952 + mt7615_mutex_release(dev); 1152 953 1153 954 return err; 1154 955 } ··· 1159 960 struct mt7615_phy *phy = mt7615_hw_phy(hw); 1160 961 bool running, ext_phy = phy != &dev->phy; 1161 962 1162 - mutex_lock(&dev->mt76.mutex); 963 + 
mt7615_mutex_acquire(dev); 1163 964 1164 965 running = mt7615_dev_running(dev); 1165 966 set_bit(MT76_STATE_RUNNING, &phy->mt76->state); ··· 1169 970 1170 971 err = mt7615_mcu_set_hif_suspend(dev, false); 1171 972 if (err < 0) { 1172 - mutex_unlock(&dev->mt76.mutex); 973 + mt7615_mutex_release(dev); 1173 974 return err; 1174 975 } 1175 976 } ··· 1183 984 MT7615_WATCHDOG_TIME); 1184 985 mt76_clear(dev, MT_WF_RFCR(ext_phy), MT_WF_RFCR_DROP_OTHER_BEACON); 1185 986 1186 - mutex_unlock(&dev->mt76.mutex); 987 + mt7615_mutex_release(dev); 1187 988 1188 989 return 0; 1189 990 } ··· 1200 1001 struct ieee80211_vif *vif, 1201 1002 struct cfg80211_gtk_rekey_data *data) 1202 1003 { 1004 + struct mt7615_dev *dev = mt7615_hw_dev(hw); 1005 + 1006 + mt7615_mutex_acquire(dev); 1203 1007 mt7615_mcu_update_gtk_rekey(hw, vif, data); 1008 + mt7615_mutex_release(dev); 1204 1009 } 1205 1010 #endif /* CONFIG_PM */ 1206 1011 ··· 1224 1021 .set_key = mt7615_set_key, 1225 1022 .ampdu_action = mt7615_ampdu_action, 1226 1023 .set_rts_threshold = mt7615_set_rts_threshold, 1227 - .wake_tx_queue = mt76_wake_tx_queue, 1024 + .wake_tx_queue = mt7615_wake_tx_queue, 1228 1025 .sta_rate_tbl_update = mt7615_sta_rate_tbl_update, 1229 1026 .sw_scan_start = mt76_sw_scan, 1230 1027 .sw_scan_complete = mt76_sw_scan_complete, ··· 1233 1030 .channel_switch_beacon = mt7615_channel_switch_beacon, 1234 1031 .get_stats = mt7615_get_stats, 1235 1032 .get_tsf = mt7615_get_tsf, 1033 + .set_tsf = mt7615_set_tsf, 1236 1034 .get_survey = mt76_get_survey, 1237 1035 .get_antenna = mt76_get_antenna, 1238 1036 .set_antenna = mt7615_set_antenna, ··· 1244 1040 .sched_scan_stop = mt7615_stop_sched_scan, 1245 1041 .remain_on_channel = mt7615_remain_on_channel, 1246 1042 .cancel_remain_on_channel = mt7615_cancel_remain_on_channel, 1043 + CFG80211_TESTMODE_CMD(mt76_testmode_cmd) 1044 + CFG80211_TESTMODE_DUMP(mt76_testmode_dump) 1247 1045 #ifdef CONFIG_PM 1248 1046 .suspend = mt7615_suspend, 1249 1047 .resume = mt7615_resume,
+313 -58
drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
··· 146 146 mcu_txd->cid = mcu_cmd; 147 147 break; 148 148 case MCU_CE_PREFIX: 149 - mcu_txd->set_query = MCU_Q_SET; 149 + if (cmd & MCU_QUERY_MASK) 150 + mcu_txd->set_query = MCU_Q_QUERY; 151 + else 152 + mcu_txd->set_query = MCU_Q_SET; 150 153 mcu_txd->cid = mcu_cmd; 151 154 break; 152 155 default: 153 156 mcu_txd->cid = MCU_CMD_EXT_CID; 154 - mcu_txd->set_query = MCU_Q_SET; 155 - mcu_txd->ext_cid = cmd; 157 + if (cmd & MCU_QUERY_PREFIX) 158 + mcu_txd->set_query = MCU_Q_QUERY; 159 + else 160 + mcu_txd->set_query = MCU_Q_SET; 161 + mcu_txd->ext_cid = mcu_cmd; 156 162 mcu_txd->ext_cid_ack = 1; 157 163 break; 158 164 } ··· 186 180 struct mt7615_mcu_rxd *rxd = (struct mt7615_mcu_rxd *)skb->data; 187 181 int ret = 0; 188 182 189 - if (seq != rxd->seq) 190 - return -EAGAIN; 183 + if (seq != rxd->seq) { 184 + ret = -EAGAIN; 185 + goto out; 186 + } 191 187 192 188 switch (cmd) { 193 189 case MCU_CMD_PATCH_SEM_CONTROL: ··· 199 191 case MCU_EXT_CMD_GET_TEMP: 200 192 skb_pull(skb, sizeof(*rxd)); 201 193 ret = le32_to_cpu(*(__le32 *)skb->data); 194 + break; 195 + case MCU_EXT_CMD_RF_REG_ACCESS | MCU_QUERY_PREFIX: 196 + skb_pull(skb, sizeof(*rxd)); 197 + ret = le32_to_cpu(*(__le32 *)&skb->data[8]); 202 198 break; 203 199 case MCU_UNI_CMD_DEV_INFO_UPDATE: 204 200 case MCU_UNI_CMD_BSS_INFO_UPDATE: ··· 217 205 ret = le32_to_cpu(event->status); 218 206 break; 219 207 } 208 + case MCU_CMD_REG_READ: { 209 + struct mt7615_mcu_reg_event *event; 210 + 211 + skb_pull(skb, sizeof(*rxd)); 212 + event = (struct mt7615_mcu_reg_event *)skb->data; 213 + ret = (int)le32_to_cpu(event->val); 214 + break; 215 + } 220 216 default: 221 217 break; 222 218 } 219 + out: 223 220 dev_kfree_skb(skb); 224 221 225 222 return ret; ··· 291 270 return __mt76_mcu_skb_send_msg(mdev, skb, cmd, wait_resp); 292 271 } 293 272 EXPORT_SYMBOL_GPL(mt7615_mcu_msg_send); 273 + 274 + u32 mt7615_rf_rr(struct mt7615_dev *dev, u32 wf, u32 reg) 275 + { 276 + struct { 277 + __le32 wifi_stream; 278 + __le32 address; 279 + 
__le32 data; 280 + } req = { 281 + .wifi_stream = cpu_to_le32(wf), 282 + .address = cpu_to_le32(reg), 283 + }; 284 + 285 + return __mt76_mcu_send_msg(&dev->mt76, 286 + MCU_EXT_CMD_RF_REG_ACCESS | MCU_QUERY_PREFIX, 287 + &req, sizeof(req), true); 288 + } 289 + 290 + int mt7615_rf_wr(struct mt7615_dev *dev, u32 wf, u32 reg, u32 val) 291 + { 292 + struct { 293 + __le32 wifi_stream; 294 + __le32 address; 295 + __le32 data; 296 + } req = { 297 + .wifi_stream = cpu_to_le32(wf), 298 + .address = cpu_to_le32(reg), 299 + .data = cpu_to_le32(val), 300 + }; 301 + 302 + return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_RF_REG_ACCESS, &req, 303 + sizeof(req), false); 304 + } 294 305 295 306 static void 296 307 mt7615_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif) ··· 980 927 } 981 928 982 929 static void 930 + mt7615_mcu_sta_uapsd(struct sk_buff *skb, struct ieee80211_vif *vif, 931 + struct ieee80211_sta *sta) 932 + { 933 + struct sta_rec_uapsd *uapsd; 934 + struct tlv *tlv; 935 + 936 + if (vif->type != NL80211_IFTYPE_AP || !sta->wme) 937 + return; 938 + 939 + tlv = mt7615_mcu_add_tlv(skb, STA_REC_APPS, sizeof(*uapsd)); 940 + uapsd = (struct sta_rec_uapsd *)tlv; 941 + 942 + if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) { 943 + uapsd->dac_map |= BIT(3); 944 + uapsd->tac_map |= BIT(3); 945 + } 946 + if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) { 947 + uapsd->dac_map |= BIT(2); 948 + uapsd->tac_map |= BIT(2); 949 + } 950 + if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) { 951 + uapsd->dac_map |= BIT(1); 952 + uapsd->tac_map |= BIT(1); 953 + } 954 + if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) { 955 + uapsd->dac_map |= BIT(0); 956 + uapsd->tac_map |= BIT(0); 957 + } 958 + uapsd->max_sp = sta->max_sp; 959 + } 960 + 961 + static void 983 962 mt7615_mcu_wtbl_ba_tlv(struct sk_buff *skb, 984 963 struct ieee80211_ampdu_params *params, 985 964 bool enable, bool tx, void *sta_wtbl, ··· 1273 1188 return PTR_ERR(sskb); 
1274 1189 1275 1190 mt7615_mcu_sta_basic_tlv(sskb, vif, sta, enable); 1276 - if (enable && sta) 1191 + if (enable && sta) { 1277 1192 mt7615_mcu_sta_ht_tlv(sskb, sta); 1193 + mt7615_mcu_sta_uapsd(sskb, vif, sta); 1194 + } 1278 1195 1279 1196 wtbl_hdr = mt7615_mcu_alloc_wtbl_req(dev, msta, WTBL_RESET_AND_SET, 1280 1197 NULL, &wskb); ··· 1293 1206 skb = enable ? wskb : sskb; 1294 1207 1295 1208 err = __mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, true); 1296 - if (err < 0) 1209 + if (err < 0) { 1210 + skb = enable ? sskb : wskb; 1211 + dev_kfree_skb(skb); 1212 + 1297 1213 return err; 1214 + } 1298 1215 1299 1216 cmd = enable ? MCU_EXT_CMD_STA_REC_UPDATE : MCU_EXT_CMD_WTBL_UPDATE; 1300 1217 skb = enable ? sskb : wskb; ··· 1376 1285 return PTR_ERR(skb); 1377 1286 1378 1287 mt7615_mcu_sta_basic_tlv(skb, vif, sta, enable); 1379 - if (enable && sta) 1288 + if (enable && sta) { 1380 1289 mt7615_mcu_sta_ht_tlv(skb, sta); 1290 + mt7615_mcu_sta_uapsd(skb, vif, sta); 1291 + } 1381 1292 1382 1293 sta_wtbl = mt7615_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv)); 1383 1294 ··· 1522 1429 u8 pad[3]; 1523 1430 } __packed hdr; 1524 1431 struct mt7615_bss_basic_tlv basic; 1432 + struct mt7615_bss_qos_tlv qos; 1525 1433 } basic_req = { 1526 1434 .hdr = { 1527 1435 .bss_idx = mvif->idx, ··· 1537 1443 .wmm_idx = mvif->wmm_idx, 1538 1444 .active = true, /* keep bss deactivated */ 1539 1445 .phymode = 0x38, 1446 + }, 1447 + .qos = { 1448 + .tag = cpu_to_le16(UNI_BSS_INFO_QBSS), 1449 + .len = cpu_to_le16(sizeof(struct mt7615_bss_qos_tlv)), 1450 + .qos = vif->bss_conf.qos, 1540 1451 }, 1541 1452 }; 1542 1453 struct { ··· 1907 1808 1908 1809 int mt7615_driver_own(struct mt7615_dev *dev) 1909 1810 { 1811 + struct mt76_phy *mphy = &dev->mt76.phy; 1910 1812 struct mt76_dev *mdev = &dev->mt76; 1911 - u32 addr; 1813 + int i; 1912 1814 1913 - addr = is_mt7663(mdev) ? 
MT_PCIE_DOORBELL_PUSH : MT_CFG_LPCR_HOST; 1914 - mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN); 1815 + if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state)) 1816 + goto out; 1915 1817 1916 1818 mt7622_trigger_hif_int(dev, true); 1917 1819 1918 - addr = is_mt7663(mdev) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST; 1919 - if (!mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000)) { 1920 - dev_err(dev->mt76.dev, "Timeout for driver own\n"); 1921 - return -EIO; 1820 + for (i = 0; i < MT7615_DRV_OWN_RETRY_COUNT; i++) { 1821 + u32 addr; 1822 + 1823 + addr = is_mt7663(mdev) ? MT_PCIE_DOORBELL_PUSH : MT_CFG_LPCR_HOST; 1824 + mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN); 1825 + 1826 + addr = is_mt7663(mdev) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST; 1827 + if (mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 50)) 1828 + break; 1922 1829 } 1923 1830 1924 1831 mt7622_trigger_hif_int(dev, false); 1832 + 1833 + if (i == MT7615_DRV_OWN_RETRY_COUNT) { 1834 + dev_err(mdev->dev, "driver own failed\n"); 1835 + set_bit(MT76_STATE_PM, &mphy->state); 1836 + return -EIO; 1837 + } 1838 + 1839 + out: 1840 + dev->pm.last_activity = jiffies; 1925 1841 1926 1842 return 0; 1927 1843 } ··· 1944 1830 1945 1831 int mt7615_firmware_own(struct mt7615_dev *dev) 1946 1832 { 1833 + struct mt76_phy *mphy = &dev->mt76.phy; 1834 + int err = 0; 1947 1835 u32 addr; 1948 1836 1949 - addr = is_mt7663(&dev->mt76) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST; 1837 + if (test_and_set_bit(MT76_STATE_PM, &mphy->state)) 1838 + return 0; 1839 + 1950 1840 mt7622_trigger_hif_int(dev, true); 1951 1841 1842 + addr = is_mt7663(&dev->mt76) ? 
MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST; 1952 1843 mt76_wr(dev, addr, MT_CFG_LPCR_HOST_FW_OWN); 1953 1844 1954 - if (!is_mt7615(&dev->mt76) && 1845 + if (is_mt7622(&dev->mt76) && 1955 1846 !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 1956 - MT_CFG_LPCR_HOST_FW_OWN, 3000)) { 1847 + MT_CFG_LPCR_HOST_FW_OWN, 300)) { 1957 1848 dev_err(dev->mt76.dev, "Timeout for firmware own\n"); 1958 - return -EIO; 1849 + clear_bit(MT76_STATE_PM, &mphy->state); 1850 + err = -EIO; 1959 1851 } 1852 + 1960 1853 mt7622_trigger_hif_int(dev, false); 1961 1854 1962 - return 0; 1855 + return err; 1963 1856 } 1964 1857 EXPORT_SYMBOL_GPL(mt7615_firmware_own); 1965 1858 ··· 2846 2725 .center_chan2 = ieee80211_frequency_to_channel(freq2), 2847 2726 }; 2848 2727 2728 + #ifdef CONFIG_NL80211_TESTMODE 2729 + if (dev->mt76.test.state == MT76_TM_STATE_TX_FRAMES && 2730 + dev->mt76.test.tx_antenna_mask) { 2731 + req.tx_streams = hweight8(dev->mt76.test.tx_antenna_mask); 2732 + req.rx_streams_mask = dev->mt76.test.tx_antenna_mask; 2733 + } 2734 + #endif 2735 + 2849 2736 if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) 2850 2737 req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD; 2851 2738 else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) && ··· 2865 2736 req.band_idx = phy != &dev->phy; 2866 2737 req.bw = mt7615_mcu_chan_bw(chandef); 2867 2738 2868 - mt7615_mcu_set_txpower_sku(phy, req.txpower_sku); 2739 + if (mt76_testmode_enabled(&dev->mt76)) 2740 + memset(req.txpower_sku, 0x3f, 49); 2741 + else 2742 + mt7615_mcu_set_txpower_sku(phy, req.txpower_sku); 2869 2743 2870 2744 return __mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true); 2871 2745 } ··· 2884 2752 2885 2753 return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_GET_TEMP, &req, 2886 2754 sizeof(req), true); 2755 + } 2756 + 2757 + int mt7615_mcu_set_test_param(struct mt7615_dev *dev, u8 param, bool test_mode, 2758 + u32 val) 2759 + { 2760 + struct { 2761 + u8 test_mode_en; 2762 + u8 param_idx; 2763 + u8 _rsv[2]; 2764 + 2765 
+ __le32 value; 2766 + 2767 + u8 pad[8]; 2768 + } req = { 2769 + .test_mode_en = test_mode, 2770 + .param_idx = param, 2771 + .value = cpu_to_le32(val), 2772 + }; 2773 + 2774 + return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_ATE_CTRL, &req, 2775 + sizeof(req), false); 2887 2776 } 2888 2777 2889 2778 int mt7615_mcu_set_sku_en(struct mt7615_phy *phy, bool enable) ··· 3485 3332 return ret; 3486 3333 } 3487 3334 3488 - #ifdef CONFIG_PM 3489 - int mt7615_mcu_set_hif_suspend(struct mt7615_dev *dev, bool suspend) 3490 - { 3491 - struct { 3492 - struct { 3493 - u8 hif_type; /* 0x0: HIF_SDIO 3494 - * 0x1: HIF_USB 3495 - * 0x2: HIF_PCIE 3496 - */ 3497 - u8 pad[3]; 3498 - } __packed hdr; 3499 - struct hif_suspend_tlv { 3500 - __le16 tag; 3501 - __le16 len; 3502 - u8 suspend; 3503 - } __packed hif_suspend; 3504 - } req = { 3505 - .hif_suspend = { 3506 - .tag = cpu_to_le16(0), /* 0: UNI_HIF_CTRL_BASIC */ 3507 - .len = cpu_to_le16(sizeof(struct hif_suspend_tlv)), 3508 - .suspend = suspend, 3509 - }, 3510 - }; 3511 - 3512 - if (mt76_is_mmio(&dev->mt76)) 3513 - req.hdr.hif_type = 2; 3514 - else if (mt76_is_usb(&dev->mt76)) 3515 - req.hdr.hif_type = 1; 3516 - 3517 - return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_HIF_CTRL, 3518 - &req, sizeof(req), true); 3519 - } 3520 - EXPORT_SYMBOL_GPL(mt7615_mcu_set_hif_suspend); 3521 - 3522 - static int 3523 - mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif, 3524 - bool enable) 3335 + int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif, 3336 + bool enable) 3525 3337 { 3526 3338 struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; 3527 3339 struct { ··· 3525 3407 return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_BSS_CONNECTED, 3526 3408 &req, sizeof(req), false); 3527 3409 } 3410 + 3411 + #ifdef CONFIG_PM 3412 + int mt7615_mcu_set_hif_suspend(struct mt7615_dev *dev, bool suspend) 3413 + { 3414 + struct { 3415 + struct { 3416 + u8 hif_type; /* 0x0: HIF_SDIO 3417 + * 0x1: HIF_USB 
3418 + * 0x2: HIF_PCIE 3419 + */ 3420 + u8 pad[3]; 3421 + } __packed hdr; 3422 + struct hif_suspend_tlv { 3423 + __le16 tag; 3424 + __le16 len; 3425 + u8 suspend; 3426 + } __packed hif_suspend; 3427 + } req = { 3428 + .hif_suspend = { 3429 + .tag = cpu_to_le16(0), /* 0: UNI_HIF_CTRL_BASIC */ 3430 + .len = cpu_to_le16(sizeof(struct hif_suspend_tlv)), 3431 + .suspend = suspend, 3432 + }, 3433 + }; 3434 + 3435 + if (mt76_is_mmio(&dev->mt76)) 3436 + req.hdr.hif_type = 2; 3437 + else if (mt76_is_usb(&dev->mt76)) 3438 + req.hdr.hif_type = 1; 3439 + 3440 + return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_HIF_CTRL, 3441 + &req, sizeof(req), true); 3442 + } 3443 + EXPORT_SYMBOL_GPL(mt7615_mcu_set_hif_suspend); 3528 3444 3529 3445 static int 3530 3446 mt7615_mcu_set_wow_ctrl(struct mt7615_phy *phy, struct ieee80211_vif *vif, ··· 3694 3542 &req, sizeof(req), true); 3695 3543 } 3696 3544 3545 + static int 3546 + mt7615_mcu_set_arp_filter(struct mt7615_dev *dev, struct ieee80211_vif *vif, 3547 + bool suspend) 3548 + { 3549 + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; 3550 + struct { 3551 + struct { 3552 + u8 bss_idx; 3553 + u8 pad[3]; 3554 + } __packed hdr; 3555 + struct mt7615_arpns_tlv arpns; 3556 + } req = { 3557 + .hdr = { 3558 + .bss_idx = mvif->idx, 3559 + }, 3560 + .arpns = { 3561 + .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP), 3562 + .len = cpu_to_le16(sizeof(struct mt7615_arpns_tlv)), 3563 + .mode = suspend, 3564 + }, 3565 + }; 3566 + 3567 + return __mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD_OFFLOAD, 3568 + &req, sizeof(req), true); 3569 + } 3570 + 3697 3571 void mt7615_mcu_set_suspend_iter(void *priv, u8 *mac, 3698 3572 struct ieee80211_vif *vif) 3699 3573 { ··· 3732 3554 mt7615_mcu_set_bss_pm(phy->dev, vif, suspend); 3733 3555 3734 3556 mt7615_mcu_set_gtk_rekey(phy->dev, vif, suspend); 3557 + mt7615_mcu_set_arp_filter(phy->dev, vif, suspend); 3735 3558 3736 3559 mt7615_mcu_set_suspend_mode(phy->dev, vif, suspend, 1, true); 3737 3560 ··· 3832 3653 
sizeof(req), false); 3833 3654 } 3834 3655 3656 + int mt7615_mcu_update_arp_filter(struct ieee80211_hw *hw, 3657 + struct ieee80211_vif *vif, 3658 + struct ieee80211_bss_conf *info) 3659 + { 3660 + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; 3661 + struct mt7615_dev *dev = mt7615_hw_dev(hw); 3662 + struct sk_buff *skb; 3663 + int i, len = min_t(int, info->arp_addr_cnt, 3664 + IEEE80211_BSS_ARP_ADDR_LIST_LEN); 3665 + struct { 3666 + struct { 3667 + u8 bss_idx; 3668 + u8 pad[3]; 3669 + } __packed hdr; 3670 + struct mt7615_arpns_tlv arp; 3671 + } req_hdr = { 3672 + .hdr = { 3673 + .bss_idx = mvif->idx, 3674 + }, 3675 + .arp = { 3676 + .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP), 3677 + .len = cpu_to_le16(sizeof(struct mt7615_arpns_tlv)), 3678 + .ips_num = len, 3679 + .mode = 2, /* update */ 3680 + .option = 1, 3681 + }, 3682 + }; 3683 + 3684 + if (!mt7615_firmware_offload(dev)) 3685 + return 0; 3686 + 3687 + skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, 3688 + sizeof(req_hdr) + len * sizeof(__be32)); 3689 + if (!skb) 3690 + return -ENOMEM; 3691 + 3692 + skb_put_data(skb, &req_hdr, sizeof(req_hdr)); 3693 + for (i = 0; i < len; i++) { 3694 + u8 *addr = (u8 *)skb_put(skb, sizeof(__be32)); 3695 + 3696 + memcpy(addr, &info->arp_addr_list[i], sizeof(__be32)); 3697 + } 3698 + 3699 + return __mt76_mcu_skb_send_msg(&dev->mt76, skb, 3700 + MCU_UNI_CMD_OFFLOAD, true); 3701 + } 3702 + 3835 3703 int mt7615_mcu_set_p2p_oppps(struct ieee80211_hw *hw, 3836 3704 struct ieee80211_vif *vif) 3837 3705 { ··· 3900 3674 return __mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_P2P_OPPPS, 3901 3675 &req, sizeof(req), false); 3902 3676 } 3677 + 3678 + u32 mt7615_mcu_reg_rr(struct mt76_dev *dev, u32 offset) 3679 + { 3680 + struct { 3681 + __le32 addr; 3682 + __le32 val; 3683 + } __packed req = { 3684 + .addr = cpu_to_le32(offset), 3685 + }; 3686 + 3687 + return __mt76_mcu_send_msg(dev, MCU_CMD_REG_READ, 3688 + &req, sizeof(req), true); 3689 + } 3690 + 
EXPORT_SYMBOL_GPL(mt7615_mcu_reg_rr); 3691 + 3692 + void mt7615_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val) 3693 + { 3694 + struct { 3695 + __le32 addr; 3696 + __le32 val; 3697 + } __packed req = { 3698 + .addr = cpu_to_le32(offset), 3699 + .val = cpu_to_le32(val), 3700 + }; 3701 + 3702 + __mt76_mcu_send_msg(dev, MCU_CMD_REG_WRITE, 3703 + &req, sizeof(req), false); 3704 + } 3705 + EXPORT_SYMBOL_GPL(mt7615_mcu_reg_wr);
+51 -3
drivers/net/wireless/mediatek/mt76/mt7615/mcu.h
··· 81 81 MCU_EVENT_GENERIC = 0x01, 82 82 MCU_EVENT_ACCESS_REG = 0x02, 83 83 MCU_EVENT_MT_PATCH_SEM = 0x04, 84 + MCU_EVENT_REG_ACCESS = 0x05, 84 85 MCU_EVENT_SCAN_DONE = 0x0d, 85 86 MCU_EVENT_ROC = 0x10, 86 87 MCU_EVENT_BSS_ABSENCE = 0x11, ··· 239 238 #define MCU_FW_PREFIX BIT(31) 240 239 #define MCU_UNI_PREFIX BIT(30) 241 240 #define MCU_CE_PREFIX BIT(29) 241 + #define MCU_QUERY_PREFIX BIT(28) 242 242 #define MCU_CMD_MASK ~(MCU_FW_PREFIX | MCU_UNI_PREFIX | \ 243 - MCU_CE_PREFIX) 243 + MCU_CE_PREFIX | MCU_QUERY_PREFIX) 244 + 245 + #define MCU_QUERY_MASK BIT(16) 244 246 245 247 enum { 246 248 MCU_CMD_TARGET_ADDRESS_LEN_REQ = MCU_FW_PREFIX | 0x01, ··· 258 254 }; 259 255 260 256 enum { 257 + MCU_EXT_CMD_RF_REG_ACCESS = 0x02, 261 258 MCU_EXT_CMD_PM_STATE_CTRL = 0x07, 262 259 MCU_EXT_CMD_CHANNEL_SWITCH = 0x08, 263 260 MCU_EXT_CMD_SET_TX_POWER_CTRL = 0x11, ··· 271 266 MCU_EXT_CMD_GET_TEMP = 0x2c, 272 267 MCU_EXT_CMD_WTBL_UPDATE = 0x32, 273 268 MCU_EXT_CMD_SET_RDD_CTRL = 0x3a, 269 + MCU_EXT_CMD_ATE_CTRL = 0x3d, 274 270 MCU_EXT_CMD_PROTECT_CTRL = 0x3e, 275 271 MCU_EXT_CMD_DBDC_CTRL = 0x45, 276 272 MCU_EXT_CMD_MAC_INIT_CTRL = 0x46, ··· 291 285 MCU_UNI_CMD_SUSPEND = MCU_UNI_PREFIX | 0x05, 292 286 MCU_UNI_CMD_OFFLOAD = MCU_UNI_PREFIX | 0x06, 293 287 MCU_UNI_CMD_HIF_CTRL = MCU_UNI_PREFIX | 0x07, 288 + }; 289 + 290 + enum { 291 + MCU_ATE_SET_FREQ_OFFSET = 0xa, 292 + MCU_ATE_SET_TX_POWER_CONTROL = 0x15, 294 293 }; 295 294 296 295 struct mt7615_mcu_uni_event { ··· 432 421 __le16 pad; 433 422 } __packed; 434 423 424 + struct mt7615_mcu_reg_event { 425 + __le32 reg; 426 + __le32 val; 427 + } __packed; 428 + 435 429 struct mt7615_mcu_bss_event { 436 430 u8 bss_idx; 437 431 u8 is_absent; ··· 467 451 */ 468 452 __le16 sta_idx; 469 453 u8 nonht_basic_phy; 454 + u8 pad[3]; 455 + } __packed; 456 + 457 + struct mt7615_bss_qos_tlv { 458 + __le16 tag; 459 + __le16 len; 460 + u8 qos; 470 461 u8 pad[3]; 471 462 } __packed; 472 463 ··· 568 545 u8 rsv1[8]; 569 546 } __packed; 570 547 548 + 
struct mt7615_arpns_tlv { 549 + __le16 tag; 550 + __le16 len; 551 + u8 mode; 552 + u8 ips_num; 553 + u8 option; 554 + u8 pad[1]; 555 + } __packed; 556 + 571 557 /* offload mcu commands */ 572 558 enum { 573 559 MCU_CMD_START_HW_SCAN = MCU_CE_PREFIX | 0x03, ··· 589 557 MCU_CMD_SET_P2P_OPPPS = MCU_CE_PREFIX | 0x33, 590 558 MCU_CMD_SCHED_SCAN_ENABLE = MCU_CE_PREFIX | 0x61, 591 559 MCU_CMD_SCHED_SCAN_REQ = MCU_CE_PREFIX | 0x62, 560 + MCU_CMD_REG_WRITE = MCU_CE_PREFIX | 0xc0, 561 + MCU_CMD_REG_READ = MCU_CE_PREFIX | MCU_QUERY_MASK | 0xc0, 592 562 }; 593 563 594 564 #define MCU_CMD_ACK BIT(0) ··· 603 569 UNI_BSS_INFO_BASIC = 0, 604 570 UNI_BSS_INFO_RLM = 2, 605 571 UNI_BSS_INFO_BCN_CONTENT = 7, 572 + UNI_BSS_INFO_QBSS = 15, 573 + UNI_BSS_INFO_UAPSD = 19, 606 574 }; 607 575 608 576 enum { ··· 616 580 }; 617 581 618 582 enum { 619 - UNI_OFFLOAD_OFFLOAD_ARPNS_IPV4, 620 - UNI_OFFLOAD_OFFLOAD_ARPNS_IPV6, 583 + UNI_OFFLOAD_OFFLOAD_ARP, 584 + UNI_OFFLOAD_OFFLOAD_ND, 621 585 UNI_OFFLOAD_OFFLOAD_GTK_REKEY, 622 586 UNI_OFFLOAD_OFFLOAD_BMC_RPY_DETECT, 623 587 }; ··· 918 882 sizeof(struct sta_rec_basic) + \ 919 883 sizeof(struct sta_rec_ht) + \ 920 884 sizeof(struct sta_rec_vht) + \ 885 + sizeof(struct sta_rec_uapsd) + \ 921 886 sizeof(struct tlv) + \ 922 887 MT7615_WTBL_UPDATE_MAX_SIZE) 923 888 ··· 1006 969 u8 ba_en; 1007 970 __le16 ssn; 1008 971 __le16 winsize; 972 + } __packed; 973 + 974 + struct sta_rec_uapsd { 975 + __le16 tag; 976 + __le16 len; 977 + u8 dac_map; 978 + u8 tac_map; 979 + u8 max_sp; 980 + u8 rsv0; 981 + __le16 listen_interval; 982 + u8 rsv1[2]; 1009 983 } __packed; 1010 984 1011 985 enum {
+47 -2
drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
··· 17 17 [MT_CSR_BASE] = 0x07000, 18 18 [MT_PLE_BASE] = 0x08000, 19 19 [MT_PSE_BASE] = 0x0c000, 20 - [MT_PHY_BASE] = 0x10000, 21 20 [MT_CFG_BASE] = 0x20200, 22 21 [MT_AGG_BASE] = 0x20a00, 23 22 [MT_TMAC_BASE] = 0x21000, ··· 43 44 [MT_CSR_BASE] = 0x07000, 44 45 [MT_PLE_BASE] = 0x08000, 45 46 [MT_PSE_BASE] = 0x0c000, 46 - [MT_PHY_BASE] = 0x10000, 47 + [MT_PP_BASE] = 0x0e000, 47 48 [MT_CFG_BASE] = 0x20000, 48 49 [MT_AGG_BASE] = 0x22000, 49 50 [MT_TMAC_BASE] = 0x24000, ··· 139 140 mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0); 140 141 } 141 142 143 + static u32 __mt7615_reg_addr(struct mt7615_dev *dev, u32 addr) 144 + { 145 + if (addr < 0x100000) 146 + return addr; 147 + 148 + return mt7615_reg_map(dev, addr); 149 + } 150 + 151 + static u32 mt7615_rr(struct mt76_dev *mdev, u32 offset) 152 + { 153 + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); 154 + u32 addr = __mt7615_reg_addr(dev, offset); 155 + 156 + return dev->bus_ops->rr(mdev, addr); 157 + } 158 + 159 + static void mt7615_wr(struct mt76_dev *mdev, u32 offset, u32 val) 160 + { 161 + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); 162 + u32 addr = __mt7615_reg_addr(dev, offset); 163 + 164 + dev->bus_ops->wr(mdev, addr, val); 165 + } 166 + 167 + static u32 mt7615_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val) 168 + { 169 + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); 170 + u32 addr = __mt7615_reg_addr(dev, offset); 171 + 172 + return dev->bus_ops->rmw(mdev, addr, mask, val); 173 + } 174 + 142 175 int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base, 143 176 int irq, const u32 *map) 144 177 { ··· 190 159 .sta_remove = mt7615_mac_sta_remove, 191 160 .update_survey = mt7615_update_channel, 192 161 }; 162 + struct mt76_bus_ops *bus_ops; 193 163 struct ieee80211_ops *ops; 194 164 struct mt7615_dev *dev; 195 165 struct mt76_dev *mdev; ··· 213 181 mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) | 214 182 (mt76_rr(dev, 
MT_HW_REV) & 0xff); 215 183 dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev); 184 + 185 + dev->bus_ops = dev->mt76.bus; 186 + bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops), 187 + GFP_KERNEL); 188 + if (!bus_ops) { 189 + ret = -ENOMEM; 190 + goto error; 191 + } 192 + 193 + bus_ops->rr = mt7615_rr; 194 + bus_ops->wr = mt7615_wr; 195 + bus_ops->rmw = mt7615_rmw; 196 + dev->mt76.bus = bus_ops; 216 197 217 198 ret = devm_request_irq(mdev->dev, irq, mt7615_irq_handler, 218 199 IRQF_SHARED, KBUILD_MODNAME, dev);
+91 -4
drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
··· 4 4 #ifndef __MT7615_H 5 5 #define __MT7615_H 6 6 7 + #include <linux/completion.h> 7 8 #include <linux/interrupt.h> 8 9 #include <linux/ktime.h> 9 10 #include <linux/regmap.h> ··· 19 18 #define MT7615_WTBL_STA (MT7615_WTBL_RESERVED - \ 20 19 MT7615_MAX_INTERFACES) 21 20 21 + #define MT7615_PM_TIMEOUT (HZ / 12) 22 22 #define MT7615_WATCHDOG_TIME (HZ / 10) 23 23 #define MT7615_HW_SCAN_TIMEOUT (HZ / 10) 24 24 #define MT7615_RESET_TIMEOUT (30 * HZ) ··· 32 30 33 31 #define MT7615_RX_RING_SIZE 1024 34 32 #define MT7615_RX_MCU_RING_SIZE 512 33 + 34 + #define MT7615_DRV_OWN_RETRY_COUNT 10 35 35 36 36 #define MT7615_FIRMWARE_CR4 "mediatek/mt7615_cr4.bin" 37 37 #define MT7615_FIRMWARE_N9 "mediatek/mt7615_n9.bin" ··· 173 169 struct mt76_phy *mt76; 174 170 struct mt7615_dev *dev; 175 171 172 + struct ieee80211_vif *monitor_vif; 173 + 176 174 u32 rxfilter; 177 175 u32 omac_mask; 178 176 ··· 246 240 struct mt76_phy mphy; 247 241 }; 248 242 243 + const struct mt76_bus_ops *bus_ops; 249 244 struct tasklet_struct irq_tasklet; 250 245 251 246 struct mt7615_phy phy; 252 - u32 vif_mask; 253 247 u32 omac_mask; 254 248 255 249 u16 chainmask; ··· 286 280 287 281 struct work_struct wtbl_work; 288 282 struct list_head wd_head; 283 + 284 + u32 debugfs_rf_wf; 285 + u32 debugfs_rf_reg; 286 + 287 + #ifdef CONFIG_NL80211_TESTMODE 288 + struct { 289 + u32 *reg_backup; 290 + 291 + s16 last_freq_offset; 292 + u8 last_rcpi[4]; 293 + s8 last_ib_rssi; 294 + s8 last_wb_rssi; 295 + } test; 296 + #endif 297 + 298 + struct { 299 + bool enable; 300 + 301 + spinlock_t txq_lock; 302 + struct { 303 + struct mt7615_sta *msta; 304 + struct sk_buff *skb; 305 + } tx_q[IEEE80211_NUM_ACS]; 306 + 307 + struct work_struct wake_work; 308 + struct completion wake_cmpl; 309 + 310 + struct delayed_work ps_work; 311 + unsigned long last_activity; 312 + unsigned long idle_timeout; 313 + } pm; 289 314 }; 290 315 291 316 enum tx_pkt_queue_idx { ··· 409 372 extern const struct ieee80211_ops mt7615_ops; 410 373 extern 
const u32 mt7615e_reg_map[__MT_BASE_MAX]; 411 374 extern const u32 mt7663e_reg_map[__MT_BASE_MAX]; 375 + extern const u32 mt7663_usb_sdio_reg_map[__MT_BASE_MAX]; 412 376 extern struct pci_driver mt7615_pci_driver; 413 377 extern struct platform_driver mt7622_wmac_driver; 378 + extern const struct mt76_testmode_ops mt7615_testmode_ops; 414 379 415 380 #ifdef CONFIG_MT7622_WMAC 416 381 int mt7622_wmac_init(struct mt7615_dev *dev); ··· 447 408 void mt7615_mac_set_rates(struct mt7615_phy *phy, struct mt7615_sta *sta, 448 409 struct ieee80211_tx_rate *probe_rate, 449 410 struct ieee80211_tx_rate *rates); 411 + int mt7615_pm_set_enable(struct mt7615_dev *dev, bool enable); 412 + void mt7615_pm_wake_work(struct work_struct *work); 413 + int mt7615_pm_wake(struct mt7615_dev *dev); 414 + void mt7615_pm_power_save_sched(struct mt7615_dev *dev); 415 + void mt7615_pm_power_save_work(struct work_struct *work); 450 416 int mt7615_mcu_del_wtbl_all(struct mt7615_dev *dev); 451 417 int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd); 452 418 int mt7615_mcu_set_wmm(struct mt7615_dev *dev, u8 queue, ··· 506 462 return MT7615_WTBL_SIZE; 507 463 } 508 464 465 + static inline void mt7615_mutex_acquire(struct mt7615_dev *dev) 466 + __acquires(&dev->mt76.mutex) 467 + { 468 + mutex_lock(&dev->mt76.mutex); 469 + mt7615_pm_wake(dev); 470 + } 471 + 472 + static inline void mt7615_mutex_release(struct mt7615_dev *dev) 473 + __releases(&dev->mt76.mutex) 474 + { 475 + mt7615_pm_power_save_sched(dev); 476 + mutex_unlock(&dev->mt76.mutex); 477 + } 478 + 509 479 static inline u8 mt7615_lmac_mapping(struct mt7615_dev *dev, u8 ac) 510 480 { 511 481 static const u8 lmac_queue_map[] = { ··· 543 485 struct ieee80211_supported_band *sband); 544 486 void mt7615_phy_init(struct mt7615_dev *dev); 545 487 void mt7615_mac_init(struct mt7615_dev *dev); 488 + int mt7615_set_channel(struct mt7615_phy *phy); 546 489 547 490 int mt7615_mcu_restart(struct mt76_dev *dev); 548 491 void 
mt7615_update_channel(struct mt76_dev *mdev); ··· 575 516 enum mt7615_cipher_type cipher, 576 517 enum set_key_cmd cmd); 577 518 void mt7615_mac_reset_work(struct work_struct *work); 519 + u32 mt7615_mac_get_sta_tid_sn(struct mt7615_dev *dev, int wcid, u8 tid); 578 520 579 521 int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq); 580 522 int mt7615_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data, 581 523 int len, bool wait_resp); 524 + u32 mt7615_rf_rr(struct mt7615_dev *dev, u32 wf, u32 reg); 525 + int mt7615_rf_wr(struct mt7615_dev *dev, u32 wf, u32 reg, u32 val); 582 526 int mt7615_mcu_set_dbdc(struct mt7615_dev *dev); 583 527 int mt7615_mcu_set_eeprom(struct mt7615_dev *dev); 584 528 int mt7615_mcu_set_mac_enable(struct mt7615_dev *dev, int band, bool enable); 585 529 int mt7615_mcu_set_rts_thresh(struct mt7615_phy *phy, u32 val); 586 530 int mt7615_mcu_get_temperature(struct mt7615_dev *dev, int index); 531 + int mt7615_mcu_set_tx_power(struct mt7615_phy *phy); 587 532 void mt7615_mcu_exit(struct mt7615_dev *dev); 588 533 void mt7615_mcu_fill_msg(struct mt7615_dev *dev, struct sk_buff *skb, 589 534 int cmd, int *wait_seq); ··· 626 563 const struct mt7615_dfs_pulse *pulse); 627 564 int mt7615_mcu_set_radar_th(struct mt7615_dev *dev, int index, 628 565 const struct mt7615_dfs_pattern *pattern); 566 + int mt7615_mcu_set_test_param(struct mt7615_dev *dev, u8 param, bool test_mode, 567 + u32 val); 629 568 int mt7615_mcu_set_sku_en(struct mt7615_phy *phy, bool enable); 630 569 int mt7615_mcu_apply_rx_dcoc(struct mt7615_phy *phy); 631 570 int mt7615_mcu_apply_tx_dpd(struct mt7615_phy *phy); ··· 644 579 int mt7615_init_debugfs(struct mt7615_dev *dev); 645 580 int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq); 646 581 582 + int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif, 583 + bool enable); 647 584 int mt7615_mcu_set_hif_suspend(struct mt7615_dev *dev, bool suspend); 648 585 void 
mt7615_mcu_set_suspend_iter(void *priv, u8 *mac, 649 586 struct ieee80211_vif *vif); 650 587 int mt7615_mcu_update_gtk_rekey(struct ieee80211_hw *hw, 651 588 struct ieee80211_vif *vif, 652 589 struct cfg80211_gtk_rekey_data *key); 653 - 590 + int mt7615_mcu_update_arp_filter(struct ieee80211_hw *hw, 591 + struct ieee80211_vif *vif, 592 + struct ieee80211_bss_conf *info); 654 593 int __mt7663_load_firmware(struct mt7615_dev *dev); 594 + u32 mt7615_mcu_reg_rr(struct mt76_dev *dev, u32 offset); 595 + void mt7615_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val); 655 596 656 597 /* usb */ 657 - void mt7663u_wtbl_work(struct work_struct *work); 598 + int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, 599 + enum mt76_txq_id qid, struct mt76_wcid *wcid, 600 + struct ieee80211_sta *sta, 601 + struct mt76_tx_info *tx_info); 602 + bool mt7663_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update); 603 + void mt7663_usb_sdio_tx_complete_skb(struct mt76_dev *mdev, 604 + enum mt76_txq_id qid, 605 + struct mt76_queue_entry *e); 606 + void mt7663_usb_sdio_wtbl_work(struct work_struct *work); 607 + int mt7663_usb_sdio_register_device(struct mt7615_dev *dev); 658 608 int mt7663u_mcu_init(struct mt7615_dev *dev); 659 - int mt7663u_register_device(struct mt7615_dev *dev); 609 + 610 + /* sdio */ 611 + u32 mt7663s_read_pcr(struct mt7615_dev *dev); 612 + int mt7663s_mcu_init(struct mt7615_dev *dev); 613 + int mt7663s_driver_own(struct mt7615_dev *dev); 614 + int mt7663s_firmware_own(struct mt7615_dev *dev); 615 + int mt7663s_kthread_run(void *data); 616 + void mt7663s_sdio_irq(struct sdio_func *func); 660 617 661 618 #endif
+4
drivers/net/wireless/mediatek/mt76/mt7615/pci.c
··· 75 75 bool hif_suspend; 76 76 int i, err; 77 77 78 + err = mt7615_pm_wake(dev); 79 + if (err < 0) 80 + return err; 81 + 78 82 hif_suspend = !test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) && 79 83 mt7615_firmware_offload(dev); 80 84 if (hif_suspend) {
+4
drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
··· 70 70 71 71 mt76 = container_of(led_cdev, struct mt76_dev, led_cdev); 72 72 dev = container_of(mt76, struct mt7615_dev, mt76); 73 + 74 + if (test_bit(MT76_STATE_PM, &mt76->phy.state)) 75 + return; 76 + 73 77 val = FIELD_PREP(MT_LED_STATUS_DURATION, 0xffff) | 74 78 FIELD_PREP(MT_LED_STATUS_OFF, delay_off) | 75 79 FIELD_PREP(MT_LED_STATUS_ON, delay_on);
-1
drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
··· 155 155 spin_lock_bh(&dev->mt76.lock); 156 156 mt7615_mac_set_rates(phy, msta, &info->control.rates[0], 157 157 msta->rates); 158 - msta->rate_probe = true; 159 158 spin_unlock_bh(&dev->mt76.lock); 160 159 } 161 160
+31 -2
drivers/net/wireless/mediatek/mt76/mt7615/regs.h
··· 14 14 MT_CSR_BASE, 15 15 MT_PLE_BASE, 16 16 MT_PSE_BASE, 17 - MT_PHY_BASE, 18 17 MT_CFG_BASE, 19 18 MT_AGG_BASE, 20 19 MT_TMAC_BASE, ··· 28 29 MT_PCIE_REMAP_BASE2, 29 30 MT_TOP_MISC_BASE, 30 31 MT_EFUSE_ADDR_BASE, 32 + MT_PP_BASE, 31 33 __MT_BASE_MAX, 32 34 }; 33 35 ··· 153 153 154 154 #define MT_PLE(ofs) ((dev)->reg_map[MT_PLE_BASE] + (ofs)) 155 155 156 + #define MT_PLE_PG_HIF0_GROUP MT_PLE(0x110) 157 + #define MT_HIF0_MIN_QUOTA GENMASK(11, 0) 156 158 #define MT_PLE_FL_Q0_CTRL MT_PLE(0x1b0) 157 159 #define MT_PLE_FL_Q1_CTRL MT_PLE(0x1b4) 158 160 #define MT_PLE_FL_Q2_CTRL MT_PLE(0x1b8) ··· 164 162 ((n) << 2)) 165 163 166 164 #define MT_PSE(ofs) ((dev)->reg_map[MT_PSE_BASE] + (ofs)) 165 + #define MT_PSE_PG_HIF0_GROUP MT_PSE(0x110) 166 + #define MT_HIF0_MIN_QUOTA GENMASK(11, 0) 167 + #define MT_PSE_PG_HIF1_GROUP MT_PSE(0x118) 168 + #define MT_HIF1_MIN_QUOTA GENMASK(11, 0) 167 169 #define MT_PSE_QUEUE_EMPTY MT_PSE(0x0b4) 168 170 #define MT_HIF_0_EMPTY_MASK BIT(16) 169 171 #define MT_HIF_1_EMPTY_MASK BIT(17) ··· 175 169 #define MT_PSE_PG_INFO MT_PSE(0x194) 176 170 #define MT_PSE_SRC_CNT GENMASK(27, 16) 177 171 178 - #define MT_WF_PHY_BASE ((dev)->reg_map[MT_PHY_BASE]) 172 + #define MT_PP(ofs) ((dev)->reg_map[MT_PP_BASE] + (ofs)) 173 + #define MT_PP_TXDWCNT MT_PP(0x0) 174 + #define MT_PP_TXDWCNT_TX0_ADD_DW_CNT GENMASK(7, 0) 175 + #define MT_PP_TXDWCNT_TX1_ADD_DW_CNT GENMASK(15, 8) 176 + 177 + #define MT_WF_PHY_BASE 0x82070000 179 178 #define MT_WF_PHY(ofs) (MT_WF_PHY_BASE + (ofs)) 180 179 181 180 #define MT_WF_PHY_WF2_RFCTRL0(n) MT_WF_PHY(0x1900 + (n) * 0x400) ··· 224 213 #define MT_WF_PHY_RXTD2_BASE MT_WF_PHY(0x2a00) 225 214 #define MT_WF_PHY_RXTD2(_n) (MT_WF_PHY_RXTD2_BASE + ((_n) << 2)) 226 215 216 + #define MT_WF_PHY_RFINTF3_0(_n) MT_WF_PHY(0x1100 + (_n) * 0x400) 217 + #define MT_WF_PHY_RFINTF3_0_ANT GENMASK(7, 4) 218 + 227 219 #define MT_WF_CFG_BASE ((dev)->reg_map[MT_CFG_BASE]) 228 220 #define MT_WF_CFG(ofs) (MT_WF_CFG_BASE + (ofs)) 229 221 ··· 269 255 270 256 
#define MT_WF_ARB_BASE ((dev)->reg_map[MT_ARB_BASE]) 271 257 #define MT_WF_ARB(ofs) (MT_WF_ARB_BASE + (ofs)) 258 + 259 + #define MT_ARB_RQCR MT_WF_ARB(0x070) 260 + #define MT_ARB_RQCR_RX_START BIT(0) 261 + #define MT_ARB_RQCR_RXV_START BIT(4) 262 + #define MT_ARB_RQCR_RXV_R_EN BIT(7) 263 + #define MT_ARB_RQCR_RXV_T_EN BIT(8) 264 + #define MT_ARB_RQCR_BAND_SHIFT 16 272 265 273 266 #define MT_ARB_SCR MT_WF_ARB(0x080) 274 267 #define MT_ARB_SCR_TX0_DISABLE BIT(8) ··· 438 417 439 418 #define MT_LPON_T0CR MT_LPON(0x010) 440 419 #define MT_LPON_T0CR_MODE GENMASK(1, 0) 420 + #define MT_LPON_T0CR_WRITE BIT(0) 441 421 442 422 #define MT_LPON_UTTR0 MT_LPON(0x018) 443 423 #define MT_LPON_UTTR1 MT_LPON(0x01c) ··· 571 549 #define MT_WL_TX_EN BIT(23) 572 550 #define MT_WL_RX_BUSY BIT(30) 573 551 #define MT_WL_TX_BUSY BIT(31) 552 + 553 + #define MT_MCU_PTA_BASE 0x81060000 554 + #define MT_MCU_PTA(_n) (MT_MCU_PTA_BASE + (_n)) 555 + 556 + #define MT_ANT_SWITCH_CON(n) MT_MCU_PTA(0x0c8) 557 + #define MT_ANT_SWITCH_CON_MODE(_n) (GENMASK(4, 0) << (_n * 8)) 558 + #define MT_ANT_SWITCH_CON_MODE1(_n) (GENMASK(3, 0) << (_n * 8)) 574 559 575 560 #endif
+478
drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
··· 1 + // SPDX-License-Identifier: ISC 2 + /* Copyright (C) 2020 MediaTek Inc. 3 + * 4 + * Author: Felix Fietkau <nbd@nbd.name> 5 + * Lorenzo Bianconi <lorenzo@kernel.org> 6 + * Sean Wang <sean.wang@mediatek.com> 7 + */ 8 + 9 + #include <linux/kernel.h> 10 + #include <linux/iopoll.h> 11 + #include <linux/module.h> 12 + 13 + #include <linux/mmc/host.h> 14 + #include <linux/mmc/sdio_ids.h> 15 + #include <linux/mmc/sdio_func.h> 16 + 17 + #include "mt7615.h" 18 + #include "sdio.h" 19 + #include "mac.h" 20 + 21 + static const struct sdio_device_id mt7663s_table[] = { 22 + { SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, 0x7603) }, 23 + { } /* Terminating entry */ 24 + }; 25 + 26 + static u32 mt7663s_read_whisr(struct mt76_dev *dev) 27 + { 28 + return sdio_readl(dev->sdio.func, MCR_WHISR, NULL); 29 + } 30 + 31 + u32 mt7663s_read_pcr(struct mt7615_dev *dev) 32 + { 33 + struct mt76_sdio *sdio = &dev->mt76.sdio; 34 + 35 + return sdio_readl(sdio->func, MCR_WHLPCR, NULL); 36 + } 37 + 38 + static u32 mt7663s_read_mailbox(struct mt76_dev *dev, u32 offset) 39 + { 40 + struct sdio_func *func = dev->sdio.func; 41 + u32 val = ~0, status; 42 + int err; 43 + 44 + sdio_claim_host(func); 45 + 46 + sdio_writel(func, offset, MCR_H2DSM0R, &err); 47 + if (err < 0) { 48 + dev_err(dev->dev, "failed setting address [err=%d]\n", err); 49 + goto out; 50 + } 51 + 52 + sdio_writel(func, H2D_SW_INT_READ, MCR_WSICR, &err); 53 + if (err < 0) { 54 + dev_err(dev->dev, "failed setting read mode [err=%d]\n", err); 55 + goto out; 56 + } 57 + 58 + err = readx_poll_timeout(mt7663s_read_whisr, dev, status, 59 + status & H2D_SW_INT_READ, 0, 1000000); 60 + if (err < 0) { 61 + dev_err(dev->dev, "query whisr timeout\n"); 62 + goto out; 63 + } 64 + 65 + sdio_writel(func, H2D_SW_INT_READ, MCR_WHISR, &err); 66 + if (err < 0) { 67 + dev_err(dev->dev, "failed setting read mode [err=%d]\n", err); 68 + goto out; 69 + } 70 + 71 + val = sdio_readl(func, MCR_H2DSM0R, &err); 72 + if (err < 0) { 73 + dev_err(dev->dev, "failed 
reading h2dsm0r [err=%d]\n", err); 74 + goto out; 75 + } 76 + 77 + if (val != offset) { 78 + dev_err(dev->dev, "register mismatch\n"); 79 + val = ~0; 80 + goto out; 81 + } 82 + 83 + val = sdio_readl(func, MCR_D2HRM1R, &err); 84 + if (err < 0) 85 + dev_err(dev->dev, "failed reading d2hrm1r [err=%d]\n", err); 86 + 87 + out: 88 + sdio_release_host(func); 89 + 90 + return val; 91 + } 92 + 93 + static void mt7663s_write_mailbox(struct mt76_dev *dev, u32 offset, u32 val) 94 + { 95 + struct sdio_func *func = dev->sdio.func; 96 + u32 status; 97 + int err; 98 + 99 + sdio_claim_host(func); 100 + 101 + sdio_writel(func, offset, MCR_H2DSM0R, &err); 102 + if (err < 0) { 103 + dev_err(dev->dev, "failed setting address [err=%d]\n", err); 104 + goto out; 105 + } 106 + 107 + sdio_writel(func, val, MCR_H2DSM1R, &err); 108 + if (err < 0) { 109 + dev_err(dev->dev, 110 + "failed setting write value [err=%d]\n", err); 111 + goto out; 112 + } 113 + 114 + sdio_writel(func, H2D_SW_INT_WRITE, MCR_WSICR, &err); 115 + if (err < 0) { 116 + dev_err(dev->dev, "failed setting write mode [err=%d]\n", err); 117 + goto out; 118 + } 119 + 120 + err = readx_poll_timeout(mt7663s_read_whisr, dev, status, 121 + status & H2D_SW_INT_WRITE, 0, 1000000); 122 + if (err < 0) { 123 + dev_err(dev->dev, "query whisr timeout\n"); 124 + goto out; 125 + } 126 + 127 + sdio_writel(func, H2D_SW_INT_WRITE, MCR_WHISR, &err); 128 + if (err < 0) { 129 + dev_err(dev->dev, "failed setting write mode [err=%d]\n", err); 130 + goto out; 131 + } 132 + 133 + val = sdio_readl(func, MCR_H2DSM0R, &err); 134 + if (err < 0) { 135 + dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err); 136 + goto out; 137 + } 138 + 139 + if (val != offset) 140 + dev_err(dev->dev, "register mismatch\n"); 141 + 142 + out: 143 + sdio_release_host(func); 144 + } 145 + 146 + static u32 mt7663s_rr(struct mt76_dev *dev, u32 offset) 147 + { 148 + if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) 149 + return dev->mcu_ops->mcu_rr(dev, offset); 150 + 
else 151 + return mt7663s_read_mailbox(dev, offset); 152 + } 153 + 154 + static void mt7663s_wr(struct mt76_dev *dev, u32 offset, u32 val) 155 + { 156 + if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) 157 + dev->mcu_ops->mcu_wr(dev, offset, val); 158 + else 159 + mt7663s_write_mailbox(dev, offset, val); 160 + } 161 + 162 + static u32 mt7663s_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val) 163 + { 164 + val |= mt7663s_rr(dev, offset) & ~mask; 165 + mt7663s_wr(dev, offset, val); 166 + 167 + return val; 168 + } 169 + 170 + static void mt7663s_write_copy(struct mt76_dev *dev, u32 offset, 171 + const void *data, int len) 172 + { 173 + const u32 *val = data; 174 + int i; 175 + 176 + for (i = 0; i < len / sizeof(u32); i++) { 177 + mt7663s_wr(dev, offset, val[i]); 178 + offset += sizeof(u32); 179 + } 180 + } 181 + 182 + static void mt7663s_read_copy(struct mt76_dev *dev, u32 offset, 183 + void *data, int len) 184 + { 185 + u32 *val = data; 186 + int i; 187 + 188 + for (i = 0; i < len / sizeof(u32); i++) { 189 + val[i] = mt7663s_rr(dev, offset); 190 + offset += sizeof(u32); 191 + } 192 + } 193 + 194 + static int mt7663s_wr_rp(struct mt76_dev *dev, u32 base, 195 + const struct mt76_reg_pair *data, 196 + int len) 197 + { 198 + int i; 199 + 200 + for (i = 0; i < len; i++) { 201 + mt7663s_wr(dev, data->reg, data->value); 202 + data++; 203 + } 204 + 205 + return 0; 206 + } 207 + 208 + static int mt7663s_rd_rp(struct mt76_dev *dev, u32 base, 209 + struct mt76_reg_pair *data, 210 + int len) 211 + { 212 + int i; 213 + 214 + for (i = 0; i < len; i++) { 215 + data->value = mt7663s_rr(dev, data->reg); 216 + data++; 217 + } 218 + 219 + return 0; 220 + } 221 + 222 + static void mt7663s_init_work(struct work_struct *work) 223 + { 224 + struct mt7615_dev *dev; 225 + 226 + dev = container_of(work, struct mt7615_dev, mcu_work); 227 + if (mt7663s_mcu_init(dev)) 228 + return; 229 + 230 + mt7615_mcu_set_eeprom(dev); 231 + mt7615_mac_init(dev); 232 + mt7615_phy_init(dev); 233 + 
mt7615_mcu_del_wtbl_all(dev); 234 + mt7615_check_offload_capability(dev); 235 + } 236 + 237 + static int mt7663s_hw_init(struct mt7615_dev *dev, struct sdio_func *func) 238 + { 239 + u32 status, ctrl; 240 + int ret; 241 + 242 + sdio_claim_host(func); 243 + 244 + ret = sdio_enable_func(func); 245 + if (ret < 0) 246 + goto release; 247 + 248 + /* Get ownership from the device */ 249 + sdio_writel(func, WHLPCR_INT_EN_CLR | WHLPCR_FW_OWN_REQ_CLR, 250 + MCR_WHLPCR, &ret); 251 + if (ret < 0) 252 + goto disable_func; 253 + 254 + ret = readx_poll_timeout(mt7663s_read_pcr, dev, status, 255 + status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000); 256 + if (ret < 0) { 257 + dev_err(dev->mt76.dev, "Cannot get ownership from device"); 258 + goto disable_func; 259 + } 260 + 261 + ret = sdio_set_block_size(func, 512); 262 + if (ret < 0) 263 + goto disable_func; 264 + 265 + /* Enable interrupt */ 266 + sdio_writel(func, WHLPCR_INT_EN_SET, MCR_WHLPCR, &ret); 267 + if (ret < 0) 268 + goto disable_func; 269 + 270 + ctrl = WHIER_RX0_DONE_INT_EN | WHIER_TX_DONE_INT_EN; 271 + sdio_writel(func, ctrl, MCR_WHIER, &ret); 272 + if (ret < 0) 273 + goto disable_func; 274 + 275 + /* set WHISR as read clear and Rx aggregation number as 16 */ 276 + ctrl = FIELD_PREP(MAX_HIF_RX_LEN_NUM, 16); 277 + sdio_writel(func, ctrl, MCR_WHCR, &ret); 278 + if (ret < 0) 279 + goto disable_func; 280 + 281 + ret = sdio_claim_irq(func, mt7663s_sdio_irq); 282 + if (ret < 0) 283 + goto disable_func; 284 + 285 + sdio_release_host(func); 286 + 287 + return 0; 288 + 289 + disable_func: 290 + sdio_disable_func(func); 291 + release: 292 + sdio_release_host(func); 293 + 294 + return ret; 295 + } 296 + 297 + static int mt7663s_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, 298 + struct ieee80211_sta *sta) 299 + { 300 + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); 301 + struct mt76_sdio *sdio = &mdev->sdio; 302 + u32 pse, ple; 303 + int err; 304 + 305 + err = mt7615_mac_sta_add(mdev, vif, sta); 
306 + if (err < 0) 307 + return err; 308 + 309 + /* init sched data quota */ 310 + pse = mt76_get_field(dev, MT_PSE_PG_HIF0_GROUP, MT_HIF0_MIN_QUOTA); 311 + ple = mt76_get_field(dev, MT_PLE_PG_HIF0_GROUP, MT_HIF0_MIN_QUOTA); 312 + 313 + mutex_lock(&sdio->sched.lock); 314 + sdio->sched.pse_data_quota = pse; 315 + sdio->sched.ple_data_quota = ple; 316 + mutex_unlock(&sdio->sched.lock); 317 + 318 + return 0; 319 + } 320 + 321 + static int mt7663s_probe(struct sdio_func *func, 322 + const struct sdio_device_id *id) 323 + { 324 + static const struct mt76_driver_ops drv_ops = { 325 + .txwi_size = MT_USB_TXD_SIZE, 326 + .drv_flags = MT_DRV_RX_DMA_HDR | MT_DRV_HW_MGMT_TXQ, 327 + .tx_prepare_skb = mt7663_usb_sdio_tx_prepare_skb, 328 + .tx_complete_skb = mt7663_usb_sdio_tx_complete_skb, 329 + .tx_status_data = mt7663_usb_sdio_tx_status_data, 330 + .rx_skb = mt7615_queue_rx_skb, 331 + .sta_ps = mt7615_sta_ps, 332 + .sta_add = mt7663s_sta_add, 333 + .sta_remove = mt7615_mac_sta_remove, 334 + .update_survey = mt7615_update_channel, 335 + }; 336 + static const struct mt76_bus_ops mt7663s_ops = { 337 + .rr = mt7663s_rr, 338 + .rmw = mt7663s_rmw, 339 + .wr = mt7663s_wr, 340 + .write_copy = mt7663s_write_copy, 341 + .read_copy = mt7663s_read_copy, 342 + .wr_rp = mt7663s_wr_rp, 343 + .rd_rp = mt7663s_rd_rp, 344 + .type = MT76_BUS_SDIO, 345 + }; 346 + struct ieee80211_ops *ops; 347 + struct mt7615_dev *dev; 348 + struct mt76_dev *mdev; 349 + int ret; 350 + 351 + ops = devm_kmemdup(&func->dev, &mt7615_ops, sizeof(mt7615_ops), 352 + GFP_KERNEL); 353 + if (!ops) 354 + return -ENOMEM; 355 + 356 + mdev = mt76_alloc_device(&func->dev, sizeof(*dev), ops, &drv_ops); 357 + if (!mdev) 358 + return -ENOMEM; 359 + 360 + dev = container_of(mdev, struct mt7615_dev, mt76); 361 + 362 + INIT_WORK(&dev->mcu_work, mt7663s_init_work); 363 + dev->reg_map = mt7663_usb_sdio_reg_map; 364 + dev->ops = ops; 365 + sdio_set_drvdata(func, dev); 366 + 367 + mdev->sdio.tx_kthread = 
kthread_create(mt7663s_kthread_run, dev, 368 + "mt7663s_tx"); 369 + if (IS_ERR(mdev->sdio.tx_kthread)) 370 + return PTR_ERR(mdev->sdio.tx_kthread); 371 + 372 + ret = mt76s_init(mdev, func, &mt7663s_ops); 373 + if (ret < 0) 374 + goto err_free; 375 + 376 + ret = mt7663s_hw_init(dev, func); 377 + if (ret) 378 + goto err_free; 379 + 380 + mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) | 381 + (mt76_rr(dev, MT_HW_REV) & 0xff); 382 + dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev); 383 + 384 + ret = mt76s_alloc_queues(&dev->mt76); 385 + if (ret) 386 + goto err_deinit; 387 + 388 + ret = mt7663_usb_sdio_register_device(dev); 389 + if (ret) 390 + goto err_deinit; 391 + 392 + return 0; 393 + 394 + err_deinit: 395 + mt76s_deinit(&dev->mt76); 396 + err_free: 397 + mt76_free_device(&dev->mt76); 398 + 399 + return ret; 400 + } 401 + 402 + static void mt7663s_remove(struct sdio_func *func) 403 + { 404 + struct mt7615_dev *dev = sdio_get_drvdata(func); 405 + 406 + if (!test_and_clear_bit(MT76_STATE_INITIALIZED, &dev->mphy.state)) 407 + return; 408 + 409 + ieee80211_unregister_hw(dev->mt76.hw); 410 + mt76s_deinit(&dev->mt76); 411 + mt76_free_device(&dev->mt76); 412 + } 413 + 414 + #ifdef CONFIG_PM 415 + static int mt7663s_suspend(struct device *dev) 416 + { 417 + struct sdio_func *func = dev_to_sdio_func(dev); 418 + struct mt7615_dev *mdev = sdio_get_drvdata(func); 419 + 420 + if (!test_bit(MT76_STATE_SUSPEND, &mdev->mphy.state) && 421 + mt7615_firmware_offload(mdev)) { 422 + int err; 423 + 424 + err = mt7615_mcu_set_hif_suspend(mdev, true); 425 + if (err < 0) 426 + return err; 427 + } 428 + 429 + mt76s_stop_txrx(&mdev->mt76); 430 + 431 + return mt7663s_firmware_own(mdev); 432 + } 433 + 434 + static int mt7663s_resume(struct device *dev) 435 + { 436 + struct sdio_func *func = dev_to_sdio_func(dev); 437 + struct mt7615_dev *mdev = sdio_get_drvdata(func); 438 + int err; 439 + 440 + err = mt7663s_driver_own(mdev); 441 + if (err) 442 + return err; 443 + 444 + if 
(!test_bit(MT76_STATE_SUSPEND, &mdev->mphy.state) && 445 + mt7615_firmware_offload(mdev)) 446 + err = mt7615_mcu_set_hif_suspend(mdev, false); 447 + 448 + return err; 449 + } 450 + 451 + static const struct dev_pm_ops mt7663s_pm_ops = { 452 + .suspend = mt7663s_suspend, 453 + .resume = mt7663s_resume, 454 + }; 455 + #endif 456 + 457 + MODULE_DEVICE_TABLE(sdio, mt7663s_table); 458 + MODULE_FIRMWARE(MT7663_OFFLOAD_FIRMWARE_N9); 459 + MODULE_FIRMWARE(MT7663_OFFLOAD_ROM_PATCH); 460 + MODULE_FIRMWARE(MT7663_FIRMWARE_N9); 461 + MODULE_FIRMWARE(MT7663_ROM_PATCH); 462 + 463 + static struct sdio_driver mt7663s_driver = { 464 + .name = KBUILD_MODNAME, 465 + .probe = mt7663s_probe, 466 + .remove = mt7663s_remove, 467 + .id_table = mt7663s_table, 468 + #ifdef CONFIG_PM 469 + .drv = { 470 + .pm = &mt7663s_pm_ops, 471 + } 472 + #endif 473 + }; 474 + module_sdio_driver(mt7663s_driver); 475 + 476 + MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>"); 477 + MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); 478 + MODULE_LICENSE("Dual BSD/GPL");
+115
drivers/net/wireless/mediatek/mt76/mt7615/sdio.h
··· 1 + // SPDX-License-Identifier: ISC 2 + /* Copyright (C) 2020 MediaTek Inc. 3 + * 4 + * Author: Sean Wang <sean.wang@mediatek.com> 5 + */ 6 + 7 + #ifndef __MT76S_H 8 + #define __MT76S_H 9 + 10 + #define MT_PSE_PAGE_SZ 128 11 + 12 + #define MCR_WCIR 0x0000 13 + #define MCR_WHLPCR 0x0004 14 + #define WHLPCR_FW_OWN_REQ_CLR BIT(9) 15 + #define WHLPCR_FW_OWN_REQ_SET BIT(8) 16 + #define WHLPCR_IS_DRIVER_OWN BIT(8) 17 + #define WHLPCR_INT_EN_CLR BIT(1) 18 + #define WHLPCR_INT_EN_SET BIT(0) 19 + 20 + #define MCR_WSDIOCSR 0x0008 21 + #define MCR_WHCR 0x000C 22 + #define W_INT_CLR_CTRL BIT(1) 23 + #define RECV_MAILBOX_RD_CLR_EN BIT(2) 24 + #define MAX_HIF_RX_LEN_NUM GENMASK(13, 8) 25 + #define RX_ENHANCE_MODE BIT(16) 26 + 27 + #define MCR_WHISR 0x0010 28 + #define MCR_WHIER 0x0014 29 + #define WHIER_D2H_SW_INT GENMASK(31, 8) 30 + #define WHIER_FW_OWN_BACK_INT_EN BIT(7) 31 + #define WHIER_ABNORMAL_INT_EN BIT(6) 32 + #define WHIER_RX1_DONE_INT_EN BIT(2) 33 + #define WHIER_RX0_DONE_INT_EN BIT(1) 34 + #define WHIER_TX_DONE_INT_EN BIT(0) 35 + #define WHIER_DEFAULT (WHIER_RX0_DONE_INT_EN | \ 36 + WHIER_RX1_DONE_INT_EN | \ 37 + WHIER_TX_DONE_INT_EN | \ 38 + WHIER_ABNORMAL_INT_EN | \ 39 + WHIER_D2H_SW_INT) 40 + 41 + #define MCR_WASR 0x0020 42 + #define MCR_WSICR 0x0024 43 + #define MCR_WTSR0 0x0028 44 + #define TQ0_CNT GENMASK(7, 0) 45 + #define TQ1_CNT GENMASK(15, 8) 46 + #define TQ2_CNT GENMASK(23, 16) 47 + #define TQ3_CNT GENMASK(31, 24) 48 + 49 + #define MCR_WTSR1 0x002c 50 + #define TQ4_CNT GENMASK(7, 0) 51 + #define TQ5_CNT GENMASK(15, 8) 52 + #define TQ6_CNT GENMASK(23, 16) 53 + #define TQ7_CNT GENMASK(31, 24) 54 + 55 + #define MCR_WTDR1 0x0034 56 + #define MCR_WRDR0 0x0050 57 + #define MCR_WRDR1 0x0054 58 + #define MCR_WRDR(p) (0x0050 + 4 * (p)) 59 + #define MCR_H2DSM0R 0x0070 60 + #define H2D_SW_INT_READ BIT(16) 61 + #define H2D_SW_INT_WRITE BIT(17) 62 + 63 + #define MCR_H2DSM1R 0x0074 64 + #define MCR_D2HRM0R 0x0078 65 + #define MCR_D2HRM1R 0x007c 66 + #define 
MCR_D2HRM2R 0x0080 67 + #define MCR_WRPLR 0x0090 68 + #define RX0_PACKET_LENGTH GENMASK(15, 0) 69 + #define RX1_PACKET_LENGTH GENMASK(31, 16) 70 + 71 + #define MCR_WTMDR 0x00b0 72 + #define MCR_WTMCR 0x00b4 73 + #define MCR_WTMDPCR0 0x00b8 74 + #define MCR_WTMDPCR1 0x00bc 75 + #define MCR_WPLRCR 0x00d4 76 + #define MCR_WSR 0x00D8 77 + #define MCR_CLKIOCR 0x0100 78 + #define MCR_CMDIOCR 0x0104 79 + #define MCR_DAT0IOCR 0x0108 80 + #define MCR_DAT1IOCR 0x010C 81 + #define MCR_DAT2IOCR 0x0110 82 + #define MCR_DAT3IOCR 0x0114 83 + #define MCR_CLKDLYCR 0x0118 84 + #define MCR_CMDDLYCR 0x011C 85 + #define MCR_ODATDLYCR 0x0120 86 + #define MCR_IDATDLYCR1 0x0124 87 + #define MCR_IDATDLYCR2 0x0128 88 + #define MCR_ILCHCR 0x012C 89 + #define MCR_WTQCR0 0x0130 90 + #define MCR_WTQCR1 0x0134 91 + #define MCR_WTQCR2 0x0138 92 + #define MCR_WTQCR3 0x013C 93 + #define MCR_WTQCR4 0x0140 94 + #define MCR_WTQCR5 0x0144 95 + #define MCR_WTQCR6 0x0148 96 + #define MCR_WTQCR7 0x014C 97 + #define MCR_WTQCR(x) (0x130 + 4 * (x)) 98 + #define TXQ_CNT_L GENMASK(15, 0) 99 + #define TXQ_CNT_H GENMASK(31, 16) 100 + 101 + #define MCR_SWPCDBGR 0x0154 102 + 103 + struct mt76s_intr { 104 + u32 isr; 105 + struct { 106 + u32 wtqcr[8]; 107 + } tx; 108 + struct { 109 + u16 num[2]; 110 + u16 len[2][16]; 111 + } rx; 112 + u32 rec_mb[2]; 113 + } __packed; 114 + 115 + #endif
+162
drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (C) 2020 MediaTek Inc. 3 + * 4 + * Author: Felix Fietkau <nbd@nbd.name> 5 + * Lorenzo Bianconi <lorenzo@kernel.org> 6 + * Sean Wang <sean.wang@mediatek.com> 7 + */ 8 + #include <linux/kernel.h> 9 + #include <linux/mmc/sdio_func.h> 10 + #include <linux/module.h> 11 + #include <linux/iopoll.h> 12 + 13 + #include "mt7615.h" 14 + #include "mac.h" 15 + #include "mcu.h" 16 + #include "regs.h" 17 + #include "sdio.h" 18 + 19 + static int mt7663s_mcu_init_sched(struct mt7615_dev *dev) 20 + { 21 + struct mt76_sdio *sdio = &dev->mt76.sdio; 22 + u32 pse0, ple, pse1, txdwcnt; 23 + 24 + pse0 = mt76_get_field(dev, MT_PSE_PG_HIF0_GROUP, MT_HIF0_MIN_QUOTA); 25 + pse1 = mt76_get_field(dev, MT_PSE_PG_HIF1_GROUP, MT_HIF1_MIN_QUOTA); 26 + ple = mt76_get_field(dev, MT_PLE_PG_HIF0_GROUP, MT_HIF0_MIN_QUOTA); 27 + txdwcnt = mt76_get_field(dev, MT_PP_TXDWCNT, 28 + MT_PP_TXDWCNT_TX1_ADD_DW_CNT); 29 + 30 + mutex_lock(&sdio->sched.lock); 31 + 32 + sdio->sched.pse_data_quota = pse0; 33 + sdio->sched.ple_data_quota = ple; 34 + sdio->sched.pse_mcu_quota = pse1; 35 + sdio->sched.deficit = txdwcnt << 2; 36 + 37 + mutex_unlock(&sdio->sched.lock); 38 + 39 + return 0; 40 + } 41 + 42 + static int 43 + mt7663s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, 44 + int cmd, bool wait_resp) 45 + { 46 + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); 47 + int ret, seq; 48 + 49 + mutex_lock(&mdev->mcu.mutex); 50 + 51 + mt7615_mcu_fill_msg(dev, skb, cmd, &seq); 52 + ret = mt76_tx_queue_skb_raw(dev, MT_TXQ_MCU, skb, 0); 53 + if (ret) 54 + goto out; 55 + 56 + mt76_queue_kick(dev, mdev->q_tx[MT_TXQ_MCU].q); 57 + if (wait_resp) 58 + ret = mt7615_mcu_wait_response(dev, cmd, seq); 59 + 60 + out: 61 + mutex_unlock(&mdev->mcu.mutex); 62 + 63 + return ret; 64 + } 65 + 66 + int mt7663s_driver_own(struct mt7615_dev *dev) 67 + { 68 + struct sdio_func *func = dev->mt76.sdio.func; 69 + struct mt76_phy *mphy = &dev->mt76.phy; 70 + 
u32 status; 71 + int ret; 72 + 73 + if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state)) 74 + goto out; 75 + 76 + sdio_claim_host(func); 77 + 78 + sdio_writel(func, WHLPCR_FW_OWN_REQ_CLR, MCR_WHLPCR, 0); 79 + 80 + ret = readx_poll_timeout(mt7663s_read_pcr, dev, status, 81 + status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000); 82 + if (ret < 0) { 83 + dev_err(dev->mt76.dev, "Cannot get ownership from device"); 84 + set_bit(MT76_STATE_PM, &mphy->state); 85 + sdio_release_host(func); 86 + 87 + return ret; 88 + } 89 + 90 + sdio_release_host(func); 91 + 92 + out: 93 + dev->pm.last_activity = jiffies; 94 + 95 + return 0; 96 + } 97 + 98 + int mt7663s_firmware_own(struct mt7615_dev *dev) 99 + { 100 + struct sdio_func *func = dev->mt76.sdio.func; 101 + struct mt76_phy *mphy = &dev->mt76.phy; 102 + u32 status; 103 + int ret; 104 + 105 + if (test_and_set_bit(MT76_STATE_PM, &mphy->state)) 106 + return 0; 107 + 108 + sdio_claim_host(func); 109 + 110 + sdio_writel(func, WHLPCR_FW_OWN_REQ_SET, MCR_WHLPCR, 0); 111 + 112 + ret = readx_poll_timeout(mt7663s_read_pcr, dev, status, 113 + !(status & WHLPCR_IS_DRIVER_OWN), 2000, 1000000); 114 + if (ret < 0) { 115 + dev_err(dev->mt76.dev, "Cannot set ownership to device"); 116 + clear_bit(MT76_STATE_PM, &mphy->state); 117 + } 118 + 119 + sdio_release_host(func); 120 + 121 + return ret; 122 + } 123 + 124 + int mt7663s_mcu_init(struct mt7615_dev *dev) 125 + { 126 + static const struct mt76_mcu_ops mt7663s_mcu_ops = { 127 + .headroom = sizeof(struct mt7615_mcu_txd), 128 + .tailroom = MT_USB_TAIL_SIZE, 129 + .mcu_skb_send_msg = mt7663s_mcu_send_message, 130 + .mcu_send_msg = mt7615_mcu_msg_send, 131 + .mcu_restart = mt7615_mcu_restart, 132 + .mcu_rr = mt7615_mcu_reg_rr, 133 + .mcu_wr = mt7615_mcu_reg_wr, 134 + }; 135 + int ret; 136 + 137 + ret = mt7663s_driver_own(dev); 138 + if (ret) 139 + return ret; 140 + 141 + dev->mt76.mcu_ops = &mt7663s_mcu_ops, 142 + 143 + ret = mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY); 144 + if (ret) { 
145 + mt7615_mcu_restart(&dev->mt76); 146 + if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, 147 + MT_TOP_MISC2_FW_N9_RDY, 0, 500)) 148 + return -EIO; 149 + } 150 + 151 + ret = __mt7663_load_firmware(dev); 152 + if (ret) 153 + return ret; 154 + 155 + ret = mt7663s_mcu_init_sched(dev); 156 + if (ret) 157 + return ret; 158 + 159 + set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state); 160 + 161 + return 0; 162 + }
+268
drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
··· 1 + // SPDX-License-Identifier: ISC 2 + /* Copyright (C) 2020 MediaTek Inc. 3 + * 4 + * Author: Felix Fietkau <nbd@nbd.name> 5 + * Lorenzo Bianconi <lorenzo@kernel.org> 6 + * Sean Wang <sean.wang@mediatek.com> 7 + */ 8 + 9 + #include <linux/kernel.h> 10 + #include <linux/iopoll.h> 11 + #include <linux/module.h> 12 + 13 + #include <linux/mmc/host.h> 14 + #include <linux/mmc/sdio_ids.h> 15 + #include <linux/mmc/sdio_func.h> 16 + 17 + #include "../trace.h" 18 + #include "mt7615.h" 19 + #include "sdio.h" 20 + #include "mac.h" 21 + 22 + static void mt7663s_refill_sched_quota(struct mt7615_dev *dev, u32 *data) 23 + { 24 + struct mt76_sdio *sdio = &dev->mt76.sdio; 25 + 26 + mutex_lock(&sdio->sched.lock); 27 + sdio->sched.pse_data_quota += FIELD_GET(TXQ_CNT_L, data[0]) + /* BK */ 28 + FIELD_GET(TXQ_CNT_H, data[0]) + /* BE */ 29 + FIELD_GET(TXQ_CNT_L, data[1]) + /* VI */ 30 + FIELD_GET(TXQ_CNT_H, data[1]); /* VO */ 31 + sdio->sched.ple_data_quota += FIELD_GET(TXQ_CNT_H, data[2]) + /* BK */ 32 + FIELD_GET(TXQ_CNT_L, data[3]) + /* BE */ 33 + FIELD_GET(TXQ_CNT_H, data[3]) + /* VI */ 34 + FIELD_GET(TXQ_CNT_L, data[4]); /* VO */ 35 + sdio->sched.pse_mcu_quota += FIELD_GET(TXQ_CNT_L, data[2]); 36 + mutex_unlock(&sdio->sched.lock); 37 + } 38 + 39 + static struct sk_buff *mt7663s_build_rx_skb(void *data, int data_len, 40 + int buf_len) 41 + { 42 + int len = min_t(int, data_len, MT_SKB_HEAD_LEN); 43 + struct sk_buff *skb; 44 + 45 + skb = alloc_skb(len, GFP_KERNEL); 46 + if (!skb) 47 + return NULL; 48 + 49 + skb_put_data(skb, data, len); 50 + if (data_len > len) { 51 + struct page *page; 52 + 53 + data += len; 54 + page = virt_to_head_page(data); 55 + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 56 + page, data - page_address(page), 57 + data_len - len, buf_len); 58 + get_page(page); 59 + } 60 + 61 + return skb; 62 + } 63 + 64 + static int mt7663s_rx_run_queue(struct mt7615_dev *dev, enum mt76_rxq_id qid, 65 + struct mt76s_intr *intr) 66 + { 67 + struct mt76_queue *q = 
&dev->mt76.q_rx[qid]; 68 + struct mt76_sdio *sdio = &dev->mt76.sdio; 69 + int len = 0, err, i, order; 70 + struct page *page; 71 + u8 *buf; 72 + 73 + for (i = 0; i < intr->rx.num[qid]; i++) 74 + len += round_up(intr->rx.len[qid][i] + 4, 4); 75 + 76 + if (!len) 77 + return 0; 78 + 79 + if (len > sdio->func->cur_blksize) 80 + len = roundup(len, sdio->func->cur_blksize); 81 + 82 + order = get_order(len); 83 + page = __dev_alloc_pages(GFP_KERNEL, order); 84 + if (!page) 85 + return -ENOMEM; 86 + 87 + buf = page_address(page); 88 + 89 + err = sdio_readsb(sdio->func, buf, MCR_WRDR(qid), len); 90 + if (err < 0) { 91 + dev_err(dev->mt76.dev, "sdio read data failed:%d\n", err); 92 + __free_pages(page, order); 93 + return err; 94 + } 95 + 96 + for (i = 0; i < intr->rx.num[qid]; i++) { 97 + int index = (q->tail + i) % q->ndesc; 98 + struct mt76_queue_entry *e = &q->entry[index]; 99 + 100 + len = intr->rx.len[qid][i]; 101 + e->skb = mt7663s_build_rx_skb(buf, len, round_up(len + 4, 4)); 102 + if (!e->skb) 103 + break; 104 + 105 + buf += round_up(len + 4, 4); 106 + if (q->queued + i + 1 == q->ndesc) 107 + break; 108 + } 109 + __free_pages(page, order); 110 + 111 + spin_lock_bh(&q->lock); 112 + q->tail = (q->tail + i) % q->ndesc; 113 + q->queued += i; 114 + spin_unlock_bh(&q->lock); 115 + 116 + return err; 117 + } 118 + 119 + static int mt7663s_tx_update_sched(struct mt7615_dev *dev, 120 + struct mt76_queue_entry *e, 121 + bool mcu) 122 + { 123 + struct mt76_sdio *sdio = &dev->mt76.sdio; 124 + struct mt76_phy *mphy = &dev->mt76.phy; 125 + struct ieee80211_hdr *hdr; 126 + int size, ret = -EBUSY; 127 + 128 + size = DIV_ROUND_UP(e->buf_sz + sdio->sched.deficit, MT_PSE_PAGE_SZ); 129 + 130 + if (mcu) { 131 + if (!test_bit(MT76_STATE_MCU_RUNNING, &mphy->state)) 132 + return 0; 133 + 134 + mutex_lock(&sdio->sched.lock); 135 + if (sdio->sched.pse_mcu_quota > size) { 136 + sdio->sched.pse_mcu_quota -= size; 137 + ret = 0; 138 + } 139 + mutex_unlock(&sdio->sched.lock); 140 + 141 + return 
ret; 142 + } 143 + 144 + hdr = (struct ieee80211_hdr *)(e->skb->data + MT_USB_TXD_SIZE); 145 + if (ieee80211_is_ctl(hdr->frame_control)) 146 + return 0; 147 + 148 + mutex_lock(&sdio->sched.lock); 149 + if (sdio->sched.pse_data_quota > size && 150 + sdio->sched.ple_data_quota > 0) { 151 + sdio->sched.pse_data_quota -= size; 152 + sdio->sched.ple_data_quota--; 153 + ret = 0; 154 + } 155 + mutex_unlock(&sdio->sched.lock); 156 + 157 + return ret; 158 + } 159 + 160 + static int mt7663s_tx_run_queue(struct mt7615_dev *dev, struct mt76_queue *q) 161 + { 162 + bool mcu = q == dev->mt76.q_tx[MT_TXQ_MCU].q; 163 + struct mt76_sdio *sdio = &dev->mt76.sdio; 164 + int nframes = 0; 165 + 166 + while (q->first != q->tail) { 167 + struct mt76_queue_entry *e = &q->entry[q->first]; 168 + int err, len = e->skb->len; 169 + 170 + if (mt7663s_tx_update_sched(dev, e, mcu)) 171 + break; 172 + 173 + if (len > sdio->func->cur_blksize) 174 + len = roundup(len, sdio->func->cur_blksize); 175 + 176 + /* TODO: skb_walk_frags and then write to SDIO port */ 177 + err = sdio_writesb(sdio->func, MCR_WTDR1, e->skb->data, len); 178 + if (err) { 179 + dev_err(dev->mt76.dev, "sdio write failed: %d\n", err); 180 + return -EIO; 181 + } 182 + 183 + e->done = true; 184 + q->first = (q->first + 1) % q->ndesc; 185 + nframes++; 186 + } 187 + 188 + return nframes; 189 + } 190 + 191 + static int mt7663s_tx_run_queues(struct mt7615_dev *dev) 192 + { 193 + int i, nframes = 0; 194 + 195 + for (i = 0; i < MT_TXQ_MCU_WA; i++) { 196 + int ret; 197 + 198 + ret = mt7663s_tx_run_queue(dev, dev->mt76.q_tx[i].q); 199 + if (ret < 0) 200 + return ret; 201 + 202 + nframes += ret; 203 + } 204 + 205 + return nframes; 206 + } 207 + 208 + int mt7663s_kthread_run(void *data) 209 + { 210 + struct mt7615_dev *dev = data; 211 + struct mt76_phy *mphy = &dev->mt76.phy; 212 + 213 + while (!kthread_should_stop()) { 214 + int ret; 215 + 216 + cond_resched(); 217 + 218 + sdio_claim_host(dev->mt76.sdio.func); 219 + ret = 
mt7663s_tx_run_queues(dev); 220 + sdio_release_host(dev->mt76.sdio.func); 221 + 222 + if (ret <= 0 || !test_bit(MT76_STATE_RUNNING, &mphy->state)) { 223 + set_current_state(TASK_INTERRUPTIBLE); 224 + schedule(); 225 + } else { 226 + wake_up_process(dev->mt76.sdio.kthread); 227 + } 228 + } 229 + 230 + return 0; 231 + } 232 + 233 + void mt7663s_sdio_irq(struct sdio_func *func) 234 + { 235 + struct mt7615_dev *dev = sdio_get_drvdata(func); 236 + struct mt76_sdio *sdio = &dev->mt76.sdio; 237 + struct mt76s_intr intr; 238 + 239 + /* disable interrupt */ 240 + sdio_writel(func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, 0); 241 + 242 + do { 243 + sdio_readsb(func, &intr, MCR_WHISR, sizeof(struct mt76s_intr)); 244 + trace_dev_irq(&dev->mt76, intr.isr, 0); 245 + 246 + if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.phy.state)) 247 + goto out; 248 + 249 + if (intr.isr & WHIER_RX0_DONE_INT_EN) { 250 + mt7663s_rx_run_queue(dev, 0, &intr); 251 + wake_up_process(sdio->kthread); 252 + } 253 + 254 + if (intr.isr & WHIER_RX1_DONE_INT_EN) { 255 + mt7663s_rx_run_queue(dev, 1, &intr); 256 + wake_up_process(sdio->kthread); 257 + } 258 + 259 + if (intr.isr & WHIER_TX_DONE_INT_EN) { 260 + mt7663s_refill_sched_quota(dev, intr.tx.wtqcr); 261 + mt7663s_tx_run_queues(dev); 262 + wake_up_process(sdio->kthread); 263 + } 264 + } while (intr.isr); 265 + out: 266 + /* enable interrupt */ 267 + sdio_writel(func, WHLPCR_INT_EN_SET, MCR_WHLPCR, 0); 268 + }
+363
drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
··· 1 + // SPDX-License-Identifier: ISC 2 + /* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */ 3 + 4 + #include "mt7615.h" 5 + #include "eeprom.h" 6 + #include "mcu.h" 7 + 8 + enum { 9 + TM_CHANGED_TXPOWER_CTRL, 10 + TM_CHANGED_TXPOWER, 11 + TM_CHANGED_FREQ_OFFSET, 12 + 13 + /* must be last */ 14 + NUM_TM_CHANGED 15 + }; 16 + 17 + 18 + static const u8 tm_change_map[] = { 19 + [TM_CHANGED_TXPOWER_CTRL] = MT76_TM_ATTR_TX_POWER_CONTROL, 20 + [TM_CHANGED_TXPOWER] = MT76_TM_ATTR_TX_POWER, 21 + [TM_CHANGED_FREQ_OFFSET] = MT76_TM_ATTR_FREQ_OFFSET, 22 + }; 23 + 24 + static const u32 reg_backup_list[] = { 25 + MT_WF_PHY_RFINTF3_0(0), 26 + MT_WF_PHY_RFINTF3_0(1), 27 + MT_WF_PHY_RFINTF3_0(2), 28 + MT_WF_PHY_RFINTF3_0(3), 29 + MT_ANT_SWITCH_CON(2), 30 + MT_ANT_SWITCH_CON(3), 31 + MT_ANT_SWITCH_CON(4), 32 + MT_ANT_SWITCH_CON(6), 33 + MT_ANT_SWITCH_CON(7), 34 + MT_ANT_SWITCH_CON(8), 35 + }; 36 + 37 + static const struct { 38 + u16 wf; 39 + u16 reg; 40 + } rf_backup_list[] = { 41 + { 0, 0x48 }, 42 + { 1, 0x48 }, 43 + { 2, 0x48 }, 44 + { 3, 0x48 }, 45 + }; 46 + 47 + static int 48 + mt7615_tm_set_tx_power(struct mt7615_phy *phy) 49 + { 50 + struct mt7615_dev *dev = phy->dev; 51 + struct mt76_phy *mphy = phy->mt76; 52 + int i, ret, n_chains = hweight8(mphy->antenna_mask); 53 + struct cfg80211_chan_def *chandef = &mphy->chandef; 54 + int freq = chandef->center_freq1, len, target_chains; 55 + u8 *data, *eep = (u8 *)dev->mt76.eeprom.data; 56 + enum nl80211_band band = chandef->chan->band; 57 + struct sk_buff *skb; 58 + struct { 59 + u8 center_chan; 60 + u8 dbdc_idx; 61 + u8 band; 62 + u8 rsv; 63 + } __packed req_hdr = { 64 + .center_chan = ieee80211_frequency_to_channel(freq), 65 + .band = band, 66 + .dbdc_idx = phy != &dev->phy, 67 + }; 68 + u8 *tx_power = NULL; 69 + 70 + if (dev->mt76.test.state != MT76_TM_STATE_OFF) 71 + tx_power = dev->mt76.test.tx_power; 72 + 73 + len = sizeof(req_hdr) + MT7615_EE_MAX - MT_EE_NIC_CONF_0; 74 + skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, 
sizeof(req_hdr) + len); 75 + if (!skb) 76 + return -ENOMEM; 77 + 78 + skb_put_data(skb, &req_hdr, sizeof(req_hdr)); 79 + data = skb_put_data(skb, eep + MT_EE_NIC_CONF_0, len); 80 + 81 + target_chains = mt7615_ext_pa_enabled(dev, band) ? 1 : n_chains; 82 + for (i = 0; i < target_chains; i++) { 83 + int index; 84 + 85 + ret = mt7615_eeprom_get_target_power_index(dev, chandef->chan, i); 86 + if (ret < 0) 87 + return -EINVAL; 88 + 89 + index = ret - MT_EE_NIC_CONF_0; 90 + if (tx_power && tx_power[i]) 91 + data[ret - MT_EE_NIC_CONF_0] = tx_power[i]; 92 + } 93 + 94 + return __mt76_mcu_skb_send_msg(&dev->mt76, skb, 95 + MCU_EXT_CMD_SET_TX_POWER_CTRL, false); 96 + } 97 + 98 + static void 99 + mt7615_tm_reg_backup_restore(struct mt7615_dev *dev) 100 + { 101 + u32 *b = dev->test.reg_backup; 102 + int n_regs = ARRAY_SIZE(reg_backup_list); 103 + int n_rf_regs = ARRAY_SIZE(rf_backup_list); 104 + int i; 105 + 106 + if (dev->mt76.test.state == MT76_TM_STATE_OFF) { 107 + for (i = 0; i < n_regs; i++) 108 + mt76_wr(dev, reg_backup_list[i], b[i]); 109 + 110 + for (i = 0; i < n_rf_regs; i++) 111 + mt7615_rf_wr(dev, rf_backup_list[i].wf, 112 + rf_backup_list[i].reg, b[n_regs + i]); 113 + return; 114 + } 115 + 116 + if (b) 117 + return; 118 + 119 + b = devm_kzalloc(dev->mt76.dev, 4 * (n_regs + n_rf_regs), 120 + GFP_KERNEL); 121 + if (!b) 122 + return; 123 + 124 + dev->test.reg_backup = b; 125 + for (i = 0; i < n_regs; i++) 126 + b[i] = mt76_rr(dev, reg_backup_list[i]); 127 + for (i = 0; i < n_rf_regs; i++) 128 + b[n_regs + i] = mt7615_rf_rr(dev, rf_backup_list[i].wf, 129 + rf_backup_list[i].reg); 130 + } 131 + 132 + 133 + static void 134 + mt7615_tm_init_phy(struct mt7615_dev *dev, struct mt7615_phy *phy) 135 + { 136 + unsigned int total_flags = ~0; 137 + 138 + if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state)) 139 + return; 140 + 141 + mutex_unlock(&dev->mt76.mutex); 142 + mt7615_set_channel(phy); 143 + mt7615_ops.configure_filter(phy->mt76->hw, 0, &total_flags, 0); 144 + 
mutex_lock(&dev->mt76.mutex); 145 + 146 + mt7615_tm_reg_backup_restore(dev); 147 + } 148 + 149 + static void 150 + mt7615_tm_init(struct mt7615_dev *dev) 151 + { 152 + mt7615_tm_init_phy(dev, &dev->phy); 153 + 154 + if (dev->mt76.phy2) 155 + mt7615_tm_init_phy(dev, dev->mt76.phy2->priv); 156 + } 157 + 158 + static void 159 + mt7615_tm_set_rx_enable(struct mt7615_dev *dev, bool en) 160 + { 161 + u32 rqcr_mask = (MT_ARB_RQCR_RX_START | 162 + MT_ARB_RQCR_RXV_START | 163 + MT_ARB_RQCR_RXV_R_EN | 164 + MT_ARB_RQCR_RXV_T_EN) * 165 + (BIT(0) | BIT(MT_ARB_RQCR_BAND_SHIFT)); 166 + 167 + if (en) { 168 + mt76_clear(dev, MT_ARB_SCR, 169 + MT_ARB_SCR_RX0_DISABLE | MT_ARB_SCR_RX1_DISABLE); 170 + mt76_set(dev, MT_ARB_RQCR, rqcr_mask); 171 + } else { 172 + mt76_set(dev, MT_ARB_SCR, 173 + MT_ARB_SCR_RX0_DISABLE | MT_ARB_SCR_RX1_DISABLE); 174 + mt76_clear(dev, MT_ARB_RQCR, rqcr_mask); 175 + } 176 + } 177 + 178 + static void 179 + mt7615_tm_set_tx_antenna(struct mt7615_dev *dev, bool en) 180 + { 181 + struct mt76_testmode_data *td = &dev->mt76.test; 182 + u8 mask = td->tx_antenna_mask; 183 + int i; 184 + 185 + if (!mask) 186 + return; 187 + 188 + if (!en) 189 + mask = dev->phy.chainmask; 190 + 191 + for (i = 0; i < 4; i++) { 192 + mt76_rmw_field(dev, MT_WF_PHY_RFINTF3_0(i), 193 + MT_WF_PHY_RFINTF3_0_ANT, 194 + td->tx_antenna_mask & BIT(i) ? 0 : 0xa); 195 + 196 + } 197 + 198 + /* 2.4 GHz band */ 199 + mt76_rmw_field(dev, MT_ANT_SWITCH_CON(3), MT_ANT_SWITCH_CON_MODE(0), 200 + (td->tx_antenna_mask & BIT(0)) ? 0x8 : 0x1b); 201 + mt76_rmw_field(dev, MT_ANT_SWITCH_CON(4), MT_ANT_SWITCH_CON_MODE(2), 202 + (td->tx_antenna_mask & BIT(1)) ? 0xe : 0x1b); 203 + mt76_rmw_field(dev, MT_ANT_SWITCH_CON(6), MT_ANT_SWITCH_CON_MODE1(0), 204 + (td->tx_antenna_mask & BIT(2)) ? 0x0 : 0xf); 205 + mt76_rmw_field(dev, MT_ANT_SWITCH_CON(7), MT_ANT_SWITCH_CON_MODE1(2), 206 + (td->tx_antenna_mask & BIT(3)) ? 
0x6 : 0xf); 207 + 208 + /* 5 GHz band */ 209 + mt76_rmw_field(dev, MT_ANT_SWITCH_CON(4), MT_ANT_SWITCH_CON_MODE(1), 210 + (td->tx_antenna_mask & BIT(0)) ? 0xd : 0x1b); 211 + mt76_rmw_field(dev, MT_ANT_SWITCH_CON(2), MT_ANT_SWITCH_CON_MODE(3), 212 + (td->tx_antenna_mask & BIT(1)) ? 0x13 : 0x1b); 213 + mt76_rmw_field(dev, MT_ANT_SWITCH_CON(7), MT_ANT_SWITCH_CON_MODE1(1), 214 + (td->tx_antenna_mask & BIT(2)) ? 0x5 : 0xf); 215 + mt76_rmw_field(dev, MT_ANT_SWITCH_CON(8), MT_ANT_SWITCH_CON_MODE1(3), 216 + (td->tx_antenna_mask & BIT(3)) ? 0xb : 0xf); 217 + 218 + for (i = 0; i < 4; i++) { 219 + u32 val; 220 + 221 + val = mt7615_rf_rr(dev, i, 0x48); 222 + val &= ~(0x3ff << 20); 223 + if (td->tx_antenna_mask & BIT(i)) 224 + val |= 3 << 20; 225 + else 226 + val |= (2 << 28) | (2 << 26) | (8 << 20); 227 + mt7615_rf_wr(dev, i, 0x48, val); 228 + } 229 + } 230 + 231 + static void 232 + mt7615_tm_set_tx_frames(struct mt7615_dev *dev, bool en) 233 + { 234 + struct ieee80211_tx_info *info; 235 + struct sk_buff *skb = dev->mt76.test.tx_skb; 236 + 237 + mt7615_mcu_set_chan_info(&dev->phy, MCU_EXT_CMD_SET_RX_PATH); 238 + mt7615_tm_set_tx_antenna(dev, en); 239 + mt7615_tm_set_rx_enable(dev, !en); 240 + if (!en || !skb) 241 + return; 242 + 243 + info = IEEE80211_SKB_CB(skb); 244 + info->control.vif = dev->phy.monitor_vif; 245 + } 246 + 247 + static void 248 + mt7615_tm_update_params(struct mt7615_dev *dev, u32 changed) 249 + { 250 + struct mt76_testmode_data *td = &dev->mt76.test; 251 + bool en = dev->mt76.test.state != MT76_TM_STATE_OFF; 252 + 253 + if (changed & BIT(TM_CHANGED_TXPOWER_CTRL)) 254 + mt7615_mcu_set_test_param(dev, MCU_ATE_SET_TX_POWER_CONTROL, 255 + en, en && td->tx_power_control); 256 + if (changed & BIT(TM_CHANGED_FREQ_OFFSET)) 257 + mt7615_mcu_set_test_param(dev, MCU_ATE_SET_FREQ_OFFSET, 258 + en, en ? 
td->freq_offset : 0); 259 + if (changed & BIT(TM_CHANGED_TXPOWER)) 260 + mt7615_tm_set_tx_power(&dev->phy); 261 + } 262 + 263 + static int 264 + mt7615_tm_set_state(struct mt76_dev *mdev, enum mt76_testmode_state state) 265 + { 266 + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); 267 + struct mt76_testmode_data *td = &mdev->test; 268 + enum mt76_testmode_state prev_state = td->state; 269 + 270 + mdev->test.state = state; 271 + 272 + if (prev_state == MT76_TM_STATE_TX_FRAMES) 273 + mt7615_tm_set_tx_frames(dev, false); 274 + else if (state == MT76_TM_STATE_TX_FRAMES) 275 + mt7615_tm_set_tx_frames(dev, true); 276 + 277 + if (state <= MT76_TM_STATE_IDLE) 278 + mt7615_tm_init(dev); 279 + 280 + if ((state == MT76_TM_STATE_IDLE && 281 + prev_state == MT76_TM_STATE_OFF) || 282 + (state == MT76_TM_STATE_OFF && 283 + prev_state == MT76_TM_STATE_IDLE)) { 284 + u32 changed = 0; 285 + int i; 286 + 287 + for (i = 0; i < ARRAY_SIZE(tm_change_map); i++) { 288 + u16 cur = tm_change_map[i]; 289 + 290 + if (td->param_set[cur / 32] & BIT(cur % 32)) 291 + changed |= BIT(i); 292 + } 293 + 294 + mt7615_tm_update_params(dev, changed); 295 + } 296 + 297 + return 0; 298 + } 299 + 300 + static int 301 + mt7615_tm_set_params(struct mt76_dev *mdev, struct nlattr **tb, 302 + enum mt76_testmode_state new_state) 303 + { 304 + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); 305 + struct mt76_testmode_data *td = &dev->mt76.test; 306 + u32 changed = 0; 307 + int i; 308 + 309 + BUILD_BUG_ON(NUM_TM_CHANGED >= 32); 310 + 311 + if (new_state == MT76_TM_STATE_OFF || 312 + td->state == MT76_TM_STATE_OFF) 313 + return 0; 314 + 315 + if (td->tx_antenna_mask & ~dev->phy.chainmask) 316 + return -EINVAL; 317 + 318 + for (i = 0; i < ARRAY_SIZE(tm_change_map); i++) { 319 + if (tb[tm_change_map[i]]) 320 + changed |= BIT(i); 321 + } 322 + 323 + mt7615_tm_update_params(dev, changed); 324 + 325 + return 0; 326 + } 327 + 328 + static int 329 + mt7615_tm_dump_stats(struct 
mt76_dev *mdev, struct sk_buff *msg) 330 + { 331 + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); 332 + void *rx, *rssi; 333 + int i; 334 + 335 + rx = nla_nest_start(msg, MT76_TM_STATS_ATTR_LAST_RX); 336 + if (!rx) 337 + return -ENOMEM; 338 + 339 + if (nla_put_s32(msg, MT76_TM_RX_ATTR_FREQ_OFFSET, dev->test.last_freq_offset) || 340 + nla_put_s32(msg, MT76_TM_RX_ATTR_IB_RSSI, dev->test.last_ib_rssi) || 341 + nla_put_s32(msg, MT76_TM_RX_ATTR_WB_RSSI, dev->test.last_wb_rssi)) 342 + return -ENOMEM; 343 + 344 + rssi = nla_nest_start(msg, MT76_TM_RX_ATTR_RCPI); 345 + if (!rssi) 346 + return -ENOMEM; 347 + 348 + for (i = 0; i < ARRAY_SIZE(dev->test.last_rcpi); i++) 349 + if (nla_put_u8(msg, i, dev->test.last_rcpi[i])) 350 + return -ENOMEM; 351 + 352 + nla_nest_end(msg, rssi); 353 + 354 + nla_nest_end(msg, rx); 355 + 356 + return 0; 357 + } 358 + 359 + const struct mt76_testmode_ops mt7615_testmode_ops = { 360 + .set_state = mt7615_tm_set_state, 361 + .set_params = mt7615_tm_set_params, 362 + .dump_stats = mt7615_tm_dump_stats, 363 + };
+17 -229
drivers/net/wireless/mediatek/mt76/mt7615/usb.c
··· 15 15 #include "mcu.h" 16 16 #include "regs.h" 17 17 18 - static const u32 mt7663u_reg_map[] = { 19 - [MT_TOP_CFG_BASE] = 0x80020000, 20 - [MT_HW_BASE] = 0x80000000, 21 - [MT_DMA_SHDL_BASE] = 0x5000a000, 22 - [MT_HIF_BASE] = 0x50000000, 23 - [MT_CSR_BASE] = 0x40000000, 24 - [MT_EFUSE_ADDR_BASE] = 0x78011000, 25 - [MT_TOP_MISC_BASE] = 0x81020000, 26 - [MT_PLE_BASE] = 0x82060000, 27 - [MT_PSE_BASE] = 0x82068000, 28 - [MT_PHY_BASE] = 0x82070000, 29 - [MT_WTBL_BASE_ADDR] = 0x820e0000, 30 - [MT_CFG_BASE] = 0x820f0000, 31 - [MT_AGG_BASE] = 0x820f2000, 32 - [MT_ARB_BASE] = 0x820f3000, 33 - [MT_TMAC_BASE] = 0x820f4000, 34 - [MT_RMAC_BASE] = 0x820f5000, 35 - [MT_DMA_BASE] = 0x820f7000, 36 - [MT_PF_BASE] = 0x820f8000, 37 - [MT_WTBL_BASE_ON] = 0x820f9000, 38 - [MT_WTBL_BASE_OFF] = 0x820f9800, 39 - [MT_LPON_BASE] = 0x820fb000, 40 - [MT_MIB_BASE] = 0x820fd000, 41 - }; 42 - 43 18 static const struct usb_device_id mt7615_device_table[] = { 44 19 { USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7663, 0xff, 0xff, 0xff) }, 45 20 { }, ··· 39 64 mt76u_queues_deinit(&dev->mt76); 40 65 } 41 66 42 - static void 43 - mt7663u_mac_write_txwi(struct mt7615_dev *dev, struct mt76_wcid *wcid, 44 - enum mt76_txq_id qid, struct ieee80211_sta *sta, 45 - struct sk_buff *skb) 67 + static void mt7663u_init_work(struct work_struct *work) 46 68 { 47 - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 48 - struct ieee80211_key_conf *key = info->control.hw_key; 49 - __le32 *txwi; 50 - int pid; 51 - 52 - if (!wcid) 53 - wcid = &dev->mt76.global_wcid; 54 - 55 - pid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb); 56 - 57 - txwi = (__le32 *)(skb->data - MT_USB_TXD_SIZE); 58 - memset(txwi, 0, MT_USB_TXD_SIZE); 59 - mt7615_mac_write_txwi(dev, txwi, skb, wcid, sta, pid, key, false); 60 - skb_push(skb, MT_USB_TXD_SIZE); 61 - } 62 - 63 - static int 64 - __mt7663u_mac_set_rates(struct mt7615_dev *dev, 65 - struct mt7615_wtbl_desc *wd) 66 - { 67 - struct mt7615_rate_desc *rate = &wd->rate; 68 - struct 
mt7615_sta *sta = wd->sta; 69 - u32 w5, w27, addr, val; 70 - 71 - lockdep_assert_held(&dev->mt76.mutex); 72 - 73 - if (!sta) 74 - return -EINVAL; 75 - 76 - if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000)) 77 - return -ETIMEDOUT; 78 - 79 - addr = mt7615_mac_wtbl_addr(dev, sta->wcid.idx); 80 - 81 - w27 = mt76_rr(dev, addr + 27 * 4); 82 - w27 &= ~MT_WTBL_W27_CC_BW_SEL; 83 - w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, rate->bw); 84 - 85 - w5 = mt76_rr(dev, addr + 5 * 4); 86 - w5 &= ~(MT_WTBL_W5_BW_CAP | MT_WTBL_W5_CHANGE_BW_RATE | 87 - MT_WTBL_W5_MPDU_OK_COUNT | 88 - MT_WTBL_W5_MPDU_FAIL_COUNT | 89 - MT_WTBL_W5_RATE_IDX); 90 - w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, rate->bw) | 91 - FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE, 92 - rate->bw_idx ? rate->bw_idx - 1 : 7); 93 - 94 - mt76_wr(dev, MT_WTBL_RIUCR0, w5); 95 - 96 - mt76_wr(dev, MT_WTBL_RIUCR1, 97 - FIELD_PREP(MT_WTBL_RIUCR1_RATE0, rate->probe_val) | 98 - FIELD_PREP(MT_WTBL_RIUCR1_RATE1, rate->val[0]) | 99 - FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, rate->val[1])); 100 - 101 - mt76_wr(dev, MT_WTBL_RIUCR2, 102 - FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, rate->val[1] >> 8) | 103 - FIELD_PREP(MT_WTBL_RIUCR2_RATE3, rate->val[1]) | 104 - FIELD_PREP(MT_WTBL_RIUCR2_RATE4, rate->val[2]) | 105 - FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, rate->val[2])); 106 - 107 - mt76_wr(dev, MT_WTBL_RIUCR3, 108 - FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, rate->val[2] >> 4) | 109 - FIELD_PREP(MT_WTBL_RIUCR3_RATE6, rate->val[3]) | 110 - FIELD_PREP(MT_WTBL_RIUCR3_RATE7, rate->val[3])); 111 - 112 - mt76_wr(dev, MT_WTBL_UPDATE, 113 - FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, sta->wcid.idx) | 114 - MT_WTBL_UPDATE_RATE_UPDATE | 115 - MT_WTBL_UPDATE_TX_COUNT_CLEAR); 116 - 117 - mt76_wr(dev, addr + 27 * 4, w27); 118 - 119 - mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */ 120 - val = mt76_rr(dev, MT_LPON_UTTR0); 121 - sta->rate_set_tsf = (val & ~BIT(0)) | rate->rateset; 122 - 123 - if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET)) 124 - mt76_poll(dev, 
MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000); 125 - 126 - sta->rate_count = 2 * MT7615_RATE_RETRY * sta->n_rates; 127 - sta->wcid.tx_info |= MT_WCID_TX_INFO_SET; 128 - 129 - return 0; 130 - } 131 - 132 - static int 133 - __mt7663u_mac_set_key(struct mt7615_dev *dev, 134 - struct mt7615_wtbl_desc *wd) 135 - { 136 - struct mt7615_key_desc *key = &wd->key; 137 - struct mt7615_sta *sta = wd->sta; 138 - enum mt7615_cipher_type cipher; 139 - struct mt76_wcid *wcid; 140 - int err; 141 - 142 - lockdep_assert_held(&dev->mt76.mutex); 143 - 144 - if (!sta) 145 - return -EINVAL; 146 - 147 - cipher = mt7615_mac_get_cipher(key->cipher); 148 - if (cipher == MT_CIPHER_NONE) 149 - return -EOPNOTSUPP; 150 - 151 - wcid = &wd->sta->wcid; 152 - 153 - mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, key->cmd); 154 - err = mt7615_mac_wtbl_update_key(dev, wcid, key->key, key->keylen, 155 - cipher, key->cmd); 156 - if (err < 0) 157 - return err; 158 - 159 - err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx, 160 - key->cmd); 161 - if (err < 0) 162 - return err; 163 - 164 - if (key->cmd == SET_KEY) 165 - wcid->cipher |= BIT(cipher); 166 - else 167 - wcid->cipher &= ~BIT(cipher); 168 - 169 - return 0; 170 - } 171 - 172 - void mt7663u_wtbl_work(struct work_struct *work) 173 - { 174 - struct mt7615_wtbl_desc *wd, *wd_next; 175 69 struct mt7615_dev *dev; 176 70 177 - dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev, 178 - wtbl_work); 71 + dev = container_of(work, struct mt7615_dev, mcu_work); 72 + if (mt7663u_mcu_init(dev)) 73 + return; 179 74 180 - list_for_each_entry_safe(wd, wd_next, &dev->wd_head, node) { 181 - spin_lock_bh(&dev->mt76.lock); 182 - list_del(&wd->node); 183 - spin_unlock_bh(&dev->mt76.lock); 184 - 185 - mutex_lock(&dev->mt76.mutex); 186 - switch (wd->type) { 187 - case MT7615_WTBL_RATE_DESC: 188 - __mt7663u_mac_set_rates(dev, wd); 189 - break; 190 - case MT7615_WTBL_KEY_DESC: 191 - __mt7663u_mac_set_key(dev, wd); 192 - break; 193 - } 194 - 
mutex_unlock(&dev->mt76.mutex); 195 - 196 - kfree(wd); 197 - } 198 - } 199 - 200 - static void 201 - mt7663u_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid, 202 - struct mt76_queue_entry *e) 203 - { 204 - skb_pull(e->skb, MT_USB_HDR_SIZE + MT_USB_TXD_SIZE); 205 - mt76_tx_complete_skb(mdev, e->skb); 206 - } 207 - 208 - static int 209 - mt7663u_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, 210 - enum mt76_txq_id qid, struct mt76_wcid *wcid, 211 - struct ieee80211_sta *sta, 212 - struct mt76_tx_info *tx_info) 213 - { 214 - struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); 215 - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb); 216 - 217 - if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) { 218 - struct mt7615_sta *msta; 219 - 220 - msta = container_of(wcid, struct mt7615_sta, wcid); 221 - spin_lock_bh(&dev->mt76.lock); 222 - mt7615_mac_set_rates(&dev->phy, msta, &info->control.rates[0], 223 - msta->rates); 224 - msta->rate_probe = true; 225 - spin_unlock_bh(&dev->mt76.lock); 226 - } 227 - mt7663u_mac_write_txwi(dev, wcid, qid, sta, tx_info->skb); 228 - 229 - return mt76u_skb_dma_info(tx_info->skb, tx_info->skb->len); 230 - } 231 - 232 - static bool mt7663u_tx_status_data(struct mt76_dev *mdev, u8 *update) 233 - { 234 - struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); 235 - 236 - mutex_lock(&dev->mt76.mutex); 237 - mt7615_mac_sta_poll(dev); 238 - mutex_unlock(&dev->mt76.mutex); 239 - 240 - return 0; 75 + mt7615_mcu_set_eeprom(dev); 76 + mt7615_mac_init(dev); 77 + mt7615_phy_init(dev); 78 + mt7615_mcu_del_wtbl_all(dev); 79 + mt7615_check_offload_capability(dev); 241 80 } 242 81 243 82 static int mt7663u_probe(struct usb_interface *usb_intf, ··· 60 271 static const struct mt76_driver_ops drv_ops = { 61 272 .txwi_size = MT_USB_TXD_SIZE, 62 273 .drv_flags = MT_DRV_RX_DMA_HDR | MT_DRV_HW_MGMT_TXQ, 63 - .tx_prepare_skb = mt7663u_tx_prepare_skb, 64 - .tx_complete_skb = mt7663u_tx_complete_skb, 65 - 
.tx_status_data = mt7663u_tx_status_data, 274 + .tx_prepare_skb = mt7663_usb_sdio_tx_prepare_skb, 275 + .tx_complete_skb = mt7663_usb_sdio_tx_complete_skb, 276 + .tx_status_data = mt7663_usb_sdio_tx_status_data, 66 277 .rx_skb = mt7615_queue_rx_skb, 67 278 .sta_ps = mt7615_sta_ps, 68 279 .sta_add = mt7615_mac_sta_add, ··· 92 303 93 304 usb_set_intfdata(usb_intf, dev); 94 305 95 - dev->reg_map = mt7663u_reg_map; 306 + INIT_WORK(&dev->mcu_work, mt7663u_init_work); 307 + dev->reg_map = mt7663_usb_sdio_reg_map; 96 308 dev->ops = ops; 97 309 ret = mt76u_init(mdev, usb_intf, true); 98 310 if (ret < 0) ··· 132 342 if (ret) 133 343 goto error_free_q; 134 344 135 - ret = mt7663u_register_device(dev); 345 + ret = mt7663_usb_sdio_register_device(dev); 136 346 if (ret) 137 347 goto error_free_q; 138 348 ··· 141 351 error_free_q: 142 352 mt76u_queues_deinit(&dev->mt76); 143 353 error: 144 - mt76u_deinit(&dev->mt76); 145 354 usb_set_intfdata(usb_intf, NULL); 146 355 usb_put_dev(interface_to_usbdev(usb_intf)); 147 356 148 - ieee80211_free_hw(mdev->hw); 357 + mt76_free_device(&dev->mt76); 149 358 150 359 return ret; 151 360 } ··· 162 373 usb_set_intfdata(usb_intf, NULL); 163 374 usb_put_dev(interface_to_usbdev(usb_intf)); 164 375 165 - mt76u_deinit(&dev->mt76); 166 - ieee80211_free_hw(dev->mt76.hw); 376 + mt76_free_device(&dev->mt76); 167 377 } 168 378 169 379 #ifdef CONFIG_PM
-145
drivers/net/wireless/mediatek/mt76/mt7615/usb_init.c
··· 1 - // SPDX-License-Identifier: GPL-2.0 2 - /* Copyright (C) 2019 MediaTek Inc. 3 - * 4 - * Author: Felix Fietkau <nbd@nbd.name> 5 - * Lorenzo Bianconi <lorenzo@kernel.org> 6 - * Sean Wang <sean.wang@mediatek.com> 7 - */ 8 - 9 - #include <linux/kernel.h> 10 - #include <linux/module.h> 11 - 12 - #include "mt7615.h" 13 - #include "mac.h" 14 - #include "regs.h" 15 - 16 - static int mt7663u_dma_sched_init(struct mt7615_dev *dev) 17 - { 18 - int i; 19 - 20 - mt76_rmw(dev, MT_DMA_SHDL(MT_DMASHDL_PKT_MAX_SIZE), 21 - MT_DMASHDL_PKT_MAX_SIZE_PLE | MT_DMASHDL_PKT_MAX_SIZE_PSE, 22 - FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PLE, 1) | 23 - FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PSE, 8)); 24 - 25 - /* disable refill group 5 - group 15 and raise group 2 26 - * and 3 as high priority. 27 - */ 28 - mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_REFILL), 0xffe00006); 29 - mt76_clear(dev, MT_DMA_SHDL(MT_DMASHDL_PAGE), BIT(16)); 30 - 31 - for (i = 0; i < 5; i++) 32 - mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_GROUP_QUOTA(i)), 33 - FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x3) | 34 - FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x1ff)); 35 - 36 - mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(0)), 0x42104210); 37 - mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(1)), 0x42104210); 38 - 39 - mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(2)), 0x4444); 40 - 41 - /* group pririority from high to low: 42 - * 15 (cmd groups) > 4 > 3 > 2 > 1 > 0. 
43 - */ 44 - mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET0), 0x6501234f); 45 - mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET1), 0xedcba987); 46 - mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_OPTIONAL), 0x7004801c); 47 - 48 - mt76_wr(dev, MT_UDMA_WLCFG_1, 49 - FIELD_PREP(MT_WL_TX_TMOUT_LMT, 80000) | 50 - FIELD_PREP(MT_WL_RX_AGG_PKT_LMT, 1)); 51 - 52 - /* setup UDMA Rx Flush */ 53 - mt76_clear(dev, MT_UDMA_WLCFG_0, MT_WL_RX_FLUSH); 54 - /* hif reset */ 55 - mt76_set(dev, MT_HIF_RST, MT_HIF_LOGIC_RST_N); 56 - 57 - mt76_set(dev, MT_UDMA_WLCFG_0, 58 - MT_WL_RX_AGG_EN | MT_WL_RX_EN | MT_WL_TX_EN | 59 - MT_WL_RX_MPSZ_PAD0 | MT_TICK_1US_EN | 60 - MT_WL_TX_TMOUT_FUNC_EN); 61 - mt76_rmw(dev, MT_UDMA_WLCFG_0, MT_WL_RX_AGG_LMT | MT_WL_RX_AGG_TO, 62 - FIELD_PREP(MT_WL_RX_AGG_LMT, 32) | 63 - FIELD_PREP(MT_WL_RX_AGG_TO, 100)); 64 - 65 - return 0; 66 - } 67 - 68 - static int mt7663u_init_hardware(struct mt7615_dev *dev) 69 - { 70 - int ret, idx; 71 - 72 - ret = mt7615_eeprom_init(dev, MT_EFUSE_BASE); 73 - if (ret < 0) 74 - return ret; 75 - 76 - ret = mt7663u_dma_sched_init(dev); 77 - if (ret) 78 - return ret; 79 - 80 - set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state); 81 - 82 - /* Beacon and mgmt frames should occupy wcid 0 */ 83 - idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1); 84 - if (idx) 85 - return -ENOSPC; 86 - 87 - dev->mt76.global_wcid.idx = idx; 88 - dev->mt76.global_wcid.hw_key_idx = -1; 89 - rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid); 90 - 91 - return 0; 92 - } 93 - 94 - static void mt7663u_init_work(struct work_struct *work) 95 - { 96 - struct mt7615_dev *dev; 97 - 98 - dev = container_of(work, struct mt7615_dev, mcu_work); 99 - if (mt7663u_mcu_init(dev)) 100 - return; 101 - 102 - mt7615_mcu_set_eeprom(dev); 103 - mt7615_mac_init(dev); 104 - mt7615_phy_init(dev); 105 - mt7615_mcu_del_wtbl_all(dev); 106 - mt7615_check_offload_capability(dev); 107 - } 108 - 109 - int mt7663u_register_device(struct mt7615_dev *dev) 110 - { 111 - struct 
ieee80211_hw *hw = mt76_hw(dev); 112 - int err; 113 - 114 - INIT_WORK(&dev->wtbl_work, mt7663u_wtbl_work); 115 - INIT_WORK(&dev->mcu_work, mt7663u_init_work); 116 - INIT_LIST_HEAD(&dev->wd_head); 117 - mt7615_init_device(dev); 118 - 119 - err = mt7663u_init_hardware(dev); 120 - if (err) 121 - return err; 122 - 123 - hw->extra_tx_headroom += MT_USB_HDR_SIZE + MT_USB_TXD_SIZE; 124 - /* check hw sg support in order to enable AMSDU */ 125 - hw->max_tx_fragments = dev->mt76.usb.sg_en ? MT_HW_TXP_MAX_BUF_NUM : 1; 126 - 127 - err = mt76_register_device(&dev->mt76, true, mt7615_rates, 128 - ARRAY_SIZE(mt7615_rates)); 129 - if (err < 0) 130 - return err; 131 - 132 - if (!dev->mt76.usb.sg_en) { 133 - struct ieee80211_sta_vht_cap *vht_cap; 134 - 135 - /* decrease max A-MSDU size if SG is not supported */ 136 - vht_cap = &dev->mphy.sband_5g.sband.vht_cap; 137 - vht_cap->cap &= ~IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454; 138 - } 139 - 140 - ieee80211_queue_work(hw, &dev->mcu_work); 141 - mt7615_init_txpower(dev, &dev->mphy.sband_2g.sband); 142 - mt7615_init_txpower(dev, &dev->mphy.sband_5g.sband); 143 - 144 - return mt7615_init_debugfs(dev); 145 - }
+5 -2
drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
··· 28 28 else 29 29 ep = MT_EP_OUT_AC_BE; 30 30 31 - ret = mt76u_skb_dma_info(skb, skb->len); 31 + put_unaligned_le32(skb->len, skb_push(skb, sizeof(skb->len))); 32 + ret = mt76_skb_adjust_pad(skb); 32 33 if (ret < 0) 33 34 goto out; 34 35 35 36 ret = mt76u_bulk_msg(&dev->mt76, skb->data, skb->len, NULL, 36 37 1000, ep); 37 - dev_kfree_skb(skb); 38 38 if (ret < 0) 39 39 goto out; 40 40 ··· 43 43 44 44 out: 45 45 mutex_unlock(&mdev->mcu.mutex); 46 + dev_kfree_skb(skb); 46 47 47 48 return ret; 48 49 } ··· 61 60 62 61 dev->mt76.mcu_ops = &mt7663u_mcu_ops, 63 62 63 + /* usb does not support runtime-pm */ 64 + clear_bit(MT76_STATE_PM, &dev->mphy.state); 64 65 mt76_set(dev, MT_UDMA_TX_QSEL, MT_FW_DL_EN); 65 66 66 67 if (test_and_clear_bit(MT76_STATE_POWER_OFF, &dev->mphy.state)) {
+394
drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
··· 1 + // SPDX-License-Identifier: ISC 2 + /* Copyright (C) 2020 MediaTek Inc. 3 + * 4 + * Author: Lorenzo Bianconi <lorenzo@kernel.org> 5 + * Sean Wang <sean.wang@mediatek.com> 6 + */ 7 + 8 + #include <linux/kernel.h> 9 + #include <linux/module.h> 10 + #include <linux/usb.h> 11 + 12 + #include "mt7615.h" 13 + #include "mac.h" 14 + #include "mcu.h" 15 + #include "regs.h" 16 + 17 + const u32 mt7663_usb_sdio_reg_map[] = { 18 + [MT_TOP_CFG_BASE] = 0x80020000, 19 + [MT_HW_BASE] = 0x80000000, 20 + [MT_DMA_SHDL_BASE] = 0x5000a000, 21 + [MT_HIF_BASE] = 0x50000000, 22 + [MT_CSR_BASE] = 0x40000000, 23 + [MT_EFUSE_ADDR_BASE] = 0x78011000, 24 + [MT_TOP_MISC_BASE] = 0x81020000, 25 + [MT_PLE_BASE] = 0x82060000, 26 + [MT_PSE_BASE] = 0x82068000, 27 + [MT_PP_BASE] = 0x8206c000, 28 + [MT_WTBL_BASE_ADDR] = 0x820e0000, 29 + [MT_CFG_BASE] = 0x820f0000, 30 + [MT_AGG_BASE] = 0x820f2000, 31 + [MT_ARB_BASE] = 0x820f3000, 32 + [MT_TMAC_BASE] = 0x820f4000, 33 + [MT_RMAC_BASE] = 0x820f5000, 34 + [MT_DMA_BASE] = 0x820f7000, 35 + [MT_PF_BASE] = 0x820f8000, 36 + [MT_WTBL_BASE_ON] = 0x820f9000, 37 + [MT_WTBL_BASE_OFF] = 0x820f9800, 38 + [MT_LPON_BASE] = 0x820fb000, 39 + [MT_MIB_BASE] = 0x820fd000, 40 + }; 41 + EXPORT_SYMBOL_GPL(mt7663_usb_sdio_reg_map); 42 + 43 + static void 44 + mt7663_usb_sdio_write_txwi(struct mt7615_dev *dev, struct mt76_wcid *wcid, 45 + enum mt76_txq_id qid, struct ieee80211_sta *sta, 46 + struct sk_buff *skb) 47 + { 48 + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 49 + struct ieee80211_key_conf *key = info->control.hw_key; 50 + __le32 *txwi; 51 + int pid; 52 + 53 + if (!wcid) 54 + wcid = &dev->mt76.global_wcid; 55 + 56 + pid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb); 57 + 58 + txwi = (__le32 *)(skb->data - MT_USB_TXD_SIZE); 59 + memset(txwi, 0, MT_USB_TXD_SIZE); 60 + mt7615_mac_write_txwi(dev, txwi, skb, wcid, sta, pid, key, false); 61 + skb_push(skb, MT_USB_TXD_SIZE); 62 + } 63 + 64 + static int 65 + mt7663_usb_sdio_set_rates(struct mt7615_dev *dev, 66 
+ struct mt7615_wtbl_desc *wd) 67 + { 68 + struct mt7615_rate_desc *rate = &wd->rate; 69 + struct mt7615_sta *sta = wd->sta; 70 + u32 w5, w27, addr, val; 71 + 72 + lockdep_assert_held(&dev->mt76.mutex); 73 + 74 + if (!sta) 75 + return -EINVAL; 76 + 77 + if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000)) 78 + return -ETIMEDOUT; 79 + 80 + addr = mt7615_mac_wtbl_addr(dev, sta->wcid.idx); 81 + 82 + w27 = mt76_rr(dev, addr + 27 * 4); 83 + w27 &= ~MT_WTBL_W27_CC_BW_SEL; 84 + w27 |= FIELD_PREP(MT_WTBL_W27_CC_BW_SEL, rate->bw); 85 + 86 + w5 = mt76_rr(dev, addr + 5 * 4); 87 + w5 &= ~(MT_WTBL_W5_BW_CAP | MT_WTBL_W5_CHANGE_BW_RATE | 88 + MT_WTBL_W5_MPDU_OK_COUNT | 89 + MT_WTBL_W5_MPDU_FAIL_COUNT | 90 + MT_WTBL_W5_RATE_IDX); 91 + w5 |= FIELD_PREP(MT_WTBL_W5_BW_CAP, rate->bw) | 92 + FIELD_PREP(MT_WTBL_W5_CHANGE_BW_RATE, 93 + rate->bw_idx ? rate->bw_idx - 1 : 7); 94 + 95 + mt76_wr(dev, MT_WTBL_RIUCR0, w5); 96 + 97 + mt76_wr(dev, MT_WTBL_RIUCR1, 98 + FIELD_PREP(MT_WTBL_RIUCR1_RATE0, rate->probe_val) | 99 + FIELD_PREP(MT_WTBL_RIUCR1_RATE1, rate->val[0]) | 100 + FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, rate->val[1])); 101 + 102 + mt76_wr(dev, MT_WTBL_RIUCR2, 103 + FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, rate->val[1] >> 8) | 104 + FIELD_PREP(MT_WTBL_RIUCR2_RATE3, rate->val[1]) | 105 + FIELD_PREP(MT_WTBL_RIUCR2_RATE4, rate->val[2]) | 106 + FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, rate->val[2])); 107 + 108 + mt76_wr(dev, MT_WTBL_RIUCR3, 109 + FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, rate->val[2] >> 4) | 110 + FIELD_PREP(MT_WTBL_RIUCR3_RATE6, rate->val[3]) | 111 + FIELD_PREP(MT_WTBL_RIUCR3_RATE7, rate->val[3])); 112 + 113 + mt76_wr(dev, MT_WTBL_UPDATE, 114 + FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, sta->wcid.idx) | 115 + MT_WTBL_UPDATE_RATE_UPDATE | 116 + MT_WTBL_UPDATE_TX_COUNT_CLEAR); 117 + 118 + mt76_wr(dev, addr + 27 * 4, w27); 119 + 120 + sta->rate_probe = sta->rateset[rate->rateset].probe_rate.idx != -1; 121 + 122 + mt76_set(dev, MT_LPON_T0CR, MT_LPON_T0CR_MODE); /* TSF read */ 123 + val 
= mt76_rr(dev, MT_LPON_UTTR0); 124 + sta->rate_set_tsf = (val & ~BIT(0)) | rate->rateset; 125 + 126 + if (!(sta->wcid.tx_info & MT_WCID_TX_INFO_SET)) 127 + mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000); 128 + 129 + sta->rate_count = 2 * MT7615_RATE_RETRY * sta->n_rates; 130 + sta->wcid.tx_info |= MT_WCID_TX_INFO_SET; 131 + 132 + return 0; 133 + } 134 + 135 + static int 136 + mt7663_usb_sdio_set_key(struct mt7615_dev *dev, 137 + struct mt7615_wtbl_desc *wd) 138 + { 139 + struct mt7615_key_desc *key = &wd->key; 140 + struct mt7615_sta *sta = wd->sta; 141 + enum mt7615_cipher_type cipher; 142 + struct mt76_wcid *wcid; 143 + int err; 144 + 145 + lockdep_assert_held(&dev->mt76.mutex); 146 + 147 + if (!sta) { 148 + err = -EINVAL; 149 + goto out; 150 + } 151 + 152 + cipher = mt7615_mac_get_cipher(key->cipher); 153 + if (cipher == MT_CIPHER_NONE) { 154 + err = -EOPNOTSUPP; 155 + goto out; 156 + } 157 + 158 + wcid = &wd->sta->wcid; 159 + 160 + mt7615_mac_wtbl_update_cipher(dev, wcid, cipher, key->cmd); 161 + err = mt7615_mac_wtbl_update_key(dev, wcid, key->key, key->keylen, 162 + cipher, key->cmd); 163 + if (err < 0) 164 + goto out; 165 + 166 + err = mt7615_mac_wtbl_update_pk(dev, wcid, cipher, key->keyidx, 167 + key->cmd); 168 + if (err < 0) 169 + goto out; 170 + 171 + if (key->cmd == SET_KEY) 172 + wcid->cipher |= BIT(cipher); 173 + else 174 + wcid->cipher &= ~BIT(cipher); 175 + out: 176 + kfree(key->key); 177 + 178 + return err; 179 + } 180 + 181 + void mt7663_usb_sdio_wtbl_work(struct work_struct *work) 182 + { 183 + struct mt7615_wtbl_desc *wd, *wd_next; 184 + struct list_head wd_list; 185 + struct mt7615_dev *dev; 186 + 187 + dev = (struct mt7615_dev *)container_of(work, struct mt7615_dev, 188 + wtbl_work); 189 + 190 + INIT_LIST_HEAD(&wd_list); 191 + spin_lock_bh(&dev->mt76.lock); 192 + list_splice_init(&dev->wd_head, &wd_list); 193 + spin_unlock_bh(&dev->mt76.lock); 194 + 195 + list_for_each_entry_safe(wd, wd_next, &wd_list, node) { 196 + 
list_del(&wd->node); 197 + 198 + mt7615_mutex_acquire(dev); 199 + 200 + switch (wd->type) { 201 + case MT7615_WTBL_RATE_DESC: 202 + mt7663_usb_sdio_set_rates(dev, wd); 203 + break; 204 + case MT7615_WTBL_KEY_DESC: 205 + mt7663_usb_sdio_set_key(dev, wd); 206 + break; 207 + } 208 + 209 + mt7615_mutex_release(dev); 210 + 211 + kfree(wd); 212 + } 213 + } 214 + EXPORT_SYMBOL_GPL(mt7663_usb_sdio_wtbl_work); 215 + 216 + bool mt7663_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update) 217 + { 218 + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); 219 + 220 + mt7615_mutex_acquire(dev); 221 + mt7615_mac_sta_poll(dev); 222 + mt7615_mutex_release(dev); 223 + 224 + return 0; 225 + } 226 + EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_status_data); 227 + 228 + void mt7663_usb_sdio_tx_complete_skb(struct mt76_dev *mdev, 229 + enum mt76_txq_id qid, 230 + struct mt76_queue_entry *e) 231 + { 232 + unsigned int headroom = MT_USB_TXD_SIZE; 233 + 234 + if (mt76_is_usb(mdev)) 235 + headroom += MT_USB_HDR_SIZE; 236 + skb_pull(e->skb, headroom); 237 + 238 + mt76_tx_complete_skb(mdev, e->skb); 239 + } 240 + EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_complete_skb); 241 + 242 + int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, 243 + enum mt76_txq_id qid, struct mt76_wcid *wcid, 244 + struct ieee80211_sta *sta, 245 + struct mt76_tx_info *tx_info) 246 + { 247 + struct mt7615_sta *msta = container_of(wcid, struct mt7615_sta, wcid); 248 + struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); 249 + struct sk_buff *skb = tx_info->skb; 250 + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 251 + 252 + if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) && 253 + !msta->rate_probe) { 254 + /* request to configure sampling rate */ 255 + spin_lock_bh(&dev->mt76.lock); 256 + mt7615_mac_set_rates(&dev->phy, msta, &info->control.rates[0], 257 + msta->rates); 258 + spin_unlock_bh(&dev->mt76.lock); 259 + } 260 + 261 + 
mt7663_usb_sdio_write_txwi(dev, wcid, qid, sta, skb); 262 + if (mt76_is_usb(mdev)) 263 + put_unaligned_le32(skb->len, skb_push(skb, sizeof(skb->len))); 264 + 265 + return mt76_skb_adjust_pad(skb); 266 + } 267 + EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_prepare_skb); 268 + 269 + static int mt7663u_dma_sched_init(struct mt7615_dev *dev) 270 + { 271 + int i; 272 + 273 + mt76_rmw(dev, MT_DMA_SHDL(MT_DMASHDL_PKT_MAX_SIZE), 274 + MT_DMASHDL_PKT_MAX_SIZE_PLE | MT_DMASHDL_PKT_MAX_SIZE_PSE, 275 + FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PLE, 1) | 276 + FIELD_PREP(MT_DMASHDL_PKT_MAX_SIZE_PSE, 8)); 277 + 278 + /* disable refill group 5 - group 15 and raise group 2 279 + * and 3 as high priority. 280 + */ 281 + mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_REFILL), 0xffe00006); 282 + mt76_clear(dev, MT_DMA_SHDL(MT_DMASHDL_PAGE), BIT(16)); 283 + 284 + for (i = 0; i < 5; i++) 285 + mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_GROUP_QUOTA(i)), 286 + FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MIN, 0x3) | 287 + FIELD_PREP(MT_DMASHDL_GROUP_QUOTA_MAX, 0x1ff)); 288 + 289 + mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(0)), 0x42104210); 290 + mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(1)), 0x42104210); 291 + 292 + mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_Q_MAP(2)), 0x4444); 293 + 294 + /* group pririority from high to low: 295 + * 15 (cmd groups) > 4 > 3 > 2 > 1 > 0. 
296 + */ 297 + mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET0), 0x6501234f); 298 + mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_SCHED_SET1), 0xedcba987); 299 + mt76_wr(dev, MT_DMA_SHDL(MT_DMASHDL_OPTIONAL), 0x7004801c); 300 + 301 + mt76_wr(dev, MT_UDMA_WLCFG_1, 302 + FIELD_PREP(MT_WL_TX_TMOUT_LMT, 80000) | 303 + FIELD_PREP(MT_WL_RX_AGG_PKT_LMT, 1)); 304 + 305 + /* setup UDMA Rx Flush */ 306 + mt76_clear(dev, MT_UDMA_WLCFG_0, MT_WL_RX_FLUSH); 307 + /* hif reset */ 308 + mt76_set(dev, MT_HIF_RST, MT_HIF_LOGIC_RST_N); 309 + 310 + mt76_set(dev, MT_UDMA_WLCFG_0, 311 + MT_WL_RX_AGG_EN | MT_WL_RX_EN | MT_WL_TX_EN | 312 + MT_WL_RX_MPSZ_PAD0 | MT_TICK_1US_EN | 313 + MT_WL_TX_TMOUT_FUNC_EN); 314 + mt76_rmw(dev, MT_UDMA_WLCFG_0, MT_WL_RX_AGG_LMT | MT_WL_RX_AGG_TO, 315 + FIELD_PREP(MT_WL_RX_AGG_LMT, 32) | 316 + FIELD_PREP(MT_WL_RX_AGG_TO, 100)); 317 + 318 + return 0; 319 + } 320 + 321 + static int mt7663_usb_sdio_init_hardware(struct mt7615_dev *dev) 322 + { 323 + int ret, idx; 324 + 325 + ret = mt7615_eeprom_init(dev, MT_EFUSE_BASE); 326 + if (ret < 0) 327 + return ret; 328 + 329 + if (mt76_is_usb(&dev->mt76)) { 330 + ret = mt7663u_dma_sched_init(dev); 331 + if (ret) 332 + return ret; 333 + } 334 + 335 + set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state); 336 + 337 + /* Beacon and mgmt frames should occupy wcid 0 */ 338 + idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7615_WTBL_STA - 1); 339 + if (idx) 340 + return -ENOSPC; 341 + 342 + dev->mt76.global_wcid.idx = idx; 343 + dev->mt76.global_wcid.hw_key_idx = -1; 344 + rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid); 345 + 346 + return 0; 347 + } 348 + 349 + int mt7663_usb_sdio_register_device(struct mt7615_dev *dev) 350 + { 351 + struct ieee80211_hw *hw = mt76_hw(dev); 352 + int err; 353 + 354 + INIT_WORK(&dev->wtbl_work, mt7663_usb_sdio_wtbl_work); 355 + INIT_LIST_HEAD(&dev->wd_head); 356 + mt7615_init_device(dev); 357 + 358 + err = mt7663_usb_sdio_init_hardware(dev); 359 + if (err) 360 + return err; 361 + 362 + /* check 
hw sg support in order to enable AMSDU */ 363 + if (dev->mt76.usb.sg_en || mt76_is_sdio(&dev->mt76)) 364 + hw->max_tx_fragments = MT_HW_TXP_MAX_BUF_NUM; 365 + else 366 + hw->max_tx_fragments = 1; 367 + hw->extra_tx_headroom += MT_USB_TXD_SIZE; 368 + if (mt76_is_usb(&dev->mt76)) 369 + hw->extra_tx_headroom += MT_USB_HDR_SIZE; 370 + 371 + err = mt76_register_device(&dev->mt76, true, mt7615_rates, 372 + ARRAY_SIZE(mt7615_rates)); 373 + if (err < 0) 374 + return err; 375 + 376 + if (!dev->mt76.usb.sg_en) { 377 + struct ieee80211_sta_vht_cap *vht_cap; 378 + 379 + /* decrease max A-MSDU size if SG is not supported */ 380 + vht_cap = &dev->mphy.sband_5g.sband.vht_cap; 381 + vht_cap->cap &= ~IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454; 382 + } 383 + 384 + ieee80211_queue_work(hw, &dev->mcu_work); 385 + mt7615_init_txpower(dev, &dev->mphy.sband_2g.sband); 386 + mt7615_init_txpower(dev, &dev->mphy.sband_5g.sband); 387 + 388 + return mt7615_init_debugfs(dev); 389 + } 390 + EXPORT_SYMBOL_GPL(mt7663_usb_sdio_register_device); 391 + 392 + MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); 393 + MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>"); 394 + MODULE_LICENSE("Dual BSD/GPL");
+2 -4
drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
··· 277 277 err: 278 278 usb_set_intfdata(usb_intf, NULL); 279 279 usb_put_dev(interface_to_usbdev(usb_intf)); 280 - mt76u_deinit(&dev->mt76); 280 + mt76_free_device(&dev->mt76); 281 281 282 - ieee80211_free_hw(mdev->hw); 283 282 return ret; 284 283 } 285 284 ··· 296 297 usb_set_intfdata(usb_intf, NULL); 297 298 usb_put_dev(interface_to_usbdev(usb_intf)); 298 299 299 - mt76u_deinit(&dev->mt76); 300 - ieee80211_free_hw(dev->mt76.hw); 300 + mt76_free_device(&dev->mt76); 301 301 } 302 302 303 303 static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf,
-1
drivers/net/wireless/mediatek/mt76/mt76x02.h
··· 80 80 81 81 struct mutex phy_mutex; 82 82 83 - u16 vif_mask; 84 83 u16 chainmask; 85 84 86 85 u8 txdone_seq;
+1 -1
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
··· 439 439 memset(msta, 0, sizeof(*msta)); 440 440 } 441 441 442 - dev->vif_mask = 0; 442 + dev->mphy.vif_mask = 0; 443 443 dev->mt76.beacon_mask = 0; 444 444 } 445 445
+2 -1
drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
··· 56 56 */ 57 57 info = FIELD_PREP(MT_TXD_INFO_LEN, round_up(skb->len, 4)) | 58 58 FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags; 59 + put_unaligned_le32(info, skb_push(skb, sizeof(info))); 59 60 60 - return mt76u_skb_dma_info(skb, info); 61 + return mt76_skb_adjust_pad(skb); 61 62 } 62 63 63 64 int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
+5 -2
drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
··· 87 87 u32 info; 88 88 int ret; 89 89 90 - if (test_bit(MT76_REMOVED, &dev->phy.state)) 91 - return 0; 90 + if (test_bit(MT76_REMOVED, &dev->phy.state)) { 91 + ret = 0; 92 + goto out; 93 + } 92 94 93 95 if (wait_resp) { 94 96 seq = ++dev->mcu.msg_seq & 0xf; ··· 113 111 if (wait_resp) 114 112 ret = mt76x02u_mcu_wait_resp(dev, seq); 115 113 114 + out: 116 115 consume_skb(skb); 117 116 118 117 return ret;
+4 -4
drivers/net/wireless/mediatek/mt76/mt76x02_util.c
··· 305 305 unsigned int idx = 0; 306 306 307 307 /* Allow to change address in HW if we create first interface. */ 308 - if (!dev->vif_mask && 308 + if (!dev->mphy.vif_mask && 309 309 (((vif->addr[0] ^ dev->mt76.macaddr[0]) & ~GENMASK(4, 1)) || 310 310 memcmp(vif->addr + 1, dev->mt76.macaddr + 1, ETH_ALEN - 1))) 311 311 mt76x02_mac_setaddr(dev, vif->addr); ··· 330 330 idx += 8; 331 331 332 332 /* vif is already set or idx is 8 for AP/Mesh/... */ 333 - if (dev->vif_mask & BIT(idx) || 333 + if (dev->mphy.vif_mask & BIT(idx) || 334 334 (vif->type != NL80211_IFTYPE_STATION && idx > 7)) 335 335 return -EBUSY; 336 336 337 - dev->vif_mask |= BIT(idx); 337 + dev->mphy.vif_mask |= BIT(idx); 338 338 339 339 mt76x02_vif_init(dev, vif, idx); 340 340 return 0; ··· 348 348 struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; 349 349 350 350 mt76_txq_remove(&dev->mt76, vif->txq); 351 - dev->vif_mask &= ~BIT(mvif->idx); 351 + dev->mphy.vif_mask &= ~BIT(mvif->idx); 352 352 } 353 353 EXPORT_SYMBOL_GPL(mt76x02_remove_interface); 354 354
+1
drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
··· 39 39 extern const struct ieee80211_ops mt76x2_ops; 40 40 41 41 int mt76x2_register_device(struct mt76x02_dev *dev); 42 + int mt76x2_resume_device(struct mt76x02_dev *dev); 42 43 43 44 void mt76x2_phy_power_on(struct mt76x02_dev *dev); 44 45 void mt76x2_stop_hardware(struct mt76x02_dev *dev);
+63 -7
drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
··· 9 9 10 10 #include "mt76x2.h" 11 11 12 - static const struct pci_device_id mt76pci_device_table[] = { 12 + static const struct pci_device_id mt76x2e_device_table[] = { 13 13 { PCI_DEVICE(0x14c3, 0x7662) }, 14 14 { PCI_DEVICE(0x14c3, 0x7612) }, 15 15 { PCI_DEVICE(0x14c3, 0x7602) }, ··· 17 17 }; 18 18 19 19 static int 20 - mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 20 + mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id) 21 21 { 22 22 static const struct mt76_driver_ops drv_ops = { 23 23 .txwi_size = sizeof(struct mt76x02_txwi), ··· 93 93 } 94 94 95 95 static void 96 - mt76pci_remove(struct pci_dev *pdev) 96 + mt76x2e_remove(struct pci_dev *pdev) 97 97 { 98 98 struct mt76_dev *mdev = pci_get_drvdata(pdev); 99 99 struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); ··· 103 103 mt76_free_device(mdev); 104 104 } 105 105 106 - MODULE_DEVICE_TABLE(pci, mt76pci_device_table); 106 + static int __maybe_unused 107 + mt76x2e_suspend(struct pci_dev *pdev, pm_message_t state) 108 + { 109 + struct mt76_dev *mdev = pci_get_drvdata(pdev); 110 + int i, err; 111 + 112 + napi_disable(&mdev->tx_napi); 113 + tasklet_kill(&mdev->pre_tbtt_tasklet); 114 + tasklet_kill(&mdev->tx_tasklet); 115 + 116 + mt76_for_each_q_rx(mdev, i) 117 + napi_disable(&mdev->napi[i]); 118 + 119 + pci_enable_wake(pdev, pci_choose_state(pdev, state), true); 120 + pci_save_state(pdev); 121 + err = pci_set_power_state(pdev, pci_choose_state(pdev, state)); 122 + if (err) 123 + goto restore; 124 + 125 + return 0; 126 + 127 + restore: 128 + mt76_for_each_q_rx(mdev, i) 129 + napi_enable(&mdev->napi[i]); 130 + napi_enable(&mdev->tx_napi); 131 + 132 + return err; 133 + } 134 + 135 + static int __maybe_unused 136 + mt76x2e_resume(struct pci_dev *pdev) 137 + { 138 + struct mt76_dev *mdev = pci_get_drvdata(pdev); 139 + struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); 140 + int i, err; 141 + 142 + err = pci_set_power_state(pdev, PCI_D0); 
143 + if (err) 144 + return err; 145 + 146 + pci_restore_state(pdev); 147 + 148 + mt76_for_each_q_rx(mdev, i) { 149 + napi_enable(&mdev->napi[i]); 150 + napi_schedule(&mdev->napi[i]); 151 + } 152 + napi_enable(&mdev->tx_napi); 153 + napi_schedule(&mdev->tx_napi); 154 + 155 + return mt76x2_resume_device(dev); 156 + } 157 + 158 + MODULE_DEVICE_TABLE(pci, mt76x2e_device_table); 107 159 MODULE_FIRMWARE(MT7662_FIRMWARE); 108 160 MODULE_FIRMWARE(MT7662_ROM_PATCH); 109 161 MODULE_LICENSE("Dual BSD/GPL"); 110 162 111 163 static struct pci_driver mt76pci_driver = { 112 164 .name = KBUILD_MODNAME, 113 - .id_table = mt76pci_device_table, 114 - .probe = mt76pci_probe, 115 - .remove = mt76pci_remove, 165 + .id_table = mt76x2e_device_table, 166 + .probe = mt76x2e_probe, 167 + .remove = mt76x2e_remove, 168 + #ifdef CONFIG_PM 169 + .suspend = mt76x2e_suspend, 170 + .resume = mt76x2e_resume, 171 + #endif /* CONFIG_PM */ 116 172 }; 117 173 118 174 module_pci_driver(mt76pci_driver);
+17
drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
··· 217 217 mt76x2_power_on_rf(dev, 1); 218 218 } 219 219 220 + int mt76x2_resume_device(struct mt76x02_dev *dev) 221 + { 222 + int err; 223 + 224 + mt76x02_dma_disable(dev); 225 + mt76x2_reset_wlan(dev, true); 226 + mt76x2_power_on(dev); 227 + 228 + err = mt76x2_mac_reset(dev, true); 229 + if (err) 230 + return err; 231 + 232 + mt76x02_mac_start(dev); 233 + 234 + return mt76x2_mcu_init(dev); 235 + } 236 + 220 237 static int mt76x2_init_hardware(struct mt76x02_dev *dev) 221 238 { 222 239 int ret;
+3 -5
drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
··· 16 16 { USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USBAC1200 - Alfa AWUS036ACM */ 17 17 { USB_DEVICE(0x057c, 0x8503) }, /* Avm FRITZ!WLAN AC860 */ 18 18 { USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */ 19 + { USB_DEVICE(0x0e8d, 0x7632) }, /* HC-M7662BU1 */ 19 20 { USB_DEVICE(0x2c4e, 0x0103) }, /* Mercury UD13 */ 20 21 { USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */ 21 22 { USB_DEVICE(0x045e, 0x02e6) }, /* XBox One Wireless Adapter */ ··· 75 74 return 0; 76 75 77 76 err: 78 - ieee80211_free_hw(mt76_hw(dev)); 79 - mt76u_deinit(&dev->mt76); 77 + mt76_free_device(&dev->mt76); 80 78 usb_set_intfdata(intf, NULL); 81 79 usb_put_dev(udev); 82 80 ··· 91 91 set_bit(MT76_REMOVED, &dev->mphy.state); 92 92 ieee80211_unregister_hw(hw); 93 93 mt76x2u_cleanup(dev); 94 - mt76u_deinit(&dev->mt76); 95 - 96 - ieee80211_free_hw(hw); 94 + mt76_free_device(&dev->mt76); 97 95 usb_set_intfdata(intf, NULL); 98 96 usb_put_dev(udev); 99 97 }
+10 -1
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
··· 178 178 seq_printf(s, "Tx Beamformee feedback triggered counts: %ld\n", 179 179 FIELD_GET(MT_ETBF_TX_FB_TRI, cnt)); 180 180 181 - /* Tx SU counters */ 181 + /* Tx SU & MU counters */ 182 + cnt = mt76_rr(dev, MT_MIB_SDR34(ext_phy)); 183 + seq_printf(s, "Tx multi-user Beamforming counts: %ld\n", 184 + FIELD_GET(MT_MIB_MU_BF_TX_CNT, cnt)); 185 + cnt = mt76_rr(dev, MT_MIB_DR8(ext_phy)); 186 + seq_printf(s, "Tx multi-user MPDU counts: %d\n", cnt); 187 + cnt = mt76_rr(dev, MT_MIB_DR9(ext_phy)); 188 + seq_printf(s, "Tx multi-user successful MPDU counts: %d\n", cnt); 182 189 cnt = mt76_rr(dev, MT_MIB_DR11(ext_phy)); 183 190 seq_printf(s, "Tx single-user successful MPDU counts: %d\n", cnt); 184 191 ··· 391 384 return 0; 392 385 } 393 386 387 + #ifdef CONFIG_MAC80211_DEBUGFS 394 388 /** per-station debugfs **/ 395 389 396 390 /* usage: <tx mode> <ldpc> <stbc> <bw> <gi> <nss> <mcs> */ ··· 469 461 debugfs_create_file("fixed_rate", 0600, dir, sta, &fops_fixed_rate); 470 462 debugfs_create_file("stats", 0400, dir, sta, &fops_sta_stats); 471 463 } 464 + #endif
+11 -10
drivers/net/wireless/mediatek/mt76/mt7915/dma.c
··· 79 79 } 80 80 } 81 81 82 + static void 83 + mt7915_tx_cleanup(struct mt7915_dev *dev) 84 + { 85 + mt76_queue_tx_cleanup(dev, MT_TXQ_MCU, false); 86 + mt76_queue_tx_cleanup(dev, MT_TXQ_MCU_WA, false); 87 + mt76_queue_tx_cleanup(dev, MT_TXQ_PSD, false); 88 + mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false); 89 + } 90 + 82 91 static int mt7915_poll_tx(struct napi_struct *napi, int budget) 83 92 { 84 - static const u8 queue_map[] = { 85 - MT_TXQ_MCU, 86 - MT_TXQ_MCU_WA, 87 - MT_TXQ_BE 88 - }; 89 93 struct mt7915_dev *dev; 90 - int i; 91 94 92 95 dev = container_of(napi, struct mt7915_dev, mt76.tx_napi); 93 96 94 - for (i = 0; i < ARRAY_SIZE(queue_map); i++) 95 - mt76_queue_tx_cleanup(dev, queue_map[i], false); 97 + mt7915_tx_cleanup(dev); 96 98 97 99 if (napi_complete_done(napi, 0)) 98 100 mt7915_irq_enable(dev, MT_INT_TX_DONE_ALL); 99 101 100 - for (i = 0; i < ARRAY_SIZE(queue_map); i++) 101 - mt76_queue_tx_cleanup(dev, queue_map[i], false); 102 + mt7915_tx_cleanup(dev); 102 103 103 104 mt7915_mac_sta_poll(dev); 104 105
+26 -18
drivers/net/wireless/mediatek/mt76/mt7915/init.c
··· 417 417 418 418 he_cap_elem->mac_cap_info[0] = 419 419 IEEE80211_HE_MAC_CAP0_HTC_HE; 420 - he_cap_elem->mac_cap_info[1] = 421 - IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_0US | 422 - IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_1; 423 - he_cap_elem->mac_cap_info[2] = 424 - IEEE80211_HE_MAC_CAP2_BSR; 425 420 he_cap_elem->mac_cap_info[3] = 426 421 IEEE80211_HE_MAC_CAP3_OMI_CONTROL | 427 422 IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_RESERVED; ··· 438 443 IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | 439 444 IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ; 440 445 441 - /* TODO: OFDMA */ 442 - 443 446 switch (i) { 444 447 case NL80211_IFTYPE_AP: 445 448 he_cap_elem->mac_cap_info[0] |= 446 449 IEEE80211_HE_MAC_CAP0_TWT_RES; 450 + he_cap_elem->mac_cap_info[2] |= 451 + IEEE80211_HE_MAC_CAP2_BSR; 447 452 he_cap_elem->mac_cap_info[4] |= 448 453 IEEE80211_HE_MAC_CAP4_BQR; 454 + he_cap_elem->mac_cap_info[5] |= 455 + IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX; 449 456 he_cap_elem->phy_cap_info[3] |= 450 457 IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK | 451 458 IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK; 452 459 he_cap_elem->phy_cap_info[6] |= 453 460 IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT; 454 - he_cap_elem->phy_cap_info[9] |= 455 - IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU; 456 461 break; 457 462 case NL80211_IFTYPE_STATION: 458 463 he_cap_elem->mac_cap_info[0] |= 459 464 IEEE80211_HE_MAC_CAP0_TWT_REQ; 460 - he_cap_elem->mac_cap_info[3] |= 461 - IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED; 465 + he_cap_elem->mac_cap_info[1] |= 466 + IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US; 462 467 463 468 if (band == NL80211_BAND_2GHZ) 464 469 he_cap_elem->phy_cap_info[0] |= ··· 468 473 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G; 469 474 470 475 he_cap_elem->phy_cap_info[1] |= 471 - IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A; 476 + IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | 477 + IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US; 478 + he_cap_elem->phy_cap_info[3] |= 479 + 
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK | 480 + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK; 481 + he_cap_elem->phy_cap_info[6] |= 482 + IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB | 483 + IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE | 484 + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT; 485 + he_cap_elem->phy_cap_info[7] |= 486 + IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR | 487 + IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI; 472 488 he_cap_elem->phy_cap_info[8] |= 473 489 IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G | 474 490 IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU | 475 - IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU; 491 + IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU | 492 + IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484; 476 493 he_cap_elem->phy_cap_info[9] |= 477 - IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU; 494 + IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM | 495 + IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK | 496 + IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU | 497 + IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU | 498 + IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB | 499 + IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB; 478 500 break; 479 - #ifdef CONFIG_MAC80211_MESH 480 - case NL80211_IFTYPE_MESH_POINT: 481 - break; 482 - #endif 483 501 } 484 502 485 503 he_mcs->rx_mcs_80 = cpu_to_le16(mcs_map);
+46 -49
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
··· 178 178 179 179 static void 180 180 mt7915_mac_decode_he_radiotap_ru(struct mt76_rx_status *status, 181 - struct mt7915_rxv *rxv, 182 - struct ieee80211_radiotap_he *he) 181 + struct ieee80211_radiotap_he *he, 182 + __le32 *rxv) 183 183 { 184 184 u32 ru_h, ru_l; 185 185 u8 ru, offs = 0; 186 186 187 - ru_l = FIELD_GET(MT_PRXV_HE_RU_ALLOC_L, le32_to_cpu(rxv->v[0])); 188 - ru_h = FIELD_GET(MT_PRXV_HE_RU_ALLOC_H, le32_to_cpu(rxv->v[1])); 187 + ru_l = FIELD_GET(MT_PRXV_HE_RU_ALLOC_L, le32_to_cpu(rxv[0])); 188 + ru_h = FIELD_GET(MT_PRXV_HE_RU_ALLOC_H, le32_to_cpu(rxv[1])); 189 189 ru = (u8)(ru_l | ru_h << 4); 190 190 191 191 status->bw = RATE_INFO_BW_HE_RU; ··· 228 228 static void 229 229 mt7915_mac_decode_he_radiotap(struct sk_buff *skb, 230 230 struct mt76_rx_status *status, 231 - struct mt7915_rxv *rxv) 231 + __le32 *rxv, u32 phy) 232 232 { 233 233 /* TODO: struct ieee80211_radiotap_he_mu */ 234 234 static const struct ieee80211_radiotap_he known = { ··· 245 245 HE_BITS(DATA2_TXOP_KNOWN), 246 246 }; 247 247 struct ieee80211_radiotap_he *he = NULL; 248 - __le32 v2 = rxv->v[2]; 249 - __le32 v11 = rxv->v[11]; 250 - __le32 v14 = rxv->v[14]; 251 - u32 ltf_size = le32_get_bits(v2, MT_CRXV_HE_LTF_SIZE) + 1; 248 + u32 ltf_size = le32_get_bits(rxv[2], MT_CRXV_HE_LTF_SIZE) + 1; 252 249 253 250 he = skb_push(skb, sizeof(known)); 254 251 memcpy(he, &known, sizeof(known)); 255 252 256 - he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, v14) | 257 - HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, v2); 258 - he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, v2) | 253 + he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[14]) | 254 + HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[2]); 255 + he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[2]) | 259 256 le16_encode_bits(ltf_size, 260 257 IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE); 261 - he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, v14) | 262 - HE_PREP(DATA6_DOPPLER, DOPPLER, v14); 258 + he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[14]) | 
259 + HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[14]); 263 260 264 - switch (rxv->phy) { 261 + switch (phy) { 265 262 case MT_PHY_TYPE_HE_SU: 266 263 he->data1 |= HE_BITS(DATA1_FORMAT_SU) | 267 264 HE_BITS(DATA1_UL_DL_KNOWN) | 268 265 HE_BITS(DATA1_BEAM_CHANGE_KNOWN) | 269 266 HE_BITS(DATA1_SPTL_REUSE_KNOWN); 270 267 271 - he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, v14) | 272 - HE_PREP(DATA3_UL_DL, UPLINK, v2); 273 - he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, v11); 268 + he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) | 269 + HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]); 270 + he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]); 274 271 break; 275 272 case MT_PHY_TYPE_HE_EXT_SU: 276 273 he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) | 277 274 HE_BITS(DATA1_UL_DL_KNOWN); 278 275 279 - he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, v2); 276 + he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]); 280 277 break; 281 278 case MT_PHY_TYPE_HE_MU: 282 279 he->data1 |= HE_BITS(DATA1_FORMAT_MU) | 283 280 HE_BITS(DATA1_UL_DL_KNOWN) | 284 281 HE_BITS(DATA1_SPTL_REUSE_KNOWN); 285 282 286 - he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, v2); 287 - he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, v11); 283 + he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]); 284 + he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]); 288 285 289 - mt7915_mac_decode_he_radiotap_ru(status, rxv, he); 286 + mt7915_mac_decode_he_radiotap_ru(status, he, rxv); 290 287 break; 291 288 case MT_PHY_TYPE_HE_TB: 292 289 he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) | ··· 292 295 HE_BITS(DATA1_SPTL_REUSE3_KNOWN) | 293 296 HE_BITS(DATA1_SPTL_REUSE4_KNOWN); 294 297 295 - he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, v11) | 296 - HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, v11) | 297 - HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, v11) | 298 - HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, v11); 298 + he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[11]) | 299 + HE_PREP(DATA4_TB_SPTL_REUSE2, 
SR1_MASK, rxv[11]) | 300 + HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[11]) | 301 + HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[11]); 299 302 300 - mt7915_mac_decode_he_radiotap_ru(status, rxv, he); 303 + mt7915_mac_decode_he_radiotap_ru(status, he, rxv); 301 304 break; 302 305 default: 303 306 break; ··· 311 314 struct mt7915_phy *phy = &dev->phy; 312 315 struct ieee80211_supported_band *sband; 313 316 struct ieee80211_hdr *hdr; 314 - struct mt7915_rxv rxv = {}; 315 317 __le32 *rxd = (__le32 *)skb->data; 318 + __le32 *rxv = NULL; 319 + u32 mode = 0; 316 320 u32 rxd1 = le32_to_cpu(rxd[1]); 317 321 u32 rxd2 = le32_to_cpu(rxd[2]); 318 322 u32 rxd3 = le32_to_cpu(rxd[3]); ··· 425 427 if (rxd1 & MT_RXD1_NORMAL_GROUP_3) { 426 428 u32 v0, v1, v2; 427 429 428 - memcpy(rxv.v, rxd, sizeof(rxv.v)); 429 - 430 + rxv = rxd; 430 431 rxd += 2; 431 432 if ((u8 *)rxd - skb->data >= skb->len) 432 433 return -EINVAL; 433 434 434 - v0 = le32_to_cpu(rxv.v[0]); 435 - v1 = le32_to_cpu(rxv.v[1]); 436 - v2 = le32_to_cpu(rxv.v[2]); 435 + v0 = le32_to_cpu(rxv[0]); 436 + v1 = le32_to_cpu(rxv[1]); 437 + v2 = le32_to_cpu(rxv[2]); 437 438 438 439 if (v0 & MT_PRXV_HT_AD_CODE) 439 440 status->enc_flags |= RX_ENC_FLAG_LDPC; ··· 463 466 return -EINVAL; 464 467 465 468 idx = i = FIELD_GET(MT_PRXV_TX_RATE, v0); 466 - rxv.phy = FIELD_GET(MT_CRXV_TX_MODE, v2); 469 + mode = FIELD_GET(MT_CRXV_TX_MODE, v2); 467 470 468 - switch (rxv.phy) { 471 + switch (mode) { 469 472 case MT_PHY_TYPE_CCK: 470 473 cck = true; 471 474 /* fall through */ ··· 500 503 if (gi <= NL80211_RATE_INFO_HE_GI_3_2) 501 504 status->he_gi = gi; 502 505 503 - if (idx & MT_PRXV_TX_DCM) 504 - status->he_dcm = true; 506 + status->he_dcm = !!(idx & MT_PRXV_TX_DCM); 505 507 break; 506 508 default: 507 509 return -EINVAL; ··· 511 515 case IEEE80211_STA_RX_BW_20: 512 516 break; 513 517 case IEEE80211_STA_RX_BW_40: 514 - if (rxv.phy & MT_PHY_TYPE_HE_EXT_SU && 518 + if (mode & MT_PHY_TYPE_HE_EXT_SU && 515 519 (idx & MT_PRXV_TX_ER_SU_106T)) { 516 
520 status->bw = RATE_INFO_BW_HE_RU; 517 521 status->he_ru = ··· 531 535 } 532 536 533 537 status->enc_flags |= RX_ENC_FLAG_STBC_MASK * stbc; 534 - if (rxv.phy < MT_PHY_TYPE_HE_SU && gi) 538 + if (mode < MT_PHY_TYPE_HE_SU && gi) 535 539 status->enc_flags |= RX_ENC_FLAG_SHORT_GI; 536 540 } 537 541 } ··· 544 548 mt76_insert_ccmp_hdr(skb, key_id); 545 549 } 546 550 547 - if (status->flag & RX_FLAG_RADIOTAP_HE) 548 - mt7915_mac_decode_he_radiotap(skb, status, &rxv); 551 + if (rxv && status->flag & RX_FLAG_RADIOTAP_HE) 552 + mt7915_mac_decode_he_radiotap(skb, status, rxv, mode); 549 553 550 554 hdr = mt76_skb_get_hdr(skb); 551 555 if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control)) ··· 587 591 fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2; 588 592 fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4; 589 593 590 - if (ieee80211_is_data(fc) || ieee80211_is_bufferable_mmpdu(fc)) { 591 - q_idx = wmm_idx * MT7915_MAX_WMM_SETS + 592 - skb_get_queue_mapping(skb); 593 - p_fmt = MT_TX_TYPE_CT; 594 - } else if (beacon) { 595 - q_idx = MT_LMAC_BCN0; 594 + if (beacon) { 596 595 p_fmt = MT_TX_TYPE_FW; 597 - } else { 598 - q_idx = MT_LMAC_ALTX0; 596 + q_idx = MT_LMAC_BCN0; 597 + } else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) { 599 598 p_fmt = MT_TX_TYPE_CT; 599 + q_idx = MT_LMAC_ALTX0; 600 + } else { 601 + p_fmt = MT_TX_TYPE_CT; 602 + q_idx = wmm_idx * MT7915_MAX_WMM_SETS + 603 + mt7915_lmac_mapping(dev, skb_get_queue_mapping(skb)); 600 604 } 601 605 602 606 val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) | ··· 612 616 FIELD_PREP(MT_TXD1_TID, 613 617 skb->priority & IEEE80211_QOS_CTL_TID_MASK) | 614 618 FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx); 619 + 615 620 if (ext_phy && q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0) 616 621 val |= MT_TXD1_TGID; 617 622
-17
drivers/net/wireless/mediatek/mt76/mt7915/mac.h
··· 128 128 #define MT_CRXV_HE_BEAM_CHNG BIT(13) 129 129 #define MT_CRXV_HE_DOPPLER BIT(16) 130 130 131 - struct mt7915_rxv { 132 - u32 phy; 133 - 134 - /* P-RXV: bit 0~1, C-RXV: bit 2~19 */ 135 - __le32 v[20]; 136 - }; 137 - 138 131 enum tx_header_format { 139 132 MT_HDR_FORMAT_802_3, 140 133 MT_HDR_FORMAT_CMD, ··· 140 147 MT_TX_TYPE_SF, 141 148 MT_TX_TYPE_CMD, 142 149 MT_TX_TYPE_FW, 143 - }; 144 - 145 - enum tx_pkt_queue_idx { 146 - MT_LMAC_AC00, 147 - MT_LMAC_AC01, 148 - MT_LMAC_AC02, 149 - MT_LMAC_AC03, 150 - MT_LMAC_ALTX0 = 0x10, 151 - MT_LMAC_BMC0 = 0x10, 152 - MT_LMAC_BCN0 = 0x12, 153 150 }; 154 151 155 152 enum tx_port_idx {
+6 -7
drivers/net/wireless/mediatek/mt76/mt7915/main.c
··· 125 125 126 126 mutex_lock(&dev->mt76.mutex); 127 127 128 - mvif->idx = ffs(~phy->vif_mask) - 1; 128 + mvif->idx = ffs(~phy->mt76->vif_mask) - 1; 129 129 if (mvif->idx >= MT7915_MAX_INTERFACES) { 130 130 ret = -ENOSPC; 131 131 goto out; ··· 150 150 if (ret) 151 151 goto out; 152 152 153 - phy->vif_mask |= BIT(mvif->idx); 153 + phy->mt76->vif_mask |= BIT(mvif->idx); 154 154 phy->omac_mask |= BIT(mvif->omac_idx); 155 155 156 156 idx = MT7915_WTBL_RESERVED - mvif->idx; ··· 194 194 mt76_txq_remove(&dev->mt76, vif->txq); 195 195 196 196 mutex_lock(&dev->mt76.mutex); 197 - phy->vif_mask &= ~BIT(mvif->idx); 197 + phy->mt76->vif_mask &= ~BIT(mvif->idx); 198 198 phy->omac_mask &= ~BIT(mvif->omac_idx); 199 199 mutex_unlock(&dev->mt76.mutex); 200 200 ··· 350 350 mt7915_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue, 351 351 const struct ieee80211_tx_queue_params *params) 352 352 { 353 + struct mt7915_dev *dev = mt7915_hw_dev(hw); 353 354 struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 354 355 355 356 /* no need to update right away, we'll get BSS_CHANGED_QOS */ 356 - mvif->wmm[queue].cw_min = params->cw_min; 357 - mvif->wmm[queue].cw_max = params->cw_max; 358 - mvif->wmm[queue].aifs = params->aifs; 359 - mvif->wmm[queue].txop = params->txop; 357 + queue = mt7915_lmac_mapping(dev, queue); 358 + mvif->queue_params[queue] = *params; 360 359 361 360 return 0; 362 361 }
+100 -25
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
··· 312 312 struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data; 313 313 int ret = 0; 314 314 315 - if (seq != rxd->seq) 316 - return -EAGAIN; 315 + if (seq != rxd->seq) { 316 + ret = -EAGAIN; 317 + goto out; 318 + } 317 319 318 320 switch (cmd) { 319 321 case -MCU_CMD_PATCH_SEM_CONTROL: ··· 332 330 default: 333 331 break; 334 332 } 333 + out: 335 334 dev_kfree_skb(skb); 336 335 337 336 return ret; ··· 508 505 mt7915_mcu_tx_rate_report(struct mt7915_dev *dev, struct sk_buff *skb) 509 506 { 510 507 struct mt7915_mcu_ra_info *ra = (struct mt7915_mcu_ra_info *)skb->data; 511 - u16 wcidx = le16_to_cpu(ra->wlan_idx); 512 - struct mt76_wcid *wcid = rcu_dereference(dev->mt76.wcid[wcidx]); 513 - struct mt7915_sta *msta = container_of(wcid, struct mt7915_sta, wcid); 514 - struct mt7915_sta_stats *stats = &msta->stats; 515 - struct mt76_phy *mphy = &dev->mphy; 516 508 struct rate_info rate = {}, prob_rate = {}; 509 + u16 probe = le16_to_cpu(ra->prob_up_rate); 517 510 u16 attempts = le16_to_cpu(ra->attempts); 518 511 u16 curr = le16_to_cpu(ra->curr_rate); 519 - u16 probe = le16_to_cpu(ra->prob_up_rate); 512 + u16 wcidx = le16_to_cpu(ra->wlan_idx); 513 + struct mt76_phy *mphy = &dev->mphy; 514 + struct mt7915_sta_stats *stats; 515 + struct mt7915_sta *msta; 516 + struct mt76_wcid *wcid; 517 + 518 + if (wcidx >= MT76_N_WCIDS) 519 + return; 520 + 521 + wcid = rcu_dereference(dev->mt76.wcid[wcidx]); 522 + msta = container_of(wcid, struct mt7915_sta, wcid); 523 + stats = &msta->stats; 520 524 521 525 if (msta->wcid.ext_phy && dev->mt76.phy2) 522 526 mphy = dev->mt76.phy2; ··· 1176 1166 struct wtbl_req_hdr *wtbl_hdr; 1177 1167 struct tlv *sta_wtbl; 1178 1168 struct sk_buff *skb; 1169 + int ret; 1170 + 1171 + skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, 1172 + MT7915_STA_UPDATE_MAX_SIZE); 1173 + if (IS_ERR(skb)) 1174 + return PTR_ERR(skb); 1175 + 1176 + sta_wtbl = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv)); 1177 + 1178 + wtbl_hdr = 
mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl, 1179 + &skb); 1180 + mt7915_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr); 1181 + 1182 + ret = __mt76_mcu_skb_send_msg(&dev->mt76, skb, 1183 + MCU_EXT_CMD_STA_REC_UPDATE, true); 1184 + if (ret) 1185 + return ret; 1179 1186 1180 1187 skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, 1181 1188 MT7915_STA_UPDATE_MAX_SIZE); ··· 1200 1173 return PTR_ERR(skb); 1201 1174 1202 1175 mt7915_mcu_sta_ba_tlv(skb, params, enable, tx); 1203 - sta_wtbl = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv)); 1204 - 1205 - wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_SET, sta_wtbl, 1206 - &skb); 1207 - mt7915_mcu_wtbl_ba_tlv(skb, params, enable, tx, sta_wtbl, wtbl_hdr); 1208 1176 1209 1177 return __mt76_mcu_skb_send_msg(&dev->mt76, skb, 1210 1178 MCU_EXT_CMD_STA_REC_UPDATE, true); ··· 1488 1466 HE_PHY(CAP2_UL_MU_PARTIAL_MU_MIMO, elem->phy_cap_info[2]); 1489 1467 } 1490 1468 1469 + static int 1470 + mt7915_mcu_add_mu(struct mt7915_dev *dev, struct ieee80211_vif *vif, 1471 + struct ieee80211_sta *sta) 1472 + { 1473 + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 1474 + struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; 1475 + struct sk_buff *skb; 1476 + int len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_muru); 1477 + 1478 + if (!sta->vht_cap.vht_supported && !sta->he_cap.has_he) 1479 + return 0; 1480 + 1481 + skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len); 1482 + if (IS_ERR(skb)) 1483 + return PTR_ERR(skb); 1484 + 1485 + /* starec muru */ 1486 + mt7915_mcu_sta_muru_tlv(skb, sta); 1487 + 1488 + return __mt76_mcu_skb_send_msg(&dev->mt76, skb, 1489 + MCU_EXT_CMD_STA_REC_UPDATE, true); 1490 + } 1491 + 1491 1492 static void 1492 1493 mt7915_mcu_sta_tlv(struct mt7915_dev *dev, struct sk_buff *skb, 1493 1494 struct ieee80211_sta *sta) 1494 1495 { 1495 1496 struct tlv *tlv; 1496 1497 1498 + /* starec ht */ 1497 1499 if (sta->ht_cap.ht_supported) { 1498 1500 struct 
sta_rec_ht *ht; 1499 1501 1500 - /* starec ht */ 1501 1502 tlv = mt7915_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht)); 1502 1503 ht = (struct sta_rec_ht *)tlv; 1503 1504 ht->ht_cap = cpu_to_le16(sta->ht_cap.cap); ··· 1540 1495 /* starec he */ 1541 1496 if (sta->he_cap.has_he) 1542 1497 mt7915_mcu_sta_he_tlv(skb, sta); 1543 - 1544 - /* starec muru */ 1545 - if (sta->he_cap.has_he || sta->vht_cap.vht_supported) 1546 - mt7915_mcu_sta_muru_tlv(skb, sta); 1547 1498 } 1548 1499 1549 1500 static void ··· 2105 2064 MCU_EXT_CMD_STA_REC_UPDATE, true); 2106 2065 } 2107 2066 2067 + static int 2068 + mt7915_mcu_add_group(struct mt7915_dev *dev, struct ieee80211_vif *vif, 2069 + struct ieee80211_sta *sta) 2070 + { 2071 + #define MT_STA_BSS_GROUP 1 2072 + struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; 2073 + struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv; 2074 + struct { 2075 + __le32 action; 2076 + u8 wlan_idx_lo; 2077 + u8 status; 2078 + u8 wlan_idx_hi; 2079 + u8 rsv0[5]; 2080 + __le32 val; 2081 + u8 rsv1[8]; 2082 + } __packed req = { 2083 + .action = cpu_to_le32(MT_STA_BSS_GROUP), 2084 + .wlan_idx_lo = to_wcid_lo(msta->wcid.idx), 2085 + .wlan_idx_hi = to_wcid_hi(msta->wcid.idx), 2086 + .val = cpu_to_le32(mvif->idx), 2087 + }; 2088 + 2089 + return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_SET_DRR_CTRL, 2090 + &req, sizeof(req), true); 2091 + } 2092 + 2108 2093 int mt7915_mcu_add_sta_adv(struct mt7915_dev *dev, struct ieee80211_vif *vif, 2109 2094 struct ieee80211_sta *sta, bool enable) 2110 2095 { ··· 2140 2073 return 0; 2141 2074 2142 2075 /* must keep the order */ 2076 + ret = mt7915_mcu_add_group(dev, vif, sta); 2077 + if (ret) 2078 + return ret; 2079 + 2143 2080 ret = mt7915_mcu_add_txbf(dev, vif, sta, enable); 2081 + if (ret) 2082 + return ret; 2083 + 2084 + ret = mt7915_mcu_add_mu(dev, vif, sta); 2144 2085 if (ret) 2145 2086 return ret; 2146 2087 ··· 2898 2823 int ac; 2899 2824 2900 2825 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 2826 + struct 
ieee80211_tx_queue_params *q = &mvif->queue_params[ac]; 2901 2827 struct edca *e = &req.edca[ac]; 2902 2828 2903 2829 e->queue = ac + mvif->wmm_idx * MT7915_MAX_WMM_SETS; 2904 - e->aifs = mvif->wmm[ac].aifs; 2905 - e->txop = cpu_to_le16(mvif->wmm[ac].txop); 2830 + e->aifs = q->aifs; 2831 + e->txop = cpu_to_le16(q->txop); 2906 2832 2907 - if (mvif->wmm[ac].cw_min) 2908 - e->cw_min = fls(mvif->wmm[ac].cw_max); 2833 + if (q->cw_min) 2834 + e->cw_min = fls(q->cw_min); 2909 2835 else 2910 2836 e->cw_min = 5; 2911 2837 2912 - if (mvif->wmm[ac].cw_max) 2913 - e->cw_max = cpu_to_le16(fls(mvif->wmm[ac].cw_max)); 2838 + if (q->cw_max) 2839 + e->cw_max = cpu_to_le16(fls(q->cw_max)); 2914 2840 else 2915 2841 e->cw_max = cpu_to_le16(10); 2916 2842 } 2917 - 2918 2843 return __mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EDCA_UPDATE, 2919 2844 &req, sizeof(req), true); 2920 2845 }
+3 -3
drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
··· 201 201 MCU_EXT_CMD_EDCA_UPDATE = 0x27, 202 202 MCU_EXT_CMD_DEV_INFO_UPDATE = 0x2A, 203 203 MCU_EXT_CMD_THERMAL_CTRL = 0x2c, 204 + MCU_EXT_CMD_SET_DRR_CTRL = 0x36, 204 205 MCU_EXT_CMD_SET_RDD_CTRL = 0x3a, 205 206 MCU_EXT_CMD_PROTECT_CTRL = 0x3e, 206 207 MCU_EXT_CMD_MAC_INIT_CTRL = 0x46, ··· 654 653 bool ofdma_ul_en; 655 654 bool mimo_dl_en; 656 655 bool mimo_ul_en; 657 - bool rsv[4]; 656 + u8 rsv[4]; 658 657 } cfg; 659 658 660 659 struct { ··· 665 664 bool lt16_sigb; 666 665 bool rx_su_comp_sigb; 667 666 bool rx_su_non_comp_sigb; 668 - bool rsv; 667 + u8 rsv; 669 668 } ofdma_dl; 670 669 671 670 struct { ··· 952 951 sizeof(struct sta_rec_ba) + \ 953 952 sizeof(struct sta_rec_vht) + \ 954 953 sizeof(struct tlv) + \ 955 - sizeof(struct sta_rec_muru) + \ 956 954 MT7915_WTBL_UPDATE_MAX_SIZE) 957 955 958 956 #define MT7915_WTBL_UPDATE_BA_SIZE (sizeof(struct wtbl_req_hdr) + \
+27 -8
drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
··· 99 99 u8 band_idx; 100 100 u8 wmm_idx; 101 101 102 - struct { 103 - u16 cw_min; 104 - u16 cw_max; 105 - u16 txop; 106 - u8 aifs; 107 - } wmm[IEEE80211_NUM_ACS]; 108 - 109 102 struct mt7915_sta sta; 110 103 struct mt7915_dev *dev; 104 + 105 + struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS]; 111 106 }; 112 107 113 108 struct mib_stats { ··· 120 125 struct ieee80211_sband_iftype_data iftype[2][NUM_NL80211_IFTYPES]; 121 126 122 127 u32 rxfilter; 123 - u32 vif_mask; 124 128 u32 omac_mask; 125 129 126 130 u16 noise; ··· 194 200 }; 195 201 196 202 enum { 203 + MT_LMAC_AC00, 204 + MT_LMAC_AC01, 205 + MT_LMAC_AC02, 206 + MT_LMAC_AC03, 207 + MT_LMAC_ALTX0 = 0x10, 208 + MT_LMAC_BMC0, 209 + MT_LMAC_BCN0, 210 + }; 211 + 212 + enum { 197 213 MT_RX_SEL0, 198 214 MT_RX_SEL1, 199 215 }; ··· 256 252 return NULL; 257 253 258 254 return phy->priv; 255 + } 256 + 257 + static inline u8 mt7915_lmac_mapping(struct mt7915_dev *dev, u8 ac) 258 + { 259 + static const u8 lmac_queue_map[] = { 260 + [IEEE80211_AC_BK] = MT_LMAC_AC00, 261 + [IEEE80211_AC_BE] = MT_LMAC_AC01, 262 + [IEEE80211_AC_VI] = MT_LMAC_AC02, 263 + [IEEE80211_AC_VO] = MT_LMAC_AC03, 264 + }; 265 + 266 + if (WARN_ON_ONCE(ac >= ARRAY_SIZE(lmac_queue_map))) 267 + return MT_LMAC_AC01; /* BE */ 268 + 269 + return lmac_queue_map[ac]; 259 270 } 260 271 261 272 static inline void
+1 -1
drivers/net/wireless/mediatek/mt76/mt7915/pci.c
··· 103 103 static const struct mt76_driver_ops drv_ops = { 104 104 /* txwi_size = txd size + txp size */ 105 105 .txwi_size = MT_TXD_SIZE + sizeof(struct mt7915_txp), 106 - .drv_flags = MT_DRV_TXWI_NO_FREE, 106 + .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ, 107 107 .survey_flags = SURVEY_INFO_TIME_TX | 108 108 SURVEY_INFO_TIME_RX | 109 109 SURVEY_INFO_TIME_BSS_RX,
+5
drivers/net/wireless/mediatek/mt76/mt7915/regs.h
··· 117 117 #define MT_MIB_SDR16(_band) MT_WF_MIB(_band, 0x048) 118 118 #define MT_MIB_SDR16_BUSY_MASK GENMASK(23, 0) 119 119 120 + #define MT_MIB_SDR34(_band) MT_WF_MIB(_band, 0x090) 121 + #define MT_MIB_MU_BF_TX_CNT GENMASK(15, 0) 122 + 120 123 #define MT_MIB_SDR36(_band) MT_WF_MIB(_band, 0x098) 121 124 #define MT_MIB_SDR36_TXTIME_MASK GENMASK(23, 0) 122 125 #define MT_MIB_SDR37(_band) MT_WF_MIB(_band, 0x09c) 123 126 #define MT_MIB_SDR37_RXTIME_MASK GENMASK(23, 0) 124 127 128 + #define MT_MIB_DR8(_band) MT_WF_MIB(_band, 0x0c0) 129 + #define MT_MIB_DR9(_band) MT_WF_MIB(_band, 0x0c4) 125 130 #define MT_MIB_DR11(_band) MT_WF_MIB(_band, 0x0cc) 126 131 127 132 #define MT_MIB_MB_SDR0(_band, n) MT_WF_MIB(_band, 0x100 + ((n) << 4))
+1
drivers/net/wireless/mediatek/mt76/pci.c
··· 3 3 * Copyright (C) 2019 Lorenzo Bianconi <lorenzo@kernel.org> 4 4 */ 5 5 6 + #include "mt76.h" 6 7 #include <linux/pci.h> 7 8 8 9 void mt76_pci_disable_aspm(struct pci_dev *pdev)
+368
drivers/net/wireless/mediatek/mt76/sdio.c
··· 1 + // SPDX-License-Identifier: ISC 2 + /* Copyright (C) 2020 MediaTek Inc. 3 + * 4 + * This file is written based on mt76/usb.c. 5 + * 6 + * Author: Felix Fietkau <nbd@nbd.name> 7 + * Lorenzo Bianconi <lorenzo@kernel.org> 8 + * Sean Wang <sean.wang@mediatek.com> 9 + */ 10 + 11 + #include <linux/iopoll.h> 12 + #include <linux/kernel.h> 13 + #include <linux/module.h> 14 + #include <linux/mmc/sdio_func.h> 15 + #include <linux/sched.h> 16 + #include <linux/kthread.h> 17 + 18 + #include "mt76.h" 19 + 20 + static int 21 + mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid) 22 + { 23 + struct mt76_queue *q = &dev->q_rx[qid]; 24 + 25 + spin_lock_init(&q->lock); 26 + q->entry = devm_kcalloc(dev->dev, 27 + MT_NUM_RX_ENTRIES, sizeof(*q->entry), 28 + GFP_KERNEL); 29 + if (!q->entry) 30 + return -ENOMEM; 31 + 32 + q->ndesc = MT_NUM_RX_ENTRIES; 33 + q->head = q->tail = 0; 34 + q->queued = 0; 35 + 36 + return 0; 37 + } 38 + 39 + static int mt76s_alloc_tx(struct mt76_dev *dev) 40 + { 41 + struct mt76_queue *q; 42 + int i; 43 + 44 + for (i = 0; i < MT_TXQ_MCU_WA; i++) { 45 + INIT_LIST_HEAD(&dev->q_tx[i].swq); 46 + 47 + q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL); 48 + if (!q) 49 + return -ENOMEM; 50 + 51 + spin_lock_init(&q->lock); 52 + q->hw_idx = i; 53 + dev->q_tx[i].q = q; 54 + 55 + q->entry = devm_kcalloc(dev->dev, 56 + MT_NUM_TX_ENTRIES, sizeof(*q->entry), 57 + GFP_KERNEL); 58 + if (!q->entry) 59 + return -ENOMEM; 60 + 61 + q->ndesc = MT_NUM_TX_ENTRIES; 62 + } 63 + 64 + return 0; 65 + } 66 + 67 + void mt76s_stop_txrx(struct mt76_dev *dev) 68 + { 69 + struct mt76_sdio *sdio = &dev->sdio; 70 + 71 + cancel_work_sync(&sdio->stat_work); 72 + clear_bit(MT76_READING_STATS, &dev->phy.state); 73 + 74 + mt76_tx_status_check(dev, NULL, true); 75 + } 76 + EXPORT_SYMBOL_GPL(mt76s_stop_txrx); 77 + 78 + int mt76s_alloc_queues(struct mt76_dev *dev) 79 + { 80 + int err; 81 + 82 + err = mt76s_alloc_rx_queue(dev, MT_RXQ_MAIN); 83 + if (err < 0) 84 + return err; 85 + 86 + 
return mt76s_alloc_tx(dev); 87 + } 88 + EXPORT_SYMBOL_GPL(mt76s_alloc_queues); 89 + 90 + static struct mt76_queue_entry * 91 + mt76s_get_next_rx_entry(struct mt76_queue *q) 92 + { 93 + struct mt76_queue_entry *e = NULL; 94 + 95 + spin_lock_bh(&q->lock); 96 + if (q->queued > 0) { 97 + e = &q->entry[q->head]; 98 + q->head = (q->head + 1) % q->ndesc; 99 + q->queued--; 100 + } 101 + spin_unlock_bh(&q->lock); 102 + 103 + return e; 104 + } 105 + 106 + static int 107 + mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q) 108 + { 109 + int qid = q - &dev->q_rx[MT_RXQ_MAIN]; 110 + int nframes = 0; 111 + 112 + while (true) { 113 + struct mt76_queue_entry *e; 114 + 115 + if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state)) 116 + break; 117 + 118 + e = mt76s_get_next_rx_entry(q); 119 + if (!e || !e->skb) 120 + break; 121 + 122 + dev->drv->rx_skb(dev, MT_RXQ_MAIN, e->skb); 123 + e->skb = NULL; 124 + nframes++; 125 + } 126 + if (qid == MT_RXQ_MAIN) 127 + mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL); 128 + 129 + return nframes; 130 + } 131 + 132 + static int mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid) 133 + { 134 + struct mt76_sw_queue *sq = &dev->q_tx[qid]; 135 + u32 n_dequeued = 0, n_sw_dequeued = 0; 136 + struct mt76_queue_entry entry; 137 + struct mt76_queue *q = sq->q; 138 + bool wake; 139 + 140 + while (q->queued > n_dequeued) { 141 + if (!q->entry[q->head].done) 142 + break; 143 + 144 + if (q->entry[q->head].schedule) { 145 + q->entry[q->head].schedule = false; 146 + n_sw_dequeued++; 147 + } 148 + 149 + entry = q->entry[q->head]; 150 + q->entry[q->head].done = false; 151 + q->head = (q->head + 1) % q->ndesc; 152 + n_dequeued++; 153 + 154 + if (qid == MT_TXQ_MCU) 155 + dev_kfree_skb(entry.skb); 156 + else 157 + dev->drv->tx_complete_skb(dev, qid, &entry); 158 + } 159 + 160 + spin_lock_bh(&q->lock); 161 + 162 + sq->swq_queued -= n_sw_dequeued; 163 + q->queued -= n_dequeued; 164 + 165 + wake = q->stopped && q->queued < q->ndesc - 8; 
166 + if (wake) 167 + q->stopped = false; 168 + 169 + if (!q->queued) 170 + wake_up(&dev->tx_wait); 171 + 172 + spin_unlock_bh(&q->lock); 173 + 174 + if (qid == MT_TXQ_MCU) 175 + goto out; 176 + 177 + mt76_txq_schedule(&dev->phy, qid); 178 + 179 + if (wake) 180 + ieee80211_wake_queue(dev->hw, qid); 181 + 182 + wake_up_process(dev->sdio.tx_kthread); 183 + out: 184 + return n_dequeued; 185 + } 186 + 187 + static void mt76s_tx_status_data(struct work_struct *work) 188 + { 189 + struct mt76_sdio *sdio; 190 + struct mt76_dev *dev; 191 + u8 update = 1; 192 + u16 count = 0; 193 + 194 + sdio = container_of(work, struct mt76_sdio, stat_work); 195 + dev = container_of(sdio, struct mt76_dev, sdio); 196 + 197 + while (true) { 198 + if (test_bit(MT76_REMOVED, &dev->phy.state)) 199 + break; 200 + 201 + if (!dev->drv->tx_status_data(dev, &update)) 202 + break; 203 + count++; 204 + } 205 + 206 + if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state)) 207 + queue_work(dev->wq, &sdio->stat_work); 208 + else 209 + clear_bit(MT76_READING_STATS, &dev->phy.state); 210 + } 211 + 212 + static int 213 + mt76s_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid, 214 + struct sk_buff *skb, struct mt76_wcid *wcid, 215 + struct ieee80211_sta *sta) 216 + { 217 + struct mt76_queue *q = dev->q_tx[qid].q; 218 + struct mt76_tx_info tx_info = { 219 + .skb = skb, 220 + }; 221 + int err, len = skb->len; 222 + u16 idx = q->tail; 223 + 224 + if (q->queued == q->ndesc) 225 + return -ENOSPC; 226 + 227 + skb->prev = skb->next = NULL; 228 + err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info); 229 + if (err < 0) 230 + return err; 231 + 232 + q->entry[q->tail].skb = tx_info.skb; 233 + q->entry[q->tail].buf_sz = len; 234 + q->tail = (q->tail + 1) % q->ndesc; 235 + q->queued++; 236 + 237 + return idx; 238 + } 239 + 240 + static int 241 + mt76s_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid, 242 + struct sk_buff *skb, u32 tx_info) 243 + { 244 + struct mt76_queue *q = 
dev->q_tx[qid].q; 245 + int ret = -ENOSPC, len = skb->len; 246 + 247 + spin_lock_bh(&q->lock); 248 + if (q->queued == q->ndesc) 249 + goto out; 250 + 251 + ret = mt76_skb_adjust_pad(skb); 252 + if (ret) 253 + goto out; 254 + 255 + q->entry[q->tail].buf_sz = len; 256 + q->entry[q->tail].skb = skb; 257 + q->tail = (q->tail + 1) % q->ndesc; 258 + q->queued++; 259 + 260 + out: 261 + spin_unlock_bh(&q->lock); 262 + 263 + return ret; 264 + } 265 + 266 + static void mt76s_tx_kick(struct mt76_dev *dev, struct mt76_queue *q) 267 + { 268 + struct mt76_sdio *sdio = &dev->sdio; 269 + 270 + wake_up_process(sdio->tx_kthread); 271 + } 272 + 273 + static const struct mt76_queue_ops sdio_queue_ops = { 274 + .tx_queue_skb = mt76s_tx_queue_skb, 275 + .kick = mt76s_tx_kick, 276 + .tx_queue_skb_raw = mt76s_tx_queue_skb_raw, 277 + }; 278 + 279 + static int mt76s_kthread_run(void *data) 280 + { 281 + struct mt76_dev *dev = data; 282 + struct mt76_phy *mphy = &dev->phy; 283 + 284 + while (!kthread_should_stop()) { 285 + int i, nframes = 0; 286 + 287 + cond_resched(); 288 + 289 + /* rx processing */ 290 + local_bh_disable(); 291 + rcu_read_lock(); 292 + 293 + mt76_for_each_q_rx(dev, i) 294 + nframes += mt76s_process_rx_queue(dev, &dev->q_rx[i]); 295 + 296 + rcu_read_unlock(); 297 + local_bh_enable(); 298 + 299 + /* tx processing */ 300 + for (i = 0; i < MT_TXQ_MCU_WA; i++) 301 + nframes += mt76s_process_tx_queue(dev, i); 302 + 303 + if (dev->drv->tx_status_data && 304 + !test_and_set_bit(MT76_READING_STATS, &mphy->state)) 305 + queue_work(dev->wq, &dev->sdio.stat_work); 306 + 307 + if (!nframes || !test_bit(MT76_STATE_RUNNING, &mphy->state)) { 308 + set_current_state(TASK_INTERRUPTIBLE); 309 + schedule(); 310 + } 311 + } 312 + 313 + return 0; 314 + } 315 + 316 + void mt76s_deinit(struct mt76_dev *dev) 317 + { 318 + struct mt76_sdio *sdio = &dev->sdio; 319 + int i; 320 + 321 + kthread_stop(sdio->kthread); 322 + kthread_stop(sdio->tx_kthread); 323 + mt76s_stop_txrx(dev); 324 + 325 + 
sdio_claim_host(sdio->func); 326 + sdio_release_irq(sdio->func); 327 + sdio_release_host(sdio->func); 328 + 329 + mt76_for_each_q_rx(dev, i) { 330 + struct mt76_queue *q = &dev->q_rx[i]; 331 + int j; 332 + 333 + for (j = 0; j < q->ndesc; j++) { 334 + struct mt76_queue_entry *e = &q->entry[j]; 335 + 336 + if (!e->skb) 337 + continue; 338 + 339 + dev_kfree_skb(e->skb); 340 + e->skb = NULL; 341 + } 342 + } 343 + } 344 + EXPORT_SYMBOL_GPL(mt76s_deinit); 345 + 346 + int mt76s_init(struct mt76_dev *dev, struct sdio_func *func, 347 + const struct mt76_bus_ops *bus_ops) 348 + { 349 + struct mt76_sdio *sdio = &dev->sdio; 350 + 351 + sdio->kthread = kthread_create(mt76s_kthread_run, dev, "mt76s"); 352 + if (IS_ERR(sdio->kthread)) 353 + return PTR_ERR(sdio->kthread); 354 + 355 + INIT_WORK(&sdio->stat_work, mt76s_tx_status_data); 356 + 357 + mutex_init(&sdio->sched.lock); 358 + dev->queue_ops = &sdio_queue_ops; 359 + dev->bus = bus_ops; 360 + dev->sdio.func = func; 361 + 362 + return 0; 363 + } 364 + EXPORT_SYMBOL_GPL(mt76s_init); 365 + 366 + MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>"); 367 + MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>"); 368 + MODULE_LICENSE("Dual BSD/GPL");
+497
drivers/net/wireless/mediatek/mt76/testmode.c
··· 1 + // SPDX-License-Identifier: ISC 2 + /* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */ 3 + #include "mt76.h" 4 + 5 + static const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS] = { 6 + [MT76_TM_ATTR_RESET] = { .type = NLA_FLAG }, 7 + [MT76_TM_ATTR_STATE] = { .type = NLA_U8 }, 8 + [MT76_TM_ATTR_TX_COUNT] = { .type = NLA_U32 }, 9 + [MT76_TM_ATTR_TX_RATE_MODE] = { .type = NLA_U8 }, 10 + [MT76_TM_ATTR_TX_RATE_NSS] = { .type = NLA_U8 }, 11 + [MT76_TM_ATTR_TX_RATE_IDX] = { .type = NLA_U8 }, 12 + [MT76_TM_ATTR_TX_RATE_SGI] = { .type = NLA_U8 }, 13 + [MT76_TM_ATTR_TX_RATE_LDPC] = { .type = NLA_U8 }, 14 + [MT76_TM_ATTR_TX_ANTENNA] = { .type = NLA_U8 }, 15 + [MT76_TM_ATTR_TX_POWER_CONTROL] = { .type = NLA_U8 }, 16 + [MT76_TM_ATTR_TX_POWER] = { .type = NLA_NESTED }, 17 + [MT76_TM_ATTR_FREQ_OFFSET] = { .type = NLA_U32 }, 18 + }; 19 + 20 + void mt76_testmode_tx_pending(struct mt76_dev *dev) 21 + { 22 + struct mt76_testmode_data *td = &dev->test; 23 + struct mt76_wcid *wcid = &dev->global_wcid; 24 + struct sk_buff *skb = td->tx_skb; 25 + struct mt76_queue *q; 26 + int qid; 27 + 28 + if (!skb || !td->tx_pending) 29 + return; 30 + 31 + qid = skb_get_queue_mapping(skb); 32 + q = dev->q_tx[qid].q; 33 + 34 + spin_lock_bh(&q->lock); 35 + 36 + while (td->tx_pending > 0 && q->queued < q->ndesc / 2) { 37 + int ret; 38 + 39 + ret = dev->queue_ops->tx_queue_skb(dev, qid, skb_get(skb), wcid, NULL); 40 + if (ret < 0) 41 + break; 42 + 43 + td->tx_pending--; 44 + td->tx_queued++; 45 + } 46 + 47 + dev->queue_ops->kick(dev, q); 48 + 49 + spin_unlock_bh(&q->lock); 50 + } 51 + 52 + 53 + static int 54 + mt76_testmode_tx_init(struct mt76_dev *dev) 55 + { 56 + struct mt76_testmode_data *td = &dev->test; 57 + struct ieee80211_tx_info *info; 58 + struct ieee80211_hdr *hdr; 59 + struct sk_buff *skb; 60 + u16 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA | 61 + IEEE80211_FCTL_FROMDS; 62 + struct ieee80211_tx_rate *rate; 63 + u8 max_nss = hweight8(dev->phy.antenna_mask); 64 + 65 + if 
(td->tx_antenna_mask) 66 + max_nss = min_t(u8, max_nss, hweight8(td->tx_antenna_mask)); 67 + 68 + skb = alloc_skb(td->tx_msdu_len, GFP_KERNEL); 69 + if (!skb) 70 + return -ENOMEM; 71 + 72 + dev_kfree_skb(td->tx_skb); 73 + td->tx_skb = skb; 74 + hdr = __skb_put_zero(skb, td->tx_msdu_len); 75 + hdr->frame_control = cpu_to_le16(fc); 76 + memcpy(hdr->addr1, dev->macaddr, sizeof(dev->macaddr)); 77 + memcpy(hdr->addr2, dev->macaddr, sizeof(dev->macaddr)); 78 + memcpy(hdr->addr3, dev->macaddr, sizeof(dev->macaddr)); 79 + 80 + info = IEEE80211_SKB_CB(skb); 81 + info->flags = IEEE80211_TX_CTL_INJECTED | 82 + IEEE80211_TX_CTL_NO_ACK | 83 + IEEE80211_TX_CTL_NO_PS_BUFFER; 84 + rate = &info->control.rates[0]; 85 + rate->count = 1; 86 + rate->idx = td->tx_rate_idx; 87 + 88 + switch (td->tx_rate_mode) { 89 + case MT76_TM_TX_MODE_CCK: 90 + if (dev->phy.chandef.chan->band != NL80211_BAND_2GHZ) 91 + return -EINVAL; 92 + 93 + if (rate->idx > 4) 94 + return -EINVAL; 95 + break; 96 + case MT76_TM_TX_MODE_OFDM: 97 + if (dev->phy.chandef.chan->band != NL80211_BAND_2GHZ) 98 + break; 99 + 100 + if (rate->idx > 8) 101 + return -EINVAL; 102 + 103 + rate->idx += 4; 104 + break; 105 + case MT76_TM_TX_MODE_HT: 106 + if (rate->idx > 8 * max_nss && 107 + !(rate->idx == 32 && 108 + dev->phy.chandef.width >= NL80211_CHAN_WIDTH_40)) 109 + return -EINVAL; 110 + 111 + rate->flags |= IEEE80211_TX_RC_MCS; 112 + break; 113 + case MT76_TM_TX_MODE_VHT: 114 + if (rate->idx > 9) 115 + return -EINVAL; 116 + 117 + if (td->tx_rate_nss > max_nss) 118 + return -EINVAL; 119 + 120 + ieee80211_rate_set_vht(rate, td->tx_rate_idx, td->tx_rate_nss); 121 + rate->flags |= IEEE80211_TX_RC_VHT_MCS; 122 + break; 123 + default: 124 + break; 125 + } 126 + 127 + if (td->tx_rate_sgi) 128 + rate->flags |= IEEE80211_TX_RC_SHORT_GI; 129 + 130 + if (td->tx_rate_ldpc) 131 + info->flags |= IEEE80211_TX_CTL_LDPC; 132 + 133 + if (td->tx_rate_mode >= MT76_TM_TX_MODE_HT) { 134 + switch (dev->phy.chandef.width) { 135 + case 
NL80211_CHAN_WIDTH_40: 136 + rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; 137 + break; 138 + case NL80211_CHAN_WIDTH_80: 139 + rate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH; 140 + break; 141 + case NL80211_CHAN_WIDTH_80P80: 142 + case NL80211_CHAN_WIDTH_160: 143 + rate->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH; 144 + break; 145 + default: 146 + break; 147 + } 148 + } 149 + 150 + skb_set_queue_mapping(skb, IEEE80211_AC_BE); 151 + 152 + return 0; 153 + } 154 + 155 + static void 156 + mt76_testmode_tx_start(struct mt76_dev *dev) 157 + { 158 + struct mt76_testmode_data *td = &dev->test; 159 + 160 + td->tx_queued = 0; 161 + td->tx_done = 0; 162 + td->tx_pending = td->tx_count; 163 + tasklet_schedule(&dev->tx_tasklet); 164 + } 165 + 166 + static void 167 + mt76_testmode_tx_stop(struct mt76_dev *dev) 168 + { 169 + struct mt76_testmode_data *td = &dev->test; 170 + 171 + tasklet_disable(&dev->tx_tasklet); 172 + 173 + td->tx_pending = 0; 174 + 175 + tasklet_enable(&dev->tx_tasklet); 176 + 177 + wait_event_timeout(dev->tx_wait, td->tx_done == td->tx_queued, 10 * HZ); 178 + 179 + dev_kfree_skb(td->tx_skb); 180 + td->tx_skb = NULL; 181 + } 182 + 183 + static inline void 184 + mt76_testmode_param_set(struct mt76_testmode_data *td, u16 idx) 185 + { 186 + td->param_set[idx / 32] |= BIT(idx % 32); 187 + } 188 + 189 + static inline bool 190 + mt76_testmode_param_present(struct mt76_testmode_data *td, u16 idx) 191 + { 192 + return td->param_set[idx / 32] & BIT(idx % 32); 193 + } 194 + 195 + static void 196 + mt76_testmode_init_defaults(struct mt76_dev *dev) 197 + { 198 + struct mt76_testmode_data *td = &dev->test; 199 + 200 + if (td->tx_msdu_len > 0) 201 + return; 202 + 203 + td->tx_msdu_len = 1024; 204 + td->tx_count = 1; 205 + td->tx_rate_mode = MT76_TM_TX_MODE_OFDM; 206 + td->tx_rate_nss = 1; 207 + } 208 + 209 + static int 210 + __mt76_testmode_set_state(struct mt76_dev *dev, enum mt76_testmode_state state) 211 + { 212 + enum mt76_testmode_state prev_state = dev->test.state; 213 + int 
err; 214 + 215 + if (prev_state == MT76_TM_STATE_TX_FRAMES) 216 + mt76_testmode_tx_stop(dev); 217 + 218 + if (state == MT76_TM_STATE_TX_FRAMES) { 219 + err = mt76_testmode_tx_init(dev); 220 + if (err) 221 + return err; 222 + } 223 + 224 + err = dev->test_ops->set_state(dev, state); 225 + if (err) { 226 + if (state == MT76_TM_STATE_TX_FRAMES) 227 + mt76_testmode_tx_stop(dev); 228 + 229 + return err; 230 + } 231 + 232 + if (state == MT76_TM_STATE_TX_FRAMES) 233 + mt76_testmode_tx_start(dev); 234 + else if (state == MT76_TM_STATE_RX_FRAMES) { 235 + memset(&dev->test.rx_stats, 0, sizeof(dev->test.rx_stats)); 236 + } 237 + 238 + dev->test.state = state; 239 + 240 + return 0; 241 + } 242 + 243 + int mt76_testmode_set_state(struct mt76_dev *dev, enum mt76_testmode_state state) 244 + { 245 + struct mt76_testmode_data *td = &dev->test; 246 + struct ieee80211_hw *hw = dev->phy.hw; 247 + 248 + if (state == td->state && state == MT76_TM_STATE_OFF) 249 + return 0; 250 + 251 + if (state > MT76_TM_STATE_OFF && 252 + (!test_bit(MT76_STATE_RUNNING, &dev->phy.state) || 253 + !(hw->conf.flags & IEEE80211_CONF_MONITOR))) 254 + return -ENOTCONN; 255 + 256 + if (state != MT76_TM_STATE_IDLE && 257 + td->state != MT76_TM_STATE_IDLE) { 258 + int ret; 259 + 260 + ret = __mt76_testmode_set_state(dev, MT76_TM_STATE_IDLE); 261 + if (ret) 262 + return ret; 263 + } 264 + 265 + return __mt76_testmode_set_state(dev, state); 266 + 267 + } 268 + EXPORT_SYMBOL(mt76_testmode_set_state); 269 + 270 + static int 271 + mt76_tm_get_u8(struct nlattr *attr, u8 *dest, u8 min, u8 max) 272 + { 273 + u8 val; 274 + 275 + if (!attr) 276 + return 0; 277 + 278 + val = nla_get_u8(attr); 279 + if (val < min || val > max) 280 + return -EINVAL; 281 + 282 + *dest = val; 283 + return 0; 284 + } 285 + 286 + int mt76_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 287 + void *data, int len) 288 + { 289 + struct mt76_phy *phy = hw->priv; 290 + struct mt76_dev *dev = phy->dev; 291 + struct mt76_testmode_data 
*td = &dev->test; 292 + struct nlattr *tb[NUM_MT76_TM_ATTRS]; 293 + u32 state; 294 + int err; 295 + int i; 296 + 297 + if (!dev->test_ops) 298 + return -EOPNOTSUPP; 299 + 300 + err = nla_parse_deprecated(tb, MT76_TM_ATTR_MAX, data, len, 301 + mt76_tm_policy, NULL); 302 + if (err) 303 + return err; 304 + 305 + err = -EINVAL; 306 + 307 + mutex_lock(&dev->mutex); 308 + 309 + if (tb[MT76_TM_ATTR_RESET]) { 310 + mt76_testmode_set_state(dev, MT76_TM_STATE_OFF); 311 + memset(td, 0, sizeof(*td)); 312 + } 313 + 314 + mt76_testmode_init_defaults(dev); 315 + 316 + if (tb[MT76_TM_ATTR_TX_COUNT]) 317 + td->tx_count = nla_get_u32(tb[MT76_TM_ATTR_TX_COUNT]); 318 + 319 + if (tb[MT76_TM_ATTR_TX_LENGTH]) { 320 + u32 val = nla_get_u32(tb[MT76_TM_ATTR_TX_LENGTH]); 321 + 322 + if (val > IEEE80211_MAX_FRAME_LEN || 323 + val < sizeof(struct ieee80211_hdr)) 324 + goto out; 325 + 326 + td->tx_msdu_len = val; 327 + } 328 + 329 + if (tb[MT76_TM_ATTR_TX_RATE_IDX]) 330 + td->tx_rate_idx = nla_get_u8(tb[MT76_TM_ATTR_TX_RATE_IDX]); 331 + 332 + if (mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_MODE], &td->tx_rate_mode, 333 + 0, MT76_TM_TX_MODE_MAX) || 334 + mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_NSS], &td->tx_rate_nss, 335 + 1, hweight8(phy->antenna_mask)) || 336 + mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_SGI], &td->tx_rate_sgi, 0, 1) || 337 + mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_RATE_LDPC], &td->tx_rate_ldpc, 0, 1) || 338 + mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_ANTENNA], &td->tx_antenna_mask, 1, 339 + phy->antenna_mask) || 340 + mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_POWER_CONTROL], 341 + &td->tx_power_control, 0, 1)) 342 + goto out; 343 + 344 + if (tb[MT76_TM_ATTR_FREQ_OFFSET]) 345 + td->freq_offset = nla_get_u32(tb[MT76_TM_ATTR_FREQ_OFFSET]); 346 + 347 + if (tb[MT76_TM_ATTR_STATE]) { 348 + state = nla_get_u32(tb[MT76_TM_ATTR_STATE]); 349 + if (state > MT76_TM_STATE_MAX) 350 + goto out; 351 + } else { 352 + state = td->state; 353 + } 354 + 355 + if (tb[MT76_TM_ATTR_TX_POWER]) { 356 + struct nlattr *cur; 357 + int 
idx = 0; 358 + int rem; 359 + 360 + nla_for_each_nested(cur, tb[MT76_TM_ATTR_TX_POWER], rem) { 361 + if (nla_len(cur) != 1 || 362 + idx >= ARRAY_SIZE(td->tx_power)) 363 + goto out; 364 + 365 + td->tx_power[idx++] = nla_get_u8(cur); 366 + } 367 + } 368 + 369 + if (dev->test_ops->set_params) { 370 + err = dev->test_ops->set_params(dev, tb, state); 371 + if (err) 372 + goto out; 373 + } 374 + 375 + for (i = MT76_TM_ATTR_STATE; i < ARRAY_SIZE(tb); i++) 376 + if (tb[i]) 377 + mt76_testmode_param_set(td, i); 378 + 379 + err = 0; 380 + if (tb[MT76_TM_ATTR_STATE]) 381 + err = mt76_testmode_set_state(dev, state); 382 + 383 + out: 384 + mutex_unlock(&dev->mutex); 385 + 386 + return err; 387 + } 388 + EXPORT_SYMBOL(mt76_testmode_cmd); 389 + 390 + static int 391 + mt76_testmode_dump_stats(struct mt76_dev *dev, struct sk_buff *msg) 392 + { 393 + struct mt76_testmode_data *td = &dev->test; 394 + u64 rx_packets = 0; 395 + u64 rx_fcs_error = 0; 396 + int i; 397 + 398 + for (i = 0; i < ARRAY_SIZE(td->rx_stats.packets); i++) { 399 + rx_packets += td->rx_stats.packets[i]; 400 + rx_fcs_error += td->rx_stats.fcs_error[i]; 401 + } 402 + 403 + if (nla_put_u32(msg, MT76_TM_STATS_ATTR_TX_PENDING, td->tx_pending) || 404 + nla_put_u32(msg, MT76_TM_STATS_ATTR_TX_QUEUED, td->tx_queued) || 405 + nla_put_u32(msg, MT76_TM_STATS_ATTR_TX_DONE, td->tx_done) || 406 + nla_put_u64_64bit(msg, MT76_TM_STATS_ATTR_RX_PACKETS, rx_packets, 407 + MT76_TM_STATS_ATTR_PAD) || 408 + nla_put_u64_64bit(msg, MT76_TM_STATS_ATTR_RX_FCS_ERROR, rx_fcs_error, 409 + MT76_TM_STATS_ATTR_PAD)) 410 + return -EMSGSIZE; 411 + 412 + if (dev->test_ops->dump_stats) 413 + return dev->test_ops->dump_stats(dev, msg); 414 + 415 + return 0; 416 + } 417 + 418 + int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg, 419 + struct netlink_callback *cb, void *data, int len) 420 + { 421 + struct mt76_phy *phy = hw->priv; 422 + struct mt76_dev *dev = phy->dev; 423 + struct mt76_testmode_data *td = &dev->test; 424 + struct nlattr 
*tb[NUM_MT76_TM_ATTRS] = {}; 425 + int err = 0; 426 + void *a; 427 + int i; 428 + 429 + if (!dev->test_ops) 430 + return -EOPNOTSUPP; 431 + 432 + if (cb->args[2]++ > 0) 433 + return -ENOENT; 434 + 435 + if (data) { 436 + err = nla_parse_deprecated(tb, MT76_TM_ATTR_MAX, data, len, 437 + mt76_tm_policy, NULL); 438 + if (err) 439 + return err; 440 + } 441 + 442 + mutex_lock(&dev->mutex); 443 + 444 + if (tb[MT76_TM_ATTR_STATS]) { 445 + a = nla_nest_start(msg, MT76_TM_ATTR_STATS); 446 + err = mt76_testmode_dump_stats(dev, msg); 447 + nla_nest_end(msg, a); 448 + 449 + goto out; 450 + } 451 + 452 + mt76_testmode_init_defaults(dev); 453 + 454 + err = -EMSGSIZE; 455 + if (nla_put_u32(msg, MT76_TM_ATTR_STATE, td->state)) 456 + goto out; 457 + 458 + if (td->mtd_name && 459 + (nla_put_string(msg, MT76_TM_ATTR_MTD_PART, td->mtd_name) || 460 + nla_put_u32(msg, MT76_TM_ATTR_MTD_OFFSET, td->mtd_offset))) 461 + goto out; 462 + 463 + if (nla_put_u32(msg, MT76_TM_ATTR_TX_COUNT, td->tx_count) || 464 + nla_put_u32(msg, MT76_TM_ATTR_TX_LENGTH, td->tx_msdu_len) || 465 + nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_MODE, td->tx_rate_mode) || 466 + nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_NSS, td->tx_rate_nss) || 467 + nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_IDX, td->tx_rate_idx) || 468 + nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_SGI, td->tx_rate_sgi) || 469 + nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_LDPC, td->tx_rate_ldpc) || 470 + (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_ANTENNA) && 471 + nla_put_u8(msg, MT76_TM_ATTR_TX_ANTENNA, td->tx_antenna_mask)) || 472 + (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_POWER_CONTROL) && 473 + nla_put_u8(msg, MT76_TM_ATTR_TX_POWER_CONTROL, td->tx_power_control)) || 474 + (mt76_testmode_param_present(td, MT76_TM_ATTR_FREQ_OFFSET) && 475 + nla_put_u8(msg, MT76_TM_ATTR_FREQ_OFFSET, td->freq_offset))) 476 + goto out; 477 + 478 + if (mt76_testmode_param_present(td, MT76_TM_ATTR_TX_POWER)) { 479 + a = nla_nest_start(msg, MT76_TM_ATTR_TX_POWER); 480 + if (!a) 481 + goto 
out; 482 + 483 + for (i = 0; i < ARRAY_SIZE(td->tx_power); i++) 484 + if (nla_put_u8(msg, i, td->tx_power[i])) 485 + goto out; 486 + 487 + nla_nest_end(msg, a); 488 + } 489 + 490 + err = 0; 491 + 492 + out: 493 + mutex_unlock(&dev->mutex); 494 + 495 + return err; 496 + } 497 + EXPORT_SYMBOL(mt76_testmode_dump);
+156
drivers/net/wireless/mediatek/mt76/testmode.h
··· 1 + /* SPDX-License-Identifier: ISC */ 2 + /* 3 + * Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> 4 + */ 5 + #ifndef __MT76_TESTMODE_H 6 + #define __MT76_TESTMODE_H 7 + 8 + /** 9 + * enum mt76_testmode_attr - testmode attributes inside NL80211_ATTR_TESTDATA 10 + * 11 + * @MT76_TM_ATTR_UNSPEC: (invalid attribute) 12 + * 13 + * @MT76_TM_ATTR_RESET: reset parameters to default (flag) 14 + * @MT76_TM_ATTR_STATE: test state (u32), see &enum mt76_testmode_state 15 + * 16 + * @MT76_TM_ATTR_MTD_PART: mtd partition used for eeprom data (string) 17 + * @MT76_TM_ATTR_MTD_OFFSET: offset of eeprom data within the partition (u32) 18 + * 19 + * @MT76_TM_ATTR_TX_COUNT: configured number of frames to send when setting 20 + * state to MT76_TM_STATE_TX_FRAMES (u32) 21 + * @MT76_TM_ATTR_TX_PENDING: pending frames during MT76_TM_STATE_TX_FRAMES (u32) 22 + * @MT76_TM_ATTR_TX_LENGTH: packet tx msdu length (u32) 23 + * @MT76_TM_ATTR_TX_RATE_MODE: packet tx mode (u8, see &enum mt76_testmode_tx_mode) 24 + * @MT76_TM_ATTR_TX_RATE_NSS: packet tx number of spatial streams (u8) 25 + * @MT76_TM_ATTR_TX_RATE_IDX: packet tx rate/MCS index (u8) 26 + * @MT76_TM_ATTR_TX_RATE_SGI: packet tx use short guard interval (u8) 27 + * @MT76_TM_ATTR_TX_RATE_LDPC: packet tx enable LDPC (u8) 28 + * 29 + * @MT76_TM_ATTR_TX_ANTENNA: tx antenna mask (u8) 30 + * @MT76_TM_ATTR_TX_POWER_CONTROL: enable tx power control (u8) 31 + * @MT76_TM_ATTR_TX_POWER: per-antenna tx power array (nested, u8 attrs) 32 + * 33 + * @MT76_TM_ATTR_FREQ_OFFSET: RF frequency offset (u32) 34 + * 35 + * @MT76_TM_ATTR_STATS: statistics (nested, see &enum mt76_testmode_stats_attr) 36 + */ 37 + enum mt76_testmode_attr { 38 + MT76_TM_ATTR_UNSPEC, 39 + 40 + MT76_TM_ATTR_RESET, 41 + MT76_TM_ATTR_STATE, 42 + 43 + MT76_TM_ATTR_MTD_PART, 44 + MT76_TM_ATTR_MTD_OFFSET, 45 + 46 + MT76_TM_ATTR_TX_COUNT, 47 + MT76_TM_ATTR_TX_LENGTH, 48 + MT76_TM_ATTR_TX_RATE_MODE, 49 + MT76_TM_ATTR_TX_RATE_NSS, 50 + MT76_TM_ATTR_TX_RATE_IDX, 51 + 
MT76_TM_ATTR_TX_RATE_SGI, 52 + MT76_TM_ATTR_TX_RATE_LDPC, 53 + 54 + MT76_TM_ATTR_TX_ANTENNA, 55 + MT76_TM_ATTR_TX_POWER_CONTROL, 56 + MT76_TM_ATTR_TX_POWER, 57 + 58 + MT76_TM_ATTR_FREQ_OFFSET, 59 + 60 + MT76_TM_ATTR_STATS, 61 + 62 + /* keep last */ 63 + NUM_MT76_TM_ATTRS, 64 + MT76_TM_ATTR_MAX = NUM_MT76_TM_ATTRS - 1, 65 + }; 66 + 67 + /** 68 + * enum mt76_testmode_state - statistics attributes 69 + * 70 + * @MT76_TM_STATS_ATTR_TX_PENDING: pending tx frames (u32) 71 + * @MT76_TM_STATS_ATTR_TX_QUEUED: queued tx frames (u32) 72 + * @MT76_TM_STATS_ATTR_TX_QUEUED: completed tx frames (u32) 73 + * 74 + * @MT76_TM_STATS_ATTR_RX_PACKETS: number of rx packets (u64) 75 + * @MT76_TM_STATS_ATTR_RX_FCS_ERROR: number of rx packets with FCS error (u64) 76 + * @MT76_TM_STATS_ATTR_LAST_RX: information about the last received packet 77 + * see &enum mt76_testmode_rx_attr 78 + */ 79 + enum mt76_testmode_stats_attr { 80 + MT76_TM_STATS_ATTR_UNSPEC, 81 + MT76_TM_STATS_ATTR_PAD, 82 + 83 + MT76_TM_STATS_ATTR_TX_PENDING, 84 + MT76_TM_STATS_ATTR_TX_QUEUED, 85 + MT76_TM_STATS_ATTR_TX_DONE, 86 + 87 + MT76_TM_STATS_ATTR_RX_PACKETS, 88 + MT76_TM_STATS_ATTR_RX_FCS_ERROR, 89 + MT76_TM_STATS_ATTR_LAST_RX, 90 + 91 + /* keep last */ 92 + NUM_MT76_TM_STATS_ATTRS, 93 + MT76_TM_STATS_ATTR_MAX = NUM_MT76_TM_STATS_ATTRS - 1, 94 + }; 95 + 96 + 97 + /** 98 + * enum mt76_testmode_rx_attr - packet rx information 99 + * 100 + * @MT76_TM_RX_ATTR_FREQ_OFFSET: frequency offset (s32) 101 + * @MT76_TM_RX_ATTR_RCPI: received channel power indicator (array, u8) 102 + * @MT76_TM_RX_ATTR_IB_RSSI: internal inband RSSI (s8) 103 + * @MT76_TM_RX_ATTR_WB_RSSI: internal wideband RSSI (s8) 104 + */ 105 + enum mt76_testmode_rx_attr { 106 + MT76_TM_RX_ATTR_UNSPEC, 107 + 108 + MT76_TM_RX_ATTR_FREQ_OFFSET, 109 + MT76_TM_RX_ATTR_RCPI, 110 + MT76_TM_RX_ATTR_IB_RSSI, 111 + MT76_TM_RX_ATTR_WB_RSSI, 112 + 113 + /* keep last */ 114 + NUM_MT76_TM_RX_ATTRS, 115 + MT76_TM_RX_ATTR_MAX = NUM_MT76_TM_RX_ATTRS - 1, 116 + }; 117 + 118 + /** 
119 + * enum mt76_testmode_state - phy test state 120 + * 121 + * @MT76_TM_STATE_OFF: test mode disabled (normal operation) 122 + * @MT76_TM_STATE_IDLE: test mode enabled, but idle 123 + * @MT76_TM_STATE_TX_FRAMES: send a fixed number of test frames 124 + * @MT76_TM_STATE_RX_FRAMES: receive packets and keep statistics 125 + */ 126 + enum mt76_testmode_state { 127 + MT76_TM_STATE_OFF, 128 + MT76_TM_STATE_IDLE, 129 + MT76_TM_STATE_TX_FRAMES, 130 + MT76_TM_STATE_RX_FRAMES, 131 + 132 + /* keep last */ 133 + NUM_MT76_TM_STATES, 134 + MT76_TM_STATE_MAX = NUM_MT76_TM_STATES - 1, 135 + }; 136 + 137 + /** 138 + * enum mt76_testmode_tx_mode - packet tx phy mode 139 + * 140 + * @MT76_TM_TX_MODE_CCK: legacy CCK mode 141 + * @MT76_TM_TX_MODE_OFDM: legacy OFDM mode 142 + * @MT76_TM_TX_MODE_HT: 802.11n MCS 143 + * @MT76_TM_TX_MODE_VHT: 802.11ac MCS 144 + */ 145 + enum mt76_testmode_tx_mode { 146 + MT76_TM_TX_MODE_CCK, 147 + MT76_TM_TX_MODE_OFDM, 148 + MT76_TM_TX_MODE_HT, 149 + MT76_TM_TX_MODE_VHT, 150 + 151 + /* keep last */ 152 + NUM_MT76_TM_TX_MODES, 153 + MT76_TM_TX_MODE_MAX = NUM_MT76_TM_TX_MODES - 1, 154 + }; 155 + 156 + #endif
+47
drivers/net/wireless/mediatek/mt76/tx.c
··· 236 236 struct ieee80211_hw *hw; 237 237 struct sk_buff_head list; 238 238 239 + #ifdef CONFIG_NL80211_TESTMODE 240 + if (skb == dev->test.tx_skb) { 241 + dev->test.tx_done++; 242 + if (dev->test.tx_queued == dev->test.tx_done) 243 + wake_up(&dev->tx_wait); 244 + } 245 + #endif 246 + 239 247 if (!skb->prev) { 240 248 hw = mt76_tx_status_get_hw(dev, skb); 241 249 ieee80211_free_txskb(hw, skb); ··· 266 258 struct mt76_queue *q; 267 259 int qid = skb_get_queue_mapping(skb); 268 260 bool ext_phy = phy != &dev->phy; 261 + 262 + if (mt76_testmode_enabled(dev)) { 263 + ieee80211_free_txskb(phy->hw, skb); 264 + return; 265 + } 269 266 270 267 if (WARN_ON(qid >= MT_TXQ_PSD)) { 271 268 qid = MT_TXQ_BE; ··· 592 579 mt76_txq_schedule_all(&dev->phy); 593 580 if (dev->phy2) 594 581 mt76_txq_schedule_all(dev->phy2); 582 + 583 + #ifdef CONFIG_NL80211_TESTMODE 584 + if (dev->test.tx_pending) 585 + mt76_testmode_tx_pending(dev); 586 + #endif 595 587 } 596 588 597 589 void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta, ··· 677 659 return wmm_queue_map[ac]; 678 660 } 679 661 EXPORT_SYMBOL_GPL(mt76_ac_to_hwq); 662 + 663 + int mt76_skb_adjust_pad(struct sk_buff *skb) 664 + { 665 + struct sk_buff *iter, *last = skb; 666 + u32 pad; 667 + 668 + /* Add zero pad of 4 - 7 bytes */ 669 + pad = round_up(skb->len, 4) + 4 - skb->len; 670 + 671 + /* First packet of a A-MSDU burst keeps track of the whole burst 672 + * length, need to update length of it and the last packet. 673 + */ 674 + skb_walk_frags(skb, iter) { 675 + last = iter; 676 + if (!iter->next) { 677 + skb->data_len += pad; 678 + skb->len += pad; 679 + break; 680 + } 681 + } 682 + 683 + if (skb_pad(last, pad)) 684 + return -ENOMEM; 685 + 686 + __skb_put(last, pad); 687 + 688 + return 0; 689 + } 690 + EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad);
+16 -74
drivers/net/wireless/mediatek/mt76/usb.c
··· 672 672 static void mt76u_rx_tasklet(unsigned long data) 673 673 { 674 674 struct mt76_dev *dev = (struct mt76_dev *)data; 675 - struct mt76_queue *q; 676 675 int i; 677 676 678 677 rcu_read_lock(); 679 - for (i = 0; i < __MT_RXQ_MAX; i++) { 680 - q = &dev->q_rx[i]; 681 - if (!q->ndesc) 682 - continue; 683 - 684 - mt76u_process_rx_queue(dev, q); 685 - } 678 + mt76_for_each_q_rx(dev, i) 679 + mt76u_process_rx_queue(dev, &dev->q_rx[i]); 686 680 rcu_read_unlock(); 687 681 } 688 682 ··· 750 756 751 757 static void mt76u_free_rx(struct mt76_dev *dev) 752 758 { 753 - struct mt76_queue *q; 754 759 int i; 755 760 756 - for (i = 0; i < __MT_RXQ_MAX; i++) { 757 - q = &dev->q_rx[i]; 758 - if (!q->ndesc) 759 - continue; 760 - 761 - mt76u_free_rx_queue(dev, q); 762 - } 761 + mt76_for_each_q_rx(dev, i) 762 + mt76u_free_rx_queue(dev, &dev->q_rx[i]); 763 763 } 764 764 765 765 void mt76u_stop_rx(struct mt76_dev *dev) 766 766 { 767 - struct mt76_queue *q; 768 - int i, j; 767 + int i; 769 768 770 - for (i = 0; i < __MT_RXQ_MAX; i++) { 771 - q = &dev->q_rx[i]; 772 - if (!q->ndesc) 773 - continue; 769 + mt76_for_each_q_rx(dev, i) { 770 + struct mt76_queue *q = &dev->q_rx[i]; 771 + int j; 774 772 775 773 for (j = 0; j < q->ndesc; j++) 776 774 usb_poison_urb(q->entry[j].urb); ··· 774 788 775 789 int mt76u_resume_rx(struct mt76_dev *dev) 776 790 { 777 - struct mt76_queue *q; 778 - int i, j, err; 791 + int i; 779 792 780 - for (i = 0; i < __MT_RXQ_MAX; i++) { 781 - q = &dev->q_rx[i]; 782 - 783 - if (!q->ndesc) 784 - continue; 793 + mt76_for_each_q_rx(dev, i) { 794 + struct mt76_queue *q = &dev->q_rx[i]; 795 + int err, j; 785 796 786 797 for (j = 0; j < q->ndesc; j++) 787 798 usb_unpoison_urb(q->entry[j].urb); ··· 842 859 843 860 if (dev->drv->tx_status_data && 844 861 !test_and_set_bit(MT76_READING_STATS, &dev->phy.state)) 845 - queue_work(dev->usb.wq, &dev->usb.stat_work); 862 + queue_work(dev->wq, &dev->usb.stat_work); 846 863 if (wake) 847 864 ieee80211_wake_queue(dev->hw, i); 848 
865 } ··· 868 885 } 869 886 870 887 if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state)) 871 - queue_work(usb->wq, &usb->stat_work); 888 + queue_work(dev->wq, &usb->stat_work); 872 889 else 873 890 clear_bit(MT76_READING_STATS, &dev->phy.state); 874 891 } ··· 903 920 904 921 return urb->num_sgs; 905 922 } 906 - 907 - int mt76u_skb_dma_info(struct sk_buff *skb, u32 info) 908 - { 909 - struct sk_buff *iter, *last = skb; 910 - u32 pad; 911 - 912 - put_unaligned_le32(info, skb_push(skb, sizeof(info))); 913 - /* Add zero pad of 4 - 7 bytes */ 914 - pad = round_up(skb->len, 4) + 4 - skb->len; 915 - 916 - /* First packet of a A-MSDU burst keeps track of the whole burst 917 - * length, need to update length of it and the last packet. 918 - */ 919 - skb_walk_frags(skb, iter) { 920 - last = iter; 921 - if (!iter->next) { 922 - skb->data_len += pad; 923 - skb->len += pad; 924 - break; 925 - } 926 - } 927 - 928 - if (skb_pad(last, pad)) 929 - return -ENOMEM; 930 - __skb_put(last, pad); 931 - 932 - return 0; 933 - } 934 - EXPORT_SYMBOL_GPL(mt76u_skb_dma_info); 935 923 936 924 static int 937 925 mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid, ··· 1115 1161 .kick = mt76u_tx_kick, 1116 1162 }; 1117 1163 1118 - void mt76u_deinit(struct mt76_dev *dev) 1119 - { 1120 - if (dev->usb.wq) { 1121 - destroy_workqueue(dev->usb.wq); 1122 - dev->usb.wq = NULL; 1123 - } 1124 - } 1125 - EXPORT_SYMBOL_GPL(mt76u_deinit); 1126 - 1127 1164 int mt76u_init(struct mt76_dev *dev, 1128 1165 struct usb_interface *intf, bool ext) 1129 1166 { ··· 1136 1191 tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev); 1137 1192 tasklet_init(&dev->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev); 1138 1193 INIT_WORK(&usb->stat_work, mt76u_tx_status_data); 1139 - 1140 - usb->wq = alloc_workqueue("mt76u", WQ_UNBOUND, 0); 1141 - if (!usb->wq) 1142 - return -ENOMEM; 1143 1194 1144 1195 usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0), 1); 1145 1196 if (usb->data_len < 
32) ··· 1160 1219 return 0; 1161 1220 1162 1221 error: 1163 - mt76u_deinit(dev); 1222 + destroy_workqueue(dev->wq); 1223 + 1164 1224 return err; 1165 1225 } 1166 1226 EXPORT_SYMBOL_GPL(mt76u_init);
+2 -2
drivers/net/wireless/mediatek/mt76/util.c
··· 13 13 14 14 timeout /= 10; 15 15 do { 16 - cur = dev->bus->rr(dev, offset) & mask; 16 + cur = __mt76_rr(dev, offset) & mask; 17 17 if (cur == val) 18 18 return true; 19 19 ··· 31 31 32 32 timeout /= 10; 33 33 do { 34 - cur = dev->bus->rr(dev, offset) & mask; 34 + cur = __mt76_rr(dev, offset) & mask; 35 35 if (cur == val) 36 36 return true; 37 37
+3 -1
drivers/net/wireless/mediatek/mt7601u/mcu.c
··· 116 116 int sent, ret; 117 117 u8 seq = 0; 118 118 119 - if (test_bit(MT7601U_STATE_REMOVED, &dev->state)) 119 + if (test_bit(MT7601U_STATE_REMOVED, &dev->state)) { 120 + consume_skb(skb); 120 121 return 0; 122 + } 121 123 122 124 mutex_lock(&dev->mcu.mutex); 123 125
+2 -4
drivers/net/wireless/microchip/wilc1000/sdio.c
··· 6 6 7 7 #include <linux/clk.h> 8 8 #include <linux/mmc/sdio_func.h> 9 + #include <linux/mmc/sdio_ids.h> 9 10 #include <linux/mmc/host.h> 10 11 #include <linux/mmc/sdio.h> 11 12 #include <linux/of_irq.h> ··· 16 15 17 16 #define SDIO_MODALIAS "wilc1000_sdio" 18 17 19 - #define SDIO_VENDOR_ID_WILC 0x0296 20 - #define SDIO_DEVICE_ID_WILC 0x5347 21 - 22 18 static const struct sdio_device_id wilc_sdio_ids[] = { 23 - { SDIO_DEVICE(SDIO_VENDOR_ID_WILC, SDIO_DEVICE_ID_WILC) }, 19 + { SDIO_DEVICE(SDIO_VENDOR_ID_MICROCHIP_WILC, SDIO_DEVICE_ID_MICROCHIP_WILC1000) }, 24 20 { }, 25 21 }; 26 22
+4 -1
drivers/net/wireless/quantenna/qtnfmac/core.c
··· 446 446 } 447 447 448 448 wiphy = qtnf_wiphy_allocate(bus, pdev); 449 - if (!wiphy) 449 + if (!wiphy) { 450 + if (pdev) 451 + platform_device_unregister(pdev); 450 452 return ERR_PTR(-ENOMEM); 453 + } 451 454 452 455 mac = wiphy_priv(wiphy); 453 456
+1 -2
drivers/net/wireless/ralink/rt2x00/rt2400pci.c
··· 1834 1834 .id_table = rt2400pci_device_table, 1835 1835 .probe = rt2400pci_probe, 1836 1836 .remove = rt2x00pci_remove, 1837 - .suspend = rt2x00pci_suspend, 1838 - .resume = rt2x00pci_resume, 1837 + .driver.pm = &rt2x00pci_pm_ops, 1839 1838 }; 1840 1839 1841 1840 module_pci_driver(rt2400pci_driver);
+1 -2
drivers/net/wireless/ralink/rt2x00/rt2500pci.c
··· 2132 2132 .id_table = rt2500pci_device_table, 2133 2133 .probe = rt2500pci_probe, 2134 2134 .remove = rt2x00pci_remove, 2135 - .suspend = rt2x00pci_suspend, 2136 - .resume = rt2x00pci_resume, 2135 + .driver.pm = &rt2x00pci_pm_ops, 2137 2136 }; 2138 2137 2139 2138 module_pci_driver(rt2500pci_driver);
+1 -2
drivers/net/wireless/ralink/rt2x00/rt2800pci.c
··· 455 455 .id_table = rt2800pci_device_table, 456 456 .probe = rt2800pci_probe, 457 457 .remove = rt2x00pci_remove, 458 - .suspend = rt2x00pci_suspend, 459 - .resume = rt2x00pci_resume, 458 + .driver.pm = &rt2x00pci_pm_ops, 460 459 }; 461 460 462 461 module_pci_driver(rt2800pci_driver);
+2 -3
drivers/net/wireless/ralink/rt2x00/rt2x00.h
··· 1487 1487 */ 1488 1488 int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev); 1489 1489 void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev); 1490 - #ifdef CONFIG_PM 1491 - int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev, pm_message_t state); 1490 + 1491 + int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev); 1492 1492 int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev); 1493 - #endif /* CONFIG_PM */ 1494 1493 1495 1494 #endif /* RT2X00_H */
+1 -3
drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
··· 1556 1556 /* 1557 1557 * Device state handlers 1558 1558 */ 1559 - #ifdef CONFIG_PM 1560 - int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev, pm_message_t state) 1559 + int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev) 1561 1560 { 1562 1561 rt2x00_dbg(rt2x00dev, "Going to sleep\n"); 1563 1562 ··· 1613 1614 return 0; 1614 1615 } 1615 1616 EXPORT_SYMBOL_GPL(rt2x00lib_resume); 1616 - #endif /* CONFIG_PM */ 1617 1617 1618 1618 /* 1619 1619 * rt2x00lib module information.
+8 -23
drivers/net/wireless/ralink/rt2x00/rt2x00pci.c
··· 169 169 } 170 170 EXPORT_SYMBOL_GPL(rt2x00pci_remove); 171 171 172 - #ifdef CONFIG_PM 173 - int rt2x00pci_suspend(struct pci_dev *pci_dev, pm_message_t state) 172 + static int __maybe_unused rt2x00pci_suspend(struct device *dev) 174 173 { 175 - struct ieee80211_hw *hw = pci_get_drvdata(pci_dev); 174 + struct ieee80211_hw *hw = dev_get_drvdata(dev); 176 175 struct rt2x00_dev *rt2x00dev = hw->priv; 177 - int retval; 178 176 179 - retval = rt2x00lib_suspend(rt2x00dev, state); 180 - if (retval) 181 - return retval; 182 - 183 - pci_save_state(pci_dev); 184 - pci_disable_device(pci_dev); 185 - return pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state)); 177 + return rt2x00lib_suspend(rt2x00dev); 186 178 } 187 - EXPORT_SYMBOL_GPL(rt2x00pci_suspend); 188 179 189 - int rt2x00pci_resume(struct pci_dev *pci_dev) 180 + static int __maybe_unused rt2x00pci_resume(struct device *dev) 190 181 { 191 - struct ieee80211_hw *hw = pci_get_drvdata(pci_dev); 182 + struct ieee80211_hw *hw = dev_get_drvdata(dev); 192 183 struct rt2x00_dev *rt2x00dev = hw->priv; 193 184 194 - if (pci_set_power_state(pci_dev, PCI_D0) || 195 - pci_enable_device(pci_dev)) { 196 - rt2x00_err(rt2x00dev, "Failed to resume device\n"); 197 - return -EIO; 198 - } 199 - 200 - pci_restore_state(pci_dev); 201 185 return rt2x00lib_resume(rt2x00dev); 202 186 } 203 - EXPORT_SYMBOL_GPL(rt2x00pci_resume); 204 - #endif /* CONFIG_PM */ 187 + 188 + SIMPLE_DEV_PM_OPS(rt2x00pci_pm_ops, rt2x00pci_suspend, rt2x00pci_resume); 189 + EXPORT_SYMBOL_GPL(rt2x00pci_pm_ops); 205 190 206 191 /* 207 192 * rt2x00pci module information.
+2 -7
drivers/net/wireless/ralink/rt2x00/rt2x00pci.h
··· 21 21 */ 22 22 int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops); 23 23 void rt2x00pci_remove(struct pci_dev *pci_dev); 24 - #ifdef CONFIG_PM 25 - int rt2x00pci_suspend(struct pci_dev *pci_dev, pm_message_t state); 26 - int rt2x00pci_resume(struct pci_dev *pci_dev); 27 - #else 28 - #define rt2x00pci_suspend NULL 29 - #define rt2x00pci_resume NULL 30 - #endif /* CONFIG_PM */ 24 + 25 + extern const struct dev_pm_ops rt2x00pci_pm_ops; 31 26 32 27 #endif /* RT2X00PCI_H */
+1 -1
drivers/net/wireless/ralink/rt2x00/rt2x00soc.c
··· 130 130 struct ieee80211_hw *hw = platform_get_drvdata(pdev); 131 131 struct rt2x00_dev *rt2x00dev = hw->priv; 132 132 133 - return rt2x00lib_suspend(rt2x00dev, state); 133 + return rt2x00lib_suspend(rt2x00dev); 134 134 } 135 135 EXPORT_SYMBOL_GPL(rt2x00soc_suspend); 136 136
+1 -1
drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
··· 886 886 struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); 887 887 struct rt2x00_dev *rt2x00dev = hw->priv; 888 888 889 - return rt2x00lib_suspend(rt2x00dev, state); 889 + return rt2x00lib_suspend(rt2x00dev); 890 890 } 891 891 EXPORT_SYMBOL_GPL(rt2x00usb_suspend); 892 892
+1 -2
drivers/net/wireless/ralink/rt2x00/rt61pci.c
··· 3009 3009 .id_table = rt61pci_device_table, 3010 3010 .probe = rt61pci_probe, 3011 3011 .remove = rt2x00pci_remove, 3012 - .suspend = rt2x00pci_suspend, 3013 - .resume = rt2x00pci_resume, 3012 + .driver.pm = &rt2x00pci_pm_ops, 3014 3013 }; 3015 3014 3016 3015 module_pci_driver(rt61pci_driver);
+3 -5
drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
··· 894 894 (low_power ? ", 32k" : "")); 895 895 896 896 seq_printf(m, 897 - "\n %-35s = %02x %02x %02x %02x %02x %02x (0x%x/0x%x)", 897 + "\n %-35s = %6ph (0x%x/0x%x)", 898 898 "Power mode cmd(lps/rpwm)", 899 - btcoexist->pwr_mode_val[0], btcoexist->pwr_mode_val[1], 900 - btcoexist->pwr_mode_val[2], btcoexist->pwr_mode_val[3], 901 - btcoexist->pwr_mode_val[4], btcoexist->pwr_mode_val[5], 899 + btcoexist->pwr_mode_val, 902 900 btcoexist->bt_info.lps_val, 903 901 btcoexist->bt_info.rpwm_val); 904 902 } ··· 1316 1318 { 1317 1319 struct rtl_priv *rtlpriv = adapter; 1318 1320 struct btc_coexist *btcoexist = rtl_btc_coexist(rtlpriv); 1319 - u8 ant_num = 2, chip_type, single_ant_path = 0; 1321 + u8 ant_num, chip_type, single_ant_path; 1320 1322 1321 1323 if (!btcoexist) 1322 1324 return false;
+2 -1
drivers/net/wireless/realtek/rtw88/coex.c
··· 1962 1962 if (coex_stat->wl_under_ips) 1963 1963 return; 1964 1964 1965 - if (coex->freeze && !coex_stat->bt_setup_link) 1965 + if (coex->freeze && coex_dm->reason == COEX_RSN_BTINFO && 1966 + !coex_stat->bt_setup_link) 1966 1967 return; 1967 1968 1968 1969 coex_stat->cnt_wl[COEX_CNT_WL_COEXRUN]++;
+30
drivers/net/wireless/realtek/rtw88/debug.c
··· 344 344 return count; 345 345 } 346 346 347 + static ssize_t rtw_debugfs_set_h2c(struct file *filp, 348 + const char __user *buffer, 349 + size_t count, loff_t *loff) 350 + { 351 + struct rtw_debugfs_priv *debugfs_priv = filp->private_data; 352 + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; 353 + char tmp[32 + 1]; 354 + u8 param[8]; 355 + int num; 356 + 357 + rtw_debugfs_copy_from_user(tmp, sizeof(tmp), buffer, count, 3); 358 + 359 + num = sscanf(tmp, "%hhx,%hhx,%hhx,%hhx,%hhx,%hhx,%hhx,%hhx", 360 + &param[0], &param[1], &param[2], &param[3], 361 + &param[4], &param[5], &param[6], &param[7]); 362 + if (num != 8) { 363 + rtw_info(rtwdev, "invalid H2C command format for debug\n"); 364 + return -EINVAL; 365 + } 366 + 367 + rtw_fw_h2c_cmd_dbg(rtwdev, param); 368 + 369 + return count; 370 + } 371 + 347 372 static ssize_t rtw_debugfs_set_rf_write(struct file *filp, 348 373 const char __user *buffer, 349 374 size_t count, loff_t *loff) ··· 833 808 .cb_write = rtw_debugfs_set_write_reg, 834 809 }; 835 810 811 + static struct rtw_debugfs_priv rtw_debug_priv_h2c = { 812 + .cb_write = rtw_debugfs_set_h2c, 813 + }; 814 + 836 815 static struct rtw_debugfs_priv rtw_debug_priv_rf_write = { 837 816 .cb_write = rtw_debugfs_set_rf_write, 838 817 }; ··· 906 877 rtw_debugfs_add_r(phy_info); 907 878 rtw_debugfs_add_r(coex_info); 908 879 rtw_debugfs_add_rw(coex_enable); 880 + rtw_debugfs_add_w(h2c); 909 881 rtw_debugfs_add_r(mac_0); 910 882 rtw_debugfs_add_r(mac_1); 911 883 rtw_debugfs_add_r(mac_2);
+11 -6
drivers/net/wireless/realtek/rtw88/fw.c
··· 253 253 spin_unlock(&rtwdev->h2c.lock); 254 254 } 255 255 256 + void rtw_fw_h2c_cmd_dbg(struct rtw_dev *rtwdev, u8 *h2c) 257 + { 258 + rtw_fw_send_h2c_command(rtwdev, h2c); 259 + } 260 + 256 261 static void rtw_fw_send_h2c_packet(struct rtw_dev *rtwdev, u8 *h2c_pkt) 257 262 { 258 263 int ret; ··· 461 456 SET_RA_INFO_INIT_RA_LVL(h2c_pkt, si->init_ra_lv); 462 457 SET_RA_INFO_SGI_EN(h2c_pkt, si->sgi_enable); 463 458 SET_RA_INFO_BW_MODE(h2c_pkt, si->bw_mode); 464 - SET_RA_INFO_LDPC(h2c_pkt, si->ldpc_en); 459 + SET_RA_INFO_LDPC(h2c_pkt, !!si->ldpc_en); 465 460 SET_RA_INFO_NO_UPDATE(h2c_pkt, no_update); 466 461 SET_RA_INFO_VHT_EN(h2c_pkt, si->vht_enable); 467 462 SET_RA_INFO_DIS_PT(h2c_pkt, disable_pt); ··· 920 915 return skb_new; 921 916 } 922 917 923 - static void rtw_fill_rsvd_page_desc(struct rtw_dev *rtwdev, struct sk_buff *skb) 918 + static void rtw_fill_rsvd_page_desc(struct rtw_dev *rtwdev, struct sk_buff *skb, 919 + enum rtw_rsvd_packet_type type) 924 920 { 925 - struct rtw_tx_pkt_info pkt_info; 921 + struct rtw_tx_pkt_info pkt_info = {0}; 926 922 struct rtw_chip_info *chip = rtwdev->chip; 927 923 u8 *pkt_desc; 928 924 929 - memset(&pkt_info, 0, sizeof(pkt_info)); 930 - rtw_rsvd_page_pkt_info_update(rtwdev, &pkt_info, skb); 925 + rtw_tx_rsvd_page_pkt_info_update(rtwdev, &pkt_info, skb, type); 931 926 pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz); 932 927 memset(pkt_desc, 0, chip->tx_pkt_desc_sz); 933 928 rtw_tx_fill_tx_desc(&pkt_info, skb); ··· 1266 1261 * And iter->len will be added with size of tx_desc_sz. 1267 1262 */ 1268 1263 if (rsvd_pkt->add_txdesc) 1269 - rtw_fill_rsvd_page_desc(rtwdev, iter); 1264 + rtw_fill_rsvd_page_desc(rtwdev, iter, rsvd_pkt->type); 1270 1265 1271 1266 rsvd_pkt->skb = iter; 1272 1267 rsvd_pkt->page = total_page;
+2
drivers/net/wireless/realtek/rtw88/fw.h
··· 563 563 void rtw_fw_update_pkt_probe_req(struct rtw_dev *rtwdev, 564 564 struct cfg80211_ssid *ssid); 565 565 void rtw_fw_channel_switch(struct rtw_dev *rtwdev, bool enable); 566 + void rtw_fw_h2c_cmd_dbg(struct rtw_dev *rtwdev, u8 *h2c); 567 + 566 568 #endif
+27
drivers/net/wireless/realtek/rtw88/mac80211.c
··· 231 231 mutex_unlock(&rtwdev->mutex); 232 232 } 233 233 234 + static int rtw_ops_change_interface(struct ieee80211_hw *hw, 235 + struct ieee80211_vif *vif, 236 + enum nl80211_iftype type, bool p2p) 237 + { 238 + struct rtw_dev *rtwdev = hw->priv; 239 + 240 + rtw_info(rtwdev, "change vif %pM (%d)->(%d), p2p (%d)->(%d)\n", 241 + vif->addr, vif->type, type, vif->p2p, p2p); 242 + 243 + rtw_ops_remove_interface(hw, vif); 244 + 245 + vif->type = type; 246 + vif->p2p = p2p; 247 + 248 + return rtw_ops_add_interface(hw, vif); 249 + } 250 + 234 251 static void rtw_ops_configure_filter(struct ieee80211_hw *hw, 235 252 unsigned int changed_flags, 236 253 unsigned int *new_flags, ··· 389 372 390 373 if (changed & BSS_CHANGED_BEACON) 391 374 rtw_fw_download_rsvd_page(rtwdev); 375 + 376 + if (changed & BSS_CHANGED_BEACON_ENABLED) { 377 + if (conf->enable_beacon) 378 + rtw_write32_set(rtwdev, REG_FWHW_TXQ_CTRL, 379 + BIT_EN_BCNQ_DL); 380 + else 381 + rtw_write32_clr(rtwdev, REG_FWHW_TXQ_CTRL, 382 + BIT_EN_BCNQ_DL); 383 + } 392 384 393 385 if (changed & BSS_CHANGED_MU_GROUPS) 394 386 rtw_chip_set_gid_table(rtwdev, vif, conf); ··· 853 827 .config = rtw_ops_config, 854 828 .add_interface = rtw_ops_add_interface, 855 829 .remove_interface = rtw_ops_remove_interface, 830 + .change_interface = rtw_ops_change_interface, 856 831 .configure_filter = rtw_ops_configure_filter, 857 832 .bss_info_changed = rtw_ops_bss_info_changed, 858 833 .conf_tx = rtw_ops_conf_tx,
+6 -5
drivers/net/wireless/realtek/rtw88/main.c
··· 722 722 stbc_en = VHT_STBC_EN; 723 723 if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC) 724 724 ldpc_en = VHT_LDPC_EN; 725 - if (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80) 726 - is_support_sgi = true; 727 725 } else if (sta->ht_cap.ht_supported) { 728 726 ra_mask |= (sta->ht_cap.mcs.rx_mask[1] << 20) | 729 727 (sta->ht_cap.mcs.rx_mask[0] << 12); ··· 729 731 stbc_en = HT_STBC_EN; 730 732 if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING) 731 733 ldpc_en = HT_LDPC_EN; 732 - if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20 || 733 - sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40) 734 - is_support_sgi = true; 735 734 } 736 735 737 736 if (efuse->hw_cap.nss == 1) ··· 770 775 switch (sta->bandwidth) { 771 776 case IEEE80211_STA_RX_BW_80: 772 777 bw_mode = RTW_CHANNEL_WIDTH_80; 778 + is_support_sgi = sta->vht_cap.vht_supported && 779 + (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80); 773 780 break; 774 781 case IEEE80211_STA_RX_BW_40: 775 782 bw_mode = RTW_CHANNEL_WIDTH_40; 783 + is_support_sgi = sta->ht_cap.ht_supported && 784 + (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40); 776 785 break; 777 786 default: 778 787 bw_mode = RTW_CHANNEL_WIDTH_20; 788 + is_support_sgi = sta->ht_cap.ht_supported && 789 + (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20); 779 790 break; 780 791 } 781 792
+2
drivers/net/wireless/realtek/rtw88/main.h
··· 592 592 bool dis_qselseq; 593 593 bool en_hwseq; 594 594 u8 hw_ssn_sel; 595 + bool nav_use_hdr; 596 + bool bt_null; 595 597 }; 596 598 597 599 struct rtw_rx_pkt_stat {
+1
drivers/net/wireless/realtek/rtw88/reg.h
··· 61 61 #define BIT_FSPI_EN BIT(19) 62 62 #define BIT_EN_SIC BIT(12) 63 63 #define BIT_BT_AOD_GPIO3 BIT(9) 64 + #define BIT_PO_BT_PTA_PINS BIT(9) 64 65 #define BIT_BT_PTA_EN BIT(5) 65 66 #define BIT_WLRFE_4_5_EN BIT(2) 66 67
+404 -1
drivers/net/wireless/realtek/rtw88/rtw8821c.c
··· 649 649 rtw8821c_do_iqk(rtwdev); 650 650 } 651 651 652 + /* for coex */ 653 + static void rtw8821c_coex_cfg_init(struct rtw_dev *rtwdev) 654 + { 655 + /* enable TBTT nterrupt */ 656 + rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION); 657 + 658 + /* BT report packet sample rate */ 659 + rtw_write8_mask(rtwdev, REG_BT_TDMA_TIME, SAMPLE_RATE_MASK, 660 + SAMPLE_RATE); 661 + 662 + /* enable BT counter statistics */ 663 + rtw_write8(rtwdev, REG_BT_STAT_CTRL, BT_CNT_ENABLE); 664 + 665 + /* enable PTA (3-wire function form BT side) */ 666 + rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_BT_PTA_EN); 667 + rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_PO_BT_PTA_PINS); 668 + 669 + /* enable PTA (tx/rx signal form WiFi side) */ 670 + rtw_write8_set(rtwdev, REG_QUEUE_CTRL, BIT_PTA_WL_TX_EN); 671 + /* wl tx signal to PTA not case EDCCA */ 672 + rtw_write8_clr(rtwdev, REG_QUEUE_CTRL, BIT_PTA_EDCCA_EN); 673 + /* GNT_BT=1 while select both */ 674 + rtw_write16_set(rtwdev, REG_BT_COEX_V2, BIT_GNT_BT_POLARITY); 675 + 676 + /* beacon queue always hi-pri */ 677 + rtw_write8_mask(rtwdev, REG_BT_COEX_TABLE_H + 3, BIT_BCN_QUEUE, 678 + BCN_PRI_EN); 679 + } 680 + 681 + static void rtw8821c_coex_cfg_ant_switch(struct rtw_dev *rtwdev, u8 ctrl_type, 682 + u8 pos_type) 683 + { 684 + struct rtw_coex *coex = &rtwdev->coex; 685 + struct rtw_coex_dm *coex_dm = &coex->dm; 686 + struct rtw_coex_rfe *coex_rfe = &coex->rfe; 687 + u32 switch_status = FIELD_PREP(CTRL_TYPE_MASK, ctrl_type) | pos_type; 688 + bool polarity_inverse; 689 + u8 regval = 0; 690 + 691 + if (switch_status == coex_dm->cur_switch_status) 692 + return; 693 + 694 + coex_dm->cur_switch_status = switch_status; 695 + 696 + if (coex_rfe->ant_switch_diversity && 697 + ctrl_type == COEX_SWITCH_CTRL_BY_BBSW) 698 + ctrl_type = COEX_SWITCH_CTRL_BY_ANTDIV; 699 + 700 + polarity_inverse = (coex_rfe->ant_switch_polarity == 1); 701 + 702 + switch (ctrl_type) { 703 + default: 704 + case COEX_SWITCH_CTRL_BY_BBSW: 705 + 
rtw_write32_clr(rtwdev, REG_LED_CFG, BIT_DPDT_SEL_EN); 706 + rtw_write32_set(rtwdev, REG_LED_CFG, BIT_DPDT_WL_SEL); 707 + /* BB SW, DPDT use RFE_ctrl8 and RFE_ctrl9 as ctrl pin */ 708 + rtw_write8_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_RFE_SEL89, 709 + DPDT_CTRL_PIN); 710 + 711 + if (pos_type == COEX_SWITCH_TO_WLG_BT) { 712 + if (coex_rfe->rfe_module_type != 0x4 && 713 + coex_rfe->rfe_module_type != 0x2) 714 + regval = 0x3; 715 + else 716 + regval = (!polarity_inverse ? 0x2 : 0x1); 717 + } else if (pos_type == COEX_SWITCH_TO_WLG) { 718 + regval = (!polarity_inverse ? 0x2 : 0x1); 719 + } else { 720 + regval = (!polarity_inverse ? 0x1 : 0x2); 721 + } 722 + 723 + rtw_write8_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_R_RFE_SEL_15, 724 + regval); 725 + break; 726 + case COEX_SWITCH_CTRL_BY_PTA: 727 + rtw_write32_clr(rtwdev, REG_LED_CFG, BIT_DPDT_SEL_EN); 728 + rtw_write32_set(rtwdev, REG_LED_CFG, BIT_DPDT_WL_SEL); 729 + /* PTA, DPDT use RFE_ctrl8 and RFE_ctrl9 as ctrl pin */ 730 + rtw_write8_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_RFE_SEL89, 731 + PTA_CTRL_PIN); 732 + 733 + regval = (!polarity_inverse ? 0x2 : 0x1); 734 + rtw_write8_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_R_RFE_SEL_15, 735 + regval); 736 + break; 737 + case COEX_SWITCH_CTRL_BY_ANTDIV: 738 + rtw_write32_clr(rtwdev, REG_LED_CFG, BIT_DPDT_SEL_EN); 739 + rtw_write32_set(rtwdev, REG_LED_CFG, BIT_DPDT_WL_SEL); 740 + rtw_write8_mask(rtwdev, REG_RFE_CTRL8, BIT_MASK_RFE_SEL89, 741 + ANTDIC_CTRL_PIN); 742 + break; 743 + case COEX_SWITCH_CTRL_BY_MAC: 744 + rtw_write32_set(rtwdev, REG_LED_CFG, BIT_DPDT_SEL_EN); 745 + 746 + regval = (!polarity_inverse ? 
0x0 : 0x1); 747 + rtw_write8_mask(rtwdev, REG_PAD_CTRL1, BIT_SW_DPDT_SEL_DATA, 748 + regval); 749 + break; 750 + case COEX_SWITCH_CTRL_BY_FW: 751 + rtw_write32_clr(rtwdev, REG_LED_CFG, BIT_DPDT_SEL_EN); 752 + rtw_write32_set(rtwdev, REG_LED_CFG, BIT_DPDT_WL_SEL); 753 + break; 754 + case COEX_SWITCH_CTRL_BY_BT: 755 + rtw_write32_clr(rtwdev, REG_LED_CFG, BIT_DPDT_SEL_EN); 756 + rtw_write32_clr(rtwdev, REG_LED_CFG, BIT_DPDT_WL_SEL); 757 + break; 758 + } 759 + 760 + if (ctrl_type == COEX_SWITCH_CTRL_BY_BT) { 761 + rtw_write32_clr(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE1); 762 + rtw_write32_clr(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE2); 763 + } else { 764 + rtw_write32_set(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE1); 765 + rtw_write32_set(rtwdev, REG_CTRL_TYPE, BIT_CTRL_TYPE2); 766 + } 767 + } 768 + 769 + static void rtw8821c_coex_cfg_gnt_fix(struct rtw_dev *rtwdev) 770 + {} 771 + 772 + static void rtw8821c_coex_cfg_gnt_debug(struct rtw_dev *rtwdev) 773 + { 774 + rtw_write32_clr(rtwdev, REG_PAD_CTRL1, BIT_BTGP_SPI_EN); 775 + rtw_write32_clr(rtwdev, REG_PAD_CTRL1, BIT_BTGP_JTAG_EN); 776 + rtw_write32_clr(rtwdev, REG_GPIO_MUXCFG, BIT_FSPI_EN); 777 + rtw_write32_clr(rtwdev, REG_PAD_CTRL1, BIT_LED1DIS); 778 + rtw_write32_clr(rtwdev, REG_SYS_SDIO_CTRL, BIT_SDIO_INT); 779 + rtw_write32_clr(rtwdev, REG_SYS_SDIO_CTRL, BIT_DBG_GNT_WL_BT); 780 + } 781 + 782 + static void rtw8821c_coex_cfg_rfe_type(struct rtw_dev *rtwdev) 783 + { 784 + struct rtw_coex *coex = &rtwdev->coex; 785 + struct rtw_coex_rfe *coex_rfe = &coex->rfe; 786 + struct rtw_efuse *efuse = &rtwdev->efuse; 787 + 788 + coex_rfe->rfe_module_type = efuse->rfe_option; 789 + coex_rfe->ant_switch_polarity = 0; 790 + coex_rfe->ant_switch_exist = true; 791 + coex_rfe->wlg_at_btg = false; 792 + 793 + switch (coex_rfe->rfe_module_type) { 794 + case 0: 795 + case 8: 796 + case 1: 797 + case 9: /* 1-Ant, Main, WLG */ 798 + default: /* 2-Ant, DPDT, WLG */ 799 + break; 800 + case 2: 801 + case 10: /* 1-Ant, Main, BTG */ 802 + case 7: 803 + 
case 15: /* 2-Ant, DPDT, BTG */ 804 + coex_rfe->wlg_at_btg = true; 805 + break; 806 + case 3: 807 + case 11: /* 1-Ant, Aux, WLG */ 808 + coex_rfe->ant_switch_polarity = 1; 809 + break; 810 + case 4: 811 + case 12: /* 1-Ant, Aux, BTG */ 812 + coex_rfe->wlg_at_btg = true; 813 + coex_rfe->ant_switch_polarity = 1; 814 + break; 815 + case 5: 816 + case 13: /* 2-Ant, no switch, WLG */ 817 + case 6: 818 + case 14: /* 2-Ant, no antenna switch, WLG */ 819 + coex_rfe->ant_switch_exist = false; 820 + break; 821 + } 822 + } 823 + 824 + static void rtw8821c_coex_cfg_wl_tx_power(struct rtw_dev *rtwdev, u8 wl_pwr) 825 + { 826 + struct rtw_coex *coex = &rtwdev->coex; 827 + struct rtw_coex_dm *coex_dm = &coex->dm; 828 + struct rtw_efuse *efuse = &rtwdev->efuse; 829 + bool share_ant = efuse->share_ant; 830 + 831 + if (share_ant) 832 + return; 833 + 834 + if (wl_pwr == coex_dm->cur_wl_pwr_lvl) 835 + return; 836 + 837 + coex_dm->cur_wl_pwr_lvl = wl_pwr; 838 + } 839 + 840 + static void rtw8821c_coex_cfg_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain) 841 + {} 842 + 652 843 static void 653 844 rtw8821c_txagc_swing_offset(struct rtw_dev *rtwdev, u8 pwr_idx_offset, 654 845 s8 pwr_idx_offset_lower, ··· 1484 1293 .config_bfee = rtw8821c_bf_config_bfee, 1485 1294 .set_gid_table = rtw_bf_set_gid_table, 1486 1295 .cfg_csi_rate = rtw_bf_cfg_csi_rate, 1296 + 1297 + .coex_set_init = rtw8821c_coex_cfg_init, 1298 + .coex_set_ant_switch = rtw8821c_coex_cfg_ant_switch, 1299 + .coex_set_gnt_fix = rtw8821c_coex_cfg_gnt_fix, 1300 + .coex_set_gnt_debug = rtw8821c_coex_cfg_gnt_debug, 1301 + .coex_set_rfe_type = rtw8821c_coex_cfg_rfe_type, 1302 + .coex_set_wl_tx_power = rtw8821c_coex_cfg_wl_tx_power, 1303 + .coex_set_wl_rx_gain = rtw8821c_coex_cfg_wl_rx_gain, 1487 1304 }; 1305 + 1306 + /* rssi in percentage % (dbm = % - 100) */ 1307 + static const u8 wl_rssi_step_8821c[] = {101, 45, 101, 40}; 1308 + static const u8 bt_rssi_step_8821c[] = {101, 101, 101, 101}; 1309 + 1310 + /* Shared-Antenna Coex Table */ 
1311 + static const struct coex_table_para table_sant_8821c[] = { 1312 + {0x55555555, 0x55555555}, /* case-0 */ 1313 + {0x55555555, 0x55555555}, 1314 + {0x66555555, 0x66555555}, 1315 + {0xaaaaaaaa, 0xaaaaaaaa}, 1316 + {0x5a5a5a5a, 0x5a5a5a5a}, 1317 + {0xfafafafa, 0xfafafafa}, /* case-5 */ 1318 + {0x6a5a5555, 0xaaaaaaaa}, 1319 + {0x6a5a56aa, 0x6a5a56aa}, 1320 + {0x6a5a5a5a, 0x6a5a5a5a}, 1321 + {0x66555555, 0x5a5a5a5a}, 1322 + {0x66555555, 0x6a5a5a5a}, /* case-10 */ 1323 + {0x66555555, 0xaaaaaaaa}, 1324 + {0x66555555, 0x6a5a5aaa}, 1325 + {0x66555555, 0x6aaa6aaa}, 1326 + {0x66555555, 0x6a5a5aaa}, 1327 + {0x66555555, 0xaaaaaaaa}, /* case-15 */ 1328 + {0xffff55ff, 0xfafafafa}, 1329 + {0xffff55ff, 0x6afa5afa}, 1330 + {0xaaffffaa, 0xfafafafa}, 1331 + {0xaa5555aa, 0x5a5a5a5a}, 1332 + {0xaa5555aa, 0x6a5a5a5a}, /* case-20 */ 1333 + {0xaa5555aa, 0xaaaaaaaa}, 1334 + {0xffffffff, 0x55555555}, 1335 + {0xffffffff, 0x5a5a5a5a}, 1336 + {0xffffffff, 0x5a5a5a5a}, 1337 + {0xffffffff, 0x5a5a5aaa}, /* case-25 */ 1338 + {0x55555555, 0x5a5a5a5a}, 1339 + {0x55555555, 0xaaaaaaaa}, 1340 + {0x66555555, 0x6a5a6a5a}, 1341 + {0x66556655, 0x66556655}, 1342 + {0x66556aaa, 0x6a5a6aaa}, /* case-30 */ 1343 + {0xffffffff, 0x5aaa5aaa}, 1344 + {0x56555555, 0x5a5a5aaa} 1345 + }; 1346 + 1347 + /* Non-Shared-Antenna Coex Table */ 1348 + static const struct coex_table_para table_nsant_8821c[] = { 1349 + {0xffffffff, 0xffffffff}, /* case-100 */ 1350 + {0xffff55ff, 0xfafafafa}, 1351 + {0x66555555, 0x66555555}, 1352 + {0xaaaaaaaa, 0xaaaaaaaa}, 1353 + {0x5a5a5a5a, 0x5a5a5a5a}, 1354 + {0xffffffff, 0xffffffff}, /* case-105 */ 1355 + {0x5afa5afa, 0x5afa5afa}, 1356 + {0x55555555, 0xfafafafa}, 1357 + {0x66555555, 0xfafafafa}, 1358 + {0x66555555, 0x5a5a5a5a}, 1359 + {0x66555555, 0x6a5a5a5a}, /* case-110 */ 1360 + {0x66555555, 0xaaaaaaaa}, 1361 + {0xffff55ff, 0xfafafafa}, 1362 + {0xffff55ff, 0x5afa5afa}, 1363 + {0xffff55ff, 0xaaaaaaaa}, 1364 + {0xffff55ff, 0xffff55ff}, /* case-115 */ 1365 + {0xaaffffaa, 0x5afa5afa}, 
1366 + {0xaaffffaa, 0xaaaaaaaa}, 1367 + {0xffffffff, 0xfafafafa}, 1368 + {0xffff55ff, 0xfafafafa}, 1369 + {0xffffffff, 0xaaaaaaaa}, /* case-120 */ 1370 + {0xffff55ff, 0x5afa5afa}, 1371 + {0xffff55ff, 0x5afa5afa}, 1372 + {0x55ff55ff, 0x55ff55ff} 1373 + }; 1374 + 1375 + /* Shared-Antenna TDMA */ 1376 + static const struct coex_tdma_para tdma_sant_8821c[] = { 1377 + { {0x00, 0x00, 0x00, 0x00, 0x00} }, /* case-0 */ 1378 + { {0x61, 0x45, 0x03, 0x11, 0x11} }, /* case-1 */ 1379 + { {0x61, 0x3a, 0x03, 0x11, 0x11} }, 1380 + { {0x61, 0x35, 0x03, 0x11, 0x11} }, 1381 + { {0x61, 0x20, 0x03, 0x11, 0x11} }, 1382 + { {0x61, 0x3a, 0x03, 0x11, 0x11} }, /* case-5 */ 1383 + { {0x61, 0x45, 0x03, 0x11, 0x10} }, 1384 + { {0x61, 0x35, 0x03, 0x11, 0x10} }, 1385 + { {0x61, 0x30, 0x03, 0x11, 0x10} }, 1386 + { {0x61, 0x20, 0x03, 0x11, 0x10} }, 1387 + { {0x61, 0x10, 0x03, 0x11, 0x10} }, /* case-10 */ 1388 + { {0x61, 0x08, 0x03, 0x11, 0x15} }, 1389 + { {0x61, 0x08, 0x03, 0x10, 0x14} }, 1390 + { {0x51, 0x08, 0x03, 0x10, 0x54} }, 1391 + { {0x51, 0x08, 0x03, 0x10, 0x55} }, 1392 + { {0x51, 0x08, 0x07, 0x10, 0x54} }, /* case-15 */ 1393 + { {0x51, 0x45, 0x03, 0x10, 0x50} }, 1394 + { {0x51, 0x3a, 0x03, 0x11, 0x50} }, 1395 + { {0x51, 0x30, 0x03, 0x10, 0x50} }, 1396 + { {0x51, 0x21, 0x03, 0x10, 0x50} }, 1397 + { {0x51, 0x10, 0x03, 0x10, 0x50} }, /* case-20 */ 1398 + { {0x51, 0x4a, 0x03, 0x10, 0x50} }, 1399 + { {0x51, 0x08, 0x03, 0x30, 0x54} }, 1400 + { {0x55, 0x08, 0x03, 0x10, 0x54} }, 1401 + { {0x65, 0x10, 0x03, 0x11, 0x10} }, 1402 + { {0x51, 0x10, 0x03, 0x10, 0x51} }, /* case-25 */ 1403 + { {0x51, 0x21, 0x03, 0x10, 0x50} }, 1404 + { {0x61, 0x08, 0x03, 0x11, 0x11} } 1405 + }; 1406 + 1407 + /* Non-Shared-Antenna TDMA */ 1408 + static const struct coex_tdma_para tdma_nsant_8821c[] = { 1409 + { {0x00, 0x00, 0x00, 0x40, 0x00} }, /* case-100 */ 1410 + { {0x61, 0x45, 0x03, 0x11, 0x11} }, 1411 + { {0x61, 0x25, 0x03, 0x11, 0x11} }, 1412 + { {0x61, 0x35, 0x03, 0x11, 0x11} }, 1413 + { {0x61, 0x20, 0x03, 0x11, 
0x11} }, 1414 + { {0x61, 0x10, 0x03, 0x11, 0x11} }, /* case-105 */ 1415 + { {0x61, 0x45, 0x03, 0x11, 0x10} }, 1416 + { {0x61, 0x30, 0x03, 0x11, 0x10} }, 1417 + { {0x61, 0x30, 0x03, 0x11, 0x10} }, 1418 + { {0x61, 0x20, 0x03, 0x11, 0x10} }, 1419 + { {0x61, 0x10, 0x03, 0x11, 0x10} }, /* case-110 */ 1420 + { {0x61, 0x10, 0x03, 0x11, 0x11} }, 1421 + { {0x61, 0x08, 0x03, 0x10, 0x14} }, 1422 + { {0x51, 0x08, 0x03, 0x10, 0x54} }, 1423 + { {0x51, 0x08, 0x03, 0x10, 0x55} }, 1424 + { {0x51, 0x08, 0x07, 0x10, 0x54} }, /* case-115 */ 1425 + { {0x51, 0x45, 0x03, 0x10, 0x50} }, 1426 + { {0x51, 0x3a, 0x03, 0x10, 0x50} }, 1427 + { {0x51, 0x30, 0x03, 0x10, 0x50} }, 1428 + { {0x51, 0x21, 0x03, 0x10, 0x50} }, 1429 + { {0x51, 0x21, 0x03, 0x10, 0x50} }, /* case-120 */ 1430 + { {0x51, 0x10, 0x03, 0x10, 0x50} } 1431 + }; 1432 + 1433 + static const struct coex_5g_afh_map afh_5g_8821c[] = { {0, 0, 0} }; 1434 + 1435 + /* wl_tx_dec_power, bt_tx_dec_power, wl_rx_gain, bt_rx_lna_constrain */ 1436 + static const struct coex_rf_para rf_para_tx_8821c[] = { 1437 + {0, 0, false, 7}, /* for normal */ 1438 + {0, 20, false, 7}, /* for WL-CPT */ 1439 + {8, 17, true, 4}, 1440 + {7, 18, true, 4}, 1441 + {6, 19, true, 4}, 1442 + {5, 20, true, 4} 1443 + }; 1444 + 1445 + static const struct coex_rf_para rf_para_rx_8821c[] = { 1446 + {0, 0, false, 7}, /* for normal */ 1447 + {0, 20, false, 7}, /* for WL-CPT */ 1448 + {3, 24, true, 5}, 1449 + {2, 26, true, 5}, 1450 + {1, 27, true, 5}, 1451 + {0, 28, true, 5} 1452 + }; 1453 + 1454 + static_assert(ARRAY_SIZE(rf_para_tx_8821c) == ARRAY_SIZE(rf_para_rx_8821c)); 1488 1455 1489 1456 static const u8 rtw8821c_pwrtrk_5gb_n[][RTW_PWR_TRK_TBL_SZ] = { 1490 1457 {0, 1, 1, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 8, 8, 8, 9, 9, 9, 10, 10, ··· 1720 1371 5, 6, 6, 7, 7, 7, 8, 8, 9, 9, 9, 9, 9, 9 1721 1372 }; 1722 1373 1723 - const struct rtw_pwr_track_tbl rtw8821c_rtw_pwr_track_tbl = { 1374 + static const struct rtw_pwr_track_tbl rtw8821c_rtw_pwr_track_tbl = { 1724 1375 
.pwrtrk_5gb_n[0] = rtw8821c_pwrtrk_5gb_n[0], 1725 1376 .pwrtrk_5gb_n[1] = rtw8821c_pwrtrk_5gb_n[1], 1726 1377 .pwrtrk_5gb_n[2] = rtw8821c_pwrtrk_5gb_n[2], ··· 1741 1392 .pwrtrk_2g_cckb_p = rtw8821c_pwrtrk_2g_cck_b_p, 1742 1393 .pwrtrk_2g_ccka_n = rtw8821c_pwrtrk_2g_cck_a_n, 1743 1394 .pwrtrk_2g_ccka_p = rtw8821c_pwrtrk_2g_cck_a_p, 1395 + }; 1396 + 1397 + static const struct rtw_reg_domain coex_info_hw_regs_8821c[] = { 1398 + {0xCB0, MASKDWORD, RTW_REG_DOMAIN_MAC32}, 1399 + {0xCB4, MASKDWORD, RTW_REG_DOMAIN_MAC32}, 1400 + {0xCBA, MASKBYTE0, RTW_REG_DOMAIN_MAC8}, 1401 + {0, 0, RTW_REG_DOMAIN_NL}, 1402 + {0x430, MASKDWORD, RTW_REG_DOMAIN_MAC32}, 1403 + {0x434, MASKDWORD, RTW_REG_DOMAIN_MAC32}, 1404 + {0x42a, MASKLWORD, RTW_REG_DOMAIN_MAC16}, 1405 + {0x426, MASKBYTE0, RTW_REG_DOMAIN_MAC8}, 1406 + {0x45e, BIT(3), RTW_REG_DOMAIN_MAC8}, 1407 + {0x454, MASKLWORD, RTW_REG_DOMAIN_MAC16}, 1408 + {0, 0, RTW_REG_DOMAIN_NL}, 1409 + {0x4c, BIT(24) | BIT(23), RTW_REG_DOMAIN_MAC32}, 1410 + {0x64, BIT(0), RTW_REG_DOMAIN_MAC8}, 1411 + {0x4c6, BIT(4), RTW_REG_DOMAIN_MAC8}, 1412 + {0x40, BIT(5), RTW_REG_DOMAIN_MAC8}, 1413 + {0x1, RFREG_MASK, RTW_REG_DOMAIN_RF_A}, 1414 + {0, 0, RTW_REG_DOMAIN_NL}, 1415 + {0x550, MASKDWORD, RTW_REG_DOMAIN_MAC32}, 1416 + {0x522, MASKBYTE0, RTW_REG_DOMAIN_MAC8}, 1417 + {0x953, BIT(1), RTW_REG_DOMAIN_MAC8}, 1418 + {0xc50, MASKBYTE0, RTW_REG_DOMAIN_MAC8}, 1419 + {0x60A, MASKBYTE0, RTW_REG_DOMAIN_MAC8}, 1744 1420 }; 1745 1421 1746 1422 struct rtw_chip_info rtw8821c_hw_spec = { ··· 1814 1440 .iqk_threshold = 8, 1815 1441 .bfer_su_max_num = 2, 1816 1442 .bfer_mu_max_num = 1, 1443 + 1444 + .coex_para_ver = 0x19092746, 1445 + .bt_desired_ver = 0x46, 1446 + .scbd_support = true, 1447 + .new_scbd10_def = false, 1448 + .pstdma_type = COEX_PSTDMA_FORCE_LPSOFF, 1449 + .bt_rssi_type = COEX_BTRSSI_RATIO, 1450 + .ant_isolation = 15, 1451 + .rssi_tolerance = 2, 1452 + .wl_rssi_step = wl_rssi_step_8821c, 1453 + .bt_rssi_step = bt_rssi_step_8821c, 1454 + .table_sant_num = 
ARRAY_SIZE(table_sant_8821c), 1455 + .table_sant = table_sant_8821c, 1456 + .table_nsant_num = ARRAY_SIZE(table_nsant_8821c), 1457 + .table_nsant = table_nsant_8821c, 1458 + .tdma_sant_num = ARRAY_SIZE(tdma_sant_8821c), 1459 + .tdma_sant = tdma_sant_8821c, 1460 + .tdma_nsant_num = ARRAY_SIZE(tdma_nsant_8821c), 1461 + .tdma_nsant = tdma_nsant_8821c, 1462 + .wl_rf_para_num = ARRAY_SIZE(rf_para_tx_8821c), 1463 + .wl_rf_para_tx = rf_para_tx_8821c, 1464 + .wl_rf_para_rx = rf_para_rx_8821c, 1465 + .bt_afh_span_bw20 = 0x24, 1466 + .bt_afh_span_bw40 = 0x36, 1467 + .afh_5g_num = ARRAY_SIZE(afh_5g_8821c), 1468 + .afh_5g = afh_5g_8821c, 1469 + 1470 + .coex_info_hw_regs_num = ARRAY_SIZE(coex_info_hw_regs_8821c), 1471 + .coex_info_hw_regs = coex_info_hw_regs_8821c, 1817 1472 }; 1818 1473 EXPORT_SYMBOL(rtw8821c_hw_spec); 1819 1474
+26
drivers/net/wireless/realtek/rtw88/rtw8821c.h
··· 160 160 le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(11, 8)) 161 161 #define GET_PHY_STAT_P1_HT_RXSC(phy_stat) \ 162 162 le32_get_bits(*((__le32 *)(phy_stat) + 0x01), GENMASK(15, 12)) 163 + #define GET_PHY_STAT_P1_RXEVM_A(phy_stat) \ 164 + le32_get_bits(*((__le32 *)(phy_stat) + 0x04), GENMASK(7, 0)) 165 + #define GET_PHY_STAT_P1_RXEVM_B(phy_stat) \ 166 + le32_get_bits(*((__le32 *)(phy_stat) + 0x04), GENMASK(15, 8)) 167 + #define GET_PHY_STAT_P1_CFO_TAIL_A(phy_stat) \ 168 + le32_get_bits(*((__le32 *)(phy_stat) + 0x05), GENMASK(7, 0)) 169 + #define GET_PHY_STAT_P1_CFO_TAIL_B(phy_stat) \ 170 + le32_get_bits(*((__le32 *)(phy_stat) + 0x05), GENMASK(15, 8)) 171 + #define GET_PHY_STAT_P1_RXSNR_A(phy_stat) \ 172 + le32_get_bits(*((__le32 *)(phy_stat) + 0x06), GENMASK(7, 0)) 173 + #define GET_PHY_STAT_P1_RXSNR_B(phy_stat) \ 174 + le32_get_bits(*((__le32 *)(phy_stat) + 0x06), GENMASK(15, 8)) 163 175 164 176 #define REG_INIRTS_RATE_SEL 0x0480 165 177 #define REG_HTSTFWT 0x800 ··· 229 217 #define REG_CCA_CCK 0xfcc 230 218 #define REG_ANTWT 0x1904 231 219 #define REG_IQKFAILMSK 0x1bf0 220 + #define BIT_MASK_R_RFE_SEL_15 GENMASK(31, 28) 221 + #define BIT_SDIO_INT BIT(18) 222 + #define SAMPLE_RATE_MASK GENMASK(5, 0) 223 + #define SAMPLE_RATE 0x5 224 + #define BT_CNT_ENABLE 0x1 225 + #define BIT_BCN_QUEUE BIT(3) 226 + #define BCN_PRI_EN 0x1 227 + #define PTA_CTRL_PIN 0x66 228 + #define DPDT_CTRL_PIN 0x77 229 + #define ANTDIC_CTRL_PIN 0x88 230 + #define REG_CTRL_TYPE 0x67 231 + #define BIT_CTRL_TYPE1 BIT(5) 232 + #define BIT_CTRL_TYPE2 BIT(4) 233 + #define CTRL_TYPE_MASK GENMASK(15, 8) 232 234 233 235 #define RF18_BAND_MASK (BIT(16) | BIT(9) | BIT(8)) 234 236 #define RF18_BAND_2G (0)
+73 -31
drivers/net/wireless/realtek/rtw88/tx.c
··· 61 61 SET_TX_DESC_DISQSELSEQ(txdesc, pkt_info->dis_qselseq); 62 62 SET_TX_DESC_EN_HWSEQ(txdesc, pkt_info->en_hwseq); 63 63 SET_TX_DESC_HW_SSN_SEL(txdesc, pkt_info->hw_ssn_sel); 64 + SET_TX_DESC_NAVUSEHDR(txdesc, pkt_info->nav_use_hdr); 65 + SET_TX_DESC_BT_NULL(txdesc, pkt_info->bt_null); 64 66 } 65 67 EXPORT_SYMBOL(rtw_tx_fill_tx_desc); 66 68 ··· 229 227 spin_unlock_irqrestore(&tx_report->q_lock, flags); 230 228 } 231 229 230 + static void rtw_tx_pkt_info_update_rate(struct rtw_dev *rtwdev, 231 + struct rtw_tx_pkt_info *pkt_info, 232 + struct sk_buff *skb) 233 + { 234 + if (rtwdev->hal.current_band_type == RTW_BAND_2G) { 235 + pkt_info->rate_id = RTW_RATEID_B_20M; 236 + pkt_info->rate = DESC_RATE1M; 237 + } else { 238 + pkt_info->rate_id = RTW_RATEID_G; 239 + pkt_info->rate = DESC_RATE6M; 240 + } 241 + pkt_info->use_rate = true; 242 + pkt_info->dis_rate_fallback = true; 243 + } 244 + 245 + static void rtw_tx_pkt_info_update_sec(struct rtw_dev *rtwdev, 246 + struct rtw_tx_pkt_info *pkt_info, 247 + struct sk_buff *skb) 248 + { 249 + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 250 + u8 sec_type = 0; 251 + 252 + if (info && info->control.hw_key) { 253 + struct ieee80211_key_conf *key = info->control.hw_key; 254 + 255 + switch (key->cipher) { 256 + case WLAN_CIPHER_SUITE_WEP40: 257 + case WLAN_CIPHER_SUITE_WEP104: 258 + case WLAN_CIPHER_SUITE_TKIP: 259 + sec_type = 0x01; 260 + break; 261 + case WLAN_CIPHER_SUITE_CCMP: 262 + sec_type = 0x03; 263 + break; 264 + default: 265 + break; 266 + } 267 + } 268 + 269 + pkt_info->sec_type = sec_type; 270 + } 271 + 232 272 static void rtw_tx_mgmt_pkt_info_update(struct rtw_dev *rtwdev, 233 273 struct rtw_tx_pkt_info *pkt_info, 234 274 struct ieee80211_sta *sta, 235 275 struct sk_buff *skb) 236 276 { 237 - pkt_info->use_rate = true; 238 - pkt_info->rate_id = 6; 239 - pkt_info->dis_rate_fallback = true; 277 + rtw_tx_pkt_info_update_rate(rtwdev, pkt_info, skb); 240 278 pkt_info->dis_qselseq = true; 241 279 
pkt_info->en_hwseq = true; 242 280 pkt_info->hw_ssn_sel = 0; 281 + /* TODO: need to change hw port and hw ssn sel for multiple vifs */ 243 282 } 244 283 245 284 static void rtw_tx_data_pkt_info_update(struct rtw_dev *rtwdev, ··· 355 312 struct rtw_sta_info *si; 356 313 struct ieee80211_vif *vif = NULL; 357 314 __le16 fc = hdr->frame_control; 358 - u8 sec_type = 0; 359 315 bool bmc; 360 316 361 317 if (sta) { ··· 367 325 else if (ieee80211_is_data(fc)) 368 326 rtw_tx_data_pkt_info_update(rtwdev, pkt_info, sta, skb); 369 327 370 - if (info->control.hw_key) { 371 - struct ieee80211_key_conf *key = info->control.hw_key; 372 - 373 - switch (key->cipher) { 374 - case WLAN_CIPHER_SUITE_WEP40: 375 - case WLAN_CIPHER_SUITE_WEP104: 376 - case WLAN_CIPHER_SUITE_TKIP: 377 - sec_type = 0x01; 378 - break; 379 - case WLAN_CIPHER_SUITE_CCMP: 380 - sec_type = 0x03; 381 - break; 382 - default: 383 - break; 384 - } 385 - } 386 - 387 328 bmc = is_broadcast_ether_addr(hdr->addr1) || 388 329 is_multicast_ether_addr(hdr->addr1); 389 330 ··· 374 349 rtw_tx_report_enable(rtwdev, pkt_info); 375 350 376 351 pkt_info->bmc = bmc; 377 - pkt_info->sec_type = sec_type; 352 + rtw_tx_pkt_info_update_sec(rtwdev, pkt_info, skb); 378 353 pkt_info->tx_pkt_size = skb->len; 379 354 pkt_info->offset = chip->tx_pkt_desc_sz; 380 355 pkt_info->qsel = skb->priority; ··· 384 359 rtw_tx_stats(rtwdev, vif, skb); 385 360 } 386 361 387 - void rtw_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev, 388 - struct rtw_tx_pkt_info *pkt_info, 389 - struct sk_buff *skb) 362 + void rtw_tx_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev, 363 + struct rtw_tx_pkt_info *pkt_info, 364 + struct sk_buff *skb, 365 + enum rtw_rsvd_packet_type type) 390 366 { 391 367 struct rtw_chip_info *chip = rtwdev->chip; 392 368 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 393 369 bool bmc; 394 370 371 + /* A beacon or dummy reserved page packet indicates that it is the first 372 + * reserved page, and the qsel of it will be 
set in each hci. 373 + */ 374 + if (type != RSVD_BEACON && type != RSVD_DUMMY) 375 + pkt_info->qsel = TX_DESC_QSEL_MGMT; 376 + 377 + rtw_tx_pkt_info_update_rate(rtwdev, pkt_info, skb); 378 + 395 379 bmc = is_broadcast_ether_addr(hdr->addr1) || 396 380 is_multicast_ether_addr(hdr->addr1); 397 - pkt_info->use_rate = true; 398 - pkt_info->rate_id = 6; 399 - pkt_info->dis_rate_fallback = true; 400 381 pkt_info->bmc = bmc; 401 382 pkt_info->tx_pkt_size = skb->len; 402 383 pkt_info->offset = chip->tx_pkt_desc_sz; 403 - pkt_info->qsel = TX_DESC_QSEL_MGMT; 404 384 pkt_info->ls = true; 385 + if (type == RSVD_PS_POLL) { 386 + pkt_info->nav_use_hdr = true; 387 + } else { 388 + pkt_info->dis_qselseq = true; 389 + pkt_info->en_hwseq = true; 390 + pkt_info->hw_ssn_sel = 0; 391 + } 392 + if (type == RSVD_QOS_NULL) 393 + pkt_info->bt_null = true; 394 + 395 + rtw_tx_pkt_info_update_sec(rtwdev, pkt_info, skb); 396 + 397 + /* TODO: need to change hw port and hw ssn sel for multiple vifs */ 405 398 } 406 399 407 400 struct sk_buff * ··· 442 399 443 400 skb_reserve(skb, tx_pkt_desc_sz); 444 401 skb_put_data(skb, buf, size); 445 - pkt_info->tx_pkt_size = size; 446 - pkt_info->offset = tx_pkt_desc_sz; 402 + rtw_tx_rsvd_page_pkt_info_update(rtwdev, pkt_info, skb, RSVD_BEACON); 447 403 448 404 return skb; 449 405 }
+10 -3
drivers/net/wireless/realtek/rtw88/tx.h
··· 59 59 le32p_replace_bits((__le32 *)(txdesc) + 0x08, value, BIT(15)) 60 60 #define SET_TX_DESC_HW_SSN_SEL(txdesc, value) \ 61 61 le32p_replace_bits((__le32 *)(txdesc) + 0x03, value, GENMASK(7, 6)) 62 + #define SET_TX_DESC_NAVUSEHDR(txdesc, value) \ 63 + le32p_replace_bits((__le32 *)(txdesc) + 0x03, value, BIT(15)) 64 + #define SET_TX_DESC_BT_NULL(txdesc, value) \ 65 + le32p_replace_bits((__le32 *)(txdesc) + 0x02, value, BIT(23)) 62 66 63 67 enum rtw_tx_desc_queue_select { 64 68 TX_DESC_QSEL_TID0 = 0, ··· 87 83 TX_DESC_QSEL_H2C = 19, 88 84 }; 89 85 86 + enum rtw_rsvd_packet_type; 87 + 90 88 void rtw_tx(struct rtw_dev *rtwdev, 91 89 struct ieee80211_tx_control *control, 92 90 struct sk_buff *skb); ··· 102 96 void rtw_tx_fill_tx_desc(struct rtw_tx_pkt_info *pkt_info, struct sk_buff *skb); 103 97 void rtw_tx_report_enqueue(struct rtw_dev *rtwdev, struct sk_buff *skb, u8 sn); 104 98 void rtw_tx_report_handle(struct rtw_dev *rtwdev, struct sk_buff *skb, int src); 105 - void rtw_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev, 106 - struct rtw_tx_pkt_info *pkt_info, 107 - struct sk_buff *skb); 99 + void rtw_tx_rsvd_page_pkt_info_update(struct rtw_dev *rtwdev, 100 + struct rtw_tx_pkt_info *pkt_info, 101 + struct sk_buff *skb, 102 + enum rtw_rsvd_packet_type type); 108 103 struct sk_buff * 109 104 rtw_tx_write_data_rsvd_page_get(struct rtw_dev *rtwdev, 110 105 struct rtw_tx_pkt_info *pkt_info,
+1 -1
drivers/net/wireless/ti/wl1251/event.c
··· 70 70 break; 71 71 } 72 72 73 - return 0; 73 + return ret; 74 74 } 75 75 76 76 static void wl1251_event_mbox_dump(struct event_mailbox *mbox)
+3
include/linux/mmc/sdio_ids.h
··· 105 105 #define SDIO_DEVICE_ID_MEDIATEK_MT7663 0x7663 106 106 #define SDIO_DEVICE_ID_MEDIATEK_MT7668 0x7668 107 107 108 + #define SDIO_VENDOR_ID_MICROCHIP_WILC 0x0296 109 + #define SDIO_DEVICE_ID_MICROCHIP_WILC1000 0x5347 110 + 108 111 #define SDIO_VENDOR_ID_SIANO 0x039a 109 112 #define SDIO_DEVICE_ID_SIANO_NOVA_B0 0x0201 110 113 #define SDIO_DEVICE_ID_SIANO_NICE 0x0202