Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'wireless-drivers-next-2021-04-23' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
wireless-drivers-next patches for v5.13

Third, and final, set of patches for v5.13. We got one more week
before the merge window, and this includes patches from that extra week.
Smaller features to rtw88 and mt76, but mostly this contains fixes.

rtw88

* 8822c: Add gap-k calibration to improve long range performance

mt76

* parse rate power limits from DT

* debugfs file to test firmware crash

* debugfs to disable NAPI threaded mode
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+3433 -1162
+107
Documentation/devicetree/bindings/net/wireless/mediatek,mt76.yaml
··· 72 72 led-sources: 73 73 maxItems: 1 74 74 75 + power-limits: 76 + type: object 77 + additionalProperties: false 78 + patternProperties: 79 + "^r[0-9]+": 80 + type: object 81 + additionalProperties: false 82 + properties: 83 + regdomain: 84 + $ref: /schemas/types.yaml#/definitions/string 85 + description: 86 + Regdomain refers to a legal regulatory region. Different 87 + countries define different levels of allowable transmitter 88 + power, time that a channel can be occupied, and different 89 + available channels 90 + enum: 91 + - FCC 92 + - ETSI 93 + - JP 94 + 95 + patternProperties: 96 + "^txpower-[256]g$": 97 + type: object 98 + additionalProperties: false 99 + patternProperties: 100 + "^b[0-9]+$": 101 + type: object 102 + additionalProperties: false 103 + properties: 104 + channels: 105 + $ref: /schemas/types.yaml#/definitions/uint32-array 106 + minItems: 2 107 + maxItems: 2 108 + description: 109 + Pairs of first and last channel number of the selected 110 + band 111 + 112 + rates-cck: 113 + $ref: /schemas/types.yaml#/definitions/uint8-array 114 + minItems: 4 115 + maxItems: 4 116 + description: 117 + 4 half-dBm per-rate power limit values 118 + 119 + rates-ofdm: 120 + $ref: /schemas/types.yaml#/definitions/uint8-array 121 + minItems: 8 122 + maxItems: 8 123 + description: 124 + 8 half-dBm per-rate power limit values 125 + 126 + rates-mcs: 127 + $ref: /schemas/types.yaml#/definitions/uint8-matrix 128 + description: 129 + Sets of per-rate power limit values for 802.11n/802.11ac 130 + rates for multiple channel bandwidth settings. 131 + Each set starts with the number of channel bandwidth 132 + settings for which the rate set applies, followed by 133 + either 8 or 10 power limit values. The order of the 134 + channel bandwidth settings is 20, 40, 80 and 160 MHz. 
135 + maxItems: 4 136 + items: 137 + minItems: 9 138 + maxItems: 11 139 + 140 + rates-ru: 141 + $ref: /schemas/types.yaml#/definitions/uint8-matrix 142 + description: 143 + Sets of per-rate power limit values for 802.11ax rates 144 + for multiple channel bandwidth or resource unit settings. 145 + Each set starts with the number of channel bandwidth or 146 + resource unit settings for which the rate set applies, 147 + followed by 12 power limit values. The order of the 148 + channel resource unit settings is RU26, RU52, RU106, 149 + RU242/SU20, RU484/SU40, RU996/SU80 and RU2x996/SU160. 150 + items: 151 + minItems: 13 152 + maxItems: 13 153 + 154 + txs-delta: 155 + $ref: /schemas/types.yaml#/definitions/uint32-array 156 + description: 157 + Half-dBm power delta for different numbers of antennas 158 + 75 159 required: 76 160 - compatible 77 161 - reg ··· 176 92 177 93 led { 178 94 led-sources = <2>; 95 + }; 96 + 97 + power-limits { 98 + r0 { 99 + regdomain = "FCC"; 100 + txpower-5g { 101 + b0 { 102 + channels = <36 48>; 103 + rates-ofdm = /bits/ 8 <23 23 23 23 23 23 23 23>; 104 + rates-mcs = /bits/ 8 <1 23 23 23 23 23 23 23 23 23 23>, 105 + <3 22 22 22 22 22 22 22 22 22 22>; 106 + rates-ru = /bits/ 8 <3 22 22 22 22 22 22 22 22 22 22 22 22>, 107 + <4 20 20 20 20 20 20 20 20 20 20 20 20>; 108 + }; 109 + b1 { 110 + channels = <100 181>; 111 + rates-ofdm = /bits/ 8 <14 14 14 14 14 14 14 14>; 112 + rates-mcs = /bits/ 8 <4 14 14 14 14 14 14 14 14 14 14>; 113 + txs-delta = <12 9 6>; 114 + rates-ru = /bits/ 8 <7 14 14 14 14 14 14 14 14 14 14 14 14>; 115 + }; 116 + }; 117 + }; 179 118 }; 180 119 }; 181 120 };
+1 -1
drivers/net/wireless/ath/ath10k/htc.c
··· 669 669 670 670 ath10k_dbg(ar, ATH10K_DBG_HTC, 671 671 "bundle tx status %d eid %d req count %d count %d len %d\n", 672 - ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, bundle_skb->len); 672 + ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, skb_len); 673 673 return ret; 674 674 } 675 675
+3
drivers/net/wireless/ath/ath10k/wmi-tlv.c
··· 592 592 GFP_ATOMIC 593 593 ); 594 594 break; 595 + default: 596 + kfree(tb); 597 + return; 595 598 } 596 599 597 600 exit:
+12 -3
drivers/net/wireless/ath/ath11k/mhi.c
··· 349 349 mhi_ctrl->read_reg = ath11k_mhi_op_read_reg; 350 350 mhi_ctrl->write_reg = ath11k_mhi_op_write_reg; 351 351 352 - if (ab->hw_rev == ATH11K_HW_QCA6390_HW20) 353 - ath11k_mhi_config = &ath11k_mhi_config_qca6390; 354 - else if (ab->hw_rev == ATH11K_HW_QCN9074_HW10) 352 + switch (ab->hw_rev) { 353 + case ATH11K_HW_QCN9074_HW10: 355 354 ath11k_mhi_config = &ath11k_mhi_config_qcn9074; 355 + break; 356 + case ATH11K_HW_QCA6390_HW20: 357 + ath11k_mhi_config = &ath11k_mhi_config_qca6390; 358 + break; 359 + default: 360 + ath11k_err(ab, "failed assign mhi_config for unknown hw rev %d\n", 361 + ab->hw_rev); 362 + mhi_free_controller(mhi_ctrl); 363 + return -EINVAL; 364 + } 356 365 357 366 ret = mhi_register_controller(mhi_ctrl, ath11k_mhi_config); 358 367 if (ret) {
+1 -1
drivers/net/wireless/ath/ath11k/qmi.c
··· 2514 2514 2515 2515 ret = ath11k_qmi_request_target_cap(ab); 2516 2516 if (ret < 0) { 2517 - ath11k_warn(ab, "failed to requeqst qmi target capabilities: %d\n", 2517 + ath11k_warn(ab, "failed to request qmi target capabilities: %d\n", 2518 2518 ret); 2519 2519 return ret; 2520 2520 }
+1 -1
drivers/net/wireless/ath/ath9k/htc_drv_init.c
··· 246 246 if (unlikely(r)) { 247 247 ath_dbg(common, WMI, "REGISTER READ FAILED: (0x%04x, %d)\n", 248 248 reg_offset, r); 249 - return -EIO; 249 + return -1; 250 250 } 251 251 252 252 return be32_to_cpu(val);
+1 -1
drivers/net/wireless/ath/ath9k/hw.c
··· 286 286 287 287 srev = REG_READ(ah, AR_SREV); 288 288 289 - if (srev == -EIO) { 289 + if (srev == -1) { 290 290 ath_err(ath9k_hw_common(ah), 291 291 "Failed to read SREV register"); 292 292 return false;
+1 -1
drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
··· 151 151 /* Send down the multicast list first. */ 152 152 cnt = netdev_mc_count(ndev); 153 153 buflen = sizeof(cnt) + (cnt * ETH_ALEN); 154 - buf = kmalloc(buflen, GFP_ATOMIC); 154 + buf = kmalloc(buflen, GFP_KERNEL); 155 155 if (!buf) 156 156 return; 157 157 bufp = buf;
-1
drivers/net/wireless/marvell/libertas_tf/libertas_tf.h
··· 453 453 u8 beacon[MRVL_MAX_BCN_SIZE]; 454 454 }; 455 455 456 - struct lbtf_private; 457 456 struct cmd_ctrl_node; 458 457 459 458 /** Function Prototype Declaration */
+28
drivers/net/wireless/mediatek/mt76/debugfs.c
··· 25 25 DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, 26 26 "0x%08llx\n"); 27 27 28 + static int 29 + mt76_napi_threaded_set(void *data, u64 val) 30 + { 31 + struct mt76_dev *dev = data; 32 + 33 + if (!mt76_is_mmio(dev)) 34 + return -EOPNOTSUPP; 35 + 36 + if (dev->napi_dev.threaded != val) 37 + return dev_set_threaded(&dev->napi_dev, val); 38 + 39 + return 0; 40 + } 41 + 42 + static int 43 + mt76_napi_threaded_get(void *data, u64 *val) 44 + { 45 + struct mt76_dev *dev = data; 46 + 47 + *val = dev->napi_dev.threaded; 48 + return 0; 49 + } 50 + 51 + DEFINE_DEBUGFS_ATTRIBUTE(fops_napi_threaded, mt76_napi_threaded_get, 52 + mt76_napi_threaded_set, "%llu\n"); 53 + 28 54 int mt76_queues_read(struct seq_file *s, void *data) 29 55 { 30 56 struct mt76_dev *dev = dev_get_drvdata(s->private); ··· 128 102 debugfs_create_u32("regidx", 0600, dir, &dev->debugfs_reg); 129 103 debugfs_create_file_unsafe("regval", 0600, dir, dev, 130 104 &fops_regval); 105 + debugfs_create_file_unsafe("napi_threaded", 0600, dir, dev, 106 + &fops_napi_threaded); 131 107 debugfs_create_blob("eeprom", 0400, dir, &dev->eeprom); 132 108 if (dev->otp.data) 133 109 debugfs_create_blob("otp", 0400, dir, &dev->otp);
+5 -5
drivers/net/wireless/mediatek/mt76/dma.c
··· 602 602 return done; 603 603 } 604 604 605 - static int 606 - mt76_dma_rx_poll(struct napi_struct *napi, int budget) 605 + int mt76_dma_rx_poll(struct napi_struct *napi, int budget) 607 606 { 608 607 struct mt76_dev *dev; 609 608 int qid, done = 0, cur; ··· 625 626 626 627 return done; 627 628 } 629 + EXPORT_SYMBOL_GPL(mt76_dma_rx_poll); 628 630 629 631 static int 630 - mt76_dma_init(struct mt76_dev *dev) 632 + mt76_dma_init(struct mt76_dev *dev, 633 + int (*poll)(struct napi_struct *napi, int budget)) 631 634 { 632 635 int i; 633 636 ··· 640 639 dev->napi_dev.threaded = 1; 641 640 642 641 mt76_for_each_q_rx(dev, i) { 643 - netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll, 644 - 64); 642 + netif_napi_add(&dev->napi_dev, &dev->napi[i], poll, 64); 645 643 mt76_dma_rx_fill(dev, &dev->q_rx[i]); 646 644 napi_enable(&dev->napi[i]); 647 645 }
+1
drivers/net/wireless/mediatek/mt76/dma.h
··· 45 45 EVT_EVENT_DFS_DETECT_RSP, 46 46 }; 47 47 48 + int mt76_dma_rx_poll(struct napi_struct *napi, int budget); 48 49 void mt76_dma_attach(struct mt76_dev *dev); 49 50 void mt76_dma_cleanup(struct mt76_dev *dev); 50 51
+225 -6
drivers/net/wireless/mediatek/mt76/eeprom.c
··· 9 9 #include <linux/etherdevice.h> 10 10 #include "mt76.h" 11 11 12 - static int 13 - mt76_get_of_eeprom(struct mt76_dev *dev, int len) 12 + int mt76_get_of_eeprom(struct mt76_dev *dev, void *eep, int offset, int len) 14 13 { 15 14 #if defined(CONFIG_OF) && defined(CONFIG_MTD) 16 15 struct device_node *np = dev->dev->of_node; ··· 17 18 const __be32 *list; 18 19 const char *part; 19 20 phandle phandle; 20 - int offset = 0; 21 21 int size; 22 22 size_t retlen; 23 23 int ret; ··· 52 54 } 53 55 54 56 offset = be32_to_cpup(list); 55 - ret = mtd_read(mtd, offset, len, &retlen, dev->eeprom.data); 57 + ret = mtd_read(mtd, offset, len, &retlen, eep); 56 58 put_mtd_device(mtd); 57 59 if (ret) 58 60 goto out_put_node; ··· 63 65 } 64 66 65 67 if (of_property_read_bool(dev->dev->of_node, "big-endian")) { 66 - u8 *data = (u8 *)dev->eeprom.data; 68 + u8 *data = (u8 *)eep; 67 69 int i; 68 70 69 71 /* convert eeprom data in Little Endian */ ··· 84 86 return -ENOENT; 85 87 #endif 86 88 } 89 + EXPORT_SYMBOL_GPL(mt76_get_of_eeprom); 87 90 88 91 void 89 92 mt76_eeprom_override(struct mt76_phy *phy) ··· 103 104 } 104 105 EXPORT_SYMBOL_GPL(mt76_eeprom_override); 105 106 107 + static bool mt76_string_prop_find(struct property *prop, const char *str) 108 + { 109 + const char *cp = NULL; 110 + 111 + if (!prop || !str || !str[0]) 112 + return false; 113 + 114 + while ((cp = of_prop_next_string(prop, cp)) != NULL) 115 + if (!strcasecmp(cp, str)) 116 + return true; 117 + 118 + return false; 119 + } 120 + 121 + static struct device_node * 122 + mt76_find_power_limits_node(struct mt76_dev *dev) 123 + { 124 + struct device_node *np = dev->dev->of_node; 125 + const char *const region_names[] = { 126 + [NL80211_DFS_ETSI] = "etsi", 127 + [NL80211_DFS_FCC] = "fcc", 128 + [NL80211_DFS_JP] = "jp", 129 + }; 130 + struct device_node *cur, *fallback = NULL; 131 + const char *region_name = NULL; 132 + 133 + if (dev->region < ARRAY_SIZE(region_names)) 134 + region_name = region_names[dev->region]; 135 + 
136 + np = of_get_child_by_name(np, "power-limits"); 137 + if (!np) 138 + return NULL; 139 + 140 + for_each_child_of_node(np, cur) { 141 + struct property *country = of_find_property(cur, "country", NULL); 142 + struct property *regd = of_find_property(cur, "regdomain", NULL); 143 + 144 + if (!country && !regd) { 145 + fallback = cur; 146 + continue; 147 + } 148 + 149 + if (mt76_string_prop_find(country, dev->alpha2) || 150 + mt76_string_prop_find(regd, region_name)) 151 + return cur; 152 + } 153 + 154 + return fallback; 155 + } 156 + 157 + static const __be32 * 158 + mt76_get_of_array(struct device_node *np, char *name, size_t *len, int min) 159 + { 160 + struct property *prop = of_find_property(np, name, NULL); 161 + 162 + if (!prop || !prop->value || prop->length < min * 4) 163 + return NULL; 164 + 165 + *len = prop->length; 166 + 167 + return prop->value; 168 + } 169 + 170 + static struct device_node * 171 + mt76_find_channel_node(struct device_node *np, struct ieee80211_channel *chan) 172 + { 173 + struct device_node *cur; 174 + const __be32 *val; 175 + size_t len; 176 + 177 + for_each_child_of_node(np, cur) { 178 + val = mt76_get_of_array(cur, "channels", &len, 2); 179 + if (!val) 180 + continue; 181 + 182 + while (len >= 2 * sizeof(*val)) { 183 + if (chan->hw_value >= be32_to_cpu(val[0]) && 184 + chan->hw_value <= be32_to_cpu(val[1])) 185 + return cur; 186 + 187 + val += 2; 188 + len -= 2 * sizeof(*val); 189 + } 190 + } 191 + 192 + return NULL; 193 + } 194 + 195 + static s8 196 + mt76_get_txs_delta(struct device_node *np, u8 nss) 197 + { 198 + const __be32 *val; 199 + size_t len; 200 + 201 + val = mt76_get_of_array(np, "txs-delta", &len, nss); 202 + if (!val) 203 + return 0; 204 + 205 + return be32_to_cpu(val[nss - 1]); 206 + } 207 + 208 + static void 209 + mt76_apply_array_limit(s8 *pwr, size_t pwr_len, const __be32 *data, 210 + s8 target_power, s8 nss_delta, s8 *max_power) 211 + { 212 + int i; 213 + 214 + if (!data) 215 + return; 216 + 217 + for (i = 0; i 
< pwr_len; i++) { 218 + pwr[i] = min_t(s8, target_power, 219 + be32_to_cpu(data[i]) + nss_delta); 220 + *max_power = max(*max_power, pwr[i]); 221 + } 222 + } 223 + 224 + static void 225 + mt76_apply_multi_array_limit(s8 *pwr, size_t pwr_len, s8 pwr_num, 226 + const __be32 *data, size_t len, s8 target_power, 227 + s8 nss_delta, s8 *max_power) 228 + { 229 + int i, cur; 230 + 231 + if (!data) 232 + return; 233 + 234 + len /= 4; 235 + cur = be32_to_cpu(data[0]); 236 + for (i = 0; i < pwr_num; i++) { 237 + if (len < pwr_len + 1) 238 + break; 239 + 240 + mt76_apply_array_limit(pwr + pwr_len * i, pwr_len, data + 1, 241 + target_power, nss_delta, max_power); 242 + if (--cur > 0) 243 + continue; 244 + 245 + data += pwr_len + 1; 246 + len -= pwr_len + 1; 247 + if (!len) 248 + break; 249 + 250 + cur = be32_to_cpu(data[0]); 251 + } 252 + } 253 + 254 + s8 mt76_get_rate_power_limits(struct mt76_phy *phy, 255 + struct ieee80211_channel *chan, 256 + struct mt76_power_limits *dest, 257 + s8 target_power) 258 + { 259 + struct mt76_dev *dev = phy->dev; 260 + struct device_node *np; 261 + const __be32 *val; 262 + char name[16]; 263 + u32 mcs_rates = dev->drv->mcs_rates; 264 + u32 ru_rates = ARRAY_SIZE(dest->ru[0]); 265 + char band; 266 + size_t len; 267 + s8 max_power = 0; 268 + s8 txs_delta; 269 + 270 + if (!mcs_rates) 271 + mcs_rates = 10; 272 + 273 + memset(dest, target_power, sizeof(*dest)); 274 + 275 + if (!IS_ENABLED(CONFIG_OF)) 276 + return target_power; 277 + 278 + np = mt76_find_power_limits_node(dev); 279 + if (!np) 280 + return target_power; 281 + 282 + switch (chan->band) { 283 + case NL80211_BAND_2GHZ: 284 + band = '2'; 285 + break; 286 + case NL80211_BAND_5GHZ: 287 + band = '5'; 288 + break; 289 + default: 290 + return target_power; 291 + } 292 + 293 + snprintf(name, sizeof(name), "txpower-%cg", band); 294 + np = of_get_child_by_name(np, name); 295 + if (!np) 296 + return target_power; 297 + 298 + np = mt76_find_channel_node(np, chan); 299 + if (!np) 300 + return 
target_power; 301 + 302 + txs_delta = mt76_get_txs_delta(np, hweight8(phy->antenna_mask)); 303 + 304 + val = mt76_get_of_array(np, "rates-cck", &len, ARRAY_SIZE(dest->cck)); 305 + mt76_apply_array_limit(dest->cck, ARRAY_SIZE(dest->cck), val, 306 + target_power, txs_delta, &max_power); 307 + 308 + val = mt76_get_of_array(np, "rates-ofdm", 309 + &len, ARRAY_SIZE(dest->ofdm)); 310 + mt76_apply_array_limit(dest->ofdm, ARRAY_SIZE(dest->ofdm), val, 311 + target_power, txs_delta, &max_power); 312 + 313 + val = mt76_get_of_array(np, "rates-mcs", &len, mcs_rates + 1); 314 + mt76_apply_multi_array_limit(dest->mcs[0], ARRAY_SIZE(dest->mcs[0]), 315 + ARRAY_SIZE(dest->mcs), val, len, 316 + target_power, txs_delta, &max_power); 317 + 318 + val = mt76_get_of_array(np, "rates-ru", &len, ru_rates + 1); 319 + mt76_apply_multi_array_limit(dest->ru[0], ARRAY_SIZE(dest->ru[0]), 320 + ARRAY_SIZE(dest->ru), val, len, 321 + target_power, txs_delta, &max_power); 322 + 323 + return max_power; 324 + } 325 + EXPORT_SYMBOL_GPL(mt76_get_rate_power_limits); 326 + 106 327 int 107 328 mt76_eeprom_init(struct mt76_dev *dev, int len) 108 329 { ··· 331 112 if (!dev->eeprom.data) 332 113 return -ENOMEM; 333 114 334 - return !mt76_get_of_eeprom(dev, len); 115 + return !mt76_get_of_eeprom(dev, dev->eeprom.data, 0, len); 335 116 } 336 117 EXPORT_SYMBOL_GPL(mt76_eeprom_init);
+3
drivers/net/wireless/mediatek/mt76/mac80211.c
··· 428 428 mutex_init(&dev->mcu.mutex); 429 429 dev->tx_worker.fn = mt76_tx_worker; 430 430 431 + spin_lock_init(&dev->token_lock); 432 + idr_init(&dev->token); 433 + 431 434 INIT_LIST_HEAD(&dev->txwi_cache); 432 435 433 436 for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
-4
drivers/net/wireless/mediatek/mt76/mcu.c
··· 99 99 dev_kfree_skb(skb); 100 100 } while (ret == -EAGAIN); 101 101 102 - /* notify driver code to reset the mcu */ 103 - if (ret == -ETIMEDOUT && dev->mcu_ops->mcu_reset) 104 - dev->mcu_ops->mcu_reset(dev); 105 - 106 102 out: 107 103 mutex_unlock(&dev->mcu.mutex); 108 104
+68 -9
drivers/net/wireless/mediatek/mt76/mt76.h
··· 17 17 #include "util.h" 18 18 #include "testmode.h" 19 19 20 - #define MT_MCU_RING_SIZE 32 21 - #define MT_RX_BUF_SIZE 2048 22 - #define MT_SKB_HEAD_LEN 128 20 + #define MT_MCU_RING_SIZE 32 21 + #define MT_RX_BUF_SIZE 2048 22 + #define MT_SKB_HEAD_LEN 128 23 23 24 - #define MT_MAX_NON_AQL_PKT 16 25 - #define MT_TXQ_FREE_THR 32 24 + #define MT_MAX_NON_AQL_PKT 16 25 + #define MT_TXQ_FREE_THR 32 26 + 27 + #define MT76_TOKEN_FREE_THR 64 26 28 27 29 struct mt76_dev; 28 30 struct mt76_phy; ··· 168 166 int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base, 169 167 struct mt76_reg_pair *rp, int len); 170 168 int (*mcu_restart)(struct mt76_dev *dev); 171 - void (*mcu_reset)(struct mt76_dev *dev); 172 169 }; 173 170 174 171 struct mt76_queue_ops { 175 - int (*init)(struct mt76_dev *dev); 172 + int (*init)(struct mt76_dev *dev, 173 + int (*poll)(struct napi_struct *napi, int budget)); 176 174 177 175 int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q, 178 176 int idx, int n_desc, int bufsize, ··· 333 331 u32 drv_flags; 334 332 u32 survey_flags; 335 333 u16 txwi_size; 334 + u16 token_size; 335 + u8 mcs_rates; 336 336 337 337 void (*update_survey)(struct mt76_dev *dev); 338 338 ··· 542 538 struct sk_buff *tx_skb; 543 539 544 540 u32 tx_count; 545 - u16 tx_msdu_len; 541 + u16 tx_mpdu_len; 546 542 547 543 u8 tx_rate_mode; 548 544 u8 tx_rate_idx; ··· 661 657 struct mt76_worker tx_worker; 662 658 struct napi_struct tx_napi; 663 659 660 + spinlock_t token_lock; 661 + struct idr token; 662 + int token_count; 663 + 664 664 wait_queue_head_t tx_wait; 665 665 struct sk_buff_head status_list; 666 666 ··· 717 709 struct mt76_usb usb; 718 710 struct mt76_sdio sdio; 719 711 }; 712 + }; 713 + 714 + struct mt76_power_limits { 715 + s8 cck[4]; 716 + s8 ofdm[8]; 717 + s8 mcs[4][10]; 718 + s8 ru[7][12]; 720 719 }; 721 720 722 721 enum mt76_phy_type { ··· 809 794 #define mt76xx_chip(dev) mt76_chip(&((dev)->mt76)) 810 795 #define mt76xx_rev(dev) mt76_rev(&((dev)->mt76)) 811 796 812 - #define 
mt76_init_queues(dev) (dev)->mt76.queue_ops->init(&((dev)->mt76)) 797 + #define mt76_init_queues(dev, ...) (dev)->mt76.queue_ops->init(&((dev)->mt76), __VA_ARGS__) 813 798 #define mt76_queue_alloc(dev, ...) (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__) 814 799 #define mt76_tx_queue_skb_raw(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__) 815 800 #define mt76_tx_queue_skb(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__) ··· 844 829 845 830 int mt76_eeprom_init(struct mt76_dev *dev, int len); 846 831 void mt76_eeprom_override(struct mt76_phy *phy); 832 + int mt76_get_of_eeprom(struct mt76_dev *dev, void *data, int offset, int len); 847 833 848 834 struct mt76_queue * 849 835 mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc, ··· 1022 1006 void mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb); 1023 1007 void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid); 1024 1008 void mt76_txq_schedule_all(struct mt76_phy *phy); 1009 + void mt76_tx_worker_run(struct mt76_dev *dev); 1025 1010 void mt76_tx_worker(struct mt76_worker *w); 1026 1011 void mt76_release_buffered_frames(struct ieee80211_hw *hw, 1027 1012 struct ieee80211_sta *sta, ··· 1091 1074 int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb, 1092 1075 struct netlink_callback *cb, void *data, int len); 1093 1076 int mt76_testmode_set_state(struct mt76_phy *phy, enum mt76_testmode_state state); 1077 + int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len); 1094 1078 1095 1079 static inline void mt76_testmode_reset(struct mt76_phy *phy, bool disable) 1096 1080 { ··· 1212 1194 1213 1195 void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set); 1214 1196 1197 + s8 mt76_get_rate_power_limits(struct mt76_phy *phy, 1198 + struct ieee80211_channel *chan, 1199 + struct mt76_power_limits *dest, 1200 + s8 target_power); 1201 + 1202 + struct mt76_txwi_cache * 1203 + 
mt76_token_release(struct mt76_dev *dev, int token, bool *wake); 1204 + int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi); 1205 + void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked); 1206 + 1207 + static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked) 1208 + { 1209 + spin_lock_bh(&dev->token_lock); 1210 + __mt76_set_tx_blocked(dev, blocked); 1211 + spin_unlock_bh(&dev->token_lock); 1212 + } 1213 + 1214 + static inline int 1215 + mt76_token_get(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi) 1216 + { 1217 + int token; 1218 + 1219 + spin_lock_bh(&dev->token_lock); 1220 + token = idr_alloc(&dev->token, *ptxwi, 0, dev->drv->token_size, 1221 + GFP_ATOMIC); 1222 + spin_unlock_bh(&dev->token_lock); 1223 + 1224 + return token; 1225 + } 1226 + 1227 + static inline struct mt76_txwi_cache * 1228 + mt76_token_put(struct mt76_dev *dev, int token) 1229 + { 1230 + struct mt76_txwi_cache *txwi; 1231 + 1232 + spin_lock_bh(&dev->token_lock); 1233 + txwi = idr_remove(&dev->token, token); 1234 + spin_unlock_bh(&dev->token_lock); 1235 + 1236 + return txwi; 1237 + } 1215 1238 #endif
+1 -1
drivers/net/wireless/mediatek/mt76/mt7603/dma.c
··· 219 219 return ret; 220 220 221 221 mt76_wr(dev, MT_DELAY_INT_CFG, 0); 222 - ret = mt76_init_queues(dev); 222 + ret = mt76_init_queues(dev, mt76_dma_rx_poll); 223 223 if (ret) 224 224 return ret; 225 225
+2
drivers/net/wireless/mediatek/mt76/mt7603/mac.c
··· 1445 1445 mt76_queue_rx_reset(dev, i); 1446 1446 } 1447 1447 1448 + mt76_tx_status_check(&dev->mt76, NULL, true); 1449 + 1448 1450 mt7603_dma_sched_reset(dev); 1449 1451 1450 1452 mt7603_mac_dma_start(dev);
+2 -3
drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
··· 21 21 struct mt7603_mcu_rxd *rxd; 22 22 23 23 if (!skb) { 24 - dev_err(mdev->dev, 25 - "MCU message %d (seq %d) timed out\n", 26 - cmd, seq); 24 + dev_err(mdev->dev, "MCU message %02x (seq %d) timed out\n", 25 + abs(cmd), seq); 27 26 dev->mcu_hang = MT7603_WATCHDOG_TIMEOUT; 28 27 return -ETIMEDOUT; 29 28 }
+31 -1
drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
··· 69 69 mt7615_pm_set(void *data, u64 val) 70 70 { 71 71 struct mt7615_dev *dev = data; 72 + struct mt76_connac_pm *pm = &dev->pm; 72 73 int ret = 0; 73 74 74 75 if (!mt7615_wait_for_mcu_init(dev)) ··· 78 77 if (!mt7615_firmware_offload(dev) || !mt76_is_mmio(&dev->mt76)) 79 78 return -EOPNOTSUPP; 80 79 80 + if (val == pm->enable) 81 + return 0; 82 + 81 83 mt7615_mutex_acquire(dev); 82 84 83 85 if (dev->phy.n_beacon_vif) { ··· 88 84 goto out; 89 85 } 90 86 91 - dev->pm.enable = val; 87 + if (!pm->enable) { 88 + pm->stats.last_wake_event = jiffies; 89 + pm->stats.last_doze_event = jiffies; 90 + } 91 + pm->enable = val; 92 92 out: 93 93 mt7615_mutex_release(dev); 94 94 ··· 110 102 } 111 103 112 104 DEFINE_DEBUGFS_ATTRIBUTE(fops_pm, mt7615_pm_get, mt7615_pm_set, "%lld\n"); 105 + 106 + static int 107 + mt7615_pm_stats(struct seq_file *s, void *data) 108 + { 109 + struct mt7615_dev *dev = dev_get_drvdata(s->private); 110 + struct mt76_connac_pm *pm = &dev->pm; 111 + unsigned long awake_time = pm->stats.awake_time; 112 + unsigned long doze_time = pm->stats.doze_time; 113 + 114 + if (!test_bit(MT76_STATE_PM, &dev->mphy.state)) 115 + awake_time += jiffies - pm->stats.last_wake_event; 116 + else 117 + doze_time += jiffies - pm->stats.last_doze_event; 118 + 119 + seq_printf(s, "awake time: %14u\ndoze time: %15u\n", 120 + jiffies_to_msecs(awake_time), 121 + jiffies_to_msecs(doze_time)); 122 + 123 + return 0; 124 + } 113 125 114 126 static int 115 127 mt7615_pm_idle_timeout_set(void *data, u64 val) ··· 543 515 debugfs_create_file("runtime-pm", 0600, dir, dev, &fops_pm); 544 516 debugfs_create_file("idle-timeout", 0600, dir, dev, 545 517 &fops_pm_idle_timeout); 518 + debugfs_create_devm_seqfile(dev->mt76.dev, "runtime_pm_stats", dir, 519 + mt7615_pm_stats); 546 520 debugfs_create_devm_seqfile(dev->mt76.dev, "radio", dir, 547 521 mt7615_radio_read); 548 522
+41 -6
drivers/net/wireless/mediatek/mt76/mt7615/dma.c
··· 71 71 struct mt7615_dev *dev; 72 72 73 73 dev = container_of(napi, struct mt7615_dev, mt76.tx_napi); 74 + if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) { 75 + napi_complete(napi); 76 + queue_work(dev->mt76.wq, &dev->pm.wake_work); 77 + return 0; 78 + } 74 79 75 80 mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false); 76 - 77 - if (napi_complete_done(napi, 0)) 81 + if (napi_complete(napi)) 78 82 mt7615_irq_enable(dev, mt7615_tx_mcu_int_mask(dev)); 79 83 84 + mt76_connac_pm_unref(&dev->pm); 85 + 80 86 return 0; 87 + } 88 + 89 + static int mt7615_poll_rx(struct napi_struct *napi, int budget) 90 + { 91 + struct mt7615_dev *dev; 92 + int done; 93 + 94 + dev = container_of(napi->dev, struct mt7615_dev, mt76.napi_dev); 95 + 96 + if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) { 97 + napi_complete(napi); 98 + queue_work(dev->mt76.wq, &dev->pm.wake_work); 99 + return 0; 100 + } 101 + done = mt76_dma_rx_poll(napi, budget); 102 + mt76_connac_pm_unref(&dev->pm); 103 + 104 + return done; 81 105 } 82 106 83 107 int mt7615_wait_pdma_busy(struct mt7615_dev *dev) ··· 211 187 if (is_mt7622(&dev->mt76)) 212 188 mt7622_dma_sched_init(dev); 213 189 214 - if (is_mt7663(&dev->mt76)) 190 + if (is_mt7663(&dev->mt76)) { 215 191 mt7663_dma_sched_init(dev); 192 + 193 + mt76_wr(dev, MT_MCU2HOST_INT_ENABLE, MT7663_MCU_CMD_ERROR_MASK); 194 + } 195 + 216 196 } 217 197 218 198 int mt7615_dma_init(struct mt7615_dev *dev) 219 199 { 220 200 int rx_ring_size = MT7615_RX_RING_SIZE; 221 201 int rx_buf_size = MT_RX_BUF_SIZE; 202 + u32 mask; 222 203 int ret; 223 204 224 205 /* Increase buffer size to receive large VHT MPDUs */ ··· 285 256 286 257 mt76_wr(dev, MT_DELAY_INT_CFG, 0); 287 258 288 - ret = mt76_init_queues(dev); 259 + ret = mt76_init_queues(dev, mt7615_poll_rx); 289 260 if (ret < 0) 290 261 return ret; 291 262 ··· 298 269 MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 1000); 299 270 300 271 /* enable interrupts for TX/RX rings */ 301 - mt7615_irq_enable(dev, MT_INT_RX_DONE_ALL | 
mt7615_tx_mcu_int_mask(dev) | 302 - MT_INT_MCU_CMD); 272 + 273 + mask = MT_INT_RX_DONE_ALL | mt7615_tx_mcu_int_mask(dev); 274 + if (is_mt7663(&dev->mt76)) 275 + mask |= MT7663_INT_MCU_CMD; 276 + else 277 + mask |= MT_INT_MCU_CMD; 278 + 279 + mt7615_irq_enable(dev, mask); 303 280 304 281 mt7615_dma_start(dev); 305 282
+19 -3
drivers/net/wireless/mediatek/mt76/mt7615/init.c
··· 252 252 int delta_idx, delta = mt76_tx_power_nss_delta(n_chains); 253 253 u8 *eep = (u8 *)dev->mt76.eeprom.data; 254 254 enum nl80211_band band = sband->band; 255 + struct mt76_power_limits limits; 255 256 u8 rate_val; 256 257 257 258 delta_idx = mt7615_eeprom_get_power_delta_index(dev, band); ··· 281 280 target_power = max(target_power, eep[index]); 282 281 } 283 282 284 - target_power = DIV_ROUND_UP(target_power + delta, 2); 283 + target_power = mt76_get_rate_power_limits(&dev->mphy, chan, 284 + &limits, 285 + target_power); 286 + target_power += delta; 287 + target_power = DIV_ROUND_UP(target_power, 2); 285 288 chan->max_power = min_t(int, chan->max_reg_power, 286 289 target_power); 287 290 chan->orig_mpwr = target_power; ··· 316 311 memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2)); 317 312 dev->mt76.region = request->dfs_region; 318 313 314 + mt7615_init_txpower(dev, &mphy->sband_2g.sband); 315 + mt7615_init_txpower(dev, &mphy->sband_5g.sband); 316 + 319 317 mt7615_mutex_acquire(dev); 320 318 321 319 if (chandef->chan->flags & IEEE80211_CHAN_RADAR) 322 320 mt7615_dfs_init_radar_detector(phy); 323 - if (mt7615_firmware_offload(phy->dev)) 321 + 322 + if (mt7615_firmware_offload(phy->dev)) { 324 323 mt76_connac_mcu_set_channel_domain(mphy); 324 + mt76_connac_mcu_set_rate_txpower(mphy); 325 + } 325 326 326 327 mt7615_mutex_release(dev); 327 328 } ··· 502 491 dev->phy.dev = dev; 503 492 dev->phy.mt76 = &dev->mt76.phy; 504 493 dev->mt76.phy.priv = &dev->phy; 494 + dev->mt76.tx_worker.fn = mt7615_tx_worker; 505 495 506 496 INIT_DELAYED_WORK(&dev->pm.ps_work, mt7615_pm_power_save_work); 507 497 INIT_WORK(&dev->pm.wake_work, mt7615_pm_wake_work); 508 - init_completion(&dev->pm.wake_cmpl); 498 + spin_lock_init(&dev->pm.wake.lock); 499 + mutex_init(&dev->pm.mutex); 500 + init_waitqueue_head(&dev->pm.wait); 509 501 spin_lock_init(&dev->pm.txq_lock); 510 502 set_bit(MT76_STATE_PM, &dev->mphy.state); 511 503 INIT_DELAYED_WORK(&dev->mphy.mac_work, 
mt7615_mac_work); ··· 526 512 527 513 mt7615_init_wiphy(hw); 528 514 dev->pm.idle_timeout = MT7615_PM_TIMEOUT; 515 + dev->pm.stats.last_wake_event = jiffies; 516 + dev->pm.stats.last_doze_event = jiffies; 529 517 mt7615_cap_dbdc_disable(dev); 530 518 dev->phy.dfs_state = -1; 531 519
+25 -19
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
··· 1465 1465 u8 wcid; 1466 1466 1467 1467 trace_mac_tx_free(dev, token); 1468 - 1469 - spin_lock_bh(&dev->token_lock); 1470 - txwi = idr_remove(&dev->token, token); 1471 - spin_unlock_bh(&dev->token_lock); 1472 - 1468 + txwi = mt76_token_put(mdev, token); 1473 1469 if (!txwi) 1474 1470 return; 1475 1471 ··· 1510 1514 1511 1515 dev_kfree_skb(skb); 1512 1516 1513 - if (test_bit(MT76_STATE_PM, &dev->phy.mt76->state)) 1514 - return; 1515 - 1516 1517 rcu_read_lock(); 1517 1518 mt7615_mac_sta_poll(dev); 1518 1519 rcu_read_unlock(); 1519 1520 1520 - mt76_connac_power_save_sched(&dev->mphy, &dev->pm); 1521 1521 mt76_worker_schedule(&dev->mt76.tx_worker); 1522 1522 } 1523 1523 ··· 1905 1913 pm.wake_work); 1906 1914 mphy = dev->phy.mt76; 1907 1915 1908 - if (!mt7615_mcu_set_drv_ctrl(dev)) 1916 + if (!mt7615_mcu_set_drv_ctrl(dev)) { 1917 + int i; 1918 + 1919 + mt76_for_each_q_rx(&dev->mt76, i) 1920 + napi_schedule(&dev->mt76.napi[i]); 1909 1921 mt76_connac_pm_dequeue_skbs(mphy, &dev->pm); 1910 - else 1911 - dev_err(mphy->dev->dev, "failed to wake device\n"); 1922 + mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false); 1923 + ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, 1924 + MT7615_WATCHDOG_TIME); 1925 + } 1912 1926 1913 1927 ieee80211_wake_queues(mphy->hw); 1914 - complete_all(&dev->pm.wake_cmpl); 1928 + wake_up(&dev->pm.wait); 1915 1929 } 1916 1930 1917 1931 void mt7615_pm_power_save_work(struct work_struct *work) ··· 1929 1931 pm.ps_work.work); 1930 1932 1931 1933 delta = dev->pm.idle_timeout; 1934 + if (test_bit(MT76_HW_SCANNING, &dev->mphy.state) || 1935 + test_bit(MT76_HW_SCHED_SCANNING, &dev->mphy.state)) 1936 + goto out; 1937 + 1932 1938 if (time_is_after_jiffies(dev->pm.last_activity + delta)) { 1933 1939 delta = dev->pm.last_activity + delta - jiffies; 1934 1940 goto out; ··· 1975 1973 struct mt76_txwi_cache *txwi; 1976 1974 int id; 1977 1975 1978 - spin_lock_bh(&dev->token_lock); 1979 - idr_for_each_entry(&dev->token, txwi, id) { 1976 + 
spin_lock_bh(&dev->mt76.token_lock); 1977 + idr_for_each_entry(&dev->mt76.token, txwi, id) { 1980 1978 mt7615_txp_skb_unmap(&dev->mt76, txwi); 1981 - if (txwi->skb) 1982 - dev_kfree_skb_any(txwi->skb); 1979 + if (txwi->skb) { 1980 + struct ieee80211_hw *hw; 1981 + 1982 + hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb); 1983 + ieee80211_free_txskb(hw, txwi->skb); 1984 + } 1983 1985 mt76_put_txwi(&dev->mt76, txwi); 1984 1986 } 1985 - spin_unlock_bh(&dev->token_lock); 1986 - idr_destroy(&dev->token); 1987 + spin_unlock_bh(&dev->mt76.token_lock); 1988 + idr_destroy(&dev->mt76.token); 1987 1989 } 1988 1990 EXPORT_SYMBOL_GPL(mt7615_tx_token_put); 1989 1991
+24 -19
drivers/net/wireless/mediatek/mt76/mt7615/main.c
··· 66 66 ret = mt76_connac_mcu_set_channel_domain(phy->mt76); 67 67 if (ret) 68 68 goto out; 69 + 70 + ret = mt76_connac_mcu_set_rate_txpower(phy->mt76); 71 + if (ret) 72 + goto out; 69 73 } 70 74 71 75 ret = mt7615_mcu_set_chan_info(phy, MCU_EXT_CMD_SET_RX_PATH); ··· 351 347 352 348 mt7615_mutex_release(dev); 353 349 354 - mt76_txq_schedule_all(phy->mt76); 355 - 350 + mt76_worker_schedule(&dev->mt76.tx_worker); 356 351 if (!mt76_testmode_enabled(phy->mt76)) 357 352 ieee80211_queue_delayed_work(phy->mt76->hw, 358 353 &phy->mt76->mac_work, ··· 577 574 if (changed & BSS_CHANGED_PS) 578 575 mt76_connac_mcu_set_vif_ps(&dev->mt76, vif); 579 576 580 - if (changed & BSS_CHANGED_ARP_FILTER) 581 - mt7615_mcu_update_arp_filter(hw, vif, info); 577 + if ((changed & BSS_CHANGED_ARP_FILTER) && 578 + mt7615_firmware_offload(dev)) { 579 + struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; 580 + 581 + mt76_connac_mcu_update_arp_filter(&dev->mt76, &mvif->mt76, 582 + info); 583 + } 582 584 583 585 if (changed & BSS_CHANGED_ASSOC) 584 586 mt7615_mac_set_beacon_filter(phy, vif, info->assoc); ··· 693 685 break; 694 686 } 695 687 msta->n_rates = i; 696 - if (!test_bit(MT76_STATE_PM, &phy->mt76->state)) 688 + if (mt76_connac_pm_ref(phy->mt76, &dev->pm)) { 697 689 mt7615_mac_set_rates(phy, msta, NULL, msta->rates); 690 + mt76_connac_pm_unref(&dev->pm); 691 + } 698 692 spin_unlock_bh(&dev->mt76.lock); 699 693 } 700 694 701 - static void 702 - mt7615_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq) 695 + void mt7615_tx_worker(struct mt76_worker *w) 703 696 { 704 - struct mt7615_dev *dev = mt7615_hw_dev(hw); 705 - struct mt7615_phy *phy = mt7615_hw_phy(hw); 706 - struct mt76_phy *mphy = phy->mt76; 697 + struct mt7615_dev *dev = container_of(w, struct mt7615_dev, 698 + mt76.tx_worker); 707 699 708 - if (!test_bit(MT76_STATE_RUNNING, &mphy->state)) 709 - return; 710 - 711 - if (test_bit(MT76_STATE_PM, &mphy->state)) { 700 + if (!mt76_connac_pm_ref(&dev->mphy, 
&dev->pm)) { 712 701 queue_work(dev->mt76.wq, &dev->pm.wake_work); 713 702 return; 714 703 } 715 704 716 - dev->pm.last_activity = jiffies; 717 - mt76_worker_schedule(&dev->mt76.tx_worker); 705 + mt76_tx_worker_run(&dev->mt76); 706 + mt76_connac_pm_unref(&dev->pm); 718 707 } 719 708 720 709 static void mt7615_tx(struct ieee80211_hw *hw, ··· 739 734 wcid = &msta->wcid; 740 735 } 741 736 742 - if (!test_bit(MT76_STATE_PM, &mphy->state)) { 743 - dev->pm.last_activity = jiffies; 737 + if (mt76_connac_pm_ref(mphy, &dev->pm)) { 744 738 mt76_tx(mphy, control->sta, wcid, skb); 739 + mt76_connac_pm_unref(&dev->pm); 745 740 return; 746 741 } 747 742 ··· 1268 1263 .sta_set_decap_offload = mt7615_sta_set_decap_offload, 1269 1264 .ampdu_action = mt7615_ampdu_action, 1270 1265 .set_rts_threshold = mt7615_set_rts_threshold, 1271 - .wake_tx_queue = mt7615_wake_tx_queue, 1266 + .wake_tx_queue = mt76_wake_tx_queue, 1272 1267 .sta_rate_tbl_update = mt7615_sta_rate_tbl_update, 1273 1268 .sw_scan_start = mt76_sw_scan, 1274 1269 .sw_scan_complete = mt76_sw_scan_complete,
+124 -76
drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
··· 175 175 int ret = 0; 176 176 177 177 if (!skb) { 178 - dev_err(mdev->dev, "Message %ld (seq %d) timeout\n", 179 - cmd & MCU_CMD_MASK, seq); 178 + dev_err(mdev->dev, "Message %08x (seq %d) timeout\n", 179 + cmd, seq); 180 180 return -ETIMEDOUT; 181 181 } 182 182 ··· 288 288 static int mt7615_mcu_drv_pmctrl(struct mt7615_dev *dev) 289 289 { 290 290 struct mt76_phy *mphy = &dev->mt76.phy; 291 + struct mt76_connac_pm *pm = &dev->pm; 291 292 struct mt76_dev *mdev = &dev->mt76; 292 293 u32 addr; 293 294 int err; 294 295 295 - addr = is_mt7663(mdev) ? MT_PCIE_DOORBELL_PUSH : MT_CFG_LPCR_HOST; 296 + if (is_mt7663(mdev)) { 297 + /* Clear firmware own via N9 eint */ 298 + mt76_wr(dev, MT_PCIE_DOORBELL_PUSH, MT_CFG_LPCR_HOST_DRV_OWN); 299 + mt76_poll(dev, MT_CONN_ON_MISC, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000); 300 + 301 + addr = MT_CONN_HIF_ON_LPCTL; 302 + } else { 303 + addr = MT_CFG_LPCR_HOST; 304 + } 305 + 296 306 mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN); 297 307 298 308 mt7622_trigger_hif_int(dev, true); 299 309 300 - addr = is_mt7663(mdev) ? 
MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST; 301 310 err = !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000); 302 311 303 312 mt7622_trigger_hif_int(dev, false); ··· 318 309 319 310 clear_bit(MT76_STATE_PM, &mphy->state); 320 311 312 + pm->stats.last_wake_event = jiffies; 313 + pm->stats.doze_time += pm->stats.last_wake_event - 314 + pm->stats.last_doze_event; 315 + 321 316 return 0; 322 317 } 323 318 324 319 static int mt7615_mcu_lp_drv_pmctrl(struct mt7615_dev *dev) 325 320 { 326 321 struct mt76_phy *mphy = &dev->mt76.phy; 327 - int i; 322 + struct mt76_connac_pm *pm = &dev->pm; 323 + int i, err = 0; 328 324 329 - if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state)) 325 + mutex_lock(&pm->mutex); 326 + 327 + if (!test_bit(MT76_STATE_PM, &mphy->state)) 330 328 goto out; 331 329 332 330 for (i = 0; i < MT7615_DRV_OWN_RETRY_COUNT; i++) { ··· 345 329 346 330 if (i == MT7615_DRV_OWN_RETRY_COUNT) { 347 331 dev_err(dev->mt76.dev, "driver own failed\n"); 348 - set_bit(MT76_STATE_PM, &mphy->state); 349 - return -EIO; 332 + err = -EIO; 333 + goto out; 350 334 } 335 + clear_bit(MT76_STATE_PM, &mphy->state); 351 336 337 + pm->stats.last_wake_event = jiffies; 338 + pm->stats.doze_time += pm->stats.last_wake_event - 339 + pm->stats.last_doze_event; 352 340 out: 353 - dev->pm.last_activity = jiffies; 341 + mutex_unlock(&pm->mutex); 354 342 355 - return 0; 343 + return err; 356 344 } 357 345 358 346 static int mt7615_mcu_fw_pmctrl(struct mt7615_dev *dev) 359 347 { 360 348 struct mt76_phy *mphy = &dev->mt76.phy; 349 + struct mt76_connac_pm *pm = &dev->pm; 361 350 int err = 0; 362 351 u32 addr; 363 352 364 - if (test_and_set_bit(MT76_STATE_PM, &mphy->state)) 365 - return 0; 353 + mutex_lock(&pm->mutex); 354 + 355 + if (mt76_connac_skip_fw_pmctrl(mphy, pm)) 356 + goto out; 366 357 367 358 mt7622_trigger_hif_int(dev, true); 368 359 ··· 385 362 } 386 363 387 364 mt7622_trigger_hif_int(dev, false); 365 + 366 + pm->stats.last_doze_event = jiffies; 367 + pm->stats.awake_time += 
pm->stats.last_doze_event - 368 + pm->stats.last_wake_event; 369 + out: 370 + mutex_unlock(&pm->mutex); 388 371 389 372 return err; 390 373 } ··· 453 424 break; 454 425 } 455 426 456 - wiphy_info(mt76_hw(dev)->wiphy, "%s: %*s", type, 427 + wiphy_info(mt76_hw(dev)->wiphy, "%s: %.*s", type, 457 428 (int)(skb->len - sizeof(*rxd)), data); 458 429 } 459 430 ··· 1362 1333 const struct firmware *fw = NULL; 1363 1334 int len, ret, sem; 1364 1335 1365 - sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, true); 1366 - switch (sem) { 1367 - case PATCH_IS_DL: 1368 - return 0; 1369 - case PATCH_NOT_DL_SEM_SUCCESS: 1370 - break; 1371 - default: 1372 - dev_err(dev->mt76.dev, "Failed to get patch semaphore\n"); 1373 - return -EAGAIN; 1374 - } 1375 - 1376 1336 ret = firmware_request_nowarn(&fw, name, dev->mt76.dev); 1377 1337 if (ret) 1378 - goto out; 1338 + return ret; 1379 1339 1380 1340 if (!fw || !fw->data || fw->size < sizeof(*hdr)) { 1381 1341 dev_err(dev->mt76.dev, "Invalid firmware\n"); 1382 1342 ret = -EINVAL; 1383 - goto out; 1343 + goto release_fw; 1344 + } 1345 + 1346 + sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, true); 1347 + switch (sem) { 1348 + case PATCH_IS_DL: 1349 + goto release_fw; 1350 + case PATCH_NOT_DL_SEM_SUCCESS: 1351 + break; 1352 + default: 1353 + dev_err(dev->mt76.dev, "Failed to get patch semaphore\n"); 1354 + ret = -EAGAIN; 1355 + goto release_fw; 1384 1356 } 1385 1357 1386 1358 hdr = (const struct mt7615_patch_hdr *)(fw->data); ··· 1410 1380 dev_err(dev->mt76.dev, "Failed to start patch\n"); 1411 1381 1412 1382 out: 1413 - release_firmware(fw); 1414 - 1415 1383 sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, false); 1416 1384 switch (sem) { 1417 1385 case PATCH_REL_SEM_SUCCESS: ··· 1419 1391 dev_err(dev->mt76.dev, "Failed to release patch semaphore\n"); 1420 1392 break; 1421 1393 } 1394 + 1395 + release_fw: 1396 + release_firmware(fw); 1422 1397 1423 1398 return ret; 1424 1399 } ··· 2168 2137 { 2169 2138 struct mt76_phy *mphy = phy->mt76; 2170 
2139 struct ieee80211_hw *hw = mphy->hw; 2140 + struct mt76_power_limits limits; 2141 + s8 *limits_array = (s8 *)&limits; 2171 2142 int n_chains = hweight8(mphy->antenna_mask); 2172 2143 int tx_power; 2173 2144 int i; 2145 + static const u8 sku_mapping[] = { 2146 + #define SKU_FIELD(_type, _field) \ 2147 + [MT_SKU_##_type] = offsetof(struct mt76_power_limits, _field) 2148 + SKU_FIELD(CCK_1_2, cck[0]), 2149 + SKU_FIELD(CCK_55_11, cck[2]), 2150 + SKU_FIELD(OFDM_6_9, ofdm[0]), 2151 + SKU_FIELD(OFDM_12_18, ofdm[2]), 2152 + SKU_FIELD(OFDM_24_36, ofdm[4]), 2153 + SKU_FIELD(OFDM_48, ofdm[6]), 2154 + SKU_FIELD(OFDM_54, ofdm[7]), 2155 + SKU_FIELD(HT20_0_8, mcs[0][0]), 2156 + SKU_FIELD(HT20_32, ofdm[0]), 2157 + SKU_FIELD(HT20_1_2_9_10, mcs[0][1]), 2158 + SKU_FIELD(HT20_3_4_11_12, mcs[0][3]), 2159 + SKU_FIELD(HT20_5_13, mcs[0][5]), 2160 + SKU_FIELD(HT20_6_14, mcs[0][6]), 2161 + SKU_FIELD(HT20_7_15, mcs[0][7]), 2162 + SKU_FIELD(HT40_0_8, mcs[1][0]), 2163 + SKU_FIELD(HT40_32, ofdm[0]), 2164 + SKU_FIELD(HT40_1_2_9_10, mcs[1][1]), 2165 + SKU_FIELD(HT40_3_4_11_12, mcs[1][3]), 2166 + SKU_FIELD(HT40_5_13, mcs[1][5]), 2167 + SKU_FIELD(HT40_6_14, mcs[1][6]), 2168 + SKU_FIELD(HT40_7_15, mcs[1][7]), 2169 + SKU_FIELD(VHT20_0, mcs[0][0]), 2170 + SKU_FIELD(VHT20_1_2, mcs[0][1]), 2171 + SKU_FIELD(VHT20_3_4, mcs[0][3]), 2172 + SKU_FIELD(VHT20_5_6, mcs[0][5]), 2173 + SKU_FIELD(VHT20_7, mcs[0][7]), 2174 + SKU_FIELD(VHT20_8, mcs[0][8]), 2175 + SKU_FIELD(VHT20_9, mcs[0][9]), 2176 + SKU_FIELD(VHT40_0, mcs[1][0]), 2177 + SKU_FIELD(VHT40_1_2, mcs[1][1]), 2178 + SKU_FIELD(VHT40_3_4, mcs[1][3]), 2179 + SKU_FIELD(VHT40_5_6, mcs[1][5]), 2180 + SKU_FIELD(VHT40_7, mcs[1][7]), 2181 + SKU_FIELD(VHT40_8, mcs[1][8]), 2182 + SKU_FIELD(VHT40_9, mcs[1][9]), 2183 + SKU_FIELD(VHT80_0, mcs[2][0]), 2184 + SKU_FIELD(VHT80_1_2, mcs[2][1]), 2185 + SKU_FIELD(VHT80_3_4, mcs[2][3]), 2186 + SKU_FIELD(VHT80_5_6, mcs[2][5]), 2187 + SKU_FIELD(VHT80_7, mcs[2][7]), 2188 + SKU_FIELD(VHT80_8, mcs[2][8]), 2189 + 
SKU_FIELD(VHT80_9, mcs[2][9]), 2190 + SKU_FIELD(VHT160_0, mcs[3][0]), 2191 + SKU_FIELD(VHT160_1_2, mcs[3][1]), 2192 + SKU_FIELD(VHT160_3_4, mcs[3][3]), 2193 + SKU_FIELD(VHT160_5_6, mcs[3][5]), 2194 + SKU_FIELD(VHT160_7, mcs[3][7]), 2195 + SKU_FIELD(VHT160_8, mcs[3][8]), 2196 + SKU_FIELD(VHT160_9, mcs[3][9]), 2197 + #undef SKU_FIELD 2198 + }; 2174 2199 2175 2200 tx_power = hw->conf.power_level * 2 - 2176 2201 mt76_tx_power_nss_delta(n_chains); 2202 + 2203 + tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan, 2204 + &limits, tx_power); 2177 2205 mphy->txpower_cur = tx_power; 2178 2206 2207 + if (is_mt7663(mphy->dev)) { 2208 + memset(sku, tx_power, MT_SKU_4SS_DELTA + 1); 2209 + return; 2210 + } 2211 + 2179 2212 for (i = 0; i < MT_SKU_1SS_DELTA; i++) 2180 - sku[i] = tx_power; 2213 + sku[i] = limits_array[sku_mapping[i]]; 2181 2214 2182 2215 for (i = 0; i < 4; i++) { 2183 2216 int delta = 0; ··· 2723 2628 2724 2629 return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_ROC, &req, 2725 2630 sizeof(req), false); 2726 - } 2727 - 2728 - int mt7615_mcu_update_arp_filter(struct ieee80211_hw *hw, 2729 - struct ieee80211_vif *vif, 2730 - struct ieee80211_bss_conf *info) 2731 - { 2732 - struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; 2733 - struct mt7615_dev *dev = mt7615_hw_dev(hw); 2734 - struct sk_buff *skb; 2735 - int i, len = min_t(int, info->arp_addr_cnt, 2736 - IEEE80211_BSS_ARP_ADDR_LIST_LEN); 2737 - struct { 2738 - struct { 2739 - u8 bss_idx; 2740 - u8 pad[3]; 2741 - } __packed hdr; 2742 - struct mt76_connac_arpns_tlv arp; 2743 - } req_hdr = { 2744 - .hdr = { 2745 - .bss_idx = mvif->mt76.idx, 2746 - }, 2747 - .arp = { 2748 - .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP), 2749 - .len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)), 2750 - .ips_num = len, 2751 - .mode = 2, /* update */ 2752 - .option = 1, 2753 - }, 2754 - }; 2755 - 2756 - if (!mt7615_firmware_offload(dev)) 2757 - return 0; 2758 - 2759 - skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, 2760 
- sizeof(req_hdr) + len * sizeof(__be32)); 2761 - if (!skb) 2762 - return -ENOMEM; 2763 - 2764 - skb_put_data(skb, &req_hdr, sizeof(req_hdr)); 2765 - for (i = 0; i < len; i++) { 2766 - u8 *addr = (u8 *)skb_put(skb, sizeof(__be32)); 2767 - 2768 - memcpy(addr, &info->arp_addr_list[i], sizeof(__be32)); 2769 - } 2770 - 2771 - return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_UNI_CMD_OFFLOAD, 2772 - true); 2773 2631 } 2774 2632 2775 2633 int mt7615_mcu_set_p2p_oppps(struct ieee80211_hw *hw,
+17 -7
drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
··· 105 105 { 106 106 struct mt7615_dev *dev = from_tasklet(dev, t, irq_tasklet); 107 107 u32 intr, mask = 0, tx_mcu_mask = mt7615_tx_mcu_int_mask(dev); 108 + u32 mcu_int; 108 109 109 110 mt76_wr(dev, MT_INT_MASK_CSR, 0); 110 111 ··· 129 128 if (intr & MT_INT_RX_DONE(1)) 130 129 napi_schedule(&dev->mt76.napi[1]); 131 130 132 - if (intr & MT_INT_MCU_CMD) { 133 - u32 val = mt76_rr(dev, MT_MCU_CMD); 131 + if (!(intr & (MT_INT_MCU_CMD | MT7663_INT_MCU_CMD))) 132 + return; 134 133 135 - if (val & MT_MCU_CMD_ERROR_MASK) { 136 - dev->reset_state = val; 137 - ieee80211_queue_work(mt76_hw(dev), &dev->reset_work); 138 - wake_up(&dev->reset_wait); 139 - } 134 + if (is_mt7663(&dev->mt76)) { 135 + mcu_int = mt76_rr(dev, MT_MCU2HOST_INT_STATUS); 136 + mcu_int &= MT7663_MCU_CMD_ERROR_MASK; 137 + } else { 138 + mcu_int = mt76_rr(dev, MT_MCU_CMD); 139 + mcu_int &= MT_MCU_CMD_ERROR_MASK; 140 140 } 141 + 142 + if (!mcu_int) 143 + return; 144 + 145 + dev->reset_state = mcu_int; 146 + ieee80211_queue_work(mt76_hw(dev), &dev->reset_work); 147 + wake_up(&dev->reset_wait); 141 148 } 142 149 143 150 static u32 __mt7615_reg_addr(struct mt7615_dev *dev, u32 addr) ··· 190 181 .survey_flags = SURVEY_INFO_TIME_TX | 191 182 SURVEY_INFO_TIME_RX | 192 183 SURVEY_INFO_TIME_BSS_RX, 184 + .token_size = MT7615_TOKEN_SIZE, 193 185 .tx_prepare_skb = mt7615_tx_prepare_skb, 194 186 .tx_complete_skb = mt7615_tx_complete_skb, 195 187 .rx_skb = mt7615_queue_rx_skb,
+1 -6
drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
··· 263 263 bool flash_eeprom; 264 264 bool dbdc_support; 265 265 266 - spinlock_t token_lock; 267 - struct idr token; 268 - 269 266 u8 fw_ver; 270 267 271 268 struct work_struct rate_work; ··· 505 508 struct ieee80211_sta *sta, 506 509 struct mt76_tx_info *tx_info); 507 510 511 + void mt7615_tx_worker(struct mt76_worker *w); 508 512 void mt7615_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e); 509 513 void mt7615_tx_token_put(struct mt7615_dev *dev); 510 514 void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, ··· 547 549 bool enable); 548 550 int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif, 549 551 bool enable); 550 - int mt7615_mcu_update_arp_filter(struct ieee80211_hw *hw, 551 - struct ieee80211_vif *vif, 552 - struct ieee80211_bss_conf *info); 553 552 int __mt7663_load_firmware(struct mt7615_dev *dev); 554 553 u32 mt7615_mcu_reg_rr(struct mt76_dev *dev, u32 offset); 555 554 void mt7615_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val);
+10 -6
drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
··· 40 40 mt76_wr(dev, MT_INT_SOURCE_CSR, ~0); 41 41 42 42 INIT_WORK(&dev->mcu_work, mt7615_pci_init_work); 43 - spin_lock_init(&dev->token_lock); 44 - idr_init(&dev->token); 45 - 46 43 ret = mt7615_eeprom_init(dev, addr); 47 44 if (ret < 0) 48 45 return ret; 46 + 47 + if (is_mt7663(&dev->mt76)) { 48 + /* Reset RGU */ 49 + mt76_clear(dev, MT_MCU_CIRQ_IRQ_SEL(4), BIT(1)); 50 + mt76_set(dev, MT_MCU_CIRQ_IRQ_SEL(4), BIT(1)); 51 + } 49 52 50 53 ret = mt7615_dma_init(dev); 51 54 if (ret) ··· 79 76 mt76 = container_of(led_cdev, struct mt76_dev, led_cdev); 80 77 dev = container_of(mt76, struct mt7615_dev, mt76); 81 78 82 - if (test_bit(MT76_STATE_PM, &mt76->phy.state)) 79 + if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) 83 80 return; 84 81 85 82 val = FIELD_PREP(MT_LED_STATUS_DURATION, 0xffff) | ··· 97 94 val |= MT_LED_CTRL_POLARITY(mt76->led_pin); 98 95 addr = mt7615_reg_map(dev, MT_LED_CTRL); 99 96 mt76_wr(dev, addr, val); 97 + 98 + mt76_connac_pm_unref(&dev->pm); 100 99 } 101 100 102 101 static int ··· 169 164 mt76_unregister_device(&dev->mt76); 170 165 if (mcu_running) 171 166 mt7615_mcu_exit(dev); 172 - mt7615_dma_cleanup(dev); 173 167 174 168 mt7615_tx_token_put(dev); 175 - 169 + mt7615_dma_cleanup(dev); 176 170 tasklet_disable(&dev->irq_tasklet); 177 171 178 172 mt76_free_device(&dev->mt76);
+13 -10
drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
··· 37 37 token = le16_to_cpu(txp->hw.msdu_id[0]) & 38 38 ~MT_MSDU_ID_VALID; 39 39 40 - spin_lock_bh(&dev->token_lock); 41 - t = idr_remove(&dev->token, token); 42 - spin_unlock_bh(&dev->token_lock); 40 + t = mt76_token_put(mdev, token); 43 41 e->skb = t ? t->skb : NULL; 44 42 } 45 43 ··· 159 161 t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size); 160 162 t->skb = tx_info->skb; 161 163 162 - spin_lock_bh(&dev->token_lock); 163 - id = idr_alloc(&dev->token, t, 0, MT7615_TOKEN_SIZE, GFP_ATOMIC); 164 - spin_unlock_bh(&dev->token_lock); 164 + id = mt76_token_get(mdev, &t); 165 165 if (id < 0) 166 166 return id; 167 167 ··· 197 201 mt76_for_each_q_rx(&dev->mt76, i) 198 202 mt76_queue_rx_reset(dev, i); 199 203 204 + mt76_tx_status_check(&dev->mt76, NULL, true); 205 + 200 206 mt7615_dma_start(dev); 201 207 } 202 208 EXPORT_SYMBOL_GPL(mt7615_dma_reset); ··· 206 208 static void 207 209 mt7615_hif_int_event_trigger(struct mt7615_dev *dev, u8 event) 208 210 { 209 - mt76_wr(dev, MT_MCU_INT_EVENT, event); 211 + u32 reg = MT_MCU_INT_EVENT; 212 + 213 + if (is_mt7663(&dev->mt76)) 214 + reg = MT7663_MCU_INT_EVENT; 215 + 216 + mt76_wr(dev, reg, event); 210 217 211 218 mt7622_trigger_hif_int(dev, true); 212 219 mt7622_trigger_hif_int(dev, false); ··· 306 303 307 304 mt7615_hif_int_event_trigger(dev, MT_MCU_INT_EVENT_PDMA_STOPPED); 308 305 309 - mt7615_tx_token_put(dev); 310 - idr_init(&dev->token); 311 - 312 306 if (mt7615_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) { 313 307 mt7615_dma_reset(dev); 308 + 309 + mt7615_tx_token_put(dev); 310 + idr_init(&dev->mt76.token); 314 311 315 312 mt76_wr(dev, MT_WPDMA_MEM_RNG_ERR, 0); 316 313
+11
drivers/net/wireless/mediatek/mt76/mt7615/regs.h
··· 61 61 #define MT_MCU_PCIE_REMAP_2_BASE GENMASK(31, 19) 62 62 #define MT_PCIE_REMAP_BASE_2 ((dev)->reg_map[MT_PCIE_REMAP_BASE2]) 63 63 64 + #define MT_MCU_CIRQ_BASE 0xc0000 65 + #define MT_MCU_CIRQ(ofs) (MT_MCU_CIRQ_BASE + (ofs)) 66 + 67 + #define MT_MCU_CIRQ_IRQ_SEL(n) MT_MCU_CIRQ((n) << 2) 68 + 64 69 #define MT_HIF(ofs) ((dev)->reg_map[MT_HIF_BASE] + (ofs)) 65 70 #define MT_HIF_RST MT_HIF(0x100) 66 71 #define MT_HIF_LOGIC_RST_N BIT(4) ··· 93 88 #define MT_CFG_LPCR_HOST_FW_OWN BIT(0) 94 89 #define MT_CFG_LPCR_HOST_DRV_OWN BIT(1) 95 90 91 + #define MT_MCU2HOST_INT_STATUS MT_HIF(0x1f0) 92 + #define MT_MCU2HOST_INT_ENABLE MT_HIF(0x1f4) 93 + 94 + #define MT7663_MCU_INT_EVENT MT_HIF(0x108) 96 95 #define MT_MCU_INT_EVENT MT_HIF(0x1f8) 97 96 #define MT_MCU_INT_EVENT_PDMA_STOPPED BIT(0) 98 97 #define MT_MCU_INT_EVENT_PDMA_INIT BIT(1) ··· 111 102 #define MT_INT_RX_DONE_ALL GENMASK(1, 0) 112 103 #define MT_INT_TX_DONE_ALL GENMASK(19, 4) 113 104 #define MT_INT_TX_DONE(_n) BIT((_n) + 4) 105 + #define MT7663_INT_MCU_CMD BIT(29) 114 106 #define MT_INT_MCU_CMD BIT(30) 115 107 116 108 #define MT_WPDMA_GLO_CFG MT_HIF(0x208) ··· 148 138 #define MT_MCU_CMD_PDMA_ERROR BIT(27) 149 139 #define MT_MCU_CMD_PCIE_ERROR BIT(28) 150 140 #define MT_MCU_CMD_ERROR_MASK (GENMASK(5, 1) | GENMASK(28, 24)) 141 + #define MT7663_MCU_CMD_ERROR_MASK GENMASK(5, 2) 151 142 152 143 #define MT_TX_RING_BASE MT_HIF(0x300) 153 144 #define MT_RX_RING_BASE MT_HIF(0x400)
+2 -1
drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
··· 67 67 struct mt7615_rate_desc *rate = &wrd->rate; 68 68 struct mt7615_sta *sta = wrd->sta; 69 69 u32 w5, w27, addr, val; 70 - u16 idx = sta->vif->mt76.omac_idx; 70 + u16 idx; 71 71 72 72 lockdep_assert_held(&dev->mt76.mutex); 73 73 ··· 119 119 120 120 sta->rate_probe = sta->rateset[rate->rateset].probe_rate.idx != -1; 121 121 122 + idx = sta->vif->mt76.omac_idx; 122 123 idx = idx > HW_BSSID_MAX ? HW_BSSID_0 : idx; 123 124 addr = idx > 1 ? MT_LPON_TCR2(idx): MT_LPON_TCR0(idx); 124 125
+53 -1
drivers/net/wireless/mediatek/mt76/mt76_connac.h
··· 53 53 } tx_q[IEEE80211_NUM_ACS]; 54 54 55 55 struct work_struct wake_work; 56 - struct completion wake_cmpl; 56 + wait_queue_head_t wait; 57 + 58 + struct { 59 + spinlock_t lock; 60 + u32 count; 61 + } wake; 62 + struct mutex mutex; 57 63 58 64 struct delayed_work ps_work; 59 65 unsigned long last_activity; 60 66 unsigned long idle_timeout; 67 + 68 + struct { 69 + unsigned long last_wake_event; 70 + unsigned long awake_time; 71 + unsigned long last_doze_event; 72 + unsigned long doze_time; 73 + unsigned int lp_wake; 74 + } stats; 61 75 }; 62 76 63 77 struct mt76_connac_coredump { ··· 97 83 struct mt76_connac_pm *pm); 98 84 void mt76_connac_free_pending_tx_skbs(struct mt76_connac_pm *pm, 99 85 struct mt76_wcid *wcid); 86 + 87 + static inline bool 88 + mt76_connac_pm_ref(struct mt76_phy *phy, struct mt76_connac_pm *pm) 89 + { 90 + bool ret = false; 91 + 92 + spin_lock_bh(&pm->wake.lock); 93 + if (test_bit(MT76_STATE_PM, &phy->state)) 94 + goto out; 95 + 96 + pm->wake.count++; 97 + ret = true; 98 + out: 99 + spin_unlock_bh(&pm->wake.lock); 100 + 101 + return ret; 102 + } 103 + 104 + static inline void 105 + mt76_connac_pm_unref(struct mt76_connac_pm *pm) 106 + { 107 + spin_lock_bh(&pm->wake.lock); 108 + pm->wake.count--; 109 + pm->last_activity = jiffies; 110 + spin_unlock_bh(&pm->wake.lock); 111 + } 112 + 113 + static inline bool 114 + mt76_connac_skip_fw_pmctrl(struct mt76_phy *phy, struct mt76_connac_pm *pm) 115 + { 116 + bool ret; 117 + 118 + spin_lock_bh(&pm->wake.lock); 119 + ret = pm->wake.count || test_and_set_bit(MT76_STATE_PM, &phy->state); 120 + spin_unlock_bh(&pm->wake.lock); 121 + 122 + return ret; 123 + } 100 124 101 125 static inline void 102 126 mt76_connac_mutex_acquire(struct mt76_dev *dev, struct mt76_connac_pm *pm)
+9 -14
drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
··· 13 13 if (!mt76_is_mmio(dev)) 14 14 return 0; 15 15 16 + cancel_delayed_work_sync(&pm->ps_work); 16 17 if (!test_bit(MT76_STATE_PM, &phy->state)) 17 18 return 0; 18 19 19 - if (test_bit(MT76_HW_SCANNING, &phy->state) || 20 - test_bit(MT76_HW_SCHED_SCANNING, &phy->state)) 21 - return 0; 22 - 23 - if (queue_work(dev->wq, &pm->wake_work)) 24 - reinit_completion(&pm->wake_cmpl); 25 - 26 - if (!wait_for_completion_timeout(&pm->wake_cmpl, 3 * HZ)) { 20 + queue_work(dev->wq, &pm->wake_work); 21 + if (!wait_event_timeout(pm->wait, 22 + !test_bit(MT76_STATE_PM, &phy->state), 23 + 3 * HZ)) { 27 24 ieee80211_wake_queues(phy->hw); 28 25 return -ETIMEDOUT; 29 26 } ··· 37 40 if (!mt76_is_mmio(dev)) 38 41 return; 39 42 40 - if (!pm->enable || !test_bit(MT76_STATE_RUNNING, &phy->state)) 43 + if (!pm->enable) 41 44 return; 42 45 43 46 pm->last_activity = jiffies; 44 47 45 - if (test_bit(MT76_HW_SCANNING, &phy->state) || 46 - test_bit(MT76_HW_SCHED_SCANNING, &phy->state)) 47 - return; 48 - 49 - if (!test_bit(MT76_STATE_PM, &phy->state)) 48 + if (!test_bit(MT76_STATE_PM, &phy->state)) { 49 + cancel_delayed_work(&phy->mac_work); 50 50 queue_delayed_work(dev->wq, &pm->ps_work, pm->idle_timeout); 51 + } 51 52 } 52 53 EXPORT_SYMBOL_GPL(mt76_connac_power_save_sched); 53 54
+189 -8
drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
··· 1528 1528 1529 1529 int mt76_connac_mcu_chip_config(struct mt76_dev *dev) 1530 1530 { 1531 - struct { 1532 - __le16 id; 1533 - u8 type; 1534 - u8 resp_type; 1535 - __le16 data_size; 1536 - __le16 resv; 1537 - u8 data[320]; 1538 - } req = { 1531 + struct mt76_connac_config req = { 1539 1532 .resp_type = 0, 1540 1533 }; 1541 1534 ··· 1538 1545 false); 1539 1546 } 1540 1547 EXPORT_SYMBOL_GPL(mt76_connac_mcu_chip_config); 1548 + 1549 + int mt76_connac_mcu_set_deep_sleep(struct mt76_dev *dev, bool enable) 1550 + { 1551 + struct mt76_connac_config req = { 1552 + .resp_type = 0, 1553 + }; 1554 + 1555 + snprintf(req.data, sizeof(req.data), "KeepFullPwr %d", !enable); 1556 + 1557 + return mt76_mcu_send_msg(dev, MCU_CMD_CHIP_CONFIG, &req, sizeof(req), 1558 + false); 1559 + } 1560 + EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_deep_sleep); 1541 1561 1542 1562 void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb, 1543 1563 struct mt76_connac_coredump *coredump) ··· 1565 1559 MT76_CONNAC_COREDUMP_TIMEOUT); 1566 1560 } 1567 1561 EXPORT_SYMBOL_GPL(mt76_connac_mcu_coredump_event); 1562 + 1563 + static void 1564 + mt76_connac_mcu_build_sku(struct mt76_dev *dev, s8 *sku, 1565 + struct mt76_power_limits *limits, 1566 + enum nl80211_band band) 1567 + { 1568 + int max_power = is_mt7921(dev) ? 
127 : 63; 1569 + int i, offset = sizeof(limits->cck); 1570 + 1571 + memset(sku, max_power, MT_SKU_POWER_LIMIT); 1572 + 1573 + if (band == NL80211_BAND_2GHZ) { 1574 + /* cck */ 1575 + memcpy(sku, limits->cck, sizeof(limits->cck)); 1576 + } 1577 + 1578 + /* ofdm */ 1579 + memcpy(&sku[offset], limits->ofdm, sizeof(limits->ofdm)); 1580 + offset += sizeof(limits->ofdm); 1581 + 1582 + /* ht */ 1583 + for (i = 0; i < 2; i++) { 1584 + memcpy(&sku[offset], limits->mcs[i], 8); 1585 + offset += 8; 1586 + } 1587 + sku[offset++] = limits->mcs[0][0]; 1588 + 1589 + /* vht */ 1590 + for (i = 0; i < ARRAY_SIZE(limits->mcs); i++) { 1591 + memcpy(&sku[offset], limits->mcs[i], 1592 + ARRAY_SIZE(limits->mcs[i])); 1593 + offset += 12; 1594 + } 1595 + 1596 + if (!is_mt7921(dev)) 1597 + return; 1598 + 1599 + /* he */ 1600 + for (i = 0; i < ARRAY_SIZE(limits->ru); i++) { 1601 + memcpy(&sku[offset], limits->ru[i], ARRAY_SIZE(limits->ru[i])); 1602 + offset += ARRAY_SIZE(limits->ru[i]); 1603 + } 1604 + } 1605 + 1606 + static int 1607 + mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy, 1608 + enum nl80211_band band) 1609 + { 1610 + struct mt76_dev *dev = phy->dev; 1611 + int sku_len, batch_len = is_mt7921(dev) ? 8 : 16; 1612 + static const u8 chan_list_2ghz[] = { 1613 + 1, 2, 3, 4, 5, 6, 7, 1614 + 8, 9, 10, 11, 12, 13, 14 1615 + }; 1616 + static const u8 chan_list_5ghz[] = { 1617 + 36, 38, 40, 42, 44, 46, 48, 1618 + 50, 52, 54, 56, 58, 60, 62, 1619 + 64, 100, 102, 104, 106, 108, 110, 1620 + 112, 114, 116, 118, 120, 122, 124, 1621 + 126, 128, 132, 134, 136, 138, 140, 1622 + 142, 144, 149, 151, 153, 155, 157, 1623 + 159, 161, 165 1624 + }; 1625 + struct mt76_connac_sku_tlv sku_tlbv; 1626 + int i, n_chan, batch_size, idx = 0; 1627 + struct mt76_power_limits limits; 1628 + const u8 *ch_list; 1629 + 1630 + sku_len = is_mt7921(dev) ? 
sizeof(sku_tlbv) : sizeof(sku_tlbv) - 92; 1631 + 1632 + if (band == NL80211_BAND_2GHZ) { 1633 + n_chan = ARRAY_SIZE(chan_list_2ghz); 1634 + ch_list = chan_list_2ghz; 1635 + } else { 1636 + n_chan = ARRAY_SIZE(chan_list_5ghz); 1637 + ch_list = chan_list_5ghz; 1638 + } 1639 + batch_size = DIV_ROUND_UP(n_chan, batch_len); 1640 + 1641 + for (i = 0; i < batch_size; i++) { 1642 + bool last_msg = i == batch_size - 1; 1643 + int num_ch = last_msg ? n_chan % batch_len : batch_len; 1644 + struct mt76_connac_tx_power_limit_tlv tx_power_tlv = { 1645 + .band = band == NL80211_BAND_2GHZ ? 1 : 2, 1646 + .n_chan = num_ch, 1647 + .last_msg = last_msg, 1648 + }; 1649 + struct sk_buff *skb; 1650 + int j, err, msg_len; 1651 + 1652 + msg_len = sizeof(tx_power_tlv) + num_ch * sizeof(sku_tlbv); 1653 + skb = mt76_mcu_msg_alloc(dev, NULL, msg_len); 1654 + if (!skb) 1655 + return -ENOMEM; 1656 + 1657 + BUILD_BUG_ON(sizeof(dev->alpha2) > sizeof(tx_power_tlv.alpha2)); 1658 + memcpy(tx_power_tlv.alpha2, dev->alpha2, sizeof(dev->alpha2)); 1659 + 1660 + skb_put_data(skb, &tx_power_tlv, sizeof(tx_power_tlv)); 1661 + for (j = 0; j < num_ch; j++, idx++) { 1662 + struct ieee80211_channel chan = { 1663 + .hw_value = ch_list[idx], 1664 + .band = band, 1665 + }; 1666 + 1667 + mt76_get_rate_power_limits(phy, &chan, &limits, 127); 1668 + 1669 + sku_tlbv.channel = ch_list[idx]; 1670 + mt76_connac_mcu_build_sku(dev, sku_tlbv.pwr_limit, 1671 + &limits, band); 1672 + skb_put_data(skb, &sku_tlbv, sku_len); 1673 + } 1674 + 1675 + err = mt76_mcu_skb_send_msg(dev, skb, 1676 + MCU_CMD_SET_RATE_TX_POWER, false); 1677 + if (err < 0) 1678 + return err; 1679 + } 1680 + 1681 + return 0; 1682 + } 1683 + 1684 + int mt76_connac_mcu_set_rate_txpower(struct mt76_phy *phy) 1685 + { 1686 + int err; 1687 + 1688 + err = mt76_connac_mcu_rate_txpower_band(phy, NL80211_BAND_2GHZ); 1689 + if (err < 0) 1690 + return err; 1691 + 1692 + return mt76_connac_mcu_rate_txpower_band(phy, NL80211_BAND_5GHZ); 1693 + } 1694 + 
EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_rate_txpower); 1695 + 1696 + int mt76_connac_mcu_update_arp_filter(struct mt76_dev *dev, 1697 + struct mt76_vif *vif, 1698 + struct ieee80211_bss_conf *info) 1699 + { 1700 + struct sk_buff *skb; 1701 + int i, len = min_t(int, info->arp_addr_cnt, 1702 + IEEE80211_BSS_ARP_ADDR_LIST_LEN); 1703 + struct { 1704 + struct { 1705 + u8 bss_idx; 1706 + u8 pad[3]; 1707 + } __packed hdr; 1708 + struct mt76_connac_arpns_tlv arp; 1709 + } req_hdr = { 1710 + .hdr = { 1711 + .bss_idx = vif->idx, 1712 + }, 1713 + .arp = { 1714 + .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP), 1715 + .len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)), 1716 + .ips_num = len, 1717 + .mode = 2, /* update */ 1718 + .option = 1, 1719 + }, 1720 + }; 1721 + 1722 + skb = mt76_mcu_msg_alloc(dev, NULL, 1723 + sizeof(req_hdr) + len * sizeof(__be32)); 1724 + if (!skb) 1725 + return -ENOMEM; 1726 + 1727 + skb_put_data(skb, &req_hdr, sizeof(req_hdr)); 1728 + for (i = 0; i < len; i++) { 1729 + u8 *addr = (u8 *)skb_put(skb, sizeof(__be32)); 1730 + 1731 + memcpy(addr, &info->arp_addr_list[i], sizeof(__be32)); 1732 + } 1733 + 1734 + return mt76_mcu_skb_send_msg(dev, skb, MCU_UNI_CMD_OFFLOAD, true); 1735 + } 1736 + EXPORT_SYMBOL_GPL(mt76_connac_mcu_update_arp_filter); 1568 1737 1569 1738 #ifdef CONFIG_PM 1570 1739
+37
drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
··· 564 564 MCU_CMD_CHIP_CONFIG = MCU_CE_PREFIX | 0xca, 565 565 MCU_CMD_FWLOG_2_HOST = MCU_CE_PREFIX | 0xc5, 566 566 MCU_CMD_GET_WTBL = MCU_CE_PREFIX | 0xcd, 567 + MCU_CMD_GET_TXPWR = MCU_CE_PREFIX | 0xd0, 567 568 }; 568 569 569 570 enum { ··· 896 895 u8 rcpi; 897 896 }; 898 897 898 + #define MT_SKU_POWER_LIMIT 161 899 + 900 + struct mt76_connac_sku_tlv { 901 + u8 channel; 902 + s8 pwr_limit[MT_SKU_POWER_LIMIT]; 903 + } __packed; 904 + 905 + struct mt76_connac_tx_power_limit_tlv { 906 + /* DW0 - common info*/ 907 + u8 ver; 908 + u8 pad0; 909 + __le16 len; 910 + /* DW1 - cmd hint */ 911 + u8 n_chan; /* # channel */ 912 + u8 band; /* 2.4GHz - 5GHz */ 913 + u8 last_msg; 914 + u8 pad1; 915 + /* DW3 */ 916 + u8 alpha2[4]; /* regulatory_request.alpha2 */ 917 + u8 pad2[32]; 918 + } __packed; 919 + 920 + struct mt76_connac_config { 921 + __le16 id; 922 + u8 type; 923 + u8 resp_type; 924 + __le16 data_size; 925 + __le16 resv; 926 + u8 data[320]; 927 + } __packed; 928 + 899 929 #define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)id) 900 930 #define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)id) 901 931 ··· 1019 987 int mt76_connac_mcu_sched_scan_enable(struct mt76_phy *phy, 1020 988 struct ieee80211_vif *vif, 1021 989 bool enable); 990 + int mt76_connac_mcu_update_arp_filter(struct mt76_dev *dev, 991 + struct mt76_vif *vif, 992 + struct ieee80211_bss_conf *info); 1022 993 int mt76_connac_mcu_update_gtk_rekey(struct ieee80211_hw *hw, 1023 994 struct ieee80211_vif *vif, 1024 995 struct cfg80211_gtk_rekey_data *key); ··· 1029 994 void mt76_connac_mcu_set_suspend_iter(void *priv, u8 *mac, 1030 995 struct ieee80211_vif *vif); 1031 996 int mt76_connac_mcu_chip_config(struct mt76_dev *dev); 997 + int mt76_connac_mcu_set_deep_sleep(struct mt76_dev *dev, bool enable); 1032 998 void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb, 1033 999 struct mt76_connac_coredump *coredump); 1000 + int mt76_connac_mcu_set_rate_txpower(struct mt76_phy *phy); 1034 1001 
#endif /* __MT76_CONNAC_MCU_H */
+2 -3
drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
··· 17 17 u32 *rxfce; 18 18 19 19 if (!skb) { 20 - dev_err(mdev->dev, 21 - "MCU message %d (seq %d) timed out\n", cmd, 22 - seq); 20 + dev_err(mdev->dev, "MCU message %02x (seq %d) timed out\n", 21 + abs(cmd), seq); 23 22 dev->mcu_timeout = 1; 24 23 return -ETIMEDOUT; 25 24 }
+3 -1
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
··· 226 226 if (ret) 227 227 return ret; 228 228 229 - ret = mt76_init_queues(dev); 229 + ret = mt76_init_queues(dev, mt76_dma_rx_poll); 230 230 if (ret) 231 231 return ret; 232 232 ··· 471 471 mt76_for_each_q_rx(&dev->mt76, i) { 472 472 mt76_queue_rx_reset(dev, i); 473 473 } 474 + 475 + mt76_tx_status_check(&dev->mt76, NULL, true); 474 476 475 477 mt76x02_mac_start(dev); 476 478
+45 -31
drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
··· 299 299 } 300 300 301 301 static void 302 - mt7915_puts_rate_txpower(struct seq_file *s, s8 *delta, 303 - s8 txpower_cur, int band) 302 + mt7915_puts_rate_txpower(struct seq_file *s, struct mt7915_phy *phy) 304 303 { 305 304 static const char * const sku_group_name[] = { 306 305 "CCK", "OFDM", "HT20", "HT40", ··· 307 308 "RU26", "RU52", "RU106", "RU242/SU20", 308 309 "RU484/SU40", "RU996/SU80", "RU2x996/SU160" 309 310 }; 310 - s8 txpower[MT7915_SKU_RATE_NUM]; 311 + struct mt7915_dev *dev = dev_get_drvdata(s->private); 312 + bool ext_phy = phy != &dev->phy; 313 + u32 reg_base; 311 314 int i, idx = 0; 312 315 313 - for (i = 0; i < MT7915_SKU_RATE_NUM; i++) 314 - txpower[i] = DIV_ROUND_UP(txpower_cur + delta[i], 2); 316 + if (!phy) 317 + return; 315 318 316 - for (i = 0; i < MAX_SKU_RATE_GROUP_NUM; i++) { 317 - const struct sku_group *sku = &mt7915_sku_groups[i]; 318 - u32 offset = sku->offset[band]; 319 + reg_base = MT_TMAC_FP0R0(ext_phy); 320 + seq_printf(s, "\nBand %d\n", ext_phy); 319 321 320 - if (!offset) { 321 - idx += sku->len; 322 - continue; 322 + for (i = 0; i < ARRAY_SIZE(mt7915_sku_group_len); i++) { 323 + u8 cnt, mcs_num = mt7915_sku_group_len[i]; 324 + s8 txpower[12]; 325 + int j; 326 + 327 + if (i == SKU_HT_BW20 || i == SKU_HT_BW40) { 328 + mcs_num = 8; 329 + } else if (i >= SKU_VHT_BW20 && i <= SKU_VHT_BW160) { 330 + mcs_num = 10; 331 + } else if (i == SKU_HE_RU26) { 332 + reg_base = MT_TMAC_FP0R18(ext_phy); 333 + idx = 0; 323 334 } 324 335 325 - mt76_seq_puts_array(s, sku_group_name[i], 326 - txpower + idx, sku->len); 327 - idx += sku->len; 336 + for (j = 0, cnt = 0; j < DIV_ROUND_UP(mcs_num, 4); j++) { 337 + u32 val; 338 + 339 + if (i == SKU_VHT_BW160 && idx == 60) { 340 + reg_base = MT_TMAC_FP0R15(ext_phy); 341 + idx = 0; 342 + } 343 + 344 + val = mt76_rr(dev, reg_base + (idx / 4) * 4); 345 + 346 + if (idx && idx % 4) 347 + val >>= (idx % 4) * 8; 348 + 349 + while (val > 0 && cnt < mcs_num) { 350 + s8 pwr = FIELD_GET(MT_TMAC_FP_MASK, val); 351 
+ 352 + txpower[cnt++] = pwr; 353 + val >>= 8; 354 + idx++; 355 + } 356 + } 357 + 358 + mt76_seq_puts_array(s, sku_group_name[i], txpower, mcs_num); 328 359 } 329 360 } 330 361 ··· 362 333 mt7915_read_rate_txpower(struct seq_file *s, void *data) 363 334 { 364 335 struct mt7915_dev *dev = dev_get_drvdata(s->private); 365 - struct mt76_phy *mphy = &dev->mphy; 366 - enum nl80211_band band = mphy->chandef.chan->band; 367 - s8 *delta = dev->rate_power[band]; 368 - s8 txpower_base = mphy->txpower_cur - delta[MT7915_SKU_MAX_DELTA_IDX]; 369 336 370 - seq_puts(s, "Band 0:\n"); 371 - mt7915_puts_rate_txpower(s, delta, txpower_base, band); 372 - 373 - if (dev->mt76.phy2) { 374 - mphy = dev->mt76.phy2; 375 - band = mphy->chandef.chan->band; 376 - delta = dev->rate_power[band]; 377 - txpower_base = mphy->txpower_cur - 378 - delta[MT7915_SKU_MAX_DELTA_IDX]; 379 - 380 - seq_puts(s, "Band 1:\n"); 381 - mt7915_puts_rate_txpower(s, delta, txpower_base, band); 382 - } 337 + mt7915_puts_rate_txpower(s, &dev->phy); 338 + mt7915_puts_rate_txpower(s, mt7915_ext_phy(dev)); 383 339 384 340 return 0; 385 341 }
+1 -1
drivers/net/wireless/mediatek/mt76/mt7915/dma.c
··· 213 213 return ret; 214 214 } 215 215 216 - ret = mt76_init_queues(dev); 216 + ret = mt76_init_queues(dev, mt76_dma_rx_poll); 217 217 if (ret < 0) 218 218 return ret; 219 219
+52 -115
drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
··· 8 8 { 9 9 u8 *data = dev->mt76.eeprom.data; 10 10 11 - if (data[offset] == 0xff) 11 + if (data[offset] == 0xff && !dev->flash_mode) 12 12 mt7915_mcu_get_eeprom(dev, offset); 13 13 14 14 return data[offset]; 15 + } 16 + 17 + static int mt7915_eeprom_load_precal(struct mt7915_dev *dev) 18 + { 19 + struct mt76_dev *mdev = &dev->mt76; 20 + u32 val; 21 + 22 + val = mt7915_eeprom_read(dev, MT_EE_DO_PRE_CAL); 23 + if (val != (MT_EE_WIFI_CAL_DPD | MT_EE_WIFI_CAL_GROUP)) 24 + return 0; 25 + 26 + val = MT_EE_CAL_GROUP_SIZE + MT_EE_CAL_DPD_SIZE; 27 + dev->cal = devm_kzalloc(mdev->dev, val, GFP_KERNEL); 28 + if (!dev->cal) 29 + return -ENOMEM; 30 + 31 + return mt76_get_of_eeprom(mdev, dev->cal, MT_EE_PRECAL, val); 15 32 } 16 33 17 34 static int mt7915_eeprom_load(struct mt7915_dev *dev) ··· 39 22 if (ret < 0) 40 23 return ret; 41 24 42 - if (ret) 25 + if (ret) { 43 26 dev->flash_mode = true; 44 - else 27 + ret = mt7915_eeprom_load_precal(dev); 28 + } else { 45 29 memset(dev->mt76.eeprom.data, -1, MT7915_EEPROM_SIZE); 30 + } 46 31 47 - return 0; 32 + return ret; 48 33 } 49 34 50 35 static int mt7915_check_eeprom(struct mt7915_dev *dev) ··· 170 151 return target_power; 171 152 } 172 153 173 - static const u8 sku_cck_delta_map[] = { 174 - SKU_CCK_GROUP0, 175 - SKU_CCK_GROUP0, 176 - SKU_CCK_GROUP1, 177 - SKU_CCK_GROUP1, 178 - }; 179 - 180 - static const u8 sku_ofdm_delta_map[] = { 181 - SKU_OFDM_GROUP0, 182 - SKU_OFDM_GROUP0, 183 - SKU_OFDM_GROUP1, 184 - SKU_OFDM_GROUP1, 185 - SKU_OFDM_GROUP2, 186 - SKU_OFDM_GROUP2, 187 - SKU_OFDM_GROUP3, 188 - SKU_OFDM_GROUP4, 189 - }; 190 - 191 - static const u8 sku_mcs_delta_map[] = { 192 - SKU_MCS_GROUP0, 193 - SKU_MCS_GROUP1, 194 - SKU_MCS_GROUP1, 195 - SKU_MCS_GROUP2, 196 - SKU_MCS_GROUP2, 197 - SKU_MCS_GROUP3, 198 - SKU_MCS_GROUP4, 199 - SKU_MCS_GROUP5, 200 - SKU_MCS_GROUP6, 201 - SKU_MCS_GROUP7, 202 - SKU_MCS_GROUP8, 203 - SKU_MCS_GROUP9, 204 - }; 205 - 206 - #define SKU_GROUP(_mode, _len, _ofs_2g, _ofs_5g, _map) \ 207 - [_mode] = { \ 
208 - .len = _len, \ 209 - .offset = { \ 210 - _ofs_2g, \ 211 - _ofs_5g, \ 212 - }, \ 213 - .delta_map = _map \ 214 - } 215 - 216 - const struct sku_group mt7915_sku_groups[] = { 217 - SKU_GROUP(SKU_CCK, 4, 0x252, 0, sku_cck_delta_map), 218 - SKU_GROUP(SKU_OFDM, 8, 0x254, 0x29d, sku_ofdm_delta_map), 219 - 220 - SKU_GROUP(SKU_HT_BW20, 8, 0x259, 0x2a2, sku_mcs_delta_map), 221 - SKU_GROUP(SKU_HT_BW40, 9, 0x262, 0x2ab, sku_mcs_delta_map), 222 - SKU_GROUP(SKU_VHT_BW20, 12, 0x259, 0x2a2, sku_mcs_delta_map), 223 - SKU_GROUP(SKU_VHT_BW40, 12, 0x262, 0x2ab, sku_mcs_delta_map), 224 - SKU_GROUP(SKU_VHT_BW80, 12, 0, 0x2b4, sku_mcs_delta_map), 225 - SKU_GROUP(SKU_VHT_BW160, 12, 0, 0, sku_mcs_delta_map), 226 - 227 - SKU_GROUP(SKU_HE_RU26, 12, 0x27f, 0x2dd, sku_mcs_delta_map), 228 - SKU_GROUP(SKU_HE_RU52, 12, 0x289, 0x2e7, sku_mcs_delta_map), 229 - SKU_GROUP(SKU_HE_RU106, 12, 0x293, 0x2f1, sku_mcs_delta_map), 230 - SKU_GROUP(SKU_HE_RU242, 12, 0x26b, 0x2bf, sku_mcs_delta_map), 231 - SKU_GROUP(SKU_HE_RU484, 12, 0x275, 0x2c9, sku_mcs_delta_map), 232 - SKU_GROUP(SKU_HE_RU996, 12, 0, 0x2d3, sku_mcs_delta_map), 233 - SKU_GROUP(SKU_HE_RU2x996, 12, 0, 0, sku_mcs_delta_map), 234 - }; 235 - 236 - static s8 237 - mt7915_get_sku_delta(struct mt7915_dev *dev, u32 addr) 154 + s8 mt7915_eeprom_get_power_delta(struct mt7915_dev *dev, int band) 238 155 { 239 - u32 val = mt7915_eeprom_read(dev, addr); 240 - s8 delta = FIELD_GET(SKU_DELTA_VAL, val); 156 + u32 val; 157 + s8 delta; 241 158 242 - if (!(val & SKU_DELTA_EN)) 159 + if (band == NL80211_BAND_2GHZ) 160 + val = mt7915_eeprom_read(dev, MT_EE_RATE_DELTA_2G); 161 + else 162 + val = mt7915_eeprom_read(dev, MT_EE_RATE_DELTA_5G); 163 + 164 + if (!(val & MT_EE_RATE_DELTA_EN)) 243 165 return 0; 244 166 245 - return val & SKU_DELTA_ADD ? delta : -delta; 167 + delta = FIELD_GET(MT_EE_RATE_DELTA_MASK, val); 168 + 169 + return val & MT_EE_RATE_DELTA_SIGN ? 
delta : -delta; 246 170 } 247 171 248 - static void 249 - mt7915_eeprom_init_sku_band(struct mt7915_dev *dev, 250 - struct ieee80211_supported_band *sband) 251 - { 252 - int i, band = sband->band; 253 - s8 *rate_power = dev->rate_power[band], max_delta = 0; 254 - u8 idx = 0; 255 - 256 - for (i = 0; i < ARRAY_SIZE(mt7915_sku_groups); i++) { 257 - const struct sku_group *sku = &mt7915_sku_groups[i]; 258 - u32 offset = sku->offset[band]; 259 - int j; 260 - 261 - if (!offset) { 262 - idx += sku->len; 263 - continue; 264 - } 265 - 266 - rate_power[idx++] = mt7915_get_sku_delta(dev, offset); 267 - if (rate_power[idx - 1] > max_delta) 268 - max_delta = rate_power[idx - 1]; 269 - 270 - if (i == SKU_HT_BW20 || i == SKU_VHT_BW20) 271 - offset += 1; 272 - 273 - for (j = 1; j < sku->len; j++) { 274 - u32 addr = offset + sku->delta_map[j]; 275 - 276 - rate_power[idx++] = mt7915_get_sku_delta(dev, addr); 277 - if (rate_power[idx - 1] > max_delta) 278 - max_delta = rate_power[idx - 1]; 279 - } 280 - } 281 - 282 - rate_power[idx] = max_delta; 283 - } 284 - 285 - void mt7915_eeprom_init_sku(struct mt7915_dev *dev) 286 - { 287 - mt7915_eeprom_init_sku_band(dev, &dev->mphy.sband_2g.sband); 288 - mt7915_eeprom_init_sku_band(dev, &dev->mphy.sband_5g.sband); 289 - } 172 + const u8 mt7915_sku_group_len[] = { 173 + [SKU_CCK] = 4, 174 + [SKU_OFDM] = 8, 175 + [SKU_HT_BW20] = 8, 176 + [SKU_HT_BW40] = 9, 177 + [SKU_VHT_BW20] = 12, 178 + [SKU_VHT_BW40] = 12, 179 + [SKU_VHT_BW80] = 12, 180 + [SKU_VHT_BW160] = 12, 181 + [SKU_HE_RU26] = 12, 182 + [SKU_HE_RU52] = 12, 183 + [SKU_HE_RU106] = 12, 184 + [SKU_HE_RU242] = 12, 185 + [SKU_HE_RU484] = 12, 186 + [SKU_HE_RU996] = 12, 187 + [SKU_HE_RU2x996] = 12 188 + };
+17 -34
drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
··· 17 17 MT_EE_MAC_ADDR = 0x004, 18 18 MT_EE_MAC_ADDR2 = 0x00a, 19 19 MT_EE_DDIE_FT_VERSION = 0x050, 20 + MT_EE_DO_PRE_CAL = 0x062, 20 21 MT_EE_WIFI_CONF = 0x190, 22 + MT_EE_RATE_DELTA_2G = 0x252, 23 + MT_EE_RATE_DELTA_5G = 0x29d, 21 24 MT_EE_TX0_POWER_2G = 0x2fc, 22 25 MT_EE_TX0_POWER_5G = 0x34b, 23 26 MT_EE_ADIE_FT_VERSION = 0x9a0, 24 27 25 - __MT_EE_MAX = 0xe00 28 + __MT_EE_MAX = 0xe00, 29 + /* 0xe10 ~ 0x5780 used to save group cal data */ 30 + MT_EE_PRECAL = 0xe10 26 31 }; 32 + 33 + #define MT_EE_WIFI_CAL_GROUP BIT(0) 34 + #define MT_EE_WIFI_CAL_DPD GENMASK(2, 1) 35 + #define MT_EE_CAL_UNIT 1024 36 + #define MT_EE_CAL_GROUP_SIZE (44 * MT_EE_CAL_UNIT) 37 + #define MT_EE_CAL_DPD_SIZE (54 * MT_EE_CAL_UNIT) 27 38 28 39 #define MT_EE_WIFI_CONF0_TX_PATH GENMASK(2, 0) 29 40 #define MT_EE_WIFI_CONF0_BAND_SEL GENMASK(7, 6) ··· 45 34 #define MT_EE_WIFI_CONF7_TSSI0_5G BIT(2) 46 35 #define MT_EE_WIFI_CONF7_TSSI1_5G BIT(4) 47 36 37 + #define MT_EE_RATE_DELTA_MASK GENMASK(5, 0) 38 + #define MT_EE_RATE_DELTA_SIGN BIT(6) 39 + #define MT_EE_RATE_DELTA_EN BIT(7) 40 + 48 41 enum mt7915_eeprom_band { 49 42 MT_EE_BAND_SEL_DEFAULT, 50 43 MT_EE_BAND_SEL_5GHZ, 51 44 MT_EE_BAND_SEL_2GHZ, 52 45 MT_EE_BAND_SEL_DUAL, 53 - }; 54 - 55 - #define SKU_DELTA_VAL GENMASK(5, 0) 56 - #define SKU_DELTA_ADD BIT(6) 57 - #define SKU_DELTA_EN BIT(7) 58 - 59 - enum mt7915_sku_delta_group { 60 - SKU_CCK_GROUP0, 61 - SKU_CCK_GROUP1, 62 - 63 - SKU_OFDM_GROUP0 = 0, 64 - SKU_OFDM_GROUP1, 65 - SKU_OFDM_GROUP2, 66 - SKU_OFDM_GROUP3, 67 - SKU_OFDM_GROUP4, 68 - 69 - SKU_MCS_GROUP0 = 0, 70 - SKU_MCS_GROUP1, 71 - SKU_MCS_GROUP2, 72 - SKU_MCS_GROUP3, 73 - SKU_MCS_GROUP4, 74 - SKU_MCS_GROUP5, 75 - SKU_MCS_GROUP6, 76 - SKU_MCS_GROUP7, 77 - SKU_MCS_GROUP8, 78 - SKU_MCS_GROUP9, 79 46 }; 80 47 81 48 enum mt7915_sku_rate_group { ··· 73 84 SKU_HE_RU996, 74 85 SKU_HE_RU2x996, 75 86 MAX_SKU_RATE_GROUP_NUM, 76 - }; 77 - 78 - struct sku_group { 79 - u8 len; 80 - u16 offset[2]; 81 - const u8 *delta_map; 82 87 }; 83 88 84 89 
static inline int ··· 107 124 return eep[MT_EE_WIFI_CONF + 7] & MT_EE_WIFI_CONF7_TSSI0_2G; 108 125 } 109 126 110 - extern const struct sku_group mt7915_sku_groups[]; 127 + extern const u8 mt7915_sku_group_len[MAX_SKU_RATE_GROUP_NUM]; 111 128 112 129 #endif
+47 -38
drivers/net/wireless/mediatek/mt76/mt7915/init.c
··· 68 68 }; 69 69 70 70 static void 71 + mt7915_init_txpower(struct mt7915_dev *dev, 72 + struct ieee80211_supported_band *sband) 73 + { 74 + int i, n_chains = hweight8(dev->mphy.antenna_mask); 75 + int nss_delta = mt76_tx_power_nss_delta(n_chains); 76 + int pwr_delta = mt7915_eeprom_get_power_delta(dev, sband->band); 77 + struct mt76_power_limits limits; 78 + 79 + for (i = 0; i < sband->n_channels; i++) { 80 + struct ieee80211_channel *chan = &sband->channels[i]; 81 + u32 target_power = 0; 82 + int j; 83 + 84 + for (j = 0; j < n_chains; j++) { 85 + u32 val; 86 + 87 + val = mt7915_eeprom_get_target_power(dev, chan, j); 88 + target_power = max(target_power, val); 89 + } 90 + 91 + target_power += pwr_delta; 92 + target_power = mt76_get_rate_power_limits(&dev->mphy, chan, 93 + &limits, 94 + target_power); 95 + target_power += nss_delta; 96 + target_power = DIV_ROUND_UP(target_power, 2); 97 + chan->max_power = min_t(int, chan->max_reg_power, 98 + target_power); 99 + chan->orig_mpwr = target_power; 100 + } 101 + } 102 + 103 + static void 71 104 mt7915_regd_notifier(struct wiphy *wiphy, 72 105 struct regulatory_request *request) 73 106 { ··· 110 77 struct mt7915_phy *phy = mphy->priv; 111 78 struct cfg80211_chan_def *chandef = &mphy->chandef; 112 79 80 + memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2)); 113 81 dev->mt76.region = request->dfs_region; 82 + 83 + mt7915_init_txpower(dev, &mphy->sband_2g.sband); 84 + mt7915_init_txpower(dev, &mphy->sband_5g.sband); 114 85 115 86 if (!(chandef->chan->flags & IEEE80211_CHAN_RADAR)) 116 87 return; ··· 244 207 return mt7915_mcu_set_txbf_type(dev); 245 208 } 246 209 247 - static void 248 - mt7915_init_txpower_band(struct mt7915_dev *dev, 249 - struct ieee80211_supported_band *sband) 250 - { 251 - int i, n_chains = hweight8(dev->mphy.antenna_mask); 252 - 253 - for (i = 0; i < sband->n_channels; i++) { 254 - struct ieee80211_channel *chan = &sband->channels[i]; 255 - u32 target_power = 0; 256 - int j; 257 - 258 - 
for (j = 0; j < n_chains; j++) { 259 - u32 val; 260 - 261 - val = mt7915_eeprom_get_target_power(dev, chan, j); 262 - target_power = max(target_power, val); 263 - } 264 - 265 - chan->max_power = min_t(int, chan->max_reg_power, 266 - target_power / 2); 267 - chan->orig_mpwr = target_power / 2; 268 - } 269 - } 270 - 271 - static void mt7915_init_txpower(struct mt7915_dev *dev) 272 - { 273 - mt7915_init_txpower_band(dev, &dev->mphy.sband_2g.sband); 274 - mt7915_init_txpower_band(dev, &dev->mphy.sband_5g.sband); 275 - 276 - mt7915_eeprom_init_sku(dev); 277 - } 278 - 279 210 static int mt7915_register_ext_phy(struct mt7915_dev *dev) 280 211 { 281 212 struct mt7915_phy *phy = mt7915_ext_phy(dev); ··· 300 295 301 296 mt7915_mcu_set_eeprom(dev); 302 297 mt7915_mac_init(dev); 303 - mt7915_init_txpower(dev); 298 + mt7915_init_txpower(dev, &dev->mphy.sband_2g.sband); 299 + mt7915_init_txpower(dev, &dev->mphy.sband_5g.sband); 304 300 mt7915_txbf_init(dev); 305 301 } 306 302 ··· 351 345 mt76_wr(dev, MT_INT_SOURCE_CSR, ~0); 352 346 353 347 INIT_WORK(&dev->init_work, mt7915_init_work); 354 - spin_lock_init(&dev->token_lock); 355 - idr_init(&dev->token); 356 - 357 348 dev->dbdc_support = !!(mt76_rr(dev, MT_HW_BOUND) & BIT(5)); 358 349 359 350 /* If MCU was already running, it is likely in a bad state */ ··· 383 380 ret = mt7915_eeprom_init(dev); 384 381 if (ret < 0) 385 382 return ret; 383 + 384 + 385 + if (dev->flash_mode) { 386 + ret = mt7915_mcu_apply_group_cal(dev); 387 + if (ret) 388 + return ret; 389 + } 386 390 387 391 /* Beacon and mgmt frames should occupy wcid 0 */ 388 392 idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA - 1); ··· 750 740 mt7915_unregister_ext_phy(dev); 751 741 mt76_unregister_device(&dev->mt76); 752 742 mt7915_mcu_exit(dev); 753 - mt7915_dma_cleanup(dev); 754 - 755 743 mt7915_tx_token_put(dev); 744 + mt7915_dma_cleanup(dev); 756 745 757 746 mt76_free_device(&dev->mt76); 758 747 }
+34 -61
drivers/net/wireless/mediatek/mt76/mt7915/mac.c
··· 661 661 { 662 662 #ifdef CONFIG_NL80211_TESTMODE 663 663 struct mt76_testmode_data *td = &phy->mt76->test; 664 + const struct ieee80211_rate *r; 665 + u8 bw, mode, nss = td->tx_rate_nss; 664 666 u8 rate_idx = td->tx_rate_idx; 665 - u8 nss = td->tx_rate_nss; 666 - u8 bw, mode; 667 667 u16 rateval = 0; 668 668 u32 val; 669 + bool cck = false; 670 + int band; 669 671 670 672 if (skb != phy->mt76->test.tx_skb) 671 673 return; 672 674 673 675 switch (td->tx_rate_mode) { 674 - case MT76_TM_TX_MODE_CCK: 675 - mode = MT_PHY_TYPE_CCK; 676 - break; 677 676 case MT76_TM_TX_MODE_HT: 678 677 nss = 1 + (rate_idx >> 3); 679 678 mode = MT_PHY_TYPE_HT; ··· 692 693 case MT76_TM_TX_MODE_HE_MU: 693 694 mode = MT_PHY_TYPE_HE_MU; 694 695 break; 696 + case MT76_TM_TX_MODE_CCK: 697 + cck = true; 698 + fallthrough; 695 699 case MT76_TM_TX_MODE_OFDM: 700 + band = phy->mt76->chandef.chan->band; 701 + if (band == NL80211_BAND_2GHZ && !cck) 702 + rate_idx += 4; 703 + 704 + r = &phy->mt76->hw->wiphy->bands[band]->bitrates[rate_idx]; 705 + val = cck ? 
r->hw_value_short : r->hw_value; 706 + 707 + mode = val >> 8; 708 + rate_idx = val & 0xff; 709 + break; 696 710 default: 697 711 mode = MT_PHY_TYPE_OFDM; 698 712 break; ··· 760 748 if (mode >= MT_PHY_TYPE_HE_SU) 761 749 val |= FIELD_PREP(MT_TXD6_HELTF, td->tx_ltf); 762 750 763 - if (td->tx_rate_ldpc || bw > 0) 751 + if (td->tx_rate_ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU)) 764 752 val |= MT_TXD6_LDPC; 765 753 754 + txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID); 766 755 txwi[6] |= cpu_to_le32(val); 767 756 txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX, 768 757 phy->test.spe_idx)); ··· 974 961 mt7915_mac_write_txwi_tm(mphy->priv, txwi, skb); 975 962 } 976 963 977 - static void 978 - mt7915_set_tx_blocked(struct mt7915_dev *dev, bool blocked) 979 - { 980 - struct mt76_phy *mphy = &dev->mphy, *mphy2 = dev->mt76.phy2; 981 - struct mt76_queue *q, *q2 = NULL; 982 - 983 - q = mphy->q_tx[0]; 984 - if (blocked == q->blocked) 985 - return; 986 - 987 - q->blocked = blocked; 988 - if (mphy2) { 989 - q2 = mphy2->q_tx[0]; 990 - q2->blocked = blocked; 991 - } 992 - 993 - if (!blocked) 994 - mt76_worker_schedule(&dev->mt76.tx_worker); 995 - } 996 - 997 964 int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, 998 965 enum mt76_txq_id qid, struct mt76_wcid *wcid, 999 966 struct ieee80211_sta *sta, ··· 1026 1033 t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size); 1027 1034 t->skb = tx_info->skb; 1028 1035 1029 - spin_lock_bh(&dev->token_lock); 1030 - id = idr_alloc(&dev->token, t, 0, MT7915_TOKEN_SIZE, GFP_ATOMIC); 1031 - if (id >= 0) 1032 - dev->token_count++; 1033 - 1034 - if (dev->token_count >= MT7915_TOKEN_SIZE - MT7915_TOKEN_FREE_THR) 1035 - mt7915_set_tx_blocked(dev, true); 1036 - spin_unlock_bh(&dev->token_lock); 1037 - 1036 + id = mt76_token_consume(mdev, &t); 1038 1037 if (id < 0) 1039 1038 return id; 1040 1039 ··· 1190 1205 msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info); 1191 1206 stat = FIELD_GET(MT_TX_FREE_STATUS, info); 1192 1207 1193 - 
spin_lock_bh(&dev->token_lock); 1194 - txwi = idr_remove(&dev->token, msdu); 1195 - if (txwi) 1196 - dev->token_count--; 1197 - if (dev->token_count < MT7915_TOKEN_SIZE - MT7915_TOKEN_FREE_THR && 1198 - dev->mphy.q_tx[0]->blocked) 1199 - wake = true; 1200 - spin_unlock_bh(&dev->token_lock); 1201 - 1208 + txwi = mt76_token_release(mdev, msdu, &wake); 1202 1209 if (!txwi) 1203 1210 continue; 1204 1211 ··· 1220 1243 1221 1244 mt7915_mac_sta_poll(dev); 1222 1245 1223 - if (wake) { 1224 - spin_lock_bh(&dev->token_lock); 1225 - mt7915_set_tx_blocked(dev, false); 1226 - spin_unlock_bh(&dev->token_lock); 1227 - } 1246 + if (wake) 1247 + mt76_set_tx_blocked(&dev->mt76, false); 1228 1248 1229 1249 mt76_worker_schedule(&dev->mt76.tx_worker); 1230 1250 ··· 1250 1276 struct mt7915_txp *txp; 1251 1277 1252 1278 txp = mt7915_txwi_to_txp(mdev, e->txwi); 1253 - 1254 - spin_lock_bh(&dev->token_lock); 1255 - t = idr_remove(&dev->token, le16_to_cpu(txp->token)); 1256 - spin_unlock_bh(&dev->token_lock); 1279 + t = mt76_token_put(mdev, le16_to_cpu(txp->token)); 1257 1280 e->skb = t ? 
t->skb : NULL; 1258 1281 } 1259 1282 ··· 1522 1551 mt76_for_each_q_rx(&dev->mt76, i) 1523 1552 mt76_queue_rx_reset(dev, i); 1524 1553 1554 + mt76_tx_status_check(&dev->mt76, NULL, true); 1555 + 1525 1556 /* re-init prefetch settings after reset */ 1526 1557 mt7915_dma_prefetch(dev); 1527 1558 ··· 1546 1573 struct mt76_txwi_cache *txwi; 1547 1574 int id; 1548 1575 1549 - spin_lock_bh(&dev->token_lock); 1550 - idr_for_each_entry(&dev->token, txwi, id) { 1576 + spin_lock_bh(&dev->mt76.token_lock); 1577 + idr_for_each_entry(&dev->mt76.token, txwi, id) { 1551 1578 mt7915_txp_skb_unmap(&dev->mt76, txwi); 1552 1579 if (txwi->skb) { 1553 1580 struct ieee80211_hw *hw; ··· 1556 1583 ieee80211_free_txskb(hw, txwi->skb); 1557 1584 } 1558 1585 mt76_put_txwi(&dev->mt76, txwi); 1559 - dev->token_count--; 1586 + dev->mt76.token_count--; 1560 1587 } 1561 - spin_unlock_bh(&dev->token_lock); 1562 - idr_destroy(&dev->token); 1588 + spin_unlock_bh(&dev->mt76.token_lock); 1589 + idr_destroy(&dev->mt76.token); 1563 1590 } 1564 1591 1565 1592 /* system error recovery */ ··· 1603 1630 1604 1631 mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED); 1605 1632 1606 - mt7915_tx_token_put(dev); 1607 - idr_init(&dev->token); 1608 - 1609 1633 if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) { 1610 1634 mt7915_dma_reset(dev); 1635 + 1636 + mt7915_tx_token_put(dev); 1637 + idr_init(&dev->mt76.token); 1611 1638 1612 1639 mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT); 1613 1640 mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
+7 -1
drivers/net/wireless/mediatek/mt76/mt7915/main.c
··· 313 313 mt7915_init_dfs_state(phy); 314 314 mt76_set_channel(phy->mt76); 315 315 316 + if (dev->flash_mode) { 317 + ret = mt7915_mcu_apply_tx_dpd(phy); 318 + if (ret) 319 + goto out; 320 + } 321 + 316 322 ret = mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(CHANNEL_SWITCH)); 317 323 if (ret) 318 324 goto out; ··· 429 423 } 430 424 431 425 if (changed & IEEE80211_CONF_CHANGE_POWER) { 432 - ret = mt7915_mcu_set_sku(phy); 426 + ret = mt7915_mcu_set_txpower_sku(phy); 433 427 if (ret) 434 428 return ret; 435 429 }
+175 -10
drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
··· 217 217 int ret = 0; 218 218 219 219 if (!skb) { 220 - dev_err(mdev->dev, "Message %d (seq %d) timeout\n", 220 + dev_err(mdev->dev, "Message %08x (seq %d) timeout\n", 221 221 cmd, seq); 222 222 return -ETIMEDOUT; 223 223 } ··· 521 521 break; 522 522 } 523 523 524 - wiphy_info(mt76_hw(dev)->wiphy, "%s: %*s", type, 524 + wiphy_info(mt76_hw(dev)->wiphy, "%s: %.*s", type, 525 525 (int)(skb->len - sizeof(*rxd)), data); 526 526 } 527 527 ··· 3327 3327 return 0; 3328 3328 } 3329 3329 3330 + static int mt7915_mcu_set_pre_cal(struct mt7915_dev *dev, u8 idx, 3331 + u8 *data, u32 len, int cmd) 3332 + { 3333 + struct { 3334 + u8 dir; 3335 + u8 valid; 3336 + __le16 bitmap; 3337 + s8 precal; 3338 + u8 action; 3339 + u8 band; 3340 + u8 idx; 3341 + u8 rsv[4]; 3342 + __le32 len; 3343 + } req; 3344 + struct sk_buff *skb; 3345 + 3346 + skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(req) + len); 3347 + if (!skb) 3348 + return -ENOMEM; 3349 + 3350 + req.idx = idx; 3351 + req.len = cpu_to_le32(len); 3352 + skb_put_data(skb, &req, sizeof(req)); 3353 + skb_put_data(skb, data, len); 3354 + 3355 + return mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, false); 3356 + } 3357 + 3358 + int mt7915_mcu_apply_group_cal(struct mt7915_dev *dev) 3359 + { 3360 + u8 idx = 0, *cal = dev->cal, *eep = dev->mt76.eeprom.data; 3361 + u32 total = MT_EE_CAL_GROUP_SIZE; 3362 + 3363 + if (!(eep[MT_EE_DO_PRE_CAL] & MT_EE_WIFI_CAL_GROUP)) 3364 + return 0; 3365 + 3366 + /* 3367 + * Items: Rx DCOC, RSSI DCOC, Tx TSSI DCOC, Tx LPFG 3368 + * Tx FDIQ, Tx DCIQ, Rx FDIQ, Rx FIIQ, ADCDCOC 3369 + */ 3370 + while (total > 0) { 3371 + int ret, len; 3372 + 3373 + len = min_t(u32, total, MT_EE_CAL_UNIT); 3374 + 3375 + ret = mt7915_mcu_set_pre_cal(dev, idx, cal, len, 3376 + MCU_EXT_CMD(GROUP_PRE_CAL_INFO)); 3377 + if (ret) 3378 + return ret; 3379 + 3380 + total -= len; 3381 + cal += len; 3382 + idx++; 3383 + } 3384 + 3385 + return 0; 3386 + } 3387 + 3388 + static int mt7915_find_freq_idx(const u16 *freqs, int n_freqs, u16 
cur) 3389 + { 3390 + int i; 3391 + 3392 + for (i = 0; i < n_freqs; i++) 3393 + if (cur == freqs[i]) 3394 + return i; 3395 + 3396 + return -1; 3397 + } 3398 + 3399 + static int mt7915_dpd_freq_idx(u16 freq, u8 bw) 3400 + { 3401 + static const u16 freq_list[] = { 3402 + 5180, 5200, 5220, 5240, 3403 + 5260, 5280, 5300, 5320, 3404 + 5500, 5520, 5540, 5560, 3405 + 5580, 5600, 5620, 5640, 3406 + 5660, 5680, 5700, 5745, 3407 + 5765, 5785, 5805, 5825 3408 + }; 3409 + int offset_2g = ARRAY_SIZE(freq_list); 3410 + int idx; 3411 + 3412 + if (freq < 4000) { 3413 + if (freq < 2432) 3414 + return offset_2g; 3415 + if (freq < 2457) 3416 + return offset_2g + 1; 3417 + 3418 + return offset_2g + 2; 3419 + } 3420 + 3421 + if (bw == NL80211_CHAN_WIDTH_80P80 || bw == NL80211_CHAN_WIDTH_160) 3422 + return -1; 3423 + 3424 + if (bw != NL80211_CHAN_WIDTH_20) { 3425 + idx = mt7915_find_freq_idx(freq_list, ARRAY_SIZE(freq_list), 3426 + freq + 10); 3427 + if (idx >= 0) 3428 + return idx; 3429 + 3430 + idx = mt7915_find_freq_idx(freq_list, ARRAY_SIZE(freq_list), 3431 + freq - 10); 3432 + if (idx >= 0) 3433 + return idx; 3434 + } 3435 + 3436 + return mt7915_find_freq_idx(freq_list, ARRAY_SIZE(freq_list), freq); 3437 + } 3438 + 3439 + int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy) 3440 + { 3441 + struct mt7915_dev *dev = phy->dev; 3442 + struct cfg80211_chan_def *chandef = &phy->mt76->chandef; 3443 + u16 total = 2, idx, center_freq = chandef->center_freq1; 3444 + u8 *cal = dev->cal, *eep = dev->mt76.eeprom.data; 3445 + 3446 + if (!(eep[MT_EE_DO_PRE_CAL] & MT_EE_WIFI_CAL_DPD)) 3447 + return 0; 3448 + 3449 + idx = mt7915_dpd_freq_idx(center_freq, chandef->width); 3450 + if (idx < 0) 3451 + return -EINVAL; 3452 + 3453 + /* Items: Tx DPD, Tx Flatness */ 3454 + idx = idx * 2; 3455 + cal += MT_EE_CAL_GROUP_SIZE; 3456 + 3457 + while (total--) { 3458 + int ret; 3459 + 3460 + cal += (idx * MT_EE_CAL_UNIT); 3461 + ret = mt7915_mcu_set_pre_cal(dev, idx, cal, MT_EE_CAL_UNIT, 3462 + 
MCU_EXT_CMD(DPD_PRE_CAL_INFO)); 3463 + if (ret) 3464 + return ret; 3465 + 3466 + idx++; 3467 + } 3468 + 3469 + return 0; 3470 + } 3471 + 3330 3472 int mt7915_mcu_get_temperature(struct mt7915_dev *dev, int index) 3331 3473 { 3332 3474 struct { ··· 3503 3361 sizeof(req), false); 3504 3362 } 3505 3363 3506 - int mt7915_mcu_set_sku(struct mt7915_phy *phy) 3364 + int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy) 3507 3365 { 3366 + #define MT7915_SKU_RATE_NUM 161 3508 3367 struct mt7915_dev *dev = phy->dev; 3509 3368 struct mt76_phy *mphy = phy->mt76; 3510 3369 struct ieee80211_hw *hw = mphy->hw; ··· 3518 3375 .format_id = 4, 3519 3376 .dbdc_idx = phy != &dev->phy, 3520 3377 }; 3521 - int i; 3522 - s8 *delta; 3378 + struct mt76_power_limits limits_array; 3379 + s8 *la = (s8 *)&limits_array; 3380 + int i, idx, n_chains = hweight8(mphy->antenna_mask); 3381 + int tx_power; 3523 3382 3524 - delta = dev->rate_power[mphy->chandef.chan->band]; 3525 - mphy->txpower_cur = hw->conf.power_level * 2 + 3526 - delta[MT7915_SKU_MAX_DELTA_IDX]; 3383 + tx_power = hw->conf.power_level * 2 - 3384 + mt76_tx_power_nss_delta(n_chains); 3527 3385 3528 - for (i = 0; i < MT7915_SKU_RATE_NUM; i++) 3529 - req.val[i] = hw->conf.power_level * 2 + delta[i]; 3386 + tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan, 3387 + &limits_array, tx_power); 3388 + mphy->txpower_cur = tx_power; 3389 + 3390 + for (i = 0, idx = 0; i < ARRAY_SIZE(mt7915_sku_group_len); i++) { 3391 + u8 mcs_num, len = mt7915_sku_group_len[i]; 3392 + int j; 3393 + 3394 + if (i >= SKU_HT_BW20 && i <= SKU_VHT_BW160) { 3395 + mcs_num = 10; 3396 + 3397 + if (i == SKU_HT_BW20 || i == SKU_VHT_BW20) 3398 + la = (s8 *)&limits_array + 12; 3399 + } else { 3400 + mcs_num = len; 3401 + } 3402 + 3403 + for (j = 0; j < min_t(u8, mcs_num, len); j++) 3404 + req.val[idx + j] = la[j]; 3405 + 3406 + la += mcs_num; 3407 + idx += len; 3408 + } 3530 3409 3531 3410 return mt76_mcu_send_msg(&dev->mt76, 3532 3411 
MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), &req,
+2
drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
··· 284 284 MCU_EXT_CMD_FW_DBG_CTRL = 0x95, 285 285 MCU_EXT_CMD_SET_RDD_TH = 0x9d, 286 286 MCU_EXT_CMD_SET_SPR = 0xa8, 287 + MCU_EXT_CMD_GROUP_PRE_CAL_INFO = 0xab, 288 + MCU_EXT_CMD_DPD_PRE_CAL_INFO = 0xac, 287 289 MCU_EXT_CMD_PHY_STAT_INFO = 0xad, 288 290 }; 289 291
+6 -13
drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
··· 32 32 33 33 #define MT7915_EEPROM_SIZE 3584 34 34 #define MT7915_TOKEN_SIZE 8192 35 - #define MT7915_TOKEN_FREE_THR 64 36 35 37 36 #define MT7915_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */ 38 37 #define MT7915_CFEND_RATE_11B 0x03 /* 11B LP, 11M */ 39 38 #define MT7915_5G_RATE_DEFAULT 0x4b /* OFDM 6M */ 40 39 #define MT7915_2G_RATE_DEFAULT 0x0 /* CCK 1M */ 41 - 42 - #define MT7915_SKU_RATE_NUM 161 43 - #define MT7915_SKU_MAX_DELTA_IDX MT7915_SKU_RATE_NUM 44 - #define MT7915_SKU_TABLE_SIZE (MT7915_SKU_RATE_NUM + 1) 45 40 46 41 struct mt7915_vif; 47 42 struct mt7915_sta; ··· 186 191 187 192 u32 hw_pattern; 188 193 189 - spinlock_t token_lock; 190 - int token_count; 191 - struct idr token; 192 - 193 - s8 **rate_power; /* TODO: use mt76_rate_power */ 194 - 195 194 bool dbdc_support; 196 195 bool flash_mode; 197 196 bool fw_debug; 198 197 bool ibf; 198 + 199 + void *cal; 199 200 }; 200 201 201 202 enum { ··· 291 300 int mt7915_eeprom_get_target_power(struct mt7915_dev *dev, 292 301 struct ieee80211_channel *chan, 293 302 u8 chain_idx); 294 - void mt7915_eeprom_init_sku(struct mt7915_dev *dev); 303 + s8 mt7915_eeprom_get_power_delta(struct mt7915_dev *dev, int band); 295 304 int mt7915_dma_init(struct mt7915_dev *dev); 296 305 void mt7915_dma_prefetch(struct mt7915_dev *dev); 297 306 void mt7915_dma_cleanup(struct mt7915_dev *dev); ··· 341 350 int mt7915_mcu_set_rts_thresh(struct mt7915_phy *phy, u32 val); 342 351 int mt7915_mcu_set_pm(struct mt7915_dev *dev, int band, int enter); 343 352 int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable); 344 - int mt7915_mcu_set_sku(struct mt7915_phy *phy); 353 + int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy); 345 354 int mt7915_mcu_set_txbf_type(struct mt7915_dev *dev); 346 355 int mt7915_mcu_set_txbf_module(struct mt7915_dev *dev); 347 356 int mt7915_mcu_set_txbf_sounding(struct mt7915_dev *dev); ··· 350 359 const struct mt7915_dfs_pulse *pulse); 351 360 int mt7915_mcu_set_radar_th(struct mt7915_dev *dev, int 
index, 352 361 const struct mt7915_dfs_pattern *pattern); 362 + int mt7915_mcu_apply_group_cal(struct mt7915_dev *dev); 363 + int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy); 353 364 int mt7915_mcu_get_temperature(struct mt7915_dev *dev, int index); 354 365 int mt7915_mcu_get_tx_rate(struct mt7915_dev *dev, u32 cmd, u16 wlan_idx); 355 366 int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+1 -25
drivers/net/wireless/mediatek/mt76/mt7915/pci.c
··· 154 154 return IRQ_HANDLED; 155 155 } 156 156 157 - static int 158 - mt7915_alloc_device(struct pci_dev *pdev, struct mt7915_dev *dev) 159 - { 160 - #define NUM_BANDS 2 161 - int i; 162 - s8 **sku; 163 - 164 - sku = devm_kzalloc(&pdev->dev, NUM_BANDS * sizeof(*sku), GFP_KERNEL); 165 - if (!sku) 166 - return -ENOMEM; 167 - 168 - for (i = 0; i < NUM_BANDS; i++) { 169 - sku[i] = devm_kzalloc(&pdev->dev, MT7915_SKU_TABLE_SIZE * 170 - sizeof(**sku), GFP_KERNEL); 171 - if (!sku[i]) 172 - return -ENOMEM; 173 - } 174 - dev->rate_power = sku; 175 - 176 - return 0; 177 - } 178 - 179 157 static void mt7915_pci_init_hif2(struct mt7915_dev *dev) 180 158 { 181 159 struct mt7915_hif *hif; ··· 212 234 .survey_flags = SURVEY_INFO_TIME_TX | 213 235 SURVEY_INFO_TIME_RX | 214 236 SURVEY_INFO_TIME_BSS_RX, 237 + .token_size = MT7915_TOKEN_SIZE, 215 238 .tx_prepare_skb = mt7915_tx_prepare_skb, 216 239 .tx_complete_skb = mt7915_tx_complete_skb, 217 240 .rx_skb = mt7915_queue_rx_skb, ··· 249 270 return -ENOMEM; 250 271 251 272 dev = container_of(mdev, struct mt7915_dev, mt76); 252 - ret = mt7915_alloc_device(pdev, dev); 253 - if (ret) 254 - goto error; 255 273 256 274 ret = mt7915_mmio_init(mdev, pcim_iomap_table(pdev)[0], pdev->irq); 257 275 if (ret)
+5
drivers/net/wireless/mediatek/mt76/mt7915/regs.h
··· 82 82 #define MT_TMAC_CTCR0_INS_DDLMT_EN BIT(17) 83 83 #define MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN BIT(18) 84 84 85 + #define MT_TMAC_FP0R0(_band) MT_WF_TMAC(_band, 0x020) 86 + #define MT_TMAC_FP0R15(_band) MT_WF_TMAC(_band, 0x080) 87 + #define MT_TMAC_FP0R18(_band) MT_WF_TMAC(_band, 0x270) 88 + #define MT_TMAC_FP_MASK GENMASK(7, 0) 89 + 85 90 #define MT_TMAC_TFCR0(_band) MT_WF_TMAC(_band, 0x1e0) 86 91 87 92 #define MT_WF_DMA_BASE(_band) ((_band) ? 0xa1e00 : 0x21e00)
+5 -17
drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
··· 257 257 { 258 258 struct mt76_phy *mphy = phy->mt76; 259 259 struct mt76_testmode_data *td = &mphy->test; 260 - struct sk_buff *old = td->tx_skb, *new; 261 260 struct ieee80211_supported_band *sband; 262 261 struct rate_info rate = {}; 263 262 u16 flags = 0, tx_len; 264 263 u32 bitrate; 264 + int ret; 265 265 266 - if (!tx_time || !old) 266 + if (!tx_time) 267 267 return 0; 268 268 269 269 rate.mcs = td->tx_rate_idx; ··· 323 323 bitrate = cfg80211_calculate_bitrate(&rate); 324 324 tx_len = bitrate * tx_time / 10 / 8; 325 325 326 - if (tx_len < sizeof(struct ieee80211_hdr)) 327 - tx_len = sizeof(struct ieee80211_hdr); 328 - else if (tx_len > IEEE80211_MAX_FRAME_LEN) 329 - tx_len = IEEE80211_MAX_FRAME_LEN; 330 - 331 - new = alloc_skb(tx_len, GFP_KERNEL); 332 - if (!new) 333 - return -ENOMEM; 334 - 335 - skb_copy_header(new, old); 336 - __skb_put_zero(new, tx_len); 337 - memcpy(new->data, old->data, sizeof(struct ieee80211_hdr)); 338 - 339 - dev_kfree_skb(old); 340 - td->tx_skb = new; 326 + ret = mt76_testmode_alloc_skb(phy->mt76, tx_len); 327 + if (ret) 328 + return ret; 341 329 342 330 return 0; 343 331 }
+133 -9
drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
··· 9 9 { 10 10 struct mt7921_dev *dev = data; 11 11 12 - dev->fw_debug = (u8)val; 12 + mt7921_mutex_acquire(dev); 13 13 14 + dev->fw_debug = (u8)val; 14 15 mt7921_mcu_fw_log_2_host(dev, dev->fw_debug); 16 + 17 + mt7921_mutex_release(dev); 15 18 16 19 return 0; 17 20 } ··· 149 146 return 0; 150 147 } 151 148 149 + static void 150 + mt7921_seq_puts_array(struct seq_file *file, const char *str, 151 + s8 *val, int len) 152 + { 153 + int i; 154 + 155 + seq_printf(file, "%-16s:", str); 156 + for (i = 0; i < len; i++) 157 + if (val[i] == 127) 158 + seq_printf(file, " %6s", "N.A"); 159 + else 160 + seq_printf(file, " %6d", val[i]); 161 + seq_puts(file, "\n"); 162 + } 163 + 164 + #define mt7921_print_txpwr_entry(prefix, rate) \ 165 + ({ \ 166 + mt7921_seq_puts_array(s, #prefix " (user)", \ 167 + txpwr.data[TXPWR_USER].rate, \ 168 + ARRAY_SIZE(txpwr.data[TXPWR_USER].rate)); \ 169 + mt7921_seq_puts_array(s, #prefix " (eeprom)", \ 170 + txpwr.data[TXPWR_EEPROM].rate, \ 171 + ARRAY_SIZE(txpwr.data[TXPWR_EEPROM].rate)); \ 172 + mt7921_seq_puts_array(s, #prefix " (tmac)", \ 173 + txpwr.data[TXPWR_MAC].rate, \ 174 + ARRAY_SIZE(txpwr.data[TXPWR_MAC].rate)); \ 175 + }) 176 + 177 + static int 178 + mt7921_txpwr(struct seq_file *s, void *data) 179 + { 180 + struct mt7921_dev *dev = dev_get_drvdata(s->private); 181 + struct mt7921_txpwr txpwr; 182 + int ret; 183 + 184 + ret = mt7921_get_txpwr_info(dev, &txpwr); 185 + if (ret) 186 + return ret; 187 + 188 + seq_printf(s, "Tx power table (channel %d)\n", txpwr.ch); 189 + seq_printf(s, "%-16s %6s %6s %6s %6s\n", 190 + " ", "1m", "2m", "5m", "11m"); 191 + mt7921_print_txpwr_entry(CCK, cck); 192 + 193 + seq_printf(s, "%-16s %6s %6s %6s %6s %6s %6s %6s %6s\n", 194 + " ", "6m", "9m", "12m", "18m", "24m", "36m", 195 + "48m", "54m"); 196 + mt7921_print_txpwr_entry(OFDM, ofdm); 197 + 198 + seq_printf(s, "%-16s %6s %6s %6s %6s %6s %6s %6s %6s\n", 199 + " ", "mcs0", "mcs1", "mcs2", "mcs3", "mcs4", "mcs5", 200 + "mcs6", "mcs7"); 201 + 
mt7921_print_txpwr_entry(HT20, ht20); 202 + 203 + seq_printf(s, "%-16s %6s %6s %6s %6s %6s %6s %6s %6s %6s\n", 204 + " ", "mcs0", "mcs1", "mcs2", "mcs3", "mcs4", "mcs5", 205 + "mcs6", "mcs7", "mcs32"); 206 + mt7921_print_txpwr_entry(HT40, ht40); 207 + 208 + seq_printf(s, "%-16s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s\n", 209 + " ", "mcs0", "mcs1", "mcs2", "mcs3", "mcs4", "mcs5", 210 + "mcs6", "mcs7", "mcs8", "mcs9", "mcs10", "mcs11"); 211 + mt7921_print_txpwr_entry(VHT20, vht20); 212 + mt7921_print_txpwr_entry(VHT40, vht40); 213 + mt7921_print_txpwr_entry(VHT80, vht80); 214 + mt7921_print_txpwr_entry(VHT160, vht160); 215 + mt7921_print_txpwr_entry(HE26, he26); 216 + mt7921_print_txpwr_entry(HE52, he52); 217 + mt7921_print_txpwr_entry(HE106, he106); 218 + mt7921_print_txpwr_entry(HE242, he242); 219 + mt7921_print_txpwr_entry(HE484, he484); 220 + mt7921_print_txpwr_entry(HE996, he996); 221 + mt7921_print_txpwr_entry(HE996x2, he996x2); 222 + 223 + return 0; 224 + } 225 + 152 226 static int 153 227 mt7921_pm_set(void *data, u64 val) 154 228 { 155 229 struct mt7921_dev *dev = data; 230 + struct mt76_connac_pm *pm = &dev->pm; 156 231 struct mt76_phy *mphy = dev->phy.mt76; 232 + 233 + if (val == pm->enable) 234 + return 0; 157 235 158 236 mt7921_mutex_acquire(dev); 159 237 160 - dev->pm.enable = val; 238 + if (!pm->enable) { 239 + pm->stats.last_wake_event = jiffies; 240 + pm->stats.last_doze_event = jiffies; 241 + } 242 + pm->enable = val; 161 243 162 244 ieee80211_iterate_active_interfaces(mphy->hw, 163 245 IEEE80211_IFACE_ITER_RESUME_ALL, ··· 263 175 } 264 176 265 177 DEFINE_DEBUGFS_ATTRIBUTE(fops_pm, mt7921_pm_get, mt7921_pm_set, "%lld\n"); 178 + 179 + static int 180 + mt7921_pm_stats(struct seq_file *s, void *data) 181 + { 182 + struct mt7921_dev *dev = dev_get_drvdata(s->private); 183 + struct mt76_connac_pm *pm = &dev->pm; 184 + 185 + unsigned long awake_time = pm->stats.awake_time; 186 + unsigned long doze_time = pm->stats.doze_time; 187 + 188 + if 
(!test_bit(MT76_STATE_PM, &dev->mphy.state)) 189 + awake_time += jiffies - pm->stats.last_wake_event; 190 + else 191 + doze_time += jiffies - pm->stats.last_doze_event; 192 + 193 + seq_printf(s, "awake time: %14u\ndoze time: %15u\n", 194 + jiffies_to_msecs(awake_time), 195 + jiffies_to_msecs(doze_time)); 196 + 197 + seq_printf(s, "low power wakes: %9d\n", pm->stats.lp_wake); 198 + 199 + return 0; 200 + } 266 201 267 202 static int 268 203 mt7921_pm_idle_timeout_set(void *data, u64 val) ··· 310 199 DEFINE_DEBUGFS_ATTRIBUTE(fops_pm_idle_timeout, mt7921_pm_idle_timeout_get, 311 200 mt7921_pm_idle_timeout_set, "%lld\n"); 312 201 313 - static int mt7921_config(void *data, u64 val) 202 + static int mt7921_chip_reset(void *data, u64 val) 314 203 { 315 204 struct mt7921_dev *dev = data; 316 - int ret; 205 + int ret = 0; 317 206 318 - mt7921_mutex_acquire(dev); 319 - ret = mt76_connac_mcu_chip_config(&dev->mt76); 320 - mt7921_mutex_release(dev); 207 + switch (val) { 208 + case 1: 209 + /* Reset wifisys directly. */ 210 + mt7921_reset(&dev->mt76); 211 + break; 212 + default: 213 + /* Collect the core dump before reset wifisys. 
*/ 214 + mt7921_mutex_acquire(dev); 215 + ret = mt76_connac_mcu_chip_config(&dev->mt76); 216 + mt7921_mutex_release(dev); 217 + break; 218 + } 321 219 322 220 return ret; 323 221 } 324 222 325 - DEFINE_DEBUGFS_ATTRIBUTE(fops_config, NULL, mt7921_config, "%lld\n"); 223 + DEFINE_DEBUGFS_ATTRIBUTE(fops_reset, NULL, mt7921_chip_reset, "%lld\n"); 326 224 327 225 int mt7921_init_debugfs(struct mt7921_dev *dev) 328 226 { ··· 345 225 mt7921_queues_read); 346 226 debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir, 347 227 mt7921_queues_acq); 228 + debugfs_create_devm_seqfile(dev->mt76.dev, "txpower_sku", dir, 229 + mt7921_txpwr); 348 230 debugfs_create_file("tx_stats", 0400, dir, dev, &mt7921_tx_stats_fops); 349 231 debugfs_create_file("fw_debug", 0600, dir, dev, &fops_fw_debug); 350 232 debugfs_create_file("runtime-pm", 0600, dir, dev, &fops_pm); 351 233 debugfs_create_file("idle-timeout", 0600, dir, dev, 352 234 &fops_pm_idle_timeout); 353 - debugfs_create_file("chip_config", 0600, dir, dev, &fops_config); 235 + debugfs_create_file("chip_reset", 0600, dir, dev, &fops_reset); 236 + debugfs_create_devm_seqfile(dev->mt76.dev, "runtime_pm_stats", dir, 237 + mt7921_pm_stats); 354 238 355 239 return 0; 356 240 }
+182 -58
drivers/net/wireless/mediatek/mt76/mt7921/dma.c
··· 53 53 } 54 54 } 55 55 56 - static void 57 - mt7921_tx_cleanup(struct mt7921_dev *dev) 56 + void mt7921_tx_cleanup(struct mt7921_dev *dev) 58 57 { 59 58 mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false); 60 59 mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], false); ··· 65 66 66 67 dev = container_of(napi, struct mt7921_dev, mt76.tx_napi); 67 68 68 - mt7921_tx_cleanup(dev); 69 + if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) { 70 + napi_complete(napi); 71 + queue_work(dev->mt76.wq, &dev->pm.wake_work); 72 + return 0; 73 + } 69 74 70 - if (napi_complete_done(napi, 0)) 75 + mt7921_tx_cleanup(dev); 76 + if (napi_complete(napi)) 71 77 mt7921_irq_enable(dev, MT_INT_TX_DONE_ALL); 78 + mt76_connac_pm_unref(&dev->pm); 72 79 73 80 return 0; 74 81 } 75 82 76 - void mt7921_dma_prefetch(struct mt7921_dev *dev) 83 + static int mt7921_poll_rx(struct napi_struct *napi, int budget) 84 + { 85 + struct mt7921_dev *dev; 86 + int done; 87 + 88 + dev = container_of(napi->dev, struct mt7921_dev, mt76.napi_dev); 89 + 90 + if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) { 91 + napi_complete(napi); 92 + queue_work(dev->mt76.wq, &dev->pm.wake_work); 93 + return 0; 94 + } 95 + done = mt76_dma_rx_poll(napi, budget); 96 + mt76_connac_pm_unref(&dev->pm); 97 + 98 + return done; 99 + } 100 + 101 + static void mt7921_dma_prefetch(struct mt7921_dev *dev) 77 102 { 78 103 #define PREFETCH(base, depth) ((base) << 16 | (depth)) 79 104 ··· 221 198 return dev->bus_ops->rmw(mdev, addr, mask, val); 222 199 } 223 200 224 - static int mt7921_dmashdl_disabled(struct mt7921_dev *dev) 201 + static int mt7921_dma_disable(struct mt7921_dev *dev, bool force) 225 202 { 226 - mt76_clear(dev, MT_WFDMA0_GLO_CFG_EXT0, MT_WFDMA0_CSR_TX_DMASHDL_ENABLE); 203 + if (force) { 204 + /* reset */ 205 + mt76_clear(dev, MT_WFDMA0_RST, 206 + MT_WFDMA0_RST_DMASHDL_ALL_RST | 207 + MT_WFDMA0_RST_LOGIC_RST); 208 + 209 + mt76_set(dev, MT_WFDMA0_RST, 210 + MT_WFDMA0_RST_DMASHDL_ALL_RST | 211 + 
MT_WFDMA0_RST_LOGIC_RST); 212 + } 213 + 214 + /* disable dmashdl */ 215 + mt76_clear(dev, MT_WFDMA0_GLO_CFG_EXT0, 216 + MT_WFDMA0_CSR_TX_DMASHDL_ENABLE); 227 217 mt76_set(dev, MT_DMASHDL_SW_CONTROL, MT_DMASHDL_DMASHDL_BYPASS); 218 + 219 + /* disable WFDMA0 */ 220 + mt76_clear(dev, MT_WFDMA0_GLO_CFG, 221 + MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN | 222 + MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN | 223 + MT_WFDMA0_GLO_CFG_OMIT_TX_INFO | 224 + MT_WFDMA0_GLO_CFG_OMIT_RX_INFO | 225 + MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2); 226 + 227 + if (!mt76_poll(dev, MT_WFDMA0_GLO_CFG, 228 + MT_WFDMA0_GLO_CFG_TX_DMA_BUSY | 229 + MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 1000)) 230 + return -ETIMEDOUT; 231 + 232 + return 0; 233 + } 234 + 235 + static int mt7921_dma_enable(struct mt7921_dev *dev) 236 + { 237 + /* configure perfetch settings */ 238 + mt7921_dma_prefetch(dev); 239 + 240 + /* reset dma idx */ 241 + mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0); 242 + 243 + /* configure delay interrupt */ 244 + mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0); 245 + 246 + mt76_set(dev, MT_WFDMA0_GLO_CFG, 247 + MT_WFDMA0_GLO_CFG_TX_WB_DDONE | 248 + MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN | 249 + MT_WFDMA0_GLO_CFG_CLK_GAT_DIS | 250 + MT_WFDMA0_GLO_CFG_OMIT_TX_INFO | 251 + MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN | 252 + MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2); 253 + 254 + mt76_set(dev, MT_WFDMA0_GLO_CFG, 255 + MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN); 256 + 257 + mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT); 258 + 259 + /* enable interrupts for TX/RX rings */ 260 + mt7921_irq_enable(dev, 261 + MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL | 262 + MT_INT_MCU_CMD); 263 + mt76_set(dev, MT_MCU2HOST_SW_INT_ENA, MT_MCU_CMD_WAKE_RX_PCIE); 264 + 265 + return 0; 266 + } 267 + 268 + static int mt7921_dma_reset(struct mt7921_dev *dev, bool force) 269 + { 270 + int i, err; 271 + 272 + err = mt7921_dma_disable(dev, force); 273 + if (err) 274 + return err; 275 + 276 + /* reset hw 
queues */ 277 + for (i = 0; i < __MT_TXQ_MAX; i++) 278 + mt76_queue_reset(dev, dev->mphy.q_tx[i]); 279 + 280 + for (i = 0; i < __MT_MCUQ_MAX; i++) 281 + mt76_queue_reset(dev, dev->mt76.q_mcu[i]); 282 + 283 + mt76_for_each_q_rx(&dev->mt76, i) 284 + mt76_queue_reset(dev, &dev->mt76.q_rx[i]); 285 + 286 + mt76_tx_status_check(&dev->mt76, NULL, true); 287 + 288 + return mt7921_dma_enable(dev); 289 + } 290 + 291 + int mt7921_wfsys_reset(struct mt7921_dev *dev) 292 + { 293 + mt76_set(dev, 0x70002600, BIT(0)); 294 + msleep(200); 295 + mt76_clear(dev, 0x70002600, BIT(0)); 296 + 297 + if (!__mt76_poll_msec(&dev->mt76, MT_WFSYS_SW_RST_B, 298 + WFSYS_SW_INIT_DONE, WFSYS_SW_INIT_DONE, 500)) 299 + return -ETIMEDOUT; 300 + 301 + return 0; 302 + } 303 + 304 + int mt7921_wpdma_reset(struct mt7921_dev *dev, bool force) 305 + { 306 + int i, err; 307 + 308 + /* clean up hw queues */ 309 + for (i = 0; i < ARRAY_SIZE(dev->mt76.phy.q_tx); i++) 310 + mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true); 311 + 312 + for (i = 0; i < ARRAY_SIZE(dev->mt76.q_mcu); i++) 313 + mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true); 314 + 315 + mt76_for_each_q_rx(&dev->mt76, i) 316 + mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]); 317 + 318 + if (force) { 319 + err = mt7921_wfsys_reset(dev); 320 + if (err) 321 + return err; 322 + } 323 + err = mt7921_dma_reset(dev, force); 324 + if (err) 325 + return err; 326 + 327 + mt76_for_each_q_rx(&dev->mt76, i) 328 + mt76_queue_rx_reset(dev, i); 329 + 330 + return 0; 331 + } 332 + 333 + int mt7921_wpdma_reinit_cond(struct mt7921_dev *dev) 334 + { 335 + struct mt76_connac_pm *pm = &dev->pm; 336 + int err; 337 + 338 + /* check if the wpdma must be reinitialized */ 339 + if (mt7921_dma_need_reinit(dev)) { 340 + /* disable interrutpts */ 341 + mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0); 342 + mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0); 343 + 344 + err = mt7921_wpdma_reset(dev, false); 345 + if (err) { 346 + dev_err(dev->mt76.dev, "wpdma reset failed\n"); 347 + return 
err; 348 + } 349 + 350 + /* enable interrutpts */ 351 + mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); 352 + pm->stats.lp_wake++; 353 + } 228 354 229 355 return 0; 230 356 } ··· 398 226 399 227 mt76_dma_attach(&dev->mt76); 400 228 401 - /* reset */ 402 - mt76_clear(dev, MT_WFDMA0_RST, 403 - MT_WFDMA0_RST_DMASHDL_ALL_RST | 404 - MT_WFDMA0_RST_LOGIC_RST); 405 - 406 - mt76_set(dev, MT_WFDMA0_RST, 407 - MT_WFDMA0_RST_DMASHDL_ALL_RST | 408 - MT_WFDMA0_RST_LOGIC_RST); 409 - 410 - ret = mt7921_dmashdl_disabled(dev); 229 + ret = mt7921_dma_disable(dev, true); 411 230 if (ret) 412 231 return ret; 413 - 414 - /* disable WFDMA0 */ 415 - mt76_clear(dev, MT_WFDMA0_GLO_CFG, 416 - MT_WFDMA0_GLO_CFG_TX_DMA_EN | 417 - MT_WFDMA0_GLO_CFG_RX_DMA_EN | 418 - MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN | 419 - MT_WFDMA0_GLO_CFG_OMIT_TX_INFO | 420 - MT_WFDMA0_GLO_CFG_OMIT_RX_INFO | 421 - MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2); 422 - 423 - mt76_poll(dev, MT_WFDMA0_GLO_CFG, 424 - MT_WFDMA0_GLO_CFG_TX_DMA_BUSY | 425 - MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 1000); 426 232 427 233 /* init tx queue */ 428 234 ret = mt7921_init_tx_queues(&dev->phy, MT7921_TXQ_BAND0, ··· 445 295 if (ret) 446 296 return ret; 447 297 448 - ret = mt76_init_queues(dev); 298 + ret = mt76_init_queues(dev, mt7921_poll_rx); 449 299 if (ret < 0) 450 300 return ret; 451 301 ··· 453 303 mt7921_poll_tx, NAPI_POLL_WEIGHT); 454 304 napi_enable(&dev->mt76.tx_napi); 455 305 456 - /* configure perfetch settings */ 457 - mt7921_dma_prefetch(dev); 458 - 459 - /* reset dma idx */ 460 - mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0); 461 - 462 - /* configure delay interrupt */ 463 - mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0); 464 - 465 - mt76_set(dev, MT_WFDMA0_GLO_CFG, 466 - MT_WFDMA0_GLO_CFG_TX_WB_DDONE | 467 - MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN | 468 - MT_WFDMA0_GLO_CFG_CLK_GAT_DIS | 469 - MT_WFDMA0_GLO_CFG_OMIT_TX_INFO | 470 - MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN | 471 - MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2); 472 - 473 - 
mt76_set(dev, MT_WFDMA0_GLO_CFG, 474 - MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN); 475 - 476 - mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT); 477 - 478 - /* enable interrupts for TX/RX rings */ 479 - mt7921_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL | 480 - MT_INT_MCU_CMD); 481 - 482 - return 0; 306 + return mt7921_dma_enable(dev); 483 307 } 484 308 485 309 void mt7921_dma_cleanup(struct mt7921_dev *dev)
+18 -18
drivers/net/wireless/mediatek/mt76/mt7921/init.c
··· 58 58 { 59 59 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); 60 60 struct mt7921_dev *dev = mt7921_hw_dev(hw); 61 + struct mt7921_phy *phy = mt7921_hw_phy(hw); 61 62 62 63 memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2)); 63 64 dev->mt76.region = request->dfs_region; 64 65 65 66 mt7921_mutex_acquire(dev); 66 67 mt76_connac_mcu_set_channel_domain(hw->priv); 68 + mt76_connac_mcu_set_rate_txpower(phy->mt76); 67 69 mt7921_mutex_release(dev); 68 70 } 69 71 ··· 166 164 mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b, 0); 167 165 } 168 166 169 - static void mt7921_init_work(struct work_struct *work) 170 - { 171 - struct mt7921_dev *dev = container_of(work, struct mt7921_dev, 172 - init_work); 173 - 174 - mt7921_mcu_set_eeprom(dev); 175 - mt7921_mac_init(dev); 176 - } 177 - 178 167 static int mt7921_init_hardware(struct mt7921_dev *dev) 179 168 { 180 169 int ret, idx; 181 - 182 - INIT_WORK(&dev->init_work, mt7921_init_work); 183 - spin_lock_init(&dev->token_lock); 184 - idr_init(&dev->token); 185 170 186 171 ret = mt7921_dma_init(dev); 187 172 if (ret) ··· 189 200 if (ret < 0) 190 201 return ret; 191 202 203 + ret = mt7921_mcu_set_eeprom(dev); 204 + if (ret) 205 + return ret; 206 + 192 207 /* Beacon and mgmt frames should occupy wcid 0 */ 193 208 idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7921_WTBL_STA - 1); 194 209 if (idx) ··· 202 209 dev->mt76.global_wcid.hw_key_idx = -1; 203 210 dev->mt76.global_wcid.tx_info |= MT_WCID_TX_INFO_SET; 204 211 rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid); 212 + 213 + mt7921_mac_init(dev); 205 214 206 215 return 0; 207 216 } ··· 216 221 dev->phy.dev = dev; 217 222 dev->phy.mt76 = &dev->mt76.phy; 218 223 dev->mt76.phy.priv = &dev->phy; 224 + dev->mt76.tx_worker.fn = mt7921_tx_worker; 219 225 220 226 INIT_DELAYED_WORK(&dev->pm.ps_work, mt7921_pm_power_save_work); 221 227 INIT_WORK(&dev->pm.wake_work, mt7921_pm_wake_work); 222 - init_completion(&dev->pm.wake_cmpl); 228 + 
spin_lock_init(&dev->pm.wake.lock); 229 + mutex_init(&dev->pm.mutex); 230 + init_waitqueue_head(&dev->pm.wait); 223 231 spin_lock_init(&dev->pm.txq_lock); 224 232 set_bit(MT76_STATE_PM, &dev->mphy.state); 225 233 INIT_LIST_HEAD(&dev->phy.stats_list); ··· 236 238 237 239 INIT_WORK(&dev->reset_work, mt7921_mac_reset_work); 238 240 241 + dev->pm.idle_timeout = MT7921_PM_TIMEOUT; 242 + dev->pm.stats.last_wake_event = jiffies; 243 + dev->pm.stats.last_doze_event = jiffies; 244 + 239 245 ret = mt7921_init_hardware(dev); 240 246 if (ret) 241 247 return ret; 242 248 243 249 mt7921_init_wiphy(hw); 244 - dev->pm.idle_timeout = MT7921_PM_TIMEOUT; 245 250 dev->mphy.sband_2g.sband.ht_cap.cap |= 246 251 IEEE80211_HT_CAP_LDPC_CODING | 247 252 IEEE80211_HT_CAP_MAX_AMSDU; ··· 265 264 if (ret) 266 265 return ret; 267 266 268 - ieee80211_queue_work(mt76_hw(dev), &dev->init_work); 269 - 270 267 return mt7921_init_debugfs(dev); 271 268 } 272 269 273 270 void mt7921_unregister_device(struct mt7921_dev *dev) 274 271 { 275 272 mt76_unregister_device(&dev->mt76); 276 - mt7921_mcu_exit(dev); 277 273 mt7921_tx_token_put(dev); 274 + mt7921_dma_cleanup(dev); 275 + mt7921_mcu_exit(dev); 278 276 279 277 tasklet_disable(&dev->irq_tasklet); 280 278 mt76_free_device(&dev->mt76);
+40 -153
drivers/net/wireless/mediatek/mt76/mt7921/mac.c
··· 785 785 } 786 786 } 787 787 788 - static void mt7921_set_tx_blocked(struct mt7921_dev *dev, bool blocked) 789 - { 790 - struct mt76_phy *mphy = &dev->mphy; 791 - struct mt76_queue *q; 792 - 793 - q = mphy->q_tx[0]; 794 - if (blocked == q->blocked) 795 - return; 796 - 797 - q->blocked = blocked; 798 - if (!blocked) 799 - mt76_worker_schedule(&dev->mt76.tx_worker); 800 - } 801 - 802 788 int mt7921_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, 803 789 enum mt76_txq_id qid, struct mt76_wcid *wcid, 804 790 struct ieee80211_sta *sta, ··· 810 824 t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size); 811 825 t->skb = tx_info->skb; 812 826 813 - spin_lock_bh(&dev->token_lock); 814 - id = idr_alloc(&dev->token, t, 0, MT7921_TOKEN_SIZE, GFP_ATOMIC); 815 - if (id >= 0) 816 - dev->token_count++; 817 - 818 - if (dev->token_count >= MT7921_TOKEN_SIZE - MT7921_TOKEN_FREE_THR) 819 - mt7921_set_tx_blocked(dev, true); 820 - spin_unlock_bh(&dev->token_lock); 821 - 827 + id = mt76_token_consume(mdev, &t); 822 828 if (id < 0) 823 829 return id; 824 830 ··· 972 994 msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info); 973 995 stat = FIELD_GET(MT_TX_FREE_STATUS, info); 974 996 975 - spin_lock_bh(&dev->token_lock); 976 - txwi = idr_remove(&dev->token, msdu); 977 - if (txwi) 978 - dev->token_count--; 979 - if (dev->token_count < MT7921_TOKEN_SIZE - MT7921_TOKEN_FREE_THR && 980 - dev->mphy.q_tx[0]->blocked) 981 - wake = true; 982 - spin_unlock_bh(&dev->token_lock); 983 - 997 + txwi = mt76_token_release(mdev, msdu, &wake); 984 998 if (!txwi) 985 999 continue; 986 1000 ··· 1000 1030 mt76_put_txwi(mdev, txwi); 1001 1031 } 1002 1032 1003 - if (wake) { 1004 - spin_lock_bh(&dev->token_lock); 1005 - mt7921_set_tx_blocked(dev, false); 1006 - spin_unlock_bh(&dev->token_lock); 1007 - } 1033 + if (wake) 1034 + mt76_set_tx_blocked(&dev->mt76, false); 1008 1035 1009 1036 napi_consume_skb(skb, 1); 1010 1037 ··· 1010 1043 napi_consume_skb(skb, 1); 1011 1044 } 1012 1045 1013 - if 
(test_bit(MT76_STATE_PM, &dev->phy.mt76->state)) 1014 - return; 1015 - 1016 1046 mt7921_mac_sta_poll(dev); 1017 - 1018 - mt76_connac_power_save_sched(&dev->mphy, &dev->pm); 1019 - 1020 1047 mt76_worker_schedule(&dev->mt76.tx_worker); 1021 1048 } 1022 1049 ··· 1032 1071 u16 token; 1033 1072 1034 1073 txp = mt7921_txwi_to_txp(mdev, e->txwi); 1035 - 1036 1074 token = le16_to_cpu(txp->hw.msdu_id[0]) & ~MT_MSDU_ID_VALID; 1037 - spin_lock_bh(&dev->token_lock); 1038 - t = idr_remove(&dev->token, token); 1039 - spin_unlock_bh(&dev->token_lock); 1075 + t = mt76_token_put(mdev, token); 1040 1076 e->skb = t ? t->skb : NULL; 1041 1077 } 1042 1078 ··· 1168 1210 mt76_connac_power_save_sched(&dev->mphy, &dev->pm); 1169 1211 } 1170 1212 1171 - int mt7921_wfsys_reset(struct mt7921_dev *dev) 1172 - { 1173 - mt76_set(dev, 0x70002600, BIT(0)); 1174 - msleep(200); 1175 - mt76_clear(dev, 0x70002600, BIT(0)); 1176 - 1177 - return __mt76_poll_msec(&dev->mt76, MT_WFSYS_SW_RST_B, 1178 - WFSYS_SW_INIT_DONE, WFSYS_SW_INIT_DONE, 500); 1179 - } 1180 - 1181 - static void 1182 - mt7921_dma_reset(struct mt7921_dev *dev) 1183 - { 1184 - int i; 1185 - 1186 - /* reset */ 1187 - mt76_clear(dev, MT_WFDMA0_RST, 1188 - MT_WFDMA0_RST_DMASHDL_ALL_RST | MT_WFDMA0_RST_LOGIC_RST); 1189 - 1190 - mt76_set(dev, MT_WFDMA0_RST, 1191 - MT_WFDMA0_RST_DMASHDL_ALL_RST | MT_WFDMA0_RST_LOGIC_RST); 1192 - 1193 - /* disable WFDMA0 */ 1194 - mt76_clear(dev, MT_WFDMA0_GLO_CFG, 1195 - MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN | 1196 - MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN | 1197 - MT_WFDMA0_GLO_CFG_OMIT_TX_INFO | 1198 - MT_WFDMA0_GLO_CFG_OMIT_RX_INFO | 1199 - MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2); 1200 - 1201 - mt76_poll(dev, MT_WFDMA0_GLO_CFG, 1202 - MT_WFDMA0_GLO_CFG_TX_DMA_BUSY | 1203 - MT_WFDMA0_GLO_CFG_RX_DMA_BUSY, 0, 1000); 1204 - 1205 - /* reset hw queues */ 1206 - for (i = 0; i < __MT_TXQ_MAX; i++) 1207 - mt76_queue_reset(dev, dev->mphy.q_tx[i]); 1208 - 1209 - for (i = 0; i < __MT_MCUQ_MAX; 
i++) 1210 - mt76_queue_reset(dev, dev->mt76.q_mcu[i]); 1211 - 1212 - mt76_for_each_q_rx(&dev->mt76, i) 1213 - mt76_queue_reset(dev, &dev->mt76.q_rx[i]); 1214 - 1215 - /* configure perfetch settings */ 1216 - mt7921_dma_prefetch(dev); 1217 - 1218 - /* reset dma idx */ 1219 - mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0); 1220 - 1221 - /* configure delay interrupt */ 1222 - mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0); 1223 - 1224 - mt76_set(dev, MT_WFDMA0_GLO_CFG, 1225 - MT_WFDMA0_GLO_CFG_TX_WB_DDONE | 1226 - MT_WFDMA0_GLO_CFG_FIFO_LITTLE_ENDIAN | 1227 - MT_WFDMA0_GLO_CFG_CLK_GAT_DIS | 1228 - MT_WFDMA0_GLO_CFG_OMIT_TX_INFO | 1229 - MT_WFDMA0_GLO_CFG_CSR_DISP_BASE_PTR_CHAIN_EN | 1230 - MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2); 1231 - 1232 - mt76_set(dev, MT_WFDMA0_GLO_CFG, 1233 - MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN); 1234 - 1235 - mt76_set(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT); 1236 - 1237 - /* enable interrupts for TX/RX rings */ 1238 - mt7921_irq_enable(dev, 1239 - MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL | 1240 - MT_INT_MCU_CMD); 1241 - } 1242 - 1243 1213 void mt7921_tx_token_put(struct mt7921_dev *dev) 1244 1214 { 1245 1215 struct mt76_txwi_cache *txwi; 1246 1216 int id; 1247 1217 1248 - spin_lock_bh(&dev->token_lock); 1249 - idr_for_each_entry(&dev->token, txwi, id) { 1218 + spin_lock_bh(&dev->mt76.token_lock); 1219 + idr_for_each_entry(&dev->mt76.token, txwi, id) { 1250 1220 mt7921_txp_skb_unmap(&dev->mt76, txwi); 1251 1221 if (txwi->skb) { 1252 1222 struct ieee80211_hw *hw; ··· 1183 1297 ieee80211_free_txskb(hw, txwi->skb); 1184 1298 } 1185 1299 mt76_put_txwi(&dev->mt76, txwi); 1186 - dev->token_count--; 1300 + dev->mt76.token_count--; 1187 1301 } 1188 - spin_unlock_bh(&dev->token_lock); 1189 - idr_destroy(&dev->token); 1302 + spin_unlock_bh(&dev->mt76.token_lock); 1303 + idr_destroy(&dev->mt76.token); 1190 1304 } 1191 1305 1192 1306 static void ··· 1225 1339 napi_disable(&dev->mt76.tx_napi); 1226 1340 1227 1341 mt7921_tx_token_put(dev); 
1228 - idr_init(&dev->token); 1342 + idr_init(&dev->mt76.token); 1229 1343 1230 - /* clean up hw queues */ 1231 - for (i = 0; i < ARRAY_SIZE(dev->mt76.phy.q_tx); i++) 1232 - mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true); 1233 - 1234 - for (i = 0; i < ARRAY_SIZE(dev->mt76.q_mcu); i++) 1235 - mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true); 1236 - 1237 - mt76_for_each_q_rx(&dev->mt76, i) 1238 - mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]); 1239 - 1240 - mt7921_wfsys_reset(dev); 1241 - mt7921_dma_reset(dev); 1344 + err = mt7921_wpdma_reset(dev, true); 1345 + if (err) 1346 + return err; 1242 1347 1243 1348 mt76_for_each_q_rx(&dev->mt76, i) { 1244 - mt76_queue_rx_reset(dev, i); 1245 1349 napi_enable(&dev->mt76.napi[i]); 1246 1350 napi_schedule(&dev->mt76.napi[i]); 1247 1351 } ··· 1241 1365 mt76_worker_enable(&dev->mt76.tx_worker); 1242 1366 1243 1367 clear_bit(MT76_MCU_RESET, &dev->mphy.state); 1368 + clear_bit(MT76_STATE_PM, &dev->mphy.state); 1244 1369 1245 1370 mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0); 1246 1371 mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); 1247 - mt7921_irq_enable(dev, 1248 - MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL | 1249 - MT_INT_MCU_CMD); 1250 1372 1251 1373 err = mt7921_run_firmware(dev); 1252 1374 if (err) ··· 1285 1411 if (i == 10) 1286 1412 dev_err(dev->mt76.dev, "chip reset failed\n"); 1287 1413 1414 + if (test_and_clear_bit(MT76_HW_SCANNING, &dev->mphy.state)) { 1415 + struct cfg80211_scan_info info = { 1416 + .aborted = true, 1417 + }; 1418 + 1419 + ieee80211_scan_completed(dev->mphy.hw, &info); 1420 + } 1421 + 1288 1422 ieee80211_wake_queues(hw); 1289 1423 ieee80211_iterate_active_interfaces(hw, 1290 1424 IEEE80211_IFACE_ITER_RESUME_ALL, 1291 - mt7921_vif_connect_iter, 0); 1425 + mt7921_vif_connect_iter, NULL); 1292 1426 } 1293 1427 1294 1428 void mt7921_reset(struct mt76_dev *mdev) ··· 1370 1488 mac_work.work); 1371 1489 phy = mphy->priv; 1372 1490 1373 - if (test_bit(MT76_STATE_PM, &mphy->state)) 1374 - goto out; 1375 - 1376 
1491 mt7921_mutex_acquire(phy->dev); 1377 1492 1378 1493 mt76_update_survey(mphy->dev); 1379 - if (++mphy->mac_work_count == 5) { 1494 + if (++mphy->mac_work_count == 2) { 1380 1495 mphy->mac_work_count = 0; 1381 1496 1382 1497 mt7921_mac_update_mib_stats(phy); 1383 1498 } 1384 - if (++phy->sta_work_count == 10) { 1499 + if (++phy->sta_work_count == 4) { 1385 1500 phy->sta_work_count = 0; 1386 1501 mt7921_mac_sta_stats_work(phy); 1387 1502 } 1388 1503 1389 1504 mt7921_mutex_release(phy->dev); 1390 - 1391 - out: 1392 1505 ieee80211_queue_delayed_work(phy->mt76->hw, &mphy->mac_work, 1393 1506 MT7921_WATCHDOG_TIME); 1394 1507 } ··· 1397 1520 pm.wake_work); 1398 1521 mphy = dev->phy.mt76; 1399 1522 1400 - if (!mt7921_mcu_drv_pmctrl(dev)) 1523 + if (!mt7921_mcu_drv_pmctrl(dev)) { 1524 + int i; 1525 + 1526 + mt76_for_each_q_rx(&dev->mt76, i) 1527 + napi_schedule(&dev->mt76.napi[i]); 1401 1528 mt76_connac_pm_dequeue_skbs(mphy, &dev->pm); 1402 - else 1403 - dev_err(mphy->dev->dev, "failed to wake device\n"); 1529 + mt7921_tx_cleanup(dev); 1530 + ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, 1531 + MT7921_WATCHDOG_TIME); 1532 + } 1404 1533 1405 1534 ieee80211_wake_queues(mphy->hw); 1406 - complete_all(&dev->pm.wake_cmpl); 1535 + wake_up(&dev->pm.wait); 1407 1536 } 1408 1537 1409 1538 void mt7921_pm_power_save_work(struct work_struct *work) ··· 1421 1538 pm.ps_work.work); 1422 1539 1423 1540 delta = dev->pm.idle_timeout; 1541 + if (test_bit(MT76_HW_SCANNING, &dev->mphy.state) || 1542 + test_bit(MT76_HW_SCHED_SCANNING, &dev->mphy.state)) 1543 + goto out; 1544 + 1424 1545 if (time_is_after_jiffies(dev->pm.last_activity + delta)) { 1425 1546 delta = dev->pm.last_activity + delta - jiffies; 1426 1547 goto out;
+25 -37
drivers/net/wireless/mediatek/mt76/mt7921/main.c
··· 182 182 if (err) 183 183 return err; 184 184 185 + err = mt76_connac_mcu_set_rate_txpower(phy->mt76); 186 + if (err) 187 + return err; 188 + 185 189 mt7921_mac_reset_counters(phy); 186 190 set_bit(MT76_STATE_RUNNING, &mphy->state); 187 191 ··· 395 391 clear_bit(MT76_RESET, &phy->mt76->state); 396 392 mt7921_mutex_release(dev); 397 393 398 - mt76_txq_schedule_all(phy->mt76); 399 - 394 + mt76_worker_schedule(&dev->mt76.tx_worker); 400 395 ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mt76->mac_work, 401 396 MT7921_WATCHDOG_TIME); 402 397 ··· 622 619 if (changed & BSS_CHANGED_PS) 623 620 mt7921_mcu_uni_bss_ps(dev, vif); 624 621 625 - if (changed & BSS_CHANGED_ASSOC) 622 + if (changed & BSS_CHANGED_ASSOC) { 623 + mt7921_mcu_sta_add(dev, NULL, vif, true); 626 624 mt7921_bss_bcnft_apply(dev, vif, info->assoc); 625 + } 627 626 628 - if (changed & BSS_CHANGED_ARP_FILTER) 629 - mt7921_mcu_update_arp_filter(hw, vif, info); 627 + if (changed & BSS_CHANGED_ARP_FILTER) { 628 + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; 629 + 630 + mt76_connac_mcu_update_arp_filter(&dev->mt76, &mvif->mt76, 631 + info); 632 + } 630 633 631 634 mt7921_mutex_release(dev); 632 635 } ··· 643 634 struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); 644 635 struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv; 645 636 struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; 646 - int rssi = -ewma_rssi_read(&mvif->rssi); 647 - struct mt76_sta_cmd_info info = { 648 - .sta = sta, 649 - .vif = vif, 650 - .enable = true, 651 - .cmd = MCU_UNI_CMD_STA_REC_UPDATE, 652 - .wcid = &msta->wcid, 653 - .rcpi = to_rcpi(rssi), 654 - }; 655 637 int ret, idx; 656 638 657 639 idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7921_WTBL_STA - 1); ··· 669 669 mt7921_mac_wtbl_update(dev, idx, 670 670 MT_WTBL_UPDATE_ADM_COUNT_CLEAR); 671 671 672 - ret = mt76_connac_mcu_add_sta_cmd(&dev->mphy, &info); 672 + ret = mt7921_mcu_sta_add(dev, sta, vif, true); 673 673 if (ret) 674 
674 return ret; 675 675 ··· 683 683 { 684 684 struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76); 685 685 struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv; 686 - struct mt76_sta_cmd_info info = { 687 - .sta = sta, 688 - .vif = vif, 689 - .cmd = MCU_UNI_CMD_STA_REC_UPDATE, 690 - .wcid = &msta->wcid, 691 - }; 692 686 693 687 mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid); 694 688 mt76_connac_pm_wake(&dev->mphy, &dev->pm); 695 689 696 - mt76_connac_mcu_add_sta_cmd(&dev->mphy, &info); 697 - 690 + mt7921_mcu_sta_add(dev, sta, vif, false); 698 691 mt7921_mac_wtbl_update(dev, msta->wcid.idx, 699 692 MT_WTBL_UPDATE_ADM_COUNT_CLEAR); 700 693 ··· 710 717 mt76_connac_power_save_sched(&dev->mphy, &dev->pm); 711 718 } 712 719 713 - static void 714 - mt7921_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq) 720 + void mt7921_tx_worker(struct mt76_worker *w) 715 721 { 716 - struct mt7921_dev *dev = mt7921_hw_dev(hw); 717 - struct mt7921_phy *phy = mt7921_hw_phy(hw); 718 - struct mt76_phy *mphy = phy->mt76; 722 + struct mt7921_dev *dev = container_of(w, struct mt7921_dev, 723 + mt76.tx_worker); 719 724 720 - if (!test_bit(MT76_STATE_RUNNING, &mphy->state)) 721 - return; 722 - 723 - if (test_bit(MT76_STATE_PM, &mphy->state)) { 725 + if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) { 724 726 queue_work(dev->mt76.wq, &dev->pm.wake_work); 725 727 return; 726 728 } 727 729 728 - dev->pm.last_activity = jiffies; 729 - mt76_worker_schedule(&dev->mt76.tx_worker); 730 + mt76_txq_schedule_all(&dev->mphy); 731 + mt76_connac_pm_unref(&dev->pm); 730 732 } 731 733 732 734 static void mt7921_tx(struct ieee80211_hw *hw, ··· 749 761 wcid = &mvif->sta.wcid; 750 762 } 751 763 752 - if (!test_bit(MT76_STATE_PM, &mphy->state)) { 753 - dev->pm.last_activity = jiffies; 764 + if (mt76_connac_pm_ref(mphy, &dev->pm)) { 754 765 mt76_tx(mphy, control->sta, wcid, skb); 766 + mt76_connac_pm_unref(&dev->pm); 755 767 return; 756 768 } 757 769 ··· 1180 1192 
.set_key = mt7921_set_key, 1181 1193 .ampdu_action = mt7921_ampdu_action, 1182 1194 .set_rts_threshold = mt7921_set_rts_threshold, 1183 - .wake_tx_queue = mt7921_wake_tx_queue, 1195 + .wake_tx_queue = mt76_wake_tx_queue, 1184 1196 .release_buffered_frames = mt76_release_buffered_frames, 1185 1197 .get_txpower = mt76_get_txpower, 1186 1198 .get_stats = mt7921_get_stats,
+76 -52
drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
··· 160 160 int ret = 0; 161 161 162 162 if (!skb) { 163 - dev_err(mdev->dev, "Message %d (seq %d) timeout\n", 163 + dev_err(mdev->dev, "Message %08x (seq %d) timeout\n", 164 164 cmd, seq); 165 + mt7921_reset(mdev); 166 + 165 167 return -ETIMEDOUT; 166 168 } 167 169 ··· 502 500 if (!msg->content[i]) 503 501 msg->content[i] = ' '; 504 502 } 505 - wiphy_info(mt76_hw(dev)->wiphy, "%*s", len, msg->content); 503 + wiphy_info(mt76_hw(dev)->wiphy, "%.*s", len, msg->content); 506 504 } 507 505 } 508 506 ··· 976 974 .mcu_skb_send_msg = mt7921_mcu_send_message, 977 975 .mcu_parse_response = mt7921_mcu_parse_response, 978 976 .mcu_restart = mt7921_mcu_restart, 979 - .mcu_reset = mt7921_reset, 980 977 }; 981 978 982 979 dev->mt76.mcu_ops = &mt7921_mcu_ops; ··· 1265 1264 sizeof(req), false); 1266 1265 } 1267 1266 1267 + int mt7921_mcu_sta_add(struct mt7921_dev *dev, struct ieee80211_sta *sta, 1268 + struct ieee80211_vif *vif, bool enable) 1269 + { 1270 + struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; 1271 + int rssi = -ewma_rssi_read(&mvif->rssi); 1272 + struct mt76_sta_cmd_info info = { 1273 + .sta = sta, 1274 + .vif = vif, 1275 + .enable = enable, 1276 + .cmd = MCU_UNI_CMD_STA_REC_UPDATE, 1277 + .rcpi = to_rcpi(rssi), 1278 + }; 1279 + struct mt7921_sta *msta; 1280 + 1281 + msta = sta ? (struct mt7921_sta *)sta->drv_priv : NULL; 1282 + info.wcid = msta ? 
&msta->wcid : &mvif->sta.wcid; 1283 + 1284 + return mt76_connac_mcu_add_sta_cmd(&dev->mphy, &info); 1285 + } 1286 + 1268 1287 int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev) 1269 1288 { 1270 1289 struct mt76_phy *mphy = &dev->mt76.phy; 1271 - int i; 1290 + struct mt76_connac_pm *pm = &dev->pm; 1291 + int i, err = 0; 1272 1292 1273 - if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state)) 1293 + mutex_lock(&pm->mutex); 1294 + 1295 + if (!test_bit(MT76_STATE_PM, &mphy->state)) 1274 1296 goto out; 1275 1297 1276 1298 for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) { ··· 1305 1281 1306 1282 if (i == MT7921_DRV_OWN_RETRY_COUNT) { 1307 1283 dev_err(dev->mt76.dev, "driver own failed\n"); 1308 - mt7921_reset(&dev->mt76); 1309 - return -EIO; 1284 + err = -EIO; 1285 + goto out; 1310 1286 } 1311 1287 1312 - out: 1313 - dev->pm.last_activity = jiffies; 1288 + mt7921_wpdma_reinit_cond(dev); 1289 + clear_bit(MT76_STATE_PM, &mphy->state); 1314 1290 1315 - return 0; 1291 + pm->stats.last_wake_event = jiffies; 1292 + pm->stats.doze_time += pm->stats.last_wake_event - 1293 + pm->stats.last_doze_event; 1294 + out: 1295 + mutex_unlock(&pm->mutex); 1296 + 1297 + if (err) 1298 + mt7921_reset(&dev->mt76); 1299 + 1300 + return err; 1316 1301 } 1317 1302 1318 1303 int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev) 1319 1304 { 1320 1305 struct mt76_phy *mphy = &dev->mt76.phy; 1321 - int i; 1306 + struct mt76_connac_pm *pm = &dev->pm; 1307 + int i, err = 0; 1322 1308 1323 - if (test_and_set_bit(MT76_STATE_PM, &mphy->state)) 1324 - return 0; 1309 + mutex_lock(&pm->mutex); 1310 + 1311 + if (mt76_connac_skip_fw_pmctrl(mphy, pm)) 1312 + goto out; 1325 1313 1326 1314 for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) { 1327 1315 mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_SET_OWN); ··· 1344 1308 1345 1309 if (i == MT7921_DRV_OWN_RETRY_COUNT) { 1346 1310 dev_err(dev->mt76.dev, "firmware own failed\n"); 1347 - mt7921_reset(&dev->mt76); 1348 - return -EIO; 1311 + clear_bit(MT76_STATE_PM, 
&mphy->state); 1312 + err = -EIO; 1349 1313 } 1350 1314 1351 - return 0; 1315 + pm->stats.last_doze_event = jiffies; 1316 + pm->stats.awake_time += pm->stats.last_doze_event - 1317 + pm->stats.last_wake_event; 1318 + out: 1319 + mutex_unlock(&pm->mutex); 1320 + 1321 + if (err) 1322 + mt7921_reset(&dev->mt76); 1323 + 1324 + return err; 1352 1325 } 1353 1326 1354 1327 void ··· 1384 1339 } 1385 1340 } 1386 1341 1387 - int mt7921_mcu_update_arp_filter(struct ieee80211_hw *hw, 1388 - struct ieee80211_vif *vif, 1389 - struct ieee80211_bss_conf *info) 1342 + int mt7921_get_txpwr_info(struct mt7921_dev *dev, struct mt7921_txpwr *txpwr) 1390 1343 { 1391 - struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv; 1392 - struct mt7921_dev *dev = mt7921_hw_dev(hw); 1393 - struct sk_buff *skb; 1394 - int i, len = min_t(int, info->arp_addr_cnt, 1395 - IEEE80211_BSS_ARP_ADDR_LIST_LEN); 1396 - struct { 1397 - struct { 1398 - u8 bss_idx; 1399 - u8 pad[3]; 1400 - } __packed hdr; 1401 - struct mt76_connac_arpns_tlv arp; 1402 - } req_hdr = { 1403 - .hdr = { 1404 - .bss_idx = mvif->mt76.idx, 1405 - }, 1406 - .arp = { 1407 - .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP), 1408 - .len = cpu_to_le16(sizeof(struct mt76_connac_arpns_tlv)), 1409 - .ips_num = len, 1410 - .mode = 2, /* update */ 1411 - .option = 1, 1412 - }, 1344 + struct mt7921_txpwr_event *event; 1345 + struct mt7921_txpwr_req req = { 1346 + .dbdc_idx = 0, 1413 1347 }; 1348 + struct sk_buff *skb; 1349 + int ret; 1414 1350 1415 - skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, 1416 - sizeof(req_hdr) + len * sizeof(__be32)); 1417 - if (!skb) 1418 - return -ENOMEM; 1351 + ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_CMD_GET_TXPWR, 1352 + &req, sizeof(req), true, &skb); 1353 + if (ret) 1354 + return ret; 1419 1355 1420 - skb_put_data(skb, &req_hdr, sizeof(req_hdr)); 1421 - for (i = 0; i < len; i++) { 1422 - u8 *addr = (u8 *)skb_put(skb, sizeof(__be32)); 1356 + event = (struct mt7921_txpwr_event *)skb->data; 1357 + 
WARN_ON(skb->len != le16_to_cpu(event->len)); 1358 + memcpy(txpwr, &event->txpwr, sizeof(event->txpwr)); 1423 1359 1424 - memcpy(addr, &info->arp_addr_list[i], sizeof(__be32)); 1425 - } 1360 + dev_kfree_skb(skb); 1426 1361 1427 - return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_UNI_CMD_OFFLOAD, 1428 - true); 1362 + return 0; 1429 1363 }
+17
drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
··· 86 86 MCU_EVENT_CH_PRIVILEGE = 0x18, 87 87 MCU_EVENT_SCHED_SCAN_DONE = 0x23, 88 88 MCU_EVENT_DBG_MSG = 0x27, 89 + MCU_EVENT_TXPWR = 0xd0, 89 90 MCU_EVENT_COREDUMP = 0xf0, 90 91 }; 91 92 ··· 391 390 __le32 wlan_idx; 392 391 struct mt7921_mcu_wlan_info_event event; 393 392 } __packed; 393 + 394 + struct mt7921_txpwr_req { 395 + u8 ver; 396 + u8 action; 397 + __le16 len; 398 + u8 dbdc_idx; 399 + u8 rsv[3]; 400 + } __packed; 401 + 402 + struct mt7921_txpwr_event { 403 + u8 ver; 404 + u8 action; 405 + __le16 len; 406 + struct mt7921_txpwr txpwr; 407 + } __packed; 408 + 394 409 #endif
+44 -16
drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
··· 18 18 19 19 #define MT7921_PM_TIMEOUT (HZ / 12) 20 20 #define MT7921_HW_SCAN_TIMEOUT (HZ / 10) 21 - #define MT7921_WATCHDOG_TIME (HZ / 10) 21 + #define MT7921_WATCHDOG_TIME (HZ / 4) 22 22 #define MT7921_RESET_TIMEOUT (30 * HZ) 23 23 24 24 #define MT7921_TX_RING_SIZE 2048 ··· 35 35 36 36 #define MT7921_EEPROM_SIZE 3584 37 37 #define MT7921_TOKEN_SIZE 8192 38 - #define MT7921_TOKEN_FREE_THR 64 39 38 40 39 #define MT7921_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */ 41 40 #define MT7921_CFEND_RATE_11B 0x03 /* 11B LP, 11M */ ··· 155 156 156 157 u16 chainmask; 157 158 158 - struct work_struct init_work; 159 159 struct work_struct reset_work; 160 160 161 161 struct list_head sta_poll_list; 162 162 spinlock_t sta_poll_lock; 163 163 164 - spinlock_t token_lock; 165 - int token_count; 166 - struct idr token; 167 - 168 164 u8 fw_debug; 169 165 170 166 struct mt76_connac_pm pm; 171 167 struct mt76_connac_coredump coredump; 168 + }; 169 + 170 + enum { 171 + TXPWR_USER, 172 + TXPWR_EEPROM, 173 + TXPWR_MAC, 174 + TXPWR_MAX_NUM, 175 + }; 176 + 177 + struct mt7921_txpwr { 178 + u8 ch; 179 + u8 rsv[3]; 180 + struct { 181 + u8 ch; 182 + u8 cck[4]; 183 + u8 ofdm[8]; 184 + u8 ht20[8]; 185 + u8 ht40[9]; 186 + u8 vht20[12]; 187 + u8 vht40[12]; 188 + u8 vht80[12]; 189 + u8 vht160[12]; 190 + u8 he26[12]; 191 + u8 he52[12]; 192 + u8 he106[12]; 193 + u8 he242[12]; 194 + u8 he484[12]; 195 + u8 he996[12]; 196 + u8 he996x2[12]; 197 + } data[TXPWR_MAX_NUM]; 172 198 }; 173 199 174 200 enum { ··· 248 224 u8 chain_idx); 249 225 void mt7921_eeprom_init_sku(struct mt7921_dev *dev); 250 226 int mt7921_dma_init(struct mt7921_dev *dev); 251 - void mt7921_dma_prefetch(struct mt7921_dev *dev); 227 + int mt7921_wpdma_reset(struct mt7921_dev *dev, bool force); 228 + int mt7921_wpdma_reinit_cond(struct mt7921_dev *dev); 252 229 void mt7921_dma_cleanup(struct mt7921_dev *dev); 253 230 int mt7921_run_firmware(struct mt7921_dev *dev); 254 231 int mt7921_mcu_init(struct mt7921_dev *dev); 255 - int 
mt7921_mcu_add_bss_info(struct mt7921_phy *phy, 256 - struct ieee80211_vif *vif, int enable); 257 232 int mt7921_mcu_add_key(struct mt7921_dev *dev, struct ieee80211_vif *vif, 258 233 struct mt7921_sta *msta, struct ieee80211_key_conf *key, 259 234 enum set_key_cmd cmd); 260 235 int mt7921_set_channel(struct mt7921_phy *phy); 236 + int mt7921_mcu_sta_add(struct mt7921_dev *dev, struct ieee80211_sta *sta, 237 + struct ieee80211_vif *vif, bool enable); 261 238 int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd); 262 239 int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif); 263 240 int mt7921_mcu_set_eeprom(struct mt7921_dev *dev); ··· 313 288 #define mt7921_l1_set(dev, addr, val) mt7921_l1_rmw(dev, addr, 0, val) 314 289 #define mt7921_l1_clear(dev, addr, val) mt7921_l1_rmw(dev, addr, val, 0) 315 290 291 + static inline bool mt7921_dma_need_reinit(struct mt7921_dev *dev) 292 + { 293 + return !mt76_get_field(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT); 294 + } 295 + 316 296 void mt7921_mac_init(struct mt7921_dev *dev); 317 297 bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask); 318 298 void mt7921_mac_reset_counters(struct mt7921_phy *phy); ··· 335 305 void mt7921_mac_work(struct work_struct *work); 336 306 void mt7921_mac_reset_work(struct work_struct *work); 337 307 void mt7921_reset(struct mt76_dev *mdev); 308 + void mt7921_tx_cleanup(struct mt7921_dev *dev); 338 309 int mt7921_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, 339 310 enum mt76_txq_id qid, struct mt76_wcid *wcid, 340 311 struct ieee80211_sta *sta, 341 312 struct mt76_tx_info *tx_info); 313 + 314 + void mt7921_tx_worker(struct mt76_worker *w); 342 315 void mt7921_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e); 343 316 int mt7921_init_tx_queues(struct mt7921_phy *phy, int idx, int n_desc); 344 317 void mt7921_tx_token_put(struct mt7921_dev *dev); ··· 368 335 bool enable); 369 336 int mt7921_mcu_set_bss_pm(struct mt7921_dev 
*dev, struct ieee80211_vif *vif, 370 337 bool enable); 371 - int mt7921_mcu_update_arp_filter(struct ieee80211_hw *hw, 372 - struct ieee80211_vif *vif, 373 - struct ieee80211_bss_conf *info); 374 338 int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev); 375 339 int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev); 376 340 void mt7921_pm_wake_work(struct work_struct *work); ··· 378 348 bool enable); 379 349 void mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif); 380 350 void mt7921_coredump_work(struct work_struct *work); 381 - int mt7921_mcu_update_arp_filter(struct ieee80211_hw *hw, 382 - struct ieee80211_vif *vif, 383 - struct ieee80211_bss_conf *info); 384 351 int mt7921_wfsys_reset(struct mt7921_dev *dev); 352 + int mt7921_get_txpwr_info(struct mt7921_dev *dev, struct mt7921_txpwr *txpwr); 385 353 #endif
+29 -1
drivers/net/wireless/mediatek/mt76/mt7921/pci.c
··· 61 61 if (intr & MT_INT_TX_DONE_MCU) 62 62 mask |= MT_INT_TX_DONE_MCU; 63 63 64 + if (intr & MT_INT_MCU_CMD) { 65 + u32 intr_sw; 66 + 67 + intr_sw = mt76_rr(dev, MT_MCU_CMD); 68 + /* ack MCU2HOST_SW_INT_STA */ 69 + mt76_wr(dev, MT_MCU_CMD, intr_sw); 70 + if (intr_sw & MT_MCU_CMD_WAKE_RX_PCIE) { 71 + mask |= MT_INT_RX_DONE_DATA; 72 + intr |= MT_INT_RX_DONE_DATA; 73 + } 74 + } 75 + 64 76 mt76_set_irq_mask(&dev->mt76, MT_WFDMA0_HOST_INT_ENA, mask, 0); 65 77 66 78 if (intr & MT_INT_TX_DONE_ALL) ··· 99 87 .survey_flags = SURVEY_INFO_TIME_TX | 100 88 SURVEY_INFO_TIME_RX | 101 89 SURVEY_INFO_TIME_BSS_RX, 90 + .token_size = MT7921_TOKEN_SIZE, 102 91 .tx_prepare_skb = mt7921_tx_prepare_skb, 103 92 .tx_complete_skb = mt7921_tx_complete_skb, 104 93 .rx_skb = mt7921_queue_rx_skb, ··· 202 189 return err; 203 190 } 204 191 192 + if (!dev->pm.enable) 193 + mt76_connac_mcu_set_deep_sleep(&dev->mt76, true); 194 + 205 195 napi_disable(&mdev->tx_napi); 206 196 mt76_worker_disable(&mdev->tx_worker); 207 197 208 198 mt76_for_each_q_rx(mdev, i) { 209 199 napi_disable(&mdev->napi[i]); 210 200 } 211 - tasklet_kill(&dev->irq_tasklet); 212 201 213 202 pci_enable_wake(pdev, pci_choose_state(pdev, state), true); 214 203 ··· 225 210 226 211 /* disable interrupt */ 227 212 mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0); 213 + mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0); 214 + synchronize_irq(pdev->irq); 215 + tasklet_kill(&dev->irq_tasklet); 228 216 229 217 err = mt7921_mcu_fw_pmctrl(dev); 230 218 if (err) ··· 245 227 napi_enable(&mdev->napi[i]); 246 228 } 247 229 napi_enable(&mdev->tx_napi); 230 + 231 + if (!dev->pm.enable) 232 + mt76_connac_mcu_set_deep_sleep(&dev->mt76, false); 233 + 248 234 if (hif_suspend) 249 235 mt76_connac_mcu_set_hif_suspend(mdev, false); 250 236 ··· 271 249 if (err < 0) 272 250 return err; 273 251 252 + mt7921_wpdma_reinit_cond(dev); 253 + 274 254 /* enable interrupt */ 275 255 mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff); 276 256 mt7921_irq_enable(dev, MT_INT_RX_DONE_ALL | 
MT_INT_TX_DONE_ALL | 277 257 MT_INT_MCU_CMD); 258 + mt76_set(dev, MT_MCU2HOST_SW_INT_ENA, MT_MCU_CMD_WAKE_RX_PCIE); 278 259 279 260 /* put dma enabled */ 280 261 mt76_set(dev, MT_WFDMA0_GLO_CFG, ··· 290 265 } 291 266 napi_enable(&mdev->tx_napi); 292 267 napi_schedule(&mdev->tx_napi); 268 + 269 + if (!dev->pm.enable) 270 + mt76_connac_mcu_set_deep_sleep(&dev->mt76, false); 293 271 294 272 if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state)) 295 273 err = mt76_connac_mcu_set_hif_suspend(mdev, false);
+10 -7
drivers/net/wireless/mediatek/mt76/mt7921/regs.h
··· 251 251 #define MT_WFDMA0_BUSY_ENA_TX_FIFO1 BIT(1) 252 252 #define MT_WFDMA0_BUSY_ENA_RX_FIFO BIT(2) 253 253 254 - #define MT_MCU_CMD MT_WFDMA0(0x1f0) 255 - #define MT_MCU_CMD_STOP_DMA_FW_RELOAD BIT(1) 256 - #define MT_MCU_CMD_STOP_DMA BIT(2) 257 - #define MT_MCU_CMD_RESET_DONE BIT(3) 258 - #define MT_MCU_CMD_RECOVERY_DONE BIT(4) 259 - #define MT_MCU_CMD_NORMAL_STATE BIT(5) 260 - #define MT_MCU_CMD_ERROR_MASK GENMASK(5, 1) 254 + #define MT_MCU_CMD MT_WFDMA0(0x1f0) 255 + #define MT_MCU_CMD_WAKE_RX_PCIE BIT(0) 256 + #define MT_MCU_CMD_STOP_DMA_FW_RELOAD BIT(1) 257 + #define MT_MCU_CMD_STOP_DMA BIT(2) 258 + #define MT_MCU_CMD_RESET_DONE BIT(3) 259 + #define MT_MCU_CMD_RECOVERY_DONE BIT(4) 260 + #define MT_MCU_CMD_NORMAL_STATE BIT(5) 261 + #define MT_MCU_CMD_ERROR_MASK GENMASK(5, 1) 262 + 263 + #define MT_MCU2HOST_SW_INT_ENA MT_WFDMA0(0x1f4) 261 264 262 265 #define MT_WFDMA0_HOST_INT_STA MT_WFDMA0(0x200) 263 266 #define HOST_RX_DONE_INT_STS0 BIT(0) /* Rx mcu */
+127 -32
drivers/net/wireless/mediatek/mt76/testmode.c
··· 62 62 spin_unlock_bh(&q->lock); 63 63 } 64 64 65 + static u32 66 + mt76_testmode_max_mpdu_len(struct mt76_phy *phy, u8 tx_rate_mode) 67 + { 68 + switch (tx_rate_mode) { 69 + case MT76_TM_TX_MODE_HT: 70 + return IEEE80211_MAX_MPDU_LEN_HT_7935; 71 + case MT76_TM_TX_MODE_VHT: 72 + case MT76_TM_TX_MODE_HE_SU: 73 + case MT76_TM_TX_MODE_HE_EXT_SU: 74 + case MT76_TM_TX_MODE_HE_TB: 75 + case MT76_TM_TX_MODE_HE_MU: 76 + if (phy->sband_5g.sband.vht_cap.cap & 77 + IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991) 78 + return IEEE80211_MAX_MPDU_LEN_VHT_7991; 79 + return IEEE80211_MAX_MPDU_LEN_VHT_11454; 80 + case MT76_TM_TX_MODE_CCK: 81 + case MT76_TM_TX_MODE_OFDM: 82 + default: 83 + return IEEE80211_MAX_FRAME_LEN; 84 + } 85 + } 65 86 66 - static int 67 - mt76_testmode_tx_init(struct mt76_phy *phy) 87 + static void 88 + mt76_testmode_free_skb(struct mt76_phy *phy) 68 89 { 69 90 struct mt76_testmode_data *td = &phy->test; 70 - struct ieee80211_tx_info *info; 71 - struct ieee80211_hdr *hdr; 72 - struct sk_buff *skb; 91 + struct sk_buff *skb = td->tx_skb; 92 + 93 + if (!skb) 94 + return; 95 + 96 + if (skb_has_frag_list(skb)) { 97 + kfree_skb_list(skb_shinfo(skb)->frag_list); 98 + skb_shinfo(skb)->frag_list = NULL; 99 + } 100 + 101 + dev_kfree_skb(skb); 102 + td->tx_skb = NULL; 103 + } 104 + 105 + int mt76_testmode_alloc_skb(struct mt76_phy *phy, u32 len) 106 + { 107 + #define MT_TXP_MAX_LEN 4095 73 108 u16 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA | 74 109 IEEE80211_FCTL_FROMDS; 75 - struct ieee80211_tx_rate *rate; 76 - u8 max_nss = hweight8(phy->antenna_mask); 110 + struct mt76_testmode_data *td = &phy->test; 77 111 bool ext_phy = phy != &phy->dev->phy; 112 + struct sk_buff **frag_tail, *head; 113 + struct ieee80211_tx_info *info; 114 + struct ieee80211_hdr *hdr; 115 + u32 max_len, head_len; 116 + int nfrags, i; 78 117 79 - if (td->tx_antenna_mask) 80 - max_nss = min_t(u8, max_nss, hweight8(td->tx_antenna_mask)); 118 + max_len = mt76_testmode_max_mpdu_len(phy, 
td->tx_rate_mode); 119 + if (len > max_len) 120 + len = max_len; 121 + else if (len < sizeof(struct ieee80211_hdr)) 122 + len = sizeof(struct ieee80211_hdr); 81 123 82 - skb = alloc_skb(td->tx_msdu_len, GFP_KERNEL); 83 - if (!skb) 124 + nfrags = len / MT_TXP_MAX_LEN; 125 + head_len = nfrags ? MT_TXP_MAX_LEN : len; 126 + 127 + if (len > IEEE80211_MAX_FRAME_LEN) 128 + fc |= IEEE80211_STYPE_QOS_DATA; 129 + 130 + head = alloc_skb(head_len, GFP_KERNEL); 131 + if (!head) 84 132 return -ENOMEM; 85 133 86 - dev_kfree_skb(td->tx_skb); 87 - td->tx_skb = skb; 88 - hdr = __skb_put_zero(skb, td->tx_msdu_len); 134 + hdr = __skb_put_zero(head, head_len); 89 135 hdr->frame_control = cpu_to_le16(fc); 90 136 memcpy(hdr->addr1, phy->macaddr, sizeof(phy->macaddr)); 91 137 memcpy(hdr->addr2, phy->macaddr, sizeof(phy->macaddr)); 92 138 memcpy(hdr->addr3, phy->macaddr, sizeof(phy->macaddr)); 139 + skb_set_queue_mapping(head, IEEE80211_AC_BE); 93 140 94 - info = IEEE80211_SKB_CB(skb); 141 + info = IEEE80211_SKB_CB(head); 95 142 info->flags = IEEE80211_TX_CTL_INJECTED | 96 143 IEEE80211_TX_CTL_NO_ACK | 97 144 IEEE80211_TX_CTL_NO_PS_BUFFER; ··· 146 99 if (ext_phy) 147 100 info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY; 148 101 102 + frag_tail = &skb_shinfo(head)->frag_list; 103 + 104 + for (i = 0; i < nfrags; i++) { 105 + struct sk_buff *frag; 106 + u16 frag_len; 107 + 108 + if (i == nfrags - 1) 109 + frag_len = len % MT_TXP_MAX_LEN; 110 + else 111 + frag_len = MT_TXP_MAX_LEN; 112 + 113 + frag = alloc_skb(frag_len, GFP_KERNEL); 114 + if (!frag) 115 + return -ENOMEM; 116 + 117 + __skb_put_zero(frag, frag_len); 118 + head->len += frag->len; 119 + head->data_len += frag->len; 120 + 121 + if (*frag_tail) { 122 + (*frag_tail)->next = frag; 123 + frag_tail = &frag; 124 + } else { 125 + *frag_tail = frag; 126 + } 127 + } 128 + 129 + mt76_testmode_free_skb(phy); 130 + td->tx_skb = head; 131 + 132 + return 0; 133 + } 134 + EXPORT_SYMBOL(mt76_testmode_alloc_skb); 135 + 136 + static int 137 + 
mt76_testmode_tx_init(struct mt76_phy *phy) 138 + { 139 + struct mt76_testmode_data *td = &phy->test; 140 + struct ieee80211_tx_info *info; 141 + struct ieee80211_tx_rate *rate; 142 + u8 max_nss = hweight8(phy->antenna_mask); 143 + int ret; 144 + 145 + ret = mt76_testmode_alloc_skb(phy, td->tx_mpdu_len); 146 + if (ret) 147 + return ret; 148 + 149 149 if (td->tx_rate_mode > MT76_TM_TX_MODE_VHT) 150 150 goto out; 151 151 152 + if (td->tx_antenna_mask) 153 + max_nss = min_t(u8, max_nss, hweight8(td->tx_antenna_mask)); 154 + 155 + info = IEEE80211_SKB_CB(td->tx_skb); 152 156 rate = &info->control.rates[0]; 153 157 rate->count = 1; 154 158 rate->idx = td->tx_rate_idx; ··· 269 171 } 270 172 } 271 173 out: 272 - skb_set_queue_mapping(skb, IEEE80211_AC_BE); 273 - 274 174 return 0; 275 175 } 276 176 ··· 299 203 wait_event_timeout(dev->tx_wait, td->tx_done == td->tx_queued, 300 204 MT76_TM_TIMEOUT * HZ); 301 205 302 - dev_kfree_skb(td->tx_skb); 303 - td->tx_skb = NULL; 206 + mt76_testmode_free_skb(phy); 304 207 } 305 208 306 209 static inline void ··· 319 224 { 320 225 struct mt76_testmode_data *td = &phy->test; 321 226 322 - if (td->tx_msdu_len > 0) 227 + if (td->tx_mpdu_len > 0) 323 228 return; 324 229 325 - td->tx_msdu_len = 1024; 230 + td->tx_mpdu_len = 1024; 326 231 td->tx_count = 1; 327 232 td->tx_rate_mode = MT76_TM_TX_MODE_OFDM; 328 233 td->tx_rate_nss = 1; ··· 440 345 if (tb[MT76_TM_ATTR_TX_COUNT]) 441 346 td->tx_count = nla_get_u32(tb[MT76_TM_ATTR_TX_COUNT]); 442 347 443 - if (tb[MT76_TM_ATTR_TX_LENGTH]) { 444 - u32 val = nla_get_u32(tb[MT76_TM_ATTR_TX_LENGTH]); 445 - 446 - if (val > IEEE80211_MAX_FRAME_LEN || 447 - val < sizeof(struct ieee80211_hdr)) 448 - goto out; 449 - 450 - td->tx_msdu_len = val; 451 - } 452 - 453 348 if (tb[MT76_TM_ATTR_TX_RATE_IDX]) 454 349 td->tx_rate_idx = nla_get_u8(tb[MT76_TM_ATTR_TX_RATE_IDX]); 455 350 ··· 459 374 mt76_tm_get_u8(tb[MT76_TM_ATTR_TX_POWER_CONTROL], 460 375 &td->tx_power_control, 0, 1)) 461 376 goto out; 377 + 378 + if 
(tb[MT76_TM_ATTR_TX_LENGTH]) { 379 + u32 val = nla_get_u32(tb[MT76_TM_ATTR_TX_LENGTH]); 380 + 381 + if (val > mt76_testmode_max_mpdu_len(phy, td->tx_rate_mode) || 382 + val < sizeof(struct ieee80211_hdr)) 383 + goto out; 384 + 385 + td->tx_mpdu_len = val; 386 + } 462 387 463 388 if (tb[MT76_TM_ATTR_TX_IPG]) 464 389 td->tx_ipg = nla_get_u32(tb[MT76_TM_ATTR_TX_IPG]); ··· 601 506 goto out; 602 507 603 508 if (nla_put_u32(msg, MT76_TM_ATTR_TX_COUNT, td->tx_count) || 604 - nla_put_u32(msg, MT76_TM_ATTR_TX_LENGTH, td->tx_msdu_len) || 509 + nla_put_u32(msg, MT76_TM_ATTR_TX_LENGTH, td->tx_mpdu_len) || 605 510 nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_MODE, td->tx_rate_mode) || 606 511 nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_NSS, td->tx_rate_nss) || 607 512 nla_put_u8(msg, MT76_TM_ATTR_TX_RATE_IDX, td->tx_rate_idx) ||
+1 -1
drivers/net/wireless/mediatek/mt76/testmode.h
··· 21 21 * @MT76_TM_ATTR_TX_COUNT: configured number of frames to send when setting 22 22 * state to MT76_TM_STATE_TX_FRAMES (u32) 23 23 * @MT76_TM_ATTR_TX_PENDING: pending frames during MT76_TM_STATE_TX_FRAMES (u32) 24 - * @MT76_TM_ATTR_TX_LENGTH: packet tx msdu length (u32) 24 + * @MT76_TM_ATTR_TX_LENGTH: packet tx mpdu length (u32) 25 25 * @MT76_TM_ATTR_TX_RATE_MODE: packet tx mode (u8, see &enum mt76_testmode_tx_mode) 26 26 * @MT76_TM_ATTR_TX_RATE_NSS: packet tx number of spatial streams (u8) 27 27 * @MT76_TM_ATTR_TX_RATE_IDX: packet tx rate/MCS index (u8)
+73 -8
drivers/net/wireless/mediatek/mt76/tx.c
··· 213 213 if (phy->test.tx_queued == phy->test.tx_done) 214 214 wake_up(&dev->tx_wait); 215 215 216 - ieee80211_free_txskb(hw, skb); 216 + dev_kfree_skb_any(skb); 217 217 return; 218 218 } 219 219 #endif ··· 422 422 return idx; 423 423 424 424 do { 425 - if (test_bit(MT76_STATE_PM, &phy->state) || 426 - test_bit(MT76_RESET, &phy->state)) 425 + if (test_bit(MT76_RESET, &phy->state)) 427 426 return -EBUSY; 428 427 429 428 if (stop || mt76_txq_stopped(q)) ··· 462 463 while (1) { 463 464 int n_frames = 0; 464 465 465 - if (test_bit(MT76_STATE_PM, &phy->state) || 466 - test_bit(MT76_RESET, &phy->state)) 466 + if (test_bit(MT76_RESET, &phy->state)) 467 467 return -EBUSY; 468 468 469 469 if (dev->queue_ops->tx_cleanup && ··· 538 540 } 539 541 EXPORT_SYMBOL_GPL(mt76_txq_schedule_all); 540 542 541 - void mt76_tx_worker(struct mt76_worker *w) 543 + void mt76_tx_worker_run(struct mt76_dev *dev) 542 544 { 543 - struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker); 544 - 545 545 mt76_txq_schedule_all(&dev->phy); 546 546 if (dev->phy2) 547 547 mt76_txq_schedule_all(dev->phy2); ··· 550 554 if (dev->phy2 && dev->phy2->test.tx_pending) 551 555 mt76_testmode_tx_pending(dev->phy2); 552 556 #endif 557 + } 558 + EXPORT_SYMBOL_GPL(mt76_tx_worker_run); 559 + 560 + void mt76_tx_worker(struct mt76_worker *w) 561 + { 562 + struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker); 563 + 564 + mt76_tx_worker_run(dev); 553 565 } 554 566 555 567 void mt76_stop_tx_queues(struct mt76_phy *phy, struct ieee80211_sta *sta, ··· 648 644 spin_unlock_bh(&q->lock); 649 645 } 650 646 EXPORT_SYMBOL_GPL(mt76_queue_tx_complete); 647 + 648 + void __mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked) 649 + { 650 + struct mt76_phy *phy = &dev->phy, *phy2 = dev->phy2; 651 + struct mt76_queue *q, *q2 = NULL; 652 + 653 + q = phy->q_tx[0]; 654 + if (blocked == q->blocked) 655 + return; 656 + 657 + q->blocked = blocked; 658 + if (phy2) { 659 + q2 = phy2->q_tx[0]; 660 + q2->blocked = 
blocked; 661 + } 662 + 663 + if (!blocked) 664 + mt76_worker_schedule(&dev->tx_worker); 665 + } 666 + EXPORT_SYMBOL_GPL(__mt76_set_tx_blocked); 667 + 668 + int mt76_token_consume(struct mt76_dev *dev, struct mt76_txwi_cache **ptxwi) 669 + { 670 + int token; 671 + 672 + spin_lock_bh(&dev->token_lock); 673 + 674 + token = idr_alloc(&dev->token, *ptxwi, 0, dev->drv->token_size, 675 + GFP_ATOMIC); 676 + if (token >= 0) 677 + dev->token_count++; 678 + 679 + if (dev->token_count >= dev->drv->token_size - MT76_TOKEN_FREE_THR) 680 + __mt76_set_tx_blocked(dev, true); 681 + 682 + spin_unlock_bh(&dev->token_lock); 683 + 684 + return token; 685 + } 686 + EXPORT_SYMBOL_GPL(mt76_token_consume); 687 + 688 + struct mt76_txwi_cache * 689 + mt76_token_release(struct mt76_dev *dev, int token, bool *wake) 690 + { 691 + struct mt76_txwi_cache *txwi; 692 + 693 + spin_lock_bh(&dev->token_lock); 694 + 695 + txwi = idr_remove(&dev->token, token); 696 + if (txwi) 697 + dev->token_count--; 698 + 699 + if (dev->token_count < dev->drv->token_size - MT76_TOKEN_FREE_THR && 700 + dev->phy.q_tx[0]->blocked) 701 + *wake = true; 702 + 703 + spin_unlock_bh(&dev->token_lock); 704 + 705 + return txwi; 706 + } 707 + EXPORT_SYMBOL_GPL(mt76_token_release);
+4 -2
drivers/net/wireless/quantenna/qtnfmac/event.c
··· 570 570 return 0; 571 571 572 572 if (ev->ssid_len) { 573 - memcpy(auth.ssid.ssid, ev->ssid, ev->ssid_len); 574 - auth.ssid.ssid_len = ev->ssid_len; 573 + int len = clamp_val(ev->ssid_len, 0, IEEE80211_MAX_SSID_LEN); 574 + 575 + memcpy(auth.ssid.ssid, ev->ssid, len); 576 + auth.ssid.ssid_len = len; 575 577 } 576 578 577 579 auth.key_mgmt_suite = le32_to_cpu(ev->akm_suite);
+32
drivers/net/wireless/realtek/rtlwifi/core.c
··· 1018 1018 } 1019 1019 } 1020 1020 1021 + void rtl_update_beacon_work_callback(struct work_struct *work) 1022 + { 1023 + struct rtl_works *rtlworks = 1024 + container_of(work, struct rtl_works, update_beacon_work); 1025 + struct ieee80211_hw *hw = rtlworks->hw; 1026 + struct rtl_priv *rtlpriv = rtl_priv(hw); 1027 + struct ieee80211_vif *vif = rtlpriv->mac80211.vif; 1028 + 1029 + if (!vif) { 1030 + WARN_ONCE(true, "no vif to update beacon\n"); 1031 + return; 1032 + } 1033 + 1034 + mutex_lock(&rtlpriv->locks.conf_mutex); 1035 + send_beacon_frame(hw, vif); 1036 + mutex_unlock(&rtlpriv->locks.conf_mutex); 1037 + } 1038 + EXPORT_SYMBOL_GPL(rtl_update_beacon_work_callback); 1039 + 1021 1040 static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, 1022 1041 struct ieee80211_vif *vif, 1023 1042 struct ieee80211_bss_conf *bss_conf, ··· 1766 1747 rtlpriv->intf_ops->flush(hw, queues, drop); 1767 1748 } 1768 1749 1750 + static int rtl_op_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, 1751 + bool set) 1752 + { 1753 + struct rtl_priv *rtlpriv = rtl_priv(hw); 1754 + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 1755 + 1756 + if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CU) 1757 + schedule_work(&rtlpriv->works.update_beacon_work); 1758 + 1759 + return 0; 1760 + } 1761 + 1769 1762 /* Description: 1770 1763 * This routine deals with the Power Configuration CMD 1771 1764 * parsing for RTL8723/RTL8188E Series IC. ··· 1934 1903 .sta_add = rtl_op_sta_add, 1935 1904 .sta_remove = rtl_op_sta_remove, 1936 1905 .flush = rtl_op_flush, 1906 + .set_tim = rtl_op_set_tim, 1937 1907 }; 1938 1908 EXPORT_SYMBOL_GPL(rtl_ops); 1939 1909
+1
drivers/net/wireless/realtek/rtlwifi/core.h
··· 60 60 bool rtl_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb); 61 61 bool rtl_btc_status_false(void); 62 62 void rtl_dm_diginit(struct ieee80211_hw *hw, u32 cur_igval); 63 + void rtl_update_beacon_work_callback(struct work_struct *work); 63 64 64 65 #endif
+3
drivers/net/wireless/realtek/rtlwifi/usb.c
··· 805 805 806 806 tasklet_kill(&rtlusb->rx_work_tasklet); 807 807 cancel_work_sync(&rtlpriv->works.lps_change_work); 808 + cancel_work_sync(&rtlpriv->works.update_beacon_work); 808 809 809 810 flush_workqueue(rtlpriv->works.rtl_wq); 810 811 ··· 1032 1031 rtl_fill_h2c_cmd_work_callback); 1033 1032 INIT_WORK(&rtlpriv->works.lps_change_work, 1034 1033 rtl_lps_change_work_callback); 1034 + INIT_WORK(&rtlpriv->works.update_beacon_work, 1035 + rtl_update_beacon_work_callback); 1035 1036 1036 1037 rtlpriv->usb_data_index = 0; 1037 1038 init_completion(&rtlpriv->firmware_loading_complete);
+1
drivers/net/wireless/realtek/rtlwifi/wifi.h
··· 2486 2486 2487 2487 struct work_struct lps_change_work; 2488 2488 struct work_struct fill_h2c_cmd; 2489 + struct work_struct update_beacon_work; 2489 2490 }; 2490 2491 2491 2492 struct rtl_debug {
+91
drivers/net/wireless/realtek/rtw88/debug.c
··· 35 35 u32 addr; 36 36 u32 len; 37 37 } read_reg; 38 + struct { 39 + u8 bit; 40 + } dm_cap; 38 41 }; 42 + }; 43 + 44 + static const char * const rtw_dm_cap_strs[] = { 45 + [RTW_DM_CAP_NA] = "NA", 46 + [RTW_DM_CAP_TXGAPK] = "TXGAPK", 39 47 }; 40 48 41 49 static int rtw_debugfs_single_show(struct seq_file *m, void *v) ··· 861 853 return 0; 862 854 } 863 855 856 + static ssize_t rtw_debugfs_set_dm_cap(struct file *filp, 857 + const char __user *buffer, 858 + size_t count, loff_t *loff) 859 + { 860 + struct seq_file *seqpriv = (struct seq_file *)filp->private_data; 861 + struct rtw_debugfs_priv *debugfs_priv = seqpriv->private; 862 + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; 863 + struct rtw_dm_info *dm_info = &rtwdev->dm_info; 864 + int bit; 865 + bool en; 866 + 867 + if (kstrtoint_from_user(buffer, count, 10, &bit)) 868 + return -EINVAL; 869 + 870 + en = bit > 0; 871 + bit = abs(bit); 872 + 873 + if (bit >= RTW_DM_CAP_NUM) { 874 + rtw_warn(rtwdev, "unknown DM CAP %d\n", bit); 875 + return -EINVAL; 876 + } 877 + 878 + if (en) 879 + dm_info->dm_flags &= ~BIT(bit); 880 + else 881 + dm_info->dm_flags |= BIT(bit); 882 + 883 + debugfs_priv->dm_cap.bit = bit; 884 + 885 + return count; 886 + } 887 + 888 + static void dump_gapk_status(struct rtw_dev *rtwdev, struct seq_file *m) 889 + { 890 + struct rtw_dm_info *dm_info = &rtwdev->dm_info; 891 + struct rtw_gapk_info *txgapk = &rtwdev->dm_info.gapk; 892 + int i, path; 893 + u32 val; 894 + 895 + seq_printf(m, "\n(%2d) %c%s\n\n", RTW_DM_CAP_TXGAPK, 896 + dm_info->dm_flags & BIT(RTW_DM_CAP_TXGAPK) ? 
'-' : '+', 897 + rtw_dm_cap_strs[RTW_DM_CAP_TXGAPK]); 898 + 899 + for (path = 0; path < rtwdev->hal.rf_path_num; path++) { 900 + val = rtw_read_rf(rtwdev, path, RF_GAINTX, RFREG_MASK); 901 + seq_printf(m, "path %d:\n0x%x = 0x%x\n", path, RF_GAINTX, val); 902 + 903 + for (i = 0; i < RF_HW_OFFSET_NUM; i++) 904 + seq_printf(m, "[TXGAPK] offset %d %d\n", 905 + txgapk->rf3f_fs[path][i], i); 906 + seq_puts(m, "\n"); 907 + } 908 + } 909 + 910 + static int rtw_debugfs_get_dm_cap(struct seq_file *m, void *v) 911 + { 912 + struct rtw_debugfs_priv *debugfs_priv = m->private; 913 + struct rtw_dev *rtwdev = debugfs_priv->rtwdev; 914 + struct rtw_dm_info *dm_info = &rtwdev->dm_info; 915 + int i; 916 + 917 + switch (debugfs_priv->dm_cap.bit) { 918 + case RTW_DM_CAP_TXGAPK: 919 + dump_gapk_status(rtwdev, m); 920 + break; 921 + default: 922 + for (i = 1; i < RTW_DM_CAP_NUM; i++) { 923 + seq_printf(m, "(%2d) %c%s\n", i, 924 + dm_info->dm_flags & BIT(i) ? '-' : '+', 925 + rtw_dm_cap_strs[i]); 926 + } 927 + break; 928 + } 929 + debugfs_priv->dm_cap.bit = RTW_DM_CAP_NA; 930 + return 0; 931 + } 932 + 864 933 #define rtw_debug_impl_mac(page, addr) \ 865 934 static struct rtw_debugfs_priv rtw_debug_priv_mac_ ##page = { \ 866 935 .cb_read = rtw_debug_get_mac_page, \ ··· 1046 961 .cb_read = rtw_debugfs_get_fw_crash, 1047 962 }; 1048 963 964 + static struct rtw_debugfs_priv rtw_debug_priv_dm_cap = { 965 + .cb_write = rtw_debugfs_set_dm_cap, 966 + .cb_read = rtw_debugfs_get_dm_cap, 967 + }; 968 + 1049 969 #define rtw_debugfs_add_core(name, mode, fopname, parent) \ 1050 970 do { \ 1051 971 rtw_debug_priv_ ##name.rtwdev = rtwdev; \ ··· 1125 1035 rtw_debugfs_add_r(rf_dump); 1126 1036 rtw_debugfs_add_r(tx_pwr_tbl); 1127 1037 rtw_debugfs_add_rw(fw_crash); 1038 + rtw_debugfs_add_rw(dm_cap); 1128 1039 } 1129 1040 1130 1041 #endif /* CONFIG_RTW88_DEBUGFS */
+12
drivers/net/wireless/realtek/rtw88/fw.c
··· 350 350 } 351 351 EXPORT_SYMBOL(rtw_fw_do_iqk); 352 352 353 + void rtw_fw_inform_rfk_status(struct rtw_dev *rtwdev, bool start) 354 + { 355 + u8 h2c_pkt[H2C_PKT_SIZE] = {0}; 356 + 357 + SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_WIFI_CALIBRATION); 358 + 359 + RFK_SET_INFORM_START(h2c_pkt, start); 360 + 361 + rtw_fw_send_h2c_command(rtwdev, h2c_pkt); 362 + } 363 + EXPORT_SYMBOL(rtw_fw_inform_rfk_status); 364 + 353 365 void rtw_fw_query_bt_info(struct rtw_dev *rtwdev) 354 366 { 355 367 u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+5
drivers/net/wireless/realtek/rtw88/fw.h
··· 354 354 #define H2C_CMD_WL_CH_INFO 0x66 355 355 #define H2C_CMD_QUERY_BT_MP_INFO 0x67 356 356 #define H2C_CMD_BT_WIFI_CONTROL 0x69 357 + #define H2C_CMD_WIFI_CALIBRATION 0x6d 357 358 358 359 #define H2C_CMD_KEEP_ALIVE 0x03 359 360 #define H2C_CMD_DISCONNECT_DECISION 0x04 ··· 543 542 le32_get_bits(*((__le32 *)(_header) + 0x01), GENMASK(31, 16)) 544 543 #define GET_FW_DUMP_TLV_VAL(_header) \ 545 544 le32_get_bits(*((__le32 *)(_header) + 0x02), GENMASK(31, 0)) 545 + 546 + #define RFK_SET_INFORM_START(h2c_pkt, value) \ 547 + le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8)) 546 548 static inline struct rtw_c2h_cmd *get_c2h_from_skb(struct sk_buff *skb) 547 549 { 548 550 u32 pkt_offset; ··· 561 557 void rtw_fw_send_phydm_info(struct rtw_dev *rtwdev); 562 558 563 559 void rtw_fw_do_iqk(struct rtw_dev *rtwdev, struct rtw_iqk_para *para); 560 + void rtw_fw_inform_rfk_status(struct rtw_dev *rtwdev, bool start); 564 561 void rtw_fw_set_pwr_mode(struct rtw_dev *rtwdev); 565 562 void rtw_fw_set_pg_info(struct rtw_dev *rtwdev); 566 563 void rtw_fw_query_bt_info(struct rtw_dev *rtwdev);
+31
drivers/net/wireless/realtek/rtw88/main.h
··· 1502 1502 } result; 1503 1503 }; 1504 1504 1505 + enum rtw_rf_band { 1506 + RF_BAND_2G_CCK, 1507 + RF_BAND_2G_OFDM, 1508 + RF_BAND_5G_L, 1509 + RF_BAND_5G_M, 1510 + RF_BAND_5G_H, 1511 + RF_BAND_MAX 1512 + }; 1513 + 1514 + #define RF_GAIN_NUM 11 1515 + #define RF_HW_OFFSET_NUM 10 1516 + 1517 + struct rtw_gapk_info { 1518 + u32 rf3f_bp[RF_BAND_MAX][RF_GAIN_NUM][RTW_RF_PATH_MAX]; 1519 + u32 rf3f_fs[RTW_RF_PATH_MAX][RF_GAIN_NUM]; 1520 + bool txgapk_bp_done; 1521 + s8 offset[RF_GAIN_NUM][RTW_RF_PATH_MAX]; 1522 + s8 fianl_offset[RF_GAIN_NUM][RTW_RF_PATH_MAX]; 1523 + u8 read_txgain; 1524 + u8 channel; 1525 + }; 1526 + 1505 1527 struct rtw_cfo_track { 1506 1528 bool is_adjust; 1507 1529 u8 crystal_cap; ··· 1535 1513 1536 1514 #define RRSR_INIT_2G 0x15f 1537 1515 #define RRSR_INIT_5G 0x150 1516 + 1517 + enum rtw_dm_cap { 1518 + RTW_DM_CAP_NA, 1519 + RTW_DM_CAP_TXGAPK, 1520 + RTW_DM_CAP_NUM 1521 + }; 1538 1522 1539 1523 struct rtw_dm_info { 1540 1524 u32 cck_fa_cnt; ··· 1610 1582 struct ewma_evm ewma_evm[RTW_EVM_NUM]; 1611 1583 struct ewma_snr ewma_snr[RTW_SNR_NUM]; 1612 1584 1585 + u32 dm_flags; /* enum rtw_dm_cap */ 1613 1586 struct rtw_iqk_info iqk; 1587 + struct rtw_gapk_info gapk; 1588 + bool is_bt_iqk_timeout; 1614 1589 }; 1615 1590 1616 1591 struct rtw_efuse {
+22 -7
drivers/net/wireless/realtek/rtw88/pci.c
··· 581 581 { 582 582 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; 583 583 584 + rtw_pci_napi_start(rtwdev); 585 + 584 586 spin_lock_bh(&rtwpci->irq_lock); 587 + rtwpci->running = true; 585 588 rtw_pci_enable_interrupt(rtwdev, rtwpci, false); 586 589 spin_unlock_bh(&rtwpci->irq_lock); 587 - 588 - rtw_pci_napi_start(rtwdev); 589 590 590 591 return 0; 591 592 } ··· 594 593 static void rtw_pci_stop(struct rtw_dev *rtwdev) 595 594 { 596 595 struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; 596 + struct pci_dev *pdev = rtwpci->pdev; 597 597 598 + spin_lock_bh(&rtwpci->irq_lock); 599 + rtwpci->running = false; 600 + rtw_pci_disable_interrupt(rtwdev, rtwpci); 601 + spin_unlock_bh(&rtwpci->irq_lock); 602 + 603 + synchronize_irq(pdev->irq); 598 604 rtw_pci_napi_stop(rtwdev); 599 605 600 606 spin_lock_bh(&rtwpci->irq_lock); 601 - rtw_pci_disable_interrupt(rtwdev, rtwpci); 602 607 rtw_pci_dma_release(rtwdev, rtwpci); 603 608 spin_unlock_bh(&rtwpci->irq_lock); 604 609 } ··· 957 950 return ret; 958 951 959 952 ring = &rtwpci->tx_rings[queue]; 953 + spin_lock_bh(&rtwpci->irq_lock); 960 954 if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) { 961 955 ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb)); 962 956 ring->queue_stopped = true; 963 957 } 958 + spin_unlock_bh(&rtwpci->irq_lock); 964 959 965 960 return 0; 966 961 } ··· 977 968 struct sk_buff *skb; 978 969 u32 count; 979 970 u32 bd_idx_addr; 980 - u32 bd_idx, cur_rp; 971 + u32 bd_idx, cur_rp, rp_idx; 981 972 u16 q_map; 982 973 983 974 ring = &rtwpci->tx_rings[hw_queue]; ··· 986 977 bd_idx = rtw_read32(rtwdev, bd_idx_addr); 987 978 cur_rp = bd_idx >> 16; 988 979 cur_rp &= TRX_BD_IDX_MASK; 980 + rp_idx = ring->r.rp; 989 981 if (cur_rp >= ring->r.rp) 990 982 count = cur_rp - ring->r.rp; 991 983 else ··· 1010 1000 } 1011 1001 1012 1002 if (ring->queue_stopped && 1013 - avail_desc(ring->r.wp, ring->r.rp, ring->r.len) > 4) { 1003 + avail_desc(ring->r.wp, rp_idx, ring->r.len) > 4) { 1014 1004 
q_map = skb_get_queue_mapping(skb); 1015 1005 ieee80211_wake_queue(hw, q_map); 1016 1006 ring->queue_stopped = false; 1017 1007 } 1008 + 1009 + if (++rp_idx >= ring->r.len) 1010 + rp_idx = 0; 1018 1011 1019 1012 skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz); 1020 1013 ··· 1219 1206 rtw_fw_c2h_cmd_isr(rtwdev); 1220 1207 1221 1208 /* all of the jobs for this interrupt have been done */ 1222 - rtw_pci_enable_interrupt(rtwdev, rtwpci, rx); 1209 + if (rtwpci->running) 1210 + rtw_pci_enable_interrupt(rtwdev, rtwpci, rx); 1223 1211 spin_unlock_bh(&rtwpci->irq_lock); 1224 1212 1225 1213 return IRQ_HANDLED; ··· 1641 1627 if (work_done < budget) { 1642 1628 napi_complete_done(napi, work_done); 1643 1629 spin_lock_bh(&rtwpci->irq_lock); 1644 - rtw_pci_enable_interrupt(rtwdev, rtwpci, false); 1630 + if (rtwpci->running) 1631 + rtw_pci_enable_interrupt(rtwdev, rtwpci, false); 1645 1632 spin_unlock_bh(&rtwpci->irq_lock); 1646 1633 /* When ISR happens during polling and before napi_complete 1647 1634 * while no further data is received. Data on the dma_ring will
+1
drivers/net/wireless/realtek/rtw88/pci.h
··· 211 211 spinlock_t irq_lock; 212 212 u32 irq_mask[4]; 213 213 bool irq_enabled; 214 + bool running; 214 215 215 216 /* napi structure */ 216 217 struct net_device netdev;
+8
drivers/net/wireless/realtek/rtw88/reg.h
··· 129 129 #define REG_MCU_TST_CFG 0x84 130 130 #define VAL_FW_TRIGGER 0x1 131 131 132 + #define REG_PMC_DBG_CTRL1 0xa8 133 + #define BITS_PMC_BT_IQK_STS GENMASK(22, 21) 134 + 132 135 #define REG_EFUSE_ACCESS 0x00CF 133 136 #define EFUSE_ACCESS_ON 0x69 134 137 #define EFUSE_ACCESS_OFF 0x00 ··· 363 360 #define REG_TX_PTCL_CTRL 0x0520 364 361 #define BIT_SIFS_BK_EN BIT(12) 365 362 #define REG_TXPAUSE 0x0522 363 + #define BIT_AC_QUEUE GENMASK(7, 0) 366 364 #define REG_RD_CTRL 0x0524 367 365 #define BIT_DIS_TXOP_CFE BIT(10) 368 366 #define BIT_DIS_LSIG_CFE BIT(9) ··· 648 644 #define RF_WLSEL 0x02 649 645 #define RF_DTXLOK 0x08 650 646 #define RF_CFGCH 0x18 647 + #define BIT_BAND GENMASK(18, 16) 651 648 #define RF_RCK 0x1d 652 649 #define RF_LUTWA 0x33 653 650 #define RF_LUTWD1 0x3e 654 651 #define RF_LUTWD0 0x3f 652 + #define BIT_GAIN_EXT BIT(12) 653 + #define BIT_DATA_L GENMASK(11, 0) 655 654 #define RF_T_METER 0x42 656 655 #define RF_BSPAD 0x54 657 656 #define RF_GAINTX 0x56 ··· 671 664 #define RF_RCKD 0xde 672 665 #define RF_TXADBG 0xde 673 666 #define RF_LUTDBG 0xdf 667 + #define BIT_TXA_TANK BIT(4) 674 668 #define RF_LUTWE2 0xee 675 669 #define RF_LUTWE 0xef 676 670
+2 -1
drivers/net/wireless/realtek/rtw88/rtw8821c.c
··· 581 581 pkt_stat->phy_status = GET_RX_DESC_PHYST(rx_desc); 582 582 pkt_stat->icv_err = GET_RX_DESC_ICV_ERR(rx_desc); 583 583 pkt_stat->crc_err = GET_RX_DESC_CRC32(rx_desc); 584 - pkt_stat->decrypted = !GET_RX_DESC_SWDEC(rx_desc); 584 + pkt_stat->decrypted = !GET_RX_DESC_SWDEC(rx_desc) && 585 + GET_RX_DESC_ENC_TYPE(rx_desc) != RX_DESC_ENC_NONE; 585 586 pkt_stat->is_c2h = GET_RX_DESC_C2H(rx_desc); 586 587 pkt_stat->pkt_len = GET_RX_DESC_PKT_LEN(rx_desc); 587 588 pkt_stat->drv_info_sz = GET_RX_DESC_DRV_INFO_SIZE(rx_desc);
+716 -8
drivers/net/wireless/realtek/rtw88/rtw8822c.c
··· 1095 1095 if (pg_pa_bias == EFUSE_READ_FAIL) 1096 1096 return; 1097 1097 pg_pa_bias = FIELD_GET(PPG_PABIAS_MASK, pg_pa_bias); 1098 - rtw_write_rf(rtwdev, path, 0x60, RF_PABIAS_2G_MASK, pg_pa_bias); 1098 + rtw_write_rf(rtwdev, path, RF_PA, RF_PABIAS_2G_MASK, pg_pa_bias); 1099 1099 } 1100 1100 for (path = 0; path < rtwdev->hal.rf_path_num; path++) { 1101 1101 rtw_read8_physical_efuse(rtwdev, rf_efuse_5g[path], 1102 1102 &pg_pa_bias); 1103 1103 pg_pa_bias = FIELD_GET(PPG_PABIAS_MASK, pg_pa_bias); 1104 - rtw_write_rf(rtwdev, path, 0x60, RF_PABIAS_5G_MASK, pg_pa_bias); 1104 + rtw_write_rf(rtwdev, path, RF_PA, RF_PABIAS_5G_MASK, pg_pa_bias); 1105 1105 } 1106 + } 1107 + 1108 + static void rtw8822c_rfk_handshake(struct rtw_dev *rtwdev, bool is_before_k) 1109 + { 1110 + struct rtw_dm_info *dm = &rtwdev->dm_info; 1111 + u8 u1b_tmp; 1112 + u8 u4b_tmp; 1113 + int ret; 1114 + 1115 + if (is_before_k) { 1116 + rtw_dbg(rtwdev, RTW_DBG_RFK, 1117 + "[RFK] WiFi / BT RFK handshake start!!\n"); 1118 + 1119 + if (!dm->is_bt_iqk_timeout) { 1120 + ret = read_poll_timeout(rtw_read32_mask, u4b_tmp, 1121 + u4b_tmp == 0, 20, 600000, false, 1122 + rtwdev, REG_PMC_DBG_CTRL1, 1123 + BITS_PMC_BT_IQK_STS); 1124 + if (ret) { 1125 + rtw_dbg(rtwdev, RTW_DBG_RFK, 1126 + "[RFK] Wait BT IQK finish timeout!!\n"); 1127 + dm->is_bt_iqk_timeout = true; 1128 + } 1129 + } 1130 + 1131 + rtw_fw_inform_rfk_status(rtwdev, true); 1132 + 1133 + ret = read_poll_timeout(rtw_read8_mask, u1b_tmp, 1134 + u1b_tmp == 1, 20, 100000, false, 1135 + rtwdev, REG_ARFR4, BIT_WL_RFK); 1136 + if (ret) 1137 + rtw_dbg(rtwdev, RTW_DBG_RFK, 1138 + "[RFK] Send WiFi RFK start H2C cmd FAIL!!\n"); 1139 + } else { 1140 + rtw_fw_inform_rfk_status(rtwdev, false); 1141 + ret = read_poll_timeout(rtw_read8_mask, u1b_tmp, 1142 + u1b_tmp == 1, 20, 100000, false, 1143 + rtwdev, REG_ARFR4, 1144 + BIT_WL_RFK); 1145 + if (ret) 1146 + rtw_dbg(rtwdev, RTW_DBG_RFK, 1147 + "[RFK] Send WiFi RFK finish H2C cmd FAIL!!\n"); 1148 + 1149 + rtw_dbg(rtwdev, 
RTW_DBG_RFK, 1150 + "[RFK] WiFi / BT RFK handshake finish!!\n"); 1151 + } 1152 + } 1153 + 1154 + static void rtw8822c_rfk_power_save(struct rtw_dev *rtwdev, 1155 + bool is_power_save) 1156 + { 1157 + u8 path; 1158 + 1159 + for (path = 0; path < rtwdev->hal.rf_path_num; path++) { 1160 + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SEL_PATH, path); 1161 + rtw_write32_mask(rtwdev, REG_DPD_CTL1_S0, BIT_PS_EN, 1162 + is_power_save ? 0 : 1); 1163 + } 1164 + } 1165 + 1166 + static void rtw8822c_txgapk_backup_bb_reg(struct rtw_dev *rtwdev, const u32 reg[], 1167 + u32 reg_backup[], u32 reg_num) 1168 + { 1169 + u32 i; 1170 + 1171 + for (i = 0; i < reg_num; i++) { 1172 + reg_backup[i] = rtw_read32(rtwdev, reg[i]); 1173 + 1174 + rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] Backup BB 0x%x = 0x%x\n", 1175 + reg[i], reg_backup[i]); 1176 + } 1177 + } 1178 + 1179 + static void rtw8822c_txgapk_reload_bb_reg(struct rtw_dev *rtwdev, 1180 + const u32 reg[], u32 reg_backup[], 1181 + u32 reg_num) 1182 + { 1183 + u32 i; 1184 + 1185 + for (i = 0; i < reg_num; i++) { 1186 + rtw_write32(rtwdev, reg[i], reg_backup[i]); 1187 + rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] Reload BB 0x%x = 0x%x\n", 1188 + reg[i], reg_backup[i]); 1189 + } 1190 + } 1191 + 1192 + static bool check_rf_status(struct rtw_dev *rtwdev, u8 status) 1193 + { 1194 + u8 reg_rf0_a, reg_rf0_b; 1195 + 1196 + reg_rf0_a = (u8)rtw_read_rf(rtwdev, RF_PATH_A, 1197 + RF_MODE_TRXAGC, BIT_RF_MODE); 1198 + reg_rf0_b = (u8)rtw_read_rf(rtwdev, RF_PATH_B, 1199 + RF_MODE_TRXAGC, BIT_RF_MODE); 1200 + 1201 + if (reg_rf0_a == status || reg_rf0_b == status) 1202 + return false; 1203 + 1204 + return true; 1205 + } 1206 + 1207 + static void rtw8822c_txgapk_tx_pause(struct rtw_dev *rtwdev) 1208 + { 1209 + bool status; 1210 + int ret; 1211 + 1212 + rtw_write8(rtwdev, REG_TXPAUSE, BIT_AC_QUEUE); 1213 + rtw_write32_mask(rtwdev, REG_TX_FIFO, BIT_STOP_TX, 0x2); 1214 + 1215 + ret = read_poll_timeout_atomic(check_rf_status, status, status, 1216 + 2, 5000, false, rtwdev, 
2); 1217 + if (ret) 1218 + rtw_warn(rtwdev, "failed to pause TX\n"); 1219 + 1220 + rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] Tx pause!!\n"); 1221 + } 1222 + 1223 + static void rtw8822c_txgapk_bb_dpk(struct rtw_dev *rtwdev, u8 path) 1224 + { 1225 + rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] ======>%s\n", __func__); 1226 + 1227 + rtw_write32_mask(rtwdev, REG_ENFN, BIT_IQK_DPK_EN, 0x1); 1228 + rtw_write32_mask(rtwdev, REG_CH_DELAY_EXTR2, 1229 + BIT_IQK_DPK_CLOCK_SRC, 0x1); 1230 + rtw_write32_mask(rtwdev, REG_CH_DELAY_EXTR2, 1231 + BIT_IQK_DPK_RESET_SRC, 0x1); 1232 + rtw_write32_mask(rtwdev, REG_CH_DELAY_EXTR2, BIT_EN_IOQ_IQK_DPK, 0x1); 1233 + rtw_write32_mask(rtwdev, REG_CH_DELAY_EXTR2, BIT_TST_IQK2SET_SRC, 0x0); 1234 + rtw_write32_mask(rtwdev, REG_CCA_OFF, BIT_CCA_ON_BY_PW, 0x1ff); 1235 + 1236 + if (path == RF_PATH_A) { 1237 + rtw_write32_mask(rtwdev, REG_RFTXEN_GCK_A, 1238 + BIT_RFTXEN_GCK_FORCE_ON, 0x1); 1239 + rtw_write32_mask(rtwdev, REG_3WIRE, BIT_DIS_SHARERX_TXGAT, 0x1); 1240 + rtw_write32_mask(rtwdev, REG_DIS_SHARE_RX_A, 1241 + BIT_TX_SCALE_0DB, 0x1); 1242 + rtw_write32_mask(rtwdev, REG_3WIRE, BIT_3WIRE_EN, 0x0); 1243 + } else if (path == RF_PATH_B) { 1244 + rtw_write32_mask(rtwdev, REG_RFTXEN_GCK_B, 1245 + BIT_RFTXEN_GCK_FORCE_ON, 0x1); 1246 + rtw_write32_mask(rtwdev, REG_3WIRE2, 1247 + BIT_DIS_SHARERX_TXGAT, 0x1); 1248 + rtw_write32_mask(rtwdev, REG_DIS_SHARE_RX_B, 1249 + BIT_TX_SCALE_0DB, 0x1); 1250 + rtw_write32_mask(rtwdev, REG_3WIRE2, BIT_3WIRE_EN, 0x0); 1251 + } 1252 + rtw_write32_mask(rtwdev, REG_CCKSB, BIT_BBMODE, 0x2); 1253 + } 1254 + 1255 + static void rtw8822c_txgapk_afe_dpk(struct rtw_dev *rtwdev, u8 path) 1256 + { 1257 + u32 reg; 1258 + 1259 + rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] ======>%s\n", __func__); 1260 + 1261 + if (path == RF_PATH_A) { 1262 + reg = REG_ANAPAR_A; 1263 + } else if (path == RF_PATH_B) { 1264 + reg = REG_ANAPAR_B; 1265 + } else { 1266 + rtw_err(rtwdev, "[TXGAPK] unknown path %d!!\n", path); 1267 + return; 1268 + } 1269 + 1270 + 
rtw_write32_mask(rtwdev, REG_IQK_CTRL, MASKDWORD, MASKDWORD); 1271 + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x700f0001); 1272 + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x700f0001); 1273 + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x701f0001); 1274 + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x702f0001); 1275 + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x703f0001); 1276 + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x704f0001); 1277 + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x705f0001); 1278 + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x706f0001); 1279 + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x707f0001); 1280 + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x708f0001); 1281 + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x709f0001); 1282 + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70af0001); 1283 + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70bf0001); 1284 + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70cf0001); 1285 + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70df0001); 1286 + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70ef0001); 1287 + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70ff0001); 1288 + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70ff0001); 1289 + } 1290 + 1291 + static void rtw8822c_txgapk_afe_dpk_restore(struct rtw_dev *rtwdev, u8 path) 1292 + { 1293 + u32 reg; 1294 + 1295 + rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] ======>%s\n", __func__); 1296 + 1297 + if (path == RF_PATH_A) { 1298 + reg = REG_ANAPAR_A; 1299 + } else if (path == RF_PATH_B) { 1300 + reg = REG_ANAPAR_B; 1301 + } else { 1302 + rtw_err(rtwdev, "[TXGAPK] unknown path %d!!\n", path); 1303 + return; 1304 + } 1305 + rtw_write32_mask(rtwdev, REG_IQK_CTRL, MASKDWORD, 0xffa1005e); 1306 + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x700b8041); 1307 + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70144041); 1308 + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70244041); 1309 + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70344041); 1310 + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70444041); 1311 + 
rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x705b8041); 1312 + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70644041); 1313 + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x707b8041); 1314 + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x708b8041); 1315 + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x709b8041); 1316 + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70ab8041); 1317 + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70bb8041); 1318 + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70cb8041); 1319 + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70db8041); 1320 + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70eb8041); 1321 + rtw_write32_mask(rtwdev, reg, MASKDWORD, 0x70fb8041); 1322 + } 1323 + 1324 + static void rtw8822c_txgapk_bb_dpk_restore(struct rtw_dev *rtwdev, u8 path) 1325 + { 1326 + rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] ======>%s\n", __func__); 1327 + 1328 + rtw_write_rf(rtwdev, path, RF_DEBUG, BIT_DE_TX_GAIN, 0x0); 1329 + rtw_write_rf(rtwdev, path, RF_DIS_BYPASS_TXBB, BIT_TIA_BYPASS, 0x0); 1330 + rtw_write_rf(rtwdev, path, RF_DIS_BYPASS_TXBB, BIT_TXBB, 0x0); 1331 + 1332 + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SEL_PATH, 0x0); 1333 + rtw_write32_mask(rtwdev, REG_IQK_CTL1, BIT_TX_CFIR, 0x0); 1334 + rtw_write32_mask(rtwdev, REG_SINGLE_TONE_SW, BIT_IRQ_TEST_MODE, 0x0); 1335 + rtw_write32_mask(rtwdev, REG_R_CONFIG, MASKBYTE0, 0x00); 1336 + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SEL_PATH, 0x1); 1337 + rtw_write32_mask(rtwdev, REG_IQK_CTL1, BIT_TX_CFIR, 0x0); 1338 + rtw_write32_mask(rtwdev, REG_SINGLE_TONE_SW, BIT_IRQ_TEST_MODE, 0x0); 1339 + rtw_write32_mask(rtwdev, REG_R_CONFIG, MASKBYTE0, 0x00); 1340 + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SEL_PATH, 0x0); 1341 + rtw_write32_mask(rtwdev, REG_CCA_OFF, BIT_CCA_ON_BY_PW, 0x0); 1342 + 1343 + if (path == RF_PATH_A) { 1344 + rtw_write32_mask(rtwdev, REG_RFTXEN_GCK_A, 1345 + BIT_RFTXEN_GCK_FORCE_ON, 0x0); 1346 + rtw_write32_mask(rtwdev, REG_3WIRE, BIT_DIS_SHARERX_TXGAT, 0x0); 1347 + rtw_write32_mask(rtwdev, REG_DIS_SHARE_RX_A, 1348 + 
BIT_TX_SCALE_0DB, 0x0); 1349 + rtw_write32_mask(rtwdev, REG_3WIRE, BIT_3WIRE_EN, 0x3); 1350 + } else if (path == RF_PATH_B) { 1351 + rtw_write32_mask(rtwdev, REG_RFTXEN_GCK_B, 1352 + BIT_RFTXEN_GCK_FORCE_ON, 0x0); 1353 + rtw_write32_mask(rtwdev, REG_3WIRE2, 1354 + BIT_DIS_SHARERX_TXGAT, 0x0); 1355 + rtw_write32_mask(rtwdev, REG_DIS_SHARE_RX_B, 1356 + BIT_TX_SCALE_0DB, 0x0); 1357 + rtw_write32_mask(rtwdev, REG_3WIRE2, BIT_3WIRE_EN, 0x3); 1358 + } 1359 + 1360 + rtw_write32_mask(rtwdev, REG_CCKSB, BIT_BBMODE, 0x0); 1361 + rtw_write32_mask(rtwdev, REG_IQK_CTL1, BIT_CFIR_EN, 0x5); 1362 + } 1363 + 1364 + static bool _rtw8822c_txgapk_gain_valid(struct rtw_dev *rtwdev, u32 gain) 1365 + { 1366 + if ((FIELD_GET(BIT_GAIN_TX_PAD_H, gain) >= 0xc) && 1367 + (FIELD_GET(BIT_GAIN_TX_PAD_L, gain) >= 0xe)) 1368 + return true; 1369 + 1370 + return false; 1371 + } 1372 + 1373 + static void _rtw8822c_txgapk_write_gain_bb_table(struct rtw_dev *rtwdev, 1374 + u8 band, u8 path) 1375 + { 1376 + struct rtw_gapk_info *txgapk = &rtwdev->dm_info.gapk; 1377 + u32 v, tmp_3f = 0; 1378 + u8 gain, check_txgain; 1379 + 1380 + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SEL_PATH, path); 1381 + 1382 + switch (band) { 1383 + case RF_BAND_2G_OFDM: 1384 + rtw_write32_mask(rtwdev, REG_TABLE_SEL, BIT_Q_GAIN_SEL, 0x0); 1385 + break; 1386 + case RF_BAND_5G_L: 1387 + rtw_write32_mask(rtwdev, REG_TABLE_SEL, BIT_Q_GAIN_SEL, 0x2); 1388 + break; 1389 + case RF_BAND_5G_M: 1390 + rtw_write32_mask(rtwdev, REG_TABLE_SEL, BIT_Q_GAIN_SEL, 0x3); 1391 + break; 1392 + case RF_BAND_5G_H: 1393 + rtw_write32_mask(rtwdev, REG_TABLE_SEL, BIT_Q_GAIN_SEL, 0x4); 1394 + break; 1395 + default: 1396 + break; 1397 + } 1398 + 1399 + rtw_write32_mask(rtwdev, REG_TX_GAIN_SET, MASKBYTE0, 0x88); 1400 + 1401 + check_txgain = 0; 1402 + for (gain = 0; gain < RF_GAIN_NUM; gain++) { 1403 + v = txgapk->rf3f_bp[band][gain][path]; 1404 + if (_rtw8822c_txgapk_gain_valid(rtwdev, v)) { 1405 + if (!check_txgain) { 1406 + tmp_3f = 
txgapk->rf3f_bp[band][gain][path]; 1407 + check_txgain = 1; 1408 + } 1409 + rtw_dbg(rtwdev, RTW_DBG_RFK, 1410 + "[TXGAPK] tx_gain=0x%03X >= 0xCEX\n", 1411 + txgapk->rf3f_bp[band][gain][path]); 1412 + } else { 1413 + tmp_3f = txgapk->rf3f_bp[band][gain][path]; 1414 + } 1415 + 1416 + rtw_write32_mask(rtwdev, REG_TABLE_SEL, BIT_Q_GAIN, tmp_3f); 1417 + rtw_write32_mask(rtwdev, REG_TABLE_SEL, BIT_I_GAIN, gain); 1418 + rtw_write32_mask(rtwdev, REG_TABLE_SEL, BIT_GAIN_RST, 0x1); 1419 + rtw_write32_mask(rtwdev, REG_TABLE_SEL, BIT_GAIN_RST, 0x0); 1420 + 1421 + rtw_dbg(rtwdev, RTW_DBG_RFK, 1422 + "[TXGAPK] Band=%d 0x1b98[11:0]=0x%03X path=%d\n", 1423 + band, tmp_3f, path); 1424 + } 1425 + } 1426 + 1427 + static void rtw8822c_txgapk_write_gain_bb_table(struct rtw_dev *rtwdev) 1428 + { 1429 + u8 path, band; 1430 + 1431 + rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] ======>%s channel=%d\n", 1432 + __func__, rtwdev->dm_info.gapk.channel); 1433 + 1434 + for (band = 0; band < RF_BAND_MAX; band++) { 1435 + for (path = 0; path < rtwdev->hal.rf_path_num; path++) { 1436 + _rtw8822c_txgapk_write_gain_bb_table(rtwdev, 1437 + band, path); 1438 + } 1439 + } 1440 + } 1441 + 1442 + static void rtw8822c_txgapk_read_offset(struct rtw_dev *rtwdev, u8 path) 1443 + { 1444 + static const u32 cfg1_1b00[2] = {0x00000d18, 0x00000d2a}; 1445 + static const u32 cfg2_1b00[2] = {0x00000d19, 0x00000d2b}; 1446 + static const u32 set_pi[2] = {REG_RSV_CTRL, REG_WLRF1}; 1447 + static const u32 path_setting[2] = {REG_ORITXCODE, REG_ORITXCODE2}; 1448 + struct rtw_gapk_info *txgapk = &rtwdev->dm_info.gapk; 1449 + u8 channel = txgapk->channel; 1450 + u32 val; 1451 + int i; 1452 + 1453 + if (path >= ARRAY_SIZE(cfg1_1b00) || 1454 + path >= ARRAY_SIZE(cfg2_1b00) || 1455 + path >= ARRAY_SIZE(set_pi) || 1456 + path >= ARRAY_SIZE(path_setting)) { 1457 + rtw_warn(rtwdev, "[TXGAPK] wrong path %d\n", path); 1458 + return; 1459 + } 1460 + 1461 + rtw_write32_mask(rtwdev, REG_ANTMAP0, BIT_ANT_PATH, path + 1); 1462 + 
rtw_write32_mask(rtwdev, REG_TXLGMAP, MASKDWORD, 0xe4e40000); 1463 + rtw_write32_mask(rtwdev, REG_TXANTSEG, BIT_ANTSEG, 0x3); 1464 + rtw_write32_mask(rtwdev, path_setting[path], MASK20BITS, 0x33312); 1465 + rtw_write32_mask(rtwdev, path_setting[path], BIT_PATH_EN, 0x1); 1466 + rtw_write32_mask(rtwdev, set_pi[path], BITS_RFC_DIRECT, 0x0); 1467 + rtw_write_rf(rtwdev, path, RF_LUTDBG, BIT_TXA_TANK, 0x1); 1468 + rtw_write_rf(rtwdev, path, RF_IDAC, BIT_TX_MODE, 0x820); 1469 + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SEL_PATH, path); 1470 + rtw_write32_mask(rtwdev, REG_IQKSTAT, MASKBYTE0, 0x0); 1471 + 1472 + rtw_write32_mask(rtwdev, REG_TX_TONE_IDX, MASKBYTE0, 0x018); 1473 + fsleep(1000); 1474 + if (channel >= 1 && channel <= 14) 1475 + rtw_write32_mask(rtwdev, REG_R_CONFIG, MASKBYTE0, BIT_2G_SWING); 1476 + else 1477 + rtw_write32_mask(rtwdev, REG_R_CONFIG, MASKBYTE0, BIT_5G_SWING); 1478 + fsleep(1000); 1479 + 1480 + rtw_write32_mask(rtwdev, REG_NCTL0, MASKDWORD, cfg1_1b00[path]); 1481 + rtw_write32_mask(rtwdev, REG_NCTL0, MASKDWORD, cfg2_1b00[path]); 1482 + 1483 + read_poll_timeout(rtw_read32_mask, val, 1484 + val == 0x55, 1000, 100000, false, 1485 + rtwdev, REG_RPT_CIP, BIT_RPT_CIP_STATUS); 1486 + 1487 + rtw_write32_mask(rtwdev, set_pi[path], BITS_RFC_DIRECT, 0x2); 1488 + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SEL_PATH, path); 1489 + rtw_write32_mask(rtwdev, REG_RXSRAM_CTL, BIT_RPT_EN, 0x1); 1490 + rtw_write32_mask(rtwdev, REG_RXSRAM_CTL, BIT_RPT_SEL, 0x12); 1491 + rtw_write32_mask(rtwdev, REG_TX_GAIN_SET, BIT_GAPK_RPT_IDX, 0x3); 1492 + val = rtw_read32(rtwdev, REG_STAT_RPT); 1493 + 1494 + txgapk->offset[0][path] = (s8)FIELD_GET(BIT_GAPK_RPT0, val); 1495 + txgapk->offset[1][path] = (s8)FIELD_GET(BIT_GAPK_RPT1, val); 1496 + txgapk->offset[2][path] = (s8)FIELD_GET(BIT_GAPK_RPT2, val); 1497 + txgapk->offset[3][path] = (s8)FIELD_GET(BIT_GAPK_RPT3, val); 1498 + txgapk->offset[4][path] = (s8)FIELD_GET(BIT_GAPK_RPT4, val); 1499 + txgapk->offset[5][path] = 
(s8)FIELD_GET(BIT_GAPK_RPT5, val); 1500 + txgapk->offset[6][path] = (s8)FIELD_GET(BIT_GAPK_RPT6, val); 1501 + txgapk->offset[7][path] = (s8)FIELD_GET(BIT_GAPK_RPT7, val); 1502 + 1503 + rtw_write32_mask(rtwdev, REG_TX_GAIN_SET, BIT_GAPK_RPT_IDX, 0x4); 1504 + val = rtw_read32(rtwdev, REG_STAT_RPT); 1505 + 1506 + txgapk->offset[8][path] = (s8)FIELD_GET(BIT_GAPK_RPT0, val); 1507 + txgapk->offset[9][path] = (s8)FIELD_GET(BIT_GAPK_RPT1, val); 1508 + 1509 + for (i = 0; i < RF_HW_OFFSET_NUM; i++) 1510 + if (txgapk->offset[i][path] & BIT(3)) 1511 + txgapk->offset[i][path] = txgapk->offset[i][path] | 1512 + 0xf0; 1513 + for (i = 0; i < RF_HW_OFFSET_NUM; i++) 1514 + rtw_dbg(rtwdev, RTW_DBG_RFK, 1515 + "[TXGAPK] offset %d %d path=%d\n", 1516 + txgapk->offset[i][path], i, path); 1517 + } 1518 + 1519 + static void rtw8822c_txgapk_calculate_offset(struct rtw_dev *rtwdev, u8 path) 1520 + { 1521 + static const u32 bb_reg[] = {REG_ANTMAP0, REG_TXLGMAP, REG_TXANTSEG, 1522 + REG_ORITXCODE, REG_ORITXCODE2}; 1523 + struct rtw_gapk_info *txgapk = &rtwdev->dm_info.gapk; 1524 + u8 channel = txgapk->channel; 1525 + u32 reg_backup[ARRAY_SIZE(bb_reg)] = {0}; 1526 + 1527 + rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] ======>%s channel=%d\n", 1528 + __func__, channel); 1529 + 1530 + rtw8822c_txgapk_backup_bb_reg(rtwdev, bb_reg, 1531 + reg_backup, ARRAY_SIZE(bb_reg)); 1532 + 1533 + if (channel >= 1 && channel <= 14) { 1534 + rtw_write32_mask(rtwdev, 1535 + REG_SINGLE_TONE_SW, BIT_IRQ_TEST_MODE, 0x0); 1536 + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SEL_PATH, path); 1537 + rtw_write32_mask(rtwdev, REG_R_CONFIG, BIT_IQ_SWITCH, 0x3f); 1538 + rtw_write32_mask(rtwdev, REG_IQK_CTL1, BIT_TX_CFIR, 0x0); 1539 + rtw_write_rf(rtwdev, path, RF_DEBUG, BIT_DE_TX_GAIN, 0x1); 1540 + rtw_write_rf(rtwdev, path, RF_MODE_TRXAGC, RFREG_MASK, 0x5000f); 1541 + rtw_write_rf(rtwdev, path, RF_TX_GAIN_OFFSET, BIT_RF_GAIN, 0x0); 1542 + rtw_write_rf(rtwdev, path, RF_RXG_GAIN, BIT_RXG_GAIN, 0x1); 1543 + rtw_write_rf(rtwdev, path, 
RF_MODE_TRXAGC, BIT_RXAGC, 0x0f); 1544 + rtw_write_rf(rtwdev, path, RF_DEBUG, BIT_DE_TRXBW, 0x1); 1545 + rtw_write_rf(rtwdev, path, RF_BW_TRXBB, BIT_BW_TXBB, 0x1); 1546 + rtw_write_rf(rtwdev, path, RF_BW_TRXBB, BIT_BW_RXBB, 0x0); 1547 + rtw_write_rf(rtwdev, path, RF_EXT_TIA_BW, BIT_PW_EXT_TIA, 0x1); 1548 + 1549 + rtw_write32_mask(rtwdev, REG_IQKSTAT, MASKBYTE0, 0x00); 1550 + rtw_write32_mask(rtwdev, REG_TABLE_SEL, BIT_Q_GAIN_SEL, 0x0); 1551 + 1552 + rtw8822c_txgapk_read_offset(rtwdev, path); 1553 + rtw_dbg(rtwdev, RTW_DBG_RFK, "=============================\n"); 1554 + 1555 + } else { 1556 + rtw_write32_mask(rtwdev, 1557 + REG_SINGLE_TONE_SW, BIT_IRQ_TEST_MODE, 0x0); 1558 + rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SEL_PATH, path); 1559 + rtw_write32_mask(rtwdev, REG_R_CONFIG, BIT_IQ_SWITCH, 0x3f); 1560 + rtw_write32_mask(rtwdev, REG_IQK_CTL1, BIT_TX_CFIR, 0x0); 1561 + rtw_write_rf(rtwdev, path, RF_DEBUG, BIT_DE_TX_GAIN, 0x1); 1562 + rtw_write_rf(rtwdev, path, RF_MODE_TRXAGC, RFREG_MASK, 0x50011); 1563 + rtw_write_rf(rtwdev, path, RF_TXA_LB_SW, BIT_TXA_LB_ATT, 0x3); 1564 + rtw_write_rf(rtwdev, path, RF_TXA_LB_SW, BIT_LB_ATT, 0x3); 1565 + rtw_write_rf(rtwdev, path, RF_TXA_LB_SW, BIT_LB_SW, 0x1); 1566 + rtw_write_rf(rtwdev, path, 1567 + RF_RXA_MIX_GAIN, BIT_RXA_MIX_GAIN, 0x2); 1568 + rtw_write_rf(rtwdev, path, RF_MODE_TRXAGC, BIT_RXAGC, 0x12); 1569 + rtw_write_rf(rtwdev, path, RF_DEBUG, BIT_DE_TRXBW, 0x1); 1570 + rtw_write_rf(rtwdev, path, RF_BW_TRXBB, BIT_BW_RXBB, 0x0); 1571 + rtw_write_rf(rtwdev, path, RF_EXT_TIA_BW, BIT_PW_EXT_TIA, 0x1); 1572 + rtw_write_rf(rtwdev, path, RF_MODE_TRXAGC, BIT_RF_MODE, 0x5); 1573 + 1574 + rtw_write32_mask(rtwdev, REG_IQKSTAT, MASKBYTE0, 0x0); 1575 + 1576 + if (channel >= 36 && channel <= 64) 1577 + rtw_write32_mask(rtwdev, 1578 + REG_TABLE_SEL, BIT_Q_GAIN_SEL, 0x2); 1579 + else if (channel >= 100 && channel <= 144) 1580 + rtw_write32_mask(rtwdev, 1581 + REG_TABLE_SEL, BIT_Q_GAIN_SEL, 0x3); 1582 + else if (channel >= 149 && channel <= 
177) 1583 + rtw_write32_mask(rtwdev, 1584 + REG_TABLE_SEL, BIT_Q_GAIN_SEL, 0x4); 1585 + 1586 + rtw8822c_txgapk_read_offset(rtwdev, path); 1587 + rtw_dbg(rtwdev, RTW_DBG_RFK, "=============================\n"); 1588 + } 1589 + rtw8822c_txgapk_reload_bb_reg(rtwdev, bb_reg, 1590 + reg_backup, ARRAY_SIZE(bb_reg)); 1591 + } 1592 + 1593 + static void rtw8822c_txgapk_rf_restore(struct rtw_dev *rtwdev, u8 path) 1594 + { 1595 + rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] ======>%s\n", __func__); 1596 + 1597 + if (path >= rtwdev->hal.rf_path_num) 1598 + return; 1599 + 1600 + rtw_write_rf(rtwdev, path, RF_MODE_TRXAGC, BIT_RF_MODE, 0x3); 1601 + rtw_write_rf(rtwdev, path, RF_DEBUG, BIT_DE_TRXBW, 0x0); 1602 + rtw_write_rf(rtwdev, path, RF_EXT_TIA_BW, BIT_PW_EXT_TIA, 0x0); 1603 + } 1604 + 1605 + static u32 rtw8822c_txgapk_cal_gain(struct rtw_dev *rtwdev, u32 gain, s8 offset) 1606 + { 1607 + u32 gain_x2, new_gain; 1608 + 1609 + rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] ======>%s\n", __func__); 1610 + 1611 + if (_rtw8822c_txgapk_gain_valid(rtwdev, gain)) { 1612 + new_gain = gain; 1613 + rtw_dbg(rtwdev, RTW_DBG_RFK, 1614 + "[TXGAPK] gain=0x%03X(>=0xCEX) offset=%d new_gain=0x%03X\n", 1615 + gain, offset, new_gain); 1616 + return new_gain; 1617 + } 1618 + 1619 + gain_x2 = (gain << 1) + offset; 1620 + new_gain = (gain_x2 >> 1) | (gain_x2 & BIT(0) ? 
BIT_GAIN_EXT : 0); 1621 + 1622 + rtw_dbg(rtwdev, RTW_DBG_RFK, 1623 + "[TXGAPK] gain=0x%X offset=%d new_gain=0x%X\n", 1624 + gain, offset, new_gain); 1625 + 1626 + return new_gain; 1627 + } 1628 + 1629 + static void rtw8822c_txgapk_write_tx_gain(struct rtw_dev *rtwdev) 1630 + { 1631 + struct rtw_gapk_info *txgapk = &rtwdev->dm_info.gapk; 1632 + u32 i, j, tmp = 0x20, tmp_3f, v; 1633 + s8 offset_tmp[RF_GAIN_NUM] = {0}; 1634 + u8 path, band = RF_BAND_2G_OFDM, channel = txgapk->channel; 1635 + 1636 + rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] ======>%s\n", __func__); 1637 + 1638 + if (channel >= 1 && channel <= 14) { 1639 + tmp = 0x20; 1640 + band = RF_BAND_2G_OFDM; 1641 + } else if (channel >= 36 && channel <= 64) { 1642 + tmp = 0x200; 1643 + band = RF_BAND_5G_L; 1644 + } else if (channel >= 100 && channel <= 144) { 1645 + tmp = 0x280; 1646 + band = RF_BAND_5G_M; 1647 + } else if (channel >= 149 && channel <= 177) { 1648 + tmp = 0x300; 1649 + band = RF_BAND_5G_H; 1650 + } else { 1651 + rtw_err(rtwdev, "[TXGAPK] unknown channel %d!!\n", channel); 1652 + return; 1653 + } 1654 + 1655 + for (path = 0; path < rtwdev->hal.rf_path_num; path++) { 1656 + for (i = 0; i < RF_GAIN_NUM; i++) { 1657 + offset_tmp[i] = 0; 1658 + for (j = i; j < RF_GAIN_NUM; j++) { 1659 + v = txgapk->rf3f_bp[band][j][path]; 1660 + if (_rtw8822c_txgapk_gain_valid(rtwdev, v)) 1661 + continue; 1662 + 1663 + offset_tmp[i] += txgapk->offset[j][path]; 1664 + txgapk->fianl_offset[i][path] = offset_tmp[i]; 1665 + } 1666 + 1667 + v = txgapk->rf3f_bp[band][i][path]; 1668 + if (_rtw8822c_txgapk_gain_valid(rtwdev, v)) { 1669 + rtw_dbg(rtwdev, RTW_DBG_RFK, 1670 + "[TXGAPK] tx_gain=0x%03X >= 0xCEX\n", 1671 + txgapk->rf3f_bp[band][i][path]); 1672 + } else { 1673 + txgapk->rf3f_fs[path][i] = offset_tmp[i]; 1674 + rtw_dbg(rtwdev, RTW_DBG_RFK, 1675 + "[TXGAPK] offset %d %d\n", 1676 + offset_tmp[i], i); 1677 + } 1678 + } 1679 + 1680 + rtw_write_rf(rtwdev, path, RF_LUTWE2, RFREG_MASK, 0x10000); 1681 + for (i = 0; i < 
RF_GAIN_NUM; i++) { 1682 + rtw_write_rf(rtwdev, path, 1683 + RF_LUTWA, RFREG_MASK, tmp + i); 1684 + 1685 + tmp_3f = rtw8822c_txgapk_cal_gain(rtwdev, 1686 + txgapk->rf3f_bp[band][i][path], 1687 + offset_tmp[i]); 1688 + rtw_write_rf(rtwdev, path, RF_LUTWD0, 1689 + BIT_GAIN_EXT | BIT_DATA_L, tmp_3f); 1690 + 1691 + rtw_dbg(rtwdev, RTW_DBG_RFK, 1692 + "[TXGAPK] 0x33=0x%05X 0x3f=0x%04X\n", 1693 + tmp + i, tmp_3f); 1694 + } 1695 + rtw_write_rf(rtwdev, path, RF_LUTWE2, RFREG_MASK, 0x0); 1696 + } 1697 + } 1698 + 1699 + static void rtw8822c_txgapk_save_all_tx_gain_table(struct rtw_dev *rtwdev) 1700 + { 1701 + struct rtw_gapk_info *txgapk = &rtwdev->dm_info.gapk; 1702 + static const u32 three_wire[2] = {REG_3WIRE, REG_3WIRE2}; 1703 + static const u8 ch_num[RF_BAND_MAX] = {1, 1, 36, 100, 149}; 1704 + static const u8 band_num[RF_BAND_MAX] = {0x0, 0x0, 0x1, 0x3, 0x5}; 1705 + static const u8 cck[RF_BAND_MAX] = {0x1, 0x0, 0x0, 0x0, 0x0}; 1706 + u8 path, band, gain, rf0_idx; 1707 + u32 rf18, v; 1708 + 1709 + if (rtwdev->dm_info.dm_flags & BIT(RTW_DM_CAP_TXGAPK)) 1710 + return; 1711 + 1712 + rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] ======>%s\n", __func__); 1713 + 1714 + if (txgapk->read_txgain == 1) { 1715 + rtw_dbg(rtwdev, RTW_DBG_RFK, 1716 + "[TXGAPK] Already Read txgapk->read_txgain return!!!\n"); 1717 + rtw8822c_txgapk_write_gain_bb_table(rtwdev); 1718 + return; 1719 + } 1720 + 1721 + for (band = 0; band < RF_BAND_MAX; band++) { 1722 + for (path = 0; path < rtwdev->hal.rf_path_num; path++) { 1723 + rf18 = rtw_read_rf(rtwdev, path, RF_CFGCH, RFREG_MASK); 1724 + 1725 + rtw_write32_mask(rtwdev, 1726 + three_wire[path], BIT_3WIRE_EN, 0x0); 1727 + rtw_write_rf(rtwdev, path, 1728 + RF_CFGCH, MASKBYTE0, ch_num[band]); 1729 + rtw_write_rf(rtwdev, path, 1730 + RF_CFGCH, BIT_BAND, band_num[band]); 1731 + rtw_write_rf(rtwdev, path, 1732 + RF_BW_TRXBB, BIT_DBG_CCK_CCA, cck[band]); 1733 + rtw_write_rf(rtwdev, path, 1734 + RF_BW_TRXBB, BIT_TX_CCK_IND, cck[band]); 1735 + gain = 0; 1736 + for 
(rf0_idx = 1; rf0_idx < 32; rf0_idx += 3) { 1737 + rtw_write_rf(rtwdev, path, RF_MODE_TRXAGC, 1738 + MASKBYTE0, rf0_idx); 1739 + v = rtw_read_rf(rtwdev, path, 1740 + RF_TX_RESULT, RFREG_MASK); 1741 + txgapk->rf3f_bp[band][gain][path] = v & BIT_DATA_L; 1742 + 1743 + rtw_dbg(rtwdev, RTW_DBG_RFK, 1744 + "[TXGAPK] 0x5f=0x%03X band=%d path=%d\n", 1745 + txgapk->rf3f_bp[band][gain][path], 1746 + band, path); 1747 + gain++; 1748 + } 1749 + rtw_write_rf(rtwdev, path, RF_CFGCH, RFREG_MASK, rf18); 1750 + rtw_write32_mask(rtwdev, 1751 + three_wire[path], BIT_3WIRE_EN, 0x3); 1752 + } 1753 + } 1754 + rtw8822c_txgapk_write_gain_bb_table(rtwdev); 1755 + txgapk->read_txgain = 1; 1756 + } 1757 + 1758 + static void rtw8822c_txgapk(struct rtw_dev *rtwdev) 1759 + { 1760 + static const u32 bb_reg[2] = {REG_TX_PTCL_CTRL, REG_TX_FIFO}; 1761 + struct rtw_gapk_info *txgapk = &rtwdev->dm_info.gapk; 1762 + u32 bb_reg_backup[2]; 1763 + u8 path; 1764 + 1765 + rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] ======>%s\n", __func__); 1766 + 1767 + rtw8822c_txgapk_save_all_tx_gain_table(rtwdev); 1768 + 1769 + if (txgapk->read_txgain == 0) { 1770 + rtw_dbg(rtwdev, RTW_DBG_RFK, 1771 + "[TXGAPK] txgapk->read_txgain == 0 return!!!\n"); 1772 + return; 1773 + } 1774 + 1775 + if (rtwdev->efuse.power_track_type >= 4 && 1776 + rtwdev->efuse.power_track_type <= 7) { 1777 + rtw_dbg(rtwdev, RTW_DBG_RFK, 1778 + "[TXGAPK] Normal Mode in TSSI mode. 
return!!!\n"); 1779 + return; 1780 + } 1781 + 1782 + rtw8822c_txgapk_backup_bb_reg(rtwdev, bb_reg, 1783 + bb_reg_backup, ARRAY_SIZE(bb_reg)); 1784 + rtw8822c_txgapk_tx_pause(rtwdev); 1785 + for (path = 0; path < rtwdev->hal.rf_path_num; path++) { 1786 + txgapk->channel = rtw_read_rf(rtwdev, path, 1787 + RF_CFGCH, RFREG_MASK) & MASKBYTE0; 1788 + rtw8822c_txgapk_bb_dpk(rtwdev, path); 1789 + rtw8822c_txgapk_afe_dpk(rtwdev, path); 1790 + rtw8822c_txgapk_calculate_offset(rtwdev, path); 1791 + rtw8822c_txgapk_rf_restore(rtwdev, path); 1792 + rtw8822c_txgapk_afe_dpk_restore(rtwdev, path); 1793 + rtw8822c_txgapk_bb_dpk_restore(rtwdev, path); 1794 + } 1795 + rtw8822c_txgapk_write_tx_gain(rtwdev); 1796 + rtw8822c_txgapk_reload_bb_reg(rtwdev, bb_reg, 1797 + bb_reg_backup, ARRAY_SIZE(bb_reg)); 1798 + } 1799 + 1800 + static void rtw8822c_do_gapk(struct rtw_dev *rtwdev) 1801 + { 1802 + struct rtw_dm_info *dm = &rtwdev->dm_info; 1803 + 1804 + if (dm->dm_flags & BIT(RTW_DM_CAP_TXGAPK)) { 1805 + rtw_dbg(rtwdev, RTW_DBG_RFK, "[TXGAPK] feature disable!!!\n"); 1806 + return; 1807 + } 1808 + rtw8822c_rfk_handshake(rtwdev, true); 1809 + rtw8822c_txgapk(rtwdev); 1810 + rtw8822c_rfk_handshake(rtwdev, false); 1106 1811 } 1107 1812 1108 1813 static void rtw8822c_rf_init(struct rtw_dev *rtwdev) ··· 3251 2546 rtw_write_rf(rtwdev, path, RF_RXAGC_OFFSET, RFREG_MASK, 0x0); 3252 2547 rtw_write32(rtwdev, REG_NCTL0, 0x8 | (path << 1)); 3253 2548 if (rtwdev->dm_info.dpk_info.dpk_band == RTW_BAND_2G) 3254 - rtw_write32(rtwdev, REG_DPD_LUT3, 0x1f100000); 2549 + rtw_write32(rtwdev, REG_DPD_CTL1_S1, 0x1f100000); 3255 2550 else 3256 - rtw_write32(rtwdev, REG_DPD_LUT3, 0x1f0d0000); 2551 + rtw_write32(rtwdev, REG_DPD_CTL1_S1, 0x1f0d0000); 3257 2552 rtw_write32_mask(rtwdev, REG_DPD_LUT0, BIT_GLOSS_DB, 0x4); 3258 2553 rtw_write32_mask(rtwdev, REG_IQK_CTL1, BIT_TX_CFIR, 0x3); 3259 2554 } ··· 3271 2566 3272 2567 rtw_write_rf(rtwdev, path, RF_DEBUG, BIT_DE_TX_GAIN, 0x1); 3273 2568 rtw_write_rf(rtwdev, path, 
RF_DEBUG, BIT_DE_PWR_TRIM, 0x1); 3274 - rtw_write_rf(rtwdev, path, RF_TX_GAIN_OFFSET, BIT_TX_OFFSET_VAL, 0x0); 2569 + rtw_write_rf(rtwdev, path, RF_TX_GAIN_OFFSET, BIT_BB_GAIN, 0x0); 3275 2570 rtw_write_rf(rtwdev, path, RF_TX_GAIN, RFREG_MASK, ori_txbb); 3276 2571 3277 2572 if (rtwdev->dm_info.dpk_info.dpk_band == RTW_BAND_2G) { 3278 - rtw_write_rf(rtwdev, path, RF_TX_GAIN_OFFSET, BIT_LB_ATT, 0x1); 2573 + rtw_write_rf(rtwdev, path, RF_TX_GAIN_OFFSET, BIT_RF_GAIN, 0x1); 3279 2574 rtw_write_rf(rtwdev, path, RF_RXG_GAIN, BIT_RXG_GAIN, 0x0); 3280 2575 } else { 3281 2576 rtw_write_rf(rtwdev, path, RF_TXA_LB_SW, BIT_TXA_LB_ATT, 0x0); ··· 4022 3317 rtw_write32_mask(rtwdev, REG_NCTL0, BIT_SUBPAGE, 4023 3318 0x8 | (path << 1)); 4024 3319 if (dpk_info->dpk_band == RTW_BAND_2G) 4025 - rtw_write32(rtwdev, REG_DPD_LUT3, 0x1f100000); 3320 + rtw_write32(rtwdev, REG_DPD_CTL1_S1, 0x1f100000); 4026 3321 else 4027 - rtw_write32(rtwdev, REG_DPD_LUT3, 0x1f0d0000); 3322 + rtw_write32(rtwdev, REG_DPD_CTL1_S1, 0x1f0d0000); 4028 3323 4029 3324 rtw_write8(rtwdev, REG_DPD_AGC, dpk_info->dpk_txagc[path]); 4030 3325 ··· 4108 3403 4109 3404 static void rtw8822c_phy_calibration(struct rtw_dev *rtwdev) 4110 3405 { 3406 + rtw8822c_rfk_power_save(rtwdev, false); 3407 + rtw8822c_do_gapk(rtwdev); 4111 3408 rtw8822c_do_iqk(rtwdev); 4112 3409 rtw8822c_do_dpk(rtwdev); 3410 + rtw8822c_rfk_power_save(rtwdev, true); 4113 3411 } 4114 3412 4115 3413 static void rtw8822c_dpk_track(struct rtw_dev *rtwdev)
+206 -138
drivers/net/wireless/realtek/rtw88/rtw8822c.h
··· 164 164 165 165 #define REG_ANAPARLDO_POW_MAC 0x0029 166 166 #define BIT_LDOE25_PON BIT(0) 167 - 168 167 #define XCAP_MASK GENMASK(6, 0) 169 168 #define CFO_TRK_ENABLE_TH 20 170 169 #define CFO_TRK_STOP_TH 10 171 170 #define CFO_TRK_ADJ_TH 10 172 171 173 - #define REG_TXDFIR0 0x808 174 - #define REG_DFIRBW 0x810 175 - #define REG_ANTMAP0 0x820 176 - #define REG_ANTMAP 0x824 177 - #define REG_DYMPRITH 0x86c 178 - #define REG_DYMENTH0 0x870 179 - #define REG_DYMENTH 0x874 180 - #define REG_SBD 0x88c 172 + #define REG_TXDFIR0 0x808 173 + #define REG_DFIRBW 0x810 174 + #define REG_ANTMAP0 0x820 175 + #define BIT_ANT_PATH GENMASK(1, 0) 176 + #define REG_ANTMAP 0x824 177 + #define REG_DYMPRITH 0x86c 178 + #define REG_DYMENTH0 0x870 179 + #define REG_DYMENTH 0x874 180 + #define REG_SBD 0x88c 181 181 #define BITS_SUBTUNE GENMASK(15, 12) 182 - #define REG_DYMTHMIN 0x8a4 183 - #define REG_TXBWCTL 0x9b0 184 - #define REG_TXCLK 0x9b4 185 - #define REG_SCOTRK 0xc30 186 - #define REG_MRCM 0xc38 187 - #define REG_AGCSWSH 0xc44 188 - #define REG_ANTWTPD 0xc54 189 - #define REG_PT_CHSMO 0xcbc 182 + #define REG_DYMTHMIN 0x8a4 183 + 184 + #define REG_TXBWCTL 0x9b0 185 + #define REG_TXCLK 0x9b4 186 + 187 + #define REG_SCOTRK 0xc30 188 + #define REG_MRCM 0xc38 189 + #define REG_AGCSWSH 0xc44 190 + #define REG_ANTWTPD 0xc54 191 + #define REG_PT_CHSMO 0xcbc 190 192 #define BIT_PT_OPT BIT(21) 191 - #define REG_ORITXCODE 0x1800 192 - #define REG_3WIRE 0x180c 193 + 194 + #define REG_ORITXCODE 0x1800 195 + #define BIT_PATH_EN BIT(31) 196 + #define REG_3WIRE 0x180c 197 + #define BIT_DIS_SHARERX_TXGAT BIT(27) 193 198 #define BIT_3WIRE_TX_EN BIT(0) 194 199 #define BIT_3WIRE_RX_EN BIT(1) 200 + #define BIT_3WIRE_EN GENMASK(1, 0) 195 201 #define BIT_3WIRE_PI_ON BIT(28) 196 - #define REG_ANAPAR_A 0x1830 202 + #define REG_ANAPAR_A 0x1830 197 203 #define BIT_ANAPAR_UPDATE BIT(29) 198 - #define REG_RXAGCCTL0 0x18ac 204 + #define REG_RFTXEN_GCK_A 0x1864 205 + #define BIT_RFTXEN_GCK_FORCE_ON BIT(31) 
206 + #define REG_DIS_SHARE_RX_A 0x186c 207 + #define BIT_TX_SCALE_0DB BIT(7) 208 + #define REG_RXAGCCTL0 0x18ac 199 209 #define BITS_RXAGC_CCK GENMASK(15, 12) 200 210 #define BITS_RXAGC_OFDM GENMASK(8, 4) 201 - #define REG_DCKA_I_0 0x18bc 202 - #define REG_DCKA_I_1 0x18c0 203 - #define REG_DCKA_Q_0 0x18d8 204 - #define REG_DCKA_Q_1 0x18dc 205 - #define REG_CCKSB 0x1a00 206 - #define REG_RXCCKSEL 0x1a04 207 - #define REG_BGCTRL 0x1a14 211 + #define REG_DCKA_I_0 0x18bc 212 + #define REG_DCKA_I_1 0x18c0 213 + #define REG_DCKA_Q_0 0x18d8 214 + #define REG_DCKA_Q_1 0x18dc 215 + 216 + #define REG_CCKSB 0x1a00 217 + #define BIT_BBMODE GENMASK(2, 1) 218 + #define REG_RXCCKSEL 0x1a04 219 + #define REG_BGCTRL 0x1a14 208 220 #define BITS_RX_IQ_WEIGHT (BIT(8) | BIT(9)) 209 - #define REG_TXF0 0x1a20 210 - #define REG_TXF1 0x1a24 211 - #define REG_TXF2 0x1a28 212 - #define REG_CCANRX 0x1a2c 221 + #define REG_TXF0 0x1a20 222 + #define REG_TXF1 0x1a24 223 + #define REG_TXF2 0x1a28 224 + #define REG_CCANRX 0x1a2c 213 225 #define BIT_CCK_FA_RST (BIT(14) | BIT(15)) 214 226 #define BIT_OFDM_FA_RST (BIT(12) | BIT(13)) 215 - #define REG_CCK_FACNT 0x1a5c 216 - #define REG_CCKTXONLY 0x1a80 227 + #define REG_CCK_FACNT 0x1a5c 228 + #define REG_CCKTXONLY 0x1a80 217 229 #define BIT_BB_CCK_CHECK_EN BIT(18) 218 - #define REG_TXF3 0x1a98 219 - #define REG_TXF4 0x1a9c 220 - #define REG_TXF5 0x1aa0 221 - #define REG_TXF6 0x1aac 222 - #define REG_TXF7 0x1ab0 223 - #define REG_CCK_SOURCE 0x1abc 230 + #define REG_TXF3 0x1a98 231 + #define REG_TXF4 0x1a9c 232 + #define REG_TXF5 0x1aa0 233 + #define REG_TXF6 0x1aac 234 + #define REG_TXF7 0x1ab0 235 + #define REG_CCK_SOURCE 0x1abc 224 236 #define BIT_NBI_EN BIT(30) 225 - #define REG_IQKSTAT 0x1b10 226 - #define REG_TXANT 0x1c28 227 - #define REG_ENCCK 0x1c3c 228 - #define BIT_CCK_BLK_EN BIT(1) 229 - #define BIT_CCK_OFDM_BLK_EN (BIT(0) | BIT(1)) 230 - #define REG_CCAMSK 0x1c80 231 - #define REG_RSTB 0x1c90 232 - #define BIT_RSTB_3WIRE BIT(8) 233 - 
#define REG_RX_BREAK 0x1d2c 234 - #define BIT_COM_RX_GCK_EN BIT(31) 235 - #define REG_RXFNCTL 0x1d30 236 - #define REG_RXIGI 0x1d70 237 - #define REG_ENFN 0x1e24 238 - #define REG_TXANTSEG 0x1e28 239 - #define REG_TXLGMAP 0x1e2c 240 - #define REG_CCKPATH 0x1e5c 241 - #define REG_CNT_CTRL 0x1eb4 242 - #define BIT_ALL_CNT_RST BIT(25) 243 - #define REG_OFDM_FACNT 0x2d00 244 - #define REG_OFDM_FACNT1 0x2d04 245 - #define REG_OFDM_FACNT2 0x2d08 246 - #define REG_OFDM_FACNT3 0x2d0c 247 - #define REG_OFDM_FACNT4 0x2d10 248 - #define REG_OFDM_FACNT5 0x2d20 249 - #define REG_RPT_CIP 0x2d9c 250 - #define REG_OFDM_TXCNT 0x2de0 251 - #define REG_ORITXCODE2 0x4100 252 - #define REG_3WIRE2 0x410c 253 - #define REG_ANAPAR_B 0x4130 254 - #define REG_RXAGCCTL 0x41ac 255 - #define REG_DCKB_I_0 0x41bc 256 - #define REG_DCKB_I_1 0x41c0 257 - #define REG_DCKB_Q_0 0x41d8 258 - #define REG_DCKB_Q_1 0x41dc 259 - 260 - #define RF_MODE_TRXAGC 0x00 261 - #define RF_RXAGC_OFFSET 0x19 262 - #define RF_BW_TRXBB 0x1a 263 - #define RF_TX_GAIN_OFFSET 0x55 264 - #define RF_TX_GAIN 0x56 265 - #define RF_TXA_LB_SW 0x63 266 - #define RF_RXG_GAIN 0x87 267 - #define RF_RXA_MIX_GAIN 0x8a 268 - #define RF_EXT_TIA_BW 0x8f 269 - #define RF_DEBUG 0xde 270 237 271 238 #define REG_NCTL0 0x1b00 239 + #define BIT_SEL_PATH GENMASK(2, 1) 240 + #define BIT_SUBPAGE GENMASK(3, 0) 272 241 #define REG_DPD_CTL0_S0 0x1b04 242 + #define BIT_GS_PWSF GENMASK(27, 0) 273 243 #define REG_DPD_CTL1_S0 0x1b08 244 + #define BIT_DPD_EN BIT(31) 245 + #define BIT_PS_EN BIT(7) 246 + #define REG_IQKSTAT 0x1b10 274 247 #define REG_IQK_CTL1 0x1b20 248 + #define BIT_TX_CFIR GENMASK(31, 30) 249 + #define BIT_CFIR_EN GENMASK(26, 24) 250 + #define BIT_BYPASS_DPD BIT(25) 251 + 252 + #define REG_TX_TONE_IDX 0x1b2c 275 253 #define REG_DPD_LUT0 0x1b44 254 + #define BIT_GLOSS_DB GENMASK(14, 12) 276 255 #define REG_DPD_CTL0_S1 0x1b5c 277 - #define REG_DPD_LUT3 0x1b60 278 256 #define REG_DPD_CTL1_S1 0x1b60 279 257 #define REG_DPD_AGC 0x1b67 258 + 
#define REG_TABLE_SEL 0x1b98 259 + #define BIT_I_GAIN GENMASK(19, 16) 260 + #define BIT_GAIN_RST BIT(15) 261 + #define BIT_Q_GAIN_SEL GENMASK(14, 12) 262 + #define BIT_Q_GAIN GENMASK(11, 0) 263 + #define REG_TX_GAIN_SET 0x1b9c 264 + #define BIT_GAPK_RPT_IDX GENMASK(11, 8) 280 265 #define REG_DPD_CTL0 0x1bb4 266 + #define REG_SINGLE_TONE_SW 0x1bb8 267 + #define BIT_IRQ_TEST_MODE BIT(20) 281 268 #define REG_R_CONFIG 0x1bcc 269 + #define BIT_INNER_LB BIT(21) 270 + #define BIT_IQ_SWITCH GENMASK(5, 0) 271 + #define BIT_2G_SWING 0x2d 272 + #define BIT_5G_SWING 0x36 282 273 #define REG_RXSRAM_CTL 0x1bd4 274 + #define BIT_RPT_EN BIT(21) 275 + #define BIT_RPT_SEL GENMASK(20, 16) 276 + #define BIT_DPD_CLK GENMASK(7, 4) 283 277 #define REG_DPD_CTL11 0x1be4 284 278 #define REG_DPD_CTL12 0x1be8 285 279 #define REG_DPD_CTL15 0x1bf4 286 280 #define REG_DPD_CTL16 0x1bf8 287 281 #define REG_STAT_RPT 0x1bfc 288 - 289 - #define BIT_EXT_TIA_BW BIT(1) 290 - #define BIT_DE_TRXBW BIT(2) 291 - #define BIT_DE_TX_GAIN BIT(16) 292 - #define BIT_RXG_GAIN BIT(18) 293 - #define BIT_DE_PWR_TRIM BIT(19) 294 - #define BIT_INNER_LB BIT(21) 295 - #define BIT_BYPASS_DPD BIT(25) 296 - #define BIT_DPD_EN BIT(31) 297 - #define BIT_SUBPAGE GENMASK(3, 0) 298 - #define BIT_TXAGC GENMASK(4, 0) 299 - #define BIT_GAIN_TXBB GENMASK(4, 0) 300 - #define BIT_LB_ATT GENMASK(4, 2) 301 - #define BIT_RXA_MIX_GAIN GENMASK(4, 3) 302 - #define BIT_IQ_SWITCH GENMASK(5, 0) 303 - #define BIT_DPD_CLK GENMASK(7, 4) 304 - #define BIT_RXAGC GENMASK(9, 5) 305 - #define BIT_BW_RXBB GENMASK(11, 10) 306 - #define BIT_LB_SW GENMASK(13, 12) 307 - #define BIT_BW_TXBB GENMASK(14, 12) 308 - #define BIT_GLOSS_DB GENMASK(14, 12) 309 - #define BIT_TXA_LB_ATT GENMASK(15, 14) 310 - #define BIT_TX_OFFSET_VAL GENMASK(18, 14) 311 - #define BIT_RPT_SEL GENMASK(20, 16) 312 - #define BIT_GS_PWSF GENMASK(27, 0) 313 282 #define BIT_RPT_DGAIN GENMASK(27, 16) 314 - #define BIT_TX_CFIR GENMASK(31, 30) 283 + #define BIT_GAPK_RPT0 GENMASK(3, 0) 284 + 
#define BIT_GAPK_RPT1 GENMASK(7, 4) 285 + #define BIT_GAPK_RPT2 GENMASK(11, 8) 286 + #define BIT_GAPK_RPT3 GENMASK(15, 12) 287 + #define BIT_GAPK_RPT4 GENMASK(19, 16) 288 + #define BIT_GAPK_RPT5 GENMASK(23, 20) 289 + #define BIT_GAPK_RPT6 GENMASK(27, 24) 290 + #define BIT_GAPK_RPT7 GENMASK(31, 28) 315 291 316 - #define PPG_THERMAL_A 0x1ef 317 - #define PPG_THERMAL_B 0x1b0 318 - #define RF_THEMAL_MASK GENMASK(19, 16) 319 - #define PPG_2GL_TXAB 0x1d4 320 - #define PPG_2GM_TXAB 0x1ee 321 - #define PPG_2GH_TXAB 0x1d2 322 - #define PPG_2G_A_MASK GENMASK(3, 0) 323 - #define PPG_2G_B_MASK GENMASK(7, 4) 324 - #define PPG_5GL1_TXA 0x1ec 325 - #define PPG_5GL2_TXA 0x1e8 326 - #define PPG_5GM1_TXA 0x1e4 327 - #define PPG_5GM2_TXA 0x1e0 328 - #define PPG_5GH1_TXA 0x1dc 329 - #define PPG_5GL1_TXB 0x1eb 330 - #define PPG_5GL2_TXB 0x1e7 331 - #define PPG_5GM1_TXB 0x1e3 332 - #define PPG_5GM2_TXB 0x1df 333 - #define PPG_5GH1_TXB 0x1db 334 - #define PPG_5G_MASK GENMASK(4, 0) 335 - #define PPG_PABIAS_2GA 0x1d6 336 - #define PPG_PABIAS_2GB 0x1d5 337 - #define PPG_PABIAS_5GA 0x1d8 338 - #define PPG_PABIAS_5GB 0x1d7 339 - #define PPG_PABIAS_MASK GENMASK(3, 0) 340 - #define RF_PABIAS_2G_MASK GENMASK(15, 12) 341 - #define RF_PABIAS_5G_MASK GENMASK(19, 16) 292 + #define REG_TXANT 0x1c28 293 + #define REG_IQK_CTRL 0x1c38 294 + #define REG_ENCCK 0x1c3c 295 + #define BIT_CCK_BLK_EN BIT(1) 296 + #define BIT_CCK_OFDM_BLK_EN (BIT(0) | BIT(1)) 297 + #define REG_CCAMSK 0x1c80 298 + #define REG_RSTB 0x1c90 299 + #define BIT_RSTB_3WIRE BIT(8) 300 + #define REG_CH_DELAY_EXTR2 0x1cd0 301 + #define BIT_TST_IQK2SET_SRC BIT(31) 302 + #define BIT_EN_IOQ_IQK_DPK BIT(30) 303 + #define BIT_IQK_DPK_RESET_SRC BIT(29) 304 + #define BIT_IQK_DPK_CLOCK_SRC BIT(28) 342 305 306 + #define REG_RX_BREAK 0x1d2c 307 + #define BIT_COM_RX_GCK_EN BIT(31) 308 + #define REG_RXFNCTL 0x1d30 309 + #define REG_CCA_OFF 0x1d58 310 + #define BIT_CCA_ON_BY_PW GENMASK(11, 3) 311 + #define REG_RXIGI 0x1d70 312 + 313 + #define REG_ENFN 
0x1e24 314 + #define BIT_IQK_DPK_EN BIT(17) 315 + #define REG_TXANTSEG 0x1e28 316 + #define BIT_ANTSEG GENMASK(3, 0) 317 + #define REG_TXLGMAP 0x1e2c 318 + #define REG_CCKPATH 0x1e5c 319 + #define REG_TX_FIFO 0x1e70 320 + #define BIT_STOP_TX GENMASK(3, 0) 321 + #define REG_CNT_CTRL 0x1eb4 322 + #define BIT_ALL_CNT_RST BIT(25) 323 + 324 + #define REG_OFDM_FACNT 0x2d00 325 + #define REG_OFDM_FACNT1 0x2d04 326 + #define REG_OFDM_FACNT2 0x2d08 327 + #define REG_OFDM_FACNT3 0x2d0c 328 + #define REG_OFDM_FACNT4 0x2d10 329 + #define REG_OFDM_FACNT5 0x2d20 330 + #define REG_RPT_CIP 0x2d9c 331 + #define BIT_RPT_CIP_STATUS GENMASK(7, 0) 332 + #define REG_OFDM_TXCNT 0x2de0 333 + 334 + #define REG_ORITXCODE2 0x4100 335 + #define REG_3WIRE2 0x410c 336 + #define REG_ANAPAR_B 0x4130 337 + #define REG_RFTXEN_GCK_B 0x4164 338 + #define REG_DIS_SHARE_RX_B 0x416c 339 + #define BIT_EXT_TIA_BW BIT(1) 340 + #define REG_RXAGCCTL 0x41ac 341 + #define REG_DCKB_I_0 0x41bc 342 + #define REG_DCKB_I_1 0x41c0 343 + #define REG_DCKB_Q_0 0x41d8 344 + #define REG_DCKB_Q_1 0x41dc 345 + 346 + #define RF_MODE_TRXAGC 0x00 347 + #define BIT_RF_MODE GENMASK(19, 16) 348 + #define BIT_RXAGC GENMASK(9, 5) 349 + #define BIT_TXAGC GENMASK(4, 0) 350 + #define RF_RXAGC_OFFSET 0x19 351 + #define RF_BW_TRXBB 0x1a 352 + #define BIT_TX_CCK_IND BIT(16) 353 + #define BIT_BW_TXBB GENMASK(14, 12) 354 + #define BIT_BW_RXBB GENMASK(11, 10) 355 + #define BIT_DBG_CCK_CCA BIT(1) 356 + #define RF_TX_GAIN_OFFSET 0x55 357 + #define BIT_BB_GAIN GENMASK(18, 14) 358 + #define BIT_RF_GAIN GENMASK(4, 2) 359 + #define RF_TX_GAIN 0x56 360 + #define BIT_GAIN_TXBB GENMASK(4, 0) 361 + #define RF_IDAC 0x58 362 + #define BIT_TX_MODE GENMASK(19, 8) 363 + #define RF_TX_RESULT 0x5f 364 + #define BIT_GAIN_TX_PAD_H GENMASK(11, 8) 365 + #define BIT_GAIN_TX_PAD_L GENMASK(7, 4) 366 + #define RF_PA 0x60 367 + #define RF_PABIAS_2G_MASK GENMASK(15, 12) 368 + #define RF_PABIAS_5G_MASK GENMASK(19, 16) 369 + #define RF_TXA_LB_SW 0x63 370 + #define 
BIT_TXA_LB_ATT GENMASK(15, 14) 371 + #define BIT_LB_SW GENMASK(13, 12) 372 + #define BIT_LB_ATT GENMASK(4, 2) 373 + #define RF_RXG_GAIN 0x87 374 + #define BIT_RXG_GAIN BIT(18) 375 + #define RF_RXA_MIX_GAIN 0x8a 376 + #define BIT_RXA_MIX_GAIN GENMASK(4, 3) 377 + #define RF_EXT_TIA_BW 0x8f 378 + #define BIT_PW_EXT_TIA BIT(1) 379 + #define RF_DIS_BYPASS_TXBB 0x9e 380 + #define BIT_TXBB BIT(10) 381 + #define BIT_TIA_BYPASS BIT(5) 382 + #define RF_DEBUG 0xde 383 + #define BIT_DE_PWR_TRIM BIT(19) 384 + #define BIT_DE_TX_GAIN BIT(16) 385 + #define BIT_DE_TRXBW BIT(2) 386 + 387 + #define PPG_THERMAL_B 0x1b0 388 + #define RF_THEMAL_MASK GENMASK(19, 16) 389 + #define PPG_2GH_TXAB 0x1d2 390 + #define PPG_2G_A_MASK GENMASK(3, 0) 391 + #define PPG_2G_B_MASK GENMASK(7, 4) 392 + #define PPG_2GL_TXAB 0x1d4 393 + #define PPG_PABIAS_2GB 0x1d5 394 + #define PPG_PABIAS_2GA 0x1d6 395 + #define PPG_PABIAS_MASK GENMASK(3, 0) 396 + #define PPG_PABIAS_5GB 0x1d7 397 + #define PPG_PABIAS_5GA 0x1d8 398 + #define PPG_5G_MASK GENMASK(4, 0) 399 + #define PPG_5GH1_TXB 0x1db 400 + #define PPG_5GH1_TXA 0x1dc 401 + #define PPG_5GM2_TXB 0x1df 402 + #define PPG_5GM2_TXA 0x1e0 403 + #define PPG_5GM1_TXB 0x1e3 404 + #define PPG_5GM1_TXA 0x1e4 405 + #define PPG_5GL2_TXB 0x1e7 406 + #define PPG_5GL2_TXA 0x1e8 407 + #define PPG_5GL1_TXB 0x1eb 408 + #define PPG_5GL1_TXA 0x1ec 409 + #define PPG_2GM_TXAB 0x1ee 410 + #define PPG_THERMAL_A 0x1ef 343 411 #endif
+1 -1
drivers/net/wireless/ti/wlcore/debugfs.h
··· 84 84 wl1271_debugfs_update_stats(wl); \ 85 85 \ 86 86 for (i = 0; i < len && pos < sizeof(buf); i++) \ 87 - pos += snprintf(buf + pos, sizeof(buf), \ 87 + pos += snprintf(buf + pos, sizeof(buf) - pos, \ 88 88 "[%d] = %d\n", i, stats->sub.name[i]); \ 89 89 \ 90 90 return wl1271_format_buffer(userbuf, count, ppos, "%s", buf); \
+23 -24
drivers/net/wireless/wl3501.h
··· 379 379 u8 mib_value[100]; 380 380 }; 381 381 382 - struct wl3501_join_req { 383 - u16 next_blk; 384 - u8 sig_id; 385 - u8 reserved; 386 - struct iw_mgmt_data_rset operational_rset; 387 - u16 reserved2; 388 - u16 timeout; 389 - u16 probe_delay; 390 - u8 timestamp[8]; 391 - u8 local_time[8]; 382 + struct wl3501_req { 392 383 u16 beacon_period; 393 384 u16 dtim_period; 394 385 u16 cap_info; ··· 390 399 struct iw_mgmt_cf_pset cf_pset; 391 400 struct iw_mgmt_ibss_pset ibss_pset; 392 401 struct iw_mgmt_data_rset bss_basic_rset; 402 + }; 403 + 404 + struct wl3501_join_req { 405 + u16 next_blk; 406 + u8 sig_id; 407 + u8 reserved; 408 + struct iw_mgmt_data_rset operational_rset; 409 + u16 reserved2; 410 + u16 timeout; 411 + u16 probe_delay; 412 + u8 timestamp[8]; 413 + u8 local_time[8]; 414 + struct wl3501_req req; 393 415 }; 394 416 395 417 struct wl3501_join_confirm { ··· 447 443 u16 status; 448 444 char timestamp[8]; 449 445 char localtime[8]; 450 - u16 beacon_period; 451 - u16 dtim_period; 452 - u16 cap_info; 453 - u8 bss_type; 454 - u8 bssid[ETH_ALEN]; 455 - struct iw_mgmt_essid_pset ssid; 456 - struct iw_mgmt_ds_pset ds_pset; 457 - struct iw_mgmt_cf_pset cf_pset; 458 - struct iw_mgmt_ibss_pset ibss_pset; 459 - struct iw_mgmt_data_rset bss_basic_rset; 446 + struct wl3501_req req; 460 447 u8 rssi; 461 448 }; 462 449 ··· 466 471 u16 size; 467 472 u8 pri; 468 473 u8 service_class; 469 - u8 daddr[ETH_ALEN]; 470 - u8 saddr[ETH_ALEN]; 474 + struct { 475 + u8 daddr[ETH_ALEN]; 476 + u8 saddr[ETH_ALEN]; 477 + } addr; 471 478 }; 472 479 473 480 struct wl3501_md_ind { ··· 481 484 u8 reception; 482 485 u8 pri; 483 486 u8 service_class; 484 - u8 daddr[ETH_ALEN]; 485 - u8 saddr[ETH_ALEN]; 487 + struct { 488 + u8 daddr[ETH_ALEN]; 489 + u8 saddr[ETH_ALEN]; 490 + } addr; 486 491 }; 487 492 488 493 struct wl3501_md_confirm {
+29 -25
drivers/net/wireless/wl3501_cs.c
··· 469 469 struct wl3501_md_req sig = { 470 470 .sig_id = WL3501_SIG_MD_REQ, 471 471 }; 472 + size_t sig_addr_len = sizeof(sig.addr); 472 473 u8 *pdata = (char *)data; 473 474 int rc = -EIO; 474 475 ··· 485 484 goto out; 486 485 } 487 486 rc = 0; 488 - memcpy(&sig.daddr[0], pdata, 12); 489 - pktlen = len - 12; 490 - pdata += 12; 487 + memcpy(&sig.addr, pdata, sig_addr_len); 488 + pktlen = len - sig_addr_len; 489 + pdata += sig_addr_len; 491 490 sig.data = bf; 492 491 if (((*pdata) * 256 + (*(pdata + 1))) > 1500) { 493 492 u8 addr4[ETH_ALEN] = { ··· 590 589 struct wl3501_join_req sig = { 591 590 .sig_id = WL3501_SIG_JOIN_REQ, 592 591 .timeout = 10, 593 - .ds_pset = { 592 + .req.ds_pset = { 594 593 .el = { 595 594 .id = IW_MGMT_INFO_ELEMENT_DS_PARAMETER_SET, 596 595 .len = 1, ··· 599 598 }, 600 599 }; 601 600 602 - memcpy(&sig.beacon_period, &this->bss_set[stas].beacon_period, 72); 601 + memcpy(&sig.req, &this->bss_set[stas].req, sizeof(sig.req)); 603 602 return wl3501_esbq_exec(this, &sig, sizeof(sig)); 604 603 } 605 604 ··· 667 666 if (sig.status == WL3501_STATUS_SUCCESS) { 668 667 pr_debug("success"); 669 668 if ((this->net_type == IW_MODE_INFRA && 670 - (sig.cap_info & WL3501_MGMT_CAPABILITY_ESS)) || 669 + (sig.req.cap_info & WL3501_MGMT_CAPABILITY_ESS)) || 671 670 (this->net_type == IW_MODE_ADHOC && 672 - (sig.cap_info & WL3501_MGMT_CAPABILITY_IBSS)) || 671 + (sig.req.cap_info & WL3501_MGMT_CAPABILITY_IBSS)) || 673 672 this->net_type == IW_MODE_AUTO) { 674 673 if (!this->essid.el.len) 675 674 matchflag = 1; 676 675 else if (this->essid.el.len == 3 && 677 676 !memcmp(this->essid.essid, "ANY", 3)) 678 677 matchflag = 1; 679 - else if (this->essid.el.len != sig.ssid.el.len) 678 + else if (this->essid.el.len != sig.req.ssid.el.len) 680 679 matchflag = 0; 681 - else if (memcmp(this->essid.essid, sig.ssid.essid, 680 + else if (memcmp(this->essid.essid, sig.req.ssid.essid, 682 681 this->essid.el.len)) 683 682 matchflag = 0; 684 683 else 685 684 matchflag = 1; 686 685 
if (matchflag) { 687 686 for (i = 0; i < this->bss_cnt; i++) { 688 - if (ether_addr_equal_unaligned(this->bss_set[i].bssid, sig.bssid)) { 687 + if (ether_addr_equal_unaligned(this->bss_set[i].req.bssid, 688 + sig.req.bssid)) { 689 689 matchflag = 0; 690 690 break; 691 691 } 692 692 } 693 693 } 694 694 if (matchflag && (i < 20)) { 695 - memcpy(&this->bss_set[i].beacon_period, 696 - &sig.beacon_period, 73); 695 + memcpy(&this->bss_set[i].req, 696 + &sig.req, sizeof(sig.req)); 697 697 this->bss_cnt++; 698 698 this->rssi = sig.rssi; 699 + this->bss_set[i].rssi = sig.rssi; 699 700 } 700 701 } 701 702 } else if (sig.status == WL3501_STATUS_TIMEOUT) { ··· 889 886 if (this->join_sta_bss < this->bss_cnt) { 890 887 const int i = this->join_sta_bss; 891 888 memcpy(this->bssid, 892 - this->bss_set[i].bssid, ETH_ALEN); 893 - this->chan = this->bss_set[i].ds_pset.chan; 889 + this->bss_set[i].req.bssid, ETH_ALEN); 890 + this->chan = this->bss_set[i].req.ds_pset.chan; 894 891 iw_copy_mgmt_info_element(&this->keep_essid.el, 895 - &this->bss_set[i].ssid.el); 892 + &this->bss_set[i].req.ssid.el); 896 893 wl3501_mgmt_auth(this); 897 894 } 898 895 } else { 899 896 const int i = this->join_sta_bss; 900 897 901 - memcpy(&this->bssid, &this->bss_set[i].bssid, ETH_ALEN); 902 - this->chan = this->bss_set[i].ds_pset.chan; 898 + memcpy(&this->bssid, &this->bss_set[i].req.bssid, ETH_ALEN); 899 + this->chan = this->bss_set[i].req.ds_pset.chan; 903 900 iw_copy_mgmt_info_element(&this->keep_essid.el, 904 - &this->bss_set[i].ssid.el); 901 + &this->bss_set[i].req.ssid.el); 905 902 wl3501_online(dev); 906 903 } 907 904 } else { ··· 983 980 } else { 984 981 skb->dev = dev; 985 982 skb_reserve(skb, 2); /* IP headers on 16 bytes boundaries */ 986 - skb_copy_to_linear_data(skb, (unsigned char *)&sig.daddr, 12); 983 + skb_copy_to_linear_data(skb, (unsigned char *)&sig.addr, 984 + sizeof(sig.addr)); 987 985 wl3501_receive(this, skb->data, pkt_len); 988 986 skb_put(skb, pkt_len); 989 987 skb->protocol = 
eth_type_trans(skb, dev); ··· 1575 1571 for (i = 0; i < this->bss_cnt; ++i) { 1576 1572 iwe.cmd = SIOCGIWAP; 1577 1573 iwe.u.ap_addr.sa_family = ARPHRD_ETHER; 1578 - memcpy(iwe.u.ap_addr.sa_data, this->bss_set[i].bssid, ETH_ALEN); 1574 + memcpy(iwe.u.ap_addr.sa_data, this->bss_set[i].req.bssid, ETH_ALEN); 1579 1575 current_ev = iwe_stream_add_event(info, current_ev, 1580 1576 extra + IW_SCAN_MAX_DATA, 1581 1577 &iwe, IW_EV_ADDR_LEN); 1582 1578 iwe.cmd = SIOCGIWESSID; 1583 1579 iwe.u.data.flags = 1; 1584 - iwe.u.data.length = this->bss_set[i].ssid.el.len; 1580 + iwe.u.data.length = this->bss_set[i].req.ssid.el.len; 1585 1581 current_ev = iwe_stream_add_point(info, current_ev, 1586 1582 extra + IW_SCAN_MAX_DATA, 1587 1583 &iwe, 1588 - this->bss_set[i].ssid.essid); 1584 + this->bss_set[i].req.ssid.essid); 1589 1585 iwe.cmd = SIOCGIWMODE; 1590 - iwe.u.mode = this->bss_set[i].bss_type; 1586 + iwe.u.mode = this->bss_set[i].req.bss_type; 1591 1587 current_ev = iwe_stream_add_event(info, current_ev, 1592 1588 extra + IW_SCAN_MAX_DATA, 1593 1589 &iwe, IW_EV_UINT_LEN); 1594 1590 iwe.cmd = SIOCGIWFREQ; 1595 - iwe.u.freq.m = this->bss_set[i].ds_pset.chan; 1591 + iwe.u.freq.m = this->bss_set[i].req.ds_pset.chan; 1596 1592 iwe.u.freq.e = 0; 1597 1593 current_ev = iwe_stream_add_event(info, current_ev, 1598 1594 extra + IW_SCAN_MAX_DATA, 1599 1595 &iwe, IW_EV_FREQ_LEN); 1600 1596 iwe.cmd = SIOCGIWENCODE; 1601 - if (this->bss_set[i].cap_info & WL3501_MGMT_CAPABILITY_PRIVACY) 1597 + if (this->bss_set[i].req.cap_info & WL3501_MGMT_CAPABILITY_PRIVACY) 1602 1598 iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; 1603 1599 else 1604 1600 iwe.u.data.flags = IW_ENCODE_DISABLED;