Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next-2.6 into for-davem

+2592 -1751
+1 -1
drivers/net/wireless/at76c50x-usb.h
··· 290 290 u8 res; 291 291 u8 multi_domain_capability_implemented; 292 292 u8 multi_domain_capability_enabled; 293 - u8 country_string[3]; 293 + u8 country_string[IEEE80211_COUNTRY_STRING_LEN]; 294 294 u8 reserved[3]; 295 295 } __packed; 296 296
+1 -1
drivers/net/wireless/ath/ath5k/ahb.c
··· 93 93 goto err_out; 94 94 } 95 95 96 - mem = ioremap_nocache(res->start, res->end - res->start + 1); 96 + mem = ioremap_nocache(res->start, resource_size(res)); 97 97 if (mem == NULL) { 98 98 dev_err(&pdev->dev, "ioremap failed\n"); 99 99 ret = -ENOMEM;
+1 -1
drivers/net/wireless/ath/ath5k/ath5k.h
··· 513 513 AR5K_TX_QUEUE_ID_NOQCU_DATA = 0, 514 514 AR5K_TX_QUEUE_ID_NOQCU_BEACON = 1, 515 515 AR5K_TX_QUEUE_ID_DATA_MIN = 0, /*IEEE80211_TX_QUEUE_DATA0*/ 516 - AR5K_TX_QUEUE_ID_DATA_MAX = 4, /*IEEE80211_TX_QUEUE_DATA4*/ 516 + AR5K_TX_QUEUE_ID_DATA_MAX = 3, /*IEEE80211_TX_QUEUE_DATA3*/ 517 517 AR5K_TX_QUEUE_ID_DATA_SVP = 5, /*IEEE80211_TX_QUEUE_SVP - Spectralink Voice Protocol*/ 518 518 AR5K_TX_QUEUE_ID_CAB = 6, /*IEEE80211_TX_QUEUE_AFTER_BEACON*/ 519 519 AR5K_TX_QUEUE_ID_BEACON = 7, /*IEEE80211_TX_QUEUE_BEACON*/
+23 -29
drivers/net/wireless/ath/ath5k/base.c
··· 442 442 return ath5k_reset(sc, chan, true); 443 443 } 444 444 445 - struct ath_vif_iter_data { 446 - const u8 *hw_macaddr; 447 - u8 mask[ETH_ALEN]; 448 - u8 active_mac[ETH_ALEN]; /* first active MAC */ 449 - bool need_set_hw_addr; 450 - bool found_active; 451 - bool any_assoc; 452 - enum nl80211_iftype opmode; 453 - }; 454 - 455 - static void ath_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) 445 + void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) 456 446 { 457 - struct ath_vif_iter_data *iter_data = data; 447 + struct ath5k_vif_iter_data *iter_data = data; 458 448 int i; 459 449 struct ath5k_vif *avf = (void *)vif->drv_priv; 460 450 ··· 474 484 */ 475 485 if (avf->opmode == NL80211_IFTYPE_AP) 476 486 iter_data->opmode = NL80211_IFTYPE_AP; 477 - else 487 + else { 488 + if (avf->opmode == NL80211_IFTYPE_STATION) 489 + iter_data->n_stas++; 478 490 if (iter_data->opmode == NL80211_IFTYPE_UNSPECIFIED) 479 491 iter_data->opmode = avf->opmode; 492 + } 480 493 } 481 494 482 495 void ··· 487 494 struct ieee80211_vif *vif) 488 495 { 489 496 struct ath_common *common = ath5k_hw_common(sc->ah); 490 - struct ath_vif_iter_data iter_data; 497 + struct ath5k_vif_iter_data iter_data; 498 + u32 rfilt; 491 499 492 500 /* 493 501 * Use the hardware MAC address as reference, the hardware uses it ··· 499 505 iter_data.found_active = false; 500 506 iter_data.need_set_hw_addr = true; 501 507 iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED; 508 + iter_data.n_stas = 0; 502 509 503 510 if (vif) 504 - ath_vif_iter(&iter_data, vif->addr, vif); 511 + ath5k_vif_iter(&iter_data, vif->addr, vif); 505 512 506 513 /* Get list of all active MAC addresses */ 507 - ieee80211_iterate_active_interfaces_atomic(sc->hw, ath_vif_iter, 514 + ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter, 508 515 &iter_data); 509 516 memcpy(sc->bssidmask, iter_data.mask, ETH_ALEN); 510 517 ··· 523 528 524 529 if (ath5k_hw_hasbssidmask(sc->ah)) 525 530 
ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask); 526 - } 527 531 528 - void 529 - ath5k_mode_setup(struct ath5k_softc *sc, struct ieee80211_vif *vif) 530 - { 531 - struct ath5k_hw *ah = sc->ah; 532 - u32 rfilt; 532 + /* Set up RX Filter */ 533 + if (iter_data.n_stas > 1) { 534 + /* If you have multiple STA interfaces connected to 535 + * different APs, ARPs are not received (most of the time?) 536 + * Enabling PROMISC appears to fix that probem. 537 + */ 538 + sc->filter_flags |= AR5K_RX_FILTER_PROM; 539 + } 533 540 534 - /* configure rx filter */ 535 541 rfilt = sc->filter_flags; 536 - ath5k_hw_set_rx_filter(ah, rfilt); 542 + ath5k_hw_set_rx_filter(sc->ah, rfilt); 537 543 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt); 538 - 539 - ath5k_update_bssid_mask_and_opmode(sc, vif); 540 544 } 541 545 542 546 static inline int ··· 1111 1117 spin_unlock_bh(&sc->rxbuflock); 1112 1118 1113 1119 ath5k_hw_start_rx_dma(ah); /* enable recv descriptors */ 1114 - ath5k_mode_setup(sc, NULL); /* set filters, etc. */ 1120 + ath5k_update_bssid_mask_and_opmode(sc, NULL); /* set filters, etc. */ 1115 1121 ath5k_hw_start_rx_pcu(ah); /* re-enable PCU/DMA engine */ 1116 1122 1117 1123 return 0; ··· 2917 2923 bool 2918 2924 ath_any_vif_assoc(struct ath5k_softc *sc) 2919 2925 { 2920 - struct ath_vif_iter_data iter_data; 2926 + struct ath5k_vif_iter_data iter_data; 2921 2927 iter_data.hw_macaddr = NULL; 2922 2928 iter_data.any_assoc = false; 2923 2929 iter_data.need_set_hw_addr = false; 2924 2930 iter_data.found_active = true; 2925 2931 2926 - ieee80211_iterate_active_interfaces_atomic(sc->hw, ath_vif_iter, 2932 + ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter, 2927 2933 &iter_data); 2928 2934 return iter_data.any_assoc; 2929 2935 }
+13
drivers/net/wireless/ath/ath5k/base.h
··· 259 259 struct survey_info survey; /* collected survey info */ 260 260 }; 261 261 262 + struct ath5k_vif_iter_data { 263 + const u8 *hw_macaddr; 264 + u8 mask[ETH_ALEN]; 265 + u8 active_mac[ETH_ALEN]; /* first active MAC */ 266 + bool need_set_hw_addr; 267 + bool found_active; 268 + bool any_assoc; 269 + enum nl80211_iftype opmode; 270 + int n_stas; 271 + }; 272 + void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif); 273 + 274 + 262 275 #define ath5k_hw_hasbssidmask(_ah) \ 263 276 (ath5k_hw_get_capability(_ah, AR5K_CAP_BSSIDMASK, 0, NULL) == 0) 264 277 #define ath5k_hw_hasveol(_ah) \
+17 -2
drivers/net/wireless/ath/ath5k/mac80211-ops.c
··· 158 158 159 159 memcpy(&avf->lladdr, vif->addr, ETH_ALEN); 160 160 161 - ath5k_mode_setup(sc, vif); 162 - 161 + ath5k_update_bssid_mask_and_opmode(sc, vif); 163 162 ret = 0; 164 163 end: 165 164 mutex_unlock(&sc->lock); ··· 380 381 struct ath5k_softc *sc = hw->priv; 381 382 struct ath5k_hw *ah = sc->ah; 382 383 u32 mfilt[2], rfilt; 384 + struct ath5k_vif_iter_data iter_data; /* to count STA interfaces */ 383 385 384 386 mutex_lock(&sc->lock); 385 387 ··· 452 452 rfilt |= AR5K_RX_FILTER_BEACON; 453 453 default: 454 454 break; 455 + } 456 + 457 + iter_data.hw_macaddr = NULL; 458 + iter_data.n_stas = 0; 459 + iter_data.need_set_hw_addr = false; 460 + ieee80211_iterate_active_interfaces_atomic(sc->hw, ath5k_vif_iter, 461 + &iter_data); 462 + 463 + /* Set up RX Filter */ 464 + if (iter_data.n_stas > 1) { 465 + /* If you have multiple STA interfaces connected to 466 + * different APs, ARPs are not received (most of the time?) 467 + * Enabling PROMISC appears to fix that probem. 468 + */ 469 + rfilt |= AR5K_RX_FILTER_PROM; 455 470 } 456 471 457 472 /* Set filters */
+1 -1
drivers/net/wireless/ath/ath9k/ahb.c
··· 75 75 goto err_out; 76 76 } 77 77 78 - mem = ioremap_nocache(res->start, res->end - res->start + 1); 78 + mem = ioremap_nocache(res->start, resource_size(res)); 79 79 if (mem == NULL) { 80 80 dev_err(&pdev->dev, "ioremap failed\n"); 81 81 ret = -ENOMEM;
+19 -18
drivers/net/wireless/ath/ath9k/ar9003_phy.c
··· 1020 1020 static void ar9003_hw_do_getnf(struct ath_hw *ah, 1021 1021 int16_t nfarray[NUM_NF_READINGS]) 1022 1022 { 1023 + #define AR_PHY_CH_MINCCA_PWR 0x1FF00000 1024 + #define AR_PHY_CH_MINCCA_PWR_S 20 1025 + #define AR_PHY_CH_EXT_MINCCA_PWR 0x01FF0000 1026 + #define AR_PHY_CH_EXT_MINCCA_PWR_S 16 1027 + 1023 1028 int16_t nf; 1029 + int i; 1024 1030 1025 - nf = MS(REG_READ(ah, AR_PHY_CCA_0), AR_PHY_MINCCA_PWR); 1026 - nfarray[0] = sign_extend32(nf, 8); 1031 + for (i = 0; i < AR9300_MAX_CHAINS; i++) { 1032 + if (ah->rxchainmask & BIT(i)) { 1033 + nf = MS(REG_READ(ah, ah->nf_regs[i]), 1034 + AR_PHY_CH_MINCCA_PWR); 1035 + nfarray[i] = sign_extend32(nf, 8); 1027 1036 1028 - nf = MS(REG_READ(ah, AR_PHY_CCA_1), AR_PHY_CH1_MINCCA_PWR); 1029 - nfarray[1] = sign_extend32(nf, 8); 1037 + if (IS_CHAN_HT40(ah->curchan)) { 1038 + u8 ext_idx = AR9300_MAX_CHAINS + i; 1030 1039 1031 - nf = MS(REG_READ(ah, AR_PHY_CCA_2), AR_PHY_CH2_MINCCA_PWR); 1032 - nfarray[2] = sign_extend32(nf, 8); 1033 - 1034 - if (!IS_CHAN_HT40(ah->curchan)) 1035 - return; 1036 - 1037 - nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR); 1038 - nfarray[3] = sign_extend32(nf, 8); 1039 - 1040 - nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_1), AR_PHY_CH1_EXT_MINCCA_PWR); 1041 - nfarray[4] = sign_extend32(nf, 8); 1042 - 1043 - nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_2), AR_PHY_CH2_EXT_MINCCA_PWR); 1044 - nfarray[5] = sign_extend32(nf, 8); 1040 + nf = MS(REG_READ(ah, ah->nf_regs[ext_idx]), 1041 + AR_PHY_CH_EXT_MINCCA_PWR); 1042 + nfarray[ext_idx] = sign_extend32(nf, 8); 1043 + } 1044 + } 1045 + } 1045 1046 } 1046 1047 1047 1048 static void ar9003_hw_set_nf_limits(struct ath_hw *ah)
+58 -4
drivers/net/wireless/ath/ath9k/debug.c
··· 15 15 */ 16 16 17 17 #include <linux/slab.h> 18 + #include <linux/vmalloc.h> 18 19 #include <asm/unaligned.h> 19 20 20 21 #include "ath9k.h" ··· 28 27 static int ath9k_debugfs_open(struct inode *inode, struct file *file) 29 28 { 30 29 file->private_data = inode->i_private; 30 + return 0; 31 + } 32 + 33 + static ssize_t ath9k_debugfs_read_buf(struct file *file, char __user *user_buf, 34 + size_t count, loff_t *ppos) 35 + { 36 + u8 *buf = file->private_data; 37 + return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf)); 38 + } 39 + 40 + static int ath9k_debugfs_release_buf(struct inode *inode, struct file *file) 41 + { 42 + vfree(file->private_data); 31 43 return 0; 32 44 } 33 45 ··· 562 548 PR("hw-tx-proc-desc: ", txprocdesc); 563 549 len += snprintf(buf + len, size - len, 564 550 "%s%11p%11p%10p%10p\n", "txq-memory-address:", 565 - &(sc->tx.txq_map[WME_AC_BE]), 566 - &(sc->tx.txq_map[WME_AC_BK]), 567 - &(sc->tx.txq_map[WME_AC_VI]), 568 - &(sc->tx.txq_map[WME_AC_VO])); 551 + sc->tx.txq_map[WME_AC_BE], 552 + sc->tx.txq_map[WME_AC_BK], 553 + sc->tx.txq_map[WME_AC_VI], 554 + sc->tx.txq_map[WME_AC_VO]); 569 555 if (len >= size) 570 556 goto done; 571 557 ··· 1041 1027 .llseek = default_llseek, 1042 1028 }; 1043 1029 1030 + #define REGDUMP_LINE_SIZE 20 1031 + 1032 + static int open_file_regdump(struct inode *inode, struct file *file) 1033 + { 1034 + struct ath_softc *sc = inode->i_private; 1035 + unsigned int len = 0; 1036 + u8 *buf; 1037 + int i; 1038 + unsigned long num_regs, regdump_len, max_reg_offset; 1039 + 1040 + max_reg_offset = AR_SREV_9300_20_OR_LATER(sc->sc_ah) ? 
0x16bd4 : 0xb500; 1041 + num_regs = max_reg_offset / 4 + 1; 1042 + regdump_len = num_regs * REGDUMP_LINE_SIZE + 1; 1043 + buf = vmalloc(regdump_len); 1044 + if (!buf) 1045 + return -ENOMEM; 1046 + 1047 + ath9k_ps_wakeup(sc); 1048 + for (i = 0; i < num_regs; i++) 1049 + len += scnprintf(buf + len, regdump_len - len, 1050 + "0x%06x 0x%08x\n", i << 2, REG_READ(sc->sc_ah, i << 2)); 1051 + ath9k_ps_restore(sc); 1052 + 1053 + file->private_data = buf; 1054 + 1055 + return 0; 1056 + } 1057 + 1058 + static const struct file_operations fops_regdump = { 1059 + .open = open_file_regdump, 1060 + .read = ath9k_debugfs_read_buf, 1061 + .release = ath9k_debugfs_release_buf, 1062 + .owner = THIS_MODULE, 1063 + .llseek = default_llseek,/* read accesses f_pos */ 1064 + }; 1065 + 1044 1066 int ath9k_init_debug(struct ath_hw *ah) 1045 1067 { 1046 1068 struct ath_common *common = ath9k_hw_common(ah); ··· 1139 1089 1140 1090 if (!debugfs_create_bool("ignore_extcca", S_IRUSR | S_IWUSR, 1141 1091 sc->debug.debugfs_phy, &ah->config.cwm_ignore_extcca)) 1092 + goto err; 1093 + 1094 + if (!debugfs_create_file("regdump", S_IRUSR, sc->debug.debugfs_phy, 1095 + sc, &fops_regdump)) 1142 1096 goto err; 1143 1097 1144 1098 sc->debug.regidx = 0;
+1 -1
drivers/net/wireless/b43/Kconfig
··· 92 92 ---help--- 93 93 Support for the N-PHY. 94 94 95 - This enables support for devices with N-PHY revision up to 2. 95 + This enables support for devices with N-PHY. 96 96 97 97 Say N if you expect high stability and performance. Saying Y will not 98 98 affect other devices support and may provide support for basic needs.
+169 -12
drivers/net/wireless/b43/phy_n.c
··· 1168 1168 static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev) 1169 1169 { 1170 1170 struct b43_phy_n *nphy = dev->phy.n; 1171 + struct ssb_sprom *sprom = &(dev->dev->bus->sprom); 1172 + 1173 + /* PHY rev 0, 1, 2 */ 1171 1174 u8 i, j; 1172 1175 u8 code; 1173 1176 u16 tmp; 1174 - 1175 - /* TODO: for PHY >= 3 1176 - s8 *lna1_gain, *lna2_gain; 1177 - u8 *gain_db, *gain_bits; 1178 - u16 *rfseq_init; 1179 - u8 lpf_gain[6] = { 0x00, 0x06, 0x0C, 0x12, 0x12, 0x12 }; 1180 - u8 lpf_bits[6] = { 0, 1, 2, 3, 3, 3 }; 1181 - */ 1182 - 1183 1177 u8 rfseq_events[3] = { 6, 8, 7 }; 1184 1178 u8 rfseq_delays[3] = { 10, 30, 1 }; 1185 1179 1180 + /* PHY rev >= 3 */ 1181 + bool ghz5; 1182 + bool ext_lna; 1183 + u16 rssi_gain; 1184 + struct nphy_gain_ctl_workaround_entry *e; 1185 + u8 lpf_gain[6] = { 0x00, 0x06, 0x0C, 0x12, 0x12, 0x12 }; 1186 + u8 lpf_bits[6] = { 0, 1, 2, 3, 3, 3 }; 1187 + 1186 1188 if (dev->phy.rev >= 3) { 1187 - /* TODO */ 1189 + /* Prepare values */ 1190 + ghz5 = b43_phy_read(dev, B43_NPHY_BANDCTL) 1191 + & B43_NPHY_BANDCTL_5GHZ; 1192 + ext_lna = sprom->boardflags_lo & B43_BFL_EXTLNA; 1193 + e = b43_nphy_get_gain_ctl_workaround_ent(dev, ghz5, ext_lna); 1194 + if (ghz5 && dev->phy.rev >= 5) 1195 + rssi_gain = 0x90; 1196 + else 1197 + rssi_gain = 0x50; 1198 + 1199 + b43_phy_set(dev, B43_NPHY_RXCTL, 0x0040); 1200 + 1201 + /* Set Clip 2 detect */ 1202 + b43_phy_set(dev, B43_NPHY_C1_CGAINI, 1203 + B43_NPHY_C1_CGAINI_CL2DETECT); 1204 + b43_phy_set(dev, B43_NPHY_C2_CGAINI, 1205 + B43_NPHY_C2_CGAINI_CL2DETECT); 1206 + 1207 + b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAG1_IDAC, 1208 + 0x17); 1209 + b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAG1_IDAC, 1210 + 0x17); 1211 + b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAG2_IDAC, 0xF0); 1212 + b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAG2_IDAC, 0xF0); 1213 + b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_POLE, 0x00); 1214 + b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_POLE, 0x00); 1215 + 
b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_GAIN, 1216 + rssi_gain); 1217 + b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_GAIN, 1218 + rssi_gain); 1219 + b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAA1_IDAC, 1220 + 0x17); 1221 + b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAA1_IDAC, 1222 + 0x17); 1223 + b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAA2_IDAC, 0xFF); 1224 + b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAA2_IDAC, 0xFF); 1225 + 1226 + b43_ntab_write_bulk(dev, B43_NTAB8(0, 8), 4, e->lna1_gain); 1227 + b43_ntab_write_bulk(dev, B43_NTAB8(1, 8), 4, e->lna1_gain); 1228 + b43_ntab_write_bulk(dev, B43_NTAB8(0, 16), 4, e->lna2_gain); 1229 + b43_ntab_write_bulk(dev, B43_NTAB8(1, 16), 4, e->lna2_gain); 1230 + b43_ntab_write_bulk(dev, B43_NTAB8(0, 32), 10, e->gain_db); 1231 + b43_ntab_write_bulk(dev, B43_NTAB8(1, 32), 10, e->gain_db); 1232 + b43_ntab_write_bulk(dev, B43_NTAB8(2, 32), 10, e->gain_bits); 1233 + b43_ntab_write_bulk(dev, B43_NTAB8(3, 32), 10, e->gain_bits); 1234 + b43_ntab_write_bulk(dev, B43_NTAB8(0, 0x40), 6, lpf_gain); 1235 + b43_ntab_write_bulk(dev, B43_NTAB8(1, 0x40), 6, lpf_gain); 1236 + b43_ntab_write_bulk(dev, B43_NTAB8(2, 0x40), 6, lpf_bits); 1237 + b43_ntab_write_bulk(dev, B43_NTAB8(3, 0x40), 6, lpf_bits); 1238 + 1239 + b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain); 1240 + b43_phy_write(dev, 0x2A7, e->init_gain); 1241 + b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x106), 2, 1242 + e->rfseq_init); 1243 + b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain); 1244 + 1245 + /* TODO: check defines. 
Do not match variables names */ 1246 + b43_phy_write(dev, B43_NPHY_C1_CLIP1_MEDGAIN, e->cliphi_gain); 1247 + b43_phy_write(dev, 0x2A9, e->cliphi_gain); 1248 + b43_phy_write(dev, B43_NPHY_C1_CLIP2_GAIN, e->clipmd_gain); 1249 + b43_phy_write(dev, 0x2AB, e->clipmd_gain); 1250 + b43_phy_write(dev, B43_NPHY_C2_CLIP1_HIGAIN, e->cliplo_gain); 1251 + b43_phy_write(dev, 0x2AD, e->cliplo_gain); 1252 + 1253 + b43_phy_maskset(dev, 0x27D, 0xFF00, e->crsmin); 1254 + b43_phy_maskset(dev, 0x280, 0xFF00, e->crsminl); 1255 + b43_phy_maskset(dev, 0x283, 0xFF00, e->crsminu); 1256 + b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, e->nbclip); 1257 + b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, e->nbclip); 1258 + b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES, 1259 + ~B43_NPHY_C1_CLIPWBTHRES_CLIP2, e->wlclip); 1260 + b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES, 1261 + ~B43_NPHY_C2_CLIPWBTHRES_CLIP2, e->wlclip); 1262 + b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C); 1188 1263 } else { 1189 1264 /* Set Clip 2 detect */ 1190 1265 b43_phy_set(dev, B43_NPHY_C1_CGAINI, ··· 1383 1308 u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 }; 1384 1309 u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 }; 1385 1310 1311 + u16 tmp16; 1312 + u32 tmp32; 1313 + 1386 1314 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) 1387 1315 b43_nphy_classifier(dev, 1, 0); 1388 1316 else ··· 1398 1320 B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2); 1399 1321 1400 1322 if (dev->phy.rev >= 3) { 1323 + tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0)); 1324 + tmp32 &= 0xffffff; 1325 + b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32); 1326 + 1327 + b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x0125); 1328 + b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x01B3); 1329 + b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x0105); 1330 + b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x016E); 1331 + b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0x00CD); 1332 + b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x0020); 1333 + 1334 + b43_phy_write(dev, 
B43_NPHY_C2_CLIP1_MEDGAIN, 0x000C); 1335 + b43_phy_write(dev, 0x2AE, 0x000C); 1336 + 1401 1337 /* TODO */ 1338 + 1339 + tmp16 = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ? 1340 + 0x2 : 0x9C40; 1341 + b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, tmp16); 1342 + 1343 + b43_phy_maskset(dev, 0x294, 0xF0FF, 0x0700); 1344 + 1345 + b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D); 1346 + b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D); 1347 + 1348 + b43_nphy_gain_ctrl_workarounds(dev); 1349 + 1350 + b43_ntab_write(dev, B43_NTAB32(8, 0), 2); 1351 + b43_ntab_write(dev, B43_NTAB32(8, 16), 2); 1352 + 1353 + /* TODO */ 1354 + 1355 + b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_MAST_BIAS, 0x00); 1356 + b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_MAST_BIAS, 0x00); 1357 + b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_MAIN, 0x06); 1358 + b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_MAIN, 0x06); 1359 + b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_AUX, 0x07); 1360 + b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_AUX, 0x07); 1361 + b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_LOB_BIAS, 0x88); 1362 + b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_LOB_BIAS, 0x88); 1363 + b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXG_CMFB_IDAC, 0x00); 1364 + b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXG_CMFB_IDAC, 0x00); 1365 + 1366 + /* N PHY WAR TX Chain Update with hw_phytxchain as argument */ 1367 + 1368 + if ((bus->sprom.boardflags2_lo & B43_BFL2_APLL_WAR && 1369 + b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) || 1370 + (bus->sprom.boardflags2_lo & B43_BFL2_GPLL_WAR && 1371 + b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)) 1372 + tmp32 = 0x00088888; 1373 + else 1374 + tmp32 = 0x88888888; 1375 + b43_ntab_write(dev, B43_NTAB32(30, 1), tmp32); 1376 + b43_ntab_write(dev, B43_NTAB32(30, 2), tmp32); 1377 + b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32); 1378 + 1379 + if (dev->phy.rev == 4 && 1380 + b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { 1381 + 
b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC, 1382 + 0x70); 1383 + b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC, 1384 + 0x70); 1385 + } 1386 + 1387 + b43_phy_write(dev, 0x224, 0x039C); 1388 + b43_phy_write(dev, 0x225, 0x0357); 1389 + b43_phy_write(dev, 0x226, 0x0317); 1390 + b43_phy_write(dev, 0x227, 0x02D7); 1391 + b43_phy_write(dev, 0x228, 0x039C); 1392 + b43_phy_write(dev, 0x229, 0x0357); 1393 + b43_phy_write(dev, 0x22A, 0x0317); 1394 + b43_phy_write(dev, 0x22B, 0x02D7); 1395 + b43_phy_write(dev, 0x22C, 0x039C); 1396 + b43_phy_write(dev, 0x22D, 0x0357); 1397 + b43_phy_write(dev, 0x22E, 0x0317); 1398 + b43_phy_write(dev, 0x22F, 0x02D7); 1402 1399 } else { 1403 1400 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ && 1404 1401 nphy->band5g_pwrgain) { ··· 4031 3878 } 4032 3879 } 4033 3880 3881 + /* http://bcm-v4.sipsolutions.net/802.11/PHY/Anacore */ 4034 3882 static void b43_nphy_op_switch_analog(struct b43_wldev *dev, bool on) 4035 3883 { 4036 - b43_phy_write(dev, B43_NPHY_AFECTL_OVER, 4037 - on ? 0 : 0x7FFF); 3884 + u16 val = on ? 0 : 0x7FFF; 3885 + 3886 + if (dev->phy.rev >= 3) 3887 + b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, val); 3888 + b43_phy_write(dev, B43_NPHY_AFECTL_OVER, val); 4038 3889 } 4039 3890 4040 3891 static int b43_nphy_op_switch_channel(struct b43_wldev *dev,
+103
drivers/net/wireless/b43/tables_nphy.c
··· 2709 2709 { 0x00C0, 6, 0xE7, 0xF9, 0xEC, 0xFB } /* field == 0x4000 (fls 15) */ 2710 2710 }; 2711 2711 2712 + struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][3] = { 2713 + { /* 2GHz */ 2714 + { /* PHY rev 3 */ 2715 + { 7, 11, 16, 23 }, 2716 + { -5, 6, 10, 14 }, 2717 + { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA }, 2718 + { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 }, 2719 + 0x627E, 2720 + { 0x613F, 0x613F, 0x613F, 0x613F }, 2721 + 0x107E, 0x0066, 0x0074, 2722 + 0x18, 0x18, 0x18, 2723 + 0x020D, 0x5, 2724 + }, 2725 + { /* PHY rev 4 */ 2726 + { 8, 12, 17, 25 }, 2727 + { -5, 6, 10, 14 }, 2728 + { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA }, 2729 + { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 }, 2730 + 0x527E, 2731 + { 0x513F, 0x513F, 0x513F, 0x513F }, 2732 + 0x007E, 0x0066, 0x0074, 2733 + 0x18, 0x18, 0x18, 2734 + 0x01A1, 0x5, 2735 + }, 2736 + { /* PHY rev 5+ */ 2737 + { 9, 13, 18, 26 }, 2738 + { -3, 7, 11, 16 }, 2739 + { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA }, 2740 + { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 }, 2741 + 0x427E, /* invalid for external LNA! */ 2742 + { 0x413F, 0x413F, 0x413F, 0x413F }, /* invalid for external LNA! 
*/ 2743 + 0x1076, 0x0066, 0x106A, 2744 + 0xC, 0xC, 0xC, 2745 + 0x01D0, 0x5, 2746 + }, 2747 + }, 2748 + { /* 5GHz */ 2749 + { /* PHY rev 3 */ 2750 + { 7, 11, 17, 23 }, 2751 + { -6, 2, 6, 10 }, 2752 + { 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13 }, 2753 + { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, 2754 + 0x52DE, 2755 + { 0x516F, 0x516F, 0x516F, 0x516F }, 2756 + 0x00DE, 0x00CA, 0x00CC, 2757 + 0x1E, 0x1E, 0x1E, 2758 + 0x01A1, 25, 2759 + }, 2760 + { /* PHY rev 4 */ 2761 + { 8, 12, 18, 23 }, 2762 + { -5, 2, 6, 10 }, 2763 + { 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD }, 2764 + { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 }, 2765 + 0x629E, 2766 + { 0x614F, 0x614F, 0x614F, 0x614F }, 2767 + 0x029E, 0x1084, 0x0086, 2768 + 0x24, 0x24, 0x24, 2769 + 0x0107, 25, 2770 + }, 2771 + { /* PHY rev 5+ */ 2772 + { 6, 10, 16, 21 }, 2773 + { -7, 0, 4, 8 }, 2774 + { 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD }, 2775 + { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 }, 2776 + 0x729E, 2777 + { 0x714F, 0x714F, 0x714F, 0x714F }, 2778 + 0x029E, 0x2084, 0x2086, 2779 + 0x24, 0x24, 0x24, 2780 + 0x00A9, 25, 2781 + }, 2782 + }, 2783 + }; 2784 + 2712 2785 static inline void assert_ntab_array_sizes(void) 2713 2786 { 2714 2787 #undef check ··· 3029 2956 3030 2957 /* Volatile tables */ 3031 2958 /* TODO */ 2959 + } 2960 + 2961 + struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent( 2962 + struct b43_wldev *dev, bool ghz5, bool ext_lna) 2963 + { 2964 + struct nphy_gain_ctl_workaround_entry *e; 2965 + u8 phy_idx; 2966 + 2967 + B43_WARN_ON(dev->phy.rev < 3); 2968 + if (dev->phy.rev >= 5) 2969 + phy_idx = 2; 2970 + else if (dev->phy.rev == 4) 2971 + phy_idx = 1; 2972 + else 2973 + phy_idx = 0; 2974 + 2975 + e = &nphy_gain_ctl_workaround[ghz5][phy_idx]; 2976 + 2977 + /* Only one entry differs for external LNA, so instead making whole 2978 + * table 2 times bigger, hack is here 2979 + */ 2980 + if (!ghz5 && dev->phy.rev >= 5 && ext_lna) { 2981 + e->rfseq_init[0] &= 0x0FFF; 2982 + e->rfseq_init[1] 
&= 0x0FFF; 2983 + e->rfseq_init[2] &= 0x0FFF; 2984 + e->rfseq_init[3] &= 0x0FFF; 2985 + e->init_gain &= 0x0FFF; 2986 + } 2987 + 2988 + return e; 3032 2989 }
+25
drivers/net/wireless/b43/tables_nphy.h
··· 35 35 u8 val_addr1; 36 36 }; 37 37 38 + struct nphy_gain_ctl_workaround_entry { 39 + s8 lna1_gain[4]; 40 + s8 lna2_gain[4]; 41 + u8 gain_db[10]; 42 + u8 gain_bits[10]; 43 + 44 + u16 init_gain; 45 + u16 rfseq_init[4]; 46 + 47 + u16 cliphi_gain; 48 + u16 clipmd_gain; 49 + u16 cliplo_gain; 50 + 51 + u16 crsmin; 52 + u16 crsminl; 53 + u16 crsminu; 54 + 55 + u16 nbclip; 56 + u16 wlclip; 57 + }; 58 + 59 + /* Get entry with workaround values for gain ctl. Does not return NULL. */ 60 + struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent( 61 + struct b43_wldev *dev, bool ghz5, bool ext_lna); 62 + 38 63 /* Get the NPHY Channel Switch Table entry for a channel. 39 64 * Returns NULL on failure to find an entry. */ 40 65 const struct b43_nphy_channeltab_entry_rev2 *
+1 -1
drivers/net/wireless/ipw2x00/ipw2200.h
··· 961 961 struct ipw_country_info { 962 962 u8 id; 963 963 u8 length; 964 - u8 country_str[3]; 964 + u8 country_str[IEEE80211_COUNTRY_STRING_LEN]; 965 965 struct ipw_country_channel_info groups[7]; 966 966 } __packed; 967 967
+4 -238
drivers/net/wireless/iwlwifi/iwl-agn-lib.c
··· 533 533 534 534 void iwlagn_temperature(struct iwl_priv *priv) 535 535 { 536 - /* store temperature from statistics (in Celsius) */ 537 - priv->temperature = 538 - le32_to_cpu(priv->_agn.statistics.general.common.temperature); 536 + /* store temperature from correct statistics (in Celsius) */ 537 + priv->temperature = le32_to_cpu((iwl_bt_statistics(priv)) ? 538 + priv->_agn.statistics_bt.general.common.temperature : 539 + priv->_agn.statistics.general.common.temperature); 539 540 iwl_tt_handler(priv); 540 541 } 541 542 ··· 993 992 } 994 993 995 994 return -1; 996 - } 997 - 998 - /* Calc max signal level (dBm) among 3 possible receivers */ 999 - static inline int iwlagn_calc_rssi(struct iwl_priv *priv, 1000 - struct iwl_rx_phy_res *rx_resp) 1001 - { 1002 - return priv->cfg->ops->utils->calc_rssi(priv, rx_resp); 1003 - } 1004 - 1005 - static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in) 1006 - { 1007 - u32 decrypt_out = 0; 1008 - 1009 - if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) == 1010 - RX_RES_STATUS_STATION_FOUND) 1011 - decrypt_out |= (RX_RES_STATUS_STATION_FOUND | 1012 - RX_RES_STATUS_NO_STATION_INFO_MISMATCH); 1013 - 1014 - decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK); 1015 - 1016 - /* packet was not encrypted */ 1017 - if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == 1018 - RX_RES_STATUS_SEC_TYPE_NONE) 1019 - return decrypt_out; 1020 - 1021 - /* packet was encrypted with unknown alg */ 1022 - if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == 1023 - RX_RES_STATUS_SEC_TYPE_ERR) 1024 - return decrypt_out; 1025 - 1026 - /* decryption was not done in HW */ 1027 - if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) != 1028 - RX_MPDU_RES_STATUS_DEC_DONE_MSK) 1029 - return decrypt_out; 1030 - 1031 - switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) { 1032 - 1033 - case RX_RES_STATUS_SEC_TYPE_CCMP: 1034 - /* alg is CCM: check MIC only */ 1035 - if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK)) 1036 - /* Bad MIC */ 1037 - 
decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; 1038 - else 1039 - decrypt_out |= RX_RES_STATUS_DECRYPT_OK; 1040 - 1041 - break; 1042 - 1043 - case RX_RES_STATUS_SEC_TYPE_TKIP: 1044 - if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) { 1045 - /* Bad TTAK */ 1046 - decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK; 1047 - break; 1048 - } 1049 - /* fall through if TTAK OK */ 1050 - default: 1051 - if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK)) 1052 - decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; 1053 - else 1054 - decrypt_out |= RX_RES_STATUS_DECRYPT_OK; 1055 - break; 1056 - } 1057 - 1058 - IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n", 1059 - decrypt_in, decrypt_out); 1060 - 1061 - return decrypt_out; 1062 - } 1063 - 1064 - static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv, 1065 - struct ieee80211_hdr *hdr, 1066 - u16 len, 1067 - u32 ampdu_status, 1068 - struct iwl_rx_mem_buffer *rxb, 1069 - struct ieee80211_rx_status *stats) 1070 - { 1071 - struct sk_buff *skb; 1072 - __le16 fc = hdr->frame_control; 1073 - 1074 - /* We only process data packets if the interface is open */ 1075 - if (unlikely(!priv->is_open)) { 1076 - IWL_DEBUG_DROP_LIMIT(priv, 1077 - "Dropping packet while interface is not open.\n"); 1078 - return; 1079 - } 1080 - 1081 - /* In case of HW accelerated crypto and bad decryption, drop */ 1082 - if (!priv->cfg->mod_params->sw_crypto && 1083 - iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats)) 1084 - return; 1085 - 1086 - skb = dev_alloc_skb(128); 1087 - if (!skb) { 1088 - IWL_ERR(priv, "dev_alloc_skb failed\n"); 1089 - return; 1090 - } 1091 - 1092 - skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len); 1093 - 1094 - iwl_update_stats(priv, false, fc, len); 1095 - memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); 1096 - 1097 - ieee80211_rx(priv->hw, skb); 1098 - priv->alloc_rxb_page--; 1099 - rxb->page = NULL; 1100 - } 1101 - 1102 - /* Called for REPLY_RX (legacy ABG frames), or 1103 - * REPLY_RX_MPDU_CMD (HT 
high-throughput N frames). */ 1104 - void iwlagn_rx_reply_rx(struct iwl_priv *priv, 1105 - struct iwl_rx_mem_buffer *rxb) 1106 - { 1107 - struct ieee80211_hdr *header; 1108 - struct ieee80211_rx_status rx_status; 1109 - struct iwl_rx_packet *pkt = rxb_addr(rxb); 1110 - struct iwl_rx_phy_res *phy_res; 1111 - __le32 rx_pkt_status; 1112 - struct iwl_rx_mpdu_res_start *amsdu; 1113 - u32 len; 1114 - u32 ampdu_status; 1115 - u32 rate_n_flags; 1116 - 1117 - /** 1118 - * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently. 1119 - * REPLY_RX: physical layer info is in this buffer 1120 - * REPLY_RX_MPDU_CMD: physical layer info was sent in separate 1121 - * command and cached in priv->last_phy_res 1122 - * 1123 - * Here we set up local variables depending on which command is 1124 - * received. 1125 - */ 1126 - if (pkt->hdr.cmd == REPLY_RX) { 1127 - phy_res = (struct iwl_rx_phy_res *)pkt->u.raw; 1128 - header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) 1129 - + phy_res->cfg_phy_cnt); 1130 - 1131 - len = le16_to_cpu(phy_res->byte_count); 1132 - rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) + 1133 - phy_res->cfg_phy_cnt + len); 1134 - ampdu_status = le32_to_cpu(rx_pkt_status); 1135 - } else { 1136 - if (!priv->_agn.last_phy_res_valid) { 1137 - IWL_ERR(priv, "MPDU frame without cached PHY data\n"); 1138 - return; 1139 - } 1140 - phy_res = &priv->_agn.last_phy_res; 1141 - amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw; 1142 - header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu)); 1143 - len = le16_to_cpu(amsdu->byte_count); 1144 - rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len); 1145 - ampdu_status = iwlagn_translate_rx_status(priv, 1146 - le32_to_cpu(rx_pkt_status)); 1147 - } 1148 - 1149 - if ((unlikely(phy_res->cfg_phy_cnt > 20))) { 1150 - IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n", 1151 - phy_res->cfg_phy_cnt); 1152 - return; 1153 - } 1154 - 1155 - if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) || 1156 
- !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) { 1157 - IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", 1158 - le32_to_cpu(rx_pkt_status)); 1159 - return; 1160 - } 1161 - 1162 - /* This will be used in several places later */ 1163 - rate_n_flags = le32_to_cpu(phy_res->rate_n_flags); 1164 - 1165 - /* rx_status carries information about the packet to mac80211 */ 1166 - rx_status.mactime = le64_to_cpu(phy_res->timestamp); 1167 - rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? 1168 - IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; 1169 - rx_status.freq = 1170 - ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel), 1171 - rx_status.band); 1172 - rx_status.rate_idx = 1173 - iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band); 1174 - rx_status.flag = 0; 1175 - 1176 - /* TSF isn't reliable. In order to allow smooth user experience, 1177 - * this W/A doesn't propagate it to the mac80211 */ 1178 - /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/ 1179 - 1180 - priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp); 1181 - 1182 - /* Find max signal strength (dBm) among 3 antenna/receiver chains */ 1183 - rx_status.signal = iwlagn_calc_rssi(priv, phy_res); 1184 - 1185 - iwl_dbg_log_rx_data_frame(priv, len, header); 1186 - IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n", 1187 - rx_status.signal, (unsigned long long)rx_status.mactime); 1188 - 1189 - /* 1190 - * "antenna number" 1191 - * 1192 - * It seems that the antenna field in the phy flags value 1193 - * is actually a bit field. This is undefined by radiotap, 1194 - * it wants an actual antenna number but I always get "7" 1195 - * for most legacy frames I receive indicating that the 1196 - * same frame was received on all three RX chains. 1197 - * 1198 - * I think this field should be removed in favor of a 1199 - * new 802.11n radiotap field "RX chains" that is defined 1200 - * as a bitmask. 
1201 - */ 1202 - rx_status.antenna = 1203 - (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) 1204 - >> RX_RES_PHY_FLAGS_ANTENNA_POS; 1205 - 1206 - /* set the preamble flag if appropriate */ 1207 - if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) 1208 - rx_status.flag |= RX_FLAG_SHORTPRE; 1209 - 1210 - /* Set up the HT phy flags */ 1211 - if (rate_n_flags & RATE_MCS_HT_MSK) 1212 - rx_status.flag |= RX_FLAG_HT; 1213 - if (rate_n_flags & RATE_MCS_HT40_MSK) 1214 - rx_status.flag |= RX_FLAG_40MHZ; 1215 - if (rate_n_flags & RATE_MCS_SGI_MSK) 1216 - rx_status.flag |= RX_FLAG_SHORT_GI; 1217 - 1218 - iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status, 1219 - rxb, &rx_status); 1220 - } 1221 - 1222 - /* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD). 1223 - * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */ 1224 - void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv, 1225 - struct iwl_rx_mem_buffer *rxb) 1226 - { 1227 - struct iwl_rx_packet *pkt = rxb_addr(rxb); 1228 - priv->_agn.last_phy_res_valid = true; 1229 - memcpy(&priv->_agn.last_phy_res, pkt->u.raw, 1230 - sizeof(struct iwl_rx_phy_res)); 1231 995 } 1232 996 1233 997 static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
+2 -176
drivers/net/wireless/iwlwifi/iwl-agn.c
··· 424 424 return 0; 425 425 } 426 426 427 - /****************************************************************************** 428 - * 429 - * Generic RX handler implementations 430 - * 431 - ******************************************************************************/ 432 - static void iwl_rx_reply_alive(struct iwl_priv *priv, 433 - struct iwl_rx_mem_buffer *rxb) 434 - { 435 - struct iwl_rx_packet *pkt = rxb_addr(rxb); 436 - struct iwl_alive_resp *palive; 437 - struct delayed_work *pwork; 438 - 439 - palive = &pkt->u.alive_frame; 440 - 441 - IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision " 442 - "0x%01X 0x%01X\n", 443 - palive->is_valid, palive->ver_type, 444 - palive->ver_subtype); 445 - 446 - if (palive->ver_subtype == INITIALIZE_SUBTYPE) { 447 - IWL_DEBUG_INFO(priv, "Initialization Alive received.\n"); 448 - memcpy(&priv->card_alive_init, 449 - &pkt->u.alive_frame, 450 - sizeof(struct iwl_init_alive_resp)); 451 - pwork = &priv->init_alive_start; 452 - } else { 453 - IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); 454 - memcpy(&priv->card_alive, &pkt->u.alive_frame, 455 - sizeof(struct iwl_alive_resp)); 456 - pwork = &priv->alive_start; 457 - } 458 - 459 - /* We delay the ALIVE response by 5ms to 460 - * give the HW RF Kill time to activate... */ 461 - if (palive->is_valid == UCODE_VALID_OK) 462 - queue_delayed_work(priv->workqueue, pwork, 463 - msecs_to_jiffies(5)); 464 - else { 465 - IWL_WARN(priv, "%s uCode did not respond OK.\n", 466 - (palive->ver_subtype == INITIALIZE_SUBTYPE) ? 467 - "init" : "runtime"); 468 - /* 469 - * If fail to load init uCode, 470 - * let's try to load the init uCode again. 471 - * We should not get into this situation, but if it 472 - * does happen, we should not move on and loading "runtime" 473 - * without proper calibrate the device. 
474 - */ 475 - if (palive->ver_subtype == INITIALIZE_SUBTYPE) 476 - priv->ucode_type = UCODE_NONE; 477 - queue_work(priv->workqueue, &priv->restart); 478 - } 479 - } 480 - 481 427 static void iwl_bg_beacon_update(struct work_struct *work) 482 428 { 483 429 struct iwl_priv *priv = ··· 658 712 } 659 713 } 660 714 661 - static void iwlagn_rx_beacon_notif(struct iwl_priv *priv, 662 - struct iwl_rx_mem_buffer *rxb) 663 - { 664 - struct iwl_rx_packet *pkt = rxb_addr(rxb); 665 - struct iwlagn_beacon_notif *beacon = (void *)pkt->u.raw; 666 - #ifdef CONFIG_IWLWIFI_DEBUG 667 - u16 status = le16_to_cpu(beacon->beacon_notify_hdr.status.status); 668 - u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); 669 - 670 - IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d " 671 - "tsf:0x%.8x%.8x rate:%d\n", 672 - status & TX_STATUS_MSK, 673 - beacon->beacon_notify_hdr.failure_frame, 674 - le32_to_cpu(beacon->ibss_mgr_status), 675 - le32_to_cpu(beacon->high_tsf), 676 - le32_to_cpu(beacon->low_tsf), rate); 677 - #endif 678 - 679 - priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); 680 - 681 - if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) 682 - queue_work(priv->workqueue, &priv->beacon_update); 683 - } 684 - 685 - /* Handle notification from uCode that card's power state is changing 686 - * due to software, hardware, or critical temperature RFKILL */ 687 - static void iwl_rx_card_state_notif(struct iwl_priv *priv, 688 - struct iwl_rx_mem_buffer *rxb) 689 - { 690 - struct iwl_rx_packet *pkt = rxb_addr(rxb); 691 - u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); 692 - unsigned long status = priv->status; 693 - 694 - IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n", 695 - (flags & HW_CARD_DISABLED) ? "Kill" : "On", 696 - (flags & SW_CARD_DISABLED) ? "Kill" : "On", 697 - (flags & CT_CARD_DISABLED) ? 
698 - "Reached" : "Not reached"); 699 - 700 - if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | 701 - CT_CARD_DISABLED)) { 702 - 703 - iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, 704 - CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 705 - 706 - iwl_write_direct32(priv, HBUS_TARG_MBX_C, 707 - HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); 708 - 709 - if (!(flags & RXON_CARD_DISABLED)) { 710 - iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, 711 - CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 712 - iwl_write_direct32(priv, HBUS_TARG_MBX_C, 713 - HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); 714 - } 715 - if (flags & CT_CARD_DISABLED) 716 - iwl_tt_enter_ct_kill(priv); 717 - } 718 - if (!(flags & CT_CARD_DISABLED)) 719 - iwl_tt_exit_ct_kill(priv); 720 - 721 - if (flags & HW_CARD_DISABLED) 722 - set_bit(STATUS_RF_KILL_HW, &priv->status); 723 - else 724 - clear_bit(STATUS_RF_KILL_HW, &priv->status); 725 - 726 - 727 - if (!(flags & RXON_CARD_DISABLED)) 728 - iwl_scan_cancel(priv); 729 - 730 - if ((test_bit(STATUS_RF_KILL_HW, &status) != 731 - test_bit(STATUS_RF_KILL_HW, &priv->status))) 732 - wiphy_rfkill_set_hw_state(priv->hw->wiphy, 733 - test_bit(STATUS_RF_KILL_HW, &priv->status)); 734 - else 735 - wake_up_interruptible(&priv->wait_command_queue); 736 - } 737 - 738 715 static void iwl_bg_tx_flush(struct work_struct *work) 739 716 { 740 717 struct iwl_priv *priv = ··· 674 805 IWL_DEBUG_INFO(priv, "device request: flush all tx frames\n"); 675 806 iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL); 676 807 } 677 - } 678 - 679 - /** 680 - * iwl_setup_rx_handlers - Initialize Rx handler callbacks 681 - * 682 - * Setup the RX handlers for each of the reply types sent from the uCode 683 - * to the host. 684 - * 685 - * This function chains into the hardware specific files for them to setup 686 - * any hardware specific handlers as well. 
687 - */ 688 - static void iwl_setup_rx_handlers(struct iwl_priv *priv) 689 - { 690 - priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive; 691 - priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error; 692 - priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa; 693 - priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] = 694 - iwl_rx_spectrum_measure_notif; 695 - priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif; 696 - priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] = 697 - iwl_rx_pm_debug_statistics_notif; 698 - priv->rx_handlers[BEACON_NOTIFICATION] = iwlagn_rx_beacon_notif; 699 - 700 - /* 701 - * The same handler is used for both the REPLY to a discrete 702 - * statistics request from the host as well as for the periodic 703 - * statistics notifications (after received beacons) from the uCode. 704 - */ 705 - priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_reply_statistics; 706 - priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics; 707 - 708 - iwl_setup_rx_scan_handlers(priv); 709 - 710 - /* status change handler */ 711 - priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif; 712 - 713 - priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] = 714 - iwl_rx_missed_beacon_notif; 715 - /* Rx handlers */ 716 - priv->rx_handlers[REPLY_RX_PHY_CMD] = iwlagn_rx_reply_rx_phy; 717 - priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwlagn_rx_reply_rx; 718 - /* block ack */ 719 - priv->rx_handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba; 720 - /* Set up hardware specific Rx handlers */ 721 - priv->cfg->ops->lib->rx_handler_setup(priv); 722 808 } 723 809 724 810 /** ··· 3736 3912 IWL_DELAY_NEXT_FORCE_RF_RESET; 3737 3913 priv->force_reset[IWL_FW_RESET].reset_duration = 3738 3914 IWL_DELAY_NEXT_FORCE_FW_RELOAD; 3915 + 3916 + priv->rx_statistics_jiffies = jiffies; 3739 3917 3740 3918 /* Choose which receivers/antennas to use */ 3741 3919 if (priv->cfg->ops->hcmd->set_rxon_chain)
+1 -12
drivers/net/wireless/iwlwifi/iwl-agn.h
··· 190 190 void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq); 191 191 int iwlagn_rxq_stop(struct iwl_priv *priv); 192 192 int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band); 193 - void iwlagn_rx_reply_rx(struct iwl_priv *priv, 194 - struct iwl_rx_mem_buffer *rxb); 195 - void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv, 196 - struct iwl_rx_mem_buffer *rxb); 193 + void iwl_setup_rx_handlers(struct iwl_priv *priv); 197 194 198 195 /* tx */ 199 196 void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq); ··· 239 242 } 240 243 241 244 u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid); 242 - 243 - /* rx */ 244 - void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, 245 - struct iwl_rx_mem_buffer *rxb); 246 - void iwl_rx_statistics(struct iwl_priv *priv, 247 - struct iwl_rx_mem_buffer *rxb); 248 - void iwl_reply_statistics(struct iwl_priv *priv, 249 - struct iwl_rx_mem_buffer *rxb); 250 245 251 246 /* scan */ 252 247 int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
-63
drivers/net/wireless/iwlwifi/iwl-core.c
··· 869 869 } 870 870 } 871 871 872 - void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) 873 - { 874 - struct iwl_rx_packet *pkt = rxb_addr(rxb); 875 - struct iwl_csa_notification *csa = &(pkt->u.csa_notif); 876 - /* 877 - * MULTI-FIXME 878 - * See iwl_mac_channel_switch. 879 - */ 880 - struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 881 - struct iwl_rxon_cmd *rxon = (void *)&ctx->active; 882 - 883 - if (priv->switch_rxon.switch_in_progress) { 884 - if (!le32_to_cpu(csa->status) && 885 - (csa->channel == priv->switch_rxon.channel)) { 886 - rxon->channel = csa->channel; 887 - ctx->staging.channel = csa->channel; 888 - IWL_DEBUG_11H(priv, "CSA notif: channel %d\n", 889 - le16_to_cpu(csa->channel)); 890 - iwl_chswitch_done(priv, true); 891 - } else { 892 - IWL_ERR(priv, "CSA notif (fail) : channel %d\n", 893 - le16_to_cpu(csa->channel)); 894 - iwl_chswitch_done(priv, false); 895 - } 896 - } 897 - } 898 - 899 872 #ifdef CONFIG_IWLWIFI_DEBUG 900 873 void iwl_print_rx_config_cmd(struct iwl_priv *priv, 901 874 struct iwl_rxon_context *ctx) ··· 1216 1243 return iwl_send_cmd_pdu(priv, REPLY_STATISTICS_CMD, 1217 1244 sizeof(struct iwl_statistics_cmd), 1218 1245 &statistics_cmd); 1219 - } 1220 - 1221 - void iwl_rx_pm_sleep_notif(struct iwl_priv *priv, 1222 - struct iwl_rx_mem_buffer *rxb) 1223 - { 1224 - #ifdef CONFIG_IWLWIFI_DEBUG 1225 - struct iwl_rx_packet *pkt = rxb_addr(rxb); 1226 - struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif); 1227 - IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n", 1228 - sleep->pm_sleep_mode, sleep->pm_wakeup_src); 1229 - #endif 1230 - } 1231 - 1232 - void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv, 1233 - struct iwl_rx_mem_buffer *rxb) 1234 - { 1235 - struct iwl_rx_packet *pkt = rxb_addr(rxb); 1236 - u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; 1237 - IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled " 1238 - "notification for %s:\n", len, 1239 - 
get_cmd_string(pkt->hdr.cmd)); 1240 - iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len); 1241 - } 1242 - 1243 - void iwl_rx_reply_error(struct iwl_priv *priv, 1244 - struct iwl_rx_mem_buffer *rxb) 1245 - { 1246 - struct iwl_rx_packet *pkt = rxb_addr(rxb); 1247 - 1248 - IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) " 1249 - "seq 0x%04X ser 0x%08X\n", 1250 - le32_to_cpu(pkt->u.err_resp.error_type), 1251 - get_cmd_string(pkt->u.err_resp.cmd_id), 1252 - pkt->u.err_resp.cmd_id, 1253 - le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num), 1254 - le32_to_cpu(pkt->u.err_resp.error_info)); 1255 1246 } 1256 1247 1257 1248 void iwl_clear_isr_stats(struct iwl_priv *priv)
+1 -17
drivers/net/wireless/iwlwifi/iwl-core.h
··· 441 441 void iwl_connection_init_rx_config(struct iwl_priv *priv, 442 442 struct iwl_rxon_context *ctx); 443 443 void iwl_set_rate(struct iwl_priv *priv); 444 - int iwl_set_decrypted_flag(struct iwl_priv *priv, 445 - struct ieee80211_hdr *hdr, 446 - u32 decrypt_res, 447 - struct ieee80211_rx_status *stats); 448 444 void iwl_irq_handle_error(struct iwl_priv *priv); 449 445 int iwl_mac_add_interface(struct ieee80211_hw *hw, 450 446 struct ieee80211_vif *vif); ··· 489 493 { 490 494 } 491 495 #endif 492 - /***************************************************** 493 - * RX handlers. 494 - * **************************************************/ 495 - void iwl_rx_pm_sleep_notif(struct iwl_priv *priv, 496 - struct iwl_rx_mem_buffer *rxb); 497 - void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv, 498 - struct iwl_rx_mem_buffer *rxb); 499 - void iwl_rx_reply_error(struct iwl_priv *priv, 500 - struct iwl_rx_mem_buffer *rxb); 501 496 502 497 /***************************************************** 503 498 * RX ··· 500 513 struct iwl_rx_queue *q); 501 514 int iwl_rx_queue_space(const struct iwl_rx_queue *q); 502 515 void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); 503 - /* Handlers */ 504 - void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv, 505 - struct iwl_rx_mem_buffer *rxb); 516 + 506 517 void iwl_chswitch_done(struct iwl_priv *priv, bool is_success); 507 - void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); 508 518 509 519 /* TX helpers */ 510 520
+2 -2
drivers/net/wireless/iwlwifi/iwl-dev.h
··· 1261 1261 /* track IBSS manager (last beacon) status */ 1262 1262 u32 ibss_manager; 1263 1263 1264 - /* storing the jiffies when the plcp error rate is received */ 1265 - unsigned long plcp_jiffies; 1264 + /* jiffies when last recovery from statistics was performed */ 1265 + unsigned long rx_statistics_jiffies; 1266 1266 1267 1267 /* force reset */ 1268 1268 struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET];
+558 -103
drivers/net/wireless/iwlwifi/iwl-rx.c
··· 29 29 30 30 #include <linux/etherdevice.h> 31 31 #include <linux/slab.h> 32 + #include <linux/sched.h> 32 33 #include <net/mac80211.h> 33 34 #include <asm/unaligned.h> 34 35 #include "iwl-eeprom.h" ··· 39 38 #include "iwl-io.h" 40 39 #include "iwl-helpers.h" 41 40 #include "iwl-agn-calib.h" 42 - /************************** RX-FUNCTIONS ****************************/ 41 + #include "iwl-agn.h" 42 + 43 + /****************************************************************************** 44 + * 45 + * RX path functions 46 + * 47 + ******************************************************************************/ 48 + 43 49 /* 44 50 * Rx theory of operation 45 51 * ··· 219 211 return -ENOMEM; 220 212 } 221 213 222 - void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv, 214 + /****************************************************************************** 215 + * 216 + * Generic RX handler implementations 217 + * 218 + ******************************************************************************/ 219 + 220 + static void iwl_rx_reply_alive(struct iwl_priv *priv, 221 + struct iwl_rx_mem_buffer *rxb) 222 + { 223 + struct iwl_rx_packet *pkt = rxb_addr(rxb); 224 + struct iwl_alive_resp *palive; 225 + struct delayed_work *pwork; 226 + 227 + palive = &pkt->u.alive_frame; 228 + 229 + IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision " 230 + "0x%01X 0x%01X\n", 231 + palive->is_valid, palive->ver_type, 232 + palive->ver_subtype); 233 + 234 + if (palive->ver_subtype == INITIALIZE_SUBTYPE) { 235 + IWL_DEBUG_INFO(priv, "Initialization Alive received.\n"); 236 + memcpy(&priv->card_alive_init, 237 + &pkt->u.alive_frame, 238 + sizeof(struct iwl_init_alive_resp)); 239 + pwork = &priv->init_alive_start; 240 + } else { 241 + IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); 242 + memcpy(&priv->card_alive, &pkt->u.alive_frame, 243 + sizeof(struct iwl_alive_resp)); 244 + pwork = &priv->alive_start; 245 + } 246 + 247 + /* We delay the ALIVE response by 5ms to 248 + * give the HW RF 
Kill time to activate... */ 249 + if (palive->is_valid == UCODE_VALID_OK) 250 + queue_delayed_work(priv->workqueue, pwork, 251 + msecs_to_jiffies(5)); 252 + else { 253 + IWL_WARN(priv, "%s uCode did not respond OK.\n", 254 + (palive->ver_subtype == INITIALIZE_SUBTYPE) ? 255 + "init" : "runtime"); 256 + /* 257 + * If fail to load init uCode, 258 + * let's try to load the init uCode again. 259 + * We should not get into this situation, but if it 260 + * does happen, we should not move on and loading "runtime" 261 + * without proper calibrate the device. 262 + */ 263 + if (palive->ver_subtype == INITIALIZE_SUBTYPE) 264 + priv->ucode_type = UCODE_NONE; 265 + queue_work(priv->workqueue, &priv->restart); 266 + } 267 + } 268 + 269 + static void iwl_rx_reply_error(struct iwl_priv *priv, 270 + struct iwl_rx_mem_buffer *rxb) 271 + { 272 + struct iwl_rx_packet *pkt = rxb_addr(rxb); 273 + 274 + IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) " 275 + "seq 0x%04X ser 0x%08X\n", 276 + le32_to_cpu(pkt->u.err_resp.error_type), 277 + get_cmd_string(pkt->u.err_resp.cmd_id), 278 + pkt->u.err_resp.cmd_id, 279 + le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num), 280 + le32_to_cpu(pkt->u.err_resp.error_info)); 281 + } 282 + 283 + static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) 284 + { 285 + struct iwl_rx_packet *pkt = rxb_addr(rxb); 286 + struct iwl_csa_notification *csa = &(pkt->u.csa_notif); 287 + /* 288 + * MULTI-FIXME 289 + * See iwl_mac_channel_switch. 
290 + */ 291 + struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 292 + struct iwl_rxon_cmd *rxon = (void *)&ctx->active; 293 + 294 + if (priv->switch_rxon.switch_in_progress) { 295 + if (!le32_to_cpu(csa->status) && 296 + (csa->channel == priv->switch_rxon.channel)) { 297 + rxon->channel = csa->channel; 298 + ctx->staging.channel = csa->channel; 299 + IWL_DEBUG_11H(priv, "CSA notif: channel %d\n", 300 + le16_to_cpu(csa->channel)); 301 + iwl_chswitch_done(priv, true); 302 + } else { 303 + IWL_ERR(priv, "CSA notif (fail) : channel %d\n", 304 + le16_to_cpu(csa->channel)); 305 + iwl_chswitch_done(priv, false); 306 + } 307 + } 308 + } 309 + 310 + 311 + static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv, 223 312 struct iwl_rx_mem_buffer *rxb) 224 313 { 225 314 struct iwl_rx_packet *pkt = rxb_addr(rxb); ··· 330 225 331 226 memcpy(&priv->measure_report, report, sizeof(*report)); 332 227 priv->measurement_status |= MEASUREMENT_READY; 228 + } 229 + 230 + static void iwl_rx_pm_sleep_notif(struct iwl_priv *priv, 231 + struct iwl_rx_mem_buffer *rxb) 232 + { 233 + #ifdef CONFIG_IWLWIFI_DEBUG 234 + struct iwl_rx_packet *pkt = rxb_addr(rxb); 235 + struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif); 236 + IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n", 237 + sleep->pm_sleep_mode, sleep->pm_wakeup_src); 238 + #endif 239 + } 240 + 241 + static void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv, 242 + struct iwl_rx_mem_buffer *rxb) 243 + { 244 + struct iwl_rx_packet *pkt = rxb_addr(rxb); 245 + u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; 246 + IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled " 247 + "notification for %s:\n", len, 248 + get_cmd_string(pkt->hdr.cmd)); 249 + iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len); 250 + } 251 + 252 + static void iwl_rx_beacon_notif(struct iwl_priv *priv, 253 + struct iwl_rx_mem_buffer *rxb) 254 + { 255 + struct iwl_rx_packet *pkt = rxb_addr(rxb); 256 + struct 
iwlagn_beacon_notif *beacon = (void *)pkt->u.raw; 257 + #ifdef CONFIG_IWLWIFI_DEBUG 258 + u16 status = le16_to_cpu(beacon->beacon_notify_hdr.status.status); 259 + u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); 260 + 261 + IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d " 262 + "tsf:0x%.8x%.8x rate:%d\n", 263 + status & TX_STATUS_MSK, 264 + beacon->beacon_notify_hdr.failure_frame, 265 + le32_to_cpu(beacon->ibss_mgr_status), 266 + le32_to_cpu(beacon->high_tsf), 267 + le32_to_cpu(beacon->low_tsf), rate); 268 + #endif 269 + 270 + priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); 271 + 272 + if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) 273 + queue_work(priv->workqueue, &priv->beacon_update); 333 274 } 334 275 335 276 /* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */ ··· 449 298 * When the plcp error is exceeding the thresholds, reset the radio 450 299 * to improve the throughput. 451 300 */ 452 - static bool iwl_good_plcp_health(struct iwl_priv *priv, struct iwl_rx_packet *pkt) 301 + static bool iwl_good_plcp_health(struct iwl_priv *priv, 302 + struct iwl_rx_packet *pkt, unsigned int msecs) 453 303 { 454 - bool rc = true; 455 - int combined_plcp_delta; 456 - unsigned int plcp_msec; 457 - unsigned long plcp_received_jiffies; 304 + int delta; 305 + int threshold = priv->cfg->base_params->plcp_delta_threshold; 458 306 459 - if (priv->cfg->base_params->plcp_delta_threshold == 460 - IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) { 307 + if (threshold == IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) { 461 308 IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n"); 462 - return rc; 309 + return true; 463 310 } 464 311 465 - /* 466 - * check for plcp_err and trigger radio reset if it exceeds 467 - * the plcp error threshold plcp_delta. 
468 - */ 469 - plcp_received_jiffies = jiffies; 470 - plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies - 471 - (long) priv->plcp_jiffies); 472 - priv->plcp_jiffies = plcp_received_jiffies; 473 - /* 474 - * check to make sure plcp_msec is not 0 to prevent division 475 - * by zero. 476 - */ 477 - if (plcp_msec) { 478 - struct statistics_rx_phy *ofdm; 479 - struct statistics_rx_ht_phy *ofdm_ht; 312 + if (iwl_bt_statistics(priv)) { 313 + struct statistics_rx_bt *cur, *old; 480 314 481 - if (iwl_bt_statistics(priv)) { 482 - ofdm = &pkt->u.stats_bt.rx.ofdm; 483 - ofdm_ht = &pkt->u.stats_bt.rx.ofdm_ht; 484 - combined_plcp_delta = 485 - (le32_to_cpu(ofdm->plcp_err) - 486 - le32_to_cpu(priv->_agn.statistics_bt. 487 - rx.ofdm.plcp_err)) + 488 - (le32_to_cpu(ofdm_ht->plcp_err) - 489 - le32_to_cpu(priv->_agn.statistics_bt. 490 - rx.ofdm_ht.plcp_err)); 491 - } else { 492 - ofdm = &pkt->u.stats.rx.ofdm; 493 - ofdm_ht = &pkt->u.stats.rx.ofdm_ht; 494 - combined_plcp_delta = 495 - (le32_to_cpu(ofdm->plcp_err) - 496 - le32_to_cpu(priv->_agn.statistics. 497 - rx.ofdm.plcp_err)) + 498 - (le32_to_cpu(ofdm_ht->plcp_err) - 499 - le32_to_cpu(priv->_agn.statistics. 
500 - rx.ofdm_ht.plcp_err)); 501 - } 315 + cur = &pkt->u.stats_bt.rx; 316 + old = &priv->_agn.statistics_bt.rx; 502 317 503 - if ((combined_plcp_delta > 0) && 504 - ((combined_plcp_delta * 100) / plcp_msec) > 505 - priv->cfg->base_params->plcp_delta_threshold) { 506 - /* 507 - * if plcp_err exceed the threshold, 508 - * the following data is printed in csv format: 509 - * Text: plcp_err exceeded %d, 510 - * Received ofdm.plcp_err, 511 - * Current ofdm.plcp_err, 512 - * Received ofdm_ht.plcp_err, 513 - * Current ofdm_ht.plcp_err, 514 - * combined_plcp_delta, 515 - * plcp_msec 516 - */ 517 - IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, " 518 - "%u, %u, %u, %u, %d, %u mSecs\n", 519 - priv->cfg->base_params->plcp_delta_threshold, 520 - le32_to_cpu(ofdm->plcp_err), 521 - le32_to_cpu(ofdm->plcp_err), 522 - le32_to_cpu(ofdm_ht->plcp_err), 523 - le32_to_cpu(ofdm_ht->plcp_err), 524 - combined_plcp_delta, plcp_msec); 318 + delta = le32_to_cpu(cur->ofdm.plcp_err) - 319 + le32_to_cpu(old->ofdm.plcp_err) + 320 + le32_to_cpu(cur->ofdm_ht.plcp_err) - 321 + le32_to_cpu(old->ofdm_ht.plcp_err); 322 + } else { 323 + struct statistics_rx *cur, *old; 525 324 526 - rc = false; 527 - } 325 + cur = &pkt->u.stats.rx; 326 + old = &priv->_agn.statistics.rx; 327 + 328 + delta = le32_to_cpu(cur->ofdm.plcp_err) - 329 + le32_to_cpu(old->ofdm.plcp_err) + 330 + le32_to_cpu(cur->ofdm_ht.plcp_err) - 331 + le32_to_cpu(old->ofdm_ht.plcp_err); 528 332 } 529 - return rc; 333 + 334 + /* Can be negative if firmware reseted statistics */ 335 + if (delta <= 0) 336 + return true; 337 + 338 + if ((delta * 100 / msecs) > threshold) { 339 + IWL_DEBUG_RADIO(priv, 340 + "plcp health threshold %u delta %d msecs %u\n", 341 + threshold, delta, msecs); 342 + return false; 343 + } 344 + 345 + return true; 530 346 } 531 347 532 - static void iwl_recover_from_statistics(struct iwl_priv *priv, struct iwl_rx_packet *pkt) 348 + static void iwl_recover_from_statistics(struct iwl_priv *priv, 349 + struct iwl_rx_packet *pkt) 
533 350 { 534 351 const struct iwl_mod_params *mod_params = priv->cfg->mod_params; 352 + unsigned int msecs; 353 + unsigned long stamp; 535 354 536 - if (test_bit(STATUS_EXIT_PENDING, &priv->status) || 537 - !iwl_is_any_associated(priv)) 355 + if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 356 + return; 357 + 358 + stamp = jiffies; 359 + msecs = jiffies_to_msecs(stamp - priv->rx_statistics_jiffies); 360 + 361 + /* Only gather statistics and update time stamp when not associated */ 362 + if (!iwl_is_any_associated(priv)) 363 + goto out; 364 + 365 + /* Do not check/recover when do not have enough statistics data */ 366 + if (msecs < 99) 538 367 return; 539 368 540 369 if (mod_params->ack_check && !iwl_good_ack_health(priv, pkt)) { ··· 523 392 return; 524 393 } 525 394 526 - if (mod_params->plcp_check && !iwl_good_plcp_health(priv, pkt)) 395 + if (mod_params->plcp_check && !iwl_good_plcp_health(priv, pkt, msecs)) 527 396 iwl_force_reset(priv, IWL_RF_RESET, false); 397 + 398 + out: 399 + if (iwl_bt_statistics(priv)) 400 + memcpy(&priv->_agn.statistics_bt, &pkt->u.stats_bt, 401 + sizeof(priv->_agn.statistics_bt)); 402 + else 403 + memcpy(&priv->_agn.statistics, &pkt->u.stats, 404 + sizeof(priv->_agn.statistics)); 405 + 406 + priv->rx_statistics_jiffies = stamp; 528 407 } 529 408 530 409 /* Calculate noise level, based on measurements during network silence just ··· 583 442 last_rx_noise); 584 443 } 585 444 586 - #ifdef CONFIG_IWLWIFI_DEBUGFS 587 445 /* 588 446 * based on the assumption of all statistics counter are in DWORD 589 447 * FIXME: This function is for debugging, do not deal with ··· 591 451 static void iwl_accumulative_statistics(struct iwl_priv *priv, 592 452 __le32 *stats) 593 453 { 454 + #ifdef CONFIG_IWLWIFI_DEBUGFS 594 455 int i, size; 595 456 __le32 *prev_stats; 596 457 u32 *accum_stats; ··· 639 498 accum_tx->tx_power.ant_a = tx->tx_power.ant_a; 640 499 accum_tx->tx_power.ant_b = tx->tx_power.ant_b; 641 500 accum_tx->tx_power.ant_c = 
tx->tx_power.ant_c; 642 - } 643 501 #endif 502 + } 644 503 645 - #define REG_RECALIB_PERIOD (60) 646 - 647 - void iwl_rx_statistics(struct iwl_priv *priv, 504 + static void iwl_rx_statistics(struct iwl_priv *priv, 648 505 struct iwl_rx_mem_buffer *rxb) 649 506 { 507 + const int reg_recalib_period = 60; 650 508 int change; 651 509 struct iwl_rx_packet *pkt = rxb_addr(rxb); 652 510 ··· 662 522 STATISTICS_REPLY_FLG_HT40_MODE_MSK) != 663 523 (pkt->u.stats_bt.flag & 664 524 STATISTICS_REPLY_FLG_HT40_MODE_MSK))); 665 - #ifdef CONFIG_IWLWIFI_DEBUGFS 666 - iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats_bt); 667 - #endif 668 525 526 + iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats_bt); 669 527 } else { 670 528 IWL_DEBUG_RX(priv, 671 529 "Statistics notification received (%d vs %d).\n", ··· 677 539 STATISTICS_REPLY_FLG_HT40_MODE_MSK) != 678 540 (pkt->u.stats.flag & 679 541 STATISTICS_REPLY_FLG_HT40_MODE_MSK))); 680 - #ifdef CONFIG_IWLWIFI_DEBUGFS 681 - iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats); 682 - #endif 683 542 543 + iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats); 684 544 } 685 545 686 546 iwl_recover_from_statistics(priv, pkt); 687 547 688 - if (iwl_bt_statistics(priv)) 689 - memcpy(&priv->_agn.statistics_bt, &pkt->u.stats_bt, 690 - sizeof(priv->_agn.statistics_bt)); 691 - else 692 - memcpy(&priv->_agn.statistics, &pkt->u.stats, 693 - sizeof(priv->_agn.statistics)); 694 - 695 548 set_bit(STATUS_STATISTICS, &priv->status); 696 549 697 550 /* Reschedule the statistics timer to occur in 698 - * REG_RECALIB_PERIOD seconds to ensure we get a 551 + * reg_recalib_period seconds to ensure we get a 699 552 * thermal update even if the uCode doesn't give 700 553 * us one */ 701 554 mod_timer(&priv->statistics_periodic, jiffies + 702 - msecs_to_jiffies(REG_RECALIB_PERIOD * 1000)); 555 + msecs_to_jiffies(reg_recalib_period * 1000)); 703 556 704 557 if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) && 705 558 (pkt->hdr.cmd == 
STATISTICS_NOTIFICATION)) { ··· 701 572 priv->cfg->ops->lib->temp_ops.temperature(priv); 702 573 } 703 574 704 - void iwl_reply_statistics(struct iwl_priv *priv, 705 - struct iwl_rx_mem_buffer *rxb) 575 + static void iwl_rx_reply_statistics(struct iwl_priv *priv, 576 + struct iwl_rx_mem_buffer *rxb) 706 577 { 707 578 struct iwl_rx_packet *pkt = rxb_addr(rxb); 708 579 ··· 726 597 iwl_rx_statistics(priv, rxb); 727 598 } 728 599 729 - void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, 730 - struct iwl_rx_mem_buffer *rxb) 600 + /* Handle notification from uCode that card's power state is changing 601 + * due to software, hardware, or critical temperature RFKILL */ 602 + static void iwl_rx_card_state_notif(struct iwl_priv *priv, 603 + struct iwl_rx_mem_buffer *rxb) 604 + { 605 + struct iwl_rx_packet *pkt = rxb_addr(rxb); 606 + u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); 607 + unsigned long status = priv->status; 608 + 609 + IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n", 610 + (flags & HW_CARD_DISABLED) ? "Kill" : "On", 611 + (flags & SW_CARD_DISABLED) ? "Kill" : "On", 612 + (flags & CT_CARD_DISABLED) ? 
613 + "Reached" : "Not reached"); 614 + 615 + if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | 616 + CT_CARD_DISABLED)) { 617 + 618 + iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, 619 + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 620 + 621 + iwl_write_direct32(priv, HBUS_TARG_MBX_C, 622 + HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); 623 + 624 + if (!(flags & RXON_CARD_DISABLED)) { 625 + iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, 626 + CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 627 + iwl_write_direct32(priv, HBUS_TARG_MBX_C, 628 + HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); 629 + } 630 + if (flags & CT_CARD_DISABLED) 631 + iwl_tt_enter_ct_kill(priv); 632 + } 633 + if (!(flags & CT_CARD_DISABLED)) 634 + iwl_tt_exit_ct_kill(priv); 635 + 636 + if (flags & HW_CARD_DISABLED) 637 + set_bit(STATUS_RF_KILL_HW, &priv->status); 638 + else 639 + clear_bit(STATUS_RF_KILL_HW, &priv->status); 640 + 641 + 642 + if (!(flags & RXON_CARD_DISABLED)) 643 + iwl_scan_cancel(priv); 644 + 645 + if ((test_bit(STATUS_RF_KILL_HW, &status) != 646 + test_bit(STATUS_RF_KILL_HW, &priv->status))) 647 + wiphy_rfkill_set_hw_state(priv->hw->wiphy, 648 + test_bit(STATUS_RF_KILL_HW, &priv->status)); 649 + else 650 + wake_up_interruptible(&priv->wait_command_queue); 651 + } 652 + 653 + static void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, 654 + struct iwl_rx_mem_buffer *rxb) 731 655 732 656 { 733 657 struct iwl_rx_packet *pkt = rxb_addr(rxb); ··· 800 618 } 801 619 } 802 620 621 + /* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD). 622 + * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. 
*/ 623 + static void iwl_rx_reply_rx_phy(struct iwl_priv *priv, 624 + struct iwl_rx_mem_buffer *rxb) 625 + { 626 + struct iwl_rx_packet *pkt = rxb_addr(rxb); 627 + 628 + priv->_agn.last_phy_res_valid = true; 629 + memcpy(&priv->_agn.last_phy_res, pkt->u.raw, 630 + sizeof(struct iwl_rx_phy_res)); 631 + } 632 + 803 633 /* 804 634 * returns non-zero if packet should be dropped 805 635 */ 806 - int iwl_set_decrypted_flag(struct iwl_priv *priv, 807 - struct ieee80211_hdr *hdr, 808 - u32 decrypt_res, 809 - struct ieee80211_rx_status *stats) 636 + static int iwl_set_decrypted_flag(struct iwl_priv *priv, 637 + struct ieee80211_hdr *hdr, 638 + u32 decrypt_res, 639 + struct ieee80211_rx_status *stats) 810 640 { 811 641 u16 fc = le16_to_cpu(hdr->frame_control); 812 642 ··· 862 668 break; 863 669 } 864 670 return 0; 671 + } 672 + 673 + static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv, 674 + struct ieee80211_hdr *hdr, 675 + u16 len, 676 + u32 ampdu_status, 677 + struct iwl_rx_mem_buffer *rxb, 678 + struct ieee80211_rx_status *stats) 679 + { 680 + struct sk_buff *skb; 681 + __le16 fc = hdr->frame_control; 682 + 683 + /* We only process data packets if the interface is open */ 684 + if (unlikely(!priv->is_open)) { 685 + IWL_DEBUG_DROP_LIMIT(priv, 686 + "Dropping packet while interface is not open.\n"); 687 + return; 688 + } 689 + 690 + /* In case of HW accelerated crypto and bad decryption, drop */ 691 + if (!priv->cfg->mod_params->sw_crypto && 692 + iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats)) 693 + return; 694 + 695 + skb = dev_alloc_skb(128); 696 + if (!skb) { 697 + IWL_ERR(priv, "dev_alloc_skb failed\n"); 698 + return; 699 + } 700 + 701 + skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len); 702 + 703 + iwl_update_stats(priv, false, fc, len); 704 + memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); 705 + 706 + ieee80211_rx(priv->hw, skb); 707 + priv->alloc_rxb_page--; 708 + rxb->page = NULL; 709 + } 710 + 711 + static u32 
iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in) 712 + { 713 + u32 decrypt_out = 0; 714 + 715 + if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) == 716 + RX_RES_STATUS_STATION_FOUND) 717 + decrypt_out |= (RX_RES_STATUS_STATION_FOUND | 718 + RX_RES_STATUS_NO_STATION_INFO_MISMATCH); 719 + 720 + decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK); 721 + 722 + /* packet was not encrypted */ 723 + if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == 724 + RX_RES_STATUS_SEC_TYPE_NONE) 725 + return decrypt_out; 726 + 727 + /* packet was encrypted with unknown alg */ 728 + if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) == 729 + RX_RES_STATUS_SEC_TYPE_ERR) 730 + return decrypt_out; 731 + 732 + /* decryption was not done in HW */ 733 + if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) != 734 + RX_MPDU_RES_STATUS_DEC_DONE_MSK) 735 + return decrypt_out; 736 + 737 + switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) { 738 + 739 + case RX_RES_STATUS_SEC_TYPE_CCMP: 740 + /* alg is CCM: check MIC only */ 741 + if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK)) 742 + /* Bad MIC */ 743 + decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; 744 + else 745 + decrypt_out |= RX_RES_STATUS_DECRYPT_OK; 746 + 747 + break; 748 + 749 + case RX_RES_STATUS_SEC_TYPE_TKIP: 750 + if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) { 751 + /* Bad TTAK */ 752 + decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK; 753 + break; 754 + } 755 + /* fall through if TTAK OK */ 756 + default: 757 + if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK)) 758 + decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; 759 + else 760 + decrypt_out |= RX_RES_STATUS_DECRYPT_OK; 761 + break; 762 + } 763 + 764 + IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n", 765 + decrypt_in, decrypt_out); 766 + 767 + return decrypt_out; 768 + } 769 + 770 + /* Called for REPLY_RX (legacy ABG frames), or 771 + * REPLY_RX_MPDU_CMD (HT high-throughput N frames). 
*/ 772 + static void iwl_rx_reply_rx(struct iwl_priv *priv, 773 + struct iwl_rx_mem_buffer *rxb) 774 + { 775 + struct ieee80211_hdr *header; 776 + struct ieee80211_rx_status rx_status; 777 + struct iwl_rx_packet *pkt = rxb_addr(rxb); 778 + struct iwl_rx_phy_res *phy_res; 779 + __le32 rx_pkt_status; 780 + struct iwl_rx_mpdu_res_start *amsdu; 781 + u32 len; 782 + u32 ampdu_status; 783 + u32 rate_n_flags; 784 + 785 + /** 786 + * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently. 787 + * REPLY_RX: physical layer info is in this buffer 788 + * REPLY_RX_MPDU_CMD: physical layer info was sent in separate 789 + * command and cached in priv->last_phy_res 790 + * 791 + * Here we set up local variables depending on which command is 792 + * received. 793 + */ 794 + if (pkt->hdr.cmd == REPLY_RX) { 795 + phy_res = (struct iwl_rx_phy_res *)pkt->u.raw; 796 + header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) 797 + + phy_res->cfg_phy_cnt); 798 + 799 + len = le16_to_cpu(phy_res->byte_count); 800 + rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) + 801 + phy_res->cfg_phy_cnt + len); 802 + ampdu_status = le32_to_cpu(rx_pkt_status); 803 + } else { 804 + if (!priv->_agn.last_phy_res_valid) { 805 + IWL_ERR(priv, "MPDU frame without cached PHY data\n"); 806 + return; 807 + } 808 + phy_res = &priv->_agn.last_phy_res; 809 + amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw; 810 + header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu)); 811 + len = le16_to_cpu(amsdu->byte_count); 812 + rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len); 813 + ampdu_status = iwl_translate_rx_status(priv, 814 + le32_to_cpu(rx_pkt_status)); 815 + } 816 + 817 + if ((unlikely(phy_res->cfg_phy_cnt > 20))) { 818 + IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n", 819 + phy_res->cfg_phy_cnt); 820 + return; 821 + } 822 + 823 + if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) || 824 + !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) { 825 + IWL_DEBUG_RX(priv, 
"Bad CRC or FIFO: 0x%08X.\n", 826 + le32_to_cpu(rx_pkt_status)); 827 + return; 828 + } 829 + 830 + /* This will be used in several places later */ 831 + rate_n_flags = le32_to_cpu(phy_res->rate_n_flags); 832 + 833 + /* rx_status carries information about the packet to mac80211 */ 834 + rx_status.mactime = le64_to_cpu(phy_res->timestamp); 835 + rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? 836 + IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; 837 + rx_status.freq = 838 + ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel), 839 + rx_status.band); 840 + rx_status.rate_idx = 841 + iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band); 842 + rx_status.flag = 0; 843 + 844 + /* TSF isn't reliable. In order to allow smooth user experience, 845 + * this W/A doesn't propagate it to the mac80211 */ 846 + /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/ 847 + 848 + priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp); 849 + 850 + /* Find max signal strength (dBm) among 3 antenna/receiver chains */ 851 + rx_status.signal = priv->cfg->ops->utils->calc_rssi(priv, phy_res); 852 + 853 + iwl_dbg_log_rx_data_frame(priv, len, header); 854 + IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n", 855 + rx_status.signal, (unsigned long long)rx_status.mactime); 856 + 857 + /* 858 + * "antenna number" 859 + * 860 + * It seems that the antenna field in the phy flags value 861 + * is actually a bit field. This is undefined by radiotap, 862 + * it wants an actual antenna number but I always get "7" 863 + * for most legacy frames I receive indicating that the 864 + * same frame was received on all three RX chains. 865 + * 866 + * I think this field should be removed in favor of a 867 + * new 802.11n radiotap field "RX chains" that is defined 868 + * as a bitmask. 
869 + */ 870 + rx_status.antenna = 871 + (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) 872 + >> RX_RES_PHY_FLAGS_ANTENNA_POS; 873 + 874 + /* set the preamble flag if appropriate */ 875 + if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) 876 + rx_status.flag |= RX_FLAG_SHORTPRE; 877 + 878 + /* Set up the HT phy flags */ 879 + if (rate_n_flags & RATE_MCS_HT_MSK) 880 + rx_status.flag |= RX_FLAG_HT; 881 + if (rate_n_flags & RATE_MCS_HT40_MSK) 882 + rx_status.flag |= RX_FLAG_40MHZ; 883 + if (rate_n_flags & RATE_MCS_SGI_MSK) 884 + rx_status.flag |= RX_FLAG_SHORT_GI; 885 + 886 + iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status, 887 + rxb, &rx_status); 888 + } 889 + 890 + /** 891 + * iwl_setup_rx_handlers - Initialize Rx handler callbacks 892 + * 893 + * Setup the RX handlers for each of the reply types sent from the uCode 894 + * to the host. 895 + */ 896 + void iwl_setup_rx_handlers(struct iwl_priv *priv) 897 + { 898 + void (**handlers)(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); 899 + 900 + handlers = priv->rx_handlers; 901 + 902 + handlers[REPLY_ALIVE] = iwl_rx_reply_alive; 903 + handlers[REPLY_ERROR] = iwl_rx_reply_error; 904 + handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa; 905 + handlers[SPECTRUM_MEASURE_NOTIFICATION] = iwl_rx_spectrum_measure_notif; 906 + handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif; 907 + handlers[PM_DEBUG_STATISTIC_NOTIFIC] = iwl_rx_pm_debug_statistics_notif; 908 + handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif; 909 + 910 + /* 911 + * The same handler is used for both the REPLY to a discrete 912 + * statistics request from the host as well as for the periodic 913 + * statistics notifications (after received beacons) from the uCode. 
914 + */ 915 + handlers[REPLY_STATISTICS_CMD] = iwl_rx_reply_statistics; 916 + handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics; 917 + 918 + iwl_setup_rx_scan_handlers(priv); 919 + 920 + handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif; 921 + handlers[MISSED_BEACONS_NOTIFICATION] = iwl_rx_missed_beacon_notif; 922 + 923 + /* Rx handlers */ 924 + handlers[REPLY_RX_PHY_CMD] = iwl_rx_reply_rx_phy; 925 + handlers[REPLY_RX_MPDU_CMD] = iwl_rx_reply_rx; 926 + 927 + /* block ack */ 928 + handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba; 929 + 930 + /* Set up hardware specific Rx handlers */ 931 + priv->cfg->ops->lib->rx_handler_setup(priv); 865 932 }
+1 -1
drivers/net/wireless/libertas/host.h
··· 387 387 struct mrvl_ie_domain_param_set { 388 388 struct mrvl_ie_header header; 389 389 390 - u8 country_code[3]; 390 + u8 country_code[IEEE80211_COUNTRY_STRING_LEN]; 391 391 struct ieee80211_country_ie_triplet triplet[MAX_11D_TRIPLETS]; 392 392 } __packed; 393 393
+2 -4
drivers/net/wireless/mwl8k.c
··· 1056 1056 } 1057 1057 memset(rxq->rxd, 0, size); 1058 1058 1059 - rxq->buf = kmalloc(MWL8K_RX_DESCS * sizeof(*rxq->buf), GFP_KERNEL); 1059 + rxq->buf = kcalloc(MWL8K_RX_DESCS, sizeof(*rxq->buf), GFP_KERNEL); 1060 1060 if (rxq->buf == NULL) { 1061 1061 wiphy_err(hw->wiphy, "failed to alloc RX skbuff list\n"); 1062 1062 pci_free_consistent(priv->pdev, size, rxq->rxd, rxq->rxd_dma); 1063 1063 return -ENOMEM; 1064 1064 } 1065 - memset(rxq->buf, 0, MWL8K_RX_DESCS * sizeof(*rxq->buf)); 1066 1065 1067 1066 for (i = 0; i < MWL8K_RX_DESCS; i++) { 1068 1067 int desc_size; ··· 1346 1347 } 1347 1348 memset(txq->txd, 0, size); 1348 1349 1349 - txq->skb = kmalloc(MWL8K_TX_DESCS * sizeof(*txq->skb), GFP_KERNEL); 1350 + txq->skb = kcalloc(MWL8K_TX_DESCS, sizeof(*txq->skb), GFP_KERNEL); 1350 1351 if (txq->skb == NULL) { 1351 1352 wiphy_err(hw->wiphy, "failed to alloc TX skbuff list\n"); 1352 1353 pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma); 1353 1354 return -ENOMEM; 1354 1355 } 1355 - memset(txq->skb, 0, MWL8K_TX_DESCS * sizeof(*txq->skb)); 1356 1356 1357 1357 for (i = 0; i < MWL8K_TX_DESCS; i++) { 1358 1358 struct mwl8k_tx_desc *tx_desc;
+2 -3
drivers/net/wireless/p54/Kconfig
··· 43 43 tristate "Prism54 SPI (stlc45xx) support" 44 44 depends on P54_COMMON && SPI_MASTER && GENERIC_HARDIRQS 45 45 ---help--- 46 - This driver is for stlc4550 or stlc4560 based wireless chips. 47 - This driver is experimental, untested and will probably only work on 48 - Nokia's N800/N810 Portable Internet Tablet. 46 + This driver is for stlc4550 or stlc4560 based wireless chips 47 + such as Nokia's N800/N810 Portable Internet Tablet. 49 48 50 49 If you choose to build a module, it'll be called p54spi. 51 50
+18 -18
drivers/net/wireless/rt2x00/rt2400pci.c
··· 779 779 rt2x00pci_register_read(rt2x00dev, TXCSR2, &reg); 780 780 rt2x00_set_field32(&reg, TXCSR2_TXD_SIZE, rt2x00dev->tx[0].desc_size); 781 781 rt2x00_set_field32(&reg, TXCSR2_NUM_TXD, rt2x00dev->tx[1].limit); 782 - rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->bcn[1].limit); 782 + rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->atim->limit); 783 783 rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit); 784 784 rt2x00pci_register_write(rt2x00dev, TXCSR2, reg); 785 785 ··· 795 795 entry_priv->desc_dma); 796 796 rt2x00pci_register_write(rt2x00dev, TXCSR5, reg); 797 797 798 - entry_priv = rt2x00dev->bcn[1].entries[0].priv_data; 798 + entry_priv = rt2x00dev->atim->entries[0].priv_data; 799 799 rt2x00pci_register_read(rt2x00dev, TXCSR4, &reg); 800 800 rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER, 801 801 entry_priv->desc_dma); 802 802 rt2x00pci_register_write(rt2x00dev, TXCSR4, reg); 803 803 804 - entry_priv = rt2x00dev->bcn[0].entries[0].priv_data; 804 + entry_priv = rt2x00dev->bcn->entries[0].priv_data; 805 805 rt2x00pci_register_read(rt2x00dev, TXCSR6, &reg); 806 806 rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER, 807 807 entry_priv->desc_dma); ··· 1131 1131 rt2x00_desc_write(txd, 2, word); 1132 1132 1133 1133 rt2x00_desc_read(txd, 3, &word); 1134 - rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->signal); 1134 + rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->u.plcp.signal); 1135 1135 rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL_REGNUM, 5); 1136 1136 rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL_BUSY, 1); 1137 - rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->service); 1137 + rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->u.plcp.service); 1138 1138 rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE_REGNUM, 6); 1139 1139 rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE_BUSY, 1); 1140 1140 rt2x00_desc_write(txd, 3, word); 1141 1141 1142 1142 rt2x00_desc_read(txd, 4, &word); 1143 - rt2x00_set_field32(&word, 
TXD_W4_PLCP_LENGTH_LOW, txdesc->length_low); 1143 + rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_LOW, 1144 + txdesc->u.plcp.length_low); 1144 1145 rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW_REGNUM, 8); 1145 1146 rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW_BUSY, 1); 1146 - rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_HIGH, txdesc->length_high); 1147 + rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_HIGH, 1148 + txdesc->u.plcp.length_high); 1147 1149 rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH_REGNUM, 7); 1148 1150 rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH_BUSY, 1); 1149 1151 rt2x00_desc_write(txd, 4, word); ··· 1166 1164 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags)); 1167 1165 rt2x00_set_field32(&word, TXD_W0_RTS, 1168 1166 test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)); 1169 - rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1167 + rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs); 1170 1168 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, 1171 1169 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags)); 1172 1170 rt2x00_desc_write(txd, 0, word); ··· 1278 1276 static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev, 1279 1277 const enum data_queue_qid queue_idx) 1280 1278 { 1281 - struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, queue_idx); 1279 + struct data_queue *queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx); 1282 1280 struct queue_entry_priv_pci *entry_priv; 1283 1281 struct queue_entry *entry; 1284 1282 struct txdone_entry_desc txdesc; ··· 1317 1315 static void rt2400pci_enable_interrupt(struct rt2x00_dev *rt2x00dev, 1318 1316 struct rt2x00_field32 irq_field) 1319 1317 { 1320 - unsigned long flags; 1321 1318 u32 reg; 1322 1319 1323 1320 /* 1324 1321 * Enable a single interrupt. The interrupt mask register 1325 1322 * access needs locking. 
1326 1323 */ 1327 - spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags); 1324 + spin_lock_irq(&rt2x00dev->irqmask_lock); 1328 1325 1329 1326 rt2x00pci_register_read(rt2x00dev, CSR8, &reg); 1330 1327 rt2x00_set_field32(&reg, irq_field, 0); 1331 1328 rt2x00pci_register_write(rt2x00dev, CSR8, reg); 1332 1329 1333 - spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags); 1330 + spin_unlock_irq(&rt2x00dev->irqmask_lock); 1334 1331 } 1335 1332 1336 1333 static void rt2400pci_txstatus_tasklet(unsigned long data) 1337 1334 { 1338 1335 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data; 1339 1336 u32 reg; 1340 - unsigned long flags; 1341 1337 1342 1338 /* 1343 1339 * Handle all tx queues. ··· 1347 1347 /* 1348 1348 * Enable all TXDONE interrupts again. 1349 1349 */ 1350 - spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags); 1350 + spin_lock_irq(&rt2x00dev->irqmask_lock); 1351 1351 1352 1352 rt2x00pci_register_read(rt2x00dev, CSR8, &reg); 1353 1353 rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 0); ··· 1355 1355 rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 0); 1356 1356 rt2x00pci_register_write(rt2x00dev, CSR8, reg); 1357 1357 1358 - spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags); 1358 + spin_unlock_irq(&rt2x00dev->irqmask_lock); 1359 1359 } 1360 1360 1361 1361 static void rt2400pci_tbtt_tasklet(unsigned long data) ··· 1376 1376 { 1377 1377 struct rt2x00_dev *rt2x00dev = dev_instance; 1378 1378 u32 reg, mask; 1379 - unsigned long flags; 1380 1379 1381 1380 /* 1382 1381 * Get the interrupt sources & saved to local variable. ··· 1417 1418 * Disable all interrupts for which a tasklet was scheduled right now, 1418 1419 * the tasklet will reenable the appropriate interrupts. 
1419 1420 */ 1420 - spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags); 1421 + spin_lock(&rt2x00dev->irqmask_lock); 1421 1422 1422 1423 rt2x00pci_register_read(rt2x00dev, CSR8, &reg); 1423 1424 reg |= mask; 1424 1425 rt2x00pci_register_write(rt2x00dev, CSR8, reg); 1425 1426 1426 - spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags); 1427 + spin_unlock(&rt2x00dev->irqmask_lock); 1427 1428 1428 1429 1429 1430 ··· 1640 1641 */ 1641 1642 __set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags); 1642 1643 __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags); 1644 + __set_bit(DRIVER_REQUIRE_SW_SEQNO, &rt2x00dev->flags); 1643 1645 1644 1646 /* 1645 1647 * Set the rssi offset.
+19 -19
drivers/net/wireless/rt2x00/rt2500pci.c
··· 293 293 struct rt2x00intf_conf *conf, 294 294 const unsigned int flags) 295 295 { 296 - struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, QID_BEACON); 296 + struct data_queue *queue = rt2x00dev->bcn; 297 297 unsigned int bcn_preload; 298 298 u32 reg; 299 299 ··· 865 865 rt2x00pci_register_read(rt2x00dev, TXCSR2, &reg); 866 866 rt2x00_set_field32(&reg, TXCSR2_TXD_SIZE, rt2x00dev->tx[0].desc_size); 867 867 rt2x00_set_field32(&reg, TXCSR2_NUM_TXD, rt2x00dev->tx[1].limit); 868 - rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->bcn[1].limit); 868 + rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->atim->limit); 869 869 rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit); 870 870 rt2x00pci_register_write(rt2x00dev, TXCSR2, reg); 871 871 ··· 881 881 entry_priv->desc_dma); 882 882 rt2x00pci_register_write(rt2x00dev, TXCSR5, reg); 883 883 884 - entry_priv = rt2x00dev->bcn[1].entries[0].priv_data; 884 + entry_priv = rt2x00dev->atim->entries[0].priv_data; 885 885 rt2x00pci_register_read(rt2x00dev, TXCSR4, &reg); 886 886 rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER, 887 887 entry_priv->desc_dma); 888 888 rt2x00pci_register_write(rt2x00dev, TXCSR4, reg); 889 889 890 - entry_priv = rt2x00dev->bcn[0].entries[0].priv_data; 890 + entry_priv = rt2x00dev->bcn->entries[0].priv_data; 891 891 rt2x00pci_register_read(rt2x00dev, TXCSR6, &reg); 892 892 rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER, 893 893 entry_priv->desc_dma); ··· 1287 1287 rt2x00_desc_write(txd, 2, word); 1288 1288 1289 1289 rt2x00_desc_read(txd, 3, &word); 1290 - rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->signal); 1291 - rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->service); 1292 - rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW, txdesc->length_low); 1293 - rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH, txdesc->length_high); 1290 + rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->u.plcp.signal); 1291 + rt2x00_set_field32(&word, 
TXD_W3_PLCP_SERVICE, txdesc->u.plcp.service); 1292 + rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW, 1293 + txdesc->u.plcp.length_low); 1294 + rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH, 1295 + txdesc->u.plcp.length_high); 1294 1296 rt2x00_desc_write(txd, 3, word); 1295 1297 1296 1298 rt2x00_desc_read(txd, 10, &word); ··· 1317 1315 rt2x00_set_field32(&word, TXD_W0_OFDM, 1318 1316 (txdesc->rate_mode == RATE_MODE_OFDM)); 1319 1317 rt2x00_set_field32(&word, TXD_W0_CIPHER_OWNER, 1); 1320 - rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1318 + rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs); 1321 1319 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, 1322 1320 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags)); 1323 1321 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length); ··· 1410 1408 static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev, 1411 1409 const enum data_queue_qid queue_idx) 1412 1410 { 1413 - struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, queue_idx); 1411 + struct data_queue *queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx); 1414 1412 struct queue_entry_priv_pci *entry_priv; 1415 1413 struct queue_entry *entry; 1416 1414 struct txdone_entry_desc txdesc; ··· 1449 1447 static void rt2500pci_enable_interrupt(struct rt2x00_dev *rt2x00dev, 1450 1448 struct rt2x00_field32 irq_field) 1451 1449 { 1452 - unsigned long flags; 1453 1450 u32 reg; 1454 1451 1455 1452 /* 1456 1453 * Enable a single interrupt. The interrupt mask register 1457 1454 * access needs locking. 
1458 1455 */ 1459 - spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags); 1456 + spin_lock_irq(&rt2x00dev->irqmask_lock); 1460 1457 1461 1458 rt2x00pci_register_read(rt2x00dev, CSR8, &reg); 1462 1459 rt2x00_set_field32(&reg, irq_field, 0); 1463 1460 rt2x00pci_register_write(rt2x00dev, CSR8, reg); 1464 1461 1465 - spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags); 1462 + spin_unlock_irq(&rt2x00dev->irqmask_lock); 1466 1463 } 1467 1464 1468 1465 static void rt2500pci_txstatus_tasklet(unsigned long data) 1469 1466 { 1470 1467 struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data; 1471 1468 u32 reg; 1472 - unsigned long flags; 1473 1469 1474 1470 /* 1475 1471 * Handle all tx queues. ··· 1479 1479 /* 1480 1480 * Enable all TXDONE interrupts again. 1481 1481 */ 1482 - spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags); 1482 + spin_lock_irq(&rt2x00dev->irqmask_lock); 1483 1483 1484 1484 rt2x00pci_register_read(rt2x00dev, CSR8, &reg); 1485 1485 rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 0); ··· 1487 1487 rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 0); 1488 1488 rt2x00pci_register_write(rt2x00dev, CSR8, reg); 1489 1489 1490 - spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags); 1490 + spin_unlock_irq(&rt2x00dev->irqmask_lock); 1491 1491 } 1492 1492 1493 1493 static void rt2500pci_tbtt_tasklet(unsigned long data) ··· 1508 1508 { 1509 1509 struct rt2x00_dev *rt2x00dev = dev_instance; 1510 1510 u32 reg, mask; 1511 - unsigned long flags; 1512 1511 1513 1512 /* 1514 1513 * Get the interrupt sources & saved to local variable. ··· 1549 1550 * Disable all interrupts for which a tasklet was scheduled right now, 1550 1551 * the tasklet will reenable the appropriate interrupts. 
1551 1552 */ 1552 - spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags); 1553 + spin_lock(&rt2x00dev->irqmask_lock); 1553 1554 1554 1555 rt2x00pci_register_read(rt2x00dev, CSR8, &reg); 1555 1556 reg |= mask; 1556 1557 rt2x00pci_register_write(rt2x00dev, CSR8, reg); 1557 1558 1558 - spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags); 1559 + spin_unlock(&rt2x00dev->irqmask_lock); 1559 1560 1560 1561 return IRQ_HANDLED; 1561 1562 } ··· 1958 1959 */ 1959 1960 __set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags); 1960 1961 __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags); 1962 + __set_bit(DRIVER_REQUIRE_SW_SEQNO, &rt2x00dev->flags); 1961 1963 1962 1964 /* 1963 1965 * Set the rssi offset.
+8 -5
drivers/net/wireless/rt2x00/rt2500usb.c
··· 1100 1100 (txdesc->rate_mode == RATE_MODE_OFDM)); 1101 1101 rt2x00_set_field32(&word, TXD_W0_NEW_SEQ, 1102 1102 test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)); 1103 - rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1103 + rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs); 1104 1104 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length); 1105 1105 rt2x00_set_field32(&word, TXD_W0_CIPHER, !!txdesc->cipher); 1106 1106 rt2x00_set_field32(&word, TXD_W0_KEY_ID, txdesc->key_idx); ··· 1114 1114 rt2x00_desc_write(txd, 1, word); 1115 1115 1116 1116 rt2x00_desc_read(txd, 2, &word); 1117 - rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->signal); 1118 - rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->service); 1119 - rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->length_low); 1120 - rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high); 1117 + rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->u.plcp.signal); 1118 + rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->u.plcp.service); 1119 + rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, 1120 + txdesc->u.plcp.length_low); 1121 + rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, 1122 + txdesc->u.plcp.length_high); 1121 1123 rt2x00_desc_write(txd, 2, word); 1122 1124 1123 1125 if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) { ··· 1797 1795 __set_bit(DRIVER_REQUIRE_COPY_IV, &rt2x00dev->flags); 1798 1796 } 1799 1797 __set_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags); 1798 + __set_bit(DRIVER_REQUIRE_SW_SEQNO, &rt2x00dev->flags); 1800 1799 1801 1800 /* 1802 1801 * Set the rssi offset.
+20 -20
drivers/net/wireless/rt2x00/rt2800.h
··· 66 66 #define RF3320 0x000b 67 67 #define RF3322 0x000c 68 68 #define RF3853 0x000d 69 - #define RF5390 0x5390 69 + #define RF5390 0x5390 70 70 71 71 /* 72 72 * Chipset revisions. ··· 79 79 #define REV_RT3071E 0x0211 80 80 #define REV_RT3090E 0x0211 81 81 #define REV_RT3390E 0x0211 82 - #define REV_RT5390F 0x0502 82 + #define REV_RT5390F 0x0502 83 83 84 84 /* 85 85 * Signal information. ··· 126 126 /* 127 127 * AUX_CTRL: Aux/PCI-E related configuration 128 128 */ 129 - #define AUX_CTRL 0x10c 130 - #define AUX_CTRL_WAKE_PCIE_EN FIELD32(0x00000002) 131 - #define AUX_CTRL_FORCE_PCIE_CLK FIELD32(0x00000400) 129 + #define AUX_CTRL 0x10c 130 + #define AUX_CTRL_WAKE_PCIE_EN FIELD32(0x00000002) 131 + #define AUX_CTRL_FORCE_PCIE_CLK FIELD32(0x00000400) 132 132 133 133 /* 134 134 * OPT_14: Unknown register used by rt3xxx devices. ··· 464 464 */ 465 465 #define RF_CSR_CFG 0x0500 466 466 #define RF_CSR_CFG_DATA FIELD32(0x000000ff) 467 - #define RF_CSR_CFG_REGNUM FIELD32(0x00003f00) 467 + #define RF_CSR_CFG_REGNUM FIELD32(0x00003f00) 468 468 #define RF_CSR_CFG_WRITE FIELD32(0x00010000) 469 469 #define RF_CSR_CFG_BUSY FIELD32(0x00020000) 470 470 ··· 1746 1746 */ 1747 1747 #define BBP4_TX_BF FIELD8(0x01) 1748 1748 #define BBP4_BANDWIDTH FIELD8(0x18) 1749 - #define BBP4_MAC_IF_CTRL FIELD8(0x40) 1749 + #define BBP4_MAC_IF_CTRL FIELD8(0x40) 1750 1750 1751 1751 /* 1752 1752 * BBP 109 1753 1753 */ 1754 - #define BBP109_TX0_POWER FIELD8(0x0f) 1755 - #define BBP109_TX1_POWER FIELD8(0xf0) 1754 + #define BBP109_TX0_POWER FIELD8(0x0f) 1755 + #define BBP109_TX1_POWER FIELD8(0xf0) 1756 1756 1757 1757 /* 1758 1758 * BBP 138: Unknown ··· 1765 1765 /* 1766 1766 * BBP 152: Rx Ant 1767 1767 */ 1768 - #define BBP152_RX_DEFAULT_ANT FIELD8(0x80) 1768 + #define BBP152_RX_DEFAULT_ANT FIELD8(0x80) 1769 1769 1770 1770 /* 1771 1771 * RFCSR registers ··· 1776 1776 * RFCSR 1: 1777 1777 */ 1778 1778 #define RFCSR1_RF_BLOCK_EN FIELD8(0x01) 1779 - #define RFCSR1_PLL_PD FIELD8(0x02) 1779 + #define 
RFCSR1_PLL_PD FIELD8(0x02) 1780 1780 #define RFCSR1_RX0_PD FIELD8(0x04) 1781 1781 #define RFCSR1_TX0_PD FIELD8(0x08) 1782 1782 #define RFCSR1_RX1_PD FIELD8(0x10) ··· 1785 1785 /* 1786 1786 * RFCSR 2: 1787 1787 */ 1788 - #define RFCSR2_RESCAL_EN FIELD8(0x80) 1788 + #define RFCSR2_RESCAL_EN FIELD8(0x80) 1789 1789 1790 1790 /* 1791 1791 * RFCSR 6: ··· 1801 1801 /* 1802 1802 * RFCSR 11: 1803 1803 */ 1804 - #define RFCSR11_R FIELD8(0x03) 1804 + #define RFCSR11_R FIELD8(0x03) 1805 1805 1806 1806 /* 1807 1807 * RFCSR 12: ··· 1857 1857 /* 1858 1858 * RFCSR 30: 1859 1859 */ 1860 - #define RFCSR30_TX_H20M FIELD8(0x02) 1861 - #define RFCSR30_RX_H20M FIELD8(0x04) 1862 - #define RFCSR30_RX_VCM FIELD8(0x18) 1860 + #define RFCSR30_TX_H20M FIELD8(0x02) 1861 + #define RFCSR30_RX_H20M FIELD8(0x04) 1862 + #define RFCSR30_RX_VCM FIELD8(0x18) 1863 1863 #define RFCSR30_RF_CALIBRATION FIELD8(0x80) 1864 1864 1865 1865 /* ··· 1871 1871 /* 1872 1872 * RFCSR 38: 1873 1873 */ 1874 - #define RFCSR38_RX_LO1_EN FIELD8(0x20) 1874 + #define RFCSR38_RX_LO1_EN FIELD8(0x20) 1875 1875 1876 1876 /* 1877 1877 * RFCSR 39: 1878 1878 */ 1879 - #define RFCSR39_RX_LO2_EN FIELD8(0x80) 1879 + #define RFCSR39_RX_LO2_EN FIELD8(0x80) 1880 1880 1881 1881 /* 1882 1882 * RFCSR 49: 1883 1883 */ 1884 - #define RFCSR49_TX FIELD8(0x3f) 1884 + #define RFCSR49_TX FIELD8(0x3f) 1885 1885 1886 1886 /* 1887 1887 * RF registers ··· 1918 1918 /* 1919 1919 * Chip ID 1920 1920 */ 1921 - #define EEPROM_CHIP_ID 0x0000 1921 + #define EEPROM_CHIP_ID 0x0000 1922 1922 1923 1923 /* 1924 1924 * EEPROM Version
+367 -340
drivers/net/wireless/rt2x00/rt2800lib.c
··· 400 400 if (rt2800_wait_csr_ready(rt2x00dev)) 401 401 return -EBUSY; 402 402 403 - if (rt2x00_is_pci(rt2x00dev)) { 404 - if (rt2x00_rt(rt2x00dev, RT5390)) { 405 - rt2800_register_read(rt2x00dev, AUX_CTRL, &reg); 406 - rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1); 407 - rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1); 408 - rt2800_register_write(rt2x00dev, AUX_CTRL, reg); 409 - } 403 + if (rt2x00_is_pci(rt2x00dev)) { 404 + if (rt2x00_rt(rt2x00dev, RT5390)) { 405 + rt2800_register_read(rt2x00dev, AUX_CTRL, &reg); 406 + rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1); 407 + rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1); 408 + rt2800_register_write(rt2x00dev, AUX_CTRL, reg); 409 + } 410 410 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000002); 411 - } 411 + } 412 412 413 413 /* 414 414 * Disable DMA, will be reenabled later when enabling ··· 472 472 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags)); 473 473 rt2x00_set_field32(&word, TXWI_W0_AMPDU, 474 474 test_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags)); 475 - rt2x00_set_field32(&word, TXWI_W0_MPDU_DENSITY, txdesc->mpdu_density); 476 - rt2x00_set_field32(&word, TXWI_W0_TX_OP, txdesc->txop); 477 - rt2x00_set_field32(&word, TXWI_W0_MCS, txdesc->mcs); 475 + rt2x00_set_field32(&word, TXWI_W0_MPDU_DENSITY, 476 + txdesc->u.ht.mpdu_density); 477 + rt2x00_set_field32(&word, TXWI_W0_TX_OP, txdesc->u.ht.txop); 478 + rt2x00_set_field32(&word, TXWI_W0_MCS, txdesc->u.ht.mcs); 478 479 rt2x00_set_field32(&word, TXWI_W0_BW, 479 480 test_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags)); 480 481 rt2x00_set_field32(&word, TXWI_W0_SHORT_GI, 481 482 test_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags)); 482 - rt2x00_set_field32(&word, TXWI_W0_STBC, txdesc->stbc); 483 + rt2x00_set_field32(&word, TXWI_W0_STBC, txdesc->u.ht.stbc); 483 484 rt2x00_set_field32(&word, TXWI_W0_PHYMODE, txdesc->rate_mode); 484 485 rt2x00_desc_write(txwi, 0, word); 485 486 ··· 489 488 test_bit(ENTRY_TXD_ACK, &txdesc->flags)); 490 489 
rt2x00_set_field32(&word, TXWI_W1_NSEQ, 491 490 test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags)); 492 - rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->ba_size); 491 + rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->u.ht.ba_size); 493 492 rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID, 494 493 test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ? 495 494 txdesc->key_idx : 0xff); ··· 682 681 * confuse the rate control algortihm by providing clearly wrong 683 682 * data. 684 683 */ 685 - if (aggr == 1 && ampdu == 0 && real_mcs != mcs) { 684 + if (unlikely(aggr == 1 && ampdu == 0 && real_mcs != mcs)) { 686 685 skbdesc->tx_rate_idx = real_mcs; 687 686 mcs = real_mcs; 688 687 } ··· 752 751 if (pid >= QID_RX) 753 752 continue; 754 753 755 - queue = rt2x00queue_get_queue(rt2x00dev, pid); 754 + queue = rt2x00queue_get_tx_queue(rt2x00dev, pid); 756 755 if (unlikely(!queue)) 757 756 continue; 758 757 ··· 1101 1100 } 1102 1101 EXPORT_SYMBOL_GPL(rt2800_config_shared_key); 1103 1102 1103 + static inline int rt2800_find_pairwise_keyslot(struct rt2x00_dev *rt2x00dev) 1104 + { 1105 + int idx; 1106 + u32 offset, reg; 1107 + 1108 + /* 1109 + * Search for the first free pairwise key entry and return the 1110 + * corresponding index. 1111 + * 1112 + * Make sure the WCID starts _after_ the last possible shared key 1113 + * entry (>32). 1114 + * 1115 + * Since parts of the pairwise key table might be shared with 1116 + * the beacon frame buffers 6 & 7 we should only write into the 1117 + * first 222 entries. 
1118 + */ 1119 + for (idx = 33; idx <= 222; idx++) { 1120 + offset = MAC_WCID_ATTR_ENTRY(idx); 1121 + rt2800_register_read(rt2x00dev, offset, &reg); 1122 + if (!reg) 1123 + return idx; 1124 + } 1125 + return -1; 1126 + } 1127 + 1104 1128 int rt2800_config_pairwise_key(struct rt2x00_dev *rt2x00dev, 1105 1129 struct rt2x00lib_crypto *crypto, 1106 1130 struct ieee80211_key_conf *key) 1107 1131 { 1108 1132 struct hw_key_entry key_entry; 1109 1133 u32 offset; 1134 + int idx; 1110 1135 1111 1136 if (crypto->cmd == SET_KEY) { 1112 - /* 1113 - * 1 pairwise key is possible per AID, this means that the AID 1114 - * equals our hw_key_idx. Make sure the WCID starts _after_ the 1115 - * last possible shared key entry. 1116 - * 1117 - * Since parts of the pairwise key table might be shared with 1118 - * the beacon frame buffers 6 & 7 we should only write into the 1119 - * first 222 entries. 1120 - */ 1121 - if (crypto->aid > (222 - 32)) 1137 + idx = rt2800_find_pairwise_keyslot(rt2x00dev); 1138 + if (idx < 0) 1122 1139 return -ENOSPC; 1123 - 1124 - key->hw_key_idx = 32 + crypto->aid; 1140 + key->hw_key_idx = idx; 1125 1141 1126 1142 memcpy(key_entry.key, crypto->key, 1127 1143 sizeof(key_entry.key)); ··· 1603 1585 #define RT5390_FREQ_OFFSET_BOUND 0x5f 1604 1586 1605 1587 static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev, 1606 - struct ieee80211_conf *conf, 1607 - struct rf_channel *rf, 1608 - struct channel_info *info) 1588 + struct ieee80211_conf *conf, 1589 + struct rf_channel *rf, 1590 + struct channel_info *info) 1609 1591 { 1610 - u8 rfcsr; 1611 - u16 eeprom; 1592 + u8 rfcsr; 1593 + u16 eeprom; 1612 1594 1613 - rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1); 1614 - rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3); 1615 - rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr); 1616 - rt2x00_set_field8(&rfcsr, RFCSR11_R, rf->rf2); 1617 - rt2800_rfcsr_write(rt2x00dev, 11, rfcsr); 1595 + rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1); 1596 + rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3); 1597 
+ rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr); 1598 + rt2x00_set_field8(&rfcsr, RFCSR11_R, rf->rf2); 1599 + rt2800_rfcsr_write(rt2x00dev, 11, rfcsr); 1618 1600 1619 - rt2800_rfcsr_read(rt2x00dev, 49, &rfcsr); 1620 - if (info->default_power1 > RT5390_POWER_BOUND) 1621 - rt2x00_set_field8(&rfcsr, RFCSR49_TX, RT5390_POWER_BOUND); 1622 - else 1623 - rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1); 1624 - rt2800_rfcsr_write(rt2x00dev, 49, rfcsr); 1601 + rt2800_rfcsr_read(rt2x00dev, 49, &rfcsr); 1602 + if (info->default_power1 > RT5390_POWER_BOUND) 1603 + rt2x00_set_field8(&rfcsr, RFCSR49_TX, RT5390_POWER_BOUND); 1604 + else 1605 + rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1); 1606 + rt2800_rfcsr_write(rt2x00dev, 49, rfcsr); 1625 1607 1626 - rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr); 1627 - rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1); 1628 - rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1); 1629 - rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1); 1630 - rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1); 1631 - rt2800_rfcsr_write(rt2x00dev, 1, rfcsr); 1608 + rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr); 1609 + rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1); 1610 + rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1); 1611 + rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1); 1612 + rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1); 1613 + rt2800_rfcsr_write(rt2x00dev, 1, rfcsr); 1632 1614 1633 - rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr); 1634 - if (rt2x00dev->freq_offset > RT5390_FREQ_OFFSET_BOUND) 1635 - rt2x00_set_field8(&rfcsr, RFCSR17_CODE, RT5390_FREQ_OFFSET_BOUND); 1636 - else 1637 - rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset); 1638 - rt2800_rfcsr_write(rt2x00dev, 17, rfcsr); 1615 + rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr); 1616 + if (rt2x00dev->freq_offset > RT5390_FREQ_OFFSET_BOUND) 1617 + rt2x00_set_field8(&rfcsr, RFCSR17_CODE, 1618 + RT5390_FREQ_OFFSET_BOUND); 1619 + else 1620 + rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset); 1621 + 
rt2800_rfcsr_write(rt2x00dev, 17, rfcsr); 1639 1622 1640 - rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); 1641 - if (rf->channel <= 14) { 1642 - int idx = rf->channel-1; 1623 + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); 1624 + if (rf->channel <= 14) { 1625 + int idx = rf->channel-1; 1643 1626 1644 - if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST)) { 1645 - if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) { 1646 - /* r55/r59 value array of channel 1~14 */ 1647 - static const char r55_bt_rev[] = {0x83, 0x83, 1648 - 0x83, 0x73, 0x73, 0x63, 0x53, 0x53, 1649 - 0x53, 0x43, 0x43, 0x43, 0x43, 0x43}; 1650 - static const char r59_bt_rev[] = {0x0e, 0x0e, 1651 - 0x0e, 0x0e, 0x0e, 0x0b, 0x0a, 0x09, 1652 - 0x07, 0x07, 0x07, 0x07, 0x07, 0x07}; 1627 + if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST)) { 1628 + if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) { 1629 + /* r55/r59 value array of channel 1~14 */ 1630 + static const char r55_bt_rev[] = {0x83, 0x83, 1631 + 0x83, 0x73, 0x73, 0x63, 0x53, 0x53, 1632 + 0x53, 0x43, 0x43, 0x43, 0x43, 0x43}; 1633 + static const char r59_bt_rev[] = {0x0e, 0x0e, 1634 + 0x0e, 0x0e, 0x0e, 0x0b, 0x0a, 0x09, 1635 + 0x07, 0x07, 0x07, 0x07, 0x07, 0x07}; 1653 1636 1654 - rt2800_rfcsr_write(rt2x00dev, 55, r55_bt_rev[idx]); 1655 - rt2800_rfcsr_write(rt2x00dev, 59, r59_bt_rev[idx]); 1656 - } else { 1657 - static const char r59_bt[] = {0x8b, 0x8b, 0x8b, 1658 - 0x8b, 0x8b, 0x8b, 0x8b, 0x8a, 0x89, 1659 - 0x88, 0x88, 0x86, 0x85, 0x84}; 1637 + rt2800_rfcsr_write(rt2x00dev, 55, 1638 + r55_bt_rev[idx]); 1639 + rt2800_rfcsr_write(rt2x00dev, 59, 1640 + r59_bt_rev[idx]); 1641 + } else { 1642 + static const char r59_bt[] = {0x8b, 0x8b, 0x8b, 1643 + 0x8b, 0x8b, 0x8b, 0x8b, 0x8a, 0x89, 1644 + 0x88, 0x88, 0x86, 0x85, 0x84}; 1660 1645 1661 - rt2800_rfcsr_write(rt2x00dev, 59, r59_bt[idx]); 1662 - } 1663 - } else { 1664 - if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) { 1665 - static const char 
r55_nonbt_rev[] = {0x23, 0x23, 1666 - 0x23, 0x23, 0x13, 0x13, 0x03, 0x03, 1667 - 0x03, 0x03, 0x03, 0x03, 0x03, 0x03}; 1668 - static const char r59_nonbt_rev[] = {0x07, 0x07, 1669 - 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 1670 - 0x07, 0x07, 0x06, 0x05, 0x04, 0x04}; 1646 + rt2800_rfcsr_write(rt2x00dev, 59, r59_bt[idx]); 1647 + } 1648 + } else { 1649 + if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) { 1650 + static const char r55_nonbt_rev[] = {0x23, 0x23, 1651 + 0x23, 0x23, 0x13, 0x13, 0x03, 0x03, 1652 + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03}; 1653 + static const char r59_nonbt_rev[] = {0x07, 0x07, 1654 + 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 1655 + 0x07, 0x07, 0x06, 0x05, 0x04, 0x04}; 1671 1656 1672 - rt2800_rfcsr_write(rt2x00dev, 55, r55_nonbt_rev[idx]); 1673 - rt2800_rfcsr_write(rt2x00dev, 59, r59_nonbt_rev[idx]); 1674 - } else if (rt2x00_rt(rt2x00dev, RT5390)) { 1675 - static const char r59_non_bt[] = {0x8f, 0x8f, 1676 - 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d, 1677 - 0x8a, 0x88, 0x88, 0x87, 0x87, 0x86}; 1657 + rt2800_rfcsr_write(rt2x00dev, 55, 1658 + r55_nonbt_rev[idx]); 1659 + rt2800_rfcsr_write(rt2x00dev, 59, 1660 + r59_nonbt_rev[idx]); 1661 + } else if (rt2x00_rt(rt2x00dev, RT5390)) { 1662 + static const char r59_non_bt[] = {0x8f, 0x8f, 1663 + 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d, 1664 + 0x8a, 0x88, 0x88, 0x87, 0x87, 0x86}; 1678 1665 1679 - rt2800_rfcsr_write(rt2x00dev, 59, r59_non_bt[idx]); 1680 - } 1681 - } 1682 - } 1666 + rt2800_rfcsr_write(rt2x00dev, 59, 1667 + r59_non_bt[idx]); 1668 + } 1669 + } 1670 + } 1683 1671 1684 - rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr); 1685 - rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, 0); 1686 - rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, 0); 1687 - rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); 1672 + rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr); 1673 + rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, 0); 1674 + rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, 0); 1675 + rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); 1688 1676 1689 - rt2800_rfcsr_read(rt2x00dev, 
3, &rfcsr); 1690 - rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1); 1691 - rt2800_rfcsr_write(rt2x00dev, 3, rfcsr); 1677 + rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr); 1678 + rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1); 1679 + rt2800_rfcsr_write(rt2x00dev, 3, rfcsr); 1692 1680 } 1693 1681 1694 1682 static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, ··· 1721 1697 rt2x00_rf(rt2x00dev, RF3052) || 1722 1698 rt2x00_rf(rt2x00dev, RF3320)) 1723 1699 rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info); 1724 - else if (rt2x00_rf(rt2x00dev, RF5390)) 1725 - rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info); 1700 + else if (rt2x00_rf(rt2x00dev, RF5390)) 1701 + rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info); 1726 1702 else 1727 1703 rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info); 1728 1704 ··· 1735 1711 rt2800_bbp_write(rt2x00dev, 86, 0); 1736 1712 1737 1713 if (rf->channel <= 14) { 1738 - if (!rt2x00_rt(rt2x00dev, RT5390)) { 1739 - if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) { 1740 - rt2800_bbp_write(rt2x00dev, 82, 0x62); 1741 - rt2800_bbp_write(rt2x00dev, 75, 0x46); 1742 - } else { 1743 - rt2800_bbp_write(rt2x00dev, 82, 0x84); 1744 - rt2800_bbp_write(rt2x00dev, 75, 0x50); 1745 - } 1714 + if (!rt2x00_rt(rt2x00dev, RT5390)) { 1715 + if (test_bit(CONFIG_EXTERNAL_LNA_BG, 1716 + &rt2x00dev->flags)) { 1717 + rt2800_bbp_write(rt2x00dev, 82, 0x62); 1718 + rt2800_bbp_write(rt2x00dev, 75, 0x46); 1719 + } else { 1720 + rt2800_bbp_write(rt2x00dev, 82, 0x84); 1721 + rt2800_bbp_write(rt2x00dev, 75, 0x50); 1722 + } 1746 1723 } 1747 1724 } else { 1748 1725 rt2800_bbp_write(rt2x00dev, 82, 0xf2); ··· 2122 2097 if (rt2x00_rt(rt2x00dev, RT3070) || 2123 2098 rt2x00_rt(rt2x00dev, RT3071) || 2124 2099 rt2x00_rt(rt2x00dev, RT3090) || 2125 - rt2x00_rt(rt2x00dev, RT3390) || 2126 - rt2x00_rt(rt2x00dev, RT5390)) 2100 + rt2x00_rt(rt2x00dev, RT3390) || 2101 + rt2x00_rt(rt2x00dev, RT5390)) 2127 2102 return 0x1c + (2 * rt2x00dev->lna_gain); 
2128 2103 else 2129 2104 return 0x2e + rt2x00dev->lna_gain; ··· 2255 2230 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400); 2256 2231 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000); 2257 2232 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x0000001f); 2258 - } else if (rt2x00_rt(rt2x00dev, RT5390)) { 2259 - rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404); 2260 - rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); 2261 - rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); 2233 + } else if (rt2x00_rt(rt2x00dev, RT5390)) { 2234 + rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404); 2235 + rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); 2236 + rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); 2262 2237 } else { 2263 2238 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000); 2264 2239 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); ··· 2475 2450 rt2800_register_multiwrite(rt2x00dev, MAC_WCID_ENTRY(i), 2476 2451 wcid, sizeof(wcid)); 2477 2452 2478 - rt2800_register_write(rt2x00dev, MAC_WCID_ATTR_ENTRY(i), 1); 2453 + rt2800_register_write(rt2x00dev, MAC_WCID_ATTR_ENTRY(i), 0); 2479 2454 rt2800_register_write(rt2x00dev, MAC_IVEIV_ENTRY(i), 0); 2480 2455 } 2481 2456 ··· 2634 2609 rt2800_wait_bbp_ready(rt2x00dev))) 2635 2610 return -EACCES; 2636 2611 2637 - if (rt2x00_rt(rt2x00dev, RT5390)) { 2638 - rt2800_bbp_read(rt2x00dev, 4, &value); 2639 - rt2x00_set_field8(&value, BBP4_MAC_IF_CTRL, 1); 2640 - rt2800_bbp_write(rt2x00dev, 4, value); 2641 - } 2612 + if (rt2x00_rt(rt2x00dev, RT5390)) { 2613 + rt2800_bbp_read(rt2x00dev, 4, &value); 2614 + rt2x00_set_field8(&value, BBP4_MAC_IF_CTRL, 1); 2615 + rt2800_bbp_write(rt2x00dev, 4, value); 2616 + } 2642 2617 2643 - if (rt2800_is_305x_soc(rt2x00dev) || 2644 - rt2x00_rt(rt2x00dev, RT5390)) 2618 + if (rt2800_is_305x_soc(rt2x00dev) || 2619 + rt2x00_rt(rt2x00dev, RT5390)) 2645 2620 rt2800_bbp_write(rt2x00dev, 31, 0x08); 2646 2621 2647 2622 
rt2800_bbp_write(rt2x00dev, 65, 0x2c); 2648 2623 rt2800_bbp_write(rt2x00dev, 66, 0x38); 2649 2624 2650 - if (rt2x00_rt(rt2x00dev, RT5390)) 2651 - rt2800_bbp_write(rt2x00dev, 68, 0x0b); 2625 + if (rt2x00_rt(rt2x00dev, RT5390)) 2626 + rt2800_bbp_write(rt2x00dev, 68, 0x0b); 2652 2627 2653 2628 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) { 2654 2629 rt2800_bbp_write(rt2x00dev, 69, 0x16); 2655 2630 rt2800_bbp_write(rt2x00dev, 73, 0x12); 2656 - } else if (rt2x00_rt(rt2x00dev, RT5390)) { 2657 - rt2800_bbp_write(rt2x00dev, 69, 0x12); 2658 - rt2800_bbp_write(rt2x00dev, 73, 0x13); 2659 - rt2800_bbp_write(rt2x00dev, 75, 0x46); 2660 - rt2800_bbp_write(rt2x00dev, 76, 0x28); 2661 - rt2800_bbp_write(rt2x00dev, 77, 0x59); 2631 + } else if (rt2x00_rt(rt2x00dev, RT5390)) { 2632 + rt2800_bbp_write(rt2x00dev, 69, 0x12); 2633 + rt2800_bbp_write(rt2x00dev, 73, 0x13); 2634 + rt2800_bbp_write(rt2x00dev, 75, 0x46); 2635 + rt2800_bbp_write(rt2x00dev, 76, 0x28); 2636 + rt2800_bbp_write(rt2x00dev, 77, 0x59); 2662 2637 } else { 2663 2638 rt2800_bbp_write(rt2x00dev, 69, 0x12); 2664 2639 rt2800_bbp_write(rt2x00dev, 73, 0x10); ··· 2669 2644 if (rt2x00_rt(rt2x00dev, RT3070) || 2670 2645 rt2x00_rt(rt2x00dev, RT3071) || 2671 2646 rt2x00_rt(rt2x00dev, RT3090) || 2672 - rt2x00_rt(rt2x00dev, RT3390) || 2673 - rt2x00_rt(rt2x00dev, RT5390)) { 2647 + rt2x00_rt(rt2x00dev, RT3390) || 2648 + rt2x00_rt(rt2x00dev, RT5390)) { 2674 2649 rt2800_bbp_write(rt2x00dev, 79, 0x13); 2675 2650 rt2800_bbp_write(rt2x00dev, 80, 0x05); 2676 2651 rt2800_bbp_write(rt2x00dev, 81, 0x33); ··· 2682 2657 } 2683 2658 2684 2659 rt2800_bbp_write(rt2x00dev, 82, 0x62); 2685 - if (rt2x00_rt(rt2x00dev, RT5390)) 2686 - rt2800_bbp_write(rt2x00dev, 83, 0x7a); 2687 - else 2688 - rt2800_bbp_write(rt2x00dev, 83, 0x6a); 2660 + if (rt2x00_rt(rt2x00dev, RT5390)) 2661 + rt2800_bbp_write(rt2x00dev, 83, 0x7a); 2662 + else 2663 + rt2800_bbp_write(rt2x00dev, 83, 0x6a); 2689 2664 2690 2665 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D)) 2691 
2666 rt2800_bbp_write(rt2x00dev, 84, 0x19); 2692 - else if (rt2x00_rt(rt2x00dev, RT5390)) 2693 - rt2800_bbp_write(rt2x00dev, 84, 0x9a); 2667 + else if (rt2x00_rt(rt2x00dev, RT5390)) 2668 + rt2800_bbp_write(rt2x00dev, 84, 0x9a); 2694 2669 else 2695 2670 rt2800_bbp_write(rt2x00dev, 84, 0x99); 2696 2671 2697 - if (rt2x00_rt(rt2x00dev, RT5390)) 2698 - rt2800_bbp_write(rt2x00dev, 86, 0x38); 2699 - else 2700 - rt2800_bbp_write(rt2x00dev, 86, 0x00); 2672 + if (rt2x00_rt(rt2x00dev, RT5390)) 2673 + rt2800_bbp_write(rt2x00dev, 86, 0x38); 2674 + else 2675 + rt2800_bbp_write(rt2x00dev, 86, 0x00); 2701 2676 2702 2677 rt2800_bbp_write(rt2x00dev, 91, 0x04); 2703 2678 2704 - if (rt2x00_rt(rt2x00dev, RT5390)) 2705 - rt2800_bbp_write(rt2x00dev, 92, 0x02); 2706 - else 2707 - rt2800_bbp_write(rt2x00dev, 92, 0x00); 2679 + if (rt2x00_rt(rt2x00dev, RT5390)) 2680 + rt2800_bbp_write(rt2x00dev, 92, 0x02); 2681 + else 2682 + rt2800_bbp_write(rt2x00dev, 92, 0x00); 2708 2683 2709 2684 if (rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) || 2710 2685 rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) || 2711 2686 rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) || 2712 2687 rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) || 2713 - rt2x00_rt(rt2x00dev, RT5390) || 2688 + rt2x00_rt(rt2x00dev, RT5390) || 2714 2689 rt2800_is_305x_soc(rt2x00dev)) 2715 2690 rt2800_bbp_write(rt2x00dev, 103, 0xc0); 2716 2691 else 2717 2692 rt2800_bbp_write(rt2x00dev, 103, 0x00); 2718 2693 2719 - if (rt2x00_rt(rt2x00dev, RT5390)) 2720 - rt2800_bbp_write(rt2x00dev, 104, 0x92); 2694 + if (rt2x00_rt(rt2x00dev, RT5390)) 2695 + rt2800_bbp_write(rt2x00dev, 104, 0x92); 2721 2696 2722 2697 if (rt2800_is_305x_soc(rt2x00dev)) 2723 2698 rt2800_bbp_write(rt2x00dev, 105, 0x01); 2724 - else if (rt2x00_rt(rt2x00dev, RT5390)) 2725 - rt2800_bbp_write(rt2x00dev, 105, 0x3c); 2699 + else if (rt2x00_rt(rt2x00dev, RT5390)) 2700 + rt2800_bbp_write(rt2x00dev, 105, 0x3c); 2726 2701 else 2727 2702 rt2800_bbp_write(rt2x00dev, 105, 0x05); 2728 
2703 2729 - if (rt2x00_rt(rt2x00dev, RT5390)) 2730 - rt2800_bbp_write(rt2x00dev, 106, 0x03); 2731 - else 2732 - rt2800_bbp_write(rt2x00dev, 106, 0x35); 2704 + if (rt2x00_rt(rt2x00dev, RT5390)) 2705 + rt2800_bbp_write(rt2x00dev, 106, 0x03); 2706 + else 2707 + rt2800_bbp_write(rt2x00dev, 106, 0x35); 2733 2708 2734 - if (rt2x00_rt(rt2x00dev, RT5390)) 2735 - rt2800_bbp_write(rt2x00dev, 128, 0x12); 2709 + if (rt2x00_rt(rt2x00dev, RT5390)) 2710 + rt2800_bbp_write(rt2x00dev, 128, 0x12); 2736 2711 2737 2712 if (rt2x00_rt(rt2x00dev, RT3071) || 2738 2713 rt2x00_rt(rt2x00dev, RT3090) || 2739 - rt2x00_rt(rt2x00dev, RT3390) || 2740 - rt2x00_rt(rt2x00dev, RT5390)) { 2714 + rt2x00_rt(rt2x00dev, RT3390) || 2715 + rt2x00_rt(rt2x00dev, RT5390)) { 2741 2716 rt2800_bbp_read(rt2x00dev, 138, &value); 2742 2717 2743 2718 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); ··· 2749 2724 rt2800_bbp_write(rt2x00dev, 138, value); 2750 2725 } 2751 2726 2752 - if (rt2x00_rt(rt2x00dev, RT5390)) { 2753 - int ant, div_mode; 2727 + if (rt2x00_rt(rt2x00dev, RT5390)) { 2728 + int ant, div_mode; 2754 2729 2755 - rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); 2756 - div_mode = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY); 2757 - ant = (div_mode == 3) ? 1 : 0; 2730 + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); 2731 + div_mode = rt2x00_get_field16(eeprom, 2732 + EEPROM_NIC_CONF1_ANT_DIVERSITY); 2733 + ant = (div_mode == 3) ? 
1 : 0; 2758 2734 2759 - /* check if this is a Bluetooth combo card */ 2760 - rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); 2761 - if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST)) { 2762 - u32 reg; 2735 + /* check if this is a Bluetooth combo card */ 2736 + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); 2737 + if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST)) { 2738 + u32 reg; 2763 2739 2764 - rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg); 2765 - rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT3, 0); 2766 - rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT6, 0); 2767 - rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, 0); 2768 - rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT6, 0); 2769 - if (ant == 0) 2770 - rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, 1); 2771 - else if (ant == 1) 2772 - rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT6, 1); 2773 - rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg); 2774 - } 2740 + rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg); 2741 + rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT3, 0); 2742 + rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT6, 0); 2743 + rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, 0); 2744 + rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT6, 0); 2745 + if (ant == 0) 2746 + rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT3, 1); 2747 + else if (ant == 1) 2748 + rt2x00_set_field32(&reg, GPIO_CTRL_CFG_BIT6, 1); 2749 + rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg); 2750 + } 2775 2751 2776 - rt2800_bbp_read(rt2x00dev, 152, &value); 2777 - if (ant == 0) 2778 - rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1); 2779 - else 2780 - rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0); 2781 - rt2800_bbp_write(rt2x00dev, 152, value); 2752 + rt2800_bbp_read(rt2x00dev, 152, &value); 2753 + if (ant == 0) 2754 + rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1); 2755 + else 2756 + rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0); 2757 + rt2800_bbp_write(rt2x00dev, 152, value); 2782 
2758 2783 - /* Init frequency calibration */ 2784 - rt2800_bbp_write(rt2x00dev, 142, 1); 2785 - rt2800_bbp_write(rt2x00dev, 143, 57); 2786 - } 2759 + /* Init frequency calibration */ 2760 + rt2800_bbp_write(rt2x00dev, 142, 1); 2761 + rt2800_bbp_write(rt2x00dev, 143, 57); 2762 + } 2787 2763 2788 2764 for (i = 0; i < EEPROM_BBP_SIZE; i++) { 2789 2765 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom); ··· 2874 2848 !rt2x00_rt(rt2x00dev, RT3071) && 2875 2849 !rt2x00_rt(rt2x00dev, RT3090) && 2876 2850 !rt2x00_rt(rt2x00dev, RT3390) && 2877 - !rt2x00_rt(rt2x00dev, RT5390) && 2851 + !rt2x00_rt(rt2x00dev, RT5390) && 2878 2852 !rt2800_is_305x_soc(rt2x00dev)) 2879 2853 return 0; 2880 2854 2881 2855 /* 2882 2856 * Init RF calibration. 2883 2857 */ 2884 - if (rt2x00_rt(rt2x00dev, RT5390)) { 2885 - rt2800_rfcsr_read(rt2x00dev, 2, &rfcsr); 2886 - rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1); 2887 - rt2800_rfcsr_write(rt2x00dev, 2, rfcsr); 2888 - msleep(1); 2889 - rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 0); 2890 - rt2800_rfcsr_write(rt2x00dev, 2, rfcsr); 2891 - } else { 2892 - rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr); 2893 - rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1); 2894 - rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); 2895 - msleep(1); 2896 - rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0); 2897 - rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); 2898 - } 2858 + if (rt2x00_rt(rt2x00dev, RT5390)) { 2859 + rt2800_rfcsr_read(rt2x00dev, 2, &rfcsr); 2860 + rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1); 2861 + rt2800_rfcsr_write(rt2x00dev, 2, rfcsr); 2862 + msleep(1); 2863 + rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 0); 2864 + rt2800_rfcsr_write(rt2x00dev, 2, rfcsr); 2865 + } else { 2866 + rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr); 2867 + rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1); 2868 + rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); 2869 + msleep(1); 2870 + rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0); 2871 + rt2800_rfcsr_write(rt2x00dev, 30, 
rfcsr); 2872 + } 2899 2873 2900 2874 if (rt2x00_rt(rt2x00dev, RT3070) || 2901 2875 rt2x00_rt(rt2x00dev, RT3071) || ··· 2986 2960 rt2800_rfcsr_write(rt2x00dev, 30, 0x00); 2987 2961 rt2800_rfcsr_write(rt2x00dev, 31, 0x00); 2988 2962 return 0; 2989 - } else if (rt2x00_rt(rt2x00dev, RT5390)) { 2990 - rt2800_rfcsr_write(rt2x00dev, 1, 0x0f); 2991 - rt2800_rfcsr_write(rt2x00dev, 2, 0x80); 2992 - rt2800_rfcsr_write(rt2x00dev, 3, 0x88); 2993 - rt2800_rfcsr_write(rt2x00dev, 5, 0x10); 2994 - if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) 2995 - rt2800_rfcsr_write(rt2x00dev, 6, 0xe0); 2996 - else 2997 - rt2800_rfcsr_write(rt2x00dev, 6, 0xa0); 2998 - rt2800_rfcsr_write(rt2x00dev, 7, 0x00); 2999 - rt2800_rfcsr_write(rt2x00dev, 10, 0x53); 3000 - rt2800_rfcsr_write(rt2x00dev, 11, 0x4a); 3001 - rt2800_rfcsr_write(rt2x00dev, 12, 0xc6); 3002 - rt2800_rfcsr_write(rt2x00dev, 13, 0x9f); 3003 - rt2800_rfcsr_write(rt2x00dev, 14, 0x00); 3004 - rt2800_rfcsr_write(rt2x00dev, 15, 0x00); 3005 - rt2800_rfcsr_write(rt2x00dev, 16, 0x00); 3006 - rt2800_rfcsr_write(rt2x00dev, 18, 0x03); 3007 - rt2800_rfcsr_write(rt2x00dev, 19, 0x00); 2963 + } else if (rt2x00_rt(rt2x00dev, RT5390)) { 2964 + rt2800_rfcsr_write(rt2x00dev, 1, 0x0f); 2965 + rt2800_rfcsr_write(rt2x00dev, 2, 0x80); 2966 + rt2800_rfcsr_write(rt2x00dev, 3, 0x88); 2967 + rt2800_rfcsr_write(rt2x00dev, 5, 0x10); 2968 + if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) 2969 + rt2800_rfcsr_write(rt2x00dev, 6, 0xe0); 2970 + else 2971 + rt2800_rfcsr_write(rt2x00dev, 6, 0xa0); 2972 + rt2800_rfcsr_write(rt2x00dev, 7, 0x00); 2973 + rt2800_rfcsr_write(rt2x00dev, 10, 0x53); 2974 + rt2800_rfcsr_write(rt2x00dev, 11, 0x4a); 2975 + rt2800_rfcsr_write(rt2x00dev, 12, 0xc6); 2976 + rt2800_rfcsr_write(rt2x00dev, 13, 0x9f); 2977 + rt2800_rfcsr_write(rt2x00dev, 14, 0x00); 2978 + rt2800_rfcsr_write(rt2x00dev, 15, 0x00); 2979 + rt2800_rfcsr_write(rt2x00dev, 16, 0x00); 2980 + rt2800_rfcsr_write(rt2x00dev, 18, 0x03); 2981 + rt2800_rfcsr_write(rt2x00dev, 
19, 0x00); 3008 2982 3009 - rt2800_rfcsr_write(rt2x00dev, 20, 0x00); 3010 - rt2800_rfcsr_write(rt2x00dev, 21, 0x00); 3011 - rt2800_rfcsr_write(rt2x00dev, 22, 0x20); 3012 - rt2800_rfcsr_write(rt2x00dev, 23, 0x00); 3013 - rt2800_rfcsr_write(rt2x00dev, 24, 0x00); 3014 - if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) 3015 - rt2800_rfcsr_write(rt2x00dev, 25, 0x80); 3016 - else 3017 - rt2800_rfcsr_write(rt2x00dev, 25, 0xc0); 3018 - rt2800_rfcsr_write(rt2x00dev, 26, 0x00); 3019 - rt2800_rfcsr_write(rt2x00dev, 27, 0x09); 3020 - rt2800_rfcsr_write(rt2x00dev, 28, 0x00); 3021 - rt2800_rfcsr_write(rt2x00dev, 29, 0x10); 2983 + rt2800_rfcsr_write(rt2x00dev, 20, 0x00); 2984 + rt2800_rfcsr_write(rt2x00dev, 21, 0x00); 2985 + rt2800_rfcsr_write(rt2x00dev, 22, 0x20); 2986 + rt2800_rfcsr_write(rt2x00dev, 23, 0x00); 2987 + rt2800_rfcsr_write(rt2x00dev, 24, 0x00); 2988 + if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) 2989 + rt2800_rfcsr_write(rt2x00dev, 25, 0x80); 2990 + else 2991 + rt2800_rfcsr_write(rt2x00dev, 25, 0xc0); 2992 + rt2800_rfcsr_write(rt2x00dev, 26, 0x00); 2993 + rt2800_rfcsr_write(rt2x00dev, 27, 0x09); 2994 + rt2800_rfcsr_write(rt2x00dev, 28, 0x00); 2995 + rt2800_rfcsr_write(rt2x00dev, 29, 0x10); 3022 2996 3023 - rt2800_rfcsr_write(rt2x00dev, 30, 0x00); 3024 - rt2800_rfcsr_write(rt2x00dev, 31, 0x80); 3025 - rt2800_rfcsr_write(rt2x00dev, 32, 0x80); 3026 - rt2800_rfcsr_write(rt2x00dev, 33, 0x00); 3027 - rt2800_rfcsr_write(rt2x00dev, 34, 0x07); 3028 - rt2800_rfcsr_write(rt2x00dev, 35, 0x12); 3029 - rt2800_rfcsr_write(rt2x00dev, 36, 0x00); 3030 - rt2800_rfcsr_write(rt2x00dev, 37, 0x08); 3031 - rt2800_rfcsr_write(rt2x00dev, 38, 0x85); 3032 - rt2800_rfcsr_write(rt2x00dev, 39, 0x1b); 2997 + rt2800_rfcsr_write(rt2x00dev, 30, 0x00); 2998 + rt2800_rfcsr_write(rt2x00dev, 31, 0x80); 2999 + rt2800_rfcsr_write(rt2x00dev, 32, 0x80); 3000 + rt2800_rfcsr_write(rt2x00dev, 33, 0x00); 3001 + rt2800_rfcsr_write(rt2x00dev, 34, 0x07); 3002 + rt2800_rfcsr_write(rt2x00dev, 35, 
0x12); 3003 + rt2800_rfcsr_write(rt2x00dev, 36, 0x00); 3004 + rt2800_rfcsr_write(rt2x00dev, 37, 0x08); 3005 + rt2800_rfcsr_write(rt2x00dev, 38, 0x85); 3006 + rt2800_rfcsr_write(rt2x00dev, 39, 0x1b); 3033 3007 3034 - if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) 3035 - rt2800_rfcsr_write(rt2x00dev, 40, 0x0b); 3036 - else 3037 - rt2800_rfcsr_write(rt2x00dev, 40, 0x4b); 3038 - rt2800_rfcsr_write(rt2x00dev, 41, 0xbb); 3039 - rt2800_rfcsr_write(rt2x00dev, 42, 0xd2); 3040 - rt2800_rfcsr_write(rt2x00dev, 43, 0x9a); 3041 - rt2800_rfcsr_write(rt2x00dev, 44, 0x0e); 3042 - rt2800_rfcsr_write(rt2x00dev, 45, 0xa2); 3043 - if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) 3044 - rt2800_rfcsr_write(rt2x00dev, 46, 0x73); 3045 - else 3046 - rt2800_rfcsr_write(rt2x00dev, 46, 0x7b); 3047 - rt2800_rfcsr_write(rt2x00dev, 47, 0x00); 3048 - rt2800_rfcsr_write(rt2x00dev, 48, 0x10); 3049 - rt2800_rfcsr_write(rt2x00dev, 49, 0x94); 3008 + if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) 3009 + rt2800_rfcsr_write(rt2x00dev, 40, 0x0b); 3010 + else 3011 + rt2800_rfcsr_write(rt2x00dev, 40, 0x4b); 3012 + rt2800_rfcsr_write(rt2x00dev, 41, 0xbb); 3013 + rt2800_rfcsr_write(rt2x00dev, 42, 0xd2); 3014 + rt2800_rfcsr_write(rt2x00dev, 43, 0x9a); 3015 + rt2800_rfcsr_write(rt2x00dev, 44, 0x0e); 3016 + rt2800_rfcsr_write(rt2x00dev, 45, 0xa2); 3017 + if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) 3018 + rt2800_rfcsr_write(rt2x00dev, 46, 0x73); 3019 + else 3020 + rt2800_rfcsr_write(rt2x00dev, 46, 0x7b); 3021 + rt2800_rfcsr_write(rt2x00dev, 47, 0x00); 3022 + rt2800_rfcsr_write(rt2x00dev, 48, 0x10); 3023 + rt2800_rfcsr_write(rt2x00dev, 49, 0x94); 3050 3024 3051 - rt2800_rfcsr_write(rt2x00dev, 52, 0x38); 3052 - if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) 3053 - rt2800_rfcsr_write(rt2x00dev, 53, 0x00); 3054 - else 3055 - rt2800_rfcsr_write(rt2x00dev, 53, 0x84); 3056 - rt2800_rfcsr_write(rt2x00dev, 54, 0x78); 3057 - rt2800_rfcsr_write(rt2x00dev, 55, 0x44); 3058 - 
rt2800_rfcsr_write(rt2x00dev, 56, 0x22); 3059 - rt2800_rfcsr_write(rt2x00dev, 57, 0x80); 3060 - rt2800_rfcsr_write(rt2x00dev, 58, 0x7f); 3061 - rt2800_rfcsr_write(rt2x00dev, 59, 0x63); 3025 + rt2800_rfcsr_write(rt2x00dev, 52, 0x38); 3026 + if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) 3027 + rt2800_rfcsr_write(rt2x00dev, 53, 0x00); 3028 + else 3029 + rt2800_rfcsr_write(rt2x00dev, 53, 0x84); 3030 + rt2800_rfcsr_write(rt2x00dev, 54, 0x78); 3031 + rt2800_rfcsr_write(rt2x00dev, 55, 0x44); 3032 + rt2800_rfcsr_write(rt2x00dev, 56, 0x22); 3033 + rt2800_rfcsr_write(rt2x00dev, 57, 0x80); 3034 + rt2800_rfcsr_write(rt2x00dev, 58, 0x7f); 3035 + rt2800_rfcsr_write(rt2x00dev, 59, 0x63); 3062 3036 3063 - rt2800_rfcsr_write(rt2x00dev, 60, 0x45); 3064 - if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) 3065 - rt2800_rfcsr_write(rt2x00dev, 61, 0xd1); 3066 - else 3067 - rt2800_rfcsr_write(rt2x00dev, 61, 0xdd); 3068 - rt2800_rfcsr_write(rt2x00dev, 62, 0x00); 3069 - rt2800_rfcsr_write(rt2x00dev, 63, 0x00); 3037 + rt2800_rfcsr_write(rt2x00dev, 60, 0x45); 3038 + if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) 3039 + rt2800_rfcsr_write(rt2x00dev, 61, 0xd1); 3040 + else 3041 + rt2800_rfcsr_write(rt2x00dev, 61, 0xdd); 3042 + rt2800_rfcsr_write(rt2x00dev, 62, 0x00); 3043 + rt2800_rfcsr_write(rt2x00dev, 63, 0x00); 3070 3044 } 3071 3045 3072 3046 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) { ··· 3120 3094 rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x15); 3121 3095 } 3122 3096 3123 - if (!rt2x00_rt(rt2x00dev, RT5390)) { 3124 - /* 3125 - * Set back to initial state 3126 - */ 3127 - rt2800_bbp_write(rt2x00dev, 24, 0); 3097 + if (!rt2x00_rt(rt2x00dev, RT5390)) { 3098 + /* 3099 + * Set back to initial state 3100 + */ 3101 + rt2800_bbp_write(rt2x00dev, 24, 0); 3128 3102 3129 - rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr); 3130 - rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 0); 3131 - rt2800_rfcsr_write(rt2x00dev, 22, rfcsr); 3103 + rt2800_rfcsr_read(rt2x00dev, 
22, &rfcsr); 3104 + rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 0); 3105 + rt2800_rfcsr_write(rt2x00dev, 22, rfcsr); 3132 3106 3133 - /* 3134 - * Set BBP back to BW20 3135 - */ 3136 - rt2800_bbp_read(rt2x00dev, 4, &bbp); 3137 - rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0); 3138 - rt2800_bbp_write(rt2x00dev, 4, bbp); 3139 - } 3107 + /* 3108 + * Set BBP back to BW20 3109 + */ 3110 + rt2800_bbp_read(rt2x00dev, 4, &bbp); 3111 + rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0); 3112 + rt2800_bbp_write(rt2x00dev, 4, bbp); 3113 + } 3140 3114 3141 3115 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) || 3142 3116 rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) || ··· 3148 3122 rt2x00_set_field32(&reg, OPT_14_CSR_BIT0, 1); 3149 3123 rt2800_register_write(rt2x00dev, OPT_14_CSR, reg); 3150 3124 3151 - if (!rt2x00_rt(rt2x00dev, RT5390)) { 3152 - rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr); 3153 - rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0); 3154 - if (rt2x00_rt(rt2x00dev, RT3070) || 3155 - rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) || 3156 - rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) || 3157 - rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) { 3158 - if (!test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) 3159 - rt2x00_set_field8(&rfcsr, RFCSR17_R, 1); 3160 - } 3161 - rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom); 3162 - if (rt2x00_get_field16(eeprom, EEPROM_TXMIXER_GAIN_BG_VAL) >= 1) 3163 - rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN, 3164 - rt2x00_get_field16(eeprom, 3165 - EEPROM_TXMIXER_GAIN_BG_VAL)); 3166 - rt2800_rfcsr_write(rt2x00dev, 17, rfcsr); 3167 - } 3125 + if (!rt2x00_rt(rt2x00dev, RT5390)) { 3126 + rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr); 3127 + rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0); 3128 + if (rt2x00_rt(rt2x00dev, RT3070) || 3129 + rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) || 3130 + rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) || 3131 + rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) { 3132 + if 
(!test_bit(CONFIG_EXTERNAL_LNA_BG, 3133 + &rt2x00dev->flags)) 3134 + rt2x00_set_field8(&rfcsr, RFCSR17_R, 1); 3135 + } 3136 + rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom); 3137 + if (rt2x00_get_field16(eeprom, EEPROM_TXMIXER_GAIN_BG_VAL) >= 1) 3138 + rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN, 3139 + rt2x00_get_field16(eeprom, 3140 + EEPROM_TXMIXER_GAIN_BG_VAL)); 3141 + rt2800_rfcsr_write(rt2x00dev, 17, rfcsr); 3142 + } 3168 3143 3169 3144 if (rt2x00_rt(rt2x00dev, RT3090)) { 3170 3145 rt2800_bbp_read(rt2x00dev, 138, &bbp); ··· 3216 3189 rt2800_rfcsr_write(rt2x00dev, 27, rfcsr); 3217 3190 } 3218 3191 3219 - if (rt2x00_rt(rt2x00dev, RT5390)) { 3220 - rt2800_rfcsr_read(rt2x00dev, 38, &rfcsr); 3221 - rt2x00_set_field8(&rfcsr, RFCSR38_RX_LO1_EN, 0); 3222 - rt2800_rfcsr_write(rt2x00dev, 38, rfcsr); 3192 + if (rt2x00_rt(rt2x00dev, RT5390)) { 3193 + rt2800_rfcsr_read(rt2x00dev, 38, &rfcsr); 3194 + rt2x00_set_field8(&rfcsr, RFCSR38_RX_LO1_EN, 0); 3195 + rt2800_rfcsr_write(rt2x00dev, 38, rfcsr); 3223 3196 3224 - rt2800_rfcsr_read(rt2x00dev, 39, &rfcsr); 3225 - rt2x00_set_field8(&rfcsr, RFCSR39_RX_LO2_EN, 0); 3226 - rt2800_rfcsr_write(rt2x00dev, 39, rfcsr); 3197 + rt2800_rfcsr_read(rt2x00dev, 39, &rfcsr); 3198 + rt2x00_set_field8(&rfcsr, RFCSR39_RX_LO2_EN, 0); 3199 + rt2800_rfcsr_write(rt2x00dev, 39, rfcsr); 3227 3200 3228 - rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr); 3229 - rt2x00_set_field8(&rfcsr, RFCSR30_RX_VCM, 2); 3230 - rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); 3231 - } 3201 + rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr); 3202 + rt2x00_set_field8(&rfcsr, RFCSR30_RX_VCM, 2); 3203 + rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); 3204 + } 3232 3205 3233 3206 return 0; 3234 3207 } ··· 3494 3467 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom); 3495 3468 3496 3469 /* 3497 - * Identify RF chipset by EEPROM value 3498 - * RT28xx/RT30xx: defined in "EEPROM_NIC_CONF0_RF_TYPE" field 3499 - * RT53xx: defined in "EEPROM_CHIP_ID" field 3470 + * Identify RF 
chipset by EEPROM value 3471 + * RT28xx/RT30xx: defined in "EEPROM_NIC_CONF0_RF_TYPE" field 3472 + * RT53xx: defined in "EEPROM_CHIP_ID" field 3500 3473 */ 3501 3474 rt2800_register_read(rt2x00dev, MAC_CSR0, &reg); 3502 - if (rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT5390) 3503 - rt2x00_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &value); 3504 - else 3505 - value = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE); 3475 + if (rt2x00_get_field32(reg, MAC_CSR0_CHIPSET) == RT5390) 3476 + rt2x00_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &value); 3477 + else 3478 + value = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE); 3506 3479 3507 3480 rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET), 3508 3481 value, rt2x00_get_field32(reg, MAC_CSR0_REVISION)); ··· 3514 3487 !rt2x00_rt(rt2x00dev, RT3071) && 3515 3488 !rt2x00_rt(rt2x00dev, RT3090) && 3516 3489 !rt2x00_rt(rt2x00dev, RT3390) && 3517 - !rt2x00_rt(rt2x00dev, RT3572) && 3518 - !rt2x00_rt(rt2x00dev, RT5390)) { 3490 + !rt2x00_rt(rt2x00dev, RT3572) && 3491 + !rt2x00_rt(rt2x00dev, RT5390)) { 3519 3492 ERROR(rt2x00dev, "Invalid RT chipset detected.\n"); 3520 3493 return -ENODEV; 3521 3494 } ··· 3529 3502 !rt2x00_rf(rt2x00dev, RF3021) && 3530 3503 !rt2x00_rf(rt2x00dev, RF3022) && 3531 3504 !rt2x00_rf(rt2x00dev, RF3052) && 3532 - !rt2x00_rf(rt2x00dev, RF3320) && 3533 - !rt2x00_rf(rt2x00dev, RF5390)) { 3505 + !rt2x00_rf(rt2x00dev, RF3320) && 3506 + !rt2x00_rf(rt2x00dev, RF5390)) { 3534 3507 ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); 3535 3508 return -ENODEV; 3536 3509 } ··· 3827 3800 rt2x00_rf(rt2x00dev, RF2020) || 3828 3801 rt2x00_rf(rt2x00dev, RF3021) || 3829 3802 rt2x00_rf(rt2x00dev, RF3022) || 3830 - rt2x00_rf(rt2x00dev, RF3320) || 3831 - rt2x00_rf(rt2x00dev, RF5390)) { 3803 + rt2x00_rf(rt2x00dev, RF3320) || 3804 + rt2x00_rf(rt2x00dev, RF5390)) { 3832 3805 spec->num_channels = 14; 3833 3806 spec->channels = rf_vals_3x; 3834 3807 } else if (rt2x00_rf(rt2x00dev, RF3052)) { ··· 3992 3965 if 
(queue_idx >= 4) 3993 3966 return 0; 3994 3967 3995 - queue = rt2x00queue_get_queue(rt2x00dev, queue_idx); 3968 + queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx); 3996 3969 3997 3970 /* Update WMM TXOP register */ 3998 3971 offset = WMM_TXOP0_CFG + (sizeof(u32) * (!!(queue_idx & 2)));
+16 -17
drivers/net/wireless/rt2x00/rt2800pci.c
··· 493 493 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f); 494 494 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00); 495 495 496 - if (rt2x00_rt(rt2x00dev, RT5390)) { 497 - rt2800_register_read(rt2x00dev, AUX_CTRL, &reg); 498 - rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1); 499 - rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1); 500 - rt2800_register_write(rt2x00dev, AUX_CTRL, reg); 501 - } 496 + if (rt2x00_rt(rt2x00dev, RT5390)) { 497 + rt2800_register_read(rt2x00dev, AUX_CTRL, &reg); 498 + rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1); 499 + rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1); 500 + rt2800_register_write(rt2x00dev, AUX_CTRL, reg); 501 + } 502 502 503 503 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003); 504 504 ··· 726 726 727 727 while (kfifo_get(&rt2x00dev->txstatus_fifo, &status)) { 728 728 qid = rt2x00_get_field32(status, TX_STA_FIFO_PID_QUEUE); 729 - if (qid >= QID_RX) { 729 + if (unlikely(qid >= QID_RX)) { 730 730 /* 731 731 * Unknown queue, this shouldn't happen. Just drop 732 732 * this tx status. ··· 736 736 break; 737 737 } 738 738 739 - queue = rt2x00queue_get_queue(rt2x00dev, qid); 739 + queue = rt2x00queue_get_tx_queue(rt2x00dev, qid); 740 740 if (unlikely(queue == NULL)) { 741 741 /* 742 742 * The queue is NULL, this shouldn't happen. Stop ··· 747 747 break; 748 748 } 749 749 750 - if (rt2x00queue_empty(queue)) { 750 + if (unlikely(rt2x00queue_empty(queue))) { 751 751 /* 752 752 * The queue is empty. Stop processing here 753 753 * and drop the tx status. ··· 765 765 static void rt2800pci_enable_interrupt(struct rt2x00_dev *rt2x00dev, 766 766 struct rt2x00_field32 irq_field) 767 767 { 768 - unsigned long flags; 769 768 u32 reg; 770 769 771 770 /* 772 771 * Enable a single interrupt. The interrupt mask register 773 772 * access needs locking. 
774 773 */ 775 - spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags); 774 + spin_lock_irq(&rt2x00dev->irqmask_lock); 776 775 rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg); 777 776 rt2x00_set_field32(&reg, irq_field, 1); 778 777 rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg); 779 - spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags); 778 + spin_unlock_irq(&rt2x00dev->irqmask_lock); 780 779 } 781 780 782 781 static void rt2800pci_txstatus_tasklet(unsigned long data) ··· 835 836 * 836 837 * Furthermore we don't disable the TX_FIFO_STATUS 837 838 * interrupt here but leave it enabled so that the TX_STA_FIFO 838 - * can also be read while the interrupt thread gets executed. 839 + * can also be read while the tx status tasklet gets executed. 839 840 * 840 841 * Since we have only one producer and one consumer we don't 841 842 * need to lock the kfifo. ··· 861 862 { 862 863 struct rt2x00_dev *rt2x00dev = dev_instance; 863 864 u32 reg, mask; 864 - unsigned long flags; 865 865 866 866 /* Read status and ACK all interrupts */ 867 867 rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, &reg); ··· 903 905 * Disable all interrupts for which a tasklet was scheduled right now, 904 906 * the tasklet will reenable the appropriate interrupts. 905 907 */ 906 - spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags); 908 + spin_lock(&rt2x00dev->irqmask_lock); 907 909 rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg); 908 910 reg &= mask; 909 911 rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg); 910 - spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags); 912 + spin_unlock(&rt2x00dev->irqmask_lock); 911 913 912 914 return IRQ_HANDLED; 913 915 } ··· 977 979 if (!modparam_nohwcrypt) 978 980 __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags); 979 981 __set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags); 982 + __set_bit(DRIVER_REQUIRE_HT_TX_DESC, &rt2x00dev->flags); 980 983 981 984 /* 982 985 * Set the rssi offset. 
··· 1134 1135 { PCI_DEVICE(0x1814, 0x3593), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1135 1136 #endif 1136 1137 #ifdef CONFIG_RT2800PCI_RT53XX 1137 - { PCI_DEVICE(0x1814, 0x5390), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1138 + { PCI_DEVICE(0x1814, 0x5390), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1138 1139 #endif 1139 1140 { 0, } 1140 1141 };
+1
drivers/net/wireless/rt2x00/rt2800usb.c
··· 565 565 __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags); 566 566 __set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags); 567 567 __set_bit(DRIVER_SUPPORT_WATCHDOG, &rt2x00dev->flags); 568 + __set_bit(DRIVER_REQUIRE_HT_TX_DESC, &rt2x00dev->flags); 568 569 569 570 /* 570 571 * Set the rssi offset.
+19 -7
drivers/net/wireless/rt2x00/rt2x00.h
··· 467 467 const u8 *address; 468 468 469 469 u32 bssidx; 470 - u32 aid; 471 470 472 471 u8 key[16]; 473 472 u8 tx_mic[8]; ··· 661 662 DRIVER_REQUIRE_L2PAD, 662 663 DRIVER_REQUIRE_TXSTATUS_FIFO, 663 664 DRIVER_REQUIRE_TASKLET_CONTEXT, 665 + DRIVER_REQUIRE_SW_SEQNO, 666 + DRIVER_REQUIRE_HT_TX_DESC, 664 667 665 668 /* 666 669 * Driver features ··· 887 886 struct work_struct txdone_work; 888 887 889 888 /* 890 - * Data queue arrays for RX, TX and Beacon. 891 - * The Beacon array also contains the Atim queue 892 - * if that is supported by the device. 889 + * Data queue arrays for RX, TX, Beacon and ATIM. 893 890 */ 894 891 unsigned int data_queues; 895 892 struct data_queue *rx; 896 893 struct data_queue *tx; 897 894 struct data_queue *bcn; 895 + struct data_queue *atim; 898 896 899 897 /* 900 898 * Firmware image. ··· 1063 1063 void rt2x00queue_unmap_skb(struct queue_entry *entry); 1064 1064 1065 1065 /** 1066 - * rt2x00queue_get_queue - Convert queue index to queue pointer 1066 + * rt2x00queue_get_tx_queue - Convert tx queue index to queue pointer 1067 1067 * @rt2x00dev: Pointer to &struct rt2x00_dev. 1068 1068 * @queue: rt2x00 queue index (see &enum data_queue_qid). 1069 + * 1070 + * Returns NULL for non tx queues. 1069 1071 */ 1070 - struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev, 1071 - const enum data_queue_qid queue); 1072 + static inline struct data_queue * 1073 + rt2x00queue_get_tx_queue(struct rt2x00_dev *rt2x00dev, 1074 + const enum data_queue_qid queue) 1075 + { 1076 + if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx) 1077 + return &rt2x00dev->tx[queue]; 1078 + 1079 + if (queue == QID_ATIM) 1080 + return rt2x00dev->atim; 1081 + 1082 + return NULL; 1083 + } 1072 1084 1073 1085 /** 1074 1086 * rt2x00queue_get_entry - Get queue entry where the given index points to.
+10 -18
drivers/net/wireless/rt2x00/rt2x00ht.c
··· 38 38 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data; 39 39 40 40 if (tx_info->control.sta) 41 - txdesc->mpdu_density = 41 + txdesc->u.ht.mpdu_density = 42 42 tx_info->control.sta->ht_cap.ampdu_density; 43 43 44 - txdesc->ba_size = 7; /* FIXME: What value is needed? */ 44 + txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */ 45 45 46 - txdesc->stbc = 46 + txdesc->u.ht.stbc = 47 47 (tx_info->flags & IEEE80211_TX_CTL_STBC) >> IEEE80211_TX_CTL_STBC_SHIFT; 48 48 49 49 /* ··· 51 51 * mcs rate to be used 52 52 */ 53 53 if (txrate->flags & IEEE80211_TX_RC_MCS) { 54 - txdesc->mcs = txrate->idx; 54 + txdesc->u.ht.mcs = txrate->idx; 55 55 56 56 /* 57 57 * MIMO PS should be set to 1 for STA's using dynamic SM PS 58 58 * when using more then one tx stream (>MCS7). 59 59 */ 60 - if (tx_info->control.sta && txdesc->mcs > 7 && 60 + if (tx_info->control.sta && txdesc->u.ht.mcs > 7 && 61 61 ((tx_info->control.sta->ht_cap.cap & 62 62 IEEE80211_HT_CAP_SM_PS) >> 63 63 IEEE80211_HT_CAP_SM_PS_SHIFT) == 64 64 WLAN_HT_CAP_SM_PS_DYNAMIC) 65 65 __set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags); 66 66 } else { 67 - txdesc->mcs = rt2x00_get_rate_mcs(hwrate->mcs); 67 + txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs); 68 68 if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) 69 - txdesc->mcs |= 0x08; 69 + txdesc->u.ht.mcs |= 0x08; 70 70 } 71 71 72 72 /* ··· 76 76 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU && 77 77 !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) 78 78 __set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags); 79 - 80 - /* 81 - * Determine HT Mix/Greenfield rate mode 82 - */ 83 - if (txrate->flags & IEEE80211_TX_RC_MCS) 84 - txdesc->rate_mode = RATE_MODE_HT_MIX; 85 - if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD) 86 - txdesc->rate_mode = RATE_MODE_HT_GREENFIELD; 87 79 88 80 /* 89 81 * Set 40Mhz mode if necessary (for legacy rates this will ··· 97 105 * for frames not transmitted with TXOP_HTTXOP 98 106 */ 99 107 if 
(ieee80211_is_mgmt(hdr->frame_control)) 100 - txdesc->txop = TXOP_BACKOFF; 108 + txdesc->u.ht.txop = TXOP_BACKOFF; 101 109 else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)) 102 - txdesc->txop = TXOP_SIFS; 110 + txdesc->u.ht.txop = TXOP_SIFS; 103 111 else 104 - txdesc->txop = TXOP_HTTXOP; 112 + txdesc->u.ht.txop = TXOP_HTTXOP; 105 113 } 106 114 107 115 u16 rt2x00ht_center_channel(struct rt2x00_dev *rt2x00dev,
+9 -11
drivers/net/wireless/rt2x00/rt2x00mac.c
··· 116 116 goto exit_fail; 117 117 118 118 /* 119 - * Determine which queue to put packet on. 119 + * Use the ATIM queue if appropriate and present. 120 120 */ 121 121 if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM && 122 122 test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) 123 - queue = rt2x00queue_get_queue(rt2x00dev, QID_ATIM); 124 - else 125 - queue = rt2x00queue_get_queue(rt2x00dev, qid); 123 + qid = QID_ATIM; 124 + 125 + queue = rt2x00queue_get_tx_queue(rt2x00dev, qid); 126 126 if (unlikely(!queue)) { 127 127 ERROR(rt2x00dev, 128 128 "Attempt to send packet over invalid queue %d.\n" ··· 149 149 goto exit_fail; 150 150 } 151 151 152 - if (rt2x00queue_write_tx_frame(queue, skb, false)) 152 + if (unlikely(rt2x00queue_write_tx_frame(queue, skb, false))) 153 153 goto exit_fail; 154 154 155 155 if (rt2x00queue_threshold(queue)) ··· 190 190 { 191 191 struct rt2x00_dev *rt2x00dev = hw->priv; 192 192 struct rt2x00_intf *intf = vif_to_intf(vif); 193 - struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, QID_BEACON); 193 + struct data_queue *queue = rt2x00dev->bcn; 194 194 struct queue_entry *entry = NULL; 195 195 unsigned int i; 196 196 ··· 518 518 519 519 crypto.cmd = cmd; 520 520 521 - if (sta) { 522 - /* some drivers need the AID */ 523 - crypto.aid = sta->aid; 521 + if (sta) 524 522 crypto.address = sta->addr; 525 - } else 523 + else 526 524 crypto.address = bcast_addr; 527 525 528 526 if (crypto.cipher == CIPHER_TKIP) ··· 690 692 struct rt2x00_dev *rt2x00dev = hw->priv; 691 693 struct data_queue *queue; 692 694 693 - queue = rt2x00queue_get_queue(rt2x00dev, queue_idx); 695 + queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx); 694 696 if (unlikely(!queue)) 695 697 return -EINVAL; 696 698
+51 -58
drivers/net/wireless/rt2x00/rt2x00queue.c
··· 221 221 struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif); 222 222 unsigned long irqflags; 223 223 224 - if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) || 225 - unlikely(!tx_info->control.vif)) 224 + if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)) 225 + return; 226 + 227 + __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); 228 + 229 + if (!test_bit(DRIVER_REQUIRE_SW_SEQNO, &entry->queue->rt2x00dev->flags)) 226 230 return; 227 231 228 232 /* 229 - * Hardware should insert sequence counter. 230 - * FIXME: We insert a software sequence counter first for 231 - * hardware that doesn't support hardware sequence counting. 233 + * The hardware is not able to insert a sequence number. Assign a 234 + * software generated one here. 232 235 * 233 236 * This is wrong because beacons are not getting sequence 234 237 * numbers assigned properly. ··· 249 246 250 247 spin_unlock_irqrestore(&intf->seqlock, irqflags); 251 248 252 - __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); 253 249 } 254 250 255 251 static void rt2x00queue_create_tx_descriptor_plcp(struct queue_entry *entry, ··· 262 260 unsigned int duration; 263 261 unsigned int residual; 264 262 263 + /* 264 + * Determine with what IFS priority this frame should be send. 265 + * Set ifs to IFS_SIFS when the this is not the first fragment, 266 + * or this fragment came after RTS/CTS. 267 + */ 268 + if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)) 269 + txdesc->u.plcp.ifs = IFS_BACKOFF; 270 + else 271 + txdesc->u.plcp.ifs = IFS_SIFS; 272 + 265 273 /* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */ 266 274 data_length = entry->skb->len + 4; 267 275 data_length += rt2x00crypto_tx_overhead(rt2x00dev, entry->skb); ··· 280 268 * PLCP setup 281 269 * Length calculation depends on OFDM/CCK rate. 
282 270 */ 283 - txdesc->signal = hwrate->plcp; 284 - txdesc->service = 0x04; 271 + txdesc->u.plcp.signal = hwrate->plcp; 272 + txdesc->u.plcp.service = 0x04; 285 273 286 274 if (hwrate->flags & DEV_RATE_OFDM) { 287 - txdesc->length_high = (data_length >> 6) & 0x3f; 288 - txdesc->length_low = data_length & 0x3f; 275 + txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f; 276 + txdesc->u.plcp.length_low = data_length & 0x3f; 289 277 } else { 290 278 /* 291 279 * Convert length to microseconds. ··· 300 288 * Check if we need to set the Length Extension 301 289 */ 302 290 if (hwrate->bitrate == 110 && residual <= 30) 303 - txdesc->service |= 0x80; 291 + txdesc->u.plcp.service |= 0x80; 304 292 } 305 293 306 - txdesc->length_high = (duration >> 8) & 0xff; 307 - txdesc->length_low = duration & 0xff; 294 + txdesc->u.plcp.length_high = (duration >> 8) & 0xff; 295 + txdesc->u.plcp.length_low = duration & 0xff; 308 296 309 297 /* 310 298 * When preamble is enabled we should set the 311 299 * preamble bit for the signal. 312 300 */ 313 301 if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) 314 - txdesc->signal |= 0x08; 302 + txdesc->u.plcp.signal |= 0x08; 315 303 } 316 304 } 317 305 ··· 321 309 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 322 310 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); 323 311 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data; 324 - struct ieee80211_rate *rate = 325 - ieee80211_get_tx_rate(rt2x00dev->hw, tx_info); 326 - const struct rt2x00_rate *hwrate; 312 + struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0]; 313 + struct ieee80211_rate *rate; 314 + const struct rt2x00_rate *hwrate = NULL; 327 315 328 316 memset(txdesc, 0, sizeof(*txdesc)); 329 317 ··· 383 371 ieee80211_is_probe_resp(hdr->frame_control)) 384 372 __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags); 385 373 386 - /* 387 - * Determine with what IFS priority this frame should be send. 
388 - * Set ifs to IFS_SIFS when the this is not the first fragment, 389 - * or this fragment came after RTS/CTS. 390 - */ 391 374 if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) && 392 - !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) { 375 + !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) 393 376 __set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags); 394 - txdesc->ifs = IFS_BACKOFF; 395 - } else 396 - txdesc->ifs = IFS_SIFS; 397 377 398 378 /* 399 379 * Determine rate modulation. 400 380 */ 401 - hwrate = rt2x00_get_rate(rate->hw_value); 402 - txdesc->rate_mode = RATE_MODE_CCK; 403 - if (hwrate->flags & DEV_RATE_OFDM) 404 - txdesc->rate_mode = RATE_MODE_OFDM; 381 + if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD) 382 + txdesc->rate_mode = RATE_MODE_HT_GREENFIELD; 383 + else if (txrate->flags & IEEE80211_TX_RC_MCS) 384 + txdesc->rate_mode = RATE_MODE_HT_MIX; 385 + else { 386 + rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info); 387 + hwrate = rt2x00_get_rate(rate->hw_value); 388 + if (hwrate->flags & DEV_RATE_OFDM) 389 + txdesc->rate_mode = RATE_MODE_OFDM; 390 + else 391 + txdesc->rate_mode = RATE_MODE_CCK; 392 + } 405 393 406 394 /* 407 395 * Apply TX descriptor handling by components 408 396 */ 409 397 rt2x00crypto_create_tx_descriptor(entry, txdesc); 410 - rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate); 411 398 rt2x00queue_create_tx_descriptor_seq(entry, txdesc); 412 - rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate); 399 + 400 + if (test_bit(DRIVER_REQUIRE_HT_TX_DESC, &rt2x00dev->flags)) 401 + rt2x00ht_create_tx_descriptor(entry, txdesc, hwrate); 402 + else 403 + rt2x00queue_create_tx_descriptor_plcp(entry, txdesc, hwrate); 413 404 } 414 405 415 406 static int rt2x00queue_write_tx_data(struct queue_entry *entry, ··· 704 689 } 705 690 } 706 691 EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry); 707 - 708 - struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev, 709 - const enum data_queue_qid queue) 710 - { 711 - int atim = 
test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags); 712 - 713 - if (queue == QID_RX) 714 - return rt2x00dev->rx; 715 - 716 - if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx) 717 - return &rt2x00dev->tx[queue]; 718 - 719 - if (!rt2x00dev->bcn) 720 - return NULL; 721 - 722 - if (queue == QID_BEACON) 723 - return &rt2x00dev->bcn[0]; 724 - else if (queue == QID_ATIM && atim) 725 - return &rt2x00dev->bcn[1]; 726 - 727 - return NULL; 728 - } 729 - EXPORT_SYMBOL_GPL(rt2x00queue_get_queue); 730 692 731 693 struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue, 732 694 enum queue_index index) ··· 1080 1088 goto exit; 1081 1089 1082 1090 if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) { 1083 - status = rt2x00queue_alloc_entries(&rt2x00dev->bcn[1], 1091 + status = rt2x00queue_alloc_entries(rt2x00dev->atim, 1084 1092 rt2x00dev->ops->atim); 1085 1093 if (status) 1086 1094 goto exit; ··· 1154 1162 rt2x00dev->rx = queue; 1155 1163 rt2x00dev->tx = &queue[1]; 1156 1164 rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues]; 1165 + rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL; 1157 1166 1158 1167 /* 1159 1168 * Initialize queue parameters. ··· 1171 1178 tx_queue_for_each(rt2x00dev, queue) 1172 1179 rt2x00queue_init(rt2x00dev, queue, qid++); 1173 1180 1174 - rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[0], QID_BEACON); 1181 + rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON); 1175 1182 if (req_atim) 1176 - rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[1], QID_ATIM); 1183 + rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM); 1177 1184 1178 1185 return 0; 1179 1186 }
+18 -11
drivers/net/wireless/rt2x00/rt2x00queue.h
··· 305 305 u16 length; 306 306 u16 header_length; 307 307 308 - u16 length_high; 309 - u16 length_low; 310 - u16 signal; 311 - u16 service; 308 + union { 309 + struct { 310 + u16 length_high; 311 + u16 length_low; 312 + u16 signal; 313 + u16 service; 314 + enum ifs ifs; 315 + } plcp; 312 316 313 - u16 mcs; 314 - u16 stbc; 315 - u16 ba_size; 316 - u16 rate_mode; 317 - u16 mpdu_density; 317 + struct { 318 + u16 mcs; 319 + u8 stbc; 320 + u8 ba_size; 321 + u8 mpdu_density; 322 + enum txop txop; 323 + } ht; 324 + } u; 325 + 326 + enum rate_modulation rate_mode; 318 327 319 328 short retry_limit; 320 - short ifs; 321 - short txop; 322 329 323 330 enum cipher cipher; 324 331 u16 key_idx;
+15 -16
drivers/net/wireless/rt2x00/rt61pci.c
··· 1898 1898 rt2x00_desc_write(txd, 1, word); 1899 1899 1900 1900 rt2x00_desc_read(txd, 2, &word); 1901 - rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->signal); 1902 - rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->service); 1903 - rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->length_low); 1904 - rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high); 1901 + rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->u.plcp.signal); 1902 + rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->u.plcp.service); 1903 + rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, 1904 + txdesc->u.plcp.length_low); 1905 + rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, 1906 + txdesc->u.plcp.length_high); 1905 1907 rt2x00_desc_write(txd, 2, word); 1906 1908 1907 1909 if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) { ··· 1948 1946 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags)); 1949 1947 rt2x00_set_field32(&word, TXD_W0_OFDM, 1950 1948 (txdesc->rate_mode == RATE_MODE_OFDM)); 1951 - rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1949 + rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs); 1952 1950 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, 1953 1951 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags)); 1954 1952 rt2x00_set_field32(&word, TXD_W0_TKIP_MIC, ··· 2192 2190 * queue identication number. 2193 2191 */ 2194 2192 type = rt2x00_get_field32(reg, STA_CSR4_PID_TYPE); 2195 - queue = rt2x00queue_get_queue(rt2x00dev, type); 2193 + queue = rt2x00queue_get_tx_queue(rt2x00dev, type); 2196 2194 if (unlikely(!queue)) 2197 2195 continue; 2198 2196 ··· 2263 2261 static void rt61pci_enable_interrupt(struct rt2x00_dev *rt2x00dev, 2264 2262 struct rt2x00_field32 irq_field) 2265 2263 { 2266 - unsigned long flags; 2267 2264 u32 reg; 2268 2265 2269 2266 /* 2270 2267 * Enable a single interrupt. The interrupt mask register 2271 2268 * access needs locking. 
2272 2269 */ 2273 - spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags); 2270 + spin_lock_irq(&rt2x00dev->irqmask_lock); 2274 2271 2275 2272 rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg); 2276 2273 rt2x00_set_field32(&reg, irq_field, 0); 2277 2274 rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg); 2278 2275 2279 - spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags); 2276 + spin_unlock_irq(&rt2x00dev->irqmask_lock); 2280 2277 } 2281 2278 2282 2279 static void rt61pci_enable_mcu_interrupt(struct rt2x00_dev *rt2x00dev, 2283 2280 struct rt2x00_field32 irq_field) 2284 2281 { 2285 - unsigned long flags; 2286 2282 u32 reg; 2287 2283 2288 2284 /* 2289 2285 * Enable a single MCU interrupt. The interrupt mask register 2290 2286 * access needs locking. 2291 2287 */ 2292 - spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags); 2288 + spin_lock_irq(&rt2x00dev->irqmask_lock); 2293 2289 2294 2290 rt2x00pci_register_read(rt2x00dev, MCU_INT_MASK_CSR, &reg); 2295 2291 rt2x00_set_field32(&reg, irq_field, 0); 2296 2292 rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg); 2297 2293 2298 - spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags); 2294 + spin_unlock_irq(&rt2x00dev->irqmask_lock); 2299 2295 } 2300 2296 2301 2297 static void rt61pci_txstatus_tasklet(unsigned long data) ··· 2331 2331 struct rt2x00_dev *rt2x00dev = dev_instance; 2332 2332 u32 reg_mcu, mask_mcu; 2333 2333 u32 reg, mask; 2334 - unsigned long flags; 2335 2334 2336 2335 /* 2337 2336 * Get the interrupt sources & saved to local variable. ··· 2375 2376 * Disable all interrupts for which a tasklet was scheduled right now, 2376 2377 * the tasklet will reenable the appropriate interrupts. 
2377 2378 */ 2378 - spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags); 2379 + spin_lock(&rt2x00dev->irqmask_lock); 2379 2380 2380 2381 rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg); 2381 2382 reg |= mask; ··· 2385 2386 reg |= mask_mcu; 2386 2387 rt2x00pci_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg); 2387 2388 2388 - spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags); 2389 + spin_unlock(&rt2x00dev->irqmask_lock); 2389 2390 2390 2391 return IRQ_HANDLED; 2391 2392 } ··· 2916 2917 if (queue_idx >= 4) 2917 2918 return 0; 2918 2919 2919 - queue = rt2x00queue_get_queue(rt2x00dev, queue_idx); 2920 + queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx); 2920 2921 2921 2922 /* Update WMM TXOP register */ 2922 2923 offset = AC_TXOP_CSR0 + (sizeof(u32) * (!!(queue_idx & 2)));
+8 -6
drivers/net/wireless/rt2x00/rt73usb.c
··· 1474 1474 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags)); 1475 1475 rt2x00_set_field32(&word, TXD_W0_OFDM, 1476 1476 (txdesc->rate_mode == RATE_MODE_OFDM)); 1477 - rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1477 + rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs); 1478 1478 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, 1479 1479 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags)); 1480 1480 rt2x00_set_field32(&word, TXD_W0_TKIP_MIC, ··· 1499 1499 rt2x00_desc_write(txd, 1, word); 1500 1500 1501 1501 rt2x00_desc_read(txd, 2, &word); 1502 - rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->signal); 1503 - rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->service); 1504 - rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->length_low); 1505 - rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high); 1502 + rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->u.plcp.signal); 1503 + rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->u.plcp.service); 1504 + rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, 1505 + txdesc->u.plcp.length_low); 1506 + rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, 1507 + txdesc->u.plcp.length_high); 1506 1508 rt2x00_desc_write(txd, 2, word); 1507 1509 1508 1510 if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) { ··· 2249 2247 if (queue_idx >= 4) 2250 2248 return 0; 2251 2249 2252 - queue = rt2x00queue_get_queue(rt2x00dev, queue_idx); 2250 + queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx); 2253 2251 2254 2252 /* Update WMM TXOP register */ 2255 2253 offset = AC_TXOP_CSR0 + (sizeof(u32) * (!!(queue_idx & 2)));
+20 -5
drivers/net/wireless/rtl818x/rtl8187/dev.c
··· 869 869 /* The RTL8187 returns the retry count through register 0xFFFA. In 870 870 * addition, it appears to be a cumulative retry count, not the 871 871 * value for the current TX packet. When multiple TX entries are 872 - * queued, the retry count will be valid for the last one in the queue. 873 - * The "error" should not matter for purposes of rate setting. */ 872 + * waiting in the queue, the retry count will be the total for all. 873 + * The "error" may matter for purposes of rate setting, but there is 874 + * no other choice with this hardware. 875 + */ 874 876 struct rtl8187_priv *priv = container_of(work, struct rtl8187_priv, 875 877 work.work); 876 878 struct ieee80211_tx_info *info; 877 879 struct ieee80211_hw *dev = priv->dev; 878 880 static u16 retry; 879 881 u16 tmp; 882 + u16 avg_retry; 883 + int length; 880 884 881 885 mutex_lock(&priv->conf_mutex); 882 886 tmp = rtl818x_ioread16(priv, (__le16 *)0xFFFA); 887 + length = skb_queue_len(&priv->b_tx_status.queue); 888 + if (unlikely(!length)) 889 + length = 1; 890 + if (unlikely(tmp < retry)) 891 + tmp = retry; 892 + avg_retry = (tmp - retry) / length; 883 893 while (skb_queue_len(&priv->b_tx_status.queue) > 0) { 884 894 struct sk_buff *old_skb; 885 895 886 896 old_skb = skb_dequeue(&priv->b_tx_status.queue); 887 897 info = IEEE80211_SKB_CB(old_skb); 888 - info->status.rates[0].count = tmp - retry + 1; 898 + info->status.rates[0].count = avg_retry + 1; 899 + if (info->status.rates[0].count > RETRY_COUNT) 900 + info->flags &= ~IEEE80211_TX_STAT_ACK; 889 901 ieee80211_tx_status_irqsafe(dev, old_skb); 890 902 } 891 903 retry = tmp; ··· 943 931 rtl818x_iowrite32(priv, &priv->map->TX_CONF, 944 932 RTL818X_TX_CONF_HW_SEQNUM | 945 933 RTL818X_TX_CONF_DISREQQSIZE | 946 - (7 << 8 /* short retry limit */) | 947 - (7 << 0 /* long retry limit */) | 934 + (RETRY_COUNT << 8 /* short retry limit */) | 935 + (RETRY_COUNT << 0 /* long retry limit */) | 948 936 (7 << 21 /* MAX TX DMA */)); 949 937 
rtl8187_init_urbs(dev); 950 938 rtl8187b_init_status_urb(dev); ··· 1388 1376 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 1389 1377 IEEE80211_HW_SIGNAL_DBM | 1390 1378 IEEE80211_HW_RX_INCLUDES_FCS; 1379 + /* Initialize rate-control variables */ 1380 + dev->max_rates = 1; 1381 + dev->max_rate_tries = RETRY_COUNT; 1391 1382 1392 1383 eeprom.data = dev; 1393 1384 eeprom.register_read = rtl8187_eeprom_register_read;
+2
drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
··· 35 35 #define RFKILL_MASK_8187_89_97 0x2 36 36 #define RFKILL_MASK_8198 0x4 37 37 38 + #define RETRY_COUNT 7 39 + 38 40 struct rtl8187_rx_info { 39 41 struct urb *urb; 40 42 struct ieee80211_hw *dev;
+6 -3
drivers/net/wireless/rtlwifi/Makefile
··· 7 7 efuse.o \ 8 8 ps.o \ 9 9 rc.o \ 10 - regd.o \ 11 - usb.o 10 + regd.o 12 11 13 12 rtl8192c_common-objs += \ 14 13 15 - ifeq ($(CONFIG_PCI),y) 14 + ifneq ($(CONFIG_PCI),) 16 15 rtlwifi-objs += pci.o 16 + endif 17 + 18 + ifneq ($(CONFIG_USB),) 19 + rtlwifi-objs += usb.o 17 20 endif 18 21 19 22 obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c/
+1 -2
drivers/net/wireless/wl1251/wl12xx_80211.h
··· 54 54 55 55 /* This really should be 8, but not for our firmware */ 56 56 #define MAX_SUPPORTED_RATES 32 57 - #define COUNTRY_STRING_LEN 3 58 57 #define MAX_COUNTRY_TRIPLETS 32 59 58 60 59 /* Headers */ ··· 97 98 98 99 struct wl12xx_ie_country { 99 100 struct wl12xx_ie_header header; 100 - u8 country_string[COUNTRY_STRING_LEN]; 101 + u8 country_string[IEEE80211_COUNTRY_STRING_LEN]; 101 102 struct country_triplet triplets[MAX_COUNTRY_TRIPLETS]; 102 103 } __packed; 103 104
+2 -1
drivers/net/wireless/wl12xx/acx.c
··· 1361 1361 acx->ht_protection = 1362 1362 (u8)(ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION); 1363 1363 acx->rifs_mode = 0; 1364 - acx->gf_protection = 0; 1364 + acx->gf_protection = 1365 + !!(ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); 1365 1366 acx->ht_tx_burst_limit = 0; 1366 1367 acx->dual_cts_protection = 0; 1367 1368
+3
drivers/net/wireless/wl12xx/boot.c
··· 488 488 fuse = (fuse & PG_VER_MASK) >> PG_VER_OFFSET; 489 489 490 490 wl->hw_pg_ver = (s8)fuse; 491 + 492 + if (((wl->hw_pg_ver & PG_MAJOR_VER_MASK) >> PG_MAJOR_VER_OFFSET) < 3) 493 + wl->quirks |= WL12XX_QUIRK_END_OF_TRANSACTION; 491 494 } 492 495 493 496 /* uploads NVS and firmware */
+5
drivers/net/wireless/wl12xx/boot.h
··· 59 59 #define PG_VER_MASK 0x3c 60 60 #define PG_VER_OFFSET 2 61 61 62 + #define PG_MAJOR_VER_MASK 0x3 63 + #define PG_MAJOR_VER_OFFSET 0x0 64 + #define PG_MINOR_VER_MASK 0xc 65 + #define PG_MINOR_VER_OFFSET 0x2 66 + 62 67 #define CMD_MBOX_ADDRESS 0x407B4 63 68 64 69 #define POLARITY_LOW BIT(1)
+1
drivers/net/wireless/wl12xx/cmd.c
··· 63 63 cmd->status = 0; 64 64 65 65 WARN_ON(len % 4 != 0); 66 + WARN_ON(test_bit(WL1271_FLAG_IN_ELP, &wl->flags)); 66 67 67 68 wl1271_write(wl, wl->cmd_box_addr, buf, len, false); 68 69
+1 -1
drivers/net/wireless/wl12xx/debugfs.c
··· 99 99 100 100 mutex_lock(&wl->mutex); 101 101 102 - ret = wl1271_ps_elp_wakeup(wl, false); 102 + ret = wl1271_ps_elp_wakeup(wl); 103 103 if (ret < 0) 104 104 goto out; 105 105
+1
drivers/net/wireless/wl12xx/io.h
··· 168 168 int wl1271_init_ieee80211(struct wl1271 *wl); 169 169 struct ieee80211_hw *wl1271_alloc_hw(void); 170 170 int wl1271_free_hw(struct wl1271 *wl); 171 + irqreturn_t wl1271_irq(int irq, void *data); 171 172 172 173 #endif
+118 -64
drivers/net/wireless/wl12xx/main.c
··· 304 304 .rx_block_num = 70, 305 305 .tx_min_block_num = 40, 306 306 .dynamic_memory = 0, 307 - .min_req_tx_blocks = 104, 307 + .min_req_tx_blocks = 100, 308 308 .min_req_rx_blocks = 22, 309 309 .tx_min = 27, 310 310 } ··· 374 374 if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) 375 375 goto out; 376 376 377 - ret = wl1271_ps_elp_wakeup(wl, false); 377 + ret = wl1271_ps_elp_wakeup(wl); 378 378 if (ret < 0) 379 379 goto out; 380 380 ··· 635 635 (s64)le32_to_cpu(status->fw_localtime); 636 636 } 637 637 638 - #define WL1271_IRQ_MAX_LOOPS 10 638 + static void wl1271_flush_deferred_work(struct wl1271 *wl) 639 + { 640 + struct sk_buff *skb; 639 641 640 - static void wl1271_irq_work(struct work_struct *work) 642 + /* Pass all received frames to the network stack */ 643 + while ((skb = skb_dequeue(&wl->deferred_rx_queue))) 644 + ieee80211_rx_ni(wl->hw, skb); 645 + 646 + /* Return sent skbs to the network stack */ 647 + while ((skb = skb_dequeue(&wl->deferred_tx_queue))) 648 + ieee80211_tx_status(wl->hw, skb); 649 + } 650 + 651 + static void wl1271_netstack_work(struct work_struct *work) 652 + { 653 + struct wl1271 *wl = 654 + container_of(work, struct wl1271, netstack_work); 655 + 656 + do { 657 + wl1271_flush_deferred_work(wl); 658 + } while (skb_queue_len(&wl->deferred_rx_queue)); 659 + } 660 + 661 + #define WL1271_IRQ_MAX_LOOPS 256 662 + 663 + irqreturn_t wl1271_irq(int irq, void *cookie) 641 664 { 642 665 int ret; 643 666 u32 intr; 644 667 int loopcount = WL1271_IRQ_MAX_LOOPS; 668 + struct wl1271 *wl = (struct wl1271 *)cookie; 669 + bool done = false; 670 + unsigned int defer_count; 645 671 unsigned long flags; 646 - struct wl1271 *wl = 647 - container_of(work, struct wl1271, irq_work); 672 + 673 + /* TX might be handled here, avoid redundant work */ 674 + set_bit(WL1271_FLAG_TX_PENDING, &wl->flags); 675 + cancel_work_sync(&wl->tx_work); 648 676 649 677 mutex_lock(&wl->mutex); 650 678 ··· 681 653 if (unlikely(wl->state == WL1271_STATE_OFF)) 682 654 goto out; 
683 655 684 - ret = wl1271_ps_elp_wakeup(wl, true); 656 + ret = wl1271_ps_elp_wakeup(wl); 685 657 if (ret < 0) 686 658 goto out; 687 659 688 - spin_lock_irqsave(&wl->wl_lock, flags); 689 - while (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags) && loopcount) { 690 - clear_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags); 691 - spin_unlock_irqrestore(&wl->wl_lock, flags); 692 - loopcount--; 660 + while (!done && loopcount--) { 661 + /* 662 + * In order to avoid a race with the hardirq, clear the flag 663 + * before acknowledging the chip. Since the mutex is held, 664 + * wl1271_ps_elp_wakeup cannot be called concurrently. 665 + */ 666 + clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags); 667 + smp_mb__after_clear_bit(); 693 668 694 669 wl1271_fw_status(wl, wl->fw_status); 695 670 intr = le32_to_cpu(wl->fw_status->common.intr); 671 + intr &= WL1271_INTR_MASK; 696 672 if (!intr) { 697 - wl1271_debug(DEBUG_IRQ, "Zero interrupt received."); 698 - spin_lock_irqsave(&wl->wl_lock, flags); 673 + done = true; 699 674 continue; 700 675 } 701 - 702 - intr &= WL1271_INTR_MASK; 703 676 704 677 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) { 705 678 wl1271_error("watchdog interrupt received! " ··· 711 682 goto out; 712 683 } 713 684 714 - if (intr & WL1271_ACX_INTR_DATA) { 685 + if (likely(intr & WL1271_ACX_INTR_DATA)) { 715 686 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA"); 687 + 688 + wl1271_rx(wl, &wl->fw_status->common); 689 + 690 + /* Check if any tx blocks were freed */ 691 + spin_lock_irqsave(&wl->wl_lock, flags); 692 + if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) && 693 + wl->tx_queue_count) { 694 + spin_unlock_irqrestore(&wl->wl_lock, flags); 695 + /* 696 + * In order to avoid starvation of the TX path, 697 + * call the work function directly. 
698 + */ 699 + wl1271_tx_work_locked(wl); 700 + } else { 701 + spin_unlock_irqrestore(&wl->wl_lock, flags); 702 + } 716 703 717 704 /* check for tx results */ 718 705 if (wl->fw_status->common.tx_results_counter != 719 706 (wl->tx_results_count & 0xff)) 720 707 wl1271_tx_complete(wl); 721 708 722 - /* Check if any tx blocks were freed */ 723 - if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) && 724 - wl->tx_queue_count) { 725 - /* 726 - * In order to avoid starvation of the TX path, 727 - * call the work function directly. 728 - */ 729 - wl1271_tx_work_locked(wl); 730 - } 731 - 732 - wl1271_rx(wl, &wl->fw_status->common); 709 + /* Make sure the deferred queues don't get too long */ 710 + defer_count = skb_queue_len(&wl->deferred_tx_queue) + 711 + skb_queue_len(&wl->deferred_rx_queue); 712 + if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT) 713 + wl1271_flush_deferred_work(wl); 733 714 } 734 715 735 716 if (intr & WL1271_ACX_INTR_EVENT_A) { ··· 758 719 759 720 if (intr & WL1271_ACX_INTR_HW_AVAILABLE) 760 721 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE"); 761 - 762 - spin_lock_irqsave(&wl->wl_lock, flags); 763 722 } 764 - 765 - if (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags)) 766 - ieee80211_queue_work(wl->hw, &wl->irq_work); 767 - else 768 - clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags); 769 - spin_unlock_irqrestore(&wl->wl_lock, flags); 770 723 771 724 wl1271_ps_elp_sleep(wl); 772 725 773 726 out: 727 + spin_lock_irqsave(&wl->wl_lock, flags); 728 + /* In case TX was not handled here, queue TX work */ 729 + clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags); 730 + if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) && 731 + wl->tx_queue_count) 732 + ieee80211_queue_work(wl->hw, &wl->tx_work); 733 + spin_unlock_irqrestore(&wl->wl_lock, flags); 734 + 774 735 mutex_unlock(&wl->mutex); 736 + 737 + return IRQ_HANDLED; 775 738 } 739 + EXPORT_SYMBOL_GPL(wl1271_irq); 776 740 777 741 static int wl1271_fetch_firmware(struct wl1271 *wl) 778 742 { ··· 1016 974 goto out; 
1017 975 1018 976 irq_disable: 1019 - wl1271_disable_interrupts(wl); 1020 977 mutex_unlock(&wl->mutex); 1021 978 /* Unlocking the mutex in the middle of handling is 1022 979 inherently unsafe. In this case we deem it safe to do, ··· 1024 983 work function will not do anything.) Also, any other 1025 984 possible concurrent operations will fail due to the 1026 985 current state, hence the wl1271 struct should be safe. */ 1027 - cancel_work_sync(&wl->irq_work); 986 + wl1271_disable_interrupts(wl); 987 + wl1271_flush_deferred_work(wl); 988 + cancel_work_sync(&wl->netstack_work); 1028 989 mutex_lock(&wl->mutex); 1029 990 power_off: 1030 991 wl1271_power_off(wl); ··· 1053 1010 goto out; 1054 1011 } 1055 1012 1056 - wl1271_disable_interrupts(wl); 1057 1013 wl1271_power_off(wl); 1058 1014 1059 1015 wl->state = WL1271_STATE_OFF; 1060 1016 wl->rx_counter = 0; 1061 1017 1062 1018 mutex_unlock(&wl->mutex); 1063 - cancel_work_sync(&wl->irq_work); 1019 + wl1271_disable_interrupts(wl); 1020 + wl1271_flush_deferred_work(wl); 1021 + cancel_work_sync(&wl->netstack_work); 1064 1022 cancel_work_sync(&wl->recovery_work); 1065 1023 mutex_lock(&wl->mutex); 1066 1024 out: ··· 1085 1041 int q; 1086 1042 u8 hlid = 0; 1087 1043 1044 + q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); 1045 + 1046 + if (wl->bss_type == BSS_TYPE_AP_BSS) 1047 + hlid = wl1271_tx_get_hlid(skb); 1048 + 1088 1049 spin_lock_irqsave(&wl->wl_lock, flags); 1050 + 1089 1051 wl->tx_queue_count++; 1090 1052 1091 1053 /* ··· 1104 1054 set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags); 1105 1055 } 1106 1056 1107 - spin_unlock_irqrestore(&wl->wl_lock, flags); 1108 - 1109 1057 /* queue the packet */ 1110 - q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); 1111 1058 if (wl->bss_type == BSS_TYPE_AP_BSS) { 1112 - hlid = wl1271_tx_get_hlid(skb); 1113 1059 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q); 1114 1060 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb); 1115 1061 } else { ··· 1117 1071 * before that, the 
tx_work will not be initialized! 1118 1072 */ 1119 1073 1120 - if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) 1074 + if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) && 1075 + !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags)) 1121 1076 ieee80211_queue_work(wl->hw, &wl->tx_work); 1077 + 1078 + spin_unlock_irqrestore(&wl->wl_lock, flags); 1122 1079 } 1123 1080 1124 1081 static struct notifier_block wl1271_dev_notifier = { ··· 1218 1169 break; 1219 1170 1220 1171 irq_disable: 1221 - wl1271_disable_interrupts(wl); 1222 1172 mutex_unlock(&wl->mutex); 1223 1173 /* Unlocking the mutex in the middle of handling is 1224 1174 inherently unsafe. In this case we deem it safe to do, ··· 1226 1178 work function will not do anything.) Also, any other 1227 1179 possible concurrent operations will fail due to the 1228 1180 current state, hence the wl1271 struct should be safe. */ 1229 - cancel_work_sync(&wl->irq_work); 1181 + wl1271_disable_interrupts(wl); 1182 + wl1271_flush_deferred_work(wl); 1183 + cancel_work_sync(&wl->netstack_work); 1230 1184 mutex_lock(&wl->mutex); 1231 1185 power_off: 1232 1186 wl1271_power_off(wl); ··· 1294 1244 1295 1245 wl->state = WL1271_STATE_OFF; 1296 1246 1297 - wl1271_disable_interrupts(wl); 1298 - 1299 1247 mutex_unlock(&wl->mutex); 1300 1248 1249 + wl1271_disable_interrupts(wl); 1250 + wl1271_flush_deferred_work(wl); 1301 1251 cancel_delayed_work_sync(&wl->scan_complete_work); 1302 - cancel_work_sync(&wl->irq_work); 1252 + cancel_work_sync(&wl->netstack_work); 1303 1253 cancel_work_sync(&wl->tx_work); 1304 1254 cancel_delayed_work_sync(&wl->pspoll_work); 1305 1255 cancel_delayed_work_sync(&wl->elp_work); ··· 1575 1525 1576 1526 is_ap = (wl->bss_type == BSS_TYPE_AP_BSS); 1577 1527 1578 - ret = wl1271_ps_elp_wakeup(wl, false); 1528 + ret = wl1271_ps_elp_wakeup(wl); 1579 1529 if (ret < 0) 1580 1530 goto out; 1581 1531 ··· 1731 1681 if (unlikely(wl->state == WL1271_STATE_OFF)) 1732 1682 goto out; 1733 1683 1734 - ret = wl1271_ps_elp_wakeup(wl, 
false); 1684 + ret = wl1271_ps_elp_wakeup(wl); 1735 1685 if (ret < 0) 1736 1686 goto out; 1737 1687 ··· 1960 1910 goto out_unlock; 1961 1911 } 1962 1912 1963 - ret = wl1271_ps_elp_wakeup(wl, false); 1913 + ret = wl1271_ps_elp_wakeup(wl); 1964 1914 if (ret < 0) 1965 1915 goto out_unlock; 1966 1916 ··· 2063 2013 goto out; 2064 2014 } 2065 2015 2066 - ret = wl1271_ps_elp_wakeup(wl, false); 2016 + ret = wl1271_ps_elp_wakeup(wl); 2067 2017 if (ret < 0) 2068 2018 goto out; 2069 2019 ··· 2089 2039 goto out; 2090 2040 } 2091 2041 2092 - ret = wl1271_ps_elp_wakeup(wl, false); 2042 + ret = wl1271_ps_elp_wakeup(wl); 2093 2043 if (ret < 0) 2094 2044 goto out; 2095 2045 ··· 2117 2067 goto out; 2118 2068 } 2119 2069 2120 - ret = wl1271_ps_elp_wakeup(wl, false); 2070 + ret = wl1271_ps_elp_wakeup(wl); 2121 2071 if (ret < 0) 2122 2072 goto out; 2123 2073 ··· 2596 2546 if (unlikely(wl->state == WL1271_STATE_OFF)) 2597 2547 goto out; 2598 2548 2599 - ret = wl1271_ps_elp_wakeup(wl, false); 2549 + ret = wl1271_ps_elp_wakeup(wl); 2600 2550 if (ret < 0) 2601 2551 goto out; 2602 2552 ··· 2651 2601 conf_tid->apsd_conf[0] = 0; 2652 2602 conf_tid->apsd_conf[1] = 0; 2653 2603 } else { 2654 - ret = wl1271_ps_elp_wakeup(wl, false); 2604 + ret = wl1271_ps_elp_wakeup(wl); 2655 2605 if (ret < 0) 2656 2606 goto out; 2657 2607 ··· 2697 2647 if (unlikely(wl->state == WL1271_STATE_OFF)) 2698 2648 goto out; 2699 2649 2700 - ret = wl1271_ps_elp_wakeup(wl, false); 2650 + ret = wl1271_ps_elp_wakeup(wl); 2701 2651 if (ret < 0) 2702 2652 goto out; 2703 2653 ··· 2786 2736 if (ret < 0) 2787 2737 goto out; 2788 2738 2789 - ret = wl1271_ps_elp_wakeup(wl, false); 2739 + ret = wl1271_ps_elp_wakeup(wl); 2790 2740 if (ret < 0) 2791 2741 goto out_free_sta; 2792 2742 ··· 2829 2779 if (WARN_ON(!test_bit(id, wl->ap_hlid_map))) 2830 2780 goto out; 2831 2781 2832 - ret = wl1271_ps_elp_wakeup(wl, false); 2782 + ret = wl1271_ps_elp_wakeup(wl); 2833 2783 if (ret < 0) 2834 2784 goto out; 2835 2785 ··· 2862 2812 goto out; 
2863 2813 } 2864 2814 2865 - ret = wl1271_ps_elp_wakeup(wl, false); 2815 + ret = wl1271_ps_elp_wakeup(wl); 2866 2816 if (ret < 0) 2867 2817 goto out; 2868 2818 ··· 3226 3176 if (wl->state == WL1271_STATE_OFF) 3227 3177 goto out; 3228 3178 3229 - ret = wl1271_ps_elp_wakeup(wl, false); 3179 + ret = wl1271_ps_elp_wakeup(wl); 3230 3180 if (ret < 0) 3231 3181 goto out; 3232 3182 ··· 3426 3376 for (j = 0; j < AP_MAX_LINKS; j++) 3427 3377 skb_queue_head_init(&wl->links[j].tx_queue[i]); 3428 3378 3379 + skb_queue_head_init(&wl->deferred_rx_queue); 3380 + skb_queue_head_init(&wl->deferred_tx_queue); 3381 + 3429 3382 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work); 3430 3383 INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work); 3431 - INIT_WORK(&wl->irq_work, wl1271_irq_work); 3384 + INIT_WORK(&wl->netstack_work, wl1271_netstack_work); 3432 3385 INIT_WORK(&wl->tx_work, wl1271_tx_work); 3433 3386 INIT_WORK(&wl->recovery_work, wl1271_recovery_work); 3434 3387 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work); ··· 3457 3404 wl->last_tx_hlid = 0; 3458 3405 wl->ap_ps_map = 0; 3459 3406 wl->ap_fw_ps_map = 0; 3407 + wl->quirks = 0; 3460 3408 3461 3409 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map)); 3462 3410 for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
+3 -3
drivers/net/wireless/wl12xx/ps.c
··· 69 69 } 70 70 } 71 71 72 - int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake) 72 + int wl1271_ps_elp_wakeup(struct wl1271 *wl) 73 73 { 74 74 DECLARE_COMPLETION_ONSTACK(compl); 75 75 unsigned long flags; ··· 87 87 * the completion variable in one entity. 88 88 */ 89 89 spin_lock_irqsave(&wl->wl_lock, flags); 90 - if (work_pending(&wl->irq_work) || chip_awake) 90 + if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags)) 91 91 pending = true; 92 92 else 93 93 wl->elp_compl = &compl; ··· 149 149 case STATION_ACTIVE_MODE: 150 150 default: 151 151 wl1271_debug(DEBUG_PSM, "leaving psm"); 152 - ret = wl1271_ps_elp_wakeup(wl, false); 152 + ret = wl1271_ps_elp_wakeup(wl); 153 153 if (ret < 0) 154 154 return ret; 155 155
+1 -1
drivers/net/wireless/wl12xx/ps.h
··· 30 30 int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode, 31 31 u32 rates, bool send); 32 32 void wl1271_ps_elp_sleep(struct wl1271 *wl); 33 - int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake); 33 + int wl1271_ps_elp_wakeup(struct wl1271 *wl); 34 34 void wl1271_elp_work(struct work_struct *work); 35 35 void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues); 36 36 void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid);
+9 -2
drivers/net/wireless/wl12xx/rx.c
··· 129 129 130 130 skb_trim(skb, skb->len - desc->pad_len); 131 131 132 - ieee80211_rx_ni(wl->hw, skb); 132 + skb_queue_tail(&wl->deferred_rx_queue, skb); 133 + ieee80211_queue_work(wl->hw, &wl->netstack_work); 133 134 134 135 return 0; 135 136 } ··· 199 198 pkt_offset += pkt_length; 200 199 } 201 200 } 202 - wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter); 201 + 202 + /* 203 + * Write the driver's packet counter to the FW. This is only required 204 + * for older hardware revisions 205 + */ 206 + if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION) 207 + wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter); 203 208 } 204 209 205 210 void wl1271_set_default_filters(struct wl1271 *wl)
+14 -6
drivers/net/wireless/wl12xx/scan.c
··· 27 27 #include "cmd.h" 28 28 #include "scan.h" 29 29 #include "acx.h" 30 + #include "ps.h" 30 31 31 32 void wl1271_scan_complete_work(struct work_struct *work) 32 33 { ··· 41 40 42 41 mutex_lock(&wl->mutex); 43 42 44 - if (wl->scan.state == WL1271_SCAN_STATE_IDLE) { 45 - mutex_unlock(&wl->mutex); 46 - return; 47 - } 43 + if (wl->state == WL1271_STATE_OFF) 44 + goto out; 45 + 46 + if (wl->scan.state == WL1271_SCAN_STATE_IDLE) 47 + goto out; 48 48 49 49 wl->scan.state = WL1271_SCAN_STATE_IDLE; 50 50 kfree(wl->scan.scanned_ch); ··· 54 52 ieee80211_scan_completed(wl->hw, false); 55 53 56 54 /* restore hardware connection monitoring template */ 57 - if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) 58 - wl1271_cmd_build_ap_probe_req(wl, wl->probereq); 55 + if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) { 56 + if (wl1271_ps_elp_wakeup(wl) == 0) { 57 + wl1271_cmd_build_ap_probe_req(wl, wl->probereq); 58 + wl1271_ps_elp_sleep(wl); 59 + } 60 + } 59 61 60 62 if (wl->scan.failed) { 61 63 wl1271_info("Scan completed due to error."); 62 64 ieee80211_queue_work(wl->hw, &wl->recovery_work); 63 65 } 66 + 67 + out: 64 68 mutex_unlock(&wl->mutex); 65 69 66 70 }
+20 -22
drivers/net/wireless/wl12xx/sdio.c
··· 28 28 #include <linux/mmc/sdio_func.h> 29 29 #include <linux/mmc/sdio_ids.h> 30 30 #include <linux/mmc/card.h> 31 + #include <linux/mmc/host.h> 31 32 #include <linux/gpio.h> 32 33 #include <linux/wl12xx.h> 33 34 #include <linux/pm_runtime.h> ··· 61 60 return &(wl_to_func(wl)->dev); 62 61 } 63 62 64 - static irqreturn_t wl1271_irq(int irq, void *cookie) 63 + static irqreturn_t wl1271_hardirq(int irq, void *cookie) 65 64 { 66 65 struct wl1271 *wl = cookie; 67 66 unsigned long flags; ··· 70 69 71 70 /* complete the ELP completion */ 72 71 spin_lock_irqsave(&wl->wl_lock, flags); 72 + set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags); 73 73 if (wl->elp_compl) { 74 74 complete(wl->elp_compl); 75 75 wl->elp_compl = NULL; 76 76 } 77 - 78 - if (!test_and_set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags)) 79 - ieee80211_queue_work(wl->hw, &wl->irq_work); 80 - set_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags); 81 77 spin_unlock_irqrestore(&wl->wl_lock, flags); 82 78 83 - return IRQ_HANDLED; 79 + return IRQ_WAKE_THREAD; 84 80 } 85 81 86 82 static void wl1271_sdio_disable_interrupts(struct wl1271 *wl) ··· 104 106 int ret; 105 107 struct sdio_func *func = wl_to_func(wl); 106 108 107 - sdio_claim_host(func); 108 - 109 109 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) { 110 110 ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret); 111 111 wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x", ··· 119 123 wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len); 120 124 } 121 125 122 - sdio_release_host(func); 123 - 124 126 if (ret) 125 127 wl1271_error("sdio read failed (%d)", ret); 126 128 } ··· 128 134 { 129 135 int ret; 130 136 struct sdio_func *func = wl_to_func(wl); 131 - 132 - sdio_claim_host(func); 133 137 134 138 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) { 135 139 sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret); ··· 144 152 ret = sdio_memcpy_toio(func, addr, buf, len); 145 153 } 146 154 147 - sdio_release_host(func); 148 - 149 155 if (ret) 150 156 wl1271_error("sdio 
write failed (%d)", ret); 151 157 } ··· 153 163 struct sdio_func *func = wl_to_func(wl); 154 164 int ret; 155 165 156 - /* Power up the card */ 166 + /* Make sure the card will not be powered off by runtime PM */ 157 167 ret = pm_runtime_get_sync(&func->dev); 168 + if (ret < 0) 169 + goto out; 170 + 171 + /* Runtime PM might be disabled, so power up the card manually */ 172 + ret = mmc_power_restore_host(func->card->host); 158 173 if (ret < 0) 159 174 goto out; 160 175 161 176 sdio_claim_host(func); 162 177 sdio_enable_func(func); 163 - sdio_release_host(func); 164 178 165 179 out: 166 180 return ret; ··· 173 179 static int wl1271_sdio_power_off(struct wl1271 *wl) 174 180 { 175 181 struct sdio_func *func = wl_to_func(wl); 182 + int ret; 176 183 177 - sdio_claim_host(func); 178 184 sdio_disable_func(func); 179 185 sdio_release_host(func); 180 186 181 - /* Power down the card */ 187 + /* Runtime PM might be disabled, so power off the card manually */ 188 + ret = mmc_power_save_host(func->card->host); 189 + if (ret < 0) 190 + return ret; 191 + 192 + /* Let runtime PM know the card is powered off */ 182 193 return pm_runtime_put_sync(&func->dev); 183 194 } 184 195 ··· 240 241 wl->irq = wlan_data->irq; 241 242 wl->ref_clock = wlan_data->board_ref_clock; 242 243 243 - ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl); 244 + ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq, 245 + IRQF_TRIGGER_HIGH | IRQF_ONESHOT, 246 + DRIVER_NAME, wl); 244 247 if (ret < 0) { 245 248 wl1271_error("request_irq() failed: %d", ret); 246 249 goto out_free; 247 250 } 248 - 249 - set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); 250 251 251 252 disable_irq(wl->irq); 252 253 ··· 269 270 270 271 out_irq: 271 272 free_irq(wl->irq, wl); 272 - 273 273 274 274 out_free: 275 275 wl1271_free_hw(wl);
+7 -12
drivers/net/wireless/wl12xx/spi.c
··· 320 320 spi_sync(wl_to_spi(wl), &m); 321 321 } 322 322 323 - static irqreturn_t wl1271_irq(int irq, void *cookie) 323 + static irqreturn_t wl1271_hardirq(int irq, void *cookie) 324 324 { 325 - struct wl1271 *wl; 325 + struct wl1271 *wl = cookie; 326 326 unsigned long flags; 327 327 328 328 wl1271_debug(DEBUG_IRQ, "IRQ"); 329 329 330 - wl = cookie; 331 - 332 330 /* complete the ELP completion */ 333 331 spin_lock_irqsave(&wl->wl_lock, flags); 332 + set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags); 334 333 if (wl->elp_compl) { 335 334 complete(wl->elp_compl); 336 335 wl->elp_compl = NULL; 337 336 } 338 - 339 - if (!test_and_set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags)) 340 - ieee80211_queue_work(wl->hw, &wl->irq_work); 341 - set_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags); 342 337 spin_unlock_irqrestore(&wl->wl_lock, flags); 343 338 344 - return IRQ_HANDLED; 339 + return IRQ_WAKE_THREAD; 345 340 } 346 341 347 342 static int wl1271_spi_set_power(struct wl1271 *wl, bool enable) ··· 408 413 goto out_free; 409 414 } 410 415 411 - ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl); 416 + ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq, 417 + IRQF_TRIGGER_HIGH | IRQF_ONESHOT, 418 + DRIVER_NAME, wl); 412 419 if (ret < 0) { 413 420 wl1271_error("request_irq() failed: %d", ret); 414 421 goto out_free; 415 422 } 416 - 417 - set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); 418 423 419 424 disable_irq(wl->irq); 420 425
+34 -13
drivers/net/wireless/wl12xx/tx.c
··· 464 464 465 465 while ((skb = wl1271_skb_dequeue(wl))) { 466 466 if (!woken_up) { 467 - ret = wl1271_ps_elp_wakeup(wl, false); 467 + ret = wl1271_ps_elp_wakeup(wl); 468 468 if (ret < 0) 469 469 goto out_ack; 470 470 woken_up = true; ··· 506 506 sent_packets = true; 507 507 } 508 508 if (sent_packets) { 509 - /* interrupt the firmware with the new packets */ 510 - wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count); 509 + /* 510 + * Interrupt the firmware with the new packets. This is only 511 + * required for older hardware revisions 512 + */ 513 + if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION) 514 + wl1271_write32(wl, WL1271_HOST_WR_ACCESS, 515 + wl->tx_packets_count); 516 + 511 517 wl1271_handle_tx_low_watermark(wl); 512 518 } 513 519 ··· 589 583 result->rate_class_index, result->status); 590 584 591 585 /* return the packet to the stack */ 592 - ieee80211_tx_status(wl->hw, skb); 586 + skb_queue_tail(&wl->deferred_tx_queue, skb); 587 + ieee80211_queue_work(wl->hw, &wl->netstack_work); 593 588 wl1271_free_tx_id(wl, result->id); 594 589 } 595 590 ··· 694 687 */ 695 688 wl1271_handle_tx_low_watermark(wl); 696 689 697 - for (i = 0; i < ACX_TX_DESCRIPTORS; i++) 698 - if (wl->tx_frames[i] != NULL) { 699 - skb = wl->tx_frames[i]; 700 - wl1271_free_tx_id(wl, i); 701 - wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb); 702 - info = IEEE80211_SKB_CB(skb); 703 - info->status.rates[0].idx = -1; 704 - info->status.rates[0].count = 0; 705 - ieee80211_tx_status(wl->hw, skb); 690 + for (i = 0; i < ACX_TX_DESCRIPTORS; i++) { 691 + if (wl->tx_frames[i] == NULL) 692 + continue; 693 + 694 + skb = wl->tx_frames[i]; 695 + wl1271_free_tx_id(wl, i); 696 + wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb); 697 + 698 + /* Remove private headers before passing the skb to mac80211 */ 699 + info = IEEE80211_SKB_CB(skb); 700 + skb_pull(skb, sizeof(struct wl1271_tx_hw_descr)); 701 + if (info->control.hw_key && 702 + info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) { 703 + 
int hdrlen = ieee80211_get_hdrlen_from_skb(skb); 704 + memmove(skb->data + WL1271_TKIP_IV_SPACE, skb->data, 705 + hdrlen); 706 + skb_pull(skb, WL1271_TKIP_IV_SPACE); 706 707 } 708 + 709 + info->status.rates[0].idx = -1; 710 + info->status.rates[0].count = 0; 711 + 712 + ieee80211_tx_status(wl->hw, skb); 713 + } 707 714 } 708 715 709 716 #define WL1271_TX_FLUSH_TIMEOUT 500000
+22 -6
drivers/net/wireless/wl12xx/wl12xx.h
··· 130 130 131 131 132 132 133 - #define WL1271_FW_NAME "wl1271-fw-2.bin" 134 - #define WL1271_AP_FW_NAME "wl1271-fw-ap.bin" 133 + #define WL1271_FW_NAME "ti-connectivity/wl1271-fw-2.bin" 134 + #define WL1271_AP_FW_NAME "ti-connectivity/wl1271-fw-ap.bin" 135 135 136 - #define WL1271_NVS_NAME "wl1271-nvs.bin" 136 + #define WL1271_NVS_NAME "ti-connectivity/wl1271-nvs.bin" 137 137 138 138 #define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff)) 139 139 #define WL1271_TX_SECURITY_HI32(s) ((u32)(((s) >> 16) & 0xffffffff)) ··· 317 317 WL1271_FLAG_JOINED, 318 318 WL1271_FLAG_GPIO_POWER, 319 319 WL1271_FLAG_TX_QUEUE_STOPPED, 320 + WL1271_FLAG_TX_PENDING, 320 321 WL1271_FLAG_IN_ELP, 321 322 WL1271_FLAG_PSM, 322 323 WL1271_FLAG_PSM_REQUESTED, 323 - WL1271_FLAG_IRQ_PENDING, 324 324 WL1271_FLAG_IRQ_RUNNING, 325 325 WL1271_FLAG_IDLE, 326 326 WL1271_FLAG_IDLE_REQUESTED, ··· 404 404 struct sk_buff_head tx_queue[NUM_TX_QUEUES]; 405 405 int tx_queue_count; 406 406 407 + /* Frames received, not handled yet by mac80211 */ 408 + struct sk_buff_head deferred_rx_queue; 409 + 410 + /* Frames sent, not returned yet to mac80211 */ 411 + struct sk_buff_head deferred_tx_queue; 412 + 407 413 struct work_struct tx_work; 408 414 409 415 /* Pending TX frames */ ··· 430 424 /* Intermediate buffer, used for packet aggregation */ 431 425 u8 *aggr_buf; 432 426 433 - /* The target interrupt mask */ 434 - struct work_struct irq_work; 427 + /* Network stack work */ 428 + struct work_struct netstack_work; 435 429 436 430 /* Hardware recovery work */ 437 431 struct work_struct recovery_work; ··· 541 535 542 536 /* AP-mode - a bitmap of links currently in PS mode in mac80211 */ 543 537 unsigned long ap_ps_map; 538 + 539 + /* Quirks of specific hardware revisions */ 540 + unsigned int quirks; 544 541 }; 545 542 546 543 struct wl1271_station { ··· 562 553 #define WL1271_TX_QUEUE_LOW_WATERMARK 10 563 554 #define WL1271_TX_QUEUE_HIGH_WATERMARK 25 564 555 556 + #define WL1271_DEFERRED_QUEUE_LIMIT 64 557 + 565 
558 /* WL1271 needs a 200ms sleep after power on, and a 20ms sleep before power 566 559 on in case it has been shut down shortly before */ 567 560 #define WL1271_PRE_POWER_ON_SLEEP 20 /* in milliseconds */
+1 -2
drivers/net/wireless/wl12xx/wl12xx_80211.h
··· 55 55 56 56 /* This really should be 8, but not for our firmware */ 57 57 #define MAX_SUPPORTED_RATES 32 58 - #define COUNTRY_STRING_LEN 3 59 58 #define MAX_COUNTRY_TRIPLETS 32 60 59 61 60 /* Headers */ ··· 98 99 99 100 struct wl12xx_ie_country { 100 101 struct wl12xx_ie_header header; 101 - u8 country_string[COUNTRY_STRING_LEN]; 102 + u8 country_string[IEEE80211_COUNTRY_STRING_LEN]; 102 103 struct country_triplet triplets[MAX_COUNTRY_TRIPLETS]; 103 104 } __packed; 104 105
+3
include/linux/ieee80211.h
··· 1325 1325 /* Although the spec says 8 I'm seeing 6 in practice */ 1326 1326 #define IEEE80211_COUNTRY_IE_MIN_LEN 6 1327 1327 1328 + /* The Country String field of the element shall be 3 octets in length */ 1329 + #define IEEE80211_COUNTRY_STRING_LEN 3 1330 + 1328 1331 /* 1329 1332 * For regulatory extension stuff see IEEE 802.11-2007 1330 1333 * Annex I (page 1141) and Annex J (page 1147). Also
+17
include/net/bluetooth/hci.h
··· 415 415 __u8 authentication; 416 416 } __packed; 417 417 418 + #define HCI_OP_USER_CONFIRM_REPLY 0x042c 419 + struct hci_cp_user_confirm_reply { 420 + bdaddr_t bdaddr; 421 + } __packed; 422 + struct hci_rp_user_confirm_reply { 423 + __u8 status; 424 + bdaddr_t bdaddr; 425 + } __packed; 426 + 427 + #define HCI_OP_USER_CONFIRM_NEG_REPLY 0x042d 428 + 418 429 #define HCI_OP_IO_CAPABILITY_NEG_REPLY 0x0434 419 430 struct hci_cp_io_capability_neg_reply { 420 431 bdaddr_t bdaddr; ··· 945 934 __u8 capability; 946 935 __u8 oob_data; 947 936 __u8 authentication; 937 + } __packed; 938 + 939 + #define HCI_EV_USER_CONFIRM_REQUEST 0x33 940 + struct hci_ev_user_confirm_req { 941 + bdaddr_t bdaddr; 942 + __le32 passkey; 948 943 } __packed; 949 944 950 945 #define HCI_EV_SIMPLE_PAIR_COMPLETE 0x36
+21
include/net/bluetooth/hci_core.h
··· 248 248 void *priv; 249 249 250 250 struct hci_conn *link; 251 + 252 + void (*connect_cfm_cb) (struct hci_conn *conn, u8 status); 253 + void (*security_cfm_cb) (struct hci_conn *conn, u8 status); 254 + void (*disconn_cfm_cb) (struct hci_conn *conn, u8 reason); 251 255 }; 252 256 253 257 extern struct hci_proto *hci_proto[]; ··· 575 571 hp = hci_proto[HCI_PROTO_SCO]; 576 572 if (hp && hp->connect_cfm) 577 573 hp->connect_cfm(conn, status); 574 + 575 + if (conn->connect_cfm_cb) 576 + conn->connect_cfm_cb(conn, status); 578 577 } 579 578 580 579 static inline int hci_proto_disconn_ind(struct hci_conn *conn) ··· 607 600 hp = hci_proto[HCI_PROTO_SCO]; 608 601 if (hp && hp->disconn_cfm) 609 602 hp->disconn_cfm(conn, reason); 603 + 604 + if (conn->disconn_cfm_cb) 605 + conn->disconn_cfm_cb(conn, reason); 610 606 } 611 607 612 608 static inline void hci_proto_auth_cfm(struct hci_conn *conn, __u8 status) ··· 629 619 hp = hci_proto[HCI_PROTO_SCO]; 630 620 if (hp && hp->security_cfm) 631 621 hp->security_cfm(conn, status, encrypt); 622 + 623 + if (conn->security_cfm_cb) 624 + conn->security_cfm_cb(conn, status); 632 625 } 633 626 634 627 static inline void hci_proto_encrypt_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt) ··· 645 632 hp = hci_proto[HCI_PROTO_SCO]; 646 633 if (hp && hp->security_cfm) 647 634 hp->security_cfm(conn, status, encrypt); 635 + 636 + if (conn->security_cfm_cb) 637 + conn->security_cfm_cb(conn, status); 648 638 } 649 639 650 640 int hci_register_proto(struct hci_proto *hproto); ··· 762 746 int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr); 763 747 int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status); 764 748 int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status); 749 + int mgmt_user_confirm_request(u16 index, bdaddr_t *bdaddr, __le32 value); 750 + int mgmt_user_confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status); 751 + int mgmt_user_confirm_neg_reply_complete(u16 index, bdaddr_t 
*bdaddr, 752 + u8 status); 753 + int mgmt_auth_failed(u16 index, bdaddr_t *bdaddr, u8 status); 765 754 766 755 /* HCI info for socket */ 767 756 #define hci_pi(sk) ((struct hci_pinfo *) sk)
+40 -33
include/net/bluetooth/mgmt.h
··· 21 21 SOFTWARE IS DISCLAIMED. 22 22 */ 23 23 24 + #define MGMT_INDEX_NONE 0xFFFF 25 + 24 26 struct mgmt_hdr { 25 27 __le16 opcode; 28 + __le16 index; 26 29 __le16 len; 27 30 } __packed; 28 - #define MGMT_HDR_SIZE 4 29 31 30 32 #define MGMT_OP_READ_VERSION 0x0001 31 33 struct mgmt_rp_read_version { ··· 42 40 } __packed; 43 41 44 42 #define MGMT_OP_READ_INFO 0x0004 45 - struct mgmt_cp_read_info { 46 - __le16 index; 47 - } __packed; 48 43 struct mgmt_rp_read_info { 49 - __le16 index; 50 44 __u8 type; 51 45 __u8 powered; 52 46 __u8 connectable; ··· 58 60 } __packed; 59 61 60 62 struct mgmt_mode { 61 - __le16 index; 62 63 __u8 val; 63 64 } __packed; 64 65 ··· 71 74 72 75 #define MGMT_OP_ADD_UUID 0x0009 73 76 struct mgmt_cp_add_uuid { 74 - __le16 index; 75 77 __u8 uuid[16]; 76 78 __u8 svc_hint; 77 79 } __packed; 78 80 79 81 #define MGMT_OP_REMOVE_UUID 0x000A 80 82 struct mgmt_cp_remove_uuid { 81 - __le16 index; 82 83 __u8 uuid[16]; 83 84 } __packed; 84 85 85 86 #define MGMT_OP_SET_DEV_CLASS 0x000B 86 87 struct mgmt_cp_set_dev_class { 87 - __le16 index; 88 88 __u8 major; 89 89 __u8 minor; 90 90 } __packed; 91 91 92 92 #define MGMT_OP_SET_SERVICE_CACHE 0x000C 93 93 struct mgmt_cp_set_service_cache { 94 - __le16 index; 95 94 __u8 enable; 96 95 } __packed; 97 96 ··· 100 107 101 108 #define MGMT_OP_LOAD_KEYS 0x000D 102 109 struct mgmt_cp_load_keys { 103 - __le16 index; 104 110 __u8 debug_keys; 105 111 __le16 key_count; 106 112 struct mgmt_key_info keys[0]; ··· 107 115 108 116 #define MGMT_OP_REMOVE_KEY 0x000E 109 117 struct mgmt_cp_remove_key { 110 - __le16 index; 111 118 bdaddr_t bdaddr; 112 119 __u8 disconnect; 113 120 } __packed; 114 121 115 122 #define MGMT_OP_DISCONNECT 0x000F 116 123 struct mgmt_cp_disconnect { 117 - __le16 index; 118 124 bdaddr_t bdaddr; 119 125 } __packed; 120 126 struct mgmt_rp_disconnect { 121 - __le16 index; 122 127 bdaddr_t bdaddr; 123 128 } __packed; 124 129 125 130 #define MGMT_OP_GET_CONNECTIONS 0x0010 126 - struct mgmt_cp_get_connections { 
127 - __le16 index; 128 - } __packed; 129 131 struct mgmt_rp_get_connections { 130 - __le16 index; 131 132 __le16 conn_count; 132 133 bdaddr_t conn[0]; 133 134 } __packed; 134 135 135 136 #define MGMT_OP_PIN_CODE_REPLY 0x0011 136 137 struct mgmt_cp_pin_code_reply { 137 - __le16 index; 138 138 bdaddr_t bdaddr; 139 139 __u8 pin_len; 140 140 __u8 pin_code[16]; 141 141 } __packed; 142 + struct mgmt_rp_pin_code_reply { 143 + bdaddr_t bdaddr; 144 + uint8_t status; 145 + } __packed; 142 146 143 147 #define MGMT_OP_PIN_CODE_NEG_REPLY 0x0012 144 148 struct mgmt_cp_pin_code_neg_reply { 145 - __le16 index; 146 149 bdaddr_t bdaddr; 147 150 } __packed; 148 151 149 152 #define MGMT_OP_SET_IO_CAPABILITY 0x0013 150 153 struct mgmt_cp_set_io_capability { 151 - __le16 index; 152 154 __u8 io_capability; 153 155 } __packed; 156 + 157 + #define MGMT_OP_PAIR_DEVICE 0x0014 158 + struct mgmt_cp_pair_device { 159 + bdaddr_t bdaddr; 160 + __u8 io_cap; 161 + } __packed; 162 + struct mgmt_rp_pair_device { 163 + bdaddr_t bdaddr; 164 + __u8 status; 165 + } __packed; 166 + 167 + #define MGMT_OP_USER_CONFIRM_REPLY 0x0015 168 + struct mgmt_cp_user_confirm_reply { 169 + bdaddr_t bdaddr; 170 + } __packed; 171 + struct mgmt_rp_user_confirm_reply { 172 + bdaddr_t bdaddr; 173 + __u8 status; 174 + } __packed; 175 + 176 + #define MGMT_OP_USER_CONFIRM_NEG_REPLY 0x0016 154 177 155 178 #define MGMT_EV_CMD_COMPLETE 0x0001 156 179 struct mgmt_ev_cmd_complete { ··· 181 174 182 175 #define MGMT_EV_CONTROLLER_ERROR 0x0003 183 176 struct mgmt_ev_controller_error { 184 - __le16 index; 185 177 __u8 error_code; 186 178 } __packed; 187 179 188 180 #define MGMT_EV_INDEX_ADDED 0x0004 189 - struct mgmt_ev_index_added { 190 - __le16 index; 191 - } __packed; 192 181 193 182 #define MGMT_EV_INDEX_REMOVED 0x0005 194 - struct mgmt_ev_index_removed { 195 - __le16 index; 196 - } __packed; 197 183 198 184 #define MGMT_EV_POWERED 0x0006 199 185 ··· 198 198 199 199 #define MGMT_EV_NEW_KEY 0x000A 200 200 struct mgmt_ev_new_key { 
201 - __le16 index; 202 201 struct mgmt_key_info key; 203 202 __u8 old_key_type; 204 203 } __packed; 205 204 206 205 #define MGMT_EV_CONNECTED 0x000B 207 206 struct mgmt_ev_connected { 208 - __le16 index; 209 207 bdaddr_t bdaddr; 210 208 } __packed; 211 209 212 210 #define MGMT_EV_DISCONNECTED 0x000C 213 211 struct mgmt_ev_disconnected { 214 - __le16 index; 215 212 bdaddr_t bdaddr; 216 213 } __packed; 217 214 218 215 #define MGMT_EV_CONNECT_FAILED 0x000D 219 216 struct mgmt_ev_connect_failed { 220 - __le16 index; 221 217 bdaddr_t bdaddr; 222 218 __u8 status; 223 219 } __packed; 224 220 225 221 #define MGMT_EV_PIN_CODE_REQUEST 0x000E 226 222 struct mgmt_ev_pin_code_request { 227 - __le16 index; 228 223 bdaddr_t bdaddr; 224 + } __packed; 225 + 226 + #define MGMT_EV_USER_CONFIRM_REQUEST 0x000F 227 + struct mgmt_ev_user_confirm_request { 228 + bdaddr_t bdaddr; 229 + __le32 value; 230 + } __packed; 231 + 232 + #define MGMT_EV_AUTH_FAILED 0x0010 233 + struct mgmt_ev_auth_failed { 234 + bdaddr_t bdaddr; 235 + __u8 status; 229 236 } __packed;
+8 -1
lib/Kconfig
··· 221 221 tristate 222 222 223 223 config AVERAGE 224 - bool 224 + bool "Averaging functions" 225 + help 226 + This option is provided for the case where no in-kernel-tree 227 + modules require averaging functions, but a module built outside 228 + the kernel tree does. Such modules that use library averaging 229 + functions require Y here. 230 + 231 + If unsure, say N. 225 232 226 233 endmenu
+1 -3
net/bluetooth/af_bluetooth.c
··· 550 550 goto error; 551 551 552 552 err = l2cap_init(); 553 - if (err < 0) { 554 - hci_sock_cleanup(); 553 + if (err < 0) 555 554 goto sock_err; 556 - } 557 555 558 556 err = sco_init(); 559 557 if (err < 0) {
+5 -3
net/bluetooth/hci_conn.c
··· 286 286 conn->state = BT_OPEN; 287 287 conn->auth_type = HCI_AT_GENERAL_BONDING; 288 288 conn->io_capability = hdev->io_capability; 289 + conn->remote_auth = 0xff; 289 290 290 291 conn->power_save = 1; 291 292 conn->disc_timeout = HCI_DISCONN_TIMEOUT; ··· 430 429 431 430 if (type == LE_LINK) { 432 431 le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst); 432 + if (le) 433 + return ERR_PTR(-EBUSY); 434 + le = hci_conn_add(hdev, LE_LINK, dst); 433 435 if (!le) 434 - le = hci_conn_add(hdev, LE_LINK, dst); 435 - if (!le) 436 - return NULL; 436 + return ERR_PTR(-ENOMEM); 437 437 if (le->state == BT_OPEN) 438 438 hci_le_connect(le); 439 439
+66 -3
net/bluetooth/hci_event.c
··· 796 796 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status); 797 797 } 798 798 799 + static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb) 800 + { 801 + struct hci_rp_user_confirm_reply *rp = (void *) skb->data; 802 + 803 + BT_DBG("%s status 0x%x", hdev->name, rp->status); 804 + 805 + if (test_bit(HCI_MGMT, &hdev->flags)) 806 + mgmt_user_confirm_reply_complete(hdev->id, &rp->bdaddr, 807 + rp->status); 808 + } 809 + 810 + static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, 811 + struct sk_buff *skb) 812 + { 813 + struct hci_rp_user_confirm_reply *rp = (void *) skb->data; 814 + 815 + BT_DBG("%s status 0x%x", hdev->name, rp->status); 816 + 817 + if (test_bit(HCI_MGMT, &hdev->flags)) 818 + mgmt_user_confirm_neg_reply_complete(hdev->id, &rp->bdaddr, 819 + rp->status); 820 + } 821 + 799 822 static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) 800 823 { 801 824 BT_DBG("%s status 0x%x", hdev->name, status); ··· 1424 1401 if (!ev->status) { 1425 1402 conn->link_mode |= HCI_LM_AUTH; 1426 1403 conn->sec_level = conn->pending_sec_level; 1427 - } else 1404 + } else { 1405 + mgmt_auth_failed(hdev->id, &conn->dst, ev->status); 1428 1406 conn->sec_level = BT_SECURITY_LOW; 1407 + } 1429 1408 1430 1409 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend); 1431 1410 ··· 1751 1726 1752 1727 case HCI_OP_LE_READ_BUFFER_SIZE: 1753 1728 hci_cc_le_read_buffer_size(hdev, skb); 1729 + break; 1730 + 1731 + case HCI_OP_USER_CONFIRM_REPLY: 1732 + hci_cc_user_confirm_reply(hdev, skb); 1733 + break; 1734 + 1735 + case HCI_OP_USER_CONFIRM_NEG_REPLY: 1736 + hci_cc_user_confirm_neg_reply(hdev, skb); 1754 1737 break; 1755 1738 1756 1739 default: ··· 2395 2362 hci_dev_unlock(hdev); 2396 2363 } 2397 2364 2365 + static inline void hci_user_confirm_request_evt(struct hci_dev *hdev, 2366 + struct sk_buff *skb) 2367 + { 2368 + struct hci_ev_user_confirm_req *ev = (void *) skb->data; 2369 + 2370 + BT_DBG("%s", hdev->name); 2371 + 2372 + 
hci_dev_lock(hdev); 2373 + 2374 + if (test_bit(HCI_MGMT, &hdev->flags)) 2375 + mgmt_user_confirm_request(hdev->id, &ev->bdaddr, ev->passkey); 2376 + 2377 + hci_dev_unlock(hdev); 2378 + } 2379 + 2398 2380 static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) 2399 2381 { 2400 2382 struct hci_ev_simple_pair_complete *ev = (void *) skb->data; ··· 2420 2372 hci_dev_lock(hdev); 2421 2373 2422 2374 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); 2423 - if (conn) 2424 - hci_conn_put(conn); 2375 + if (!conn) 2376 + goto unlock; 2425 2377 2378 + /* To avoid duplicate auth_failed events to user space we check 2379 + * the HCI_CONN_AUTH_PEND flag which will be set if we 2380 + * initiated the authentication. A traditional auth_complete 2381 + * event gets always produced as initiator and is also mapped to 2382 + * the mgmt_auth_failed event */ 2383 + if (!test_bit(HCI_CONN_AUTH_PEND, &conn->pend) && ev->status != 0) 2384 + mgmt_auth_failed(hdev->id, &conn->dst, ev->status); 2385 + 2386 + hci_conn_put(conn); 2387 + 2388 + unlock: 2426 2389 hci_dev_unlock(hdev); 2427 2390 } 2428 2391 ··· 2637 2578 2638 2579 case HCI_EV_IO_CAPA_REPLY: 2639 2580 hci_io_capa_reply_evt(hdev, skb); 2581 + break; 2582 + 2583 + case HCI_EV_USER_CONFIRM_REQUEST: 2584 + hci_user_confirm_request_evt(hdev, skb); 2640 2585 break; 2641 2586 2642 2587 case HCI_EV_SIMPLE_PAIR_COMPLETE:
+1 -1
net/bluetooth/hci_sock.c
··· 861 861 return err; 862 862 } 863 863 864 - void __exit hci_sock_cleanup(void) 864 + void hci_sock_cleanup(void) 865 865 { 866 866 if (bt_sock_unregister(BTPROTO_HCI) < 0) 867 867 BT_ERR("HCI socket unregistration failed");
+6 -7
net/bluetooth/l2cap_core.c
··· 852 852 853 853 hci_dev_lock_bh(hdev); 854 854 855 - err = -ENOMEM; 856 - 857 855 auth_type = l2cap_get_auth_type(sk); 858 856 859 857 if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA) ··· 861 863 hcon = hci_connect(hdev, ACL_LINK, dst, 862 864 l2cap_pi(sk)->sec_level, auth_type); 863 865 864 - if (!hcon) 866 + if (IS_ERR(hcon)) { 867 + err = PTR_ERR(hcon); 865 868 goto done; 869 + } 866 870 867 871 conn = l2cap_conn_add(hcon, 0); 868 872 if (!conn) { 869 873 hci_conn_put(hcon); 874 + err = -ENOMEM; 870 875 goto done; 871 876 } 872 - 873 - err = 0; 874 877 875 878 /* Update source addr of the socket */ 876 879 bacpy(src, conn->src); ··· 890 891 } else 891 892 l2cap_do_start(sk); 892 893 } 894 + 895 + err = 0; 893 896 894 897 done: 895 898 hci_dev_unlock_bh(hdev); ··· 4033 4032 if (!l2cap_debugfs) 4034 4033 BT_ERR("Failed to create L2CAP debug file"); 4035 4034 } 4036 - 4037 - BT_INFO("L2CAP socket layer initialized"); 4038 4035 4039 4036 return 0; 4040 4037
+502 -249
net/bluetooth/mgmt.c
··· 38 38 int index; 39 39 void *cmd; 40 40 struct sock *sk; 41 + void *user_data; 41 42 }; 42 43 43 44 LIST_HEAD(cmd_list); 44 45 45 - static int cmd_status(struct sock *sk, u16 cmd, u8 status) 46 + static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status) 46 47 { 47 48 struct sk_buff *skb; 48 49 struct mgmt_hdr *hdr; 49 50 struct mgmt_ev_cmd_status *ev; 50 51 51 - BT_DBG("sock %p", sk); 52 + BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status); 52 53 53 54 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_ATOMIC); 54 55 if (!skb) ··· 58 57 hdr = (void *) skb_put(skb, sizeof(*hdr)); 59 58 60 59 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS); 60 + hdr->index = cpu_to_le16(index); 61 61 hdr->len = cpu_to_le16(sizeof(*ev)); 62 62 63 63 ev = (void *) skb_put(skb, sizeof(*ev)); ··· 71 69 return 0; 72 70 } 73 71 74 - static int cmd_complete(struct sock *sk, u16 cmd, void *rp, size_t rp_len) 72 + static int cmd_complete(struct sock *sk, u16 index, u16 cmd, void *rp, 73 + size_t rp_len) 75 74 { 76 75 struct sk_buff *skb; 77 76 struct mgmt_hdr *hdr; ··· 87 84 hdr = (void *) skb_put(skb, sizeof(*hdr)); 88 85 89 86 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE); 87 + hdr->index = cpu_to_le16(index); 90 88 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len); 91 89 92 90 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len); 93 91 put_unaligned_le16(cmd, &ev->opcode); 94 - memcpy(ev->data, rp, rp_len); 92 + 93 + if (rp) 94 + memcpy(ev->data, rp, rp_len); 95 95 96 96 if (sock_queue_rcv_skb(sk, skb) < 0) 97 97 kfree_skb(skb); ··· 111 105 rp.version = MGMT_VERSION; 112 106 put_unaligned_le16(MGMT_REVISION, &rp.revision); 113 107 114 - return cmd_complete(sk, MGMT_OP_READ_VERSION, &rp, sizeof(rp)); 108 + return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, &rp, 109 + sizeof(rp)); 115 110 } 116 111 117 112 static int read_index_list(struct sock *sk) ··· 158 151 159 152 read_unlock(&hci_dev_list_lock); 160 153 161 - err = cmd_complete(sk, 
MGMT_OP_READ_INDEX_LIST, rp, rp_len); 154 + err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, rp, 155 + rp_len); 162 156 163 157 kfree(rp); 164 158 165 159 return err; 166 160 } 167 161 168 - static int read_controller_info(struct sock *sk, unsigned char *data, u16 len) 162 + static int read_controller_info(struct sock *sk, u16 index) 169 163 { 170 164 struct mgmt_rp_read_info rp; 171 - struct mgmt_cp_read_info *cp = (void *) data; 172 165 struct hci_dev *hdev; 173 - u16 dev_id; 174 166 175 - BT_DBG("sock %p", sk); 167 + BT_DBG("sock %p hci%u", sk, index); 176 168 177 - if (len != 2) 178 - return cmd_status(sk, MGMT_OP_READ_INFO, EINVAL); 179 - 180 - dev_id = get_unaligned_le16(&cp->index); 181 - 182 - BT_DBG("request for hci%u", dev_id); 183 - 184 - hdev = hci_dev_get(dev_id); 169 + hdev = hci_dev_get(index); 185 170 if (!hdev) 186 - return cmd_status(sk, MGMT_OP_READ_INFO, ENODEV); 171 + return cmd_status(sk, index, MGMT_OP_READ_INFO, ENODEV); 187 172 188 173 hci_del_off_timer(hdev); 189 174 ··· 183 184 184 185 set_bit(HCI_MGMT, &hdev->flags); 185 186 186 - put_unaligned_le16(hdev->id, &rp.index); 187 187 rp.type = hdev->dev_type; 188 188 189 189 rp.powered = test_bit(HCI_UP, &hdev->flags); ··· 207 209 hci_dev_unlock_bh(hdev); 208 210 hci_dev_put(hdev); 209 211 210 - return cmd_complete(sk, MGMT_OP_READ_INFO, &rp, sizeof(rp)); 212 + return cmd_complete(sk, index, MGMT_OP_READ_INFO, &rp, sizeof(rp)); 211 213 } 212 214 213 215 static void mgmt_pending_free(struct pending_cmd *cmd) ··· 217 219 kfree(cmd); 218 220 } 219 221 220 - static int mgmt_pending_add(struct sock *sk, u16 opcode, int index, 221 - void *data, u16 len) 222 + static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, 223 + u16 index, void *data, u16 len) 222 224 { 223 225 struct pending_cmd *cmd; 224 226 225 227 cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC); 226 228 if (!cmd) 227 - return -ENOMEM; 229 + return NULL; 228 230 229 231 cmd->opcode = opcode; 230 232 cmd->index = 
index; ··· 232 234 cmd->cmd = kmalloc(len, GFP_ATOMIC); 233 235 if (!cmd->cmd) { 234 236 kfree(cmd); 235 - return -ENOMEM; 237 + return NULL; 236 238 } 237 239 238 240 memcpy(cmd->cmd, data, len); ··· 242 244 243 245 list_add(&cmd->list, &cmd_list); 244 246 245 - return 0; 247 + return cmd; 246 248 } 247 249 248 250 static void mgmt_pending_foreach(u16 opcode, int index, ··· 287 289 return NULL; 288 290 } 289 291 290 - static void mgmt_pending_remove(u16 opcode, int index) 292 + static void mgmt_pending_remove(struct pending_cmd *cmd) 291 293 { 292 - struct pending_cmd *cmd; 293 - 294 - cmd = mgmt_pending_find(opcode, index); 295 - if (cmd == NULL) 296 - return; 297 - 298 294 list_del(&cmd->list); 299 295 mgmt_pending_free(cmd); 300 296 } 301 297 302 - static int set_powered(struct sock *sk, unsigned char *data, u16 len) 298 + static int set_powered(struct sock *sk, u16 index, unsigned char *data, u16 len) 303 299 { 304 300 struct mgmt_mode *cp; 305 301 struct hci_dev *hdev; 306 - u16 dev_id; 307 - int ret, up; 302 + struct pending_cmd *cmd; 303 + int err, up; 308 304 309 305 cp = (void *) data; 310 - dev_id = get_unaligned_le16(&cp->index); 311 306 312 - BT_DBG("request for hci%u", dev_id); 307 + BT_DBG("request for hci%u", index); 313 308 314 - hdev = hci_dev_get(dev_id); 309 + if (len != sizeof(*cp)) 310 + return cmd_status(sk, index, MGMT_OP_SET_POWERED, EINVAL); 311 + 312 + hdev = hci_dev_get(index); 315 313 if (!hdev) 316 - return cmd_status(sk, MGMT_OP_SET_POWERED, ENODEV); 314 + return cmd_status(sk, index, MGMT_OP_SET_POWERED, ENODEV); 317 315 318 316 hci_dev_lock_bh(hdev); 319 317 320 318 up = test_bit(HCI_UP, &hdev->flags); 321 319 if ((cp->val && up) || (!cp->val && !up)) { 322 - ret = cmd_status(sk, MGMT_OP_SET_POWERED, EALREADY); 320 + err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EALREADY); 323 321 goto failed; 324 322 } 325 323 326 - if (mgmt_pending_find(MGMT_OP_SET_POWERED, dev_id)) { 327 - ret = cmd_status(sk, MGMT_OP_SET_POWERED, EBUSY); 324 
+ if (mgmt_pending_find(MGMT_OP_SET_POWERED, index)) { 325 + err = cmd_status(sk, index, MGMT_OP_SET_POWERED, EBUSY); 328 326 goto failed; 329 327 } 330 328 331 - ret = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, dev_id, data, len); 332 - if (ret < 0) 329 + cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, index, data, len); 330 + if (!cmd) { 331 + err = -ENOMEM; 333 332 goto failed; 333 + } 334 334 335 335 if (cp->val) 336 336 queue_work(hdev->workqueue, &hdev->power_on); 337 337 else 338 338 queue_work(hdev->workqueue, &hdev->power_off); 339 339 340 - ret = 0; 340 + err = 0; 341 341 342 342 failed: 343 343 hci_dev_unlock_bh(hdev); 344 344 hci_dev_put(hdev); 345 - return ret; 345 + return err; 346 346 } 347 347 348 - static int set_discoverable(struct sock *sk, unsigned char *data, u16 len) 348 + static int set_discoverable(struct sock *sk, u16 index, unsigned char *data, 349 + u16 len) 349 350 { 350 351 struct mgmt_mode *cp; 351 352 struct hci_dev *hdev; 352 - u16 dev_id; 353 + struct pending_cmd *cmd; 353 354 u8 scan; 354 355 int err; 355 356 356 357 cp = (void *) data; 357 - dev_id = get_unaligned_le16(&cp->index); 358 358 359 - BT_DBG("request for hci%u", dev_id); 359 + BT_DBG("request for hci%u", index); 360 360 361 - hdev = hci_dev_get(dev_id); 361 + if (len != sizeof(*cp)) 362 + return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EINVAL); 363 + 364 + hdev = hci_dev_get(index); 362 365 if (!hdev) 363 - return cmd_status(sk, MGMT_OP_SET_DISCOVERABLE, ENODEV); 366 + return cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENODEV); 364 367 365 368 hci_dev_lock_bh(hdev); 366 369 367 370 if (!test_bit(HCI_UP, &hdev->flags)) { 368 - err = cmd_status(sk, MGMT_OP_SET_DISCOVERABLE, ENETDOWN); 371 + err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, ENETDOWN); 369 372 goto failed; 370 373 } 371 374 372 - if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, dev_id) || 373 - mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, dev_id)) { 374 - err = cmd_status(sk, 
MGMT_OP_SET_DISCOVERABLE, EBUSY); 375 + if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) || 376 + mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) { 377 + err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EBUSY); 375 378 goto failed; 376 379 } 377 380 378 381 if (cp->val == test_bit(HCI_ISCAN, &hdev->flags) && 379 382 test_bit(HCI_PSCAN, &hdev->flags)) { 380 - err = cmd_status(sk, MGMT_OP_SET_DISCOVERABLE, EALREADY); 383 + err = cmd_status(sk, index, MGMT_OP_SET_DISCOVERABLE, EALREADY); 381 384 goto failed; 382 385 } 383 386 384 - err = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, dev_id, data, len); 385 - if (err < 0) 387 + cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, index, data, len); 388 + if (!cmd) { 389 + err = -ENOMEM; 386 390 goto failed; 391 + } 387 392 388 393 scan = SCAN_PAGE; 389 394 ··· 395 394 396 395 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); 397 396 if (err < 0) 398 - mgmt_pending_remove(MGMT_OP_SET_DISCOVERABLE, dev_id); 397 + mgmt_pending_remove(cmd); 399 398 400 399 failed: 401 400 hci_dev_unlock_bh(hdev); ··· 404 403 return err; 405 404 } 406 405 407 - static int set_connectable(struct sock *sk, unsigned char *data, u16 len) 406 + static int set_connectable(struct sock *sk, u16 index, unsigned char *data, 407 + u16 len) 408 408 { 409 409 struct mgmt_mode *cp; 410 410 struct hci_dev *hdev; 411 - u16 dev_id; 411 + struct pending_cmd *cmd; 412 412 u8 scan; 413 413 int err; 414 414 415 415 cp = (void *) data; 416 - dev_id = get_unaligned_le16(&cp->index); 417 416 418 - BT_DBG("request for hci%u", dev_id); 417 + BT_DBG("request for hci%u", index); 419 418 420 - hdev = hci_dev_get(dev_id); 419 + if (len != sizeof(*cp)) 420 + return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EINVAL); 421 + 422 + hdev = hci_dev_get(index); 421 423 if (!hdev) 422 - return cmd_status(sk, MGMT_OP_SET_CONNECTABLE, ENODEV); 424 + return cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENODEV); 423 425 424 426 hci_dev_lock_bh(hdev); 
425 427 426 428 if (!test_bit(HCI_UP, &hdev->flags)) { 427 - err = cmd_status(sk, MGMT_OP_SET_CONNECTABLE, ENETDOWN); 429 + err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, ENETDOWN); 428 430 goto failed; 429 431 } 430 432 431 - if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, dev_id) || 432 - mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, dev_id)) { 433 - err = cmd_status(sk, MGMT_OP_SET_CONNECTABLE, EBUSY); 433 + if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, index) || 434 + mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, index)) { 435 + err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EBUSY); 434 436 goto failed; 435 437 } 436 438 437 439 if (cp->val == test_bit(HCI_PSCAN, &hdev->flags)) { 438 - err = cmd_status(sk, MGMT_OP_SET_CONNECTABLE, EALREADY); 440 + err = cmd_status(sk, index, MGMT_OP_SET_CONNECTABLE, EALREADY); 439 441 goto failed; 440 442 } 441 443 442 - err = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, dev_id, data, len); 443 - if (err < 0) 444 + cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, index, data, len); 445 + if (!cmd) { 446 + err = -ENOMEM; 444 447 goto failed; 448 + } 445 449 446 450 if (cp->val) 447 451 scan = SCAN_PAGE; ··· 455 449 456 450 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); 457 451 if (err < 0) 458 - mgmt_pending_remove(MGMT_OP_SET_CONNECTABLE, dev_id); 452 + mgmt_pending_remove(cmd); 459 453 460 454 failed: 461 455 hci_dev_unlock_bh(hdev); ··· 464 458 return err; 465 459 } 466 460 467 - static int mgmt_event(u16 event, void *data, u16 data_len, struct sock *skip_sk) 461 + static int mgmt_event(u16 event, u16 index, void *data, u16 data_len, 462 + struct sock *skip_sk) 468 463 { 469 464 struct sk_buff *skb; 470 465 struct mgmt_hdr *hdr; ··· 478 471 479 472 hdr = (void *) skb_put(skb, sizeof(*hdr)); 480 473 hdr->opcode = cpu_to_le16(event); 474 + hdr->index = cpu_to_le16(index); 481 475 hdr->len = cpu_to_le16(data_len); 482 476 483 - memcpy(skb_put(skb, data_len), data, data_len); 477 + if (data) 478 + 
memcpy(skb_put(skb, data_len), data, data_len); 484 479 485 480 hci_send_to_sock(NULL, skb, skip_sk); 486 481 kfree_skb(skb); ··· 494 485 { 495 486 struct mgmt_mode rp; 496 487 497 - put_unaligned_le16(index, &rp.index); 498 488 rp.val = val; 499 489 500 - return cmd_complete(sk, opcode, &rp, sizeof(rp)); 490 + return cmd_complete(sk, index, opcode, &rp, sizeof(rp)); 501 491 } 502 492 503 - static int set_pairable(struct sock *sk, unsigned char *data, u16 len) 493 + static int set_pairable(struct sock *sk, u16 index, unsigned char *data, 494 + u16 len) 504 495 { 505 496 struct mgmt_mode *cp, ev; 506 497 struct hci_dev *hdev; 507 - u16 dev_id; 508 498 int err; 509 499 510 500 cp = (void *) data; 511 - dev_id = get_unaligned_le16(&cp->index); 512 501 513 - BT_DBG("request for hci%u", dev_id); 502 + BT_DBG("request for hci%u", index); 514 503 515 - hdev = hci_dev_get(dev_id); 504 + if (len != sizeof(*cp)) 505 + return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, EINVAL); 506 + 507 + hdev = hci_dev_get(index); 516 508 if (!hdev) 517 - return cmd_status(sk, MGMT_OP_SET_PAIRABLE, ENODEV); 509 + return cmd_status(sk, index, MGMT_OP_SET_PAIRABLE, ENODEV); 518 510 519 511 hci_dev_lock_bh(hdev); 520 512 ··· 524 514 else 525 515 clear_bit(HCI_PAIRABLE, &hdev->flags); 526 516 527 - err = send_mode_rsp(sk, MGMT_OP_SET_PAIRABLE, dev_id, cp->val); 517 + err = send_mode_rsp(sk, MGMT_OP_SET_PAIRABLE, index, cp->val); 528 518 if (err < 0) 529 519 goto failed; 530 520 531 - put_unaligned_le16(dev_id, &ev.index); 532 521 ev.val = cp->val; 533 522 534 - err = mgmt_event(MGMT_EV_PAIRABLE, &ev, sizeof(ev), sk); 523 + err = mgmt_event(MGMT_EV_PAIRABLE, index, &ev, sizeof(ev), sk); 535 524 536 525 failed: 537 526 hci_dev_unlock_bh(hdev); ··· 572 563 return hci_send_cmd(hdev, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod); 573 564 } 574 565 575 - static int add_uuid(struct sock *sk, unsigned char *data, u16 len) 566 + static int add_uuid(struct sock *sk, u16 index, unsigned char *data, u16 
len) 576 567 { 577 568 struct mgmt_cp_add_uuid *cp; 578 569 struct hci_dev *hdev; 579 570 struct bt_uuid *uuid; 580 - u16 dev_id; 581 571 int err; 582 572 583 573 cp = (void *) data; 584 - dev_id = get_unaligned_le16(&cp->index); 585 574 586 - BT_DBG("request for hci%u", dev_id); 575 + BT_DBG("request for hci%u", index); 587 576 588 - hdev = hci_dev_get(dev_id); 577 + if (len != sizeof(*cp)) 578 + return cmd_status(sk, index, MGMT_OP_ADD_UUID, EINVAL); 579 + 580 + hdev = hci_dev_get(index); 589 581 if (!hdev) 590 - return cmd_status(sk, MGMT_OP_ADD_UUID, ENODEV); 582 + return cmd_status(sk, index, MGMT_OP_ADD_UUID, ENODEV); 591 583 592 584 hci_dev_lock_bh(hdev); 593 585 ··· 607 597 if (err < 0) 608 598 goto failed; 609 599 610 - err = cmd_complete(sk, MGMT_OP_ADD_UUID, &dev_id, sizeof(dev_id)); 600 + err = cmd_complete(sk, index, MGMT_OP_ADD_UUID, NULL, 0); 611 601 612 602 failed: 613 603 hci_dev_unlock_bh(hdev); ··· 616 606 return err; 617 607 } 618 608 619 - static int remove_uuid(struct sock *sk, unsigned char *data, u16 len) 609 + static int remove_uuid(struct sock *sk, u16 index, unsigned char *data, u16 len) 620 610 { 621 611 struct list_head *p, *n; 622 - struct mgmt_cp_add_uuid *cp; 612 + struct mgmt_cp_remove_uuid *cp; 623 613 struct hci_dev *hdev; 624 614 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; 625 - u16 dev_id; 626 615 int err, found; 627 616 628 617 cp = (void *) data; 629 - dev_id = get_unaligned_le16(&cp->index); 630 618 631 - BT_DBG("request for hci%u", dev_id); 619 + BT_DBG("request for hci%u", index); 632 620 633 - hdev = hci_dev_get(dev_id); 621 + if (len != sizeof(*cp)) 622 + return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, EINVAL); 623 + 624 + hdev = hci_dev_get(index); 634 625 if (!hdev) 635 - return cmd_status(sk, MGMT_OP_REMOVE_UUID, ENODEV); 626 + return cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENODEV); 636 627 637 628 hci_dev_lock_bh(hdev); 638 629 ··· 655 644 } 656 645 657 646 if (found == 0) { 658 - err = 
cmd_status(sk, MGMT_OP_REMOVE_UUID, ENOENT); 647 + err = cmd_status(sk, index, MGMT_OP_REMOVE_UUID, ENOENT); 659 648 goto unlock; 660 649 } 661 650 ··· 663 652 if (err < 0) 664 653 goto unlock; 665 654 666 - err = cmd_complete(sk, MGMT_OP_REMOVE_UUID, &dev_id, sizeof(dev_id)); 655 + err = cmd_complete(sk, index, MGMT_OP_REMOVE_UUID, NULL, 0); 667 656 668 657 unlock: 669 658 hci_dev_unlock_bh(hdev); ··· 672 661 return err; 673 662 } 674 663 675 - static int set_dev_class(struct sock *sk, unsigned char *data, u16 len) 664 + static int set_dev_class(struct sock *sk, u16 index, unsigned char *data, 665 + u16 len) 676 666 { 677 667 struct hci_dev *hdev; 678 668 struct mgmt_cp_set_dev_class *cp; 679 - u16 dev_id; 680 669 int err; 681 670 682 671 cp = (void *) data; 683 - dev_id = get_unaligned_le16(&cp->index); 684 672 685 - BT_DBG("request for hci%u", dev_id); 673 + BT_DBG("request for hci%u", index); 686 674 687 - hdev = hci_dev_get(dev_id); 675 + if (len != sizeof(*cp)) 676 + return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, EINVAL); 677 + 678 + hdev = hci_dev_get(index); 688 679 if (!hdev) 689 - return cmd_status(sk, MGMT_OP_SET_DEV_CLASS, ENODEV); 680 + return cmd_status(sk, index, MGMT_OP_SET_DEV_CLASS, ENODEV); 690 681 691 682 hci_dev_lock_bh(hdev); 692 683 ··· 698 685 err = update_class(hdev); 699 686 700 687 if (err == 0) 701 - err = cmd_complete(sk, MGMT_OP_SET_DEV_CLASS, &dev_id, 702 - sizeof(dev_id)); 688 + err = cmd_complete(sk, index, MGMT_OP_SET_DEV_CLASS, NULL, 0); 703 689 704 690 hci_dev_unlock_bh(hdev); 705 691 hci_dev_put(hdev); ··· 706 694 return err; 707 695 } 708 696 709 - static int set_service_cache(struct sock *sk, unsigned char *data, u16 len) 697 + static int set_service_cache(struct sock *sk, u16 index, unsigned char *data, 698 + u16 len) 710 699 { 711 700 struct hci_dev *hdev; 712 701 struct mgmt_cp_set_service_cache *cp; 713 - u16 dev_id; 714 702 int err; 715 703 716 704 cp = (void *) data; 717 - dev_id = get_unaligned_le16(&cp->index); 
718 705 719 - hdev = hci_dev_get(dev_id); 706 + if (len != sizeof(*cp)) 707 + return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, EINVAL); 708 + 709 + hdev = hci_dev_get(index); 720 710 if (!hdev) 721 - return cmd_status(sk, MGMT_OP_SET_SERVICE_CACHE, ENODEV); 711 + return cmd_status(sk, index, MGMT_OP_SET_SERVICE_CACHE, ENODEV); 722 712 723 713 hci_dev_lock_bh(hdev); 724 714 725 - BT_DBG("hci%u enable %d", dev_id, cp->enable); 715 + BT_DBG("hci%u enable %d", index, cp->enable); 726 716 727 717 if (cp->enable) { 728 718 set_bit(HCI_SERVICE_CACHE, &hdev->flags); ··· 735 721 } 736 722 737 723 if (err == 0) 738 - err = cmd_complete(sk, MGMT_OP_SET_SERVICE_CACHE, &dev_id, 739 - sizeof(dev_id)); 724 + err = cmd_complete(sk, index, MGMT_OP_SET_SERVICE_CACHE, NULL, 725 + 0); 740 726 741 727 hci_dev_unlock_bh(hdev); 742 728 hci_dev_put(hdev); ··· 744 730 return err; 745 731 } 746 732 747 - static int load_keys(struct sock *sk, unsigned char *data, u16 len) 733 + static int load_keys(struct sock *sk, u16 index, unsigned char *data, u16 len) 748 734 { 749 735 struct hci_dev *hdev; 750 736 struct mgmt_cp_load_keys *cp; 751 - u16 dev_id, key_count, expected_len; 737 + u16 key_count, expected_len; 752 738 int i; 753 739 754 740 cp = (void *) data; 755 - dev_id = get_unaligned_le16(&cp->index); 741 + 742 + if (len < sizeof(*cp)) 743 + return -EINVAL; 744 + 756 745 key_count = get_unaligned_le16(&cp->key_count); 757 746 758 747 expected_len = sizeof(*cp) + key_count * sizeof(struct mgmt_key_info); ··· 765 748 return -EINVAL; 766 749 } 767 750 768 - hdev = hci_dev_get(dev_id); 751 + hdev = hci_dev_get(index); 769 752 if (!hdev) 770 - return cmd_status(sk, MGMT_OP_LOAD_KEYS, ENODEV); 753 + return cmd_status(sk, index, MGMT_OP_LOAD_KEYS, ENODEV); 771 754 772 - BT_DBG("hci%u debug_keys %u key_count %u", dev_id, cp->debug_keys, 755 + BT_DBG("hci%u debug_keys %u key_count %u", index, cp->debug_keys, 773 756 key_count); 774 757 775 758 hci_dev_lock_bh(hdev); ··· 796 779 return 0; 
797 780 } 798 781 799 - static int remove_key(struct sock *sk, unsigned char *data, u16 len) 782 + static int remove_key(struct sock *sk, u16 index, unsigned char *data, u16 len) 800 783 { 801 784 struct hci_dev *hdev; 802 785 struct mgmt_cp_remove_key *cp; 803 786 struct hci_conn *conn; 804 - u16 dev_id; 805 787 int err; 806 788 807 789 cp = (void *) data; 808 - dev_id = get_unaligned_le16(&cp->index); 809 790 810 - hdev = hci_dev_get(dev_id); 791 + if (len != sizeof(*cp)) 792 + return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, EINVAL); 793 + 794 + hdev = hci_dev_get(index); 811 795 if (!hdev) 812 - return cmd_status(sk, MGMT_OP_REMOVE_KEY, ENODEV); 796 + return cmd_status(sk, index, MGMT_OP_REMOVE_KEY, ENODEV); 813 797 814 798 hci_dev_lock_bh(hdev); 815 799 816 800 err = hci_remove_link_key(hdev, &cp->bdaddr); 817 801 if (err < 0) { 818 - err = cmd_status(sk, MGMT_OP_REMOVE_KEY, -err); 802 + err = cmd_status(sk, index, MGMT_OP_REMOVE_KEY, -err); 819 803 goto unlock; 820 804 } 821 805 ··· 841 823 return err; 842 824 } 843 825 844 - static int disconnect(struct sock *sk, unsigned char *data, u16 len) 826 + static int disconnect(struct sock *sk, u16 index, unsigned char *data, u16 len) 845 827 { 846 828 struct hci_dev *hdev; 847 829 struct mgmt_cp_disconnect *cp; 848 830 struct hci_cp_disconnect dc; 831 + struct pending_cmd *cmd; 849 832 struct hci_conn *conn; 850 - u16 dev_id; 851 833 int err; 852 834 853 835 BT_DBG(""); 854 836 855 837 cp = (void *) data; 856 - dev_id = get_unaligned_le16(&cp->index); 857 838 858 - hdev = hci_dev_get(dev_id); 839 + if (len != sizeof(*cp)) 840 + return cmd_status(sk, index, MGMT_OP_DISCONNECT, EINVAL); 841 + 842 + hdev = hci_dev_get(index); 859 843 if (!hdev) 860 - return cmd_status(sk, MGMT_OP_DISCONNECT, ENODEV); 844 + return cmd_status(sk, index, MGMT_OP_DISCONNECT, ENODEV); 861 845 862 846 hci_dev_lock_bh(hdev); 863 847 864 848 if (!test_bit(HCI_UP, &hdev->flags)) { 865 - err = cmd_status(sk, MGMT_OP_DISCONNECT, ENETDOWN); 849 + 
err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENETDOWN); 866 850 goto failed; 867 851 } 868 852 869 - if (mgmt_pending_find(MGMT_OP_DISCONNECT, dev_id)) { 870 - err = cmd_status(sk, MGMT_OP_DISCONNECT, EBUSY); 853 + if (mgmt_pending_find(MGMT_OP_DISCONNECT, index)) { 854 + err = cmd_status(sk, index, MGMT_OP_DISCONNECT, EBUSY); 871 855 goto failed; 872 856 } 873 857 874 858 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); 875 859 if (!conn) { 876 - err = cmd_status(sk, MGMT_OP_DISCONNECT, ENOTCONN); 860 + err = cmd_status(sk, index, MGMT_OP_DISCONNECT, ENOTCONN); 877 861 goto failed; 878 862 } 879 863 880 - err = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, dev_id, data, len); 881 - if (err < 0) 864 + cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, index, data, len); 865 + if (!cmd) { 866 + err = -ENOMEM; 882 867 goto failed; 868 + } 883 869 884 870 put_unaligned_le16(conn->handle, &dc.handle); 885 871 dc.reason = 0x13; /* Remote User Terminated Connection */ 886 872 887 873 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc); 888 874 if (err < 0) 889 - mgmt_pending_remove(MGMT_OP_DISCONNECT, dev_id); 875 + mgmt_pending_remove(cmd); 890 876 891 877 failed: 892 878 hci_dev_unlock_bh(hdev); ··· 899 877 return err; 900 878 } 901 879 902 - static int get_connections(struct sock *sk, unsigned char *data, u16 len) 880 + static int get_connections(struct sock *sk, u16 index) 903 881 { 904 - struct mgmt_cp_get_connections *cp; 905 882 struct mgmt_rp_get_connections *rp; 906 883 struct hci_dev *hdev; 907 884 struct list_head *p; 908 885 size_t rp_len; 909 - u16 dev_id, count; 886 + u16 count; 910 887 int i, err; 911 888 912 889 BT_DBG(""); 913 890 914 - cp = (void *) data; 915 - dev_id = get_unaligned_le16(&cp->index); 916 - 917 - hdev = hci_dev_get(dev_id); 891 + hdev = hci_dev_get(index); 918 892 if (!hdev) 919 - return cmd_status(sk, MGMT_OP_GET_CONNECTIONS, ENODEV); 893 + return cmd_status(sk, index, MGMT_OP_GET_CONNECTIONS, ENODEV); 920 894 921 895 
hci_dev_lock_bh(hdev); 922 896 ··· 928 910 goto unlock; 929 911 } 930 912 931 - put_unaligned_le16(dev_id, &rp->index); 932 913 put_unaligned_le16(count, &rp->conn_count); 933 914 934 915 read_lock(&hci_dev_list_lock); ··· 941 924 942 925 read_unlock(&hci_dev_list_lock); 943 926 944 - err = cmd_complete(sk, MGMT_OP_GET_CONNECTIONS, rp, rp_len); 927 + err = cmd_complete(sk, index, MGMT_OP_GET_CONNECTIONS, rp, rp_len); 945 928 946 929 unlock: 947 930 kfree(rp); ··· 950 933 return err; 951 934 } 952 935 953 - static int pin_code_reply(struct sock *sk, unsigned char *data, u16 len) 936 + static int pin_code_reply(struct sock *sk, u16 index, unsigned char *data, 937 + u16 len) 954 938 { 955 939 struct hci_dev *hdev; 956 940 struct mgmt_cp_pin_code_reply *cp; 957 941 struct hci_cp_pin_code_reply reply; 958 - u16 dev_id; 942 + struct pending_cmd *cmd; 959 943 int err; 960 944 961 945 BT_DBG(""); 962 946 963 947 cp = (void *) data; 964 - dev_id = get_unaligned_le16(&cp->index); 965 948 966 - hdev = hci_dev_get(dev_id); 949 + if (len != sizeof(*cp)) 950 + return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, EINVAL); 951 + 952 + hdev = hci_dev_get(index); 967 953 if (!hdev) 968 - return cmd_status(sk, MGMT_OP_DISCONNECT, ENODEV); 954 + return cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENODEV); 969 955 970 956 hci_dev_lock_bh(hdev); 971 957 972 958 if (!test_bit(HCI_UP, &hdev->flags)) { 973 - err = cmd_status(sk, MGMT_OP_PIN_CODE_REPLY, ENETDOWN); 959 + err = cmd_status(sk, index, MGMT_OP_PIN_CODE_REPLY, ENETDOWN); 974 960 goto failed; 975 961 } 976 962 977 - err = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, dev_id, data, len); 978 - if (err < 0) 963 + cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, index, data, len); 964 + if (!cmd) { 965 + err = -ENOMEM; 979 966 goto failed; 967 + } 980 968 981 969 bacpy(&reply.bdaddr, &cp->bdaddr); 982 970 reply.pin_len = cp->pin_len; ··· 989 967 990 968 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply); 991 969 
if (err < 0) 992 - mgmt_pending_remove(MGMT_OP_PIN_CODE_REPLY, dev_id); 970 + mgmt_pending_remove(cmd); 993 971 994 972 failed: 995 973 hci_dev_unlock_bh(hdev); ··· 998 976 return err; 999 977 } 1000 978 1001 - static int pin_code_neg_reply(struct sock *sk, unsigned char *data, u16 len) 979 + static int pin_code_neg_reply(struct sock *sk, u16 index, unsigned char *data, 980 + u16 len) 1002 981 { 1003 982 struct hci_dev *hdev; 1004 983 struct mgmt_cp_pin_code_neg_reply *cp; 1005 - u16 dev_id; 984 + struct pending_cmd *cmd; 1006 985 int err; 1007 986 1008 987 BT_DBG(""); 1009 988 1010 989 cp = (void *) data; 1011 - dev_id = get_unaligned_le16(&cp->index); 1012 990 1013 - hdev = hci_dev_get(dev_id); 991 + if (len != sizeof(*cp)) 992 + return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, 993 + EINVAL); 994 + 995 + hdev = hci_dev_get(index); 1014 996 if (!hdev) 1015 - return cmd_status(sk, MGMT_OP_PIN_CODE_NEG_REPLY, ENODEV); 997 + return cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, 998 + ENODEV); 1016 999 1017 1000 hci_dev_lock_bh(hdev); 1018 1001 1019 1002 if (!test_bit(HCI_UP, &hdev->flags)) { 1020 - err = cmd_status(sk, MGMT_OP_PIN_CODE_NEG_REPLY, ENETDOWN); 1003 + err = cmd_status(sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, 1004 + ENETDOWN); 1021 1005 goto failed; 1022 1006 } 1023 1007 1024 - err = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, dev_id, 1008 + cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, index, 1025 1009 data, len); 1026 - if (err < 0) 1010 + if (!cmd) { 1011 + err = -ENOMEM; 1027 1012 goto failed; 1013 + } 1028 1014 1029 - err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(bdaddr_t), 1015 + err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, sizeof(cp->bdaddr), 1030 1016 &cp->bdaddr); 1031 1017 if (err < 0) 1032 - mgmt_pending_remove(MGMT_OP_PIN_CODE_NEG_REPLY, dev_id); 1018 + mgmt_pending_remove(cmd); 1033 1019 1034 1020 failed: 1035 1021 hci_dev_unlock_bh(hdev); ··· 1046 1016 return err; 1047 1017 } 1048 1018 1049 - static 
int set_io_capability(struct sock *sk, unsigned char *data, u16 len) 1019 + static int set_io_capability(struct sock *sk, u16 index, unsigned char *data, 1020 + u16 len) 1050 1021 { 1051 1022 struct hci_dev *hdev; 1052 1023 struct mgmt_cp_set_io_capability *cp; 1053 - u16 dev_id; 1054 1024 1055 1025 BT_DBG(""); 1056 1026 1057 1027 cp = (void *) data; 1058 - dev_id = get_unaligned_le16(&cp->index); 1059 1028 1060 - hdev = hci_dev_get(dev_id); 1029 + if (len != sizeof(*cp)) 1030 + return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, EINVAL); 1031 + 1032 + hdev = hci_dev_get(index); 1061 1033 if (!hdev) 1062 - return cmd_status(sk, MGMT_OP_SET_IO_CAPABILITY, ENODEV); 1034 + return cmd_status(sk, index, MGMT_OP_SET_IO_CAPABILITY, ENODEV); 1063 1035 1064 1036 hci_dev_lock_bh(hdev); 1065 1037 1066 1038 hdev->io_capability = cp->io_capability; 1067 1039 1068 1040 BT_DBG("%s IO capability set to 0x%02x", hdev->name, 1069 - hdev->io_capability); 1041 + hdev->io_capability); 1070 1042 1071 1043 hci_dev_unlock_bh(hdev); 1072 1044 hci_dev_put(hdev); 1073 1045 1074 - return cmd_complete(sk, MGMT_OP_SET_IO_CAPABILITY, 1075 - &dev_id, sizeof(dev_id)); 1046 + return cmd_complete(sk, index, MGMT_OP_SET_IO_CAPABILITY, NULL, 0); 1047 + } 1048 + 1049 + static inline struct pending_cmd *find_pairing(struct hci_conn *conn) 1050 + { 1051 + struct hci_dev *hdev = conn->hdev; 1052 + struct list_head *p; 1053 + 1054 + list_for_each(p, &cmd_list) { 1055 + struct pending_cmd *cmd; 1056 + 1057 + cmd = list_entry(p, struct pending_cmd, list); 1058 + 1059 + if (cmd->opcode != MGMT_OP_PAIR_DEVICE) 1060 + continue; 1061 + 1062 + if (cmd->index != hdev->id) 1063 + continue; 1064 + 1065 + if (cmd->user_data != conn) 1066 + continue; 1067 + 1068 + return cmd; 1069 + } 1070 + 1071 + return NULL; 1072 + } 1073 + 1074 + static void pairing_complete(struct pending_cmd *cmd, u8 status) 1075 + { 1076 + struct mgmt_rp_pair_device rp; 1077 + struct hci_conn *conn = cmd->user_data; 1078 + 1079 + 
bacpy(&rp.bdaddr, &conn->dst); 1080 + rp.status = status; 1081 + 1082 + cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, &rp, sizeof(rp)); 1083 + 1084 + /* So we don't get further callbacks for this connection */ 1085 + conn->connect_cfm_cb = NULL; 1086 + conn->security_cfm_cb = NULL; 1087 + conn->disconn_cfm_cb = NULL; 1088 + 1089 + hci_conn_put(conn); 1090 + 1091 + mgmt_pending_remove(cmd); 1092 + } 1093 + 1094 + static void pairing_complete_cb(struct hci_conn *conn, u8 status) 1095 + { 1096 + struct pending_cmd *cmd; 1097 + 1098 + BT_DBG("status %u", status); 1099 + 1100 + cmd = find_pairing(conn); 1101 + if (!cmd) { 1102 + BT_DBG("Unable to find a pending command"); 1103 + return; 1104 + } 1105 + 1106 + pairing_complete(cmd, status); 1107 + } 1108 + 1109 + static int pair_device(struct sock *sk, u16 index, unsigned char *data, u16 len) 1110 + { 1111 + struct hci_dev *hdev; 1112 + struct mgmt_cp_pair_device *cp; 1113 + struct pending_cmd *cmd; 1114 + u8 sec_level, auth_type; 1115 + struct hci_conn *conn; 1116 + int err; 1117 + 1118 + BT_DBG(""); 1119 + 1120 + cp = (void *) data; 1121 + 1122 + if (len != sizeof(*cp)) 1123 + return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, EINVAL); 1124 + 1125 + hdev = hci_dev_get(index); 1126 + if (!hdev) 1127 + return cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, ENODEV); 1128 + 1129 + hci_dev_lock_bh(hdev); 1130 + 1131 + if (cp->io_cap == 0x03) { 1132 + sec_level = BT_SECURITY_MEDIUM; 1133 + auth_type = HCI_AT_DEDICATED_BONDING; 1134 + } else { 1135 + sec_level = BT_SECURITY_HIGH; 1136 + auth_type = HCI_AT_DEDICATED_BONDING_MITM; 1137 + } 1138 + 1139 + conn = hci_connect(hdev, ACL_LINK, &cp->bdaddr, sec_level, auth_type); 1140 + if (IS_ERR(conn)) { 1141 + err = PTR_ERR(conn); 1142 + goto unlock; 1143 + } 1144 + 1145 + if (conn->connect_cfm_cb) { 1146 + hci_conn_put(conn); 1147 + err = cmd_status(sk, index, MGMT_OP_PAIR_DEVICE, EBUSY); 1148 + goto unlock; 1149 + } 1150 + 1151 + cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, 
index, data, len); 1152 + if (!cmd) { 1153 + err = -ENOMEM; 1154 + hci_conn_put(conn); 1155 + goto unlock; 1156 + } 1157 + 1158 + conn->connect_cfm_cb = pairing_complete_cb; 1159 + conn->security_cfm_cb = pairing_complete_cb; 1160 + conn->disconn_cfm_cb = pairing_complete_cb; 1161 + conn->io_capability = cp->io_cap; 1162 + cmd->user_data = conn; 1163 + 1164 + if (conn->state == BT_CONNECTED && 1165 + hci_conn_security(conn, sec_level, auth_type)) 1166 + pairing_complete(cmd, 0); 1167 + 1168 + err = 0; 1169 + 1170 + unlock: 1171 + hci_dev_unlock_bh(hdev); 1172 + hci_dev_put(hdev); 1173 + 1174 + return err; 1175 + } 1176 + 1177 + static int user_confirm_reply(struct sock *sk, u16 index, unsigned char *data, 1178 + u16 len, int success) 1179 + { 1180 + struct mgmt_cp_user_confirm_reply *cp = (void *) data; 1181 + u16 mgmt_op, hci_op; 1182 + struct pending_cmd *cmd; 1183 + struct hci_dev *hdev; 1184 + int err; 1185 + 1186 + BT_DBG(""); 1187 + 1188 + if (success) { 1189 + mgmt_op = MGMT_OP_USER_CONFIRM_REPLY; 1190 + hci_op = HCI_OP_USER_CONFIRM_REPLY; 1191 + } else { 1192 + mgmt_op = MGMT_OP_USER_CONFIRM_NEG_REPLY; 1193 + hci_op = HCI_OP_USER_CONFIRM_NEG_REPLY; 1194 + } 1195 + 1196 + if (len != sizeof(*cp)) 1197 + return cmd_status(sk, index, mgmt_op, EINVAL); 1198 + 1199 + hdev = hci_dev_get(index); 1200 + if (!hdev) 1201 + return cmd_status(sk, index, mgmt_op, ENODEV); 1202 + 1203 + if (!test_bit(HCI_UP, &hdev->flags)) { 1204 + err = cmd_status(sk, index, mgmt_op, ENETDOWN); 1205 + goto failed; 1206 + } 1207 + 1208 + cmd = mgmt_pending_add(sk, mgmt_op, index, data, len); 1209 + if (!cmd) { 1210 + err = -ENOMEM; 1211 + goto failed; 1212 + } 1213 + 1214 + err = hci_send_cmd(hdev, hci_op, sizeof(cp->bdaddr), &cp->bdaddr); 1215 + if (err < 0) 1216 + mgmt_pending_remove(cmd); 1217 + 1218 + failed: 1219 + hci_dev_unlock_bh(hdev); 1220 + hci_dev_put(hdev); 1221 + 1222 + return err; 1076 1223 } 1077 1224 1078 1225 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t 
msglen) 1079 1226 { 1080 1227 unsigned char *buf; 1081 1228 struct mgmt_hdr *hdr; 1082 - u16 opcode, len; 1229 + u16 opcode, index, len; 1083 1230 int err; 1084 1231 1085 1232 BT_DBG("got %zu bytes", msglen); ··· 1275 1068 1276 1069 hdr = (struct mgmt_hdr *) buf; 1277 1070 opcode = get_unaligned_le16(&hdr->opcode); 1071 + index = get_unaligned_le16(&hdr->index); 1278 1072 len = get_unaligned_le16(&hdr->len); 1279 1073 1280 1074 if (len != msglen - sizeof(*hdr)) { ··· 1291 1083 err = read_index_list(sk); 1292 1084 break; 1293 1085 case MGMT_OP_READ_INFO: 1294 - err = read_controller_info(sk, buf + sizeof(*hdr), len); 1086 + err = read_controller_info(sk, index); 1295 1087 break; 1296 1088 case MGMT_OP_SET_POWERED: 1297 - err = set_powered(sk, buf + sizeof(*hdr), len); 1089 + err = set_powered(sk, index, buf + sizeof(*hdr), len); 1298 1090 break; 1299 1091 case MGMT_OP_SET_DISCOVERABLE: 1300 - err = set_discoverable(sk, buf + sizeof(*hdr), len); 1092 + err = set_discoverable(sk, index, buf + sizeof(*hdr), len); 1301 1093 break; 1302 1094 case MGMT_OP_SET_CONNECTABLE: 1303 - err = set_connectable(sk, buf + sizeof(*hdr), len); 1095 + err = set_connectable(sk, index, buf + sizeof(*hdr), len); 1304 1096 break; 1305 1097 case MGMT_OP_SET_PAIRABLE: 1306 - err = set_pairable(sk, buf + sizeof(*hdr), len); 1098 + err = set_pairable(sk, index, buf + sizeof(*hdr), len); 1307 1099 break; 1308 1100 case MGMT_OP_ADD_UUID: 1309 - err = add_uuid(sk, buf + sizeof(*hdr), len); 1101 + err = add_uuid(sk, index, buf + sizeof(*hdr), len); 1310 1102 break; 1311 1103 case MGMT_OP_REMOVE_UUID: 1312 - err = remove_uuid(sk, buf + sizeof(*hdr), len); 1104 + err = remove_uuid(sk, index, buf + sizeof(*hdr), len); 1313 1105 break; 1314 1106 case MGMT_OP_SET_DEV_CLASS: 1315 - err = set_dev_class(sk, buf + sizeof(*hdr), len); 1107 + err = set_dev_class(sk, index, buf + sizeof(*hdr), len); 1316 1108 break; 1317 1109 case MGMT_OP_SET_SERVICE_CACHE: 1318 - err = set_service_cache(sk, buf + 
sizeof(*hdr), len); 1110 + err = set_service_cache(sk, index, buf + sizeof(*hdr), len); 1319 1111 break; 1320 1112 case MGMT_OP_LOAD_KEYS: 1321 - err = load_keys(sk, buf + sizeof(*hdr), len); 1113 + err = load_keys(sk, index, buf + sizeof(*hdr), len); 1322 1114 break; 1323 1115 case MGMT_OP_REMOVE_KEY: 1324 - err = remove_key(sk, buf + sizeof(*hdr), len); 1116 + err = remove_key(sk, index, buf + sizeof(*hdr), len); 1325 1117 break; 1326 1118 case MGMT_OP_DISCONNECT: 1327 - err = disconnect(sk, buf + sizeof(*hdr), len); 1119 + err = disconnect(sk, index, buf + sizeof(*hdr), len); 1328 1120 break; 1329 1121 case MGMT_OP_GET_CONNECTIONS: 1330 - err = get_connections(sk, buf + sizeof(*hdr), len); 1122 + err = get_connections(sk, index); 1331 1123 break; 1332 1124 case MGMT_OP_PIN_CODE_REPLY: 1333 - err = pin_code_reply(sk, buf + sizeof(*hdr), len); 1125 + err = pin_code_reply(sk, index, buf + sizeof(*hdr), len); 1334 1126 break; 1335 1127 case MGMT_OP_PIN_CODE_NEG_REPLY: 1336 - err = pin_code_neg_reply(sk, buf + sizeof(*hdr), len); 1128 + err = pin_code_neg_reply(sk, index, buf + sizeof(*hdr), len); 1337 1129 break; 1338 1130 case MGMT_OP_SET_IO_CAPABILITY: 1339 - err = set_io_capability(sk, buf + sizeof(*hdr), len); 1131 + err = set_io_capability(sk, index, buf + sizeof(*hdr), len); 1132 + break; 1133 + case MGMT_OP_PAIR_DEVICE: 1134 + err = pair_device(sk, index, buf + sizeof(*hdr), len); 1135 + break; 1136 + case MGMT_OP_USER_CONFIRM_REPLY: 1137 + err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len, 1); 1138 + break; 1139 + case MGMT_OP_USER_CONFIRM_NEG_REPLY: 1140 + err = user_confirm_reply(sk, index, buf + sizeof(*hdr), len, 0); 1340 1141 break; 1341 1142 default: 1342 1143 BT_DBG("Unknown op %u", opcode); 1343 - err = cmd_status(sk, opcode, 0x01); 1144 + err = cmd_status(sk, index, opcode, 0x01); 1344 1145 break; 1345 1146 } 1346 1147 ··· 1365 1148 1366 1149 int mgmt_index_added(u16 index) 1367 1150 { 1368 - struct mgmt_ev_index_added ev; 1369 - 1370 - 
put_unaligned_le16(index, &ev.index); 1371 - 1372 - return mgmt_event(MGMT_EV_INDEX_ADDED, &ev, sizeof(ev), NULL); 1151 + return mgmt_event(MGMT_EV_INDEX_ADDED, index, NULL, 0, NULL); 1373 1152 } 1374 1153 1375 1154 int mgmt_index_removed(u16 index) 1376 1155 { 1377 - struct mgmt_ev_index_added ev; 1378 - 1379 - put_unaligned_le16(index, &ev.index); 1380 - 1381 - return mgmt_event(MGMT_EV_INDEX_REMOVED, &ev, sizeof(ev), NULL); 1156 + return mgmt_event(MGMT_EV_INDEX_REMOVED, index, NULL, 0, NULL); 1382 1157 } 1383 1158 1384 1159 struct cmd_lookup { ··· 1406 1197 1407 1198 mgmt_pending_foreach(MGMT_OP_SET_POWERED, index, mode_rsp, &match); 1408 1199 1409 - put_unaligned_le16(index, &ev.index); 1410 1200 ev.val = powered; 1411 1201 1412 - ret = mgmt_event(MGMT_EV_POWERED, &ev, sizeof(ev), match.sk); 1202 + ret = mgmt_event(MGMT_EV_POWERED, index, &ev, sizeof(ev), match.sk); 1413 1203 1414 1204 if (match.sk) 1415 1205 sock_put(match.sk); ··· 1422 1214 struct cmd_lookup match = { discoverable, NULL }; 1423 1215 int ret; 1424 1216 1425 - mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, index, 1426 - mode_rsp, &match); 1217 + mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, index, mode_rsp, &match); 1427 1218 1428 - put_unaligned_le16(index, &ev.index); 1429 1219 ev.val = discoverable; 1430 1220 1431 - ret = mgmt_event(MGMT_EV_DISCOVERABLE, &ev, sizeof(ev), match.sk); 1221 + ret = mgmt_event(MGMT_EV_DISCOVERABLE, index, &ev, sizeof(ev), 1222 + match.sk); 1432 1223 1433 1224 if (match.sk) 1434 1225 sock_put(match.sk); ··· 1443 1236 1444 1237 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, index, mode_rsp, &match); 1445 1238 1446 - put_unaligned_le16(index, &ev.index); 1447 1239 ev.val = connectable; 1448 1240 1449 - ret = mgmt_event(MGMT_EV_CONNECTABLE, &ev, sizeof(ev), match.sk); 1241 + ret = mgmt_event(MGMT_EV_CONNECTABLE, index, &ev, sizeof(ev), match.sk); 1450 1242 1451 1243 if (match.sk) 1452 1244 sock_put(match.sk); ··· 1459 1253 1460 1254 memset(&ev, 0, sizeof(ev)); 1461 
1255 1462 - put_unaligned_le16(index, &ev.index); 1463 - 1464 1256 bacpy(&ev.key.bdaddr, &key->bdaddr); 1465 1257 ev.key.type = key->type; 1466 1258 memcpy(ev.key.val, key->val, 16); 1467 1259 ev.key.pin_len = key->pin_len; 1468 1260 ev.old_key_type = old_key_type; 1469 1261 1470 - return mgmt_event(MGMT_EV_NEW_KEY, &ev, sizeof(ev), NULL); 1262 + return mgmt_event(MGMT_EV_NEW_KEY, index, &ev, sizeof(ev), NULL); 1471 1263 } 1472 1264 1473 1265 int mgmt_connected(u16 index, bdaddr_t *bdaddr) 1474 1266 { 1475 1267 struct mgmt_ev_connected ev; 1476 1268 1477 - put_unaligned_le16(index, &ev.index); 1478 1269 bacpy(&ev.bdaddr, bdaddr); 1479 1270 1480 - return mgmt_event(MGMT_EV_CONNECTED, &ev, sizeof(ev), NULL); 1271 + return mgmt_event(MGMT_EV_CONNECTED, index, &ev, sizeof(ev), NULL); 1481 1272 } 1482 1273 1483 1274 static void disconnect_rsp(struct pending_cmd *cmd, void *data) ··· 1483 1280 struct sock **sk = data; 1484 1281 struct mgmt_rp_disconnect rp; 1485 1282 1486 - put_unaligned_le16(cmd->index, &rp.index); 1487 1283 bacpy(&rp.bdaddr, &cp->bdaddr); 1488 1284 1489 - cmd_complete(cmd->sk, MGMT_OP_DISCONNECT, &rp, sizeof(rp)); 1285 + cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, &rp, sizeof(rp)); 1490 1286 1491 1287 *sk = cmd->sk; 1492 1288 sock_hold(*sk); 1493 1289 1494 - list_del(&cmd->list); 1495 - mgmt_pending_free(cmd); 1290 + mgmt_pending_remove(cmd); 1496 1291 } 1497 1292 1498 1293 int mgmt_disconnected(u16 index, bdaddr_t *bdaddr) ··· 1501 1300 1502 1301 mgmt_pending_foreach(MGMT_OP_DISCONNECT, index, disconnect_rsp, &sk); 1503 1302 1504 - put_unaligned_le16(index, &ev.index); 1505 1303 bacpy(&ev.bdaddr, bdaddr); 1506 1304 1507 - err = mgmt_event(MGMT_EV_DISCONNECTED, &ev, sizeof(ev), sk); 1305 + err = mgmt_event(MGMT_EV_DISCONNECTED, index, &ev, sizeof(ev), sk); 1508 1306 1509 1307 if (sk) 1510 1308 sock_put(sk); ··· 1520 1320 if (!cmd) 1521 1321 return -ENOENT; 1522 1322 1523 - err = cmd_status(cmd->sk, MGMT_OP_DISCONNECT, EIO); 1323 + err = 
cmd_status(cmd->sk, index, MGMT_OP_DISCONNECT, EIO); 1524 1324 1525 - list_del(&cmd->list); 1526 - mgmt_pending_free(cmd); 1325 + mgmt_pending_remove(cmd); 1527 1326 1528 1327 return err; 1529 1328 } ··· 1531 1332 { 1532 1333 struct mgmt_ev_connect_failed ev; 1533 1334 1534 - put_unaligned_le16(index, &ev.index); 1535 1335 bacpy(&ev.bdaddr, bdaddr); 1536 1336 ev.status = status; 1537 1337 1538 - return mgmt_event(MGMT_EV_CONNECT_FAILED, &ev, sizeof(ev), NULL); 1338 + return mgmt_event(MGMT_EV_CONNECT_FAILED, index, &ev, sizeof(ev), NULL); 1539 1339 } 1540 1340 1541 1341 int mgmt_pin_code_request(u16 index, bdaddr_t *bdaddr) 1542 1342 { 1543 1343 struct mgmt_ev_pin_code_request ev; 1544 1344 1545 - put_unaligned_le16(index, &ev.index); 1546 1345 bacpy(&ev.bdaddr, bdaddr); 1547 1346 1548 - return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, &ev, sizeof(ev), NULL); 1347 + return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, index, &ev, sizeof(ev), 1348 + NULL); 1549 1349 } 1550 1350 1551 1351 int mgmt_pin_code_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status) 1552 1352 { 1553 1353 struct pending_cmd *cmd; 1354 + struct mgmt_rp_pin_code_reply rp; 1554 1355 int err; 1555 1356 1556 1357 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, index); 1557 1358 if (!cmd) 1558 1359 return -ENOENT; 1559 1360 1560 - if (status != 0) 1561 - err = cmd_status(cmd->sk, MGMT_OP_PIN_CODE_REPLY, status); 1562 - else 1563 - err = cmd_complete(cmd->sk, MGMT_OP_PIN_CODE_REPLY, 1564 - bdaddr, sizeof(*bdaddr)); 1361 + bacpy(&rp.bdaddr, bdaddr); 1362 + rp.status = status; 1565 1363 1566 - list_del(&cmd->list); 1567 - mgmt_pending_free(cmd); 1364 + err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_REPLY, &rp, 1365 + sizeof(rp)); 1366 + 1367 + mgmt_pending_remove(cmd); 1568 1368 1569 1369 return err; 1570 1370 } ··· 1571 1373 int mgmt_pin_code_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status) 1572 1374 { 1573 1375 struct pending_cmd *cmd; 1376 + struct mgmt_rp_pin_code_reply rp; 1574 1377 int err; 
1575 1378 1576 1379 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, index); 1577 1380 if (!cmd) 1578 1381 return -ENOENT; 1579 1382 1580 - if (status != 0) 1581 - err = cmd_status(cmd->sk, MGMT_OP_PIN_CODE_NEG_REPLY, status); 1582 - else 1583 - err = cmd_complete(cmd->sk, MGMT_OP_PIN_CODE_NEG_REPLY, 1584 - bdaddr, sizeof(*bdaddr)); 1383 + bacpy(&rp.bdaddr, bdaddr); 1384 + rp.status = status; 1585 1385 1586 - list_del(&cmd->list); 1587 - mgmt_pending_free(cmd); 1386 + err = cmd_complete(cmd->sk, index, MGMT_OP_PIN_CODE_NEG_REPLY, &rp, 1387 + sizeof(rp)); 1388 + 1389 + mgmt_pending_remove(cmd); 1588 1390 1589 1391 return err; 1392 + } 1393 + 1394 + int mgmt_user_confirm_request(u16 index, bdaddr_t *bdaddr, __le32 value) 1395 + { 1396 + struct mgmt_ev_user_confirm_request ev; 1397 + 1398 + BT_DBG("hci%u", index); 1399 + 1400 + bacpy(&ev.bdaddr, bdaddr); 1401 + put_unaligned_le32(value, &ev.value); 1402 + 1403 + return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, index, &ev, sizeof(ev), 1404 + NULL); 1405 + } 1406 + 1407 + static int confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status, 1408 + u8 opcode) 1409 + { 1410 + struct pending_cmd *cmd; 1411 + struct mgmt_rp_user_confirm_reply rp; 1412 + int err; 1413 + 1414 + cmd = mgmt_pending_find(opcode, index); 1415 + if (!cmd) 1416 + return -ENOENT; 1417 + 1418 + bacpy(&rp.bdaddr, bdaddr); 1419 + rp.status = status; 1420 + err = cmd_complete(cmd->sk, index, opcode, &rp, sizeof(rp)); 1421 + 1422 + mgmt_pending_remove(cmd); 1423 + 1424 + return err; 1425 + } 1426 + 1427 + int mgmt_user_confirm_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status) 1428 + { 1429 + return confirm_reply_complete(index, bdaddr, status, 1430 + MGMT_OP_USER_CONFIRM_REPLY); 1431 + } 1432 + 1433 + int mgmt_user_confirm_neg_reply_complete(u16 index, bdaddr_t *bdaddr, u8 status) 1434 + { 1435 + return confirm_reply_complete(index, bdaddr, status, 1436 + MGMT_OP_USER_CONFIRM_NEG_REPLY); 1437 + } 1438 + 1439 + int mgmt_auth_failed(u16 index, 
bdaddr_t *bdaddr, u8 status) 1440 + { 1441 + struct mgmt_ev_auth_failed ev; 1442 + 1443 + bacpy(&ev.bdaddr, bdaddr); 1444 + ev.status = status; 1445 + 1446 + return mgmt_event(MGMT_EV_AUTH_FAILED, index, &ev, sizeof(ev), NULL); 1590 1447 }
+4 -3
net/bluetooth/sco.c
··· 190 190 191 191 hci_dev_lock_bh(hdev); 192 192 193 - err = -ENOMEM; 194 - 195 193 if (lmp_esco_capable(hdev) && !disable_esco) 196 194 type = ESCO_LINK; 197 195 else 198 196 type = SCO_LINK; 199 197 200 198 hcon = hci_connect(hdev, type, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING); 201 - if (!hcon) 199 + if (IS_ERR(hcon)) { 200 + err = PTR_ERR(hcon); 202 201 goto done; 202 + } 203 203 204 204 conn = sco_conn_add(hcon, 0); 205 205 if (!conn) { 206 206 hci_conn_put(hcon); 207 + err = -ENOMEM; 207 208 goto done; 208 209 } 209 210
-1
net/mac80211/key.h
··· 21 21 22 22 #define WEP_IV_LEN 4 23 23 #define WEP_ICV_LEN 4 24 - #define ALG_TKIP_KEY_LEN 32 25 24 #define ALG_CCMP_KEY_LEN 16 26 25 #define CCMP_HDR_LEN 8 27 26 #define CCMP_MIC_LEN 8
+3
net/mac80211/main.c
··· 380 380 381 381 trace_api_restart_hw(local); 382 382 383 + wiphy_info(hw->wiphy, 384 + "Hardware restart was requested\n"); 385 + 383 386 /* use this reason, ieee80211_reconfig will unblock it */ 384 387 ieee80211_stop_queues_by_reason(hw, 385 388 IEEE80211_QUEUE_STOP_REASON_SUSPEND);
+6 -13
net/mac80211/rc80211_minstrel_ht.c
··· 415 415 mi->sample_count--; 416 416 } 417 417 418 - if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) { 418 + if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) 419 419 mi->sample_packets += info->status.ampdu_len; 420 - minstrel_next_sample_idx(mi); 421 - } 422 420 423 421 for (i = 0; !last; i++) { 424 422 last = (i == IEEE80211_TX_MAX_RATES - 1) || ··· 517 519 rate->count = mr->retry_count; 518 520 519 521 rate->flags = IEEE80211_TX_RC_MCS | group->flags; 520 - if (txrc->short_preamble) 521 - rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE; 522 - if (txrc->rts || rtscts) 522 + if (rtscts) 523 523 rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS; 524 524 rate->idx = index % MCS_GROUP_RATES + (group->streams - 1) * MCS_GROUP_RATES; 525 525 } ··· 549 553 sample_idx = sample_table[mg->column][mg->index]; 550 554 mr = &mg->rates[sample_idx]; 551 555 sample_idx += mi->sample_group * MCS_GROUP_RATES; 556 + minstrel_next_sample_idx(mi); 552 557 553 558 /* 554 559 * When not using MRR, do not sample if the probability is already 555 560 * higher than 95% to avoid wasting airtime 556 561 */ 557 562 if (!mp->has_mrr && (mr->probability > MINSTREL_FRAC(95, 100))) 558 - goto next; 563 + return -1; 559 564 560 565 /* 561 566 * Make sure that lower rates get sampled only occasionally, ··· 565 568 if (minstrel_get_duration(sample_idx) > 566 569 minstrel_get_duration(mi->max_tp_rate)) { 567 570 if (mr->sample_skipped < 20) 568 - goto next; 571 + return -1; 569 572 570 573 if (mi->sample_slow++ > 2) 571 - goto next; 574 + return -1; 572 575 } 573 576 574 577 return sample_idx; 575 - 576 - next: 577 - minstrel_next_sample_idx(mi); 578 - return -1; 579 578 } 580 579 581 580 static void
-3
net/mac80211/rc80211_pid.h
··· 24 24 /* Fixed point arithmetic shifting amount. */ 25 25 #define RC_PID_ARITH_SHIFT 8 26 26 27 - /* Fixed point arithmetic factor. */ 28 - #define RC_PID_ARITH_FACTOR (1 << RC_PID_ARITH_SHIFT) 29 - 30 27 /* Proportional PID component coefficient. */ 31 28 #define RC_PID_COEFF_P 15 32 29 /* Integral PID component coefficient. */
+24 -40
net/mac80211/scan.c
··· 258 258 return true; 259 259 } 260 260 261 - static bool __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted, 261 + static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted, 262 262 bool was_hw_scan) 263 263 { 264 264 struct ieee80211_local *local = hw_to_local(hw); 265 + bool on_oper_chan; 266 + bool enable_beacons = false; 265 267 266 268 lockdep_assert_held(&local->mtx); 267 269 ··· 277 275 aborted = true; 278 276 279 277 if (WARN_ON(!local->scan_req)) 280 - return false; 278 + return; 281 279 282 280 if (was_hw_scan && !aborted && ieee80211_prep_hw_scan(local)) { 283 281 int rc = drv_hw_scan(local, local->scan_sdata, local->hw_scan_req); 284 282 if (rc == 0) 285 - return false; 283 + return; 286 284 } 287 285 288 286 kfree(local->hw_scan_req); ··· 296 294 local->scanning = 0; 297 295 local->scan_channel = NULL; 298 296 299 - return true; 300 - } 301 - 302 - static void __ieee80211_scan_completed_finish(struct ieee80211_hw *hw, 303 - bool was_hw_scan) 304 - { 305 - struct ieee80211_local *local = hw_to_local(hw); 306 - bool on_oper_chan; 307 - bool enable_beacons = false; 308 - 309 - mutex_lock(&local->mtx); 310 297 on_oper_chan = ieee80211_cfg_on_oper_channel(local); 311 298 312 - WARN_ON(local->scanning & (SCAN_SW_SCANNING | SCAN_HW_SCANNING)); 313 - 314 - if (was_hw_scan || !on_oper_chan) { 315 - if (WARN_ON(local->scan_channel)) 316 - local->scan_channel = NULL; 299 + if (was_hw_scan || !on_oper_chan) 317 300 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); 318 - } else 301 + else 319 302 /* Set power back to normal operating levels. 
*/ 320 303 ieee80211_hw_config(local, 0); 321 304 ··· 318 331 } 319 332 320 333 ieee80211_recalc_idle(local); 321 - mutex_unlock(&local->mtx); 322 334 323 335 ieee80211_mlme_notify_scan_completed(local); 324 336 ieee80211_ibss_notify_scan_completed(local); ··· 672 686 { 673 687 struct ieee80211_local *local = 674 688 container_of(work, struct ieee80211_local, scan_work.work); 675 - struct ieee80211_sub_if_data *sdata = local->scan_sdata; 689 + struct ieee80211_sub_if_data *sdata; 676 690 unsigned long next_delay = 0; 677 - bool aborted, hw_scan, finish; 691 + bool aborted, hw_scan; 678 692 679 693 mutex_lock(&local->mtx); 694 + 695 + sdata = local->scan_sdata; 680 696 681 697 if (test_and_clear_bit(SCAN_COMPLETED, &local->scanning)) { 682 698 aborted = test_and_clear_bit(SCAN_ABORTED, &local->scanning); ··· 743 755 } while (next_delay == 0); 744 756 745 757 ieee80211_queue_delayed_work(&local->hw, &local->scan_work, next_delay); 746 - mutex_unlock(&local->mtx); 747 - return; 758 + goto out; 748 759 749 760 out_complete: 750 761 hw_scan = test_bit(SCAN_HW_SCANNING, &local->scanning); 751 - finish = __ieee80211_scan_completed(&local->hw, aborted, hw_scan); 752 - mutex_unlock(&local->mtx); 753 - if (finish) 754 - __ieee80211_scan_completed_finish(&local->hw, hw_scan); 755 - return; 756 - 762 + __ieee80211_scan_completed(&local->hw, aborted, hw_scan); 757 763 out: 758 764 mutex_unlock(&local->mtx); 759 765 } ··· 817 835 void ieee80211_scan_cancel(struct ieee80211_local *local) 818 836 { 819 837 bool abortscan; 820 - bool finish = false; 821 838 822 839 /* 823 840 * We are only canceling software scan, or deferred scan that was not ··· 836 855 837 856 mutex_lock(&local->mtx); 838 857 abortscan = local->scan_req && !test_bit(SCAN_HW_SCANNING, &local->scanning); 839 - if (abortscan) 840 - finish = __ieee80211_scan_completed(&local->hw, true, false); 841 - mutex_unlock(&local->mtx); 842 - 843 858 if (abortscan) { 844 - /* The scan is canceled, but stop work from being 
pending */ 845 - cancel_delayed_work_sync(&local->scan_work); 859 + /* 860 + * The scan is canceled, but stop work from being pending. 861 + * 862 + * If the work is currently running, it must be blocked on 863 + * the mutex, but we'll set scan_sdata = NULL and it'll 864 + * simply exit once it acquires the mutex. 865 + */ 866 + cancel_delayed_work(&local->scan_work); 867 + /* and clean up */ 868 + __ieee80211_scan_completed(&local->hw, true, false); 846 869 } 847 - if (finish) 848 - __ieee80211_scan_completed_finish(&local->hw, false); 870 + mutex_unlock(&local->mtx); 849 871 }
-1
net/mac80211/work.c
··· 30 30 #define IEEE80211_AUTH_MAX_TRIES 3 31 31 #define IEEE80211_ASSOC_TIMEOUT (HZ / 5) 32 32 #define IEEE80211_ASSOC_MAX_TRIES 3 33 - #define IEEE80211_MAX_PROBE_TRIES 5 34 33 35 34 enum work_action { 36 35 WORK_ACT_MISMATCH,
+28 -11
net/wireless/reg.c
··· 63 63 /* To trigger userspace events */ 64 64 static struct platform_device *reg_pdev; 65 65 66 + static struct device_type reg_device_type = { 67 + .uevent = reg_device_uevent, 68 + }; 69 + 66 70 /* 67 71 * Central wireless core regulatory domains, we only need two, 68 72 * the current one and a world regulatory domain in case we have no ··· 366 362 367 363 /* 368 364 * This lets us keep regulatory code which is updated on a regulatory 369 - * basis in userspace. 365 + * basis in userspace. Country information is filled in by 366 + * reg_device_uevent 370 367 */ 371 368 static int call_crda(const char *alpha2) 372 369 { 373 - char country_env[9 + 2] = "COUNTRY="; 374 - char *envp[] = { 375 - country_env, 376 - NULL 377 - }; 378 - 379 370 if (!is_world_regdom((char *) alpha2)) 380 371 pr_info("Calling CRDA for country: %c%c\n", 381 372 alpha2[0], alpha2[1]); ··· 380 381 /* query internal regulatory database (if it exists) */ 381 382 reg_regdb_query(alpha2); 382 383 383 - country_env[8] = alpha2[0]; 384 - country_env[9] = alpha2[1]; 385 - 386 - return kobject_uevent_env(&reg_pdev->dev.kobj, KOBJ_CHANGE, envp); 384 + return kobject_uevent(&reg_pdev->dev.kobj, KOBJ_CHANGE); 387 385 } 388 386 389 387 /* Used by nl80211 before kmalloc'ing our regulatory domain */ ··· 2083 2087 return r; 2084 2088 } 2085 2089 2090 + #ifdef CONFIG_HOTPLUG 2091 + int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env) 2092 + { 2093 + if (last_request && !last_request->processed) { 2094 + if (add_uevent_var(env, "COUNTRY=%c%c", 2095 + last_request->alpha2[0], 2096 + last_request->alpha2[1])) 2097 + return -ENOMEM; 2098 + } 2099 + 2100 + return 0; 2101 + } 2102 + #else 2103 + int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env) 2104 + { 2105 + return -ENODEV; 2106 + } 2107 + #endif /* CONFIG_HOTPLUG */ 2108 + 2086 2109 /* Caller must hold cfg80211_mutex */ 2087 2110 void reg_device_remove(struct wiphy *wiphy) 2088 2111 { ··· 2132 2117 reg_pdev = 
platform_device_register_simple("regulatory", 0, NULL, 0); 2133 2118 if (IS_ERR(reg_pdev)) 2134 2119 return PTR_ERR(reg_pdev); 2120 + 2121 + reg_pdev->dev.type = &reg_device_type; 2135 2122 2136 2123 spin_lock_init(&reg_requests_lock); 2137 2124 spin_lock_init(&reg_pending_beacons_lock);
+1
net/wireless/reg.h
··· 8 8 9 9 int regulatory_hint_user(const char *alpha2); 10 10 11 + int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env); 11 12 void reg_device_remove(struct wiphy *wiphy); 12 13 13 14 int __init regulatory_init(void);