Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'wireless-drivers-next-for-davem-2019-03-01' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
wireless-drivers-next patches for 5.1

Last set of patches. A new hardware support for mt76 otherwise quite
normal.

Major changes:

mt76

* add driver for MT7603E/MT7628

ath10k

* more preparation for SDIO support

wil6210

* support up to 20 stations in AP mode
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+7120 -767
+19
Documentation/devicetree/bindings/net/wireless/mediatek,mt76.txt
··· 4 4 device. The node is expected to be specified as a child node of the PCI 5 5 controller to which the wireless chip is connected. 6 6 7 + Alternatively, it can specify the wireless part of the MT7628/MT7688 SoC. 8 + For SoC, use the compatible string "mediatek,mt7628-wmac" and the following 9 + properties: 10 + 11 + - reg: Address and length of the register set for the device. 12 + - interrupts: Main device interrupt 13 + 7 14 Optional properties: 8 15 9 16 - mac-address: See ethernet.txt in the parent directory ··· 36 29 }; 37 30 }; 38 31 }; 32 + }; 33 + 34 + MT7628 example: 35 + 36 + wmac: wmac@10300000 { 37 + compatible = "mediatek,mt7628-wmac"; 38 + reg = <0x10300000 0x100000>; 39 + 40 + interrupt-parent = <&cpuintc>; 41 + interrupts = <6>; 42 + 43 + mediatek,mtd-eeprom = <&factory 0x0000>; 39 44 };
+64 -2
drivers/net/wireless/ath/ath10k/ce.c
··· 1066 1066 * Guts of ath10k_ce_completed_send_next. 1067 1067 * The caller takes responsibility for any necessary locking. 1068 1068 */ 1069 - int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state, 1070 - void **per_transfer_contextp) 1069 + static int _ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state, 1070 + void **per_transfer_contextp) 1071 1071 { 1072 1072 struct ath10k_ce_ring *src_ring = ce_state->src_ring; 1073 1073 u32 ctrl_addr = ce_state->ctrl_addr; ··· 1117 1117 src_ring->sw_index = sw_index; 1118 1118 1119 1119 return 0; 1120 + } 1121 + 1122 + static int _ath10k_ce_completed_send_next_nolock_64(struct ath10k_ce_pipe *ce_state, 1123 + void **per_transfer_contextp) 1124 + { 1125 + struct ath10k_ce_ring *src_ring = ce_state->src_ring; 1126 + u32 ctrl_addr = ce_state->ctrl_addr; 1127 + struct ath10k *ar = ce_state->ar; 1128 + unsigned int nentries_mask = src_ring->nentries_mask; 1129 + unsigned int sw_index = src_ring->sw_index; 1130 + unsigned int read_index; 1131 + struct ce_desc_64 *desc; 1132 + 1133 + if (src_ring->hw_index == sw_index) { 1134 + /* 1135 + * The SW completion index has caught up with the cached 1136 + * version of the HW completion index. 1137 + * Update the cached HW completion index to see whether 1138 + * the SW has really caught up to the HW, or if the cached 1139 + * value of the HW index has become stale. 
1140 + */ 1141 + 1142 + read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); 1143 + if (read_index == 0xffffffff) 1144 + return -ENODEV; 1145 + 1146 + read_index &= nentries_mask; 1147 + src_ring->hw_index = read_index; 1148 + } 1149 + 1150 + if (ar->hw_params.rri_on_ddr) 1151 + read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr); 1152 + else 1153 + read_index = src_ring->hw_index; 1154 + 1155 + if (read_index == sw_index) 1156 + return -EIO; 1157 + 1158 + if (per_transfer_contextp) 1159 + *per_transfer_contextp = 1160 + src_ring->per_transfer_context[sw_index]; 1161 + 1162 + /* sanity */ 1163 + src_ring->per_transfer_context[sw_index] = NULL; 1164 + desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space, 1165 + sw_index); 1166 + desc->nbytes = 0; 1167 + 1168 + /* Update sw_index */ 1169 + sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index); 1170 + src_ring->sw_index = sw_index; 1171 + 1172 + return 0; 1173 + } 1174 + 1175 + int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state, 1176 + void **per_transfer_contextp) 1177 + { 1178 + return ce_state->ops->ce_completed_send_next_nolock(ce_state, 1179 + per_transfer_contextp); 1120 1180 } 1121 1181 EXPORT_SYMBOL(ath10k_ce_completed_send_next_nolock); 1122 1182 ··· 1899 1839 .ce_send_nolock = _ath10k_ce_send_nolock, 1900 1840 .ce_set_src_ring_base_addr_hi = NULL, 1901 1841 .ce_set_dest_ring_base_addr_hi = NULL, 1842 + .ce_completed_send_next_nolock = _ath10k_ce_completed_send_next_nolock, 1902 1843 }; 1903 1844 1904 1845 static const struct ath10k_ce_ops ce_64_ops = { ··· 1914 1853 .ce_send_nolock = _ath10k_ce_send_nolock_64, 1915 1854 .ce_set_src_ring_base_addr_hi = ath10k_ce_set_src_ring_base_addr_hi, 1916 1855 .ce_set_dest_ring_base_addr_hi = ath10k_ce_set_dest_ring_base_addr_hi, 1856 + .ce_completed_send_next_nolock = _ath10k_ce_completed_send_next_nolock_64, 1917 1857 }; 1918 1858 1919 1859 static void ath10k_ce_set_ops(struct ath10k *ar,
+2
drivers/net/wireless/ath/ath10k/ce.h
··· 329 329 void (*ce_set_dest_ring_base_addr_hi)(struct ath10k *ar, 330 330 u32 ce_ctrl_addr, 331 331 u64 addr); 332 + int (*ce_completed_send_next_nolock)(struct ath10k_ce_pipe *ce_state, 333 + void **per_transfer_contextp); 332 334 }; 333 335 334 336 static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
+21 -8
drivers/net/wireless/ath/ath10k/core.c
··· 549 549 .sw_decrypt_mcast_mgmt = true, 550 550 .hw_ops = &wcn3990_ops, 551 551 .decap_align_bytes = 1, 552 - .num_peers = TARGET_HL_10_TLV_NUM_PEERS, 552 + .num_peers = TARGET_HL_TLV_NUM_PEERS, 553 553 .n_cipher_suites = 11, 554 - .ast_skid_limit = TARGET_HL_10_TLV_AST_SKID_LIMIT, 555 - .num_wds_entries = TARGET_HL_10_TLV_NUM_WDS_ENTRIES, 554 + .ast_skid_limit = TARGET_HL_TLV_AST_SKID_LIMIT, 555 + .num_wds_entries = TARGET_HL_TLV_NUM_WDS_ENTRIES, 556 556 .target_64bit = true, 557 557 .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL_DUAL_MAC, 558 558 .per_ce_irq = true, ··· 637 637 ath10k_bmi_write32(ar, hi_mbox_isr_yield_limit, 99); 638 638 ath10k_bmi_read32(ar, hi_acs_flags, &param); 639 639 640 - param |= (HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_SET | 641 - HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_SET | 642 - HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE); 640 + /* Data transfer is not initiated, when reduced Tx completion 641 + * is used for SDIO. disable it until fixed 642 + */ 643 + param &= ~HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_SET; 643 644 645 + /* Alternate credit size of 1544 as used by SDIO firmware is 646 + * not big enough for mac80211 / native wifi frames. disable it 647 + */ 648 + param &= ~HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE; 649 + param |= HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_SET; 644 650 ath10k_bmi_write32(ar, hi_acs_flags, param); 651 + 652 + /* Explicitly set fwlog prints to zero as target may turn it on 653 + * based on scratch registers. 
654 + */ 655 + ath10k_bmi_read32(ar, hi_option_flag, &param); 656 + param |= HI_OPTION_DISABLE_DBGLOG; 657 + ath10k_bmi_write32(ar, hi_option_flag, param); 645 658 } 646 659 647 660 static int ath10k_init_configure_target(struct ath10k *ar) ··· 2317 2304 else 2318 2305 ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC; 2319 2306 ar->wow.max_num_patterns = TARGET_TLV_NUM_WOW_PATTERNS; 2320 - ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV | 2321 - WMI_STAT_PEER; 2307 + ar->fw_stats_req_mask = WMI_TLV_STAT_PDEV | WMI_TLV_STAT_VDEV | 2308 + WMI_TLV_STAT_PEER | WMI_TLV_STAT_PEER_EXTD; 2322 2309 ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM; 2323 2310 ar->wmi.mgmt_max_num_pending_tx = TARGET_TLV_MGMT_NUM_MSDU_DESC; 2324 2311 break;
+1 -1
drivers/net/wireless/ath/ath10k/core.h
··· 189 189 u32 peer_rssi; 190 190 u32 peer_tx_rate; 191 191 u32 peer_rx_rate; /* 10x only */ 192 - u32 rx_duration; 192 + u64 rx_duration; 193 193 }; 194 194 195 195 struct ath10k_fw_extd_stats_peer {
+3
drivers/net/wireless/ath/ath10k/debug.c
··· 1252 1252 if (WARN_ON(ar->hw_params.cal_data_len > ATH10K_DEBUG_CAL_DATA_LEN)) 1253 1253 return -EINVAL; 1254 1254 1255 + if (ar->hw_params.cal_data_len == 0) 1256 + return -EOPNOTSUPP; 1257 + 1255 1258 hi_addr = host_interest_item_address(HI_ITEM(hi_board_data)); 1256 1259 1257 1260 ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr));
+4 -3
drivers/net/wireless/ath/ath10k/debugfs_sta.c
··· 685 685 " %llu ", stats->ht[j][i]); 686 686 len += scnprintf(buf + len, size - len, "\n"); 687 687 len += scnprintf(buf + len, size - len, 688 - " BW %s (20,40,80,160 MHz)\n", str[j]); 688 + " BW %s (20,5,10,40,80,160 MHz)\n", str[j]); 689 689 len += scnprintf(buf + len, size - len, 690 - " %llu %llu %llu %llu\n", 690 + " %llu %llu %llu %llu %llu %llu\n", 691 691 stats->bw[j][0], stats->bw[j][1], 692 - stats->bw[j][2], stats->bw[j][3]); 692 + stats->bw[j][2], stats->bw[j][3], 693 + stats->bw[j][4], stats->bw[j][5]); 693 694 len += scnprintf(buf + len, size - len, 694 695 " NSS %s (1x1,2x2,3x3,4x4)\n", str[j]); 695 696 len += scnprintf(buf + len, size - len,
+86
drivers/net/wireless/ath/ath10k/htt.h
··· 578 578 #define HTT_TX_CMPL_FLAG_PA_PRESENT BIT(2) 579 579 #define HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT BIT(3) 580 580 581 + #define HTT_TX_DATA_RSSI_ENABLE_WCN3990 BIT(3) 582 + #define HTT_TX_DATA_APPEND_RETRIES BIT(0) 583 + #define HTT_TX_DATA_APPEND_TIMESTAMP BIT(1) 584 + 581 585 struct htt_rx_indication_hdr { 582 586 u8 info0; /* %HTT_RX_INDICATION_INFO0_ */ 583 587 __le16 peer_id; ··· 855 851 }; 856 852 857 853 #define HTT_TX_COMPL_INV_MSDU_ID 0xFFFF 854 + 855 + struct htt_append_retries { 856 + __le16 msdu_id; 857 + u8 tx_retries; 858 + u8 flag; 859 + } __packed; 860 + 861 + struct htt_data_tx_completion_ext { 862 + struct htt_append_retries a_retries; 863 + __le32 t_stamp; 864 + __le16 msdus_rssi[0]; 865 + } __packed; 866 + 867 + /** 868 + * @brief target -> host TX completion indication message definition 869 + * 870 + * @details 871 + * The following diagram shows the format of the TX completion indication sent 872 + * from the target to the host 873 + * 874 + * |31 28|27|26|25|24|23 16| 15 |14 11|10 8|7 0| 875 + * |-------------------------------------------------------------| 876 + * header: |rsvd |A2|TP|A1|A0| num | t_i| tid |status| msg_type | 877 + * |-------------------------------------------------------------| 878 + * payload: | MSDU1 ID | MSDU0 ID | 879 + * |-------------------------------------------------------------| 880 + * : MSDU3 ID : MSDU2 ID : 881 + * |-------------------------------------------------------------| 882 + * | struct htt_tx_compl_ind_append_retries | 883 + * |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -| 884 + * | struct htt_tx_compl_ind_append_tx_tstamp | 885 + * |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -| 886 + * | MSDU1 ACK RSSI | MSDU0 ACK RSSI | 887 + * |-------------------------------------------------------------| 888 + * : MSDU3 ACK RSSI : MSDU2 ACK RSSI : 889 + * |- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -| 890 + * -msg_type 891 + * Bits 7:0 892 + * 
Purpose: identifies this as HTT TX completion indication 893 + * -status 894 + * Bits 10:8 895 + * Purpose: the TX completion status of payload fragmentations descriptors 896 + * Value: could be HTT_TX_COMPL_IND_STAT_OK or HTT_TX_COMPL_IND_STAT_DISCARD 897 + * -tid 898 + * Bits 14:11 899 + * Purpose: the tid associated with those fragmentation descriptors. It is 900 + * valid or not, depending on the tid_invalid bit. 901 + * Value: 0 to 15 902 + * -tid_invalid 903 + * Bits 15:15 904 + * Purpose: this bit indicates whether the tid field is valid or not 905 + * Value: 0 indicates valid, 1 indicates invalid 906 + * -num 907 + * Bits 23:16 908 + * Purpose: the number of payload in this indication 909 + * Value: 1 to 255 910 + * -A0 = append 911 + * Bits 24:24 912 + * Purpose: append the struct htt_tx_compl_ind_append_retries which contains 913 + * the number of tx retries for one MSDU at the end of this message 914 + * Value: 0 indicates no appending, 1 indicates appending 915 + * -A1 = append1 916 + * Bits 25:25 917 + * Purpose: Append the struct htt_tx_compl_ind_append_tx_tstamp which 918 + * contains the timestamp info for each TX msdu id in payload. 919 + * Value: 0 indicates no appending, 1 indicates appending 920 + * -TP = MSDU tx power presence 921 + * Bits 26:26 922 + * Purpose: Indicate whether the TX_COMPL_IND includes a tx power report 923 + * for each MSDU referenced by the TX_COMPL_IND message. 924 + * The order of the per-MSDU tx power reports matches the order 925 + * of the MSDU IDs. 926 + * Value: 0 indicates not appending, 1 indicates appending 927 + * -A2 = append2 928 + * Bits 27:27 929 + * Purpose: Indicate whether data ACK RSSI is appended for each MSDU in 930 + * TX_COMP_IND message. The order of the per-MSDU ACK RSSI report 931 + * matches the order of the MSDU IDs. 932 + * The ACK RSSI values are valid when status is COMPLETE_OK (and 933 + * this append2 bit is set). 
934 + * Value: 0 indicates not appending, 1 indicates appending 935 + */ 858 936 859 937 struct htt_data_tx_completion { 860 938 union {
+30 -20
drivers/net/wireless/ath/ath10k/htt_rx.c
··· 2119 2119 hdr = (struct ieee80211_hdr *)skb->data; 2120 2120 rx_status = IEEE80211_SKB_RXCB(skb); 2121 2121 rx_status->chains |= BIT(0); 2122 - rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR + 2123 - rx->ppdu.combined_rssi; 2124 - rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL; 2122 + if (rx->ppdu.combined_rssi == 0) { 2123 + /* SDIO firmware does not provide signal */ 2124 + rx_status->signal = 0; 2125 + rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL; 2126 + } else { 2127 + rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR + 2128 + rx->ppdu.combined_rssi; 2129 + rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL; 2130 + } 2125 2131 2126 2132 spin_lock_bh(&ar->data_lock); 2127 2133 ch = ar->scan_channel; ··· 2216 2210 __le16 msdu_id, *msdus; 2217 2211 bool rssi_enabled = false; 2218 2212 u8 msdu_count = 0, num_airtime_records, tid; 2219 - int i; 2213 + int i, htt_pad = 0; 2220 2214 struct htt_data_tx_compl_ppdu_dur *ppdu_info; 2221 2215 struct ath10k_peer *peer; 2222 2216 u16 ppdu_info_offset = 0, peer_id; ··· 2245 2239 2246 2240 msdu_count = resp->data_tx_completion.num_msdus; 2247 2241 msdus = resp->data_tx_completion.msdus; 2242 + rssi_enabled = ath10k_is_rssi_enable(&ar->hw_params, resp); 2248 2243 2249 - if (resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_DATA_RSSI) 2250 - rssi_enabled = true; 2244 + if (rssi_enabled) 2245 + htt_pad = ath10k_tx_data_rssi_get_pad_bytes(&ar->hw_params, 2246 + resp); 2251 2247 2252 2248 for (i = 0; i < msdu_count; i++) { 2253 2249 msdu_id = msdus[i]; ··· 2261 2253 * last msdu id with 0xffff 2262 2254 */ 2263 2255 if (msdu_count & 0x01) { 2264 - msdu_id = msdus[msdu_count + i + 1]; 2256 + msdu_id = msdus[msdu_count + i + 1 + htt_pad]; 2265 2257 tx_done.ack_rssi = __le16_to_cpu(msdu_id); 2266 2258 } else { 2267 - msdu_id = msdus[msdu_count + i]; 2259 + msdu_id = msdus[msdu_count + i + htt_pad]; 2268 2260 tx_done.ack_rssi = __le16_to_cpu(msdu_id); 2269 2261 } 2270 2262 } ··· 2921 2913 struct rate_info *txrate = &arsta->txrate; 2922 2914 struct 
ath10k_htt_tx_stats *tx_stats; 2923 2915 int idx, ht_idx, gi, mcs, bw, nss; 2916 + unsigned long flags; 2924 2917 2925 2918 if (!arsta->tx_stats) 2926 2919 return; 2927 2920 2928 2921 tx_stats = arsta->tx_stats; 2929 - gi = (arsta->txrate.flags & RATE_INFO_FLAGS_SHORT_GI); 2930 - ht_idx = txrate->mcs + txrate->nss * 8; 2931 - mcs = txrate->mcs; 2922 + flags = txrate->flags; 2923 + gi = test_bit(ATH10K_RATE_INFO_FLAGS_SGI_BIT, &flags); 2924 + mcs = ATH10K_HW_MCS_RATE(pstats->ratecode); 2932 2925 bw = txrate->bw; 2933 2926 nss = txrate->nss; 2934 - idx = mcs * 8 + 8 * 10 * nss; 2927 + ht_idx = mcs + (nss - 1) * 8; 2928 + idx = mcs * 8 + 8 * 10 * (nss - 1); 2935 2929 idx += bw * 2 + gi; 2936 2930 2937 2931 #define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name] ··· 2979 2969 } 2980 2970 STATS_OP_FMT(AMPDU).bw[0][bw] += 2981 2971 pstats->succ_bytes + pstats->retry_bytes; 2982 - STATS_OP_FMT(AMPDU).nss[0][nss] += 2972 + STATS_OP_FMT(AMPDU).nss[0][nss - 1] += 2983 2973 pstats->succ_bytes + pstats->retry_bytes; 2984 2974 STATS_OP_FMT(AMPDU).gi[0][gi] += 2985 2975 pstats->succ_bytes + pstats->retry_bytes; ··· 2987 2977 pstats->succ_bytes + pstats->retry_bytes; 2988 2978 STATS_OP_FMT(AMPDU).bw[1][bw] += 2989 2979 pstats->succ_pkts + pstats->retry_pkts; 2990 - STATS_OP_FMT(AMPDU).nss[1][nss] += 2980 + STATS_OP_FMT(AMPDU).nss[1][nss - 1] += 2991 2981 pstats->succ_pkts + pstats->retry_pkts; 2992 2982 STATS_OP_FMT(AMPDU).gi[1][gi] += 2993 2983 pstats->succ_pkts + pstats->retry_pkts; ··· 2999 2989 } 3000 2990 3001 2991 STATS_OP_FMT(SUCC).bw[0][bw] += pstats->succ_bytes; 3002 - STATS_OP_FMT(SUCC).nss[0][nss] += pstats->succ_bytes; 2992 + STATS_OP_FMT(SUCC).nss[0][nss - 1] += pstats->succ_bytes; 3003 2993 STATS_OP_FMT(SUCC).gi[0][gi] += pstats->succ_bytes; 3004 2994 3005 2995 STATS_OP_FMT(SUCC).bw[1][bw] += pstats->succ_pkts; 3006 - STATS_OP_FMT(SUCC).nss[1][nss] += pstats->succ_pkts; 2996 + STATS_OP_FMT(SUCC).nss[1][nss - 1] += pstats->succ_pkts; 3007 2997 
STATS_OP_FMT(SUCC).gi[1][gi] += pstats->succ_pkts; 3008 2998 3009 2999 STATS_OP_FMT(FAIL).bw[0][bw] += pstats->failed_bytes; 3010 - STATS_OP_FMT(FAIL).nss[0][nss] += pstats->failed_bytes; 3000 + STATS_OP_FMT(FAIL).nss[0][nss - 1] += pstats->failed_bytes; 3011 3001 STATS_OP_FMT(FAIL).gi[0][gi] += pstats->failed_bytes; 3012 3002 3013 3003 STATS_OP_FMT(FAIL).bw[1][bw] += pstats->failed_pkts; 3014 - STATS_OP_FMT(FAIL).nss[1][nss] += pstats->failed_pkts; 3004 + STATS_OP_FMT(FAIL).nss[1][nss - 1] += pstats->failed_pkts; 3015 3005 STATS_OP_FMT(FAIL).gi[1][gi] += pstats->failed_pkts; 3016 3006 3017 3007 STATS_OP_FMT(RETRY).bw[0][bw] += pstats->retry_bytes; 3018 - STATS_OP_FMT(RETRY).nss[0][nss] += pstats->retry_bytes; 3008 + STATS_OP_FMT(RETRY).nss[0][nss - 1] += pstats->retry_bytes; 3019 3009 STATS_OP_FMT(RETRY).gi[0][gi] += pstats->retry_bytes; 3020 3010 3021 3011 STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts; 3022 - STATS_OP_FMT(RETRY).nss[1][nss] += pstats->retry_pkts; 3012 + STATS_OP_FMT(RETRY).nss[1][nss - 1] += pstats->retry_pkts; 3023 3013 STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts; 3024 3014 3025 3015 if (txrate->flags >= RATE_INFO_FLAGS_MCS) {
+31 -1
drivers/net/wireless/ath/ath10k/hw.c
··· 1100 1100 return ret; 1101 1101 } 1102 1102 1103 + static int ath10k_htt_tx_rssi_enable(struct htt_resp *resp) 1104 + { 1105 + return (resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_DATA_RSSI); 1106 + } 1107 + 1108 + static int ath10k_htt_tx_rssi_enable_wcn3990(struct htt_resp *resp) 1109 + { 1110 + return (resp->data_tx_completion.flags2 & 1111 + HTT_TX_DATA_RSSI_ENABLE_WCN3990); 1112 + } 1113 + 1114 + static int ath10k_get_htt_tx_data_rssi_pad(struct htt_resp *resp) 1115 + { 1116 + struct htt_data_tx_completion_ext extd; 1117 + int pad_bytes = 0; 1118 + 1119 + if (resp->data_tx_completion.flags2 & HTT_TX_DATA_APPEND_RETRIES) 1120 + pad_bytes += sizeof(extd.a_retries) / 1121 + sizeof(extd.msdus_rssi[0]); 1122 + 1123 + if (resp->data_tx_completion.flags2 & HTT_TX_DATA_APPEND_TIMESTAMP) 1124 + pad_bytes += sizeof(extd.t_stamp) / sizeof(extd.msdus_rssi[0]); 1125 + 1126 + return pad_bytes; 1127 + } 1128 + 1103 1129 const struct ath10k_hw_ops qca988x_ops = { 1104 1130 .set_coverage_class = ath10k_hw_qca988x_set_coverage_class, 1105 1131 }; ··· 1150 1124 const struct ath10k_hw_ops qca6174_ops = { 1151 1125 .set_coverage_class = ath10k_hw_qca988x_set_coverage_class, 1152 1126 .enable_pll_clk = ath10k_hw_qca6174_enable_pll_clock, 1127 + .is_rssi_enable = ath10k_htt_tx_rssi_enable, 1153 1128 }; 1154 1129 1155 - const struct ath10k_hw_ops wcn3990_ops = {}; 1130 + const struct ath10k_hw_ops wcn3990_ops = { 1131 + .tx_data_rssi_pad_bytes = ath10k_get_htt_tx_data_rssi_pad, 1132 + .is_rssi_enable = ath10k_htt_tx_rssi_enable_wcn3990, 1133 + };
+25 -3
drivers/net/wireless/ath/ath10k/hw.h
··· 609 609 }; 610 610 611 611 struct htt_rx_desc; 612 + struct htt_resp; 613 + struct htt_data_tx_completion_ext; 612 614 613 615 /* Defines needed for Rx descriptor abstraction */ 614 616 struct ath10k_hw_ops { ··· 618 616 void (*set_coverage_class)(struct ath10k *ar, s16 value); 619 617 int (*enable_pll_clk)(struct ath10k *ar); 620 618 bool (*rx_desc_get_msdu_limit_error)(struct htt_rx_desc *rxd); 619 + int (*tx_data_rssi_pad_bytes)(struct htt_resp *htt); 620 + int (*is_rssi_enable)(struct htt_resp *resp); 621 621 }; 622 622 623 623 extern const struct ath10k_hw_ops qca988x_ops; ··· 645 641 if (hw->hw_ops->rx_desc_get_msdu_limit_error) 646 642 return hw->hw_ops->rx_desc_get_msdu_limit_error(rxd); 647 643 return false; 644 + } 645 + 646 + static inline int 647 + ath10k_tx_data_rssi_get_pad_bytes(struct ath10k_hw_params *hw, 648 + struct htt_resp *htt) 649 + { 650 + if (hw->hw_ops->tx_data_rssi_pad_bytes) 651 + return hw->hw_ops->tx_data_rssi_pad_bytes(htt); 652 + return 0; 653 + } 654 + 655 + static inline int 656 + ath10k_is_rssi_enable(struct ath10k_hw_params *hw, 657 + struct htt_resp *resp) 658 + { 659 + if (hw->hw_ops->is_rssi_enable) 660 + return hw->hw_ops->is_rssi_enable(resp); 661 + return 0; 648 662 } 649 663 650 664 /* Target specific defines for MAIN firmware */ ··· 752 730 #define TARGET_TLV_MGMT_NUM_MSDU_DESC (50) 753 731 754 732 /* Target specific defines for WMI-HL-1.0 firmware */ 755 - #define TARGET_HL_10_TLV_NUM_PEERS 14 756 - #define TARGET_HL_10_TLV_AST_SKID_LIMIT 6 757 - #define TARGET_HL_10_TLV_NUM_WDS_ENTRIES 2 733 + #define TARGET_HL_TLV_NUM_PEERS 33 734 + #define TARGET_HL_TLV_AST_SKID_LIMIT 16 735 + #define TARGET_HL_TLV_NUM_WDS_ENTRIES 2 758 736 759 737 /* Diagnostic Window */ 760 738 #define CE_DIAG_PIPE 7
+17 -9
drivers/net/wireless/ath/ath10k/sdio.c
··· 1382 1382 1383 1383 ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power on\n"); 1384 1384 1385 + ret = ath10k_sdio_config(ar); 1386 + if (ret) { 1387 + ath10k_err(ar, "failed to config sdio: %d\n", ret); 1388 + return ret; 1389 + } 1390 + 1385 1391 sdio_claim_host(func); 1386 1392 1387 1393 ret = sdio_enable_func(func); ··· 1425 1419 1426 1420 /* Disable the card */ 1427 1421 sdio_claim_host(ar_sdio->func); 1428 - ret = sdio_disable_func(ar_sdio->func); 1429 - sdio_release_host(ar_sdio->func); 1430 1422 1431 - if (ret) 1423 + ret = sdio_disable_func(ar_sdio->func); 1424 + if (ret) { 1432 1425 ath10k_warn(ar, "unable to disable sdio function: %d\n", ret); 1426 + sdio_release_host(ar_sdio->func); 1427 + return; 1428 + } 1429 + 1430 + ret = mmc_hw_reset(ar_sdio->func->card->host); 1431 + if (ret) 1432 + ath10k_warn(ar, "unable to reset sdio: %d\n", ret); 1433 + 1434 + sdio_release_host(ar_sdio->func); 1433 1435 1434 1436 ar_sdio->is_disabled = true; 1435 1437 } ··· 2041 2027 ar->id.device = id->device; 2042 2028 2043 2029 ath10k_sdio_set_mbox_info(ar); 2044 - 2045 - ret = ath10k_sdio_config(ar); 2046 - if (ret) { 2047 - ath10k_err(ar, "failed to config sdio: %d\n", ret); 2048 - goto err_free_wq; 2049 - } 2050 2030 2051 2031 bus_params.dev_type = ATH10K_DEV_TYPE_HL; 2052 2032 /* TODO: don't know yet how to get chip_id with SDIO */
+97 -12
drivers/net/wireless/ath/ath10k/wmi-tlv.c
··· 13 13 #include "wmi-tlv.h" 14 14 #include "p2p.h" 15 15 #include "testmode.h" 16 + #include <linux/bitfield.h> 16 17 17 18 /***************/ 18 19 /* TLV helpers */ ··· 674 673 arg->desc_id = ev->desc_id; 675 674 arg->status = ev->status; 676 675 arg->pdev_id = ev->pdev_id; 676 + arg->ppdu_id = ev->ppdu_id; 677 + 678 + if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map)) 679 + arg->ack_rssi = ev->ack_rssi; 677 680 678 681 kfree(tb); 679 682 return 0; ··· 687 682 const __le32 *num_reports; 688 683 const __le32 *desc_ids; 689 684 const __le32 *status; 685 + const __le32 *ppdu_ids; 686 + const __le32 *ack_rssi; 690 687 bool desc_ids_done; 691 688 bool status_done; 689 + bool ppdu_ids_done; 690 + bool ack_rssi_done; 692 691 }; 693 692 694 693 static int ··· 712 703 } else if (!bundle_tx_compl->status_done) { 713 704 bundle_tx_compl->status_done = true; 714 705 bundle_tx_compl->status = ptr; 706 + } else if (!bundle_tx_compl->ppdu_ids_done) { 707 + bundle_tx_compl->ppdu_ids_done = true; 708 + bundle_tx_compl->ppdu_ids = ptr; 709 + } else if (!bundle_tx_compl->ack_rssi_done) { 710 + bundle_tx_compl->ack_rssi_done = true; 711 + bundle_tx_compl->ack_rssi = ptr; 715 712 } 716 713 break; 717 714 default: ··· 748 733 arg->num_reports = *bundle_tx_compl.num_reports; 749 734 arg->desc_ids = bundle_tx_compl.desc_ids; 750 735 arg->status = bundle_tx_compl.status; 736 + arg->ppdu_ids = bundle_tx_compl.ppdu_ids; 737 + 738 + if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map)) 739 + arg->ack_rssi = bundle_tx_compl.ack_rssi; 751 740 752 741 return 0; 753 742 } ··· 1297 1278 { 1298 1279 const void **tb; 1299 1280 const struct wmi_tlv_stats_ev *ev; 1281 + u32 num_peer_stats_extd; 1300 1282 const void *data; 1301 1283 u32 num_pdev_stats; 1302 1284 u32 num_vdev_stats; ··· 1305 1285 u32 num_bcnflt_stats; 1306 1286 u32 num_chan_stats; 1307 1287 size_t data_len; 1288 + u32 stats_id; 1308 1289 int ret; 1309 1290 int i; 1310 1291 ··· 1330 1309 num_peer_stats = 
__le32_to_cpu(ev->num_peer_stats); 1331 1310 num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats); 1332 1311 num_chan_stats = __le32_to_cpu(ev->num_chan_stats); 1312 + stats_id = __le32_to_cpu(ev->stats_id); 1313 + num_peer_stats_extd = __le32_to_cpu(ev->num_peer_stats_extd); 1333 1314 1334 1315 ath10k_dbg(ar, ATH10K_DBG_WMI, 1335 - "wmi tlv stats update pdev %i vdev %i peer %i bcnflt %i chan %i\n", 1316 + "wmi tlv stats update pdev %i vdev %i peer %i bcnflt %i chan %i peer_extd %i\n", 1336 1317 num_pdev_stats, num_vdev_stats, num_peer_stats, 1337 - num_bcnflt_stats, num_chan_stats); 1318 + num_bcnflt_stats, num_chan_stats, num_peer_stats_extd); 1338 1319 1339 1320 for (i = 0; i < num_pdev_stats; i++) { 1340 1321 const struct wmi_pdev_stats *src; ··· 1401 1378 1402 1379 ath10k_wmi_pull_peer_stats(&src->old, dst); 1403 1380 dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate); 1381 + 1382 + if (stats_id & WMI_TLV_STAT_PEER_EXTD) { 1383 + const struct wmi_tlv_peer_stats_extd *extd; 1384 + unsigned long rx_duration_high; 1385 + 1386 + extd = data + sizeof(*src) * (num_peer_stats - i - 1) 1387 + + sizeof(*extd) * i; 1388 + 1389 + dst->rx_duration = __le32_to_cpu(extd->rx_duration); 1390 + rx_duration_high = __le32_to_cpu 1391 + (extd->rx_duration_high); 1392 + 1393 + if (test_bit(WMI_TLV_PEER_RX_DURATION_HIGH_VALID_BIT, 1394 + &rx_duration_high)) { 1395 + rx_duration_high = 1396 + FIELD_GET(WMI_TLV_PEER_RX_DURATION_HIGH_MASK, 1397 + rx_duration_high); 1398 + dst->rx_duration |= (u64)rx_duration_high << 1399 + WMI_TLV_PEER_RX_DURATION_SHIFT; 1400 + } 1401 + } 1402 + 1404 1403 list_add_tail(&dst->list, &stats->peers); 1405 1404 } 1406 1405 ··· 1610 1565 cmd->param_id = __cpu_to_le32(param_id); 1611 1566 cmd->param_value = __cpu_to_le32(param_value); 1612 1567 1613 - ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set param\n"); 1568 + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set param %d value 0x%x\n", 1569 + param_id, param_value); 1614 1570 return skb; 1571 + } 1572 
+ 1573 + static void 1574 + ath10k_wmi_tlv_put_host_mem_chunks(struct ath10k *ar, void *host_mem_chunks) 1575 + { 1576 + struct host_memory_chunk *chunk; 1577 + struct wmi_tlv *tlv; 1578 + int i; 1579 + __le16 tlv_len, tlv_tag; 1580 + 1581 + tlv_tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WLAN_HOST_MEMORY_CHUNK); 1582 + tlv_len = __cpu_to_le16(sizeof(*chunk)); 1583 + for (i = 0; i < ar->wmi.num_mem_chunks; i++) { 1584 + tlv = host_mem_chunks; 1585 + tlv->tag = tlv_tag; 1586 + tlv->len = tlv_len; 1587 + chunk = (void *)tlv->value; 1588 + 1589 + chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr); 1590 + chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len); 1591 + chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id); 1592 + 1593 + ath10k_dbg(ar, ATH10K_DBG_WMI, 1594 + "wmi-tlv chunk %d len %d, addr 0x%llx, id 0x%x\n", 1595 + i, 1596 + ar->wmi.mem_chunks[i].len, 1597 + (unsigned long long)ar->wmi.mem_chunks[i].paddr, 1598 + ar->wmi.mem_chunks[i].req_id); 1599 + 1600 + host_mem_chunks += sizeof(*tlv); 1601 + host_mem_chunks += sizeof(*chunk); 1602 + } 1615 1603 } 1616 1604 1617 1605 static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar) ··· 1653 1575 struct wmi_tlv *tlv; 1654 1576 struct wmi_tlv_init_cmd *cmd; 1655 1577 struct wmi_tlv_resource_config *cfg; 1656 - struct wmi_host_mem_chunks *chunks; 1578 + void *chunks; 1657 1579 size_t len, chunks_len; 1658 1580 void *ptr; 1659 1581 1660 - chunks_len = ar->wmi.num_mem_chunks * sizeof(struct host_memory_chunk); 1582 + chunks_len = ar->wmi.num_mem_chunks * 1583 + (sizeof(struct host_memory_chunk) + sizeof(*tlv)); 1661 1584 len = (sizeof(*tlv) + sizeof(*cmd)) + 1662 1585 (sizeof(*tlv) + sizeof(*cfg)) + 1663 1586 (sizeof(*tlv) + chunks_len); ··· 1758 1679 cfg->num_ocb_schedules = __cpu_to_le32(0); 1759 1680 cfg->host_capab = __cpu_to_le32(WMI_TLV_FLAG_MGMT_BUNDLE_TX_COMPL); 1760 1681 1761 - ath10k_wmi_put_host_mem_chunks(ar, chunks); 1682 + if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map)) 
1683 + cfg->host_capab |= __cpu_to_le32(WMI_RSRC_CFG_FLAG_TX_ACK_RSSI); 1684 + 1685 + ath10k_wmi_tlv_put_host_mem_chunks(ar, chunks); 1762 1686 1763 1687 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv init\n"); 1764 1688 return skb; ··· 2117 2035 cmd->param_id = __cpu_to_le32(param_id); 2118 2036 cmd->param_value = __cpu_to_le32(param_value); 2119 2037 2120 - ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev set param\n"); 2038 + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev %d set param %d value 0x%x\n", 2039 + vdev_id, param_id, param_value); 2121 2040 return skb; 2122 2041 } 2123 2042 ··· 2434 2351 cmd->param_value = __cpu_to_le32(param_value); 2435 2352 ether_addr_copy(cmd->peer_macaddr.addr, peer_addr); 2436 2353 2437 - ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer set param\n"); 2354 + ath10k_dbg(ar, ATH10K_DBG_WMI, 2355 + "wmi tlv vdev %d peer %pM set param %d value 0x%x\n", 2356 + vdev_id, peer_addr, param_id, param_value); 2438 2357 return skb; 2439 2358 } 2440 2359 ··· 2830 2745 arvif = (void *)cb->vif->drv_priv; 2831 2746 vdev_id = arvif->vdev_id; 2832 2747 2833 - if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control))) 2748 + if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control) && 2749 + (!(ieee80211_is_nullfunc(hdr->frame_control) || 2750 + ieee80211_is_qos_nullfunc(hdr->frame_control))))) 2834 2751 return ERR_PTR(-EINVAL); 2835 2752 2836 2753 len = sizeof(*cmd) + 2 * sizeof(*tlv); ··· 2840 2753 if ((ieee80211_is_action(hdr->frame_control) || 2841 2754 ieee80211_is_deauth(hdr->frame_control) || 2842 2755 ieee80211_is_disassoc(hdr->frame_control)) && 2843 - ieee80211_has_protected(hdr->frame_control)) { 2844 - len += IEEE80211_CCMP_MIC_LEN; 2756 + ieee80211_has_protected(hdr->frame_control)) 2845 2757 buf_len += IEEE80211_CCMP_MIC_LEN; 2846 - } 2847 2758 2848 2759 buf_len = min_t(u32, buf_len, WMI_TLV_MGMT_TX_FRAME_MAX_LEN); 2849 2760 buf_len = round_up(buf_len, 4);
+45
drivers/net/wireless/ath/ath10k/wmi-tlv.h
··· 14 14 #define WMI_TLV_VDEV_PARAM_UNSUPPORTED 0 15 15 #define WMI_TLV_MGMT_TX_FRAME_MAX_LEN 64 16 16 17 + #define WMI_RSRC_CFG_FLAG_TX_ACK_RSSI BIT(18) 18 + 17 19 enum wmi_tlv_grp_id { 18 20 WMI_TLV_GRP_START = 0x3, 19 21 WMI_TLV_GRP_SCAN = WMI_TLV_GRP_START, ··· 1386 1384 WMI_TLV_SERVICE_AP_TWT = 153, 1387 1385 WMI_TLV_SERVICE_GMAC_OFFLOAD_SUPPORT = 154, 1388 1386 WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT = 155, 1387 + WMI_TLV_SERVICE_PEER_TID_CONFIGS_SUPPORT = 156, 1388 + WMI_TLV_SERVICE_VDEV_SWRETRY_PER_AC_CONFIG_SUPPORT = 157, 1389 + WMI_TLV_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_SCC_SUPPORT = 158, 1390 + WMI_TLV_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_MCC_SUPPORT = 159, 1391 + WMI_TLV_SERVICE_MOTION_DET = 160, 1392 + WMI_TLV_SERVICE_INFRA_MBSSID = 161, 1393 + WMI_TLV_SERVICE_OBSS_SPATIAL_REUSE = 162, 1394 + WMI_TLV_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT = 163, 1395 + WMI_TLV_SERVICE_NAN_DBS_SUPPORT = 164, 1396 + WMI_TLV_SERVICE_NDI_DBS_SUPPORT = 165, 1397 + WMI_TLV_SERVICE_NAN_SAP_SUPPORT = 166, 1398 + WMI_TLV_SERVICE_NDI_SAP_SUPPORT = 167, 1399 + WMI_TLV_SERVICE_CFR_CAPTURE_SUPPORT = 168, 1400 + WMI_TLV_SERVICE_CFR_CAPTURE_IND_MSG_TYPE_1 = 169, 1401 + WMI_TLV_SERVICE_ESP_SUPPORT = 170, 1402 + WMI_TLV_SERVICE_PEER_CHWIDTH_CHANGE = 171, 1403 + WMI_TLV_SERVICE_WLAN_HPCS_PULSE = 172, 1404 + WMI_TLV_SERVICE_PER_VDEV_CHAINMASK_CONFIG_SUPPORT = 173, 1405 + WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI = 174, 1389 1406 1390 1407 WMI_TLV_MAX_EXT_SERVICE = 256, 1391 1408 }; ··· 1578 1557 SVCMAP(WMI_TLV_SERVICE_THERM_THROT, 1579 1558 WMI_SERVICE_THERM_THROT, 1580 1559 WMI_TLV_MAX_SERVICE); 1560 + SVCMAP(WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI, 1561 + WMI_SERVICE_TX_DATA_ACK_RSSI, WMI_TLV_MAX_SERVICE); 1581 1562 } 1582 1563 1583 1564 #undef SVCMAP ··· 1611 1588 __le32 desc_id; 1612 1589 __le32 status; 1613 1590 __le32 pdev_id; 1591 + __le32 ppdu_id; 1592 + __le32 ack_rssi; 1614 1593 }; 1615 1594 1616 1595 #define WMI_TLV_MGMT_RX_NUM_RSSI 4 ··· 1889 1864 struct wmi_mac_addr peer_macaddr; 
1890 1865 } __packed; 1891 1866 1867 + #define WMI_TLV_PEER_RX_DURATION_HIGH_VALID_BIT 31 1868 + #define WMI_TLV_PEER_RX_DURATION_HIGH_MASK GENMASK(30, 0) 1869 + #define WMI_TLV_PEER_RX_DURATION_SHIFT 32 1870 + 1871 + struct wmi_tlv_peer_stats_extd { 1872 + struct wmi_mac_addr peer_macaddr; 1873 + __le32 rx_duration; 1874 + __le32 peer_tx_bytes; 1875 + __le32 peer_rx_bytes; 1876 + __le32 last_tx_rate_code; 1877 + __le32 last_tx_power; 1878 + __le32 rx_mc_bc_cnt; 1879 + __le32 rx_duration_high; 1880 + __le32 reserved[2]; 1881 + } __packed; 1882 + 1892 1883 struct wmi_tlv_vdev_stats { 1893 1884 __le32 vdev_id; 1894 1885 __le32 beacon_snr; ··· 1998 1957 __le32 num_peer_stats; 1999 1958 __le32 num_bcnflt_stats; 2000 1959 __le32 num_chan_stats; 1960 + __le32 num_mib_stats; 1961 + __le32 pdev_id; 1962 + __le32 num_bcn_stats; 1963 + __le32 num_peer_stats_extd; 2001 1964 } __packed; 2002 1965 2003 1966 struct wmi_tlv_p2p_noa_ev {
+31 -13
drivers/net/wireless/ath/ath10k/wmi.c
··· 2342 2342 return true; 2343 2343 } 2344 2344 2345 - static int wmi_process_mgmt_tx_comp(struct ath10k *ar, u32 desc_id, 2346 - u32 status) 2345 + static int 2346 + wmi_process_mgmt_tx_comp(struct ath10k *ar, struct mgmt_tx_compl_params *param) 2347 2347 { 2348 2348 struct ath10k_mgmt_tx_pkt_addr *pkt_addr; 2349 2349 struct ath10k_wmi *wmi = &ar->wmi; ··· 2353 2353 2354 2354 spin_lock_bh(&ar->data_lock); 2355 2355 2356 - pkt_addr = idr_find(&wmi->mgmt_pending_tx, desc_id); 2356 + pkt_addr = idr_find(&wmi->mgmt_pending_tx, param->desc_id); 2357 2357 if (!pkt_addr) { 2358 2358 ath10k_warn(ar, "received mgmt tx completion for invalid msdu_id: %d\n", 2359 - desc_id); 2359 + param->desc_id); 2360 2360 ret = -ENOENT; 2361 2361 goto out; 2362 2362 } ··· 2366 2366 msdu->len, DMA_TO_DEVICE); 2367 2367 info = IEEE80211_SKB_CB(msdu); 2368 2368 2369 - if (status) 2369 + if (param->status) { 2370 2370 info->flags &= ~IEEE80211_TX_STAT_ACK; 2371 - else 2371 + } else { 2372 2372 info->flags |= IEEE80211_TX_STAT_ACK; 2373 + info->status.ack_signal = ATH10K_DEFAULT_NOISE_FLOOR + 2374 + param->ack_rssi; 2375 + info->status.is_valid_ack_signal = true; 2376 + } 2373 2377 2374 2378 ieee80211_tx_status_irqsafe(ar->hw, msdu); 2375 2379 2376 2380 ret = 0; 2377 2381 2378 2382 out: 2379 - idr_remove(&wmi->mgmt_pending_tx, desc_id); 2383 + idr_remove(&wmi->mgmt_pending_tx, param->desc_id); 2380 2384 spin_unlock_bh(&ar->data_lock); 2381 2385 return ret; 2382 2386 } ··· 2388 2384 int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb) 2389 2385 { 2390 2386 struct wmi_tlv_mgmt_tx_compl_ev_arg arg; 2387 + struct mgmt_tx_compl_params param; 2391 2388 int ret; 2392 2389 2393 2390 ret = ath10k_wmi_pull_mgmt_tx_compl(ar, skb, &arg); ··· 2397 2392 return ret; 2398 2393 } 2399 2394 2400 - wmi_process_mgmt_tx_comp(ar, __le32_to_cpu(arg.desc_id), 2401 - __le32_to_cpu(arg.status)); 2395 + memset(&param, 0, sizeof(struct mgmt_tx_compl_params)); 2396 + param.desc_id = 
__le32_to_cpu(arg.desc_id); 2397 + param.status = __le32_to_cpu(arg.status); 2398 + 2399 + if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map)) 2400 + param.ack_rssi = __le32_to_cpu(arg.ack_rssi); 2401 + 2402 + wmi_process_mgmt_tx_comp(ar, &param); 2402 2403 2403 2404 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv evnt mgmt tx completion\n"); 2404 2405 ··· 2414 2403 int ath10k_wmi_event_mgmt_tx_bundle_compl(struct ath10k *ar, struct sk_buff *skb) 2415 2404 { 2416 2405 struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg arg; 2406 + struct mgmt_tx_compl_params param; 2417 2407 u32 num_reports; 2418 2408 int i, ret; 2419 2409 ··· 2426 2414 2427 2415 num_reports = __le32_to_cpu(arg.num_reports); 2428 2416 2429 - for (i = 0; i < num_reports; i++) 2430 - wmi_process_mgmt_tx_comp(ar, __le32_to_cpu(arg.desc_ids[i]), 2431 - __le32_to_cpu(arg.status[i])); 2417 + for (i = 0; i < num_reports; i++) { 2418 + memset(&param, 0, sizeof(struct mgmt_tx_compl_params)); 2419 + param.desc_id = __le32_to_cpu(arg.desc_ids[i]); 2420 + param.status = __le32_to_cpu(arg.desc_ids[i]); 2421 + 2422 + if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map)) 2423 + param.ack_rssi = __le32_to_cpu(arg.ack_rssi[i]); 2424 + wmi_process_mgmt_tx_comp(ar, &param); 2425 + } 2432 2426 2433 2427 ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv event bundle mgmt tx completion\n"); 2434 2428 ··· 8322 8304 "Peer TX rate", peer->peer_tx_rate); 8323 8305 len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 8324 8306 "Peer RX rate", peer->peer_rx_rate); 8325 - len += scnprintf(buf + len, buf_len - len, "%30s %u\n", 8307 + len += scnprintf(buf + len, buf_len - len, "%30s %llu\n", 8326 8308 "Peer RX duration", peer->rx_duration); 8327 8309 8328 8310 len += scnprintf(buf + len, buf_len - len, "\n");
+20 -1
drivers/net/wireless/ath/ath10k/wmi.h
··· 4534 4534 WMI_10_4_STAT_VDEV_EXTD = BIT(4), 4535 4535 }; 4536 4536 4537 + enum wmi_tlv_stats_id { 4538 + WMI_TLV_STAT_PDEV = BIT(0), 4539 + WMI_TLV_STAT_VDEV = BIT(1), 4540 + WMI_TLV_STAT_PEER = BIT(2), 4541 + WMI_TLV_STAT_PEER_EXTD = BIT(10), 4542 + }; 4543 + 4537 4544 struct wlan_inst_rssi_args { 4538 4545 __le16 cfg_retry_count; 4539 4546 __le16 retry_count; ··· 5052 5045 #define ATH10K_FW_SKIPPED_RATE_CTRL(flags) (((flags) >> 6) & 0x1) 5053 5046 5054 5047 #define ATH10K_VHT_MCS_NUM 10 5055 - #define ATH10K_BW_NUM 4 5048 + #define ATH10K_BW_NUM 6 5056 5049 #define ATH10K_NSS_NUM 4 5057 5050 #define ATH10K_LEGACY_NUM 12 5058 5051 #define ATH10K_GI_NUM 2 5059 5052 #define ATH10K_HT_MCS_NUM 32 5060 5053 #define ATH10K_RATE_TABLE_NUM 320 5054 + #define ATH10K_RATE_INFO_FLAGS_SGI_BIT 2 5061 5055 5062 5056 /* Value to disable fixed rate setting */ 5063 5057 #define WMI_FIXED_RATE_NONE (0xff) ··· 6733 6725 __le32 vdev_id; 6734 6726 }; 6735 6727 6728 + struct mgmt_tx_compl_params { 6729 + u32 desc_id; 6730 + u32 status; 6731 + u32 ppdu_id; 6732 + int ack_rssi; 6733 + }; 6734 + 6736 6735 struct wmi_tlv_mgmt_tx_compl_ev_arg { 6737 6736 __le32 desc_id; 6738 6737 __le32 status; 6739 6738 __le32 pdev_id; 6739 + __le32 ppdu_id; 6740 + __le32 ack_rssi; 6740 6741 }; 6741 6742 6742 6743 struct wmi_tlv_mgmt_tx_bundle_compl_ev_arg { 6743 6744 __le32 num_reports; 6744 6745 const __le32 *desc_ids; 6745 6746 const __le32 *status; 6747 + const __le32 *ppdu_ids; 6748 + const __le32 *ack_rssi; 6746 6749 }; 6747 6750 6748 6751 struct wmi_mgmt_rx_ev_arg {
+1 -1
drivers/net/wireless/ath/ath9k/debug.c
··· 148 148 { "OFDM LEVEL", ah->ani.ofdmNoiseImmunityLevel }, 149 149 { "CCK LEVEL", ah->ani.cckNoiseImmunityLevel }, 150 150 { "SPUR UP", ah->stats.ast_ani_spurup }, 151 - { "SPUR DOWN", ah->stats.ast_ani_spurup }, 151 + { "SPUR DOWN", ah->stats.ast_ani_spurdown }, 152 152 { "OFDM WS-DET ON", ah->stats.ast_ani_ofdmon }, 153 153 { "OFDM WS-DET OFF", ah->stats.ast_ani_ofdmoff }, 154 154 { "MRC-CCK ON", ah->stats.ast_ani_ccklow },
+1 -8
drivers/net/wireless/ath/ath9k/recv.c
··· 1006 1006 struct ath_rx_status *rs, 1007 1007 struct sk_buff *skb) 1008 1008 { 1009 - struct ath_node *an; 1010 - struct ath_acq *acq; 1011 - struct ath_vif *avp; 1012 1009 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1013 1010 struct ath_hw *ah = sc->sc_ah; 1014 1011 struct ath_common *common = ath9k_hw_common(ah); ··· 1016 1019 int phy; 1017 1020 u16 len = rs->rs_datalen; 1018 1021 u32 airtime = 0; 1019 - u8 tidno, acno; 1022 + u8 tidno; 1020 1023 1021 1024 if (!ieee80211_is_data(hdr->frame_control)) 1022 1025 return; ··· 1026 1029 sta = ieee80211_find_sta_by_ifaddr(sc->hw, hdr->addr2, NULL); 1027 1030 if (!sta) 1028 1031 goto exit; 1029 - an = (struct ath_node *) sta->drv_priv; 1030 - avp = (struct ath_vif *) an->vif->drv_priv; 1031 1032 tidno = skb->priority & IEEE80211_QOS_CTL_TID_MASK; 1032 - acno = TID_TO_WME_AC(tidno); 1033 - acq = &avp->chanctx->acq[acno]; 1034 1033 1035 1034 rxs = IEEE80211_SKB_RXCB(skb); 1036 1035
+3
drivers/net/wireless/ath/ath9k/xmit.c
··· 2552 2552 } 2553 2553 2554 2554 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1; 2555 + 2556 + /* we report airtime in ath_tx_count_airtime(), don't report twice */ 2557 + tx_info->status.tx_time = 0; 2555 2558 } 2556 2559 2557 2560 static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
+14 -6
drivers/net/wireless/ath/wil6210/cfg80211.c
··· 1 1 /* 2 2 * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. 3 - * Copyright (c) 2018, The Linux Foundation. All rights reserved. 3 + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 4 4 * 5 5 * Permission to use, copy, modify, and/or distribute this software for any 6 6 * purpose with or without fee is hereby granted, provided that the above ··· 395 395 { 396 396 int i; 397 397 398 - for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { 398 + for (i = 0; i < max_assoc_sta; i++) { 399 399 if (wil->sta[i].status == wil_sta_unused) 400 400 continue; 401 401 if (wil->sta[i].mid != mid) ··· 1580 1580 u8 *buf, *dpos; 1581 1581 const u8 *spos; 1582 1582 1583 + if (!ies1) 1584 + ies1_len = 0; 1585 + 1586 + if (!ies2) 1587 + ies2_len = 0; 1588 + 1583 1589 if (ies1_len == 0 && ies2_len == 0) { 1584 1590 *merged_ies = NULL; 1585 1591 *merged_len = 0; ··· 1595 1589 buf = kmalloc(ies1_len + ies2_len, GFP_KERNEL); 1596 1590 if (!buf) 1597 1591 return -ENOMEM; 1598 - memcpy(buf, ies1, ies1_len); 1592 + if (ies1) 1593 + memcpy(buf, ies1, ies1_len); 1599 1594 dpos = buf + ies1_len; 1600 1595 spos = ies2; 1601 - while (spos + 1 < ies2 + ies2_len) { 1596 + while (spos && (spos + 1 < ies2 + ies2_len)) { 1602 1597 /* IE tag at offset 0, length at offset 1 */ 1603 1598 u16 ielen = 2 + spos[1]; 1604 1599 1605 1600 if (spos + ielen > ies2 + ies2_len) 1606 1601 break; 1607 1602 if (spos[0] == WLAN_EID_VENDOR_SPECIFIC && 1608 - !_wil_cfg80211_find_ie(ies1, ies1_len, spos, ielen)) { 1603 + (!ies1 || !_wil_cfg80211_find_ie(ies1, ies1_len, 1604 + spos, ielen))) { 1609 1605 memcpy(dpos, spos, ielen); 1610 1606 dpos += ielen; 1611 1607 } ··· 3015 3007 wil, vif->mid, WMI_INVALID_RF_SECTOR_INDEX, 3016 3008 sector_type, WIL_CID_ALL); 3017 3009 if (rc == -EINVAL) { 3018 - for (i = 0; i < WIL6210_MAX_CID; i++) { 3010 + for (i = 0; i < max_assoc_sta; i++) { 3019 3011 if (wil->sta[i].mid != vif->mid) 3020 3012 continue; 3021 3013 rc = wil_rf_sector_wmi_set_selected(
+12 -11
drivers/net/wireless/ath/wil6210/debugfs.c
··· 1 1 /* 2 2 * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. 3 - * Copyright (c) 2018, The Linux Foundation. All rights reserved. 3 + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 4 4 * 5 5 * Permission to use, copy, modify, and/or distribute this software for any 6 6 * purpose with or without fee is hereby granted, provided that the above ··· 162 162 163 163 snprintf(name, sizeof(name), "tx_%2d", i); 164 164 165 - if (cid < WIL6210_MAX_CID) 165 + if (cid < max_assoc_sta) 166 166 seq_printf(s, 167 167 "\n%pM CID %d TID %d 1x%s BACK([%u] %u TU A%s) [%3d|%3d] idle %s\n", 168 168 wil->sta[cid].addr, cid, tid, ··· 792 792 "BACK: del_rx require at least 2 params\n"); 793 793 return -EINVAL; 794 794 } 795 - if (p1 < 0 || p1 >= WIL6210_MAX_CID) { 795 + if (p1 < 0 || p1 >= max_assoc_sta) { 796 796 wil_err(wil, "BACK: invalid CID %d\n", p1); 797 797 return -EINVAL; 798 798 } 799 799 if (rc < 4) 800 800 p3 = WLAN_REASON_QSTA_LEAVE_QBSS; 801 801 sta = &wil->sta[p1]; 802 - wmi_delba_rx(wil, sta->mid, mk_cidxtid(p1, p2), p3); 802 + wmi_delba_rx(wil, sta->mid, p1, p2, p3); 803 803 } else { 804 804 wil_err(wil, "BACK: Unrecognized command \"%s\"\n", cmd); 805 805 return -EINVAL; ··· 1243 1243 1244 1244 memset(&reply, 0, sizeof(reply)); 1245 1245 1246 - for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { 1246 + for (i = 0; i < max_assoc_sta; i++) { 1247 1247 u32 status; 1248 1248 1249 1249 cmd.cid = i; ··· 1340 1340 if (!sinfo) 1341 1341 return -ENOMEM; 1342 1342 1343 - for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { 1343 + for (i = 0; i < max_assoc_sta; i++) { 1344 1344 struct wil_sta_info *p = &wil->sta[i]; 1345 1345 char *status = "unknown"; 1346 1346 struct wil6210_vif *vif; ··· 1542 1542 struct wil6210_priv *wil = s->private; 1543 1543 int i, tid, mcs; 1544 1544 1545 - for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { 1545 + for (i = 0; i < max_assoc_sta; i++) { 1546 1546 struct wil_sta_info *p = &wil->sta[i]; 1547 1547 char *status = "unknown"; 1548 1548 u8 aid = 
0; ··· 1651 1651 struct wil6210_priv *wil = s->private; 1652 1652 int i, bin; 1653 1653 1654 - for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { 1654 + for (i = 0; i < max_assoc_sta; i++) { 1655 1655 struct wil_sta_info *p = &wil->sta[i]; 1656 1656 char *status = "unknown"; 1657 1657 u8 aid = 0; ··· 1740 1740 size_t sz = sizeof(u64) * WIL_NUM_LATENCY_BINS; 1741 1741 1742 1742 wil->tx_latency_res = val; 1743 - for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { 1743 + for (i = 0; i < max_assoc_sta; i++) { 1744 1744 struct wil_sta_info *sta = &wil->sta[i]; 1745 1745 1746 1746 kfree(sta->tx_latency_bins); ··· 1825 1825 } 1826 1826 1827 1827 seq_printf(s, "TSF %lld\n", vif->fw_stats_tsf); 1828 - for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { 1828 + for (i = 0; i < max_assoc_sta; i++) { 1829 1829 if (wil->sta[i].status == wil_sta_unused) 1830 1830 continue; 1831 1831 if (wil->sta[i].mid != vif->mid) ··· 2386 2386 {"led_polarity", 0644, (ulong)&led_polarity, doff_u8}, 2387 2387 {"status_index", 0644, (ulong)&dbg_status_msg_index, doff_u32}, 2388 2388 {"sring_index", 0644, (ulong)&dbg_sring_index, doff_u32}, 2389 + {"drop_if_ring_full", 0644, (ulong)&drop_if_ring_full, doff_u8}, 2389 2390 {}, 2390 2391 }; 2391 2392 ··· 2440 2439 wil->debug = NULL; 2441 2440 2442 2441 kfree(wil->dbg_data.data_arr); 2443 - for (i = 0; i < ARRAY_SIZE(wil->sta); i++) 2442 + for (i = 0; i < max_assoc_sta; i++) 2444 2443 kfree(wil->sta[i].tx_latency_bins); 2445 2444 2446 2445 /* free pmc memory without sending command to fw, as it will
+8 -4
drivers/net/wireless/ath/wil6210/interrupt.c
··· 1 1 /* 2 2 * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. 3 - * Copyright (c) 2018, The Linux Foundation. All rights reserved. 3 + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 4 4 * 5 5 * Permission to use, copy, modify, and/or distribute this software for any 6 6 * purpose with or without fee is hereby granted, provided that the above ··· 575 575 } 576 576 577 577 if (isr & BIT_DMA_EP_MISC_ICR_HALP) { 578 - wil_dbg_irq(wil, "irq_misc: HALP IRQ invoked\n"); 579 - wil6210_mask_halp(wil); 580 578 isr &= ~BIT_DMA_EP_MISC_ICR_HALP; 581 - complete(&wil->halp.comp); 579 + if (wil->halp.handle_icr) { 580 + /* no need to handle HALP ICRs until next vote */ 581 + wil->halp.handle_icr = false; 582 + wil_dbg_irq(wil, "irq_misc: HALP IRQ invoked\n"); 583 + wil6210_mask_halp(wil); 584 + complete(&wil->halp.comp); 585 + } 582 586 } 583 587 584 588 wil->isr_misc = isr;
+8 -5
drivers/net/wireless/ath/wil6210/main.c
··· 1 1 /* 2 2 * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. 3 - * Copyright (c) 2018, The Linux Foundation. All rights reserved. 3 + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 4 4 * 5 5 * Permission to use, copy, modify, and/or distribute this software for any 6 6 * purpose with or without fee is hereby granted, provided that the above ··· 219 219 { 220 220 int i; 221 221 222 - for (i = 0; i < WIL6210_MAX_CID; i++) { 222 + for (i = 0; i < max_assoc_sta; i++) { 223 223 if (wil->sta[i].mid == mid && 224 224 wil->sta[i].status == wil_sta_connected) 225 225 return true; ··· 322 322 wil_disconnect_cid_complete(vif, cid, reason_code); 323 323 } else { /* all */ 324 324 wil_dbg_misc(wil, "Disconnect complete all\n"); 325 - for (cid = 0; cid < WIL6210_MAX_CID; cid++) 325 + for (cid = 0; cid < max_assoc_sta; cid++) 326 326 wil_disconnect_cid_complete(vif, cid, reason_code); 327 327 } 328 328 ··· 434 434 wil_disconnect_cid(vif, cid, reason_code); 435 435 } else { /* all */ 436 436 wil_dbg_misc(wil, "Disconnect all\n"); 437 - for (cid = 0; cid < WIL6210_MAX_CID; cid++) 437 + for (cid = 0; cid < max_assoc_sta; cid++) 438 438 wil_disconnect_cid(vif, cid, reason_code); 439 439 } 440 440 ··· 1895 1895 int i; 1896 1896 int rc = -ENOENT; 1897 1897 1898 - for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { 1898 + for (i = 0; i < max_assoc_sta; i++) { 1899 1899 if (wil->sta[i].mid == mid && 1900 1900 wil->sta[i].status != wil_sta_unused && 1901 1901 ether_addr_equal(wil->sta[i].addr, mac)) { ··· 1919 1919 1920 1920 if (++wil->halp.ref_cnt == 1) { 1921 1921 reinit_completion(&wil->halp.comp); 1922 + /* mark to IRQ context to handle HALP ICR */ 1923 + wil->halp.handle_icr = true; 1922 1924 wil6210_set_halp(wil); 1923 1925 rc = wait_for_completion_timeout(&wil->halp.comp, to_jiffies); 1924 1926 if (!rc) { 1925 1927 wil_err(wil, "HALP vote timed out\n"); 1926 1928 /* Mask HALP as done in case the interrupt is raised */ 1929 + wil->halp.handle_icr = false; 1927 
1930 wil6210_mask_halp(wil); 1928 1931 } else { 1929 1932 wil_dbg_irq(wil,
+4 -6
drivers/net/wireless/ath/wil6210/rx_reorder.c
··· 1 1 /* 2 2 * Copyright (c) 2014-2017 Qualcomm Atheros, Inc. 3 - * Copyright (c) 2018, The Linux Foundation. All rights reserved. 3 + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 4 4 * 5 5 * Permission to use, copy, modify, and/or distribute this software for any 6 6 * purpose with or without fee is hereby granted, provided that the above ··· 307 307 } 308 308 309 309 /* Block Ack - Rx side (recipient) */ 310 - int wil_addba_rx_request(struct wil6210_priv *wil, u8 mid, 311 - u8 cidxtid, u8 dialog_token, __le16 ba_param_set, 310 + int wil_addba_rx_request(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid, 311 + u8 dialog_token, __le16 ba_param_set, 312 312 __le16 ba_timeout, __le16 ba_seq_ctrl) 313 313 __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) 314 314 { ··· 316 316 u16 agg_timeout = le16_to_cpu(ba_timeout); 317 317 u16 seq_ctrl = le16_to_cpu(ba_seq_ctrl); 318 318 struct wil_sta_info *sta; 319 - u8 cid, tid; 320 319 u16 agg_wsize = 0; 321 320 /* bit 0: A-MSDU supported 322 321 * bit 1: policy (should be 0 for us) ··· 334 335 int rc = 0; 335 336 336 337 might_sleep(); 337 - parse_cidxtid(cidxtid, &cid, &tid); 338 338 339 339 /* sanity checks */ 340 - if (cid >= WIL6210_MAX_CID) { 340 + if (cid >= max_assoc_sta) { 341 341 wil_err(wil, "BACK: invalid CID %d\n", cid); 342 342 rc = -EINVAL; 343 343 goto out;
+2 -1
drivers/net/wireless/ath/wil6210/trace.h
··· 1 1 /* 2 2 * Copyright (c) 2013-2016 Qualcomm Atheros, Inc. 3 + * Copyright (c) 2019, The Linux Foundation. All rights reserved. 3 4 * 4 5 * Permission to use, copy, modify, and/or distribute this software for any 5 6 * purpose with or without fee is hereby granted, provided that the above ··· 182 181 __entry->seq = wil_rxdesc_seq(d); 183 182 __entry->mcs = wil_rxdesc_mcs(d); 184 183 ), 185 - TP_printk("index %d len %d mid %d cid %d tid %d mcs %d seq 0x%03x" 184 + TP_printk("index %d len %d mid %d cid (%%8) %d tid %d mcs %d seq 0x%03x" 186 185 " type 0x%1x subtype 0x%1x", __entry->index, __entry->len, 187 186 __entry->mid, __entry->cid, __entry->tid, __entry->mcs, 188 187 __entry->seq, __entry->type, __entry->subtype)
+138 -112
drivers/net/wireless/ath/wil6210/txrx.c
··· 1 1 /* 2 2 * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. 3 - * Copyright (c) 2018, The Linux Foundation. All rights reserved. 3 + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 4 4 * 5 5 * Permission to use, copy, modify, and/or distribute this software for any 6 6 * purpose with or without fee is hereby granted, provided that the above ··· 30 30 #include "trace.h" 31 31 #include "txrx_edma.h" 32 32 33 - static bool rtap_include_phy_info; 34 - module_param(rtap_include_phy_info, bool, 0444); 35 - MODULE_PARM_DESC(rtap_include_phy_info, 36 - " Include PHY info in the radiotap header, default - no"); 37 - 38 33 bool rx_align_2; 39 34 module_param(rx_align_2, bool, 0444); 40 35 MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no"); ··· 37 42 bool rx_large_buf; 38 43 module_param(rx_large_buf, bool, 0444); 39 44 MODULE_PARM_DESC(rx_large_buf, " allocate 8KB RX buffers, default - no"); 45 + 46 + /* Drop Tx packets in case Tx ring is full */ 47 + bool drop_if_ring_full; 40 48 41 49 static inline uint wil_rx_snaplen(void) 42 50 { ··· 330 332 u8 mcs_flags; 331 333 u8 mcs_index; 332 334 } __packed; 333 - struct wil6210_rtap_vendor { 334 - struct wil6210_rtap rtap; 335 - /* vendor */ 336 - u8 vendor_oui[3] __aligned(2); 337 - u8 vendor_ns; 338 - __le16 vendor_skip; 339 - u8 vendor_data[0]; 340 - } __packed; 341 335 struct vring_rx_desc *d = wil_skb_rxdesc(skb); 342 - struct wil6210_rtap_vendor *rtap_vendor; 336 + struct wil6210_rtap *rtap; 343 337 int rtap_len = sizeof(struct wil6210_rtap); 344 - int phy_length = 0; /* phy info header size, bytes */ 345 - static char phy_data[128]; 346 338 struct ieee80211_channel *ch = wil->monitor_chandef.chan; 347 - 348 - if (rtap_include_phy_info) { 349 - rtap_len = sizeof(*rtap_vendor) + sizeof(*d); 350 - /* calculate additional length */ 351 - if (d->dma.status & RX_DMA_STATUS_PHY_INFO) { 352 - /** 353 - * PHY info starts from 8-byte boundary 354 - * there are 8-byte lines, last line may 
be partially 355 - * written (HW bug), thus FW configures for last line 356 - * to be excessive. Driver skips this last line. 357 - */ 358 - int len = min_t(int, 8 + sizeof(phy_data), 359 - wil_rxdesc_phy_length(d)); 360 - 361 - if (len > 8) { 362 - void *p = skb_tail_pointer(skb); 363 - void *pa = PTR_ALIGN(p, 8); 364 - 365 - if (skb_tailroom(skb) >= len + (pa - p)) { 366 - phy_length = len - 8; 367 - memcpy(phy_data, pa, phy_length); 368 - } 369 - } 370 - } 371 - rtap_len += phy_length; 372 - } 373 339 374 340 if (skb_headroom(skb) < rtap_len && 375 341 pskb_expand_head(skb, rtap_len, 0, GFP_ATOMIC)) { ··· 341 379 return; 342 380 } 343 381 344 - rtap_vendor = skb_push(skb, rtap_len); 345 - memset(rtap_vendor, 0, rtap_len); 382 + rtap = skb_push(skb, rtap_len); 383 + memset(rtap, 0, rtap_len); 346 384 347 - rtap_vendor->rtap.rthdr.it_version = PKTHDR_RADIOTAP_VERSION; 348 - rtap_vendor->rtap.rthdr.it_len = cpu_to_le16(rtap_len); 349 - rtap_vendor->rtap.rthdr.it_present = cpu_to_le32( 350 - (1 << IEEE80211_RADIOTAP_FLAGS) | 385 + rtap->rthdr.it_version = PKTHDR_RADIOTAP_VERSION; 386 + rtap->rthdr.it_len = cpu_to_le16(rtap_len); 387 + rtap->rthdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | 351 388 (1 << IEEE80211_RADIOTAP_CHANNEL) | 352 389 (1 << IEEE80211_RADIOTAP_MCS)); 353 390 if (d->dma.status & RX_DMA_STATUS_ERROR) 354 - rtap_vendor->rtap.flags |= IEEE80211_RADIOTAP_F_BADFCS; 391 + rtap->flags |= IEEE80211_RADIOTAP_F_BADFCS; 355 392 356 - rtap_vendor->rtap.chnl_freq = cpu_to_le16(ch ? ch->center_freq : 58320); 357 - rtap_vendor->rtap.chnl_flags = cpu_to_le16(0); 393 + rtap->chnl_freq = cpu_to_le16(ch ? 
ch->center_freq : 58320); 394 + rtap->chnl_flags = cpu_to_le16(0); 358 395 359 - rtap_vendor->rtap.mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS; 360 - rtap_vendor->rtap.mcs_flags = 0; 361 - rtap_vendor->rtap.mcs_index = wil_rxdesc_mcs(d); 362 - 363 - if (rtap_include_phy_info) { 364 - rtap_vendor->rtap.rthdr.it_present |= cpu_to_le32(1 << 365 - IEEE80211_RADIOTAP_VENDOR_NAMESPACE); 366 - /* OUI for Wilocity 04:ce:14 */ 367 - rtap_vendor->vendor_oui[0] = 0x04; 368 - rtap_vendor->vendor_oui[1] = 0xce; 369 - rtap_vendor->vendor_oui[2] = 0x14; 370 - rtap_vendor->vendor_ns = 1; 371 - /* Rx descriptor + PHY data */ 372 - rtap_vendor->vendor_skip = cpu_to_le16(sizeof(*d) + 373 - phy_length); 374 - memcpy(rtap_vendor->vendor_data, (void *)d, sizeof(*d)); 375 - memcpy(rtap_vendor->vendor_data + sizeof(*d), phy_data, 376 - phy_length); 377 - } 396 + rtap->mcs_present = IEEE80211_RADIOTAP_MCS_HAVE_MCS; 397 + rtap->mcs_flags = 0; 398 + rtap->mcs_index = wil_rxdesc_mcs(d); 378 399 } 379 400 380 401 static bool wil_is_rx_idle(struct wil6210_priv *wil) ··· 370 425 return false; 371 426 372 427 return true; 428 + } 429 + 430 + static int wil_rx_get_cid_by_skb(struct wil6210_priv *wil, struct sk_buff *skb) 431 + { 432 + struct vring_rx_desc *d = wil_skb_rxdesc(skb); 433 + int mid = wil_rxdesc_mid(d); 434 + struct wil6210_vif *vif = wil->vifs[mid]; 435 + /* cid from DMA descriptor is limited to 3 bits. 
436 + * In case of cid>=8, the value would be cid modulo 8 and we need to 437 + * find real cid by locating the transmitter (ta) inside sta array 438 + */ 439 + int cid = wil_rxdesc_cid(d); 440 + unsigned int snaplen = wil_rx_snaplen(); 441 + struct ieee80211_hdr_3addr *hdr; 442 + int i; 443 + unsigned char *ta; 444 + u8 ftype; 445 + 446 + /* in monitor mode there are no connections */ 447 + if (vif->wdev.iftype == NL80211_IFTYPE_MONITOR) 448 + return cid; 449 + 450 + ftype = wil_rxdesc_ftype(d) << 2; 451 + if (likely(ftype == IEEE80211_FTYPE_DATA)) { 452 + if (unlikely(skb->len < ETH_HLEN + snaplen)) { 453 + wil_err_ratelimited(wil, 454 + "Short data frame, len = %d\n", 455 + skb->len); 456 + return -ENOENT; 457 + } 458 + ta = wil_skb_get_sa(skb); 459 + } else { 460 + if (unlikely(skb->len < sizeof(struct ieee80211_hdr_3addr))) { 461 + wil_err_ratelimited(wil, "Short frame, len = %d\n", 462 + skb->len); 463 + return -ENOENT; 464 + } 465 + hdr = (void *)skb->data; 466 + ta = hdr->addr2; 467 + } 468 + 469 + if (max_assoc_sta <= WIL6210_RX_DESC_MAX_CID) 470 + return cid; 471 + 472 + /* assuming no concurrency between AP interfaces and STA interfaces. 473 + * multista is used only in P2P_GO or AP mode. 
In other modes return 474 + * cid from the rx descriptor 475 + */ 476 + if (vif->wdev.iftype != NL80211_IFTYPE_P2P_GO && 477 + vif->wdev.iftype != NL80211_IFTYPE_AP) 478 + return cid; 479 + 480 + /* For Rx packets cid from rx descriptor is limited to 3 bits (0..7), 481 + * to find the real cid, compare transmitter address with the stored 482 + * stations mac address in the driver sta array 483 + */ 484 + for (i = cid; i < max_assoc_sta; i += WIL6210_RX_DESC_MAX_CID) { 485 + if (wil->sta[i].status != wil_sta_unused && 486 + ether_addr_equal(wil->sta[i].addr, ta)) { 487 + cid = i; 488 + break; 489 + } 490 + } 491 + if (i >= max_assoc_sta) { 492 + wil_err_ratelimited(wil, "Could not find cid for frame with transmit addr = %pM, iftype = %d, frametype = %d, len = %d\n", 493 + ta, vif->wdev.iftype, ftype, skb->len); 494 + cid = -ENOENT; 495 + } 496 + 497 + return cid; 373 498 } 374 499 375 500 /** ··· 467 452 int i; 468 453 struct wil_net_stats *stats; 469 454 470 - BUILD_BUG_ON(sizeof(struct vring_rx_desc) > sizeof(skb->cb)); 455 + BUILD_BUG_ON(sizeof(struct skb_rx_info) > sizeof(skb->cb)); 471 456 472 457 again: 473 458 if (unlikely(wil_ring_is_empty(vring))) ··· 499 484 wil_hex_dump_txrx("RxD ", DUMP_PREFIX_NONE, 32, 4, 500 485 (const void *)d, sizeof(*d), false); 501 486 502 - cid = wil_rxdesc_cid(d); 503 487 mid = wil_rxdesc_mid(d); 504 488 vif = wil->vifs[mid]; 505 489 ··· 509 495 goto again; 510 496 } 511 497 ndev = vif_to_ndev(vif); 512 - stats = &wil->sta[cid].stats; 513 - 514 498 if (unlikely(dmalen > sz)) { 515 - wil_err(wil, "Rx size too large: %d bytes!\n", dmalen); 516 - stats->rx_large_frame++; 499 + wil_err_ratelimited(wil, "Rx size too large: %d bytes!\n", 500 + dmalen); 517 501 kfree_skb(skb); 518 502 goto again; 519 503 } ··· 521 509 522 510 wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1, 523 511 skb->data, skb_headlen(skb), false); 512 + 513 + cid = wil_rx_get_cid_by_skb(wil, skb); 514 + if (cid == -ENOENT) { 515 + kfree_skb(skb); 516 + goto 
again; 517 + } 518 + wil_skb_set_cid(skb, (u8)cid); 519 + stats = &wil->sta[cid].stats; 524 520 525 521 stats->last_mcs_rx = wil_rxdesc_mcs(d); 526 522 if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs)) ··· 572 552 wil_hex_dump_txrx("Rx ", DUMP_PREFIX_OFFSET, 16, 1, 573 553 skb->data, skb_headlen(skb), false); 574 554 } 575 - kfree_skb(skb); 576 - goto again; 577 - } 578 - 579 - if (unlikely(skb->len < ETH_HLEN + snaplen)) { 580 - wil_err(wil, "Short frame, len = %d\n", skb->len); 581 - stats->rx_short_frame++; 582 555 kfree_skb(skb); 583 556 goto again; 584 557 } ··· 672 659 static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb) 673 660 { 674 661 struct vring_rx_desc *d = wil_skb_rxdesc(skb); 675 - int cid = wil_rxdesc_cid(d); 662 + int cid = wil_skb_get_cid(skb); 676 663 int tid = wil_rxdesc_tid(d); 677 664 int key_id = wil_rxdesc_key_id(d); 678 665 int mc = wil_rxdesc_mcast(d); ··· 720 707 { 721 708 struct vring_rx_desc *d = wil_skb_rxdesc(skb); 722 709 723 - *cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */ 710 + *cid = wil_skb_get_cid(skb); 724 711 *security = wil_rxdesc_security(d); 725 712 } 726 713 ··· 737 724 unsigned int len = skb->len; 738 725 int cid; 739 726 int security; 740 - struct ethhdr *eth = (void *)skb->data; 727 + u8 *sa, *da = wil_skb_get_da(skb); 741 728 /* here looking for DA, not A1, thus Rxdesc's 'mcast' indication 742 729 * is not suitable, need to look at data 743 730 */ 744 - int mcast = is_multicast_ether_addr(eth->h_dest); 731 + int mcast = is_multicast_ether_addr(da); 745 732 struct wil_net_stats *stats; 746 733 struct sk_buff *xmit_skb = NULL; 747 734 static const char * const gro_res_str[] = { ··· 772 759 } 773 760 774 761 if (wdev->iftype == NL80211_IFTYPE_STATION) { 775 - if (mcast && ether_addr_equal(eth->h_source, ndev->dev_addr)) { 762 + sa = wil_skb_get_sa(skb); 763 + if (mcast && ether_addr_equal(sa, ndev->dev_addr)) { 776 764 /* mcast packet looped back to us */ 777 765 rc = 
GRO_DROP; 778 766 dev_kfree_skb(skb); ··· 786 772 */ 787 773 xmit_skb = skb_copy(skb, GFP_ATOMIC); 788 774 } else { 789 - int xmit_cid = wil_find_cid(wil, vif->mid, 790 - eth->h_dest); 775 + int xmit_cid = wil_find_cid(wil, vif->mid, da); 791 776 792 777 if (xmit_cid >= 0) { 793 778 /* The destination station is associated to ··· 984 971 .ring_size = cpu_to_le16(size), 985 972 }, 986 973 .ringid = id, 987 - .cidxtid = mk_cidxtid(cid, tid), 988 974 .encap_trans_type = WMI_VRING_ENC_TYPE_802_3, 989 975 .mac_ctrl = 0, 990 976 .to_resolution = 0, ··· 1002 990 }; 1003 991 struct wil_ring *vring = &wil->ring_tx[id]; 1004 992 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[id]; 993 + 994 + if (cid >= WIL6210_RX_DESC_MAX_CID) { 995 + cmd.vring_cfg.cidxtid = CIDXTID_EXTENDED_CID_TID; 996 + cmd.vring_cfg.cid = cid; 997 + cmd.vring_cfg.tid = tid; 998 + } else { 999 + cmd.vring_cfg.cidxtid = mk_cidxtid(cid, tid); 1000 + } 1005 1001 1006 1002 wil_dbg_misc(wil, "vring_init_tx: max_mpdu_size %d\n", 1007 1003 cmd.vring_cfg.tx_sw_ring.max_mpdu_size); ··· 1063 1043 txdata->enabled = 0; 1064 1044 spin_unlock_bh(&txdata->lock); 1065 1045 wil_vring_free(wil, vring); 1066 - wil->ring2cid_tid[id][0] = WIL6210_MAX_CID; 1046 + wil->ring2cid_tid[id][0] = max_assoc_sta; 1067 1047 wil->ring2cid_tid[id][1] = 0; 1068 1048 1069 1049 out: ··· 1148 1128 txdata->dot1x_open = false; 1149 1129 txdata->enabled = 0; 1150 1130 spin_unlock_bh(&txdata->lock); 1151 - wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID; 1131 + wil->ring2cid_tid[ring_id][0] = max_assoc_sta; 1152 1132 wil->ring2cid_tid[ring_id][1] = 0; 1153 1133 return rc; 1154 1134 } ··· 1195 1175 if (rc) 1196 1176 goto out; 1197 1177 1198 - wil->ring2cid_tid[id][0] = WIL6210_MAX_CID; /* CID */ 1178 + wil->ring2cid_tid[id][0] = max_assoc_sta; /* CID */ 1199 1179 wil->ring2cid_tid[id][1] = 0; /* TID */ 1200 1180 1201 1181 cmd.vring_cfg.tx_sw_ring.ring_mem_base = cpu_to_le64(vring->pa); ··· 1237 1217 struct wil6210_vif *vif, 1238 1218 struct 
sk_buff *skb) 1239 1219 { 1240 - int i; 1241 - struct ethhdr *eth = (void *)skb->data; 1242 - int cid = wil_find_cid(wil, vif->mid, eth->h_dest); 1220 + int i, cid; 1221 + const u8 *da = wil_skb_get_da(skb); 1243 1222 int min_ring_id = wil_get_min_tx_ring_id(wil); 1244 1223 1245 - if (cid < 0) 1224 + cid = wil_find_cid(wil, vif->mid, da); 1225 + 1226 + if (cid < 0 || cid >= max_assoc_sta) 1246 1227 return NULL; 1247 1228 1248 1229 /* TODO: fix for multiple TID */ ··· 1256 1235 struct wil_ring_tx_data *txdata = &wil->ring_tx_data[i]; 1257 1236 1258 1237 wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n", 1259 - eth->h_dest, i); 1238 + da, i); 1260 1239 if (v->va && txdata->enabled) { 1261 1240 return v; 1262 1241 } else { ··· 1295 1274 continue; 1296 1275 1297 1276 cid = wil->ring2cid_tid[i][0]; 1298 - if (cid >= WIL6210_MAX_CID) /* skip BCAST */ 1277 + if (cid >= max_assoc_sta) /* skip BCAST */ 1299 1278 continue; 1300 1279 1301 1280 if (!wil->ring_tx_data[i].dot1x_open && ··· 1347 1326 static void wil_set_da_for_vring(struct wil6210_priv *wil, 1348 1327 struct sk_buff *skb, int vring_index) 1349 1328 { 1350 - struct ethhdr *eth = (void *)skb->data; 1329 + u8 *da = wil_skb_get_da(skb); 1351 1330 int cid = wil->ring2cid_tid[vring_index][0]; 1352 1331 1353 - ether_addr_copy(eth->h_dest, wil->sta[cid].addr); 1332 + ether_addr_copy(da, wil->sta[cid].addr); 1354 1333 } 1355 1334 1356 1335 static struct wil_ring *wil_find_tx_bcast_2(struct wil6210_priv *wil, ··· 1361 1340 struct sk_buff *skb2; 1362 1341 int i; 1363 1342 u8 cid; 1364 - struct ethhdr *eth = (void *)skb->data; 1365 - char *src = eth->h_source; 1343 + const u8 *src = wil_skb_get_sa(skb); 1366 1344 struct wil_ring_tx_data *txdata, *txdata2; 1367 1345 int min_ring_id = wil_get_min_tx_ring_id(wil); 1368 1346 ··· 1373 1353 continue; 1374 1354 1375 1355 cid = wil->ring2cid_tid[i][0]; 1376 - if (cid >= WIL6210_MAX_CID) /* skip BCAST */ 1356 + if (cid >= max_assoc_sta) /* skip BCAST */ 1377 1357 continue; 1378 1358 
if (!wil->ring_tx_data[i].dot1x_open && 1379 1359 skb->protocol != cpu_to_be16(ETH_P_PAE)) ··· 1401 1381 if (!v2->va || txdata2->mid != vif->mid) 1402 1382 continue; 1403 1383 cid = wil->ring2cid_tid[i][0]; 1404 - if (cid >= WIL6210_MAX_CID) /* skip BCAST */ 1384 + if (cid >= max_assoc_sta) /* skip BCAST */ 1405 1385 continue; 1406 1386 if (!wil->ring_tx_data[i].dot1x_open && 1407 1387 skb->protocol != cpu_to_be16(ETH_P_PAE)) ··· 2052 2032 wil_dbg_txrx(wil, "check_stop=%d, mid=%d, stopped=%d", 2053 2033 check_stop, vif->mid, vif->net_queue_stopped); 2054 2034 2035 + if (ring && drop_if_ring_full) 2036 + /* no need to stop/wake net queues */ 2037 + return; 2038 + 2055 2039 if (check_stop == vif->net_queue_stopped) 2056 2040 /* net queues already in desired state */ 2057 2041 return; ··· 2119 2095 { 2120 2096 struct wil6210_vif *vif = ndev_to_vif(ndev); 2121 2097 struct wil6210_priv *wil = vif_to_wil(vif); 2122 - struct ethhdr *eth = (void *)skb->data; 2123 - bool bcast = is_multicast_ether_addr(eth->h_dest); 2098 + const u8 *da = wil_skb_get_da(skb); 2099 + bool bcast = is_multicast_ether_addr(da); 2124 2100 struct wil_ring *ring; 2125 2101 static bool pr_once_fw; 2126 2102 int rc; ··· 2167 2143 ring = wil_find_tx_ucast(wil, vif, skb); 2168 2144 } 2169 2145 if (unlikely(!ring)) { 2170 - wil_dbg_txrx(wil, "No Tx RING found for %pM\n", eth->h_dest); 2146 + wil_dbg_txrx(wil, "No Tx RING found for %pM\n", da); 2171 2147 goto drop; 2172 2148 } 2173 2149 /* set up vring entry */ ··· 2181 2157 dev_kfree_skb_any(skb); 2182 2158 return NETDEV_TX_OK; 2183 2159 case -ENOMEM: 2160 + if (drop_if_ring_full) 2161 + goto drop; 2184 2162 return NETDEV_TX_BUSY; 2185 2163 default: 2186 2164 break; /* goto drop; */ ··· 2254 2228 2255 2229 used_before_complete = wil_ring_used_tx(vring); 2256 2230 2257 - if (cid < WIL6210_MAX_CID) 2231 + if (cid < max_assoc_sta) 2258 2232 stats = &wil->sta[cid].stats; 2259 2233 2260 2234 while (!wil_ring_is_empty(vring)) { ··· 2363 2337 struct 
vring_rx_desc *d = wil_skb_rxdesc(skb); 2364 2338 2365 2339 *tid = wil_rxdesc_tid(d); 2366 - *cid = wil_rxdesc_cid(d); 2340 + *cid = wil_skb_get_cid(skb); 2367 2341 *mid = wil_rxdesc_mid(d); 2368 2342 *seq = wil_rxdesc_seq(d); 2369 2343 *mcast = wil_rxdesc_mcast(d);
+43 -8
drivers/net/wireless/ath/wil6210/txrx.h
··· 1 1 /* 2 2 * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. 3 - * Copyright (c) 2018, The Linux Foundation. All rights reserved. 3 + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 4 4 * 5 5 * Permission to use, copy, modify, and/or distribute this software for any 6 6 * purpose with or without fee is hereby granted, provided that the above ··· 458 458 union wil_rx_desc rx; 459 459 } __packed; 460 460 461 + struct packet_rx_info { 462 + u8 cid; 463 + }; 464 + 465 + /* this struct will be stored in the skb cb buffer 466 + * max length of the struct is limited to 48 bytes 467 + */ 468 + struct skb_rx_info { 469 + struct vring_rx_desc rx_desc; 470 + struct packet_rx_info rx_info; 471 + }; 472 + 461 473 static inline int wil_rxdesc_tid(struct vring_rx_desc *d) 462 474 { 463 475 return WIL_GET_BITS(d->mac.d0, 0, 3); ··· 542 530 return WIL_GET_BITS(d->mac.d1, 13, 14); 543 531 } 544 532 545 - static inline int wil_rxdesc_phy_length(struct vring_rx_desc *d) 546 - { 547 - return WIL_GET_BITS(d->dma.d0, 16, 29); 548 - } 549 - 550 533 static inline struct vring_rx_desc *wil_skb_rxdesc(struct sk_buff *skb) 551 534 { 552 535 return (void *)skb->cb; ··· 567 560 return wil_ring_next_tail(ring) == ring->swhead; 568 561 } 569 562 570 - static inline bool wil_need_txstat(struct sk_buff *skb) 563 + static inline u8 *wil_skb_get_da(struct sk_buff *skb) 571 564 { 572 565 struct ethhdr *eth = (void *)skb->data; 573 566 574 - return is_unicast_ether_addr(eth->h_dest) && skb->sk && 567 + return eth->h_dest; 568 + } 569 + 570 + static inline u8 *wil_skb_get_sa(struct sk_buff *skb) 571 + { 572 + struct ethhdr *eth = (void *)skb->data; 573 + 574 + return eth->h_source; 575 + } 576 + 577 + static inline bool wil_need_txstat(struct sk_buff *skb) 578 + { 579 + const u8 *da = wil_skb_get_da(skb); 580 + 581 + return is_unicast_ether_addr(da) && skb->sk && 575 582 (skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS); 576 583 } 577 584 ··· 629 608 static inline bool 
wil_val_in_range(int val, int min, int max) 630 609 { 631 610 return val >= min && val < max; 611 + } 612 + 613 + static inline u8 wil_skb_get_cid(struct sk_buff *skb) 614 + { 615 + struct skb_rx_info *skb_rx_info = (void *)skb->cb; 616 + 617 + return skb_rx_info->rx_info.cid; 618 + } 619 + 620 + static inline void wil_skb_set_cid(struct sk_buff *skb, u8 cid) 621 + { 622 + struct skb_rx_info *skb_rx_info = (void *)skb->cb; 623 + 624 + skb_rx_info->rx_info.cid = cid; 632 625 } 633 626 634 627 void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev);
+5 -6
drivers/net/wireless/ath/wil6210/txrx_edma.c
··· 1 1 /* 2 - * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved. 2 + * Copyright (c) 2012-2019 The Linux Foundation. All rights reserved. 3 3 * 4 4 * Permission to use, copy, modify, and/or distribute this software for any 5 5 * purpose with or without fee is hereby granted, provided that the above ··· 727 727 txdata->enabled = 0; 728 728 spin_unlock_bh(&txdata->lock); 729 729 wil_ring_free_edma(wil, ring); 730 - wil->ring2cid_tid[ring_id][0] = WIL6210_MAX_CID; 730 + wil->ring2cid_tid[ring_id][0] = max_assoc_sta; 731 731 wil->ring2cid_tid[ring_id][1] = 0; 732 732 733 733 out: ··· 932 932 eop = wil_rx_status_get_eop(msg); 933 933 934 934 cid = wil_rx_status_get_cid(msg); 935 - if (unlikely(!wil_val_in_range(cid, 0, WIL6210_MAX_CID))) { 935 + if (unlikely(!wil_val_in_range(cid, 0, max_assoc_sta))) { 936 936 wil_err(wil, "Corrupt cid=%d, sring->swhead=%d\n", 937 937 cid, sring->swhead); 938 938 rxdata->skipping = true; ··· 1137 1137 /* Total number of completed descriptors in all descriptor rings */ 1138 1138 int desc_cnt = 0; 1139 1139 int cid; 1140 - struct wil_net_stats *stats = NULL; 1140 + struct wil_net_stats *stats; 1141 1141 struct wil_tx_enhanced_desc *_d; 1142 1142 unsigned int ring_id; 1143 1143 unsigned int num_descs; ··· 1187 1187 ndev = vif_to_ndev(vif); 1188 1188 1189 1189 cid = wil->ring2cid_tid[ring_id][0]; 1190 - if (cid < WIL6210_MAX_CID) 1191 - stats = &wil->sta[cid].stats; 1190 + stats = (cid < max_assoc_sta ? &wil->sta[cid].stats : NULL); 1192 1191 1193 1192 wil_dbg_txrx(wil, 1194 1193 "tx_status: completed desc_ring (%d), num_descs (%d)\n",
+10 -6
drivers/net/wireless/ath/wil6210/wil6210.h
··· 1 1 /* 2 2 * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. 3 - * Copyright (c) 2018, The Linux Foundation. All rights reserved. 3 + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 4 4 * 5 5 * Permission to use, copy, modify, and/or distribute this software for any 6 6 * purpose with or without fee is hereby granted, provided that the above ··· 38 38 extern bool debug_fw; 39 39 extern bool disable_ap_sme; 40 40 extern bool ftm_mode; 41 + extern bool drop_if_ring_full; 42 + extern uint max_assoc_sta; 41 43 42 44 struct wil6210_priv; 43 45 struct wil6210_vif; ··· 91 89 #define WIL_RING_SIZE_ORDER_MIN (5) 92 90 #define WIL_RING_SIZE_ORDER_MAX (15) 93 91 #define WIL6210_MAX_TX_RINGS (24) /* HW limit */ 94 - #define WIL6210_MAX_CID (8) /* HW limit */ 92 + #define WIL6210_MAX_CID (20) /* max number of stations */ 93 + #define WIL6210_RX_DESC_MAX_CID (8) /* HW limit */ 95 94 #define WIL6210_NAPI_BUDGET (16) /* arbitrary */ 96 95 #define WIL_MAX_AMPDU_SIZE (64 * 1024) /* FW/HW limit */ 97 96 #define WIL_MAX_AGG_WSIZE (32) /* FW/HW limit */ ··· 460 457 */ 461 458 static inline bool wil_cid_valid(u8 cid) 462 459 { 463 - return cid < WIL6210_MAX_CID; 460 + return (cid >= 0 && cid < max_assoc_sta); 464 461 } 465 462 466 463 struct wil6210_mbox_ring { ··· 794 791 struct mutex lock; /* protect halp ref_cnt */ 795 792 unsigned int ref_cnt; 796 793 struct completion comp; 794 + u8 handle_icr; 797 795 }; 798 796 799 797 struct wil_blob_wrapper { ··· 1239 1235 int wmi_addba(struct wil6210_priv *wil, u8 mid, 1240 1236 u8 ringid, u8 size, u16 timeout); 1241 1237 int wmi_delba_tx(struct wil6210_priv *wil, u8 mid, u8 ringid, u16 reason); 1242 - int wmi_delba_rx(struct wil6210_priv *wil, u8 mid, u8 cidxtid, u16 reason); 1238 + int wmi_delba_rx(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid, u16 reason); 1243 1239 int wmi_addba_rx_resp(struct wil6210_priv *wil, 1244 1240 u8 mid, u8 cid, u8 tid, u8 token, 1245 1241 u16 status, bool amsdu, u16 agg_wsize, u16 
timeout); ··· 1252 1248 const u8 *mac, enum nl80211_iftype iftype); 1253 1249 int wmi_port_delete(struct wil6210_priv *wil, u8 mid); 1254 1250 int wmi_link_stats_cfg(struct wil6210_vif *vif, u32 type, u8 cid, u32 interval); 1255 - int wil_addba_rx_request(struct wil6210_priv *wil, u8 mid, 1256 - u8 cidxtid, u8 dialog_token, __le16 ba_param_set, 1251 + int wil_addba_rx_request(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid, 1252 + u8 dialog_token, __le16 ba_param_set, 1257 1253 __le16 ba_timeout, __le16 ba_seq_ctrl); 1258 1254 int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize); 1259 1255
+65 -18
drivers/net/wireless/ath/wil6210/wmi.c
··· 1 1 /* 2 2 * Copyright (c) 2012-2017 Qualcomm Atheros, Inc. 3 - * Copyright (c) 2018, The Linux Foundation. All rights reserved. 3 + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. 4 4 * 5 5 * Permission to use, copy, modify, and/or distribute this software for any 6 6 * purpose with or without fee is hereby granted, provided that the above ··· 24 24 #include "wmi.h" 25 25 #include "trace.h" 26 26 27 - static uint max_assoc_sta = WIL6210_MAX_CID; 28 - module_param(max_assoc_sta, uint, 0644); 27 + /* set the default max assoc sta to max supported by driver */ 28 + uint max_assoc_sta = WIL6210_MAX_CID; 29 + module_param(max_assoc_sta, uint, 0444); 29 30 MODULE_PARM_DESC(max_assoc_sta, " Max number of stations associated to the AP"); 30 31 31 32 int agg_wsize; /* = 0; */ ··· 771 770 struct wil6210_priv *wil = vif_to_wil(vif); 772 771 struct wiphy *wiphy = wil_to_wiphy(wil); 773 772 struct wmi_ready_event *evt = d; 773 + u8 fw_max_assoc_sta; 774 774 775 775 wil_info(wil, "FW ver. 
%s(SW %d); MAC %pM; %d MID's\n", 776 776 wil->fw_version, le32_to_cpu(evt->sw_version), ··· 789 787 evt->rfc_read_calib_result); 790 788 wil->fw_calib_result = evt->rfc_read_calib_result; 791 789 } 790 + 791 + fw_max_assoc_sta = WIL6210_RX_DESC_MAX_CID; 792 + if (len > offsetof(struct wmi_ready_event, max_assoc_sta) && 793 + evt->max_assoc_sta > 0) { 794 + fw_max_assoc_sta = evt->max_assoc_sta; 795 + wil_dbg_wmi(wil, "fw reported max assoc sta %d\n", 796 + fw_max_assoc_sta); 797 + 798 + if (fw_max_assoc_sta > WIL6210_MAX_CID) { 799 + wil_dbg_wmi(wil, 800 + "fw max assoc sta %d exceeds max driver supported %d\n", 801 + fw_max_assoc_sta, WIL6210_MAX_CID); 802 + fw_max_assoc_sta = WIL6210_MAX_CID; 803 + } 804 + } 805 + 806 + max_assoc_sta = min_t(uint, max_assoc_sta, fw_max_assoc_sta); 807 + wil_dbg_wmi(wil, "setting max assoc sta to %d\n", max_assoc_sta); 808 + 792 809 wil_set_recovery_state(wil, fw_recovery_idle); 793 810 set_bit(wil_status_fwready, wil->status); 794 811 /* let the reset sequence continue */ ··· 973 952 evt->assoc_req_len, evt->assoc_resp_len); 974 953 return; 975 954 } 976 - if (evt->cid >= WIL6210_MAX_CID) { 955 + if (evt->cid >= max_assoc_sta) { 977 956 wil_err(wil, "Connect CID invalid : %d\n", evt->cid); 978 957 return; 979 958 } ··· 1292 1271 void *d, int len) 1293 1272 { 1294 1273 struct wil6210_priv *wil = vif_to_wil(vif); 1274 + u8 cid, tid; 1295 1275 struct wmi_rcp_addba_req_event *evt = d; 1296 1276 1297 - wil_addba_rx_request(wil, vif->mid, evt->cidxtid, evt->dialog_token, 1277 + if (evt->cidxtid != CIDXTID_EXTENDED_CID_TID) { 1278 + parse_cidxtid(evt->cidxtid, &cid, &tid); 1279 + } else { 1280 + cid = evt->cid; 1281 + tid = evt->tid; 1282 + } 1283 + wil_addba_rx_request(wil, vif->mid, cid, tid, evt->dialog_token, 1298 1284 evt->ba_param_set, evt->ba_timeout, 1299 1285 evt->ba_seq_ctrl); 1300 1286 } ··· 1317 1289 struct wil_tid_ampdu_rx *r; 1318 1290 1319 1291 might_sleep(); 1320 - parse_cidxtid(evt->cidxtid, &cid, &tid); 1292 + 1293 + 
if (evt->cidxtid != CIDXTID_EXTENDED_CID_TID) { 1294 + parse_cidxtid(evt->cidxtid, &cid, &tid); 1295 + } else { 1296 + cid = evt->cid; 1297 + tid = evt->tid; 1298 + } 1321 1299 wil_dbg_wmi(wil, "DELBA MID %d CID %d TID %d from %s reason %d\n", 1322 1300 vif->mid, cid, tid, 1323 1301 evt->from_initiator ? "originator" : "recipient", ··· 1438 1404 u8 cid = basic->cid; 1439 1405 struct wil_sta_info *sta; 1440 1406 1441 - if (cid < 0 || cid >= WIL6210_MAX_CID) { 1407 + if (cid < 0 || cid >= max_assoc_sta) { 1442 1408 wil_err(wil, "invalid cid %d\n", cid); 1443 1409 return; 1444 1410 } ··· 1588 1554 continue; 1589 1555 1590 1556 lcid = wil->ring2cid_tid[i][0]; 1591 - if (lcid >= WIL6210_MAX_CID) /* skip BCAST */ 1557 + if (lcid >= max_assoc_sta) /* skip BCAST */ 1592 1558 continue; 1593 1559 1594 1560 wil_dbg_wmi(wil, "find sta -> ringid %d cid %d\n", i, lcid); ··· 2154 2120 2155 2121 if ((cmd.pcp_max_assoc_sta > WIL6210_MAX_CID) || 2156 2122 (cmd.pcp_max_assoc_sta <= 0)) { 2157 - wil_info(wil, 2158 - "Requested connection limit %u, valid values are 1 - %d. Setting to %d\n", 2159 - max_assoc_sta, WIL6210_MAX_CID, WIL6210_MAX_CID); 2160 - cmd.pcp_max_assoc_sta = WIL6210_MAX_CID; 2123 + wil_err(wil, "unexpected max_assoc_sta %d\n", 2124 + cmd.pcp_max_assoc_sta); 2125 + return -EOPNOTSUPP; 2161 2126 } 2162 2127 2163 2128 if (disable_ap_sme && ··· 2549 2516 if (ch) 2550 2517 cmd.sniffer_cfg.channel = ch->hw_value - 1; 2551 2518 cmd.sniffer_cfg.phy_info_mode = 2552 - cpu_to_le32(ndev->type == ARPHRD_IEEE80211_RADIOTAP); 2519 + cpu_to_le32(WMI_SNIFFER_PHY_INFO_DISABLED); 2553 2520 cmd.sniffer_cfg.phy_support = 2554 2521 cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL) 2555 2522 ? 
WMI_SNIFFER_CP : WMI_SNIFFER_BOTH_PHYS); ··· 2684 2651 return wmi_send(wil, WMI_RING_BA_DIS_CMDID, mid, &cmd, sizeof(cmd)); 2685 2652 } 2686 2653 2687 - int wmi_delba_rx(struct wil6210_priv *wil, u8 mid, u8 cidxtid, u16 reason) 2654 + int wmi_delba_rx(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid, u16 reason) 2688 2655 { 2689 2656 struct wmi_rcp_delba_cmd cmd = { 2690 - .cidxtid = cidxtid, 2691 2657 .reason = cpu_to_le16(reason), 2692 2658 }; 2693 2659 2694 - wil_dbg_wmi(wil, "delba_rx: (CID %d TID %d reason %d)\n", cidxtid & 0xf, 2695 - (cidxtid >> 4) & 0xf, reason); 2660 + if (cid >= WIL6210_RX_DESC_MAX_CID) { 2661 + cmd.cidxtid = CIDXTID_EXTENDED_CID_TID; 2662 + cmd.cid = cid; 2663 + cmd.tid = tid; 2664 + } else { 2665 + cmd.cidxtid = mk_cidxtid(cid, tid); 2666 + } 2667 + 2668 + wil_dbg_wmi(wil, "delba_rx: (CID %d TID %d reason %d)\n", cid, 2669 + tid, reason); 2696 2670 2697 2671 return wmi_send(wil, WMI_RCP_DELBA_CMDID, mid, &cmd, sizeof(cmd)); 2698 2672 } ··· 2710 2670 { 2711 2671 int rc; 2712 2672 struct wmi_rcp_addba_resp_cmd cmd = { 2713 - .cidxtid = mk_cidxtid(cid, tid), 2714 2673 .dialog_token = token, 2715 2674 .status_code = cpu_to_le16(status), 2716 2675 /* bit 0: A-MSDU supported ··· 2727 2688 } __packed reply = { 2728 2689 .evt = {.status = cpu_to_le16(WMI_FW_STATUS_FAILURE)}, 2729 2690 }; 2691 + 2692 + if (cid >= WIL6210_RX_DESC_MAX_CID) { 2693 + cmd.cidxtid = CIDXTID_EXTENDED_CID_TID; 2694 + cmd.cid = cid; 2695 + cmd.tid = tid; 2696 + } else { 2697 + cmd.cidxtid = mk_cidxtid(cid, tid); 2698 + } 2730 2699 2731 2700 wil_dbg_wmi(wil, 2732 2701 "ADDBA response for MID %d CID %d TID %d size %d timeout %d status %d AMSDU%s\n",
+25 -1
drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
··· 134 134 __le16 flow_ring_id; 135 135 }; 136 136 137 + /* Data struct for the MSGBUF_TYPE_GEN_STATUS */ 138 + struct msgbuf_gen_status { 139 + struct msgbuf_common_hdr msg; 140 + struct msgbuf_completion_hdr compl_hdr; 141 + __le16 write_idx; 142 + __le32 rsvd0[3]; 143 + }; 144 + 137 145 /* Data struct for the MSGBUF_TYPE_RING_STATUS */ 138 146 struct msgbuf_ring_status { 139 147 struct msgbuf_common_hdr msg; 140 148 struct msgbuf_completion_hdr compl_hdr; 141 149 __le16 write_idx; 142 - __le32 rsvd0[5]; 150 + __le16 rsvd0[5]; 143 151 }; 144 152 145 153 struct msgbuf_rx_event { ··· 1202 1194 brcmf_netif_rx(ifp, skb); 1203 1195 } 1204 1196 1197 + static void brcmf_msgbuf_process_gen_status(struct brcmf_msgbuf *msgbuf, 1198 + void *buf) 1199 + { 1200 + struct msgbuf_gen_status *gen_status = buf; 1201 + struct brcmf_pub *drvr = msgbuf->drvr; 1202 + int err; 1203 + 1204 + err = le16_to_cpu(gen_status->compl_hdr.status); 1205 + if (err) 1206 + bphy_err(drvr, "Firmware reported general error: %d\n", err); 1207 + } 1208 + 1205 1209 static void brcmf_msgbuf_process_ring_status(struct brcmf_msgbuf *msgbuf, 1206 1210 void *buf) 1207 1211 { ··· 1293 1273 1294 1274 msg = (struct msgbuf_common_hdr *)buf; 1295 1275 switch (msg->msgtype) { 1276 + case MSGBUF_TYPE_GEN_STATUS: 1277 + brcmf_dbg(MSGBUF, "MSGBUF_TYPE_GEN_STATUS\n"); 1278 + brcmf_msgbuf_process_gen_status(msgbuf, buf); 1279 + break; 1296 1280 case MSGBUF_TYPE_RING_STATUS: 1297 1281 brcmf_dbg(MSGBUF, "MSGBUF_TYPE_RING_STATUS\n"); 1298 1282 brcmf_msgbuf_process_ring_status(msgbuf, buf);
+1
drivers/net/wireless/mediatek/mt76/Kconfig
··· 21 21 22 22 source "drivers/net/wireless/mediatek/mt76/mt76x0/Kconfig" 23 23 source "drivers/net/wireless/mediatek/mt76/mt76x2/Kconfig" 24 + source "drivers/net/wireless/mediatek/mt76/mt7603/Kconfig"
+2 -1
drivers/net/wireless/mediatek/mt76/Makefile
··· 7 7 mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o \ 8 8 tx.o agg-rx.o mcu.o 9 9 10 - mt76-usb-y := usb.o usb_trace.o usb_mcu.o 10 + mt76-usb-y := usb.o usb_trace.o 11 11 12 12 CFLAGS_trace.o := -I$(src) 13 13 CFLAGS_usb_trace.o := -I$(src) ··· 22 22 23 23 obj-$(CONFIG_MT76x0_COMMON) += mt76x0/ 24 24 obj-$(CONFIG_MT76x2_COMMON) += mt76x2/ 25 + obj-$(CONFIG_MT7603E) += mt7603/
+16 -8
drivers/net/wireless/mediatek/mt76/eeprom.c
··· 54 54 part = np->name; 55 55 56 56 mtd = get_mtd_device_nm(part); 57 - if (IS_ERR(mtd)) 58 - return PTR_ERR(mtd); 57 + if (IS_ERR(mtd)) { 58 + ret = PTR_ERR(mtd); 59 + goto out_put_node; 60 + } 59 61 60 - if (size <= sizeof(*list)) 61 - return -EINVAL; 62 + if (size <= sizeof(*list)) { 63 + ret = -EINVAL; 64 + goto out_put_node; 65 + } 62 66 63 67 offset = be32_to_cpup(list); 64 68 ret = mtd_read(mtd, offset, len, &retlen, dev->eeprom.data); 65 69 put_mtd_device(mtd); 66 70 if (ret) 67 - return ret; 71 + goto out_put_node; 68 72 69 - if (retlen < len) 70 - return -EINVAL; 73 + if (retlen < len) { 74 + ret = -EINVAL; 75 + goto out_put_node; 76 + } 71 77 72 - return 0; 78 + out_put_node: 79 + of_node_put(np); 80 + return ret; 73 81 #else 74 82 return -ENOENT; 75 83 #endif
+5
drivers/net/wireless/mediatek/mt76/mac80211.c
··· 714 714 new_state == IEEE80211_STA_NONE) 715 715 return mt76_sta_add(dev, vif, sta); 716 716 717 + if (old_state == IEEE80211_STA_AUTH && 718 + new_state == IEEE80211_STA_ASSOC && 719 + dev->drv->sta_assoc) 720 + dev->drv->sta_assoc(dev, vif, sta); 721 + 717 722 if (old_state == IEEE80211_STA_NONE && 718 723 new_state == IEEE80211_STA_NOTEXIST) 719 724 mt76_sta_remove(dev, vif, sta);
+12 -17
drivers/net/wireless/mediatek/mt76/mt76.h
··· 304 304 int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif, 305 305 struct ieee80211_sta *sta); 306 306 307 + void (*sta_assoc)(struct mt76_dev *dev, struct ieee80211_vif *vif, 308 + struct ieee80211_sta *sta); 309 + 307 310 void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif, 308 311 struct ieee80211_sta *sta); 309 312 }; ··· 387 384 388 385 struct mt76u_mcu { 389 386 struct mutex mutex; 390 - struct completion cmpl; 391 - struct mt76u_buf res; 387 + u8 *data; 392 388 u32 msg_seq; 393 389 394 390 /* multiple reads */ ··· 731 729 } 732 730 733 731 static inline int 734 - mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int timeout) 732 + mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len, 733 + int timeout) 735 734 { 736 735 struct usb_interface *intf = to_usb_interface(dev->dev); 737 736 struct usb_device *udev = interface_to_usbdev(intf); 738 737 struct mt76_usb *usb = &dev->usb; 739 738 unsigned int pipe; 740 - int sent; 741 739 742 - pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]); 743 - return usb_bulk_msg(udev, pipe, data, len, &sent, timeout); 740 + if (actual_len) 741 + pipe = usb_rcvbulkpipe(udev, usb->in_ep[MT_EP_IN_CMD_RESP]); 742 + else 743 + pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]); 744 + 745 + return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout); 744 746 } 745 747 746 748 int mt76u_vendor_request(struct mt76_dev *dev, u8 req, ··· 753 747 void mt76u_single_wr(struct mt76_dev *dev, const u8 req, 754 748 const u16 offset, const u32 val); 755 749 int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf); 756 - void mt76u_deinit(struct mt76_dev *dev); 757 - int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf, 758 - int len, int data_len, gfp_t gfp); 759 - void mt76u_buf_free(struct mt76u_buf *buf); 760 - int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index, 761 - struct mt76u_buf *buf, gfp_t gfp, 762 - usb_complete_t 
complete_fn, void *context); 763 750 int mt76u_submit_rx_buffers(struct mt76_dev *dev); 764 751 int mt76u_alloc_queues(struct mt76_dev *dev); 765 752 void mt76u_stop_queues(struct mt76_dev *dev); ··· 765 766 void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb); 766 767 struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev, 767 768 unsigned long expires); 768 - 769 - void mt76u_mcu_complete_urb(struct urb *urb); 770 - int mt76u_mcu_init_rx(struct mt76_dev *dev); 771 - void mt76u_mcu_deinit(struct mt76_dev *dev); 772 769 773 770 #endif
+9
drivers/net/wireless/mediatek/mt76/mt7603/Kconfig
··· 1 + config MT7603E 2 + tristate "MediaTek MT7603E (PCIe) and MT76x8 WLAN support" 3 + select MT76_CORE 4 + depends on MAC80211 5 + depends on PCI 6 + help 7 + This adds support for MT7603E wireless PCIe devices and the WLAN core on 8 + MT7628/MT7688 SoC devices 9 +
+6
drivers/net/wireless/mediatek/mt76/mt7603/Makefile
··· 1 + obj-$(CONFIG_MT7603E) += mt7603e.o 2 + 3 + mt7603e-y := \ 4 + pci.o soc.o main.o init.o mcu.o \ 5 + core.o dma.o mac.o eeprom.o \ 6 + beacon.o debugfs.o
+186
drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
··· 1 + /* SPDX-License-Identifier: ISC */ 2 + 3 + #include "mt7603.h" 4 + 5 + struct beacon_bc_data { 6 + struct mt7603_dev *dev; 7 + struct sk_buff_head q; 8 + struct sk_buff *tail[MT7603_MAX_INTERFACES]; 9 + int count[MT7603_MAX_INTERFACES]; 10 + }; 11 + 12 + static void 13 + mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) 14 + { 15 + struct mt7603_dev *dev = (struct mt7603_dev *)priv; 16 + struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv; 17 + struct sk_buff *skb = NULL; 18 + 19 + if (!(dev->beacon_mask & BIT(mvif->idx))) 20 + return; 21 + 22 + skb = ieee80211_beacon_get(mt76_hw(dev), vif); 23 + if (!skb) 24 + return; 25 + 26 + mt76_dma_tx_queue_skb(&dev->mt76, &dev->mt76.q_tx[MT_TXQ_BEACON], skb, 27 + &mvif->sta.wcid, NULL); 28 + 29 + spin_lock_bh(&dev->ps_lock); 30 + mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY | 31 + FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, mvif->sta.wcid.idx) | 32 + FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, 33 + dev->mt76.q_tx[MT_TXQ_CAB].hw_idx) | 34 + FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) | 35 + FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8)); 36 + 37 + if (!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY, 0, 5000)) 38 + dev->beacon_check = MT7603_WATCHDOG_TIMEOUT; 39 + 40 + spin_unlock_bh(&dev->ps_lock); 41 + } 42 + 43 + static void 44 + mt7603_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif) 45 + { 46 + struct beacon_bc_data *data = priv; 47 + struct mt7603_dev *dev = data->dev; 48 + struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv; 49 + struct ieee80211_tx_info *info; 50 + struct sk_buff *skb; 51 + 52 + if (!(dev->beacon_mask & BIT(mvif->idx))) 53 + return; 54 + 55 + skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif); 56 + if (!skb) 57 + return; 58 + 59 + info = IEEE80211_SKB_CB(skb); 60 + info->control.vif = vif; 61 + info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; 62 + mt76_skb_set_moredata(skb, true); 63 + __skb_queue_tail(&data->q, skb); 64 + data->tail[mvif->idx] = skb; 65 + 
data->count[mvif->idx]++; 66 + } 67 + 68 + void mt7603_pre_tbtt_tasklet(unsigned long arg) 69 + { 70 + struct mt7603_dev *dev = (struct mt7603_dev *)arg; 71 + struct mt76_queue *q; 72 + struct beacon_bc_data data = {}; 73 + struct sk_buff *skb; 74 + int i, nframes; 75 + 76 + data.dev = dev; 77 + __skb_queue_head_init(&data.q); 78 + 79 + q = &dev->mt76.q_tx[MT_TXQ_BEACON]; 80 + spin_lock_bh(&q->lock); 81 + ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), 82 + IEEE80211_IFACE_ITER_RESUME_ALL, 83 + mt7603_update_beacon_iter, dev); 84 + mt76_queue_kick(dev, q); 85 + spin_unlock_bh(&q->lock); 86 + 87 + /* Flush all previous CAB queue packets */ 88 + mt76_wr(dev, MT_WF_ARB_CAB_FLUSH, GENMASK(30, 16) | BIT(0)); 89 + 90 + mt76_queue_tx_cleanup(dev, MT_TXQ_CAB, false); 91 + 92 + mt76_csa_check(&dev->mt76); 93 + if (dev->mt76.csa_complete) 94 + goto out; 95 + 96 + q = &dev->mt76.q_tx[MT_TXQ_CAB]; 97 + do { 98 + nframes = skb_queue_len(&data.q); 99 + ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), 100 + IEEE80211_IFACE_ITER_RESUME_ALL, 101 + mt7603_add_buffered_bc, &data); 102 + } while (nframes != skb_queue_len(&data.q) && 103 + skb_queue_len(&data.q) < 8); 104 + 105 + if (skb_queue_empty(&data.q)) 106 + goto out; 107 + 108 + for (i = 0; i < ARRAY_SIZE(data.tail); i++) { 109 + if (!data.tail[i]) 110 + continue; 111 + 112 + mt76_skb_set_moredata(data.tail[i], false); 113 + } 114 + 115 + spin_lock_bh(&q->lock); 116 + while ((skb = __skb_dequeue(&data.q)) != NULL) { 117 + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 118 + struct ieee80211_vif *vif = info->control.vif; 119 + struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv; 120 + 121 + mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->sta.wcid, 122 + NULL); 123 + } 124 + mt76_queue_kick(dev, q); 125 + spin_unlock_bh(&q->lock); 126 + 127 + for (i = 0; i < ARRAY_SIZE(data.count); i++) 128 + mt76_wr(dev, MT_WF_ARB_CAB_COUNT_B0_REG(i), 129 + data.count[i] << MT_WF_ARB_CAB_COUNT_B0_SHIFT(i)); 
130 + 131 + mt76_wr(dev, MT_WF_ARB_CAB_START, 132 + MT_WF_ARB_CAB_START_BSSn(0) | 133 + (MT_WF_ARB_CAB_START_BSS0n(1) * 134 + ((1 << (MT7603_MAX_INTERFACES - 1)) - 1))); 135 + 136 + out: 137 + mt76_queue_tx_cleanup(dev, MT_TXQ_BEACON, false); 138 + if (dev->mt76.q_tx[MT_TXQ_BEACON].queued > 139 + __sw_hweight8(dev->beacon_mask)) 140 + dev->beacon_check++; 141 + } 142 + 143 + void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval) 144 + { 145 + u32 pre_tbtt = MT7603_PRE_TBTT_TIME / 64; 146 + 147 + if (idx >= 0) { 148 + if (intval) 149 + dev->beacon_mask |= BIT(idx); 150 + else 151 + dev->beacon_mask &= ~BIT(idx); 152 + } 153 + 154 + if (!dev->beacon_mask || (!intval && idx < 0)) { 155 + mt7603_irq_disable(dev, MT_INT_MAC_IRQ3); 156 + mt76_clear(dev, MT_ARB_SCR, MT_ARB_SCR_BCNQ_OPMODE_MASK); 157 + mt76_wr(dev, MT_HW_INT_MASK(3), 0); 158 + return; 159 + } 160 + 161 + dev->beacon_int = intval; 162 + mt76_wr(dev, MT_TBTT, 163 + FIELD_PREP(MT_TBTT_PERIOD, intval) | MT_TBTT_CAL_ENABLE); 164 + 165 + mt76_wr(dev, MT_TBTT_TIMER_CFG, 0x99); /* start timer */ 166 + 167 + mt76_rmw_field(dev, MT_ARB_SCR, MT_ARB_SCR_BCNQ_OPMODE_MASK, 168 + MT_BCNQ_OPMODE_AP); 169 + mt76_clear(dev, MT_ARB_SCR, MT_ARB_SCR_TBTT_BCN_PRIO); 170 + mt76_set(dev, MT_ARB_SCR, MT_ARB_SCR_TBTT_BCAST_PRIO); 171 + 172 + mt76_wr(dev, MT_PRE_TBTT, pre_tbtt); 173 + 174 + mt76_set(dev, MT_HW_INT_MASK(3), 175 + MT_HW_INT3_PRE_TBTT0 | MT_HW_INT3_TBTT0); 176 + 177 + mt76_set(dev, MT_WF_ARB_BCN_START, 178 + MT_WF_ARB_BCN_START_BSSn(0) | 179 + ((dev->beacon_mask >> 1) * MT_WF_ARB_BCN_START_BSS0n(1))); 180 + mt7603_irq_enable(dev, MT_INT_MAC_IRQ3); 181 + 182 + if (dev->beacon_mask & ~BIT(0)) 183 + mt76_set(dev, MT_LPON_SBTOR(0), MT_LPON_SBTOR_SUB_BSS_EN); 184 + else 185 + mt76_clear(dev, MT_LPON_SBTOR(0), MT_LPON_SBTOR_SUB_BSS_EN); 186 + }
+73
drivers/net/wireless/mediatek/mt76/mt7603/core.c
··· 1 + /* SPDX-License-Identifier: ISC */ 2 + 3 + #include "mt7603.h" 4 + 5 + void mt7603_set_irq_mask(struct mt7603_dev *dev, u32 clear, u32 set) 6 + { 7 + unsigned long flags; 8 + 9 + spin_lock_irqsave(&dev->mt76.mmio.irq_lock, flags); 10 + dev->mt76.mmio.irqmask &= ~clear; 11 + dev->mt76.mmio.irqmask |= set; 12 + mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask); 13 + spin_unlock_irqrestore(&dev->mt76.mmio.irq_lock, flags); 14 + } 15 + 16 + void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q) 17 + { 18 + struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76); 19 + 20 + mt7603_irq_enable(dev, MT_INT_RX_DONE(q)); 21 + } 22 + 23 + irqreturn_t mt7603_irq_handler(int irq, void *dev_instance) 24 + { 25 + struct mt7603_dev *dev = dev_instance; 26 + u32 intr; 27 + 28 + intr = mt76_rr(dev, MT_INT_SOURCE_CSR); 29 + mt76_wr(dev, MT_INT_SOURCE_CSR, intr); 30 + 31 + if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.state)) 32 + return IRQ_NONE; 33 + 34 + intr &= dev->mt76.mmio.irqmask; 35 + 36 + if (intr & MT_INT_MAC_IRQ3) { 37 + u32 hwintr = mt76_rr(dev, MT_HW_INT_STATUS(3)); 38 + 39 + mt76_wr(dev, MT_HW_INT_STATUS(3), hwintr); 40 + if (hwintr & MT_HW_INT3_PRE_TBTT0) 41 + tasklet_schedule(&dev->pre_tbtt_tasklet); 42 + 43 + if ((hwintr & MT_HW_INT3_TBTT0) && dev->mt76.csa_complete) 44 + mt76_csa_finish(&dev->mt76); 45 + } 46 + 47 + if (intr & MT_INT_TX_DONE_ALL) { 48 + mt7603_irq_disable(dev, MT_INT_TX_DONE_ALL); 49 + tasklet_schedule(&dev->tx_tasklet); 50 + } 51 + 52 + if (intr & MT_INT_RX_DONE(0)) { 53 + mt7603_irq_disable(dev, MT_INT_RX_DONE(0)); 54 + napi_schedule(&dev->mt76.napi[0]); 55 + } 56 + 57 + if (intr & MT_INT_RX_DONE(1)) { 58 + mt7603_irq_disable(dev, MT_INT_RX_DONE(1)); 59 + napi_schedule(&dev->mt76.napi[1]); 60 + } 61 + 62 + return IRQ_HANDLED; 63 + } 64 + 65 + u32 mt7603_reg_map(struct mt7603_dev *dev, u32 addr) 66 + { 67 + u32 base = addr & GENMASK(31, 19); 68 + u32 offset = addr & GENMASK(18, 0); 69 + 70 + 
dev->bus_ops->wr(&dev->mt76, MT_MCU_PCIE_REMAP_2, base); 71 + 72 + return MT_PCIE_REMAP_BASE_2 + offset; 73 + }
+56
drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
··· 1 + /* SPDX-License-Identifier: ISC */ 2 + 3 + #include "mt7603.h" 4 + 5 + static int 6 + mt7603_reset_read(struct seq_file *s, void *data) 7 + { 8 + struct mt7603_dev *dev = dev_get_drvdata(s->private); 9 + static const char * const reset_cause_str[] = { 10 + [RESET_CAUSE_TX_HANG] = "TX hang", 11 + [RESET_CAUSE_TX_BUSY] = "TX DMA busy stuck", 12 + [RESET_CAUSE_RX_BUSY] = "RX DMA busy stuck", 13 + [RESET_CAUSE_RX_PSE_BUSY] = "RX PSE busy stuck", 14 + [RESET_CAUSE_BEACON_STUCK] = "Beacon stuck", 15 + [RESET_CAUSE_MCU_HANG] = "MCU hang", 16 + [RESET_CAUSE_RESET_FAILED] = "PSE reset failed", 17 + }; 18 + int i; 19 + 20 + for (i = 0; i < ARRAY_SIZE(reset_cause_str); i++) { 21 + if (!reset_cause_str[i]) 22 + continue; 23 + 24 + seq_printf(s, "%20s: %u\n", reset_cause_str[i], 25 + dev->reset_cause[i]); 26 + } 27 + 28 + return 0; 29 + } 30 + 31 + static int 32 + mt7603_radio_read(struct seq_file *s, void *data) 33 + { 34 + struct mt7603_dev *dev = dev_get_drvdata(s->private); 35 + 36 + seq_printf(s, "Sensitivity: %d\n", dev->sensitivity); 37 + seq_printf(s, "False CCA: ofdm=%d cck=%d\n", 38 + dev->false_cca_ofdm, dev->false_cca_cck); 39 + 40 + return 0; 41 + } 42 + 43 + void mt7603_init_debugfs(struct mt7603_dev *dev) 44 + { 45 + struct dentry *dir; 46 + 47 + dir = mt76_register_debugfs(&dev->mt76); 48 + if (!dir) 49 + return; 50 + 51 + debugfs_create_u32("reset_test", 0600, dir, &dev->reset_test); 52 + debugfs_create_devm_seqfile(dev->mt76.dev, "reset", dir, 53 + mt7603_reset_read); 54 + debugfs_create_devm_seqfile(dev->mt76.dev, "radio", dir, 55 + mt7603_radio_read); 56 + }
+215
drivers/net/wireless/mediatek/mt76/mt7603/dma.c
··· 1 + /* SPDX-License-Identifier: ISC */ 2 + 3 + #include "mt7603.h" 4 + #include "mac.h" 5 + #include "../dma.h" 6 + 7 + static int 8 + mt7603_init_tx_queue(struct mt7603_dev *dev, struct mt76_queue *q, 9 + int idx, int n_desc) 10 + { 11 + int ret; 12 + 13 + q->hw_idx = idx; 14 + q->regs = dev->mt76.mmio.regs + MT_TX_RING_BASE + idx * MT_RING_SIZE; 15 + q->ndesc = n_desc; 16 + 17 + ret = mt76_queue_alloc(dev, q); 18 + if (ret) 19 + return ret; 20 + 21 + mt7603_irq_enable(dev, MT_INT_TX_DONE(idx)); 22 + 23 + return 0; 24 + } 25 + 26 + static void 27 + mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb) 28 + { 29 + __le32 *txd = (__le32 *)skb->data; 30 + struct mt7603_sta *msta; 31 + struct mt76_wcid *wcid; 32 + int idx; 33 + u32 val; 34 + 35 + if (skb->len < sizeof(MT_TXD_SIZE) + sizeof(struct ieee80211_hdr)) 36 + goto free; 37 + 38 + val = le32_to_cpu(txd[1]); 39 + idx = FIELD_GET(MT_TXD1_WLAN_IDX, val); 40 + skb->priority = FIELD_GET(MT_TXD1_TID, val); 41 + 42 + if (idx >= MT7603_WTBL_STA - 1) 43 + goto free; 44 + 45 + wcid = rcu_dereference(dev->mt76.wcid[idx]); 46 + if (!wcid) 47 + goto free; 48 + 49 + msta = container_of(wcid, struct mt7603_sta, wcid); 50 + val = le32_to_cpu(txd[0]); 51 + skb_set_queue_mapping(skb, FIELD_GET(MT_TXD0_Q_IDX, val)); 52 + 53 + spin_lock_bh(&dev->ps_lock); 54 + __skb_queue_tail(&msta->psq, skb); 55 + if (skb_queue_len(&msta->psq) >= 64) { 56 + skb = __skb_dequeue(&msta->psq); 57 + dev_kfree_skb(skb); 58 + } 59 + spin_unlock_bh(&dev->ps_lock); 60 + return; 61 + 62 + free: 63 + dev_kfree_skb(skb); 64 + } 65 + 66 + void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, 67 + struct sk_buff *skb) 68 + { 69 + struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76); 70 + __le32 *rxd = (__le32 *)skb->data; 71 + __le32 *end = (__le32 *)&skb->data[skb->len]; 72 + enum rx_pkt_type type; 73 + 74 + type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0])); 75 + 76 + if (q == MT_RXQ_MCU) { 77 + if (type 
== PKT_TYPE_RX_EVENT) 78 + mt76_mcu_rx_event(&dev->mt76, skb); 79 + else 80 + mt7603_rx_loopback_skb(dev, skb); 81 + return; 82 + } 83 + 84 + switch (type) { 85 + case PKT_TYPE_TXS: 86 + for (rxd++; rxd + 5 <= end; rxd += 5) 87 + mt7603_mac_add_txs(dev, rxd); 88 + dev_kfree_skb(skb); 89 + break; 90 + case PKT_TYPE_RX_EVENT: 91 + mt76_mcu_rx_event(&dev->mt76, skb); 92 + return; 93 + case PKT_TYPE_NORMAL: 94 + if (mt7603_mac_fill_rx(dev, skb) == 0) { 95 + mt76_rx(&dev->mt76, q, skb); 96 + return; 97 + } 98 + /* fall through */ 99 + default: 100 + dev_kfree_skb(skb); 101 + break; 102 + } 103 + } 104 + 105 + static int 106 + mt7603_init_rx_queue(struct mt7603_dev *dev, struct mt76_queue *q, 107 + int idx, int n_desc, int bufsize) 108 + { 109 + int ret; 110 + 111 + q->regs = dev->mt76.mmio.regs + MT_RX_RING_BASE + idx * MT_RING_SIZE; 112 + q->ndesc = n_desc; 113 + q->buf_size = bufsize; 114 + 115 + ret = mt76_queue_alloc(dev, q); 116 + if (ret) 117 + return ret; 118 + 119 + mt7603_irq_enable(dev, MT_INT_RX_DONE(idx)); 120 + 121 + return 0; 122 + } 123 + 124 + static void 125 + mt7603_tx_tasklet(unsigned long data) 126 + { 127 + struct mt7603_dev *dev = (struct mt7603_dev *)data; 128 + int i; 129 + 130 + dev->tx_dma_check = 0; 131 + for (i = MT_TXQ_MCU; i >= 0; i--) 132 + mt76_queue_tx_cleanup(dev, i, false); 133 + 134 + mt7603_irq_enable(dev, MT_INT_TX_DONE_ALL); 135 + } 136 + 137 + int mt7603_dma_init(struct mt7603_dev *dev) 138 + { 139 + static const u8 wmm_queue_map[] = { 140 + [IEEE80211_AC_BK] = 0, 141 + [IEEE80211_AC_BE] = 1, 142 + [IEEE80211_AC_VI] = 2, 143 + [IEEE80211_AC_VO] = 3, 144 + }; 145 + int ret; 146 + int i; 147 + 148 + mt76_dma_attach(&dev->mt76); 149 + 150 + init_waitqueue_head(&dev->mt76.mmio.mcu.wait); 151 + skb_queue_head_init(&dev->mt76.mmio.mcu.res_q); 152 + 153 + tasklet_init(&dev->tx_tasklet, mt7603_tx_tasklet, (unsigned long)dev); 154 + 155 + mt76_clear(dev, MT_WPDMA_GLO_CFG, 156 + MT_WPDMA_GLO_CFG_TX_DMA_EN | 157 + MT_WPDMA_GLO_CFG_RX_DMA_EN 
| 158 + MT_WPDMA_GLO_CFG_DMA_BURST_SIZE | 159 + MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE); 160 + 161 + mt76_wr(dev, MT_WPDMA_RST_IDX, ~0); 162 + mt7603_pse_client_reset(dev); 163 + 164 + for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) { 165 + ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[i], 166 + wmm_queue_map[i], 167 + MT_TX_RING_SIZE); 168 + if (ret) 169 + return ret; 170 + } 171 + 172 + ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD], 173 + MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE); 174 + if (ret) 175 + return ret; 176 + 177 + ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU], 178 + MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE); 179 + if (ret) 180 + return ret; 181 + 182 + ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_BEACON], 183 + MT_TX_HW_QUEUE_BCN, MT_MCU_RING_SIZE); 184 + if (ret) 185 + return ret; 186 + 187 + ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_CAB], 188 + MT_TX_HW_QUEUE_BMC, MT_MCU_RING_SIZE); 189 + if (ret) 190 + return ret; 191 + 192 + ret = mt7603_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1, 193 + MT_MCU_RING_SIZE, MT_RX_BUF_SIZE); 194 + if (ret) 195 + return ret; 196 + 197 + ret = mt7603_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], 0, 198 + MT7603_RX_RING_SIZE, MT_RX_BUF_SIZE); 199 + if (ret) 200 + return ret; 201 + 202 + mt76_wr(dev, MT_DELAY_INT_CFG, 0); 203 + return mt76_init_queues(dev); 204 + } 205 + 206 + void mt7603_dma_cleanup(struct mt7603_dev *dev) 207 + { 208 + mt76_clear(dev, MT_WPDMA_GLO_CFG, 209 + MT_WPDMA_GLO_CFG_TX_DMA_EN | 210 + MT_WPDMA_GLO_CFG_RX_DMA_EN | 211 + MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE); 212 + 213 + tasklet_kill(&dev->tx_tasklet); 214 + mt76_dma_cleanup(&dev->mt76); 215 + }
+168
drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
··· 1 + /* SPDX-License-Identifier: ISC */ 2 + 3 + #include "mt7603.h" 4 + #include "eeprom.h" 5 + 6 + static int 7 + mt7603_efuse_read(struct mt7603_dev *dev, u32 base, u16 addr, u8 *data) 8 + { 9 + u32 val; 10 + int i; 11 + 12 + val = mt76_rr(dev, base + MT_EFUSE_CTRL); 13 + val &= ~(MT_EFUSE_CTRL_AIN | 14 + MT_EFUSE_CTRL_MODE); 15 + val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf); 16 + val |= MT_EFUSE_CTRL_KICK; 17 + mt76_wr(dev, base + MT_EFUSE_CTRL, val); 18 + 19 + if (!mt76_poll(dev, base + MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000)) 20 + return -ETIMEDOUT; 21 + 22 + udelay(2); 23 + 24 + val = mt76_rr(dev, base + MT_EFUSE_CTRL); 25 + if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT || 26 + WARN_ON_ONCE(!(val & MT_EFUSE_CTRL_VALID))) { 27 + memset(data, 0xff, 16); 28 + return 0; 29 + } 30 + 31 + for (i = 0; i < 4; i++) { 32 + val = mt76_rr(dev, base + MT_EFUSE_RDATA(i)); 33 + put_unaligned_le32(val, data + 4 * i); 34 + } 35 + 36 + return 0; 37 + } 38 + 39 + static int 40 + mt7603_efuse_init(struct mt7603_dev *dev) 41 + { 42 + u32 base = mt7603_reg_map(dev, MT_EFUSE_BASE); 43 + int len = MT7603_EEPROM_SIZE; 44 + void *buf; 45 + int ret, i; 46 + 47 + if (mt76_rr(dev, base + MT_EFUSE_BASE_CTRL) & MT_EFUSE_BASE_CTRL_EMPTY) 48 + return 0; 49 + 50 + dev->mt76.otp.data = devm_kzalloc(dev->mt76.dev, len, GFP_KERNEL); 51 + dev->mt76.otp.size = len; 52 + if (!dev->mt76.otp.data) 53 + return -ENOMEM; 54 + 55 + buf = dev->mt76.otp.data; 56 + for (i = 0; i + 16 <= len; i += 16) { 57 + ret = mt7603_efuse_read(dev, base, i, buf + i); 58 + if (ret) 59 + return ret; 60 + } 61 + 62 + return 0; 63 + } 64 + 65 + static bool 66 + mt7603_has_cal_free_data(struct mt7603_dev *dev, u8 *efuse) 67 + { 68 + if (!efuse[MT_EE_TEMP_SENSOR_CAL]) 69 + return false; 70 + 71 + if (get_unaligned_le16(efuse + MT_EE_TX_POWER_0_START_2G) == 0) 72 + return false; 73 + 74 + if (get_unaligned_le16(efuse + MT_EE_TX_POWER_1_START_2G) == 0) 75 + return false; 76 + 77 + if 
(!efuse[MT_EE_CP_FT_VERSION]) 78 + return false; 79 + 80 + if (!efuse[MT_EE_XTAL_FREQ_OFFSET]) 81 + return false; 82 + 83 + if (!efuse[MT_EE_XTAL_WF_RFCAL]) 84 + return false; 85 + 86 + return true; 87 + } 88 + 89 + static void 90 + mt7603_apply_cal_free_data(struct mt7603_dev *dev, u8 *efuse) 91 + { 92 + static const u8 cal_free_bytes[] = { 93 + MT_EE_TEMP_SENSOR_CAL, 94 + MT_EE_CP_FT_VERSION, 95 + MT_EE_XTAL_FREQ_OFFSET, 96 + MT_EE_XTAL_WF_RFCAL, 97 + /* Skip for MT7628 */ 98 + MT_EE_TX_POWER_0_START_2G, 99 + MT_EE_TX_POWER_0_START_2G + 1, 100 + MT_EE_TX_POWER_1_START_2G, 101 + MT_EE_TX_POWER_1_START_2G + 1, 102 + }; 103 + u8 *eeprom = dev->mt76.eeprom.data; 104 + int n = ARRAY_SIZE(cal_free_bytes); 105 + int i; 106 + 107 + if (!mt7603_has_cal_free_data(dev, efuse)) 108 + return; 109 + 110 + if (is_mt7628(dev)) 111 + n -= 4; 112 + 113 + for (i = 0; i < n; i++) { 114 + int offset = cal_free_bytes[i]; 115 + 116 + eeprom[offset] = efuse[offset]; 117 + } 118 + } 119 + 120 + static int 121 + mt7603_eeprom_load(struct mt7603_dev *dev) 122 + { 123 + int ret; 124 + 125 + ret = mt76_eeprom_init(&dev->mt76, MT7603_EEPROM_SIZE); 126 + if (ret < 0) 127 + return ret; 128 + 129 + return mt7603_efuse_init(dev); 130 + } 131 + 132 + static int mt7603_check_eeprom(struct mt76_dev *dev) 133 + { 134 + u16 val = get_unaligned_le16(dev->eeprom.data); 135 + 136 + switch (val) { 137 + case 0x7628: 138 + case 0x7603: 139 + return 0; 140 + default: 141 + return -EINVAL; 142 + } 143 + } 144 + 145 + int mt7603_eeprom_init(struct mt7603_dev *dev) 146 + { 147 + int ret; 148 + 149 + ret = mt7603_eeprom_load(dev); 150 + if (ret < 0) 151 + return ret; 152 + 153 + if (dev->mt76.otp.data) { 154 + if (mt7603_check_eeprom(&dev->mt76) == 0) 155 + mt7603_apply_cal_free_data(dev, dev->mt76.otp.data); 156 + else 157 + memcpy(dev->mt76.eeprom.data, dev->mt76.otp.data, 158 + MT7603_EEPROM_SIZE); 159 + } 160 + 161 + dev->mt76.cap.has_2ghz = true; 162 + memcpy(dev->mt76.macaddr, dev->mt76.eeprom.data + 
MT_EE_MAC_ADDR, 163 + ETH_ALEN); 164 + 165 + mt76_eeprom_override(&dev->mt76); 166 + 167 + return 0; 168 + }
+86
drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h
··· 1 + /* SPDX-License-Identifier: ISC */ 2 + 3 + #ifndef __MT7603_EEPROM_H 4 + #define __MT7603_EEPROM_H 5 + 6 + #include "mt7603.h" 7 + 8 + enum mt7603_eeprom_field { 9 + MT_EE_CHIP_ID = 0x000, 10 + MT_EE_VERSION = 0x002, 11 + MT_EE_MAC_ADDR = 0x004, 12 + MT_EE_NIC_CONF_0 = 0x034, 13 + MT_EE_NIC_CONF_1 = 0x036, 14 + MT_EE_NIC_CONF_2 = 0x042, 15 + 16 + MT_EE_XTAL_TRIM_1 = 0x03a, 17 + 18 + MT_EE_RSSI_OFFSET_2G = 0x046, 19 + MT_EE_WIFI_RF_SETTING = 0x048, 20 + MT_EE_RSSI_OFFSET_5G = 0x04a, 21 + 22 + MT_EE_TX_POWER_DELTA_BW40 = 0x050, 23 + MT_EE_TX_POWER_DELTA_BW80 = 0x052, 24 + 25 + MT_EE_TX_POWER_EXT_PA_5G = 0x054, 26 + 27 + MT_EE_TEMP_SENSOR_CAL = 0x055, 28 + 29 + MT_EE_TX_POWER_0_START_2G = 0x056, 30 + MT_EE_TX_POWER_1_START_2G = 0x05c, 31 + 32 + /* used as byte arrays */ 33 + #define MT_TX_POWER_GROUP_SIZE_5G 5 34 + #define MT_TX_POWER_GROUPS_5G 6 35 + MT_EE_TX_POWER_0_START_5G = 0x062, 36 + 37 + MT_EE_TX_POWER_0_GRP3_TX_POWER_DELTA = 0x074, 38 + MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE = 0x076, 39 + 40 + MT_EE_TX_POWER_1_START_5G = 0x080, 41 + 42 + MT_EE_TX_POWER_CCK = 0x0a0, 43 + MT_EE_TX_POWER_OFDM_2G_6M = 0x0a2, 44 + MT_EE_TX_POWER_OFDM_2G_24M = 0x0a4, 45 + MT_EE_TX_POWER_OFDM_2G_54M = 0x0a6, 46 + MT_EE_TX_POWER_HT_BPSK_QPSK = 0x0a8, 47 + MT_EE_TX_POWER_HT_16_64_QAM = 0x0aa, 48 + MT_EE_TX_POWER_HT_64_QAM = 0x0ac, 49 + 50 + MT_EE_ELAN_RX_MODE_GAIN = 0x0c0, 51 + MT_EE_ELAN_RX_MODE_NF = 0x0c1, 52 + MT_EE_ELAN_RX_MODE_P1DB = 0x0c2, 53 + 54 + MT_EE_ELAN_BYPASS_MODE_GAIN = 0x0c3, 55 + MT_EE_ELAN_BYPASS_MODE_NF = 0x0c4, 56 + MT_EE_ELAN_BYPASS_MODE_P1DB = 0x0c5, 57 + 58 + MT_EE_STEP_NUM_NEG_6_7 = 0x0c6, 59 + MT_EE_STEP_NUM_NEG_4_5 = 0x0c8, 60 + MT_EE_STEP_NUM_NEG_2_3 = 0x0ca, 61 + MT_EE_STEP_NUM_NEG_0_1 = 0x0cc, 62 + 63 + MT_EE_REF_STEP_24G = 0x0ce, 64 + 65 + MT_EE_STEP_NUM_PLUS_1_2 = 0x0d0, 66 + MT_EE_STEP_NUM_PLUS_3_4 = 0x0d2, 67 + MT_EE_STEP_NUM_PLUS_5_6 = 0x0d4, 68 + MT_EE_STEP_NUM_PLUS_7 = 0x0d6, 69 + 70 + MT_EE_CP_FT_VERSION = 0x0f0, 71 + 72 + MT_EE_XTAL_FREQ_OFFSET 
= 0x0f4, 73 + MT_EE_XTAL_TRIM_2_COMP = 0x0f5, 74 + MT_EE_XTAL_TRIM_3_COMP = 0x0f6, 75 + MT_EE_XTAL_WF_RFCAL = 0x0f7, 76 + 77 + __MT_EE_MAX 78 + }; 79 + 80 + enum mt7603_eeprom_source { 81 + MT_EE_SRC_PROM, 82 + MT_EE_SRC_EFUSE, 83 + MT_EE_SRC_FLASH, 84 + }; 85 + 86 + #endif
+578
drivers/net/wireless/mediatek/mt76/mt7603/init.c
··· 1 + /* SPDX-License-Identifier: ISC */ 2 + 3 + #include <linux/etherdevice.h> 4 + #include "mt7603.h" 5 + #include "mac.h" 6 + #include "eeprom.h" 7 + 8 + const struct mt76_driver_ops mt7603_drv_ops = { 9 + .txwi_size = MT_TXD_SIZE, 10 + .tx_prepare_skb = mt7603_tx_prepare_skb, 11 + .tx_complete_skb = mt7603_tx_complete_skb, 12 + .rx_skb = mt7603_queue_rx_skb, 13 + .rx_poll_complete = mt7603_rx_poll_complete, 14 + .sta_ps = mt7603_sta_ps, 15 + .sta_add = mt7603_sta_add, 16 + .sta_assoc = mt7603_sta_assoc, 17 + .sta_remove = mt7603_sta_remove, 18 + .update_survey = mt7603_update_channel, 19 + }; 20 + 21 + static void 22 + mt7603_set_tmac_template(struct mt7603_dev *dev) 23 + { 24 + u32 desc[5] = { 25 + [1] = FIELD_PREP(MT_TXD3_REM_TX_COUNT, 0xf), 26 + [3] = MT_TXD5_SW_POWER_MGMT 27 + }; 28 + u32 addr; 29 + int i; 30 + 31 + addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR); 32 + addr += MT_CLIENT_TMAC_INFO_TEMPLATE; 33 + for (i = 0; i < ARRAY_SIZE(desc); i++) 34 + mt76_wr(dev, addr + 4 * i, desc[i]); 35 + } 36 + 37 + static void 38 + mt7603_dma_sched_init(struct mt7603_dev *dev) 39 + { 40 + int page_size = 128; 41 + int page_count; 42 + int max_len = 1792; 43 + int max_amsdu_pages = 4096 / page_size; 44 + int max_mcu_len = 4096; 45 + int max_beacon_len = 512 * 4 + max_len; 46 + int max_mcast_pages = 4 * max_len / page_size; 47 + int reserved_count = 0; 48 + int beacon_pages; 49 + int mcu_pages; 50 + int i; 51 + 52 + page_count = mt76_get_field(dev, MT_PSE_FC_P0, 53 + MT_PSE_FC_P0_MAX_QUOTA); 54 + beacon_pages = 4 * (max_beacon_len / page_size); 55 + mcu_pages = max_mcu_len / page_size; 56 + 57 + mt76_wr(dev, MT_PSE_FRP, 58 + FIELD_PREP(MT_PSE_FRP_P0, 7) | 59 + FIELD_PREP(MT_PSE_FRP_P1, 6) | 60 + FIELD_PREP(MT_PSE_FRP_P2_RQ2, 4)); 61 + 62 + mt76_wr(dev, MT_HIGH_PRIORITY_1, 0x55555553); 63 + mt76_wr(dev, MT_HIGH_PRIORITY_2, 0x78555555); 64 + 65 + mt76_wr(dev, MT_QUEUE_PRIORITY_1, 0x2b1a096e); 66 + mt76_wr(dev, MT_QUEUE_PRIORITY_2, 0x785f4d3c); 67 + 68 + 
mt76_wr(dev, MT_PRIORITY_MASK, 0xffffffff); 69 + 70 + mt76_wr(dev, MT_SCH_1, page_count | (2 << 28)); 71 + mt76_wr(dev, MT_SCH_2, max_amsdu_pages); 72 + 73 + for (i = 0; i <= 4; i++) 74 + mt76_wr(dev, MT_PAGE_COUNT(i), max_amsdu_pages); 75 + reserved_count += 5 * max_amsdu_pages; 76 + 77 + mt76_wr(dev, MT_PAGE_COUNT(5), mcu_pages); 78 + reserved_count += mcu_pages; 79 + 80 + mt76_wr(dev, MT_PAGE_COUNT(7), beacon_pages); 81 + reserved_count += beacon_pages; 82 + 83 + mt76_wr(dev, MT_PAGE_COUNT(8), max_mcast_pages); 84 + reserved_count += max_mcast_pages; 85 + 86 + if (is_mt7603(dev)) 87 + reserved_count = 0; 88 + 89 + mt76_wr(dev, MT_RSV_MAX_THRESH, page_count - reserved_count); 90 + 91 + if (is_mt7603(dev) && mt76xx_rev(dev) >= MT7603_REV_E2) { 92 + mt76_wr(dev, MT_GROUP_THRESH(0), 93 + page_count - beacon_pages - mcu_pages); 94 + mt76_wr(dev, MT_GROUP_THRESH(1), beacon_pages); 95 + mt76_wr(dev, MT_BMAP_0, 0x0080ff5f); 96 + mt76_wr(dev, MT_GROUP_THRESH(2), mcu_pages); 97 + mt76_wr(dev, MT_BMAP_1, 0x00000020); 98 + } else { 99 + mt76_wr(dev, MT_GROUP_THRESH(0), page_count); 100 + mt76_wr(dev, MT_BMAP_0, 0xffff); 101 + } 102 + 103 + mt76_wr(dev, MT_SCH_4, 0); 104 + 105 + for (i = 0; i <= 15; i++) 106 + mt76_wr(dev, MT_TXTIME_THRESH(i), 0xfffff); 107 + 108 + mt76_set(dev, MT_SCH_4, BIT(6)); 109 + } 110 + 111 + static void 112 + mt7603_phy_init(struct mt7603_dev *dev) 113 + { 114 + int rx_chains = dev->mt76.antenna_mask; 115 + int tx_chains = __sw_hweight8(rx_chains) - 1; 116 + 117 + mt76_rmw(dev, MT_WF_RMAC_RMCR, 118 + (MT_WF_RMAC_RMCR_SMPS_MODE | 119 + MT_WF_RMAC_RMCR_RX_STREAMS), 120 + (FIELD_PREP(MT_WF_RMAC_RMCR_SMPS_MODE, 3) | 121 + FIELD_PREP(MT_WF_RMAC_RMCR_RX_STREAMS, rx_chains))); 122 + 123 + mt76_rmw_field(dev, MT_TMAC_TCR, MT_TMAC_TCR_TX_STREAMS, 124 + tx_chains); 125 + 126 + dev->agc0 = mt76_rr(dev, MT_AGC(0)); 127 + dev->agc3 = mt76_rr(dev, MT_AGC(3)); 128 + } 129 + 130 + static void 131 + mt7603_mac_init(struct mt7603_dev *dev) 132 + { 133 + u8 
bc_addr[ETH_ALEN]; 134 + u32 addr; 135 + int i; 136 + 137 + mt76_wr(dev, MT_AGG_BA_SIZE_LIMIT_0, 138 + (MT_AGG_SIZE_LIMIT(0) << 0 * MT_AGG_BA_SIZE_LIMIT_SHIFT) | 139 + (MT_AGG_SIZE_LIMIT(1) << 1 * MT_AGG_BA_SIZE_LIMIT_SHIFT) | 140 + (MT_AGG_SIZE_LIMIT(2) << 2 * MT_AGG_BA_SIZE_LIMIT_SHIFT) | 141 + (MT_AGG_SIZE_LIMIT(3) << 3 * MT_AGG_BA_SIZE_LIMIT_SHIFT)); 142 + 143 + mt76_wr(dev, MT_AGG_BA_SIZE_LIMIT_1, 144 + (MT_AGG_SIZE_LIMIT(4) << 0 * MT_AGG_BA_SIZE_LIMIT_SHIFT) | 145 + (MT_AGG_SIZE_LIMIT(5) << 1 * MT_AGG_BA_SIZE_LIMIT_SHIFT) | 146 + (MT_AGG_SIZE_LIMIT(6) << 2 * MT_AGG_BA_SIZE_LIMIT_SHIFT) | 147 + (MT_AGG_SIZE_LIMIT(7) << 3 * MT_AGG_BA_SIZE_LIMIT_SHIFT)); 148 + 149 + mt76_wr(dev, MT_AGG_LIMIT, 150 + FIELD_PREP(MT_AGG_LIMIT_AC(0), 24) | 151 + FIELD_PREP(MT_AGG_LIMIT_AC(1), 24) | 152 + FIELD_PREP(MT_AGG_LIMIT_AC(2), 24) | 153 + FIELD_PREP(MT_AGG_LIMIT_AC(3), 24)); 154 + 155 + mt76_wr(dev, MT_AGG_LIMIT_1, 156 + FIELD_PREP(MT_AGG_LIMIT_AC(0), 24) | 157 + FIELD_PREP(MT_AGG_LIMIT_AC(1), 24) | 158 + FIELD_PREP(MT_AGG_LIMIT_AC(2), 24) | 159 + FIELD_PREP(MT_AGG_LIMIT_AC(3), 24)); 160 + 161 + mt76_wr(dev, MT_AGG_CONTROL, 162 + FIELD_PREP(MT_AGG_CONTROL_BAR_RATE, 0x4b) | 163 + FIELD_PREP(MT_AGG_CONTROL_CFEND_RATE, 0x69) | 164 + MT_AGG_CONTROL_NO_BA_AR_RULE); 165 + 166 + mt76_wr(dev, MT_AGG_RETRY_CONTROL, 167 + FIELD_PREP(MT_AGG_RETRY_CONTROL_BAR_LIMIT, 1) | 168 + FIELD_PREP(MT_AGG_RETRY_CONTROL_RTS_LIMIT, 15)); 169 + 170 + mt76_rmw(dev, MT_DMA_DCR0, ~0xfffc, 4096); 171 + 172 + mt76_rmw(dev, MT_DMA_VCFR0, BIT(0), BIT(13)); 173 + mt76_rmw(dev, MT_DMA_TMCFR0, BIT(0) | BIT(1), BIT(13)); 174 + 175 + mt76_clear(dev, MT_WF_RMAC_TMR_PA, BIT(31)); 176 + 177 + mt76_set(dev, MT_WF_RMACDR, MT_WF_RMACDR_MAXLEN_20BIT); 178 + mt76_rmw(dev, MT_WF_RMAC_MAXMINLEN, 0xffffff, 0x19000); 179 + 180 + mt76_wr(dev, MT_WF_RFCR1, 0); 181 + 182 + mt76_set(dev, MT_TMAC_TCR, MT_TMAC_TCR_RX_RIFS_MODE); 183 + 184 + mt7603_set_tmac_template(dev); 185 + 186 + /* Enable RX group to HIF */ 187 + addr = 
mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR); 188 + mt76_set(dev, addr + MT_CLIENT_RXINF, MT_CLIENT_RXINF_RXSH_GROUPS); 189 + 190 + /* Enable RX group to MCU */ 191 + mt76_set(dev, MT_DMA_DCR1, GENMASK(13, 11)); 192 + 193 + mt76_rmw_field(dev, MT_AGG_PCR_RTS, MT_AGG_PCR_RTS_PKT_THR, 3); 194 + mt76_set(dev, MT_TMAC_PCR, MT_TMAC_PCR_SPE_EN); 195 + 196 + /* include preamble detection in CCA trigger signal */ 197 + mt76_rmw_field(dev, MT_TXREQ, MT_TXREQ_CCA_SRC_SEL, 2); 198 + 199 + mt76_wr(dev, MT_RXREQ, 4); 200 + 201 + /* Configure all rx packets to HIF */ 202 + mt76_wr(dev, MT_DMA_RCFR0, 0xc0000000); 203 + 204 + /* Configure MCU txs selection with aggregation */ 205 + mt76_wr(dev, MT_DMA_TCFR0, 206 + FIELD_PREP(MT_DMA_TCFR_TXS_AGGR_TIMEOUT, 1) | /* 32 us */ 207 + MT_DMA_TCFR_TXS_AGGR_COUNT); 208 + 209 + /* Configure HIF txs selection with aggregation */ 210 + mt76_wr(dev, MT_DMA_TCFR1, 211 + FIELD_PREP(MT_DMA_TCFR_TXS_AGGR_TIMEOUT, 1) | /* 32 us */ 212 + MT_DMA_TCFR_TXS_AGGR_COUNT | /* Maximum count */ 213 + MT_DMA_TCFR_TXS_BIT_MAP); 214 + 215 + mt76_wr(dev, MT_MCU_PCIE_REMAP_1, MT_PSE_WTBL_2_PHYS_ADDR); 216 + 217 + for (i = 0; i < MT7603_WTBL_SIZE; i++) 218 + mt7603_wtbl_clear(dev, i); 219 + 220 + eth_broadcast_addr(bc_addr); 221 + mt7603_wtbl_init(dev, MT7603_WTBL_RESERVED, -1, bc_addr); 222 + dev->global_sta.wcid.idx = MT7603_WTBL_RESERVED; 223 + rcu_assign_pointer(dev->mt76.wcid[MT7603_WTBL_RESERVED], 224 + &dev->global_sta.wcid); 225 + 226 + mt76_rmw_field(dev, MT_LPON_BTEIR, MT_LPON_BTEIR_MBSS_MODE, 2); 227 + mt76_rmw_field(dev, MT_WF_RMACDR, MT_WF_RMACDR_MBSSID_MASK, 2); 228 + 229 + mt76_wr(dev, MT_AGG_ARUCR, FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), 7)); 230 + mt76_wr(dev, MT_AGG_ARDCR, 231 + FIELD_PREP(MT_AGG_ARxCR_LIMIT(0), 0) | 232 + FIELD_PREP(MT_AGG_ARxCR_LIMIT(1), 233 + max_t(int, 0, MT7603_RATE_RETRY - 2)) | 234 + FIELD_PREP(MT_AGG_ARxCR_LIMIT(2), MT7603_RATE_RETRY - 1) | 235 + FIELD_PREP(MT_AGG_ARxCR_LIMIT(3), MT7603_RATE_RETRY - 1) | 236 + 
FIELD_PREP(MT_AGG_ARxCR_LIMIT(4), MT7603_RATE_RETRY - 1) | 237 + FIELD_PREP(MT_AGG_ARxCR_LIMIT(5), MT7603_RATE_RETRY - 1) | 238 + FIELD_PREP(MT_AGG_ARxCR_LIMIT(6), MT7603_RATE_RETRY - 1) | 239 + FIELD_PREP(MT_AGG_ARxCR_LIMIT(7), MT7603_RATE_RETRY - 1)); 240 + 241 + mt76_wr(dev, MT_AGG_ARCR, 242 + (MT_AGG_ARCR_INIT_RATE1 | 243 + FIELD_PREP(MT_AGG_ARCR_RTS_RATE_THR, 2) | 244 + MT_AGG_ARCR_RATE_DOWN_RATIO_EN | 245 + FIELD_PREP(MT_AGG_ARCR_RATE_DOWN_RATIO, 1) | 246 + FIELD_PREP(MT_AGG_ARCR_RATE_UP_EXTRA_TH, 4))); 247 + 248 + mt76_set(dev, MT_WTBL_RMVTCR, MT_WTBL_RMVTCR_RX_MV_MODE); 249 + 250 + mt76_clear(dev, MT_SEC_SCR, MT_SEC_SCR_MASK_ORDER); 251 + mt76_clear(dev, MT_SEC_SCR, BIT(18)); 252 + 253 + /* Set secondary beacon time offsets */ 254 + for (i = 0; i <= 4; i++) 255 + mt76_rmw_field(dev, MT_LPON_SBTOR(i), MT_LPON_SBTOR_TIME_OFFSET, 256 + (i + 1) * (20 + 4096)); 257 + } 258 + 259 + static int 260 + mt7603_init_hardware(struct mt7603_dev *dev) 261 + { 262 + int i, ret; 263 + 264 + mt76_wr(dev, MT_INT_SOURCE_CSR, ~0); 265 + 266 + ret = mt7603_eeprom_init(dev); 267 + if (ret < 0) 268 + return ret; 269 + 270 + ret = mt7603_dma_init(dev); 271 + if (ret) 272 + return ret; 273 + 274 + mt76_wr(dev, MT_WPDMA_GLO_CFG, 0x52000850); 275 + mt7603_mac_dma_start(dev); 276 + dev->rxfilter = mt76_rr(dev, MT_WF_RFCR); 277 + set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state); 278 + 279 + for (i = 0; i < MT7603_WTBL_SIZE; i++) { 280 + mt76_wr(dev, MT_PSE_RTA, MT_PSE_RTA_BUSY | MT_PSE_RTA_WRITE | 281 + FIELD_PREP(MT_PSE_RTA_TAG_ID, i)); 282 + mt76_poll(dev, MT_PSE_RTA, MT_PSE_RTA_BUSY, 0, 5000); 283 + } 284 + 285 + ret = mt7603_mcu_init(dev); 286 + if (ret) 287 + return ret; 288 + 289 + mt7603_dma_sched_init(dev); 290 + mt7603_mcu_set_eeprom(dev); 291 + mt7603_phy_init(dev); 292 + mt7603_mac_init(dev); 293 + 294 + return 0; 295 + } 296 + 297 + #define CCK_RATE(_idx, _rate) { \ 298 + .bitrate = _rate, \ 299 + .flags = IEEE80211_RATE_SHORT_PREAMBLE, \ 300 + .hw_value = (MT_PHY_TYPE_CCK 
<< 8) | (_idx), \ 301 + .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (4 + _idx), \ 302 + } 303 + 304 + #define OFDM_RATE(_idx, _rate) { \ 305 + .bitrate = _rate, \ 306 + .hw_value = (MT_PHY_TYPE_OFDM << 8) | (_idx), \ 307 + .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | (_idx), \ 308 + } 309 + 310 + static struct ieee80211_rate mt7603_rates[] = { 311 + CCK_RATE(0, 10), 312 + CCK_RATE(1, 20), 313 + CCK_RATE(2, 55), 314 + CCK_RATE(3, 110), 315 + OFDM_RATE(11, 60), 316 + OFDM_RATE(15, 90), 317 + OFDM_RATE(10, 120), 318 + OFDM_RATE(14, 180), 319 + OFDM_RATE(9, 240), 320 + OFDM_RATE(13, 360), 321 + OFDM_RATE(8, 480), 322 + OFDM_RATE(12, 540), 323 + }; 324 + 325 + static const struct ieee80211_iface_limit if_limits[] = { 326 + { 327 + .max = 1, 328 + .types = BIT(NL80211_IFTYPE_ADHOC) 329 + }, { 330 + .max = MT7603_MAX_INTERFACES, 331 + .types = BIT(NL80211_IFTYPE_STATION) | 332 + #ifdef CONFIG_MAC80211_MESH 333 + BIT(NL80211_IFTYPE_MESH_POINT) | 334 + #endif 335 + BIT(NL80211_IFTYPE_AP) 336 + }, 337 + }; 338 + 339 + static const struct ieee80211_iface_combination if_comb[] = { 340 + { 341 + .limits = if_limits, 342 + .n_limits = ARRAY_SIZE(if_limits), 343 + .max_interfaces = 4, 344 + .num_different_channels = 1, 345 + .beacon_int_infra_match = true, 346 + } 347 + }; 348 + 349 + static void mt7603_led_set_config(struct mt76_dev *mt76, u8 delay_on, 350 + u8 delay_off) 351 + { 352 + struct mt7603_dev *dev = container_of(mt76, struct mt7603_dev, 353 + mt76); 354 + u32 val, addr; 355 + 356 + val = MT_LED_STATUS_DURATION(0xffff) | 357 + MT_LED_STATUS_OFF(delay_off) | 358 + MT_LED_STATUS_ON(delay_on); 359 + 360 + addr = mt7603_reg_map(dev, MT_LED_STATUS_0(mt76->led_pin)); 361 + mt76_wr(dev, addr, val); 362 + addr = mt7603_reg_map(dev, MT_LED_STATUS_1(mt76->led_pin)); 363 + mt76_wr(dev, addr, val); 364 + 365 + val = MT_LED_CTRL_REPLAY(mt76->led_pin) | 366 + MT_LED_CTRL_KICK(mt76->led_pin); 367 + if (mt76->led_al) 368 + val |= MT_LED_CTRL_POLARITY(mt76->led_pin); 369 + addr = 
/* NOTE(review): the first three lines are the tail of mt7603_led_set_config();
 * its opening lines fall outside this chunk and are reproduced as seen.
 */
	mt7603_reg_map(dev, MT_LED_CTRL);
	mt76_wr(dev, addr, val);
}

/*
 * LED class blink callback: convert the requested on/off delays (ms) into
 * hardware units of 10 ms, clamped to at least 1, and program them.
 */
static int mt7603_led_set_blink(struct led_classdev *led_cdev,
				unsigned long *delay_on,
				unsigned long *delay_off)
{
	struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev,
					     led_cdev);
	u8 delta_on, delta_off;

	delta_off = max_t(u8, *delay_off / 10, 1);
	delta_on = max_t(u8, *delay_on / 10, 1);

	mt7603_led_set_config(mt76, delta_on, delta_off);
	return 0;
}

/*
 * LED class brightness callback: the hardware LED is effectively on/off only,
 * so any non-zero brightness maps to solid on and zero maps to off.
 */
static void mt7603_led_set_brightness(struct led_classdev *led_cdev,
				      enum led_brightness brightness)
{
	struct mt76_dev *mt76 = container_of(led_cdev, struct mt76_dev,
					     led_cdev);

	if (!brightness)
		mt7603_led_set_config(mt76, 0, 0xff);
	else
		mt7603_led_set_config(mt76, 0xff, 0);
}

/*
 * Translate a register offset for bus access. Offsets below 0x100000 are
 * used directly; higher offsets go through mt7603_reg_map() — presumably a
 * remap window, see mt7603_reg_map() for the actual mechanism.
 */
static u32 __mt7603_reg_addr(struct mt7603_dev *dev, u32 addr)
{
	if (addr < 0x100000)
		return addr;

	return mt7603_reg_map(dev, addr);
}

/* Bus-op wrapper: remap the offset, then delegate to the saved original ops */
static u32 mt7603_rr(struct mt76_dev *mdev, u32 offset)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	u32 addr = __mt7603_reg_addr(dev, offset);

	return dev->bus_ops->rr(mdev, addr);
}

/* Bus-op wrapper for register writes (see mt7603_rr) */
static void mt7603_wr(struct mt76_dev *mdev, u32 offset, u32 val)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	u32 addr = __mt7603_reg_addr(dev, offset);

	dev->bus_ops->wr(mdev, addr, val);
}

/* Bus-op wrapper for read-modify-write (see mt7603_rr) */
static u32 mt7603_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	u32 addr = __mt7603_reg_addr(dev, offset);

	return dev->bus_ops->rmw(mdev, addr, mask, val);
}

/*
 * Regulatory notifier: enable energy-detect (ED) monitoring only when the
 * regulatory request maps to the ETSI DFS region.
 */
static void
mt7603_regd_notifier(struct wiphy *wiphy,
		     struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct mt7603_dev *dev = hw->priv;

	dev->ed_monitor = request->dfs_region == NL80211_DFS_ETSI;
}

/*
 * Decode a per-rate tx power offset from EEPROM: BIT(7) acts as a "valid"
 * flag (0 -> offset 0), BIT(6) is the sign (clear -> negative), and the low
 * 6 bits are the magnitude.
 */
static int
mt7603_txpower_signed(int val)
{
	bool sign = val & BIT(6);

	if (!(val & BIT(7)))
		return 0;

	val &= GENMASK(5, 0);
	if (!sign)
		val = -val;

	return val;
}

/*
 * Derive the tx power limit from EEPROM: start from the channel-2 2G target
 * power, add the largest per-rate offset, and advertise the result (halved;
 * presumably the EEPROM values are in 0.5 dB steps — TODO confirm) as the
 * max_power of every channel in the band.
 */
static void
mt7603_init_txpower(struct mt7603_dev *dev,
		    struct ieee80211_supported_band *sband)
{
	struct ieee80211_channel *chan;
	u8 *eeprom = (u8 *)dev->mt76.eeprom.data;
	int target_power = eeprom[MT_EE_TX_POWER_0_START_2G + 2] & ~BIT(7);
	u8 *rate_power = &eeprom[MT_EE_TX_POWER_CCK];
	int max_offset, cur_offset;
	int i;

	/* target power itself is sign/magnitude: BIT(6) set means negative */
	if (target_power & BIT(6))
		target_power = -(target_power & GENMASK(5, 0));

	max_offset = 0;
	for (i = 0; i < 14; i++) {
		cur_offset = mt7603_txpower_signed(rate_power[i]);
		max_offset = max(max_offset, cur_offset);
	}

	target_power += max_offset;

	dev->tx_power_limit = target_power;
	dev->mt76.txpower_cur = target_power;

	target_power = DIV_ROUND_UP(target_power, 2);

	/* add 3 dBm for 2SS devices (combined output) */
	if (dev->mt76.antenna_mask & BIT(1))
		target_power += 3;

	for (i = 0; i < sband->n_channels; i++) {
		chan = &sband->channels[i];
		chan->max_power = target_power;
	}
}

/*
 * Register the device with mac80211: install the remapping bus ops, detect
 * the antenna configuration, initialize the hardware and advertise driver
 * capabilities/interface modes before calling mt76_register_device().
 * Returns 0 on success or a negative errno.
 */
int mt7603_register_device(struct mt7603_dev *dev)
{
	struct mt76_bus_ops *bus_ops;
	struct ieee80211_hw *hw = mt76_hw(dev);
	struct wiphy *wiphy = hw->wiphy;
	int ret;

	/* keep the original bus ops; wrap them with address remapping */
	dev->bus_ops = dev->mt76.bus;
	bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
			       GFP_KERNEL);
	if (!bus_ops)
		return -ENOMEM;

	bus_ops->rr = mt7603_rr;
	bus_ops->wr = mt7603_wr;
	bus_ops->rmw = mt7603_rmw;
	dev->mt76.bus = bus_ops;

	INIT_DELAYED_WORK(&dev->mac_work, mt7603_mac_work);
	tasklet_init(&dev->pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet,
		     (unsigned long)dev);

	/* Check for 7688, which only has 1SS */
	dev->mt76.antenna_mask = 3;
	if (mt76_rr(dev, MT_EFUSE_BASE + 0x64) & BIT(4))
		dev->mt76.antenna_mask = 1;

	dev->slottime = 9;

	ret = mt7603_init_hardware(dev);
	if (ret)
		return ret;

	hw->queues = 4;
	hw->max_rates = 3;
	hw->max_report_rates = 7;
	hw->max_rate_tries = 11;

	hw->sta_data_size = sizeof(struct mt7603_sta);
	hw->vif_data_size = sizeof(struct mt7603_vif);

	wiphy->iface_combinations = if_comb;
	wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);

	ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
	ieee80211_hw_set(hw, TX_STATUS_NO_AMPDU_LEN);

	/* init led callbacks */
	if (IS_ENABLED(CONFIG_MT76_LEDS)) {
		dev->mt76.led_cdev.brightness_set = mt7603_led_set_brightness;
		dev->mt76.led_cdev.blink_set = mt7603_led_set_blink;
	}

	wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
		BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
		BIT(NL80211_IFTYPE_ADHOC);

	wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;

	wiphy->reg_notifier = mt7603_regd_notifier;

	ret = mt76_register_device(&dev->mt76, true, mt7603_rates,
				   ARRAY_SIZE(mt7603_rates));
	if (ret)
		return ret;

	mt7603_init_debugfs(dev);
	mt7603_init_txpower(dev, &dev->mt76.sband_2g.sband);

	return 0;
}

/*
 * Tear down in reverse order of registration: stop the pre-TBTT tasklet,
 * unregister from mac80211, shut down the MCU and DMA, then free the hw.
 */
void mt7603_unregister_device(struct mt7603_dev *dev)
{
	tasklet_disable(&dev->pre_tbtt_tasklet);
	mt76_unregister_device(&dev->mt76);
	mt7603_mcu_exit(dev);
	mt7603_dma_cleanup(dev);
	ieee80211_free_hw(mt76_hw(dev));
}
+1749
drivers/net/wireless/mediatek/mt76/mt7603/mac.c
/* SPDX-License-Identifier: ISC */

#include <linux/etherdevice.h>
#include <linux/timekeeping.h>
#include "mt7603.h"
#include "mac.h"

#define MT_PSE_PAGE_SIZE	128

/*
 * Expand a 4-bit per-AC mask into the bit layout used by the
 * MT_WF_ARB_TX_STOP_0/START_0 registers (one 4-bit group per AC).
 */
static u32
mt7603_ac_queue_mask0(u32 mask)
{
	u32 ret = 0;

	ret |= GENMASK(3, 0) * !!(mask & BIT(0));
	ret |= GENMASK(8, 5) * !!(mask & BIT(1));
	ret |= GENMASK(13, 10) * !!(mask & BIT(2));
	ret |= GENMASK(19, 16) * !!(mask & BIT(3));
	return ret;
}

/* Stop transmission on the given access categories */
static void
mt76_stop_tx_ac(struct mt7603_dev *dev, u32 mask)
{
	mt76_set(dev, MT_WF_ARB_TX_STOP_0, mt7603_ac_queue_mask0(mask));
}

/* Resume transmission on the given access categories */
static void
mt76_start_tx_ac(struct mt7603_dev *dev, u32 mask)
{
	mt76_set(dev, MT_WF_ARB_TX_START_0, mt7603_ac_queue_mask0(mask));
}

/*
 * Program MAC timing: CCK/OFDM timeouts (adjusted by the coverage class),
 * EIFS/RIFS/SIFS/slot time, and the CF-End rate. TX/RX are disabled around
 * the update via MT_ARB_SCR.
 */
void mt7603_mac_set_timing(struct mt7603_dev *dev)
{
	u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
		  FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
	u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
		   FIELD_PREP(MT_TIMEOUT_VAL_CCA, 24);
	int offset = 3 * dev->coverage_class;
	u32 reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
			 FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
	int sifs;
	u32 val;

	if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ)
		sifs = 16;
	else
		sifs = 10;

	mt76_set(dev, MT_ARB_SCR,
		 MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
	udelay(1);

	mt76_wr(dev, MT_TIMEOUT_CCK, cck + reg_offset);
	mt76_wr(dev, MT_TIMEOUT_OFDM, ofdm + reg_offset);
	mt76_wr(dev, MT_IFS,
		FIELD_PREP(MT_IFS_EIFS, 360) |
		FIELD_PREP(MT_IFS_RIFS, 2) |
		FIELD_PREP(MT_IFS_SIFS, sifs) |
		FIELD_PREP(MT_IFS_SLOT, dev->slottime));

	/* long slot time implies 11b rates in use -> CCK CF-End */
	if (dev->slottime < 20)
		val = MT7603_CFEND_RATE_DEFAULT;
	else
		val = MT7603_CFEND_RATE_11B;

	mt76_rmw_field(dev, MT_AGG_CONTROL, MT_AGG_CONTROL_CFEND_RATE, val);

	mt76_clear(dev, MT_ARB_SCR,
		   MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
}

/* Trigger a WTBL update for one wlan index and wait for completion */
static void
mt7603_wtbl_update(struct mt7603_dev *dev, int idx, u32 mask)
{
	mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
		 FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);

	mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
}

/* Register address of WTBL1 entry idx */
static u32
mt7603_wtbl1_addr(int idx)
{
	return MT_WTBL1_BASE + idx * MT_WTBL1_SIZE;
}

static u32
mt7603_wtbl2_addr(int idx)
{
	/* Mapped to WTBL2 */
	return MT_PCIE_REMAP_BASE_1 + idx * MT_WTBL2_SIZE;
}

/* WTBL3 entries follow the WTBL2 area in the remap window */
static u32
mt7603_wtbl3_addr(int idx)
{
	u32 base = mt7603_wtbl2_addr(MT7603_WTBL_SIZE);

	return base + idx * MT_WTBL3_SIZE;
}

/* WTBL4 entries follow the WTBL3 area */
static u32
mt7603_wtbl4_addr(int idx)
{
	u32 base = mt7603_wtbl3_addr(MT7603_WTBL_SIZE);

	return base + idx * MT_WTBL4_SIZE;
}

/*
 * Initialize a WTBL entry: program the MAC address and owning vif into
 * WTBL1, then zero the WTBL2/3/4 entries. vif < 0 means "no A1 check"
 * (entry not tied to a vif). TX on all ACs is paused while WTBL2 is wiped.
 */
void mt7603_wtbl_init(struct mt7603_dev *dev, int idx, int vif,
		      const u8 *mac_addr)
{
	const void *_mac = mac_addr;
	u32 addr = mt7603_wtbl1_addr(idx);
	u32 w0 = 0, w1 = 0;
	int i;

	if (_mac) {
		w0 = FIELD_PREP(MT_WTBL1_W0_ADDR_HI,
				get_unaligned_le16(_mac + 4));
		w1 = FIELD_PREP(MT_WTBL1_W1_ADDR_LO,
				get_unaligned_le32(_mac));
	}

	if (vif < 0)
		vif = 0;
	else
		w0 |= MT_WTBL1_W0_RX_CHECK_A1;
	w0 |= FIELD_PREP(MT_WTBL1_W0_MUAR_IDX, vif);

	mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

	mt76_set(dev, addr + 0 * 4, w0);
	mt76_set(dev, addr + 1 * 4, w1);
	mt76_set(dev, addr + 2 * 4, MT_WTBL1_W2_ADMISSION_CONTROL);

	mt76_stop_tx_ac(dev, GENMASK(3, 0));
	addr = mt7603_wtbl2_addr(idx);
	for (i = 0; i < MT_WTBL2_SIZE; i += 4)
		mt76_wr(dev, addr + i, 0);
	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_WTBL2);
	mt76_start_tx_ac(dev, GENMASK(3, 0));

	addr = mt7603_wtbl3_addr(idx);
	for (i = 0; i < MT_WTBL3_SIZE; i += 4)
		mt76_wr(dev, addr + i, 0);

	addr = mt7603_wtbl4_addr(idx);
	for (i = 0; i < MT_WTBL4_SIZE; i += 4)
		mt76_wr(dev, addr + i, 0);
}

/* Set/clear the WTBL1 skip-tx bit for one entry */
static void
mt7603_wtbl_set_skip_tx(struct mt7603_dev *dev, int idx, bool enabled)
{
	u32 addr = mt7603_wtbl1_addr(idx);
	u32 val = mt76_rr(dev, addr + 3 * 4);

	val &= ~MT_WTBL1_W3_SKIP_TX;
	val |= enabled * MT_WTBL1_W3_SKIP_TX;

	mt76_wr(dev, addr + 3 * 4, val);
}

/*
 * Flush queued tx frames of one station: with abort=true they are dropped
 * (redirected to the PSE free queue), otherwise they are redirected to the
 * HIF MCU queue. Tx to the station is suppressed while flushing.
 */
void mt7603_filter_tx(struct mt7603_dev *dev, int idx, bool abort)
{
	int i, port, queue;

	if (abort) {
		port = 3; /* PSE */
		queue = 8; /* free queue */
	} else {
		port = 0; /* HIF */
		queue = 1; /* MCU queue */
	}

	mt7603_wtbl_set_skip_tx(dev, idx, true);

	mt76_wr(dev, MT_TX_ABORT, MT_TX_ABORT_EN |
		FIELD_PREP(MT_TX_ABORT_WCID, idx));

	/* one flush request per hardware AC queue */
	for (i = 0; i < 4; i++) {
		mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
			FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, idx) |
			FIELD_PREP(MT_DMA_FQCR0_TARGET_QID, i) |
			FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, port) |
			FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, queue));

		WARN_ON_ONCE(!mt76_poll(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY,
					0, 5000));
	}

	mt76_wr(dev, MT_TX_ABORT, 0);

	mt7603_wtbl_set_skip_tx(dev, idx, false);
}

/* Update the SMPS bit of a station's WTBL1 entry (no-op if unchanged) */
void mt7603_wtbl_set_smps(struct mt7603_dev *dev, struct mt7603_sta *sta,
			  bool enabled)
{
	u32 addr = mt7603_wtbl1_addr(sta->wcid.idx);

	if (sta->smps == enabled)
		return;

	mt76_rmw_field(dev, addr + 2 * 4, MT_WTBL1_W2_SMPS, enabled);
	sta->smps = enabled;
}

/*
 * Toggle powersave state of a station: configure PSE redirection, flush
 * pending tx when entering PS, then update the WTBL1 power-save bit inside
 * a PSM-write window. Serialized by dev->ps_lock.
 */
void mt7603_wtbl_set_ps(struct mt7603_dev *dev, struct mt7603_sta *sta,
			bool enabled)
{
	int idx = sta->wcid.idx;
	u32 addr;

	spin_lock_bh(&dev->ps_lock);

	if (sta->ps == enabled)
		goto out;

	mt76_wr(dev, MT_PSE_RTA,
		FIELD_PREP(MT_PSE_RTA_TAG_ID, idx) |
		FIELD_PREP(MT_PSE_RTA_PORT_ID, 0) |
		FIELD_PREP(MT_PSE_RTA_QUEUE_ID, 1) |
		FIELD_PREP(MT_PSE_RTA_REDIRECT_EN, enabled) |
		MT_PSE_RTA_WRITE | MT_PSE_RTA_BUSY);

	mt76_poll(dev, MT_PSE_RTA, MT_PSE_RTA_BUSY, 0, 5000);

	/* requeue (don't drop) pending frames when entering powersave */
	if (enabled)
		mt7603_filter_tx(dev, idx, false);

	addr = mt7603_wtbl1_addr(idx);
	mt76_set(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);
	mt76_rmw(dev, addr + 3 * 4, MT_WTBL1_W3_POWER_SAVE,
		 enabled * MT_WTBL1_W3_POWER_SAVE);
	mt76_clear(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);
	sta->ps = enabled;

out:
	spin_unlock_bh(&dev->ps_lock);
}

/*
 * Reset a WTBL entry to its default state: restore the WTBL2/3/4 frame and
 * entry id links in WTBL1, clear BA state and counters. The frame/entry ids
 * are derived from the PSE page layout of each sub-table.
 */
void mt7603_wtbl_clear(struct mt7603_dev *dev, int idx)
{
	int wtbl2_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL2_SIZE;
	int wtbl2_frame = idx / wtbl2_frame_size;
	int wtbl2_entry = idx % wtbl2_frame_size;

	int wtbl3_base_frame = MT_WTBL3_OFFSET / MT_PSE_PAGE_SIZE;
	int wtbl3_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL3_SIZE;
	int wtbl3_frame = wtbl3_base_frame + idx / wtbl3_frame_size;
	int wtbl3_entry = (idx % wtbl3_frame_size) * 2;

	int wtbl4_base_frame = MT_WTBL4_OFFSET / MT_PSE_PAGE_SIZE;
	int wtbl4_frame_size = MT_PSE_PAGE_SIZE / MT_WTBL4_SIZE;
	int wtbl4_frame = wtbl4_base_frame + idx / wtbl4_frame_size;
	int wtbl4_entry = idx % wtbl4_frame_size;

	u32 addr = MT_WTBL1_BASE + idx * MT_WTBL1_SIZE;
	int i;

	mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

	mt76_wr(dev, addr + 0 * 4,
		MT_WTBL1_W0_RX_CHECK_A1 |
		MT_WTBL1_W0_RX_CHECK_A2 |
		MT_WTBL1_W0_RX_VALID);
	mt76_wr(dev, addr + 1 * 4, 0);
	mt76_wr(dev, addr + 2 * 4, 0);

	/* word 3/4 carry PSM state; must be written in a PSM-write window */
	mt76_set(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);

	mt76_wr(dev, addr + 3 * 4,
		FIELD_PREP(MT_WTBL1_W3_WTBL2_FRAME_ID, wtbl2_frame) |
		FIELD_PREP(MT_WTBL1_W3_WTBL2_ENTRY_ID, wtbl2_entry) |
		FIELD_PREP(MT_WTBL1_W3_WTBL4_FRAME_ID, wtbl4_frame) |
		MT_WTBL1_W3_I_PSM | MT_WTBL1_W3_KEEP_I_PSM);
	mt76_wr(dev, addr + 4 * 4,
		FIELD_PREP(MT_WTBL1_W4_WTBL3_FRAME_ID, wtbl3_frame) |
		FIELD_PREP(MT_WTBL1_W4_WTBL3_ENTRY_ID, wtbl3_entry) |
		FIELD_PREP(MT_WTBL1_W4_WTBL4_ENTRY_ID, wtbl4_entry));

	mt76_clear(dev, MT_WTBL1_OR, MT_WTBL1_OR_PSM_WRITE);

	addr = mt7603_wtbl2_addr(idx);

	/* Clear BA information */
	mt76_wr(dev, addr + (15 * 4), 0);

	mt76_stop_tx_ac(dev, GENMASK(3, 0));
	for (i = 2; i <= 4; i++)
		mt76_wr(dev, addr + (i * 4), 0);
	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_WTBL2);
	mt76_start_tx_ac(dev, GENMASK(3, 0));

	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_RX_COUNT_CLEAR);
	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_TX_COUNT_CLEAR);
	mt7603_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
}

/*
 * Propagate a station's HT/VHT capabilities (A-MPDU factor/density, SGI)
 * into its WTBL1/WTBL2 entries, preserving key type and admission control.
 */
void mt7603_wtbl_update_cap(struct mt7603_dev *dev, struct ieee80211_sta *sta)
{
	struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
	int idx = msta->wcid.idx;
	u32 addr;
	u32 val;

	addr = mt7603_wtbl1_addr(idx);

	val = mt76_rr(dev, addr + 2 * 4);
	val &= MT_WTBL1_W2_KEY_TYPE | MT_WTBL1_W2_ADMISSION_CONTROL;
	val |= FIELD_PREP(MT_WTBL1_W2_AMPDU_FACTOR, sta->ht_cap.ampdu_factor) |
	       FIELD_PREP(MT_WTBL1_W2_MPDU_DENSITY, sta->ht_cap.ampdu_density) |
	       MT_WTBL1_W2_TXS_BAF_REPORT;

	if (sta->ht_cap.cap)
		val |= MT_WTBL1_W2_HT;
	if (sta->vht_cap.cap)
		val |= MT_WTBL1_W2_VHT;

	mt76_wr(dev, addr + 2 * 4, val);

	addr = mt7603_wtbl2_addr(idx);
	val = mt76_rr(dev, addr + 9 * 4);
	val &= ~(MT_WTBL2_W9_SHORT_GI_20 | MT_WTBL2_W9_SHORT_GI_40 |
		 MT_WTBL2_W9_SHORT_GI_80);
	if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
		val |= MT_WTBL2_W9_SHORT_GI_20;
	if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
		val |= MT_WTBL2_W9_SHORT_GI_40;
	mt76_wr(dev, addr + 9 * 4, val);
}

/*
 * Reset the hardware rx BA session for one TID of the given peer
 * MAC address (addr points at a 6-byte MAC).
 */
void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid)
{
	mt76_wr(dev, MT_BA_CONTROL_0, get_unaligned_le32(addr));
	mt76_wr(dev, MT_BA_CONTROL_1,
		(get_unaligned_le16(addr + 4) |
		 FIELD_PREP(MT_BA_CONTROL_1_TID, tid) |
		 MT_BA_CONTROL_1_RESET));
}

/*
 * Configure a tx BA session in WTBL2: write the starting sequence number
 * for the TID (field layout differs per TID), then enable the TID with the
 * largest aggregation size limit that fits ba_size. ba_size < 0 disables
 * the session. The MAC is stopped while the SSN fields are updated.
 */
void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn,
			    int ba_size)
{
	u32 addr = mt7603_wtbl2_addr(wcid);
	u32 tid_mask = FIELD_PREP(MT_WTBL2_W15_BA_EN_TIDS, BIT(tid)) |
		       (MT_WTBL2_W15_BA_WIN_SIZE <<
			(tid * MT_WTBL2_W15_BA_WIN_SIZE_SHIFT));
	u32 tid_val;
	int i;

	if (ba_size < 0) {
		/* disable */
		mt76_clear(dev, addr + (15 * 4), tid_mask);
		return;
	}
	mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

	mt7603_mac_stop(dev);
	switch (tid) {
	case 0:
		mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID0_SN, ssn);
		break;
	case 1:
		mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID1_SN, ssn);
		break;
	case 2:
		/* TID2 SSN straddles words 2 and 3 */
		mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID2_SN_LO,
			       ssn);
		mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID2_SN_HI,
			       ssn >> 8);
		break;
	case 3:
		mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID3_SN, ssn);
		break;
	case 4:
		mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID4_SN, ssn);
		break;
	case 5:
		/* TID5 SSN straddles words 3 and 4 */
		mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID5_SN_LO,
			       ssn);
		mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID5_SN_HI,
			       ssn >> 4);
		break;
	case 6:
		mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID6_SN, ssn);
		break;
	case 7:
		mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID7_SN, ssn);
		break;
	}
	mt7603_wtbl_update(dev, wcid, MT_WTBL_UPDATE_WTBL2);
	mt7603_mac_start(dev);

	/* pick the largest window-size index that ba_size still covers */
	for (i = 7; i > 0; i--) {
		if (ba_size >= MT_AGG_SIZE_LIMIT(i))
			break;
	}

	tid_val = FIELD_PREP(MT_WTBL2_W15_BA_EN_TIDS, BIT(tid)) |
		  i << (tid * MT_WTBL2_W15_BA_WIN_SIZE_SHIFT);

	mt76_rmw(dev, addr + (15 * 4), tid_mask, tid_val);
}

/*
 * Map a hardware rate index to the bitrate table index of the given band.
 * CCK rates on 5 GHz are invalid (index 0); on 2 GHz, non-CCK lookups skip
 * the first 4 (CCK) entries. Returns 0 when no match is found.
 */
static int
mt7603_get_rate(struct mt7603_dev *dev, struct ieee80211_supported_band *sband,
		int idx, bool cck)
{
	int offset = 0;
	int len = sband->n_bitrates;
	int i;

	if (cck) {
		if (sband == &dev->mt76.sband_5g.sband)
			return 0;

		idx &= ~BIT(2); /* short preamble */
	} else if (sband == &dev->mt76.sband_2g.sband) {
		offset = 4;
	}

	for (i = offset; i < len; i++) {
		if ((sband->bitrates[i].hw_value & GENMASK(7, 0)) == idx)
			return i;
	}

	return 0;
}

/*
 * Resolve the wcid for a received frame. Non-unicast frames from a known
 * station are attributed to the owning vif's wcid instead.
 * Caller must hold the RCU read lock (rcu_dereference below).
 */
static struct mt76_wcid *
mt7603_rx_get_wcid(struct mt7603_dev *dev, u8 idx, bool unicast)
{
	struct mt7603_sta *sta;
	struct mt76_wcid *wcid;

	if (idx >= ARRAY_SIZE(dev->mt76.wcid))
		return NULL;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (unicast || !wcid)
		return wcid;

	if (!wcid->sta)
		return NULL;

	sta = container_of(wcid, struct mt7603_sta, wcid);
	if (!sta->vif)
		return NULL;

	return &sta->vif->sta.wcid;
}

/*
 * Re-insert a CCMP header (PN + key id) that the hardware stripped, using
 * the PN saved in status->iv, so mac80211 can do replay detection.
 */
static void
mt7603_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	u8 *pn = status->iv;
	u8 *hdr;

	__skb_push(skb, 8);
	memmove(skb->data, skb->data + 8, hdr_len);
	hdr = skb->data + hdr_len;

	hdr[0] = pn[5];
	hdr[1] = pn[4];
	hdr[2] = 0;
	hdr[3] = 0x20 | (key_id << 6); /* ExtIV | key id */
	hdr[4] = pn[3];
	hdr[5] = pn[2];
	hdr[6] = pn[1];
	hdr[7] = pn[0];

	status->flag &= ~RX_FLAG_IV_STRIPPED;
}

/*
 * Parse the hardware rx descriptor in front of a received frame into
 * mt76_rx_status (band/freq, crypto flags, rate info, RSSI, BA info) and
 * strip the descriptor from the skb. Returns 0 on success, -EINVAL when
 * the descriptor is malformed or truncated.
 */
int
mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
{
	struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
	struct ieee80211_supported_band *sband;
	struct ieee80211_hdr *hdr;
	__le32 *rxd = (__le32 *)skb->data;
	u32 rxd0 = le32_to_cpu(rxd[0]);
	u32 rxd1 = le32_to_cpu(rxd[1]);
	u32 rxd2 = le32_to_cpu(rxd[2]);
	bool unicast = rxd1 & MT_RXD1_NORMAL_U2M;
	bool insert_ccmp_hdr = false;
	bool remove_pad;
	int idx;
	int i;

	memset(status, 0, sizeof(*status));

	/* low bit of CH_FREQ selects the band, the rest the channel index */
	i = FIELD_GET(MT_RXD1_NORMAL_CH_FREQ, rxd1);
	sband = (i & 1) ? &dev->mt76.sband_5g.sband : &dev->mt76.sband_2g.sband;
	i >>= 1;

	idx = FIELD_GET(MT_RXD2_NORMAL_WLAN_IDX, rxd2);
	status->wcid = mt7603_rx_get_wcid(dev, idx, unicast);

	status->band = sband->band;
	if (i < sband->n_channels)
		status->freq = sband->channels[i].center_freq;

	if (rxd2 & MT_RXD2_NORMAL_FCS_ERR)
		status->flag |= RX_FLAG_FAILED_FCS_CRC;

	if (rxd2 & MT_RXD2_NORMAL_TKIP_MIC_ERR)
		status->flag |= RX_FLAG_MMIC_ERROR;

	if (FIELD_GET(MT_RXD2_NORMAL_SEC_MODE, rxd2) != 0 &&
	    !(rxd2 & (MT_RXD2_NORMAL_CLM | MT_RXD2_NORMAL_CM))) {
		status->flag |= RX_FLAG_DECRYPTED;
		status->flag |= RX_FLAG_IV_STRIPPED;
		status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
	}

	remove_pad = rxd1 & MT_RXD1_NORMAL_HDR_OFFSET;

	if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
		return -EINVAL;

	if (!sband->channels)
		return -EINVAL;

	/* walk the optional descriptor groups, bounds-checking each step */
	rxd += 4;
	if (rxd0 & MT_RXD0_NORMAL_GROUP_4) {
		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}
	if (rxd0 & MT_RXD0_NORMAL_GROUP_1) {
		u8 *data = (u8 *)rxd;

		if (status->flag & RX_FLAG_DECRYPTED) {
			/* save the PN (byte-reversed) for CCMP reinsertion */
			status->iv[0] = data[5];
			status->iv[1] = data[4];
			status->iv[2] = data[3];
			status->iv[3] = data[2];
			status->iv[4] = data[1];
			status->iv[5] = data[0];

			insert_ccmp_hdr = FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
		}

		rxd += 4;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}
	if (rxd0 & MT_RXD0_NORMAL_GROUP_2) {
		rxd += 2;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	}
	if (rxd0 & MT_RXD0_NORMAL_GROUP_3) {
		u32 rxdg0 = le32_to_cpu(rxd[0]);
		u32 rxdg3 = le32_to_cpu(rxd[3]);
		bool cck = false;

		i = FIELD_GET(MT_RXV1_TX_RATE, rxdg0);
		switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
		case MT_PHY_TYPE_CCK:
			cck = true;
			/* fall through */
		case MT_PHY_TYPE_OFDM:
			i = mt7603_get_rate(dev, sband, i, cck);
			break;
		case MT_PHY_TYPE_HT_GF:
		case MT_PHY_TYPE_HT:
			status->encoding = RX_ENC_HT;
			if (i > 15)
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}

		if (rxdg0 & MT_RXV1_HT_SHORT_GI)
			status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		if (rxdg0 & MT_RXV1_HT_AD_CODE)
			status->enc_flags |= RX_ENC_FLAG_LDPC;

		status->enc_flags |= RX_ENC_FLAG_STBC_MASK *
				     FIELD_GET(MT_RXV1_HT_STBC, rxdg0);

		status->rate_idx = i;

		status->chains = dev->mt76.antenna_mask;
		status->chain_signal[0] = FIELD_GET(MT_RXV4_IB_RSSI0, rxdg3) +
					  dev->rssi_offset[0];
		status->chain_signal[1] = FIELD_GET(MT_RXV4_IB_RSSI1, rxdg3) +
					  dev->rssi_offset[1];

		status->signal = status->chain_signal[0];
		if (status->chains & BIT(1))
			status->signal = max(status->signal,
					     status->chain_signal[1]);

		if (FIELD_GET(MT_RXV1_FRAME_MODE, rxdg0) == 1)
			status->bw = RATE_INFO_BW_40;

		rxd += 6;
		if ((u8 *)rxd - skb->data >= skb->len)
			return -EINVAL;
	} else {
		/* group 3 (rate vector) is mandatory */
		return -EINVAL;
	}

	skb_pull(skb, (u8 *)rxd - skb->data + 2 * remove_pad);

	if (insert_ccmp_hdr) {
		u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);

		mt7603_insert_ccmp_hdr(skb, key_id);
	}

	hdr = (struct ieee80211_hdr *)skb->data;
	if (!status->wcid || !ieee80211_is_data_qos(hdr->frame_control))
		return 0;

	status->aggr = unicast &&
		       !ieee80211_is_qos_nullfunc(hdr->frame_control);
	status->tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
	status->seqno = hdr->seq_ctrl >> 4;

	return 0;
}

/*
 * Encode a mac80211 tx rate into the hardware rate value (index + phy mode,
 * optionally STBC). *bw is set to 1 for 40 MHz, 0 otherwise.
 */
static u16
mt7603_mac_tx_rate_val(struct mt7603_dev *dev,
		       const struct ieee80211_tx_rate *rate, bool stbc, u8 *bw)
{
	u8 phy, nss, rate_idx;
	u16 rateval;

	*bw = 0;
	if (rate->flags & IEEE80211_TX_RC_MCS) {
		rate_idx = rate->idx;
		nss = 1 + (rate->idx >> 3);
		phy = MT_PHY_TYPE_HT;
		if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD)
			phy = MT_PHY_TYPE_HT_GF;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			*bw = 1;
	} else {
		const struct ieee80211_rate *r;
		int band = dev->mt76.chandef.chan->band;
		u16 val;

		nss = 1;
		r = &mt76_hw(dev)->wiphy->bands[band]->bitrates[rate->idx];
		if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			val = r->hw_value_short;
		else
			val = r->hw_value;

		/* hw_value packs phy mode in the high byte, index in the low */
		phy = val >> 8;
		rate_idx = val & 0xff;
	}

	rateval = (FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
		   FIELD_PREP(MT_TX_RATE_MODE, phy));

	if (stbc && nss == 1)
		rateval |= MT_TX_RATE_STBC;

	return rateval;
}

/*
 * Program the station's rate table (up to 4 rates, padded with the last
 * one, plus an optional probe rate) into the WTBL rate-update registers.
 * Also tracks the first retry index at which the bandwidth changes.
 */
void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
			   struct ieee80211_tx_rate *probe_rate,
			   struct ieee80211_tx_rate *rates)
{
	int wcid = sta->wcid.idx;
	u32 addr = mt7603_wtbl2_addr(wcid);
	bool stbc = false;
	int n_rates = sta->n_rates;
	u8 bw, bw_prev, bw_idx = 0;
	u16 val[4];
	u16 probe_val;
	u32 w9 = mt76_rr(dev, addr + 9 * 4);
	int i;

	if (!mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000))
		return;

	/* pad unused slots with the last configured rate */
	for (i = n_rates; i < 4; i++)
		rates[i] = rates[n_rates - 1];

	w9 &= MT_WTBL2_W9_SHORT_GI_20 | MT_WTBL2_W9_SHORT_GI_40 |
	      MT_WTBL2_W9_SHORT_GI_80;

	val[0] = mt7603_mac_tx_rate_val(dev, &rates[0], stbc, &bw);
	bw_prev = bw;

	if (probe_rate) {
		probe_val = mt7603_mac_tx_rate_val(dev, probe_rate, stbc, &bw);
		if (bw)
			bw_idx = 1;
		else
			bw_prev = 0;
	} else {
		probe_val = val[0];
	}

	w9 |= FIELD_PREP(MT_WTBL2_W9_CC_BW_SEL, bw);
	w9 |= FIELD_PREP(MT_WTBL2_W9_BW_CAP, bw);

	val[1] = mt7603_mac_tx_rate_val(dev, &rates[1], stbc, &bw);
	if (bw_prev) {
		bw_idx = 3;
		bw_prev = bw;
	}

	val[2] = mt7603_mac_tx_rate_val(dev, &rates[2], stbc, &bw);
	if (bw_prev) {
		bw_idx = 5;
		bw_prev = bw;
	}

	val[3] = mt7603_mac_tx_rate_val(dev, &rates[3], stbc, &bw);
	if (bw_prev)
		bw_idx = 7;

	w9 |= FIELD_PREP(MT_WTBL2_W9_CHANGE_BW_RATE,
			 bw_idx ? bw_idx - 1 : 7);

	mt76_wr(dev, MT_WTBL_RIUCR0, w9);

	mt76_wr(dev, MT_WTBL_RIUCR1,
		FIELD_PREP(MT_WTBL_RIUCR1_RATE0, probe_val) |
		FIELD_PREP(MT_WTBL_RIUCR1_RATE1, val[0]) |
		FIELD_PREP(MT_WTBL_RIUCR1_RATE2_LO, val[0]));

	mt76_wr(dev, MT_WTBL_RIUCR2,
		FIELD_PREP(MT_WTBL_RIUCR2_RATE2_HI, val[0] >> 8) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE3, val[1]) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE4, val[1]) |
		FIELD_PREP(MT_WTBL_RIUCR2_RATE5_LO, val[2]));

	mt76_wr(dev, MT_WTBL_RIUCR3,
		FIELD_PREP(MT_WTBL_RIUCR3_RATE5_HI, val[2] >> 4) |
		FIELD_PREP(MT_WTBL_RIUCR3_RATE6, val[2]) |
		FIELD_PREP(MT_WTBL_RIUCR3_RATE7, val[3]));

	mt76_wr(dev, MT_WTBL_UPDATE,
		FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, wcid) |
		MT_WTBL_UPDATE_RATE_UPDATE |
		MT_WTBL_UPDATE_TX_COUNT_CLEAR);

	/* only wait synchronously on the very first rate programming */
	if (!sta->wcid.tx_rate_set)
		mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);

	sta->rate_count = 2 * MT7603_RATE_RETRY * n_rates;
	sta->wcid.tx_rate_set = true;
}

/*
 * Translate a mac80211 key into the hardware cipher type and copy the key
 * material into key_data (32 bytes, zero-padded). Returns MT_CIPHER_NONE
 * for unsupported ciphers or a NULL/oversized key.
 */
static enum mt7603_cipher_type
mt7603_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data)
{
	memset(key_data, 0, 32);
	if (!key)
		return MT_CIPHER_NONE;

	if (key->keylen > 32)
		return MT_CIPHER_NONE;

	memcpy(key_data, key->key, key->keylen);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		return MT_CIPHER_WEP40;
	case WLAN_CIPHER_SUITE_WEP104:
		return MT_CIPHER_WEP104;
	case WLAN_CIPHER_SUITE_TKIP:
		/* Rx/Tx MIC keys are swapped */
		memcpy(key_data + 16, key->key + 24, 8);
		memcpy(key_data + 24, key->key + 16, 8);
		return MT_CIPHER_TKIP;
	case WLAN_CIPHER_SUITE_CCMP:
		return MT_CIPHER_AES_CCMP;
	default:
		return MT_CIPHER_NONE;
	}
}

/*
 * Install (or remove, key == NULL) an encryption key for a wcid: write the
 * key material into WTBL3 (WEP keys are indexed 16-byte slots) and update
 * the cipher type / key index / rx-key-valid bits in WTBL1.
 * Returns 0 or -EOPNOTSUPP for unsupported ciphers.
 */
int mt7603_wtbl_set_key(struct mt7603_dev *dev, int wcid,
			struct ieee80211_key_conf *key)
{
	enum mt7603_cipher_type cipher;
	u32 addr = mt7603_wtbl3_addr(wcid);
	u8 key_data[32];
	int key_len = sizeof(key_data);

	cipher = mt7603_mac_get_key_info(key, key_data);
	if (cipher == MT_CIPHER_NONE && key)
		return -EOPNOTSUPP;

	if (key && (cipher == MT_CIPHER_WEP40 || cipher == MT_CIPHER_WEP104)) {
		addr += key->keyidx * 16;
		key_len = 16;
	}

	mt76_wr_copy(dev, addr, key_data, key_len);

	addr = mt7603_wtbl1_addr(wcid);
	mt76_rmw_field(dev, addr + 2 * 4, MT_WTBL1_W2_KEY_TYPE, cipher);
	if (key)
		mt76_rmw_field(dev, addr, MT_WTBL1_W0_KEY_IDX, key->keyidx);
	mt76_rmw_field(dev, addr, MT_WTBL1_W0_RX_KEY_VALID, !!key);

	return 0;
}

/*
 * Fill the 8-word hardware tx descriptor (TXWI) for a frame: queue/vif/
 * wlan index, frame type, retry count, optional fixed rate and PN for
 * encrypted frames. Always returns 0.
 */
static int
mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
		      struct sk_buff *skb, struct mt76_queue *q,
		      struct mt76_wcid *wcid, struct ieee80211_sta *sta,
		      int pid, struct ieee80211_key_conf *key)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_vif *vif = info->control.vif;
	struct mt7603_vif *mvif;
	int wlan_idx;
	int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
	int tx_count = 8;
	u8 frame_type, frame_subtype;
	u16 fc = le16_to_cpu(hdr->frame_control);
	u8 vif_idx = 0;
	u32 val;
	u8 bw;

	if (vif) {
		mvif = (struct mt7603_vif *)vif->drv_priv;
		vif_idx = mvif->idx;
		/* beacon/mcast queues use the shifted OWN_MAC range */
		if (vif_idx && q >= &dev->mt76.q_tx[MT_TXQ_BEACON])
			vif_idx += 0x10;
	}

	if (sta) {
		struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;

		tx_count = msta->rate_count;
	}

	if (wcid)
		wlan_idx = wcid->idx;
	else
		wlan_idx = MT7603_WTBL_RESERVED;

	frame_type = (fc & IEEE80211_FCTL_FTYPE) >> 2;
	frame_subtype = (fc & IEEE80211_FCTL_STYPE) >> 4;

	val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
	      FIELD_PREP(MT_TXD0_Q_IDX, q->hw_idx);
	txwi[0] = cpu_to_le32(val);

	val = MT_TXD1_LONG_FORMAT |
	      FIELD_PREP(MT_TXD1_OWN_MAC, vif_idx) |
	      FIELD_PREP(MT_TXD1_TID,
			 skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
	      FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
	      FIELD_PREP(MT_TXD1_HDR_INFO, hdr_len / 2) |
	      FIELD_PREP(MT_TXD1_WLAN_IDX, wlan_idx) |
	      FIELD_PREP(MT_TXD1_PROTECTED, !!key);
	txwi[1] = cpu_to_le32(val);

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		txwi[1] |= cpu_to_le32(MT_TXD1_NO_ACK);

	val = FIELD_PREP(MT_TXD2_FRAME_TYPE, frame_type) |
	      FIELD_PREP(MT_TXD2_SUB_TYPE, frame_subtype) |
	      FIELD_PREP(MT_TXD2_MULTICAST,
			 is_multicast_ether_addr(hdr->addr1));
	txwi[2] = cpu_to_le32(val);

	if (!(info->flags & IEEE80211_TX_CTL_AMPDU))
		txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);

	txwi[4] = 0;

	val = MT_TXD5_TX_STATUS_HOST | MT_TXD5_SW_POWER_MGMT |
	      FIELD_PREP(MT_TXD5_PID, pid);
	txwi[5] = cpu_to_le32(val);

	txwi[6] = 0;

	/* rate-controlled fixed rate (not a probe): bypass the WTBL rates */
	if (rate->idx >= 0 && rate->count &&
	    !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
		bool stbc = info->flags & IEEE80211_TX_CTL_STBC;
		u16 rateval = mt7603_mac_tx_rate_val(dev, rate, stbc, &bw);

		txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);

		val = MT_TXD6_FIXED_BW |
		      FIELD_PREP(MT_TXD6_BW, bw) |
		      FIELD_PREP(MT_TXD6_TX_RATE, rateval);
		txwi[6] |= cpu_to_le32(val);

		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
			txwi[6] |= cpu_to_le32(MT_TXD6_SGI);

		if (!(rate->flags & IEEE80211_TX_RC_MCS))
			txwi[2] |= cpu_to_le32(MT_TXD2_BA_DISABLE);

		tx_count = rate->count;
	}

	/* use maximum tx count for beacons and buffered multicast */
	if (q >= &dev->mt76.q_tx[MT_TXQ_BEACON])
		tx_count = 0x1f;

	val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count) |
	      FIELD_PREP(MT_TXD3_SEQ, le16_to_cpu(hdr->seq_ctrl));
	txwi[3] = cpu_to_le32(val);

	if (key) {
		u64 pn = atomic64_inc_return(&key->tx_pn);

		txwi[3] |= cpu_to_le32(MT_TXD3_PN_VALID);
		txwi[4] = cpu_to_le32(pn & GENMASK(31, 0));
		txwi[5] |= cpu_to_le32(FIELD_PREP(MT_TXD5_PN_HIGH, pn >> 32));
	}

	txwi[7] = 0;

	return 0;
}

/*
 * mt76 tx path hook: wake the station out of powersave when required,
 * register the skb for tx status reporting, program probe rates when rate
 * control is probing, then write the TXWI. Always returns 0.
 */
int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  struct sk_buff *skb, struct mt76_queue *q,
			  struct mt76_wcid *wcid, struct ieee80211_sta *sta,
			  u32 *tx_info)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	struct mt7603_sta *msta = container_of(wcid, struct mt7603_sta, wcid);
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_key_conf *key = info->control.hw_key;
	int pid;

	if (!wcid)
		wcid = &dev->global_sta.wcid;

	if (sta) {
		msta = (struct mt7603_sta *)sta->drv_priv;

		if ((info->flags & (IEEE80211_TX_CTL_NO_PS_BUFFER |
				    IEEE80211_TX_CTL_CLEAR_PS_FILT)) ||
		    (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
			mt7603_wtbl_set_ps(dev, msta, false);
	}

	pid = mt76_tx_status_skb_add(mdev, wcid, skb);

	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
		spin_lock_bh(&dev->mt76.lock);
		msta->rate_probe = true;
		mt7603_wtbl_set_rates(dev, msta, &info->control.rates[0],
				      msta->rates);
		spin_unlock_bh(&dev->mt76.lock);
	}

	mt7603_mac_write_txwi(dev, txwi_ptr, skb, q, wcid, sta, pid, key);

	return 0;
}

/*
 * Translate a hardware tx-status report into mac80211 tx info: ACK state,
 * A-MPDU flags, and the per-rate retry counts reconstructed from the total
 * tx count against the station's configured rate table. Returns false when
 * the report should be discarded (RTS/queue timeout, bad rate).
 */
static bool
mt7603_fill_txs(struct mt7603_dev *dev, struct mt7603_sta *sta,
		struct ieee80211_tx_info *info, __le32 *txs_data)
{
	struct ieee80211_supported_band *sband;
	int final_idx = 0;
	u32 final_rate;
	u32 final_rate_flags;
	bool final_mpdu;
	bool ack_timeout;
	bool fixed_rate;
	bool probe;
	bool ampdu;
	bool cck = false;
	int count;
	u32 txs;
	u8 pid;
	int idx;
	int i;

	fixed_rate = info->status.rates[0].count;
	probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);

	txs = le32_to_cpu(txs_data[4]);
	final_mpdu = txs & MT_TXS4_ACKED_MPDU;
	ampdu = !fixed_rate && (txs & MT_TXS4_AMPDU);
	pid = FIELD_GET(MT_TXS4_PID, txs);
	count = FIELD_GET(MT_TXS4_TX_COUNT, txs);

	txs = le32_to_cpu(txs_data[0]);
	final_rate = FIELD_GET(MT_TXS0_TX_RATE, txs);
	ack_timeout = txs & MT_TXS0_ACK_TIMEOUT;

	if (!ampdu && (txs & MT_TXS0_RTS_TIMEOUT))
		return false;

	if (txs & MT_TXS0_QUEUE_TIMEOUT)
		return false;

	if (!ack_timeout)
		info->flags |= IEEE80211_TX_STAT_ACK;

	info->status.ampdu_len = 1;
	info->status.ampdu_ack_len = !!(info->flags &
					IEEE80211_TX_STAT_ACK);

	if (ampdu || (info->flags & IEEE80211_TX_CTL_AMPDU))
		info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU;

	if (fixed_rate && !probe) {
		info->status.rates[0].count = count;
		goto out;
	}

	/* distribute the total tx count over the configured rate slots */
	for (i = 0, idx = 0; i < ARRAY_SIZE(info->status.rates); i++) {
		int cur_count = min_t(int, count, 2 * MT7603_RATE_RETRY);

		if (!i && probe) {
			cur_count = 1;
		} else {
			info->status.rates[i] = sta->rates[idx];
			idx++;
		}

		if (i && info->status.rates[i].idx < 0) {
			info->status.rates[i - 1].count += count;
			break;
		}

		if (!count) {
			info->status.rates[i].idx = -1;
			break;
		}

		info->status.rates[i].count = cur_count;
		final_idx = i;
		count -= cur_count;
	}

out:
	final_rate_flags = info->status.rates[final_idx].flags;

	switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
	case MT_PHY_TYPE_CCK:
		cck = true;
		/* fall through */
	case MT_PHY_TYPE_OFDM:
		if (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ)
			sband = &dev->mt76.sband_5g.sband;
		else
			sband = &dev->mt76.sband_2g.sband;
		final_rate &= GENMASK(5, 0);
		final_rate = mt7603_get_rate(dev, sband, final_rate, cck);
		final_rate_flags = 0;
		break;
	case MT_PHY_TYPE_HT_GF:
	case MT_PHY_TYPE_HT:
		final_rate_flags |= IEEE80211_TX_RC_MCS;
		final_rate &= GENMASK(5, 0);
		if (i > 15)
			return false;
		break;
	default:
		return false;
	}

	info->status.rates[final_idx].idx = final_rate;
	info->status.rates[final_idx].flags = final_rate_flags;

	return true;
}

/*
 * Match a tx-status report to a queued skb by packet id and complete it.
 * NOTE(review): this function is truncated at the end of this chunk; the
 * remainder lies outside the visible range and is reproduced as seen.
 */
static bool
mt7603_mac_add_txs_skb(struct mt7603_dev *dev, struct mt7603_sta *sta, int pid,
		       __le32 *txs_data)
{
	struct mt76_dev *mdev = &dev->mt76;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (pid < MT_PACKET_ID_FIRST)
		return false;

	mt76_tx_status_lock(mdev, &list);
	skb = mt76_tx_status_skb_get(mdev, &sta->wcid, pid, &list);
	if (skb) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

		if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) {
			spin_lock_bh(&dev->mt76.lock);
			if (sta->rate_probe) {
				mt7603_wtbl_set_rates(dev, sta, NULL,
						      sta->rates);
				sta->rate_probe = false;
			}
			spin_unlock_bh(&dev->mt76.lock);
		}

		if (!mt7603_fill_txs(dev, sta, info, txs_data)) {
			ieee80211_tx_info_clear_status(info);
			info->status.rates[0].idx = -1;
		}
mt76_tx_status_skb_done(mdev, skb, &list); 1120 + } 1121 + mt76_tx_status_unlock(mdev, &list); 1122 + 1123 + return !!skb; 1124 + } 1125 + 1126 + void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data) 1127 + { 1128 + struct ieee80211_tx_info info = {}; 1129 + struct ieee80211_sta *sta = NULL; 1130 + struct mt7603_sta *msta = NULL; 1131 + struct mt76_wcid *wcid; 1132 + __le32 *txs_data = data; 1133 + u32 txs; 1134 + u8 wcidx; 1135 + u8 pid; 1136 + 1137 + txs = le32_to_cpu(txs_data[4]); 1138 + pid = FIELD_GET(MT_TXS4_PID, txs); 1139 + txs = le32_to_cpu(txs_data[3]); 1140 + wcidx = FIELD_GET(MT_TXS3_WCID, txs); 1141 + 1142 + if (pid == MT_PACKET_ID_NO_ACK) 1143 + return; 1144 + 1145 + if (wcidx >= ARRAY_SIZE(dev->mt76.wcid)) 1146 + return; 1147 + 1148 + rcu_read_lock(); 1149 + 1150 + wcid = rcu_dereference(dev->mt76.wcid[wcidx]); 1151 + if (!wcid) 1152 + goto out; 1153 + 1154 + msta = container_of(wcid, struct mt7603_sta, wcid); 1155 + sta = wcid_to_sta(wcid); 1156 + 1157 + if (mt7603_mac_add_txs_skb(dev, msta, pid, txs_data)) 1158 + goto out; 1159 + 1160 + if (wcidx >= MT7603_WTBL_STA || !sta) 1161 + goto out; 1162 + 1163 + if (mt7603_fill_txs(dev, msta, &info, txs_data)) 1164 + ieee80211_tx_status_noskb(mt76_hw(dev), sta, &info); 1165 + 1166 + out: 1167 + rcu_read_unlock(); 1168 + } 1169 + 1170 + void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q, 1171 + struct mt76_queue_entry *e, bool flush) 1172 + { 1173 + struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76); 1174 + struct sk_buff *skb = e->skb; 1175 + 1176 + if (!e->txwi) { 1177 + dev_kfree_skb_any(skb); 1178 + return; 1179 + } 1180 + 1181 + if (q - dev->mt76.q_tx < 4) 1182 + dev->tx_hang_check = 0; 1183 + 1184 + mt76_tx_complete_skb(mdev, skb); 1185 + } 1186 + 1187 + static bool 1188 + wait_for_wpdma(struct mt7603_dev *dev) 1189 + { 1190 + return mt76_poll(dev, MT_WPDMA_GLO_CFG, 1191 + MT_WPDMA_GLO_CFG_TX_DMA_BUSY | 1192 + MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 1193 + 0, 1000); 
1194 + } 1195 + 1196 + static void mt7603_pse_reset(struct mt7603_dev *dev) 1197 + { 1198 + /* Clear previous reset result */ 1199 + if (!dev->reset_cause[RESET_CAUSE_RESET_FAILED]) 1200 + mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE_S); 1201 + 1202 + /* Reset PSE */ 1203 + mt76_set(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE); 1204 + 1205 + if (!mt76_poll_msec(dev, MT_MCU_DEBUG_RESET, 1206 + MT_MCU_DEBUG_RESET_PSE_S, 1207 + MT_MCU_DEBUG_RESET_PSE_S, 500)) { 1208 + dev->reset_cause[RESET_CAUSE_RESET_FAILED]++; 1209 + mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_PSE); 1210 + } else { 1211 + dev->reset_cause[RESET_CAUSE_RESET_FAILED] = 0; 1212 + mt76_clear(dev, MT_MCU_DEBUG_RESET, MT_MCU_DEBUG_RESET_QUEUES); 1213 + } 1214 + 1215 + if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] >= 3) 1216 + dev->reset_cause[RESET_CAUSE_RESET_FAILED] = 0; 1217 + } 1218 + 1219 + void mt7603_mac_dma_start(struct mt7603_dev *dev) 1220 + { 1221 + mt7603_mac_start(dev); 1222 + 1223 + wait_for_wpdma(dev); 1224 + usleep_range(50, 100); 1225 + 1226 + mt76_set(dev, MT_WPDMA_GLO_CFG, 1227 + (MT_WPDMA_GLO_CFG_TX_DMA_EN | 1228 + MT_WPDMA_GLO_CFG_RX_DMA_EN | 1229 + FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) | 1230 + MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE)); 1231 + 1232 + mt7603_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL); 1233 + } 1234 + 1235 + void mt7603_mac_start(struct mt7603_dev *dev) 1236 + { 1237 + mt76_clear(dev, MT_ARB_SCR, 1238 + MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE); 1239 + mt76_wr(dev, MT_WF_ARB_TX_START_0, ~0); 1240 + mt76_set(dev, MT_WF_ARB_RQCR, MT_WF_ARB_RQCR_RX_START); 1241 + } 1242 + 1243 + void mt7603_mac_stop(struct mt7603_dev *dev) 1244 + { 1245 + mt76_set(dev, MT_ARB_SCR, 1246 + MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE); 1247 + mt76_wr(dev, MT_WF_ARB_TX_START_0, 0); 1248 + mt76_clear(dev, MT_WF_ARB_RQCR, MT_WF_ARB_RQCR_RX_START); 1249 + } 1250 + 1251 + void mt7603_pse_client_reset(struct mt7603_dev *dev) 1252 + { 1253 + 
u32 addr; 1254 + 1255 + addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR + 1256 + MT_CLIENT_RESET_TX); 1257 + 1258 + /* Clear previous reset state */ 1259 + mt76_clear(dev, addr, 1260 + MT_CLIENT_RESET_TX_R_E_1 | 1261 + MT_CLIENT_RESET_TX_R_E_2 | 1262 + MT_CLIENT_RESET_TX_R_E_1_S | 1263 + MT_CLIENT_RESET_TX_R_E_2_S); 1264 + 1265 + /* Start PSE client TX abort */ 1266 + mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_1); 1267 + mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_1_S, 1268 + MT_CLIENT_RESET_TX_R_E_1_S, 500); 1269 + 1270 + mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_2); 1271 + mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_SW_RESET); 1272 + 1273 + /* Wait for PSE client to clear TX FIFO */ 1274 + mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_2_S, 1275 + MT_CLIENT_RESET_TX_R_E_2_S, 500); 1276 + 1277 + /* Clear PSE client TX abort state */ 1278 + mt76_clear(dev, addr, 1279 + MT_CLIENT_RESET_TX_R_E_1 | 1280 + MT_CLIENT_RESET_TX_R_E_2); 1281 + } 1282 + 1283 + static void mt7603_dma_sched_reset(struct mt7603_dev *dev) 1284 + { 1285 + if (!is_mt7628(dev)) 1286 + return; 1287 + 1288 + mt76_set(dev, MT_SCH_4, MT_SCH_4_RESET); 1289 + mt76_clear(dev, MT_SCH_4, MT_SCH_4_RESET); 1290 + } 1291 + 1292 + static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev) 1293 + { 1294 + int beacon_int = dev->beacon_int; 1295 + u32 mask = dev->mt76.mmio.irqmask; 1296 + int i; 1297 + 1298 + ieee80211_stop_queues(dev->mt76.hw); 1299 + set_bit(MT76_RESET, &dev->mt76.state); 1300 + 1301 + /* lock/unlock all queues to ensure that no tx is pending */ 1302 + mt76_txq_schedule_all(&dev->mt76); 1303 + 1304 + tasklet_disable(&dev->tx_tasklet); 1305 + tasklet_disable(&dev->pre_tbtt_tasklet); 1306 + napi_disable(&dev->mt76.napi[0]); 1307 + napi_disable(&dev->mt76.napi[1]); 1308 + 1309 + mutex_lock(&dev->mt76.mutex); 1310 + 1311 + mt7603_beacon_set_timer(dev, -1, 0); 1312 + 1313 + if (dev->reset_cause[RESET_CAUSE_RESET_FAILED] || 1314 + dev->cur_reset_cause == RESET_CAUSE_RX_PSE_BUSY || 
1315 + dev->cur_reset_cause == RESET_CAUSE_BEACON_STUCK || 1316 + dev->cur_reset_cause == RESET_CAUSE_TX_HANG) 1317 + mt7603_pse_reset(dev); 1318 + 1319 + if (dev->reset_cause[RESET_CAUSE_RESET_FAILED]) 1320 + goto skip_dma_reset; 1321 + 1322 + mt7603_mac_stop(dev); 1323 + 1324 + mt76_clear(dev, MT_WPDMA_GLO_CFG, 1325 + MT_WPDMA_GLO_CFG_RX_DMA_EN | MT_WPDMA_GLO_CFG_TX_DMA_EN | 1326 + MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE); 1327 + usleep_range(1000, 2000); 1328 + 1329 + mt7603_irq_disable(dev, mask); 1330 + 1331 + mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_FORCE_TX_EOF); 1332 + 1333 + mt7603_pse_client_reset(dev); 1334 + 1335 + for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++) 1336 + mt76_queue_tx_cleanup(dev, i, true); 1337 + 1338 + for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++) 1339 + mt76_queue_rx_reset(dev, i); 1340 + 1341 + mt7603_dma_sched_reset(dev); 1342 + 1343 + mt7603_mac_dma_start(dev); 1344 + 1345 + mt7603_irq_enable(dev, mask); 1346 + 1347 + skip_dma_reset: 1348 + clear_bit(MT76_RESET, &dev->mt76.state); 1349 + mutex_unlock(&dev->mt76.mutex); 1350 + 1351 + tasklet_enable(&dev->tx_tasklet); 1352 + tasklet_schedule(&dev->tx_tasklet); 1353 + 1354 + tasklet_enable(&dev->pre_tbtt_tasklet); 1355 + mt7603_beacon_set_timer(dev, -1, beacon_int); 1356 + 1357 + napi_enable(&dev->mt76.napi[0]); 1358 + napi_schedule(&dev->mt76.napi[0]); 1359 + 1360 + napi_enable(&dev->mt76.napi[1]); 1361 + napi_schedule(&dev->mt76.napi[1]); 1362 + 1363 + ieee80211_wake_queues(dev->mt76.hw); 1364 + mt76_txq_schedule_all(&dev->mt76); 1365 + } 1366 + 1367 + static u32 mt7603_dma_debug(struct mt7603_dev *dev, u8 index) 1368 + { 1369 + u32 val; 1370 + 1371 + mt76_wr(dev, MT_WPDMA_DEBUG, 1372 + FIELD_PREP(MT_WPDMA_DEBUG_IDX, index) | 1373 + MT_WPDMA_DEBUG_SEL); 1374 + 1375 + val = mt76_rr(dev, MT_WPDMA_DEBUG); 1376 + return FIELD_GET(MT_WPDMA_DEBUG_VALUE, val); 1377 + } 1378 + 1379 + static bool mt7603_rx_fifo_busy(struct mt7603_dev *dev) 1380 + { 1381 + if (is_mt7628(dev)) 1382 + 
return mt7603_dma_debug(dev, 9) & BIT(9); 1383 + 1384 + return mt7603_dma_debug(dev, 2) & BIT(8); 1385 + } 1386 + 1387 + static bool mt7603_rx_dma_busy(struct mt7603_dev *dev) 1388 + { 1389 + if (!(mt76_rr(dev, MT_WPDMA_GLO_CFG) & MT_WPDMA_GLO_CFG_RX_DMA_BUSY)) 1390 + return false; 1391 + 1392 + return mt7603_rx_fifo_busy(dev); 1393 + } 1394 + 1395 + static bool mt7603_tx_dma_busy(struct mt7603_dev *dev) 1396 + { 1397 + u32 val; 1398 + 1399 + if (!(mt76_rr(dev, MT_WPDMA_GLO_CFG) & MT_WPDMA_GLO_CFG_TX_DMA_BUSY)) 1400 + return false; 1401 + 1402 + val = mt7603_dma_debug(dev, 9); 1403 + return (val & BIT(8)) && (val & 0xf) != 0xf; 1404 + } 1405 + 1406 + static bool mt7603_tx_hang(struct mt7603_dev *dev) 1407 + { 1408 + struct mt76_queue *q; 1409 + u32 dma_idx, prev_dma_idx; 1410 + int i; 1411 + 1412 + for (i = 0; i < 4; i++) { 1413 + q = &dev->mt76.q_tx[i]; 1414 + 1415 + if (!q->queued) 1416 + continue; 1417 + 1418 + prev_dma_idx = dev->tx_dma_idx[i]; 1419 + dma_idx = ioread32(&q->regs->dma_idx); 1420 + dev->tx_dma_idx[i] = dma_idx; 1421 + 1422 + if (dma_idx == prev_dma_idx && 1423 + dma_idx != ioread32(&q->regs->cpu_idx)) 1424 + break; 1425 + } 1426 + 1427 + return i < 4; 1428 + } 1429 + 1430 + static bool mt7603_rx_pse_busy(struct mt7603_dev *dev) 1431 + { 1432 + u32 addr, val; 1433 + 1434 + if (mt76_rr(dev, MT_MCU_DEBUG_RESET) & MT_MCU_DEBUG_RESET_QUEUES) 1435 + return true; 1436 + 1437 + if (mt7603_rx_fifo_busy(dev)) 1438 + return false; 1439 + 1440 + addr = mt7603_reg_map(dev, MT_CLIENT_BASE_PHYS_ADDR + MT_CLIENT_STATUS); 1441 + mt76_wr(dev, addr, 3); 1442 + val = mt76_rr(dev, addr) >> 16; 1443 + 1444 + if (is_mt7628(dev) && (val & 0x4001) == 0x4001) 1445 + return true; 1446 + 1447 + return (val & 0x8001) == 0x8001 || (val & 0xe001) == 0xe001; 1448 + } 1449 + 1450 + static bool 1451 + mt7603_watchdog_check(struct mt7603_dev *dev, u8 *counter, 1452 + enum mt7603_reset_cause cause, 1453 + bool (*check)(struct mt7603_dev *dev)) 1454 + { 1455 + if (dev->reset_test == 
cause + 1) { 1456 + dev->reset_test = 0; 1457 + goto trigger; 1458 + } 1459 + 1460 + if (check) { 1461 + if (!check(dev) && *counter < MT7603_WATCHDOG_TIMEOUT) { 1462 + *counter = 0; 1463 + return false; 1464 + } 1465 + 1466 + (*counter)++; 1467 + } 1468 + 1469 + if (*counter < MT7603_WATCHDOG_TIMEOUT) 1470 + return false; 1471 + trigger: 1472 + dev->cur_reset_cause = cause; 1473 + dev->reset_cause[cause]++; 1474 + return true; 1475 + } 1476 + 1477 + void mt7603_update_channel(struct mt76_dev *mdev) 1478 + { 1479 + struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76); 1480 + struct mt76_channel_state *state; 1481 + ktime_t cur_time; 1482 + u32 busy; 1483 + 1484 + if (!test_bit(MT76_STATE_RUNNING, &dev->mt76.state)) 1485 + return; 1486 + 1487 + state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan); 1488 + busy = mt76_rr(dev, MT_MIB_STAT_PSCCA); 1489 + 1490 + spin_lock_bh(&dev->mt76.cc_lock); 1491 + cur_time = ktime_get_boottime(); 1492 + state->cc_busy += busy; 1493 + state->cc_active += ktime_to_us(ktime_sub(cur_time, dev->survey_time)); 1494 + dev->survey_time = cur_time; 1495 + spin_unlock_bh(&dev->mt76.cc_lock); 1496 + } 1497 + 1498 + void 1499 + mt7603_edcca_set_strict(struct mt7603_dev *dev, bool val) 1500 + { 1501 + u32 rxtd_6 = 0xd7c80000; 1502 + 1503 + if (val == dev->ed_strict_mode) 1504 + return; 1505 + 1506 + dev->ed_strict_mode = val; 1507 + 1508 + /* Ensure that ED/CCA does not trigger if disabled */ 1509 + if (!dev->ed_monitor) 1510 + rxtd_6 |= FIELD_PREP(MT_RXTD_6_CCAED_TH, 0x34); 1511 + else 1512 + rxtd_6 |= FIELD_PREP(MT_RXTD_6_CCAED_TH, 0x7d); 1513 + 1514 + if (dev->ed_monitor && !dev->ed_strict_mode) 1515 + rxtd_6 |= FIELD_PREP(MT_RXTD_6_ACI_TH, 0x0f); 1516 + else 1517 + rxtd_6 |= FIELD_PREP(MT_RXTD_6_ACI_TH, 0x10); 1518 + 1519 + mt76_wr(dev, MT_RXTD(6), rxtd_6); 1520 + 1521 + mt76_rmw_field(dev, MT_RXTD(13), MT_RXTD_13_ACI_TH_EN, 1522 + dev->ed_monitor && !dev->ed_strict_mode); 1523 + } 1524 + 1525 + static void 1526 + 
mt7603_edcca_check(struct mt7603_dev *dev) 1527 + { 1528 + u32 val = mt76_rr(dev, MT_AGC(41)); 1529 + ktime_t cur_time; 1530 + int rssi0, rssi1; 1531 + u32 active; 1532 + u32 ed_busy; 1533 + 1534 + if (!dev->ed_monitor) 1535 + return; 1536 + 1537 + rssi0 = FIELD_GET(MT_AGC_41_RSSI_0, val); 1538 + if (rssi0 > 128) 1539 + rssi0 -= 256; 1540 + 1541 + rssi1 = FIELD_GET(MT_AGC_41_RSSI_1, val); 1542 + if (rssi1 > 128) 1543 + rssi1 -= 256; 1544 + 1545 + if (max(rssi0, rssi1) >= -40 && 1546 + dev->ed_strong_signal < MT7603_EDCCA_BLOCK_TH) 1547 + dev->ed_strong_signal++; 1548 + else if (dev->ed_strong_signal > 0) 1549 + dev->ed_strong_signal--; 1550 + 1551 + cur_time = ktime_get_boottime(); 1552 + ed_busy = mt76_rr(dev, MT_MIB_STAT_ED) & MT_MIB_STAT_ED_MASK; 1553 + 1554 + active = ktime_to_us(ktime_sub(cur_time, dev->ed_time)); 1555 + dev->ed_time = cur_time; 1556 + 1557 + if (!active) 1558 + return; 1559 + 1560 + if (100 * ed_busy / active > 90) { 1561 + if (dev->ed_trigger < 0) 1562 + dev->ed_trigger = 0; 1563 + dev->ed_trigger++; 1564 + } else { 1565 + if (dev->ed_trigger > 0) 1566 + dev->ed_trigger = 0; 1567 + dev->ed_trigger--; 1568 + } 1569 + 1570 + if (dev->ed_trigger > MT7603_EDCCA_BLOCK_TH || 1571 + dev->ed_strong_signal < MT7603_EDCCA_BLOCK_TH / 2) { 1572 + mt7603_edcca_set_strict(dev, true); 1573 + } else if (dev->ed_trigger < -MT7603_EDCCA_BLOCK_TH) { 1574 + mt7603_edcca_set_strict(dev, false); 1575 + } 1576 + 1577 + if (dev->ed_trigger > MT7603_EDCCA_BLOCK_TH) 1578 + dev->ed_trigger = MT7603_EDCCA_BLOCK_TH; 1579 + else if (dev->ed_trigger < -MT7603_EDCCA_BLOCK_TH) 1580 + dev->ed_trigger = -MT7603_EDCCA_BLOCK_TH; 1581 + } 1582 + 1583 + void mt7603_cca_stats_reset(struct mt7603_dev *dev) 1584 + { 1585 + mt76_set(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_RESET); 1586 + mt76_clear(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_RESET); 1587 + mt76_set(dev, MT_PHYCTRL(2), MT_PHYCTRL_2_STATUS_EN); 1588 + } 1589 + 1590 + static void 1591 + mt7603_adjust_sensitivity(struct 
mt7603_dev *dev) 1592 + { 1593 + u32 agc0 = dev->agc0, agc3 = dev->agc3; 1594 + u32 adj; 1595 + 1596 + if (!dev->sensitivity || dev->sensitivity < -100) { 1597 + dev->sensitivity = 0; 1598 + } else if (dev->sensitivity <= -84) { 1599 + adj = 7 + (dev->sensitivity + 92) / 2; 1600 + 1601 + agc0 = 0x56f0076f; 1602 + agc0 |= adj << 12; 1603 + agc0 |= adj << 16; 1604 + agc3 = 0x81d0d5e3; 1605 + } else if (dev->sensitivity <= -72) { 1606 + adj = 7 + (dev->sensitivity + 80) / 2; 1607 + 1608 + agc0 = 0x6af0006f; 1609 + agc0 |= adj << 8; 1610 + agc0 |= adj << 12; 1611 + agc0 |= adj << 16; 1612 + 1613 + agc3 = 0x8181d5e3; 1614 + } else { 1615 + if (dev->sensitivity > -54) 1616 + dev->sensitivity = -54; 1617 + 1618 + adj = 7 + (dev->sensitivity + 80) / 2; 1619 + 1620 + agc0 = 0x7ff0000f; 1621 + agc0 |= adj << 4; 1622 + agc0 |= adj << 8; 1623 + agc0 |= adj << 12; 1624 + agc0 |= adj << 16; 1625 + 1626 + agc3 = 0x818181e3; 1627 + } 1628 + 1629 + mt76_wr(dev, MT_AGC(0), agc0); 1630 + mt76_wr(dev, MT_AGC1(0), agc0); 1631 + 1632 + mt76_wr(dev, MT_AGC(3), agc3); 1633 + mt76_wr(dev, MT_AGC1(3), agc3); 1634 + } 1635 + 1636 + static void 1637 + mt7603_false_cca_check(struct mt7603_dev *dev) 1638 + { 1639 + int pd_cck, pd_ofdm, mdrdy_cck, mdrdy_ofdm; 1640 + int false_cca; 1641 + int min_signal; 1642 + u32 val; 1643 + 1644 + val = mt76_rr(dev, MT_PHYCTRL_STAT_PD); 1645 + pd_cck = FIELD_GET(MT_PHYCTRL_STAT_PD_CCK, val); 1646 + pd_ofdm = FIELD_GET(MT_PHYCTRL_STAT_PD_OFDM, val); 1647 + 1648 + val = mt76_rr(dev, MT_PHYCTRL_STAT_MDRDY); 1649 + mdrdy_cck = FIELD_GET(MT_PHYCTRL_STAT_MDRDY_CCK, val); 1650 + mdrdy_ofdm = FIELD_GET(MT_PHYCTRL_STAT_MDRDY_OFDM, val); 1651 + 1652 + dev->false_cca_ofdm = pd_ofdm - mdrdy_ofdm; 1653 + dev->false_cca_cck = pd_cck - mdrdy_cck; 1654 + 1655 + mt7603_cca_stats_reset(dev); 1656 + 1657 + min_signal = mt76_get_min_avg_rssi(&dev->mt76); 1658 + if (!min_signal) { 1659 + dev->sensitivity = 0; 1660 + dev->last_cca_adj = jiffies; 1661 + goto out; 1662 + } 1663 + 
1664 + min_signal -= 15; 1665 + 1666 + false_cca = dev->false_cca_ofdm + dev->false_cca_cck; 1667 + if (false_cca > 600) { 1668 + if (!dev->sensitivity) 1669 + dev->sensitivity = -92; 1670 + else 1671 + dev->sensitivity += 2; 1672 + dev->last_cca_adj = jiffies; 1673 + } else if (false_cca < 100 || 1674 + time_after(jiffies, dev->last_cca_adj + 10 * HZ)) { 1675 + dev->last_cca_adj = jiffies; 1676 + if (!dev->sensitivity) 1677 + goto out; 1678 + 1679 + dev->sensitivity -= 2; 1680 + } 1681 + 1682 + if (dev->sensitivity && dev->sensitivity > min_signal) { 1683 + dev->sensitivity = min_signal; 1684 + dev->last_cca_adj = jiffies; 1685 + } 1686 + 1687 + out: 1688 + mt7603_adjust_sensitivity(dev); 1689 + } 1690 + 1691 + void mt7603_mac_work(struct work_struct *work) 1692 + { 1693 + struct mt7603_dev *dev = container_of(work, struct mt7603_dev, 1694 + mac_work.work); 1695 + bool reset = false; 1696 + 1697 + mt76_tx_status_check(&dev->mt76, NULL, false); 1698 + 1699 + mutex_lock(&dev->mt76.mutex); 1700 + 1701 + dev->mac_work_count++; 1702 + mt7603_update_channel(&dev->mt76); 1703 + mt7603_edcca_check(dev); 1704 + 1705 + if (dev->mac_work_count == 10) 1706 + mt7603_false_cca_check(dev); 1707 + 1708 + if (mt7603_watchdog_check(dev, &dev->rx_pse_check, 1709 + RESET_CAUSE_RX_PSE_BUSY, 1710 + mt7603_rx_pse_busy) || 1711 + mt7603_watchdog_check(dev, &dev->beacon_check, 1712 + RESET_CAUSE_BEACON_STUCK, 1713 + NULL) || 1714 + mt7603_watchdog_check(dev, &dev->tx_hang_check, 1715 + RESET_CAUSE_TX_HANG, 1716 + mt7603_tx_hang) || 1717 + mt7603_watchdog_check(dev, &dev->tx_dma_check, 1718 + RESET_CAUSE_TX_BUSY, 1719 + mt7603_tx_dma_busy) || 1720 + mt7603_watchdog_check(dev, &dev->rx_dma_check, 1721 + RESET_CAUSE_RX_BUSY, 1722 + mt7603_rx_dma_busy) || 1723 + mt7603_watchdog_check(dev, &dev->mcu_hang, 1724 + RESET_CAUSE_MCU_HANG, 1725 + NULL) || 1726 + dev->reset_cause[RESET_CAUSE_RESET_FAILED]) { 1727 + dev->beacon_check = 0; 1728 + dev->tx_dma_check = 0; 1729 + dev->tx_hang_check = 0; 
1730 + dev->rx_dma_check = 0; 1731 + dev->rx_pse_check = 0; 1732 + dev->mcu_hang = 0; 1733 + dev->rx_dma_idx = ~0; 1734 + memset(dev->tx_dma_idx, 0xff, sizeof(dev->tx_dma_idx)); 1735 + reset = true; 1736 + dev->mac_work_count = 0; 1737 + } 1738 + 1739 + if (dev->mac_work_count >= 10) 1740 + dev->mac_work_count = 0; 1741 + 1742 + mutex_unlock(&dev->mt76.mutex); 1743 + 1744 + if (reset) 1745 + mt7603_mac_watchdog_reset(dev); 1746 + 1747 + ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work, 1748 + msecs_to_jiffies(MT7603_WATCHDOG_TIME)); 1749 + }
+242
drivers/net/wireless/mediatek/mt76/mt7603/mac.h
/* SPDX-License-Identifier: ISC */

/*
 * MT7603 MAC descriptor layout: bitfield definitions for the hardware
 * RX descriptor words (MT_RXD*), the RX vector words appended by the PHY
 * (MT_RXV*), the TX descriptor words (MT_TXD*), and the TX status words
 * reported by the MAC (MT_TXS*).  All fields are expressed as GENMASK()/
 * BIT() masks over 32-bit little-endian descriptor words.
 */

#ifndef __MT7603_MAC_H
#define __MT7603_MAC_H

/* RX descriptor word 0 (common to all RX packet types) */
#define MT_RXD0_LENGTH			GENMASK(15, 0)
#define MT_RXD0_PKT_TYPE		GENMASK(31, 29)

/* RX descriptor word 0, fields valid for PKT_TYPE_NORMAL frames */
#define MT_RXD0_NORMAL_ETH_TYPE_OFS	GENMASK(22, 16)
#define MT_RXD0_NORMAL_IP_SUM		BIT(23)
#define MT_RXD0_NORMAL_UDP_TCP_SUM	BIT(24)
#define MT_RXD0_NORMAL_GROUP_1		BIT(25)
#define MT_RXD0_NORMAL_GROUP_2		BIT(26)
#define MT_RXD0_NORMAL_GROUP_3		BIT(27)
#define MT_RXD0_NORMAL_GROUP_4		BIT(28)

/* Values of the MT_RXD0_PKT_TYPE field */
enum rx_pkt_type {
	PKT_TYPE_TXS = 0,
	PKT_TYPE_TXRXV = 1,
	PKT_TYPE_NORMAL = 2,
	PKT_TYPE_RX_DUP_RFB = 3,
	PKT_TYPE_RX_TMR = 4,
	PKT_TYPE_RETRIEVE = 5,
	PKT_TYPE_RX_EVENT = 7,	/* note: value 6 is unused/reserved */
};

/* RX descriptor word 1 */
#define MT_RXD1_NORMAL_BSSID		GENMASK(31, 26)
#define MT_RXD1_NORMAL_PAYLOAD_FORMAT	GENMASK(25, 24)
#define MT_RXD1_NORMAL_HDR_TRANS	BIT(23)
#define MT_RXD1_NORMAL_HDR_OFFSET	BIT(22)
#define MT_RXD1_NORMAL_MAC_HDR_LEN	GENMASK(21, 16)
#define MT_RXD1_NORMAL_CH_FREQ		GENMASK(15, 8)
#define MT_RXD1_NORMAL_KEY_ID		GENMASK(7, 6)
#define MT_RXD1_NORMAL_BEACON_UC	BIT(5)
#define MT_RXD1_NORMAL_BEACON_MC	BIT(4)
#define MT_RXD1_NORMAL_BCAST		BIT(3)
#define MT_RXD1_NORMAL_MCAST		BIT(2)
#define MT_RXD1_NORMAL_U2M		BIT(1)
#define MT_RXD1_NORMAL_HTC_VLD		BIT(0)

/* RX descriptor word 2 (mostly error/status flags) */
#define MT_RXD2_NORMAL_NON_AMPDU	BIT(31)
#define MT_RXD2_NORMAL_NON_AMPDU_SUB	BIT(30)
#define MT_RXD2_NORMAL_NDATA		BIT(29)
#define MT_RXD2_NORMAL_NULL_FRAME	BIT(28)
#define MT_RXD2_NORMAL_FRAG		BIT(27)
#define MT_RXD2_NORMAL_UDF_VALID	BIT(26)
#define MT_RXD2_NORMAL_LLC_MIS		BIT(25)
#define MT_RXD2_NORMAL_MAX_LEN_ERROR	BIT(24)
#define MT_RXD2_NORMAL_AMSDU_ERR	BIT(23)
#define MT_RXD2_NORMAL_LEN_MISMATCH	BIT(22)
#define MT_RXD2_NORMAL_TKIP_MIC_ERR	BIT(21)
#define MT_RXD2_NORMAL_ICV_ERR		BIT(20)
#define MT_RXD2_NORMAL_CLM		BIT(19)
#define MT_RXD2_NORMAL_CM		BIT(18)
#define MT_RXD2_NORMAL_FCS_ERR		BIT(17)
#define MT_RXD2_NORMAL_SW_BIT		BIT(16)
#define MT_RXD2_NORMAL_SEC_MODE		GENMASK(15, 12)
#define MT_RXD2_NORMAL_TID		GENMASK(11, 8)
#define MT_RXD2_NORMAL_WLAN_IDX	GENMASK(7, 0)

/* RX descriptor word 3 (packet filter / classification / offload status) */
#define MT_RXD3_NORMAL_PF_STS		GENMASK(31, 30)
#define MT_RXD3_NORMAL_PF_MODE		BIT(29)
#define MT_RXD3_NORMAL_CLS_BITMAP	GENMASK(28, 19)
#define MT_RXD3_NORMAL_WOL		GENMASK(18, 14)
#define MT_RXD3_NORMAL_MAGIC_PKT	BIT(13)
#define MT_RXD3_NORMAL_OFLD		GENMASK(12, 11)
#define MT_RXD3_NORMAL_CLS		BIT(10)
#define MT_RXD3_NORMAL_PATTERN_DROP	BIT(9)
#define MT_RXD3_NORMAL_TSF_COMPARE_LOSS	BIT(8)
#define MT_RXD3_NORMAL_RXV_SEQ		GENMASK(7, 0)

/* RX vector word 1 (PHY rate / mode information) */
#define MT_RXV1_VHTA1_B5_B4		GENMASK(31, 30)
#define MT_RXV1_VHTA2_B8_B1		GENMASK(29, 22)
#define MT_RXV1_HT_NO_SOUND		BIT(21)
#define MT_RXV1_HT_SMOOTH		BIT(20)
#define MT_RXV1_HT_SHORT_GI		BIT(19)
#define MT_RXV1_HT_AGGR			BIT(18)
#define MT_RXV1_VHTA1_B22		BIT(17)
#define MT_RXV1_FRAME_MODE		GENMASK(16, 15)
#define MT_RXV1_TX_MODE			GENMASK(14, 12)
#define MT_RXV1_HT_EXT_LTF		GENMASK(11, 10)
#define MT_RXV1_HT_AD_CODE		BIT(9)
#define MT_RXV1_HT_STBC			GENMASK(8, 7)
#define MT_RXV1_TX_RATE			GENMASK(6, 0)

/* RX vector word 2 */
#define MT_RXV2_VHTA1_B16_B6		GENMASK(31, 21)
#define MT_RXV2_LENGTH			GENMASK(20, 0)

/* RX vector word 3 (per-chain AGC/RCPI measurements) */
#define MT_RXV3_F_AGC1_CAL_GAIN		GENMASK(31, 29)
#define MT_RXV3_F_AGC1_EQ_CAL		BIT(28)
#define MT_RXV3_RCPI1			GENMASK(27, 20)
#define MT_RXV3_F_AGC0_CAL_GAIN		GENMASK(19, 17)
#define MT_RXV3_F_AGC0_EQ_CAL		BIT(16)
#define MT_RXV3_RCPI0			GENMASK(15, 8)
#define MT_RXV3_SEL_ANT			BIT(7)
#define MT_RXV3_ACI_DET_X		BIT(6)
#define MT_RXV3_OFDM_FREQ_TRANS_DETECT	BIT(5)
#define MT_RXV3_VHTA1_B21_B17		GENMASK(4, 0)

/* RX vector word 4 (RSSI measurements) */
#define MT_RXV4_F_AGC_CAL_GAIN		GENMASK(31, 29)
#define MT_RXV4_F_AGC2_EQ_CAL		BIT(28)
#define MT_RXV4_IB_RSSI1		GENMASK(27, 20)
#define MT_RXV4_F_AGC_LPF_GAIN_X	GENMASK(19, 16)
#define MT_RXV4_WB_RSSI_X		GENMASK(15, 8)
#define MT_RXV4_IB_RSSI0		GENMASK(7, 0)

/* RX vector word 5 */
#define MT_RXV5_LTF_SNR0		GENMASK(31, 26)
#define MT_RXV5_LTF_PROC_TIME		GENMASK(25, 19)
#define MT_RXV5_FOE			GENMASK(18, 7)
/* NOTE(review): "SATE" looks like a typo for "STATE" (cf. MT_RXV6_C_AGC_STATE)
 * — confirm against the vendor register sheet before renaming.
 */
#define MT_RXV5_C_AGC_SATE		GENMASK(6, 4)
#define MT_RXV5_F_AGC_LNA_GAIN_0	GENMASK(3, 2)
#define MT_RXV5_F_AGC_LNA_GAIN_1	GENMASK(1, 0)

/* RX vector word 6 (per-chain noise floor) */
#define MT_RXV6_C_AGC_STATE		GENMASK(30, 28)
#define MT_RXV6_NS_TS_FIELD		GENMASK(27, 25)
#define MT_RXV6_RX_VALID		BIT(24)
#define MT_RXV6_NF2			GENMASK(23, 16)
#define MT_RXV6_NF1			GENMASK(15, 8)
#define MT_RXV6_NF0			GENMASK(7, 0)

/* Values of the MT_TXD1_HDR_FORMAT field */
enum mt7603_tx_header_format {
	MT_HDR_FORMAT_802_3,
	MT_HDR_FORMAT_CMD,
	MT_HDR_FORMAT_802_11,
	MT_HDR_FORMAT_802_11_EXT,
};

/* TX descriptor: 8 32-bit words */
#define MT_TXD_SIZE			(8 * 4)

/* TX descriptor word 0 */
#define MT_TXD0_P_IDX			BIT(31)
#define MT_TXD0_Q_IDX			GENMASK(30, 27)
#define MT_TXD0_UTXB			BIT(26)
#define MT_TXD0_UNXV			BIT(25)
#define MT_TXD0_UDP_TCP_SUM		BIT(24)
#define MT_TXD0_IP_SUM			BIT(23)
#define MT_TXD0_ETH_TYPE_OFFSET		GENMASK(22, 16)
#define MT_TXD0_TX_BYTES		GENMASK(15, 0)

/* TX descriptor word 1 */
#define MT_TXD1_OWN_MAC			GENMASK(31, 26)
#define MT_TXD1_PROTECTED		BIT(23)
#define MT_TXD1_TID			GENMASK(22, 20)
#define MT_TXD1_NO_ACK			BIT(19)
#define MT_TXD1_HDR_PAD			GENMASK(18, 16)
#define MT_TXD1_LONG_FORMAT		BIT(15)
#define MT_TXD1_HDR_FORMAT		GENMASK(14, 13)
#define MT_TXD1_HDR_INFO		GENMASK(12, 8)
#define MT_TXD1_WLAN_IDX		GENMASK(7, 0)

/* TX descriptor word 2 */
#define MT_TXD2_FIX_RATE		BIT(31)
#define MT_TXD2_TIMING_MEASURE		BIT(30)
#define MT_TXD2_BA_DISABLE		BIT(29)
#define MT_TXD2_POWER_OFFSET		GENMASK(28, 24)
#define MT_TXD2_MAX_TX_TIME		GENMASK(23, 16)
#define MT_TXD2_FRAG			GENMASK(15, 14)
#define MT_TXD2_HTC_VLD			BIT(13)
#define MT_TXD2_DURATION		BIT(12)
#define MT_TXD2_BIP			BIT(11)
#define MT_TXD2_MULTICAST		BIT(10)
#define MT_TXD2_RTS			BIT(9)
#define MT_TXD2_SOUNDING		BIT(8)
#define MT_TXD2_NDPA			BIT(7)
#define MT_TXD2_NDP			BIT(6)
#define MT_TXD2_FRAME_TYPE		GENMASK(5, 4)
#define MT_TXD2_SUB_TYPE		GENMASK(3, 0)

/* TX descriptor word 3 (sequence/PN control and retry counts) */
#define MT_TXD3_SN_VALID		BIT(31)
#define MT_TXD3_PN_VALID		BIT(30)
#define MT_TXD3_SEQ			GENMASK(27, 16)
#define MT_TXD3_REM_TX_COUNT		GENMASK(15, 11)
#define MT_TXD3_TX_COUNT		GENMASK(10, 6)

/* TX descriptor word 4 */
#define MT_TXD4_PN_LOW			GENMASK(31, 0)

/* TX descriptor word 5 (PN high half + TX status reporting control) */
#define MT_TXD5_PN_HIGH			GENMASK(31, 16)
#define MT_TXD5_SW_POWER_MGMT		BIT(13)
#define MT_TXD5_BA_SEQ_CTRL		BIT(12)
#define MT_TXD5_DA_SELECT		BIT(11)
#define MT_TXD5_TX_STATUS_HOST		BIT(10)
#define MT_TXD5_TX_STATUS_MCU		BIT(9)
#define MT_TXD5_TX_STATUS_FMT		BIT(8)
#define MT_TXD5_PID			GENMASK(7, 0)

/* TX descriptor word 6 (fixed-rate override) */
#define MT_TXD6_SGI			BIT(31)
#define MT_TXD6_LDPC			BIT(30)
#define MT_TXD6_TX_RATE			GENMASK(29, 18)
#define MT_TXD6_I_TXBF			BIT(17)
#define MT_TXD6_E_TXBF			BIT(16)
#define MT_TXD6_DYN_BW			BIT(15)
#define MT_TXD6_ANT_PRI			GENMASK(14, 12)
#define MT_TXD6_SPE_EN			BIT(11)
#define MT_TXD6_FIXED_BW		BIT(10)
#define MT_TXD6_BW			GENMASK(9, 8)
#define MT_TXD6_ANT_ID			GENMASK(7, 2)
#define MT_TXD6_FIXED_RATE		BIT(0)

/* Sub-fields of the rate value used in MT_TXD6_TX_RATE / MT_TXS0_TX_RATE */
#define MT_TX_RATE_STBC			BIT(11)
#define MT_TX_RATE_NSS			GENMASK(10, 9)
#define MT_TX_RATE_MODE			GENMASK(8, 6)
#define MT_TX_RATE_IDX			GENMASK(5, 0)

/* TX status word 0 */
#define MT_TXS0_ANTENNA			GENMASK(31, 26)
/* NOTE(review): MT_TXS0_TID (bits 25:22) and MT_TXS0_BA_ERROR (bit 22)
 * overlap at bit 22 — verify against the datasheet; one of the two
 * definitions is likely off by one.
 */
#define MT_TXS0_TID			GENMASK(25, 22)
#define MT_TXS0_BA_ERROR		BIT(22)
#define MT_TXS0_PS_FLAG			BIT(21)
#define MT_TXS0_TXOP_TIMEOUT		BIT(20)
#define MT_TXS0_BIP_ERROR		BIT(19)

#define MT_TXS0_QUEUE_TIMEOUT		BIT(18)
#define MT_TXS0_RTS_TIMEOUT		BIT(17)
#define MT_TXS0_ACK_TIMEOUT		BIT(16)
/* Convenience mask covering the three timeout bits above */
#define MT_TXS0_ACK_ERROR_MASK		GENMASK(18, 16)

#define MT_TXS0_TX_STATUS_HOST		BIT(15)
#define MT_TXS0_TX_STATUS_MCU		BIT(14)
#define MT_TXS0_TXS_FORMAT		BIT(13)
#define MT_TXS0_FIXED_RATE		BIT(12)
#define MT_TXS0_TX_RATE			GENMASK(11, 0)

/* TX status word 1: layout depends on the TXS format (F0 vs F1 fields) */
#define MT_TXS1_F0_TIMESTAMP		GENMASK(31, 0)
#define MT_TXS1_F1_NOISE_2		GENMASK(23, 16)
#define MT_TXS1_F1_NOISE_1		GENMASK(15, 8)
#define MT_TXS1_F1_NOISE_0		GENMASK(7, 0)

/* TX status word 2 */
#define MT_TXS2_F0_FRONT_TIME		GENMASK(24, 0)
#define MT_TXS2_F1_RCPI_2		GENMASK(23, 16)
#define MT_TXS2_F1_RCPI_1		GENMASK(15, 8)
#define MT_TXS2_F1_RCPI_0		GENMASK(7, 0)

/* TX status word 3 */
#define MT_TXS3_WCID			GENMASK(31, 24)
#define MT_TXS3_RXV_SEQNO		GENMASK(23, 16)
#define MT_TXS3_TX_DELAY		GENMASK(15, 0)

/* TX status word 4 */
#define MT_TXS4_LAST_TX_RATE		GENMASK(31, 29)
#define MT_TXS4_TX_COUNT		GENMASK(28, 24)
#define MT_TXS4_AMPDU			BIT(23)
#define MT_TXS4_ACKED_MPDU		BIT(22)
#define MT_TXS4_PID			GENMASK(21, 14)
#define MT_TXS4_BW			GENMASK(13, 12)
#define MT_TXS4_F0_SEQNO		GENMASK(11, 0)
#define MT_TXS4_F1_TSSI			GENMASK(11, 0)

#endif
+709
drivers/net/wireless/mediatek/mt76/mt7603/main.c
// SPDX-License-Identifier: ISC

/*
 * mac80211 driver callbacks (struct ieee80211_ops) for the MediaTek
 * MT7603E/MT7628 wireless MAC, plus module registration glue for the
 * PCI and platform (SoC) front-ends.
 */

#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/pci.h>
#include <linux/module.h>
#include "mt7603.h"
#include "eeprom.h"

/* ieee80211_ops::start — bring up the MAC and kick off the periodic
 * watchdog/statistics worker (mac_work runs once immediately here).
 */
static int
mt7603_start(struct ieee80211_hw *hw)
{
	struct mt7603_dev *dev = hw->priv;

	mt7603_mac_start(dev);
	dev->survey_time = ktime_get_boottime();
	set_bit(MT76_STATE_RUNNING, &dev->mt76.state);
	mt7603_mac_work(&dev->mac_work.work);

	return 0;
}

/* ieee80211_ops::stop — clear RUNNING before cancelling the worker so a
 * concurrent mac_work run sees the flag and does not re-arm itself.
 */
static void
mt7603_stop(struct ieee80211_hw *hw)
{
	struct mt7603_dev *dev = hw->priv;

	clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
	cancel_delayed_work_sync(&dev->mac_work);
	mt7603_mac_stop(dev);
}

/* ieee80211_ops::add_interface — allocate a BSS slot and a reserved WCID
 * (WTBL entry) for per-vif multicast/broadcast traffic.
 * Returns 0 or -ENOSPC when all MT7603_MAX_INTERFACES slots are taken.
 */
static int
mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
	struct mt7603_dev *dev = hw->priv;
	struct mt76_txq *mtxq;
	u8 bc_addr[ETH_ALEN];
	int idx;
	int ret = 0;

	mutex_lock(&dev->mt76.mutex);

	/* first free bit in vif_mask is the new BSS index */
	mvif->idx = ffs(~dev->vif_mask) - 1;
	if (mvif->idx >= MT7603_MAX_INTERFACES) {
		ret = -ENOSPC;
		goto out;
	}

	mt76_wr(dev, MT_MAC_ADDR0(mvif->idx),
		get_unaligned_le32(vif->addr));
	mt76_wr(dev, MT_MAC_ADDR1(mvif->idx),
		(get_unaligned_le16(vif->addr + 4) |
		 MT_MAC_ADDR1_VALID));

	/* for AP the BSSID is our own address; client BSSIDs are
	 * programmed later from bss_info_changed()
	 */
	if (vif->type == NL80211_IFTYPE_AP) {
		mt76_wr(dev, MT_BSSID0(mvif->idx),
			get_unaligned_le32(vif->addr));
		mt76_wr(dev, MT_BSSID1(mvif->idx),
			(get_unaligned_le16(vif->addr + 4) |
			 MT_BSSID1_VALID));
	}

	/* reserved WCIDs are allocated from the top, one per vif */
	idx = MT7603_WTBL_RESERVED - 1 - mvif->idx;
	dev->vif_mask |= BIT(mvif->idx);
	mvif->sta.wcid.idx = idx;
	mvif->sta.wcid.hw_key_idx = -1;

	eth_broadcast_addr(bc_addr);
	mt7603_wtbl_init(dev, idx, mvif->idx, bc_addr);

	mtxq = (struct mt76_txq *)vif->txq->drv_priv;
	mtxq->wcid = &mvif->sta.wcid;
	mt76_txq_init(&dev->mt76, vif->txq);
	rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);

out:
	mutex_unlock(&dev->mt76.mutex);

	return ret;
}

/* ieee80211_ops::remove_interface — undo add_interface: clear the MAC/BSSID
 * registers, stop beaconing for this slot and release the WCID/txq.
 */
static void
mt7603_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
	struct mt7603_dev *dev = hw->priv;
	int idx = mvif->sta.wcid.idx;

	mt76_wr(dev, MT_MAC_ADDR0(mvif->idx), 0);
	mt76_wr(dev, MT_MAC_ADDR1(mvif->idx), 0);
	mt76_wr(dev, MT_BSSID0(mvif->idx), 0);
	mt76_wr(dev, MT_BSSID1(mvif->idx), 0);
	mt7603_beacon_set_timer(dev, mvif->idx, 0);

	rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
	mt76_txq_remove(&dev->mt76, vif->txq);

	mutex_lock(&dev->mt76.mutex);
	dev->vif_mask &= ~BIT(mvif->idx);
	mutex_unlock(&dev->mt76.mutex);
}

/* Reset energy-detect CCA (EDCCA) state after a channel change. */
static void
mt7603_init_edcca(struct mt7603_dev *dev)
{
	/* Set lower signal level to -65dBm */
	mt76_rmw_field(dev, MT_RXTD(8), MT_RXTD_8_LOWER_SIGNAL, 0x23);

	/* clear previous energy detect monitor results */
	mt76_rr(dev, MT_MIB_STAT_ED);

	if (dev->ed_monitor)
		mt76_set(dev, MT_MIB_CTL, MT_MIB_CTL_ED_TIME);
	else
		mt76_clear(dev, MT_MIB_CTL, MT_MIB_CTL_ED_TIME);

	dev->ed_strict_mode = 0xff;
	dev->ed_strong_signal = 0;
	dev->ed_time = ktime_get_boottime();

	mt7603_edcca_set_strict(dev, false);
}

/* Switch the hardware to a new channel: stop the MAC, program the MCU and
 * timing, reload per-band RSSI calibration from EEPROM, then restart.
 * On MCU failure the MAC stays stopped but mac_work is restarted so the
 * watchdog can attempt recovery.
 */
static int
mt7603_set_channel(struct mt7603_dev *dev, struct cfg80211_chan_def *def)
{
	u8 *rssi_data = (u8 *)dev->mt76.eeprom.data;
	int idx, ret;
	u8 bw = MT_BW_20;
	bool failed = false;

	cancel_delayed_work_sync(&dev->mac_work);

	mutex_lock(&dev->mt76.mutex);
	set_bit(MT76_RESET, &dev->mt76.state);

	mt76_set_channel(&dev->mt76);
	mt7603_mac_stop(dev);

	if (def->width == NL80211_CHAN_WIDTH_40)
		bw = MT_BW_40;

	dev->mt76.chandef = *def;
	mt76_rmw_field(dev, MT_AGG_BWCR, MT_AGG_BWCR_BW, bw);
	ret = mt7603_mcu_set_channel(dev);
	if (ret) {
		failed = true;
		goto out;
	}

	/* pick the per-band RSSI offset block in the EEPROM data */
	if (def->chan->band == NL80211_BAND_5GHZ) {
		idx = 1;
		rssi_data += MT_EE_RSSI_OFFSET_5G;
	} else {
		idx = 0;
		rssi_data += MT_EE_RSSI_OFFSET_2G;
	}

	memcpy(dev->rssi_offset, rssi_data, sizeof(dev->rssi_offset));

	/* bit 0: band, bits 1+: channel index within the band's array */
	idx |= (def->chan -
		mt76_hw(dev)->wiphy->bands[def->chan->band]->channels) << 1;
	mt76_wr(dev, MT_WF_RMAC_CH_FREQ, idx);
	mt7603_mac_set_timing(dev);
	mt7603_mac_start(dev);

	clear_bit(MT76_RESET, &dev->mt76.state);

	mt76_txq_schedule_all(&dev->mt76);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work,
				     MT7603_WATCHDOG_TIME);

	/* reset channel stats */
	mt76_clear(dev, MT_MIB_CTL, MT_MIB_CTL_READ_CLR_DIS);
	mt76_set(dev, MT_MIB_CTL,
		 MT_MIB_CTL_CCA_NAV_TX | MT_MIB_CTL_PSCCA_TIME);
	mt76_rr(dev, MT_MIB_STAT_PSCCA);
	mt7603_cca_stats_reset(dev);

	dev->survey_time = ktime_get_boottime();

	mt7603_init_edcca(dev);

out:
	mutex_unlock(&dev->mt76.mutex);

	/* restart the watchdog outside the mutex if the switch failed */
	if (failed)
		mt7603_mac_work(&dev->mac_work.work);

	return ret;
}

/* ieee80211_ops::config — channel/power changes trigger a full channel
 * set; monitor-mode toggling only flips the "drop other unicast" filter.
 */
static int
mt7603_config(struct ieee80211_hw *hw, u32 changed)
{
	struct mt7603_dev *dev = hw->priv;
	int ret = 0;

	if (changed & (IEEE80211_CONF_CHANGE_CHANNEL |
		       IEEE80211_CONF_CHANGE_POWER))
		ret = mt7603_set_channel(dev, &hw->conf.chandef);

	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
		mutex_lock(&dev->mt76.mutex);

		if (!(hw->conf.flags & IEEE80211_CONF_MONITOR))
			dev->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
		else
			dev->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;

		mt76_wr(dev, MT_WF_RFCR, dev->rxfilter);

		mutex_unlock(&dev->mt76.mutex);
	}

	return ret;
}

/* ieee80211_ops::configure_filter — translate mac80211 FIF_* flags into
 * MT_WF_RFCR hardware drop bits.  MT76_FILTER clears the given hw bits
 * and re-sets them only when the corresponding FIF flag is NOT requested.
 */
static void
mt7603_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
			unsigned int *total_flags, u64 multicast)
{
	struct mt7603_dev *dev = hw->priv;
	u32 flags = 0;

#define MT76_FILTER(_flag, _hw) do { \
		flags |= *total_flags & FIF_##_flag;			\
		dev->rxfilter &= ~(_hw);				\
		dev->rxfilter |= !(flags & FIF_##_flag) * (_hw);	\
	} while (0)

	/* these drop bits are always disabled */
	dev->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
			   MT_WF_RFCR_DROP_OTHER_BEACON |
			   MT_WF_RFCR_DROP_FRAME_REPORT |
			   MT_WF_RFCR_DROP_PROBEREQ |
			   MT_WF_RFCR_DROP_MCAST_FILTERED |
			   MT_WF_RFCR_DROP_MCAST |
			   MT_WF_RFCR_DROP_BCAST |
			   MT_WF_RFCR_DROP_DUPLICATE |
			   MT_WF_RFCR_DROP_A2_BSSID |
			   MT_WF_RFCR_DROP_UNWANTED_CTL |
			   MT_WF_RFCR_DROP_STBC_MULTI);

	MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_TIM |
			       MT_WF_RFCR_DROP_A3_MAC |
			       MT_WF_RFCR_DROP_A3_BSSID);

	MT76_FILTER(FCSFAIL, MT_WF_RFCR_DROP_FCSFAIL);

	MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS |
			     MT_WF_RFCR_DROP_RTS |
			     MT_WF_RFCR_DROP_CTL_RSV |
			     MT_WF_RFCR_DROP_NDPA);

	*total_flags = flags;
	mt76_wr(dev, MT_WF_RFCR, dev->rxfilter);
}

/* ieee80211_ops::bss_info_changed — program BSSID, slot time and beacon
 * interval when the BSS configuration changes.
 */
static void
mt7603_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			struct ieee80211_bss_conf *info, u32 changed)
{
	struct mt7603_dev *dev = hw->priv;
	struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;

	mutex_lock(&dev->mt76.mutex);

	if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BSSID)) {
		if (info->assoc || info->ibss_joined) {
			mt76_wr(dev, MT_BSSID0(mvif->idx),
				get_unaligned_le32(info->bssid));
			mt76_wr(dev, MT_BSSID1(mvif->idx),
				(get_unaligned_le16(info->bssid + 4) |
				 MT_BSSID1_VALID));
		} else {
			mt76_wr(dev, MT_BSSID0(mvif->idx), 0);
			mt76_wr(dev, MT_BSSID1(mvif->idx), 0);
		}
	}

	if (changed & BSS_CHANGED_ERP_SLOT) {
		int slottime = info->use_short_slot ? 9 : 20;

		if (slottime != dev->slottime) {
			dev->slottime = slottime;
			mt7603_mac_set_timing(dev);
		}
	}

	if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON_INT)) {
		/* 0 when beaconing is disabled, beacon_int otherwise */
		int beacon_int = !!info->enable_beacon * info->beacon_int;

		/* keep the pre-TBTT tasklet from racing with the timer
		 * reprogramming
		 */
		tasklet_disable(&dev->pre_tbtt_tasklet);
		mt7603_beacon_set_timer(dev, mvif->idx, beacon_int);
		tasklet_enable(&dev->pre_tbtt_tasklet);
	}

	mutex_unlock(&dev->mt76.mutex);
}

/* mt76 sta_state hook — allocate a WCID/WTBL entry for a new station.
 * Returns 0 or -ENOSPC when no WCID below MT7603_WTBL_STA is free.
 */
int
mt7603_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
	       struct ieee80211_sta *sta)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
	struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
	int idx;
	int ret = 0;

	idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7603_WTBL_STA - 1);
	if (idx < 0)
		return -ENOSPC;

	__skb_queue_head_init(&msta->psq);
	msta->ps = ~0;		/* "unknown" until the first PS transition */
	msta->smps = ~0;
	msta->wcid.sta = 1;
	msta->wcid.idx = idx;
	mt7603_wtbl_init(dev, idx, mvif->idx, sta->addr);
	mt7603_wtbl_set_ps(dev, msta, false);

	if (vif->type == NL80211_IFTYPE_AP)
		set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags);

	return ret;
}

/* mt76 sta_state hook — refresh WTBL capability fields on association. */
void
mt7603_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
		 struct ieee80211_sta *sta)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);

	mt7603_wtbl_update_cap(dev, sta);
}

/* mt76 sta_state hook — drop queued PS frames, flush pending hardware TX
 * for the WCID and clear its WTBL entry.
 */
void
mt7603_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
		  struct ieee80211_sta *sta)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
	/* NOTE(review): this cast assumes struct mt76_wcid is the first
	 * member of struct mt7603_sta — confirm against mt7603.h
	 */
	struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;

	spin_lock_bh(&dev->ps_lock);
	__skb_queue_purge(&msta->psq);
	mt7603_filter_tx(dev, wcid->idx, true);
	spin_unlock_bh(&dev->ps_lock);

	mt7603_wtbl_clear(dev, wcid->idx);
}

/* Push previously buffered PS frames straight to their hardware queues. */
static void
mt7603_ps_tx_list(struct mt7603_dev *dev, struct sk_buff_head *list)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(list)) != NULL)
		mt76_tx_queue_skb_raw(dev, skb_get_queue_mapping(skb),
				      skb, 0);
}

/* mt76 PS-state hook — update the WTBL PS bit; on wakeup, flush all frames
 * that were buffered while the station slept.
 */
void
mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
	struct sk_buff_head list;

	mt76_stop_tx_queues(&dev->mt76, sta, false);
	mt7603_wtbl_set_ps(dev, msta, ps);
	if (ps)
		return;

	__skb_queue_head_init(&list);

	/* splice under the lock, transmit outside it */
	spin_lock_bh(&dev->ps_lock);
	skb_queue_splice_tail_init(&msta->psq, &list);
	spin_unlock_bh(&dev->ps_lock);

	mt7603_ps_tx_list(dev, &list);
}

/* ieee80211_ops::release_buffered_frames — serve a PS-Poll/U-APSD request
 * first from the driver-private psq, then let mac80211 release the rest.
 */
static void
mt7603_release_buffered_frames(struct ieee80211_hw *hw,
			       struct ieee80211_sta *sta,
			       u16 tids, int nframes,
			       enum ieee80211_frame_release_type reason,
			       bool more_data)
{
	struct mt7603_dev *dev = hw->priv;
	struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
	struct sk_buff_head list;
	struct sk_buff *skb, *tmp;

	__skb_queue_head_init(&list);

	spin_lock_bh(&dev->ps_lock);
	skb_queue_walk_safe(&msta->psq, skb, tmp) {
		if (!nframes)
			break;

		if (!(tids & BIT(skb->priority)))
			continue;

		skb_set_queue_mapping(skb, MT_TXQ_PSD);
		__skb_unlink(skb, &msta->psq);
		__skb_queue_tail(&list, skb);
		nframes--;
	}
	spin_unlock_bh(&dev->ps_lock);

	mt7603_ps_tx_list(dev, &list);

	if (nframes)
		mt76_release_buffered_frames(hw, sta, tids, nframes, reason,
					     more_data);
}

/* ieee80211_ops::set_key — program (or clear) a TKIP/CCMP key into the
 * station's WTBL entry.  Unsupported ciphers fall back to software
 * encryption via -EOPNOTSUPP.
 */
static int
mt7603_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
	       struct ieee80211_vif *vif, struct ieee80211_sta *sta,
	       struct ieee80211_key_conf *key)
{
	struct mt7603_dev *dev = hw->priv;
	struct mt7603_vif *mvif = (struct mt7603_vif *)vif->drv_priv;
	struct mt7603_sta *msta = sta ? (struct mt7603_sta *)sta->drv_priv :
				  &mvif->sta;
	struct mt76_wcid *wcid = &msta->wcid;
	int idx = key->keyidx;

	/* fall back to sw encryption for unsupported ciphers */
	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
	case WLAN_CIPHER_SUITE_CCMP:
		break;
	default:
		return -EOPNOTSUPP;
	}

	/*
	 * The hardware does not support per-STA RX GTK, fall back
	 * to software mode for these.
	 */
	if ((vif->type == NL80211_IFTYPE_ADHOC ||
	     vif->type == NL80211_IFTYPE_MESH_POINT) &&
	    (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
	     key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return -EOPNOTSUPP;

	if (cmd == SET_KEY) {
		key->hw_key_idx = wcid->idx;
		wcid->hw_key_idx = idx;
	} else {
		if (idx == wcid->hw_key_idx)
			wcid->hw_key_idx = -1;

		/* NULL key tells the WTBL helper to remove the entry */
		key = NULL;
	}
	mt76_wcid_key_setup(&dev->mt76, wcid, key);

	return mt7603_wtbl_set_key(dev, wcid->idx, key);
}

/* ieee80211_ops::conf_tx — program WMM parameters (TXOP/AIFSN/CWmin/CWmax)
 * for one AC.  The MAC is stopped while the registers are rewritten.
 */
static int
mt7603_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
	       const struct ieee80211_tx_queue_params *params)
{
	struct mt7603_dev *dev = hw->priv;
	u16 cw_min = (1 << 5) - 1;	/* defaults when params give 0 */
	u16 cw_max = (1 << 10) - 1;
	u32 val;

	queue = dev->mt76.q_tx[queue].hw_idx;

	if (params->cw_min)
		cw_min = params->cw_min;
	if (params->cw_max)
		cw_max = params->cw_max;

	mutex_lock(&dev->mt76.mutex);
	mt7603_mac_stop(dev);

	val = mt76_rr(dev, MT_WMM_TXOP(queue));
	val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(queue));
	val |= params->txop << MT_WMM_TXOP_SHIFT(queue);
	mt76_wr(dev, MT_WMM_TXOP(queue), val);

	val = mt76_rr(dev, MT_WMM_AIFSN);
	val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(queue));
	val |= params->aifs << MT_WMM_AIFSN_SHIFT(queue);
	mt76_wr(dev, MT_WMM_AIFSN, val);

	val = mt76_rr(dev, MT_WMM_CWMIN);
	val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(queue));
	val |= cw_min << MT_WMM_CWMIN_SHIFT(queue);
	mt76_wr(dev, MT_WMM_CWMIN, val);

	val = mt76_rr(dev, MT_WMM_CWMAX(queue));
	val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(queue));
	val |= cw_max << MT_WMM_CWMAX_SHIFT(queue);
	mt76_wr(dev, MT_WMM_CWMAX(queue), val);

	mt7603_mac_start(dev);
	mutex_unlock(&dev->mt76.mutex);

	return 0;
}

/* ieee80211_ops::sw_scan_start — stop beaconing while scanning. */
static void
mt7603_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
	       const u8 *mac)
{
	struct mt7603_dev *dev = hw->priv;

	set_bit(MT76_SCANNING, &dev->mt76.state);
	mt7603_beacon_set_timer(dev, -1, 0);
}

/* ieee80211_ops::sw_scan_complete — restore the beacon timer. */
static void
mt7603_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct mt7603_dev *dev = hw->priv;

	clear_bit(MT76_SCANNING, &dev->mt76.state);
	mt7603_beacon_set_timer(dev, -1, dev->beacon_int);
}

/* ieee80211_ops::flush — intentionally a no-op for this hardware. */
static void
mt7603_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
	     u32 queues, bool drop)
{
}

/* ieee80211_ops::ampdu_action — RX aggregation is handled by the mt76 core
 * reorder buffer; TX aggregation state is programmed into the WTBL
 * (ba_size -1 tears the session down).
 */
static int
mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		    struct ieee80211_ampdu_params *params)
{
	enum ieee80211_ampdu_mlme_action action = params->action;
	struct mt7603_dev *dev = hw->priv;
	struct ieee80211_sta *sta = params->sta;
	struct ieee80211_txq *txq = sta->txq[params->tid];
	struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
	u16 tid = params->tid;
	u16 *ssn = &params->ssn;
	u8 ba_size = params->buf_size;
	struct mt76_txq *mtxq;

	if (!txq)
		return -EINVAL;

	mtxq = (struct mt76_txq *)txq->drv_priv;

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, *ssn,
				   params->buf_size);
		mt7603_mac_rx_ba_reset(dev, sta->addr, tid);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		mtxq->aggr = true;
		mtxq->send_bar = false;
		mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, ba_size);
		break;
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
		mtxq->aggr = false;
		ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
		mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, -1);
		break;
	case IEEE80211_AMPDU_TX_START:
		mtxq->agg_ssn = *ssn << 4;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;
	case IEEE80211_AMPDU_TX_STOP_CONT:
		mtxq->aggr = false;
		mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, -1);
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;
	}

	return 0;
}

/* ieee80211_ops::sta_rate_tbl_update — copy the mac80211 rate table into
 * the driver's per-station cache and push it to the WTBL.
 */
static void
mt7603_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta)
{
	struct mt7603_dev *dev = hw->priv;
	struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
	/* NOTE(review): sta_rates is dereferenced below without a NULL
	 * check and outside an explicit rcu_read_lock section — confirm
	 * mac80211 guarantees a valid rate table in this callback.
	 */
	struct ieee80211_sta_rates *sta_rates = rcu_dereference(sta->rates);
	int i;

	spin_lock_bh(&dev->mt76.lock);
	for (i = 0; i < ARRAY_SIZE(msta->rates); i++) {
		msta->rates[i].idx = sta_rates->rate[i].idx;
		msta->rates[i].count = sta_rates->rate[i].count;
		msta->rates[i].flags = sta_rates->rate[i].flags;

		if (msta->rates[i].idx < 0 || !msta->rates[i].count)
			break;
	}
	msta->n_rates = i;
	mt7603_wtbl_set_rates(dev, msta, NULL, msta->rates);
	msta->rate_probe = false;
	mt7603_wtbl_set_smps(dev, msta,
			     sta->smps_mode == IEEE80211_SMPS_DYNAMIC);
	spin_unlock_bh(&dev->mt76.lock);
}

/* ieee80211_ops::set_coverage_class — adjust ACK/slot timing. */
static void
mt7603_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
{
	struct mt7603_dev *dev = hw->priv;

	dev->coverage_class = coverage_class;
	mt7603_mac_set_timing(dev);
}

/* ieee80211_ops::tx — pick the WCID for the frame (station, else vif's
 * reserved WCID, else the global one) and hand it to the mt76 TX path.
 */
static void mt7603_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
		      struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	struct mt7603_dev *dev = hw->priv;
	struct mt76_wcid *wcid = &dev->global_sta.wcid;

	if (control->sta) {
		struct mt7603_sta *msta;

		msta = (struct mt7603_sta *)control->sta->drv_priv;
		wcid = &msta->wcid;
	} else if (vif) {
		struct mt7603_vif *mvif;

		mvif = (struct mt7603_vif *)vif->drv_priv;
		wcid = &mvif->sta.wcid;
	}

	mt76_tx(&dev->mt76, control->sta, wcid, skb);
}

/* ieee80211_ops::set_tim — nothing to do; always succeeds. */
static int
mt7603_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
{
	return 0;
}

const struct ieee80211_ops mt7603_ops = {
	.tx = mt7603_tx,
	.start = mt7603_start,
	.stop = mt7603_stop,
	.add_interface = mt7603_add_interface,
	.remove_interface = mt7603_remove_interface,
	.config = mt7603_config,
	.configure_filter = mt7603_configure_filter,
	.bss_info_changed = mt7603_bss_info_changed,
	.sta_state = mt76_sta_state,
	.set_key = mt7603_set_key,
	.conf_tx = mt7603_conf_tx,
	.sw_scan_start = mt7603_sw_scan,
	.sw_scan_complete = mt7603_sw_scan_complete,
	.flush = mt7603_flush,
	.ampdu_action = mt7603_ampdu_action,
	.get_txpower = mt76_get_txpower,
	.wake_tx_queue = mt76_wake_tx_queue,
	.sta_rate_tbl_update = mt7603_sta_rate_tbl_update,
	.release_buffered_frames = mt7603_release_buffered_frames,
	.set_coverage_class = mt7603_set_coverage_class,
	.set_tim = mt7603_set_tim,
	.get_survey = mt76_get_survey,
};

MODULE_LICENSE("Dual BSD/GPL");

/* Register the platform (SoC) front-end and, when CONFIG_PCI is set, the
 * PCI front-end; the platform driver is rolled back on PCI failure.
 */
static int __init mt7603_init(void)
{
	int ret;

	ret = platform_driver_register(&mt76_wmac_driver);
	if (ret)
		return ret;

#ifdef CONFIG_PCI
	ret = pci_register_driver(&mt7603_pci_driver);
	if (ret)
		platform_driver_unregister(&mt76_wmac_driver);
#endif
	return ret;
}
/* Module exit: tear down in reverse order of mt7603_init() — the PCI
 * driver first (only registered when CONFIG_PCI is set), then the SoC
 * platform driver for the MT7628/MT7688 WMAC.
 */
static void __exit mt7603_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mt7603_pci_driver);
#endif
	platform_driver_unregister(&mt76_wmac_driver);
}

module_init(mt7603_init);
module_exit(mt7603_exit);
+483
drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
··· 1 + /* SPDX-License-Identifier: ISC */ 2 + 3 + #include <linux/firmware.h> 4 + #include "mt7603.h" 5 + #include "mcu.h" 6 + #include "eeprom.h" 7 + 8 + #define MCU_SKB_RESERVE 8 9 + 10 + struct mt7603_fw_trailer { 11 + char fw_ver[10]; 12 + char build_date[15]; 13 + __le32 dl_len; 14 + } __packed; 15 + 16 + static int 17 + __mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb, int cmd, 18 + int query, int *wait_seq) 19 + { 20 + int hdrlen = dev->mcu_running ? sizeof(struct mt7603_mcu_txd) : 12; 21 + struct mt76_dev *mdev = &dev->mt76; 22 + struct mt7603_mcu_txd *txd; 23 + u8 seq; 24 + 25 + if (!skb) 26 + return -EINVAL; 27 + 28 + seq = ++mdev->mmio.mcu.msg_seq & 0xf; 29 + if (!seq) 30 + seq = ++mdev->mmio.mcu.msg_seq & 0xf; 31 + 32 + txd = (struct mt7603_mcu_txd *)skb_push(skb, hdrlen); 33 + memset(txd, 0, hdrlen); 34 + 35 + txd->len = cpu_to_le16(skb->len); 36 + if (cmd == -MCU_CMD_FW_SCATTER) 37 + txd->pq_id = cpu_to_le16(MCU_PORT_QUEUE_FW); 38 + else 39 + txd->pq_id = cpu_to_le16(MCU_PORT_QUEUE); 40 + txd->pkt_type = MCU_PKT_ID; 41 + txd->seq = seq; 42 + 43 + if (cmd < 0) { 44 + txd->cid = -cmd; 45 + } else { 46 + txd->cid = MCU_CMD_EXT_CID; 47 + txd->ext_cid = cmd; 48 + if (query != MCU_Q_NA) 49 + txd->ext_cid_ack = 1; 50 + } 51 + 52 + txd->set_query = query; 53 + 54 + if (wait_seq) 55 + *wait_seq = seq; 56 + 57 + return mt76_tx_queue_skb_raw(dev, MT_TXQ_MCU, skb, 0); 58 + } 59 + 60 + static int 61 + mt7603_mcu_msg_send(struct mt7603_dev *dev, struct sk_buff *skb, int cmd, 62 + int query) 63 + { 64 + struct mt76_dev *mdev = &dev->mt76; 65 + unsigned long expires = jiffies + 3 * HZ; 66 + struct mt7603_mcu_rxd *rxd; 67 + int ret, seq; 68 + 69 + mutex_lock(&mdev->mmio.mcu.mutex); 70 + 71 + ret = __mt7603_mcu_msg_send(dev, skb, cmd, query, &seq); 72 + if (ret) 73 + goto out; 74 + 75 + while (1) { 76 + bool check_seq = false; 77 + 78 + skb = mt76_mcu_get_response(&dev->mt76, expires); 79 + if (!skb) { 80 + dev_err(mdev->dev, 81 + "MCU message %d (seq 
%d) timed out\n", 82 + cmd, seq); 83 + dev->mcu_hang = MT7603_WATCHDOG_TIMEOUT; 84 + ret = -ETIMEDOUT; 85 + break; 86 + } 87 + 88 + rxd = (struct mt7603_mcu_rxd *)skb->data; 89 + if (seq == rxd->seq) 90 + check_seq = true; 91 + 92 + dev_kfree_skb(skb); 93 + 94 + if (check_seq) 95 + break; 96 + } 97 + 98 + out: 99 + mutex_unlock(&mdev->mmio.mcu.mutex); 100 + 101 + return ret; 102 + } 103 + 104 + static int 105 + mt7603_mcu_init_download(struct mt7603_dev *dev, u32 addr, u32 len) 106 + { 107 + struct { 108 + __le32 addr; 109 + __le32 len; 110 + __le32 mode; 111 + } req = { 112 + .addr = cpu_to_le32(addr), 113 + .len = cpu_to_le32(len), 114 + .mode = cpu_to_le32(BIT(31)), 115 + }; 116 + struct sk_buff *skb = mt7603_mcu_msg_alloc(&req, sizeof(req)); 117 + 118 + return mt7603_mcu_msg_send(dev, skb, -MCU_CMD_TARGET_ADDRESS_LEN_REQ, 119 + MCU_Q_NA); 120 + } 121 + 122 + static int 123 + mt7603_mcu_send_firmware(struct mt7603_dev *dev, const void *data, int len) 124 + { 125 + struct sk_buff *skb; 126 + int ret = 0; 127 + 128 + while (len > 0) { 129 + int cur_len = min_t(int, 4096 - sizeof(struct mt7603_mcu_txd), 130 + len); 131 + 132 + skb = mt7603_mcu_msg_alloc(data, cur_len); 133 + if (!skb) 134 + return -ENOMEM; 135 + 136 + ret = __mt7603_mcu_msg_send(dev, skb, -MCU_CMD_FW_SCATTER, 137 + MCU_Q_NA, NULL); 138 + if (ret) 139 + break; 140 + 141 + data += cur_len; 142 + len -= cur_len; 143 + } 144 + 145 + return ret; 146 + } 147 + 148 + static int 149 + mt7603_mcu_start_firmware(struct mt7603_dev *dev, u32 addr) 150 + { 151 + struct { 152 + __le32 override; 153 + __le32 addr; 154 + } req = { 155 + .override = cpu_to_le32(addr ? 
1 : 0), 156 + .addr = cpu_to_le32(addr), 157 + }; 158 + struct sk_buff *skb = mt7603_mcu_msg_alloc(&req, sizeof(req)); 159 + 160 + return mt7603_mcu_msg_send(dev, skb, -MCU_CMD_FW_START_REQ, 161 + MCU_Q_NA); 162 + } 163 + 164 + static int 165 + mt7603_mcu_restart(struct mt7603_dev *dev) 166 + { 167 + struct sk_buff *skb = mt7603_mcu_msg_alloc(NULL, 0); 168 + 169 + return mt7603_mcu_msg_send(dev, skb, -MCU_CMD_RESTART_DL_REQ, 170 + MCU_Q_NA); 171 + } 172 + 173 + static int 174 + mt7603_load_firmware(struct mt7603_dev *dev) 175 + { 176 + const struct firmware *fw; 177 + const struct mt7603_fw_trailer *hdr; 178 + const char *firmware; 179 + int dl_len; 180 + u32 addr, val; 181 + int ret; 182 + 183 + if (is_mt7628(dev)) { 184 + if (mt76xx_rev(dev) == MT7628_REV_E1) 185 + firmware = MT7628_FIRMWARE_E1; 186 + else 187 + firmware = MT7628_FIRMWARE_E2; 188 + } else { 189 + if (mt76xx_rev(dev) < MT7603_REV_E2) 190 + firmware = MT7603_FIRMWARE_E1; 191 + else 192 + firmware = MT7603_FIRMWARE_E2; 193 + } 194 + 195 + ret = request_firmware(&fw, firmware, dev->mt76.dev); 196 + if (ret) 197 + return ret; 198 + 199 + if (!fw || !fw->data || fw->size < sizeof(*hdr)) { 200 + dev_err(dev->mt76.dev, "Invalid firmware\n"); 201 + ret = -EINVAL; 202 + goto out; 203 + } 204 + 205 + hdr = (const struct mt7603_fw_trailer *)(fw->data + fw->size - 206 + sizeof(*hdr)); 207 + 208 + dev_info(dev->mt76.dev, "Firmware Version: %.10s\n", hdr->fw_ver); 209 + dev_info(dev->mt76.dev, "Build Time: %.15s\n", hdr->build_date); 210 + 211 + addr = mt7603_reg_map(dev, 0x50012498); 212 + mt76_wr(dev, addr, 0x5); 213 + mt76_wr(dev, addr, 0x5); 214 + udelay(1); 215 + 216 + /* switch to bypass mode */ 217 + mt76_rmw(dev, MT_SCH_4, MT_SCH_4_FORCE_QID, 218 + MT_SCH_4_BYPASS | FIELD_PREP(MT_SCH_4_FORCE_QID, 5)); 219 + 220 + val = mt76_rr(dev, MT_TOP_MISC2); 221 + if (val & BIT(1)) { 222 + dev_info(dev->mt76.dev, "Firmware already running...\n"); 223 + goto running; 224 + } 225 + 226 + if (!mt76_poll_msec(dev, 
MT_TOP_MISC2, BIT(0) | BIT(1), BIT(0), 500)) { 227 + dev_err(dev->mt76.dev, "Timeout waiting for ROM code to become ready\n"); 228 + ret = -EIO; 229 + goto out; 230 + } 231 + 232 + dl_len = le32_to_cpu(hdr->dl_len) + 4; 233 + ret = mt7603_mcu_init_download(dev, MCU_FIRMWARE_ADDRESS, dl_len); 234 + if (ret) { 235 + dev_err(dev->mt76.dev, "Download request failed\n"); 236 + goto out; 237 + } 238 + 239 + ret = mt7603_mcu_send_firmware(dev, fw->data, dl_len); 240 + if (ret) { 241 + dev_err(dev->mt76.dev, "Failed to send firmware to device\n"); 242 + goto out; 243 + } 244 + 245 + ret = mt7603_mcu_start_firmware(dev, MCU_FIRMWARE_ADDRESS); 246 + if (ret) { 247 + dev_err(dev->mt76.dev, "Failed to start firmware\n"); 248 + goto out; 249 + } 250 + 251 + if (!mt76_poll_msec(dev, MT_TOP_MISC2, BIT(1), BIT(1), 500)) { 252 + dev_err(dev->mt76.dev, "Timeout waiting for firmware to initialize\n"); 253 + ret = -EIO; 254 + goto out; 255 + } 256 + 257 + running: 258 + mt76_clear(dev, MT_SCH_4, MT_SCH_4_FORCE_QID | MT_SCH_4_BYPASS); 259 + 260 + mt76_set(dev, MT_SCH_4, BIT(8)); 261 + mt76_clear(dev, MT_SCH_4, BIT(8)); 262 + 263 + dev->mcu_running = true; 264 + dev_info(dev->mt76.dev, "firmware init done\n"); 265 + 266 + out: 267 + release_firmware(fw); 268 + 269 + return ret; 270 + } 271 + 272 + int mt7603_mcu_init(struct mt7603_dev *dev) 273 + { 274 + mutex_init(&dev->mt76.mmio.mcu.mutex); 275 + 276 + return mt7603_load_firmware(dev); 277 + } 278 + 279 + void mt7603_mcu_exit(struct mt7603_dev *dev) 280 + { 281 + mt7603_mcu_restart(dev); 282 + skb_queue_purge(&dev->mt76.mmio.mcu.res_q); 283 + } 284 + 285 + int mt7603_mcu_set_eeprom(struct mt7603_dev *dev) 286 + { 287 + static const u16 req_fields[] = { 288 + #define WORD(_start) \ 289 + _start, \ 290 + _start + 1 291 + #define GROUP_2G(_start) \ 292 + WORD(_start), \ 293 + WORD(_start + 2), \ 294 + WORD(_start + 4) 295 + 296 + MT_EE_NIC_CONF_0 + 1, 297 + WORD(MT_EE_NIC_CONF_1), 298 + MT_EE_WIFI_RF_SETTING, 299 + 
MT_EE_TX_POWER_DELTA_BW40, 300 + MT_EE_TX_POWER_DELTA_BW80 + 1, 301 + MT_EE_TX_POWER_EXT_PA_5G, 302 + MT_EE_TEMP_SENSOR_CAL, 303 + GROUP_2G(MT_EE_TX_POWER_0_START_2G), 304 + GROUP_2G(MT_EE_TX_POWER_1_START_2G), 305 + WORD(MT_EE_TX_POWER_CCK), 306 + WORD(MT_EE_TX_POWER_OFDM_2G_6M), 307 + WORD(MT_EE_TX_POWER_OFDM_2G_24M), 308 + WORD(MT_EE_TX_POWER_OFDM_2G_54M), 309 + WORD(MT_EE_TX_POWER_HT_BPSK_QPSK), 310 + WORD(MT_EE_TX_POWER_HT_16_64_QAM), 311 + WORD(MT_EE_TX_POWER_HT_64_QAM), 312 + MT_EE_ELAN_RX_MODE_GAIN, 313 + MT_EE_ELAN_RX_MODE_NF, 314 + MT_EE_ELAN_RX_MODE_P1DB, 315 + MT_EE_ELAN_BYPASS_MODE_GAIN, 316 + MT_EE_ELAN_BYPASS_MODE_NF, 317 + MT_EE_ELAN_BYPASS_MODE_P1DB, 318 + WORD(MT_EE_STEP_NUM_NEG_6_7), 319 + WORD(MT_EE_STEP_NUM_NEG_4_5), 320 + WORD(MT_EE_STEP_NUM_NEG_2_3), 321 + WORD(MT_EE_STEP_NUM_NEG_0_1), 322 + WORD(MT_EE_REF_STEP_24G), 323 + WORD(MT_EE_STEP_NUM_PLUS_1_2), 324 + WORD(MT_EE_STEP_NUM_PLUS_3_4), 325 + WORD(MT_EE_STEP_NUM_PLUS_5_6), 326 + MT_EE_STEP_NUM_PLUS_7, 327 + MT_EE_XTAL_FREQ_OFFSET, 328 + MT_EE_XTAL_TRIM_2_COMP, 329 + MT_EE_XTAL_TRIM_3_COMP, 330 + MT_EE_XTAL_WF_RFCAL, 331 + 332 + /* unknown fields below */ 333 + WORD(0x24), 334 + 0x34, 335 + 0x39, 336 + 0x3b, 337 + WORD(0x42), 338 + WORD(0x9e), 339 + 0xf2, 340 + WORD(0xf8), 341 + 0xfa, 342 + 0x12e, 343 + WORD(0x130), WORD(0x132), WORD(0x134), WORD(0x136), 344 + WORD(0x138), WORD(0x13a), WORD(0x13c), WORD(0x13e), 345 + 346 + #undef GROUP_2G 347 + #undef WORD 348 + 349 + }; 350 + struct req_data { 351 + u16 addr; 352 + u8 val; 353 + u8 pad; 354 + } __packed; 355 + struct { 356 + u8 buffer_mode; 357 + u8 len; 358 + u8 pad[2]; 359 + } req_hdr = { 360 + .buffer_mode = 1, 361 + .len = ARRAY_SIZE(req_fields) - 1, 362 + }; 363 + struct sk_buff *skb; 364 + struct req_data *data; 365 + const int size = 0xff * sizeof(struct req_data); 366 + u8 *eep = (u8 *)dev->mt76.eeprom.data; 367 + int i; 368 + 369 + BUILD_BUG_ON(ARRAY_SIZE(req_fields) * sizeof(*data) > size); 370 + 371 + skb = 
mt7603_mcu_msg_alloc(NULL, size + sizeof(req_hdr)); 372 + memcpy(skb_put(skb, sizeof(req_hdr)), &req_hdr, sizeof(req_hdr)); 373 + data = (struct req_data *)skb_put(skb, size); 374 + memset(data, 0, size); 375 + 376 + for (i = 0; i < ARRAY_SIZE(req_fields); i++) { 377 + data[i].addr = cpu_to_le16(req_fields[i]); 378 + data[i].val = eep[req_fields[i]]; 379 + data[i].pad = 0; 380 + } 381 + 382 + return mt7603_mcu_msg_send(dev, skb, MCU_EXT_CMD_EFUSE_BUFFER_MODE, 383 + MCU_Q_SET); 384 + } 385 + 386 + static int mt7603_mcu_set_tx_power(struct mt7603_dev *dev) 387 + { 388 + struct { 389 + u8 center_channel; 390 + u8 tssi; 391 + u8 temp_comp; 392 + u8 target_power[2]; 393 + u8 rate_power_delta[14]; 394 + u8 bw_power_delta; 395 + u8 ch_power_delta[6]; 396 + u8 temp_comp_power[17]; 397 + u8 reserved; 398 + } req = { 399 + .center_channel = dev->mt76.chandef.chan->hw_value, 400 + #define EEP_VAL(n) ((u8 *)dev->mt76.eeprom.data)[n] 401 + .tssi = EEP_VAL(MT_EE_NIC_CONF_1 + 1), 402 + .temp_comp = EEP_VAL(MT_EE_NIC_CONF_1), 403 + .target_power = { 404 + EEP_VAL(MT_EE_TX_POWER_0_START_2G + 2), 405 + EEP_VAL(MT_EE_TX_POWER_1_START_2G + 2) 406 + }, 407 + .bw_power_delta = EEP_VAL(MT_EE_TX_POWER_DELTA_BW40), 408 + .ch_power_delta = { 409 + EEP_VAL(MT_EE_TX_POWER_0_START_2G + 3), 410 + EEP_VAL(MT_EE_TX_POWER_0_START_2G + 4), 411 + EEP_VAL(MT_EE_TX_POWER_0_START_2G + 5), 412 + EEP_VAL(MT_EE_TX_POWER_1_START_2G + 3), 413 + EEP_VAL(MT_EE_TX_POWER_1_START_2G + 4), 414 + EEP_VAL(MT_EE_TX_POWER_1_START_2G + 5) 415 + }, 416 + #undef EEP_VAL 417 + }; 418 + struct sk_buff *skb; 419 + u8 *eep = (u8 *)dev->mt76.eeprom.data; 420 + 421 + memcpy(req.rate_power_delta, eep + MT_EE_TX_POWER_CCK, 422 + sizeof(req.rate_power_delta)); 423 + 424 + memcpy(req.temp_comp_power, eep + MT_EE_STEP_NUM_NEG_6_7, 425 + sizeof(req.temp_comp_power)); 426 + 427 + skb = mt7603_mcu_msg_alloc(&req, sizeof(req)); 428 + return mt7603_mcu_msg_send(dev, skb, MCU_EXT_CMD_SET_TX_POWER_CTRL, 429 + MCU_Q_SET); 430 + } 431 + 
432 + int mt7603_mcu_set_channel(struct mt7603_dev *dev) 433 + { 434 + struct cfg80211_chan_def *chandef = &dev->mt76.chandef; 435 + struct ieee80211_hw *hw = mt76_hw(dev); 436 + int n_chains = __sw_hweight8(dev->mt76.antenna_mask); 437 + struct { 438 + u8 control_chan; 439 + u8 center_chan; 440 + u8 bw; 441 + u8 tx_streams; 442 + u8 rx_streams; 443 + u8 _res0[7]; 444 + u8 txpower[21]; 445 + u8 _res1[3]; 446 + } req = { 447 + .control_chan = chandef->chan->hw_value, 448 + .center_chan = chandef->chan->hw_value, 449 + .bw = MT_BW_20, 450 + .tx_streams = n_chains, 451 + .rx_streams = n_chains, 452 + }; 453 + struct sk_buff *skb; 454 + s8 tx_power; 455 + int ret; 456 + int i; 457 + 458 + if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_40) { 459 + req.bw = MT_BW_40; 460 + if (chandef->center_freq1 > chandef->chan->center_freq) 461 + req.center_chan += 2; 462 + else 463 + req.center_chan -= 2; 464 + } 465 + 466 + tx_power = hw->conf.power_level * 2; 467 + if (dev->mt76.antenna_mask == 3) 468 + tx_power -= 6; 469 + tx_power = min(tx_power, dev->tx_power_limit); 470 + 471 + dev->mt76.txpower_cur = tx_power; 472 + 473 + for (i = 0; i < ARRAY_SIZE(req.txpower); i++) 474 + req.txpower[i] = tx_power; 475 + 476 + skb = mt7603_mcu_msg_alloc(&req, sizeof(req)); 477 + ret = mt7603_mcu_msg_send(dev, skb, MCU_EXT_CMD_CHANNEL_SWITCH, 478 + MCU_Q_SET); 479 + if (ret) 480 + return ret; 481 + 482 + return mt7603_mcu_set_tx_power(dev); 483 + }
+110
drivers/net/wireless/mediatek/mt76/mt7603/mcu.h
/* SPDX-License-Identifier: ISC */

#ifndef __MT7603_MCU_H
#define __MT7603_MCU_H

/* TX descriptor prepended to every message sent to the MCU */
struct mt7603_mcu_txd {
	__le16 len;
	__le16 pq_id;		/* MCU_PORT_QUEUE or MCU_PORT_QUEUE_FW */

	u8 cid;			/* command id, or MCU_CMD_EXT_CID */
	u8 pkt_type;		/* always MCU_PKT_ID */
	u8 set_query;		/* MCU_Q_* */
	u8 seq;			/* 4-bit sequence number, never 0 */

	u8 uc_d2b0_rev;
	u8 ext_cid;		/* extended command id when cid == EXT_CID */
	u8 uc_d2b2_rev;
	u8 ext_cid_ack;		/* request an ack event for ext commands */

	u32 au4_d3_to_d7_rev[5];	/* reserved */
} __packed __aligned(4);

/* Header of every response/event received from the MCU */
struct mt7603_mcu_rxd {
	__le16 len;
	__le16 pkt_type_id;

	u8 eid;
	u8 seq;			/* echoes the request's sequence number */
	__le16 __rsv;

	u8 ext_eid;
	u8 __rsv1[3];
};

#define MCU_PKT_ID			0xa0
#define MCU_PORT_QUEUE			0x8000
#define MCU_PORT_QUEUE_FW		0xc000

#define MCU_FIRMWARE_ADDRESS		0x100000

/* set_query field values */
enum {
	MCU_Q_QUERY,
	MCU_Q_SET,
	MCU_Q_RESERVED,
	MCU_Q_NA
};

/* Raw MCU command ids (passed negated to the send helpers) */
enum {
	MCU_CMD_TARGET_ADDRESS_LEN_REQ = 0x01,
	MCU_CMD_FW_START_REQ = 0x02,
	MCU_CMD_INIT_ACCESS_REG = 0x3,
	MCU_CMD_PATCH_START_REQ = 0x05,
	MCU_CMD_PATCH_FINISH_REQ = 0x07,
	MCU_CMD_PATCH_SEM_CONTROL = 0x10,
	/* NOTE(review): HIF_LOOPBACK and CH_PRIVILEGE share 0x20 —
	 * looks copied from the vendor driver, confirm
	 */
	MCU_CMD_HIF_LOOPBACK = 0x20,
	MCU_CMD_CH_PRIVILEGE = 0x20,
	MCU_CMD_ACCESS_REG = 0xC2,
	MCU_CMD_EXT_CID = 0xED,
	MCU_CMD_FW_SCATTER = 0xEE,
	MCU_CMD_RESTART_DL_REQ = 0xEF,
};

/* Extended command ids (used with MCU_CMD_EXT_CID) */
enum {
	MCU_EXT_CMD_RF_REG_ACCESS = 0x02,
	MCU_EXT_CMD_RF_TEST = 0x04,
	MCU_EXT_CMD_RADIO_ON_OFF_CTRL = 0x05,
	MCU_EXT_CMD_WIFI_RX_DISABLE = 0x06,
	MCU_EXT_CMD_PM_STATE_CTRL = 0x07,
	MCU_EXT_CMD_CHANNEL_SWITCH = 0x08,
	MCU_EXT_CMD_NIC_CAPABILITY = 0x09,
	MCU_EXT_CMD_PWR_SAVING = 0x0A,
	MCU_EXT_CMD_MULTIPLE_REG_ACCESS = 0x0E,
	MCU_EXT_CMD_AP_PWR_SAVING_CAPABILITY = 0xF,
	MCU_EXT_CMD_SEC_ADDREMOVE_KEY = 0x10,
	MCU_EXT_CMD_SET_TX_POWER_CTRL = 0x11,
	MCU_EXT_CMD_FW_LOG_2_HOST = 0x13,
	MCU_EXT_CMD_PS_RETRIEVE_START = 0x14,
	MCU_EXT_CMD_LED_CTRL = 0x17,
	MCU_EXT_CMD_PACKET_FILTER = 0x18,
	MCU_EXT_CMD_PWR_MGT_BIT_WIFI = 0x1B,
	MCU_EXT_CMD_EFUSE_BUFFER_MODE = 0x21,
	MCU_EXT_CMD_THERMAL_PROTECT = 0x23,
	MCU_EXT_CMD_EDCA_SET = 0x27,
	MCU_EXT_CMD_SLOT_TIME_SET = 0x28,
	MCU_EXT_CMD_CONFIG_INTERNAL_SETTING = 0x29,
	MCU_EXT_CMD_NOA_OFFLOAD_CTRL = 0x2B,
	MCU_EXT_CMD_GET_THEMAL_SENSOR = 0x2C,
	MCU_EXT_CMD_WAKEUP_OPTION = 0x2E,
	MCU_EXT_CMD_AC_QUEUE_CONTROL = 0x31,
	MCU_EXT_CMD_BCN_UPDATE = 0x33
};

/* Extended event ids received from the MCU */
enum {
	MCU_EXT_EVENT_CMD_RESULT = 0x0,
	MCU_EXT_EVENT_RF_REG_ACCESS = 0x2,
	MCU_EXT_EVENT_MULTI_CR_ACCESS = 0x0E,
	MCU_EXT_EVENT_FW_LOG_2_HOST = 0x13,
	MCU_EXT_EVENT_BEACON_LOSS = 0x1A,
	MCU_EXT_EVENT_THERMAL_PROTECT = 0x22,
	MCU_EXT_EVENT_BCN_UPDATE = 0x31,
};

/* Allocate a message skb with headroom for the MCU TX descriptor. */
static inline struct sk_buff *
mt7603_mcu_msg_alloc(const void *data, int len)
{
	return mt76_mcu_msg_alloc(data, sizeof(struct mt7603_mcu_txd),
				  len, 0);
}

#endif
+253
drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
/* SPDX-License-Identifier: ISC */

#ifndef __MT7603_H
#define __MT7603_H

#include <linux/interrupt.h>
#include <linux/ktime.h>
#include "../mt76.h"
#include "regs.h"

#define MT7603_MAX_INTERFACES	4
#define MT7603_WTBL_SIZE	128
/* last WTBL entry is reserved; the per-vif entries sit directly below it */
#define MT7603_WTBL_RESERVED	(MT7603_WTBL_SIZE - 1)
#define MT7603_WTBL_STA		(MT7603_WTBL_RESERVED - MT7603_MAX_INTERFACES)

#define MT7603_RATE_RETRY	2

#define MT7603_RX_RING_SIZE	128

#define MT7603_FIRMWARE_E1	"mt7603_e1.bin"
#define MT7603_FIRMWARE_E2	"mt7603_e2.bin"
#define MT7628_FIRMWARE_E1	"mt7628_e1.bin"
#define MT7628_FIRMWARE_E2	"mt7628_e2.bin"

#define MT7603_EEPROM_SIZE	1024

#define MT_AGG_SIZE_LIMIT(_n)	(((_n) + 1) * 4)

/* NOTE(review): unit comment looks off for a pre-TBTT offset — confirm */
#define MT7603_PRE_TBTT_TIME	5000 /* ms */

#define MT7603_WATCHDOG_TIME	100 /* ms */
#define MT7603_WATCHDOG_TIMEOUT	10 /* number of checks */

#define MT7603_EDCCA_BLOCK_TH	10

#define MT7603_CFEND_RATE_DEFAULT	0x69 /* chip default (24M) */
#define MT7603_CFEND_RATE_11B		0x03 /* 11B LP, 11M */

struct mt7603_vif;
struct mt7603_sta;

/* Chip revision values as read from MT_HW_CHIPID/MT_HW_REV */
enum {
	MT7603_REV_E1 = 0x00,
	MT7603_REV_E2 = 0x10,
	MT7628_REV_E1 = 0x8a00,
};

enum mt7603_bw {
	MT_BW_20,
	MT_BW_40,
	MT_BW_80,
};

/* Per-station driver state, embedded in ieee80211_sta->drv_priv */
struct mt7603_sta {
	struct mt76_wcid wcid; /* must be first */

	struct mt7603_vif *vif;

	struct sk_buff_head psq;	/* frames held while in powersave */

	struct ieee80211_tx_rate rates[8];
	u8 rate_count;
	u8 n_rates;

	u8 rate_probe;
	u8 smps;

	u8 ps;
};

/* Per-interface driver state, embedded in ieee80211_vif->drv_priv */
struct mt7603_vif {
	struct mt7603_sta sta; /* must be first */

	u8 idx;
};

/* Reasons tracked by the watchdog for triggering a hardware reset */
enum mt7603_reset_cause {
	RESET_CAUSE_TX_HANG,
	RESET_CAUSE_TX_BUSY,
	RESET_CAUSE_RX_BUSY,
	RESET_CAUSE_BEACON_STUCK,
	RESET_CAUSE_RX_PSE_BUSY,
	RESET_CAUSE_MCU_HANG,
	RESET_CAUSE_RESET_FAILED,
	__RESET_CAUSE_MAX
};

struct mt7603_dev {
	struct mt76_dev mt76; /* must be first */

	const struct mt76_bus_ops *bus_ops;

	u32 rxfilter;

	u8 vif_mask;		/* bitmap of allocated interface indexes */

	struct mt7603_sta global_sta;

	/* cached AGC registers and false-CCA counters for sensitivity
	 * adjustment
	 */
	u32 agc0, agc3;
	u32 false_cca_ofdm, false_cca_cck;
	unsigned long last_cca_adj;

	u8 rssi_offset[3];

	u8 slottime;
	s16 coverage_class;

	s8 tx_power_limit;

	ktime_t survey_time;
	ktime_t ed_time;
	int beacon_int;

	struct mt76_queue q_rx;

	spinlock_t ps_lock;

	u8 mac_work_count;

	u8 mcu_running;
	u8 ed_monitor;		/* energy-detect CCA monitoring enabled */

	s8 ed_trigger;
	u8 ed_strict_mode;
	u8 ed_strong_signal;

	s8 sensitivity;

	u8 beacon_mask;

	/* watchdog stall counters, one per monitored condition */
	u8 beacon_check;
	u8 tx_hang_check;
	u8 tx_dma_check;
	u8 rx_dma_check;
	u8 rx_pse_check;
	u8 mcu_hang;

	enum mt7603_reset_cause cur_reset_cause;

	u16 tx_dma_idx[4];
	u16 rx_dma_idx;

	u32 reset_test;

	unsigned int reset_cause[__RESET_CAUSE_MAX];

	struct delayed_work mac_work;
	struct tasklet_struct tx_tasklet;
	struct tasklet_struct pre_tbtt_tasklet;
};

extern const struct mt76_driver_ops mt7603_drv_ops;
extern const struct ieee80211_ops mt7603_ops;
extern struct pci_driver mt7603_pci_driver;
extern struct platform_driver mt76_wmac_driver;

static inline bool is_mt7603(struct mt7603_dev *dev)
{
	return mt76xx_chip(dev) == 0x7603;
}

static inline bool is_mt7628(struct mt7603_dev *dev)
{
	return mt76xx_chip(dev) == 0x7628;
}

/* need offset to prevent conflict with ampdu_ack_len */
#define MT_RATE_DRIVER_DATA_OFFSET	4

u32 mt7603_reg_map(struct mt7603_dev *dev, u32 addr);

irqreturn_t mt7603_irq_handler(int irq, void *dev_instance);

int mt7603_register_device(struct mt7603_dev *dev);
void mt7603_unregister_device(struct mt7603_dev *dev);
int mt7603_eeprom_init(struct mt7603_dev *dev);
int mt7603_dma_init(struct mt7603_dev *dev);
void mt7603_dma_cleanup(struct mt7603_dev *dev);
int mt7603_mcu_init(struct mt7603_dev *dev);
void mt7603_init_debugfs(struct mt7603_dev *dev);

void mt7603_set_irq_mask(struct mt7603_dev *dev, u32 clear, u32 set);

static inline void mt7603_irq_enable(struct mt7603_dev *dev, u32 mask)
{
	mt7603_set_irq_mask(dev, 0, mask);
}

static inline void mt7603_irq_disable(struct mt7603_dev *dev, u32 mask)
{
	mt7603_set_irq_mask(dev, mask, 0);
}

void mt7603_mac_dma_start(struct mt7603_dev *dev);
void mt7603_mac_start(struct mt7603_dev *dev);
void mt7603_mac_stop(struct mt7603_dev *dev);
void mt7603_mac_work(struct work_struct *work);
void mt7603_mac_set_timing(struct mt7603_dev *dev);
void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval);
int mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb);
void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data);
void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid);
void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn,
			    int ba_size);

void mt7603_pse_client_reset(struct mt7603_dev *dev);

int mt7603_mcu_set_channel(struct mt7603_dev *dev);
int mt7603_mcu_set_eeprom(struct mt7603_dev *dev);
void mt7603_mcu_exit(struct mt7603_dev *dev);

void mt7603_wtbl_init(struct mt7603_dev *dev, int idx, int vif,
		      const u8 *mac_addr);
void mt7603_wtbl_clear(struct mt7603_dev *dev, int idx);
void mt7603_wtbl_update_cap(struct mt7603_dev *dev, struct ieee80211_sta *sta);
void mt7603_wtbl_set_rates(struct mt7603_dev *dev, struct mt7603_sta *sta,
			   struct ieee80211_tx_rate *probe_rate,
			   struct ieee80211_tx_rate *rates);
int mt7603_wtbl_set_key(struct mt7603_dev *dev, int wcid,
			struct ieee80211_key_conf *key);
void mt7603_wtbl_set_ps(struct mt7603_dev *dev, struct mt7603_sta *sta,
			bool enabled);
void mt7603_wtbl_set_smps(struct mt7603_dev *dev, struct mt7603_sta *sta,
			  bool enabled);
void mt7603_filter_tx(struct mt7603_dev *dev, int idx, bool abort);

int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
			  struct sk_buff *skb, struct mt76_queue *q,
			  struct mt76_wcid *wcid, struct ieee80211_sta *sta,
			  u32 *tx_info);

void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q,
			    struct mt76_queue_entry *e, bool flush);

void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb);
void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
void mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
int mt7603_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta);
void mt7603_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
		      struct ieee80211_sta *sta);
void mt7603_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta);

void mt7603_pre_tbtt_tasklet(unsigned long arg);

void mt7603_update_channel(struct mt76_dev *mdev);

void mt7603_edcca_set_strict(struct mt7603_dev *dev, bool val);
void mt7603_cca_stats_reset(struct mt7603_dev *dev);

#endif
+80
drivers/net/wireless/mediatek/mt76/mt7603/pci.c
/* SPDX-License-Identifier: ISC */
/* PCI glue for the MT7603E wireless chip. */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "mt7603.h"

static const struct pci_device_id mt76pci_device_table[] = {
	{ PCI_DEVICE(0x14c3, 0x7603) },
	{ },
};

/*
 * Probe: enable the device, map BAR0, read the chip revision, hook up
 * the (shared) interrupt and register with mac80211.  PCI resources and
 * the IRQ are device-managed (pcim_*/devm_*), so the error path only
 * has to free the ieee80211_hw allocated by mt76_alloc_device().
 */
static int
mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mt7603_dev *dev;
	struct mt76_dev *mdev;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
	if (ret)
		return ret;

	pci_set_master(pdev);

	/* 32-bit DMA only */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7603_ops,
				 &mt7603_drv_ops);
	if (!mdev)
		return -ENOMEM;

	dev = container_of(mdev, struct mt7603_dev, mt76);
	mt76_mmio_init(mdev, pcim_iomap_table(pdev)[0]);

	mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
		    (mt76_rr(dev, MT_HW_REV) & 0xff);
	dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev);

	ret = devm_request_irq(mdev->dev, pdev->irq, mt7603_irq_handler,
			       IRQF_SHARED, KBUILD_MODNAME, dev);
	if (ret)
		goto error;

	ret = mt7603_register_device(dev);
	if (ret)
		goto error;

	return 0;
error:
	ieee80211_free_hw(mt76_hw(dev));
	return ret;
}

/* NOTE(review): relies on drvdata pointing at the mt76_dev — probe does
 * not call pci_set_drvdata() here, so presumably mt76_alloc_device()
 * sets it on the struct device; confirm.
 */
static void
mt76pci_remove(struct pci_dev *pdev)
{
	struct mt76_dev *mdev = pci_get_drvdata(pdev);
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);

	mt7603_unregister_device(dev);
}

MODULE_DEVICE_TABLE(pci, mt76pci_device_table);
MODULE_FIRMWARE(MT7603_FIRMWARE_E1);
MODULE_FIRMWARE(MT7603_FIRMWARE_E2);

struct pci_driver mt7603_pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= mt76pci_device_table,
	.probe		= mt76pci_probe,
	.remove		= mt76pci_remove,
};
+774
drivers/net/wireless/mediatek/mt76/mt7603/regs.h
··· 1 + /* SPDX-License-Identifier: ISC */ 2 + 3 + #ifndef __MT7603_REGS_H 4 + #define __MT7603_REGS_H 5 + 6 + #define MT_HW_REV 0x1000 7 + #define MT_HW_CHIPID 0x1008 8 + #define MT_TOP_MISC2 0x1134 9 + 10 + #define MT_MCU_BASE 0x2000 11 + #define MT_MCU(ofs) (MT_MCU_BASE + (ofs)) 12 + 13 + #define MT_MCU_PCIE_REMAP_1 MT_MCU(0x500) 14 + #define MT_MCU_PCIE_REMAP_1_OFFSET GENMASK(17, 0) 15 + #define MT_MCU_PCIE_REMAP_1_BASE GENMASK(31, 18) 16 + 17 + #define MT_MCU_PCIE_REMAP_2 MT_MCU(0x504) 18 + #define MT_MCU_PCIE_REMAP_2_OFFSET GENMASK(18, 0) 19 + #define MT_MCU_PCIE_REMAP_2_BASE GENMASK(31, 19) 20 + 21 + #define MT_HIF_BASE 0x4000 22 + #define MT_HIF(ofs) (MT_HIF_BASE + (ofs)) 23 + 24 + #define MT_INT_SOURCE_CSR MT_HIF(0x200) 25 + #define MT_INT_MASK_CSR MT_HIF(0x204) 26 + #define MT_DELAY_INT_CFG MT_HIF(0x210) 27 + 28 + #define MT_INT_RX_DONE(_n) BIT(_n) 29 + #define MT_INT_RX_DONE_ALL GENMASK(1, 0) 30 + #define MT_INT_TX_DONE_ALL GENMASK(19, 4) 31 + #define MT_INT_TX_DONE(_n) BIT((_n) + 4) 32 + 33 + #define MT_INT_RX_COHERENT BIT(20) 34 + #define MT_INT_TX_COHERENT BIT(21) 35 + #define MT_INT_MAC_IRQ3 BIT(27) 36 + 37 + #define MT_INT_MCU_CMD BIT(30) 38 + 39 + #define MT_WPDMA_GLO_CFG MT_HIF(0x208) 40 + #define MT_WPDMA_GLO_CFG_TX_DMA_EN BIT(0) 41 + #define MT_WPDMA_GLO_CFG_TX_DMA_BUSY BIT(1) 42 + #define MT_WPDMA_GLO_CFG_RX_DMA_EN BIT(2) 43 + #define MT_WPDMA_GLO_CFG_RX_DMA_BUSY BIT(3) 44 + #define MT_WPDMA_GLO_CFG_DMA_BURST_SIZE GENMASK(5, 4) 45 + #define MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE BIT(6) 46 + #define MT_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7) 47 + #define MT_WPDMA_GLO_CFG_HDR_SEG_LEN GENMASK(15, 8) 48 + #define MT_WPDMA_GLO_CFG_SW_RESET BIT(24) 49 + #define MT_WPDMA_GLO_CFG_FORCE_TX_EOF BIT(25) 50 + #define MT_WPDMA_GLO_CFG_CLK_GATE_DIS BIT(30) 51 + #define MT_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31) 52 + 53 + #define MT_WPDMA_RST_IDX MT_HIF(0x20c) 54 + 55 + #define MT_WPDMA_DEBUG MT_HIF(0x244) 56 + #define MT_WPDMA_DEBUG_VALUE GENMASK(17, 0) 57 + #define 
MT_WPDMA_DEBUG_SEL BIT(27) 58 + #define MT_WPDMA_DEBUG_IDX GENMASK(31, 28) 59 + 60 + #define MT_TX_RING_BASE MT_HIF(0x300) 61 + #define MT_RX_RING_BASE MT_HIF(0x400) 62 + 63 + #define MT_TXTIME_THRESH_BASE MT_HIF(0x500) 64 + #define MT_TXTIME_THRESH(n) (MT_TXTIME_THRESH_BASE + ((n) * 4)) 65 + 66 + #define MT_PAGE_COUNT_BASE MT_HIF(0x540) 67 + #define MT_PAGE_COUNT(n) (MT_PAGE_COUNT_BASE + ((n) * 4)) 68 + 69 + #define MT_SCH_1 MT_HIF(0x588) 70 + #define MT_SCH_2 MT_HIF(0x58c) 71 + #define MT_SCH_3 MT_HIF(0x590) 72 + 73 + #define MT_SCH_4 MT_HIF(0x594) 74 + #define MT_SCH_4_FORCE_QID GENMASK(4, 0) 75 + #define MT_SCH_4_BYPASS BIT(5) 76 + #define MT_SCH_4_RESET BIT(8) 77 + 78 + #define MT_GROUP_THRESH_BASE MT_HIF(0x598) 79 + #define MT_GROUP_THRESH(n) (MT_GROUP_THRESH_BASE + ((n) * 4)) 80 + 81 + #define MT_QUEUE_PRIORITY_1 MT_HIF(0x580) 82 + #define MT_QUEUE_PRIORITY_2 MT_HIF(0x584) 83 + 84 + #define MT_BMAP_0 MT_HIF(0x5b0) 85 + #define MT_BMAP_1 MT_HIF(0x5b4) 86 + #define MT_BMAP_2 MT_HIF(0x5b8) 87 + 88 + #define MT_HIGH_PRIORITY_1 MT_HIF(0x5bc) 89 + #define MT_HIGH_PRIORITY_2 MT_HIF(0x5c0) 90 + 91 + #define MT_PRIORITY_MASK MT_HIF(0x5c4) 92 + 93 + #define MT_RSV_MAX_THRESH MT_HIF(0x5c8) 94 + 95 + #define MT_PSE_BASE 0x8000 96 + #define MT_PSE(ofs) (MT_PSE_BASE + (ofs)) 97 + 98 + #define MT_MCU_DEBUG_RESET MT_PSE(0x16c) 99 + #define MT_MCU_DEBUG_RESET_PSE BIT(0) 100 + #define MT_MCU_DEBUG_RESET_PSE_S BIT(1) 101 + #define MT_MCU_DEBUG_RESET_QUEUES GENMASK(6, 2) 102 + 103 + #define MT_PSE_FC_P0 MT_PSE(0x120) 104 + #define MT_PSE_FC_P0_MIN_RESERVE GENMASK(11, 0) 105 + #define MT_PSE_FC_P0_MAX_QUOTA GENMASK(27, 16) 106 + 107 + #define MT_PSE_FRP MT_PSE(0x138) 108 + #define MT_PSE_FRP_P0 GENMASK(2, 0) 109 + #define MT_PSE_FRP_P1 GENMASK(5, 3) 110 + #define MT_PSE_FRP_P2_RQ0 GENMASK(8, 6) 111 + #define MT_PSE_FRP_P2_RQ1 GENMASK(11, 9) 112 + #define MT_PSE_FRP_P2_RQ2 GENMASK(14, 12) 113 + 114 + #define MT_FC_RSV_COUNT_0 MT_PSE(0x13c) 115 + #define MT_FC_RSV_COUNT_0_P0 
GENMASK(11, 0) 116 + #define MT_FC_RSV_COUNT_0_P1 GENMASK(27, 16) 117 + 118 + #define MT_FC_SP2_Q0Q1 MT_PSE(0x14c) 119 + #define MT_FC_SP2_Q0Q1_SRC_COUNT_Q0 GENMASK(11, 0) 120 + #define MT_FC_SP2_Q0Q1_SRC_COUNT_Q1 GENMASK(27, 16) 121 + 122 + #define MT_PSE_FW_SHARED MT_PSE(0x17c) 123 + 124 + #define MT_PSE_RTA MT_PSE(0x194) 125 + #define MT_PSE_RTA_QUEUE_ID GENMASK(4, 0) 126 + #define MT_PSE_RTA_PORT_ID GENMASK(6, 5) 127 + #define MT_PSE_RTA_REDIRECT_EN BIT(7) 128 + #define MT_PSE_RTA_TAG_ID GENMASK(15, 8) 129 + #define MT_PSE_RTA_WRITE BIT(16) 130 + #define MT_PSE_RTA_BUSY BIT(31) 131 + 132 + #define MT_WF_PHY_BASE 0x10000 133 + #define MT_WF_PHY_OFFSET 0x1000 134 + #define MT_WF_PHY(ofs) (MT_WF_PHY_BASE + (ofs)) 135 + 136 + #define MT_AGC_BASE MT_WF_PHY(0x500) 137 + #define MT_AGC(n) (MT_AGC_BASE + ((n) * 4)) 138 + 139 + #define MT_AGC1_BASE MT_WF_PHY(0x1500) 140 + #define MT_AGC1(n) (MT_AGC1_BASE + ((n) * 4)) 141 + 142 + #define MT_AGC_41_RSSI_0 GENMASK(23, 16) 143 + #define MT_AGC_41_RSSI_1 GENMASK(7, 0) 144 + 145 + #define MT_RXTD_BASE MT_WF_PHY(0x600) 146 + #define MT_RXTD(n) (MT_RXTD_BASE + ((n) * 4)) 147 + 148 + #define MT_RXTD_6_ACI_TH GENMASK(4, 0) 149 + #define MT_RXTD_6_CCAED_TH GENMASK(14, 8) 150 + 151 + #define MT_RXTD_8_LOWER_SIGNAL GENMASK(5, 0) 152 + 153 + #define MT_RXTD_13_ACI_TH_EN BIT(0) 154 + 155 + #define MT_WF_PHY_CR_TSSI_BASE MT_WF_PHY(0xd00) 156 + #define MT_WF_PHY_CR_TSSI(phy, n) (MT_WF_PHY_CR_TSSI_BASE + \ 157 + ((phy) * MT_WF_PHY_OFFSET) + \ 158 + ((n) * 4)) 159 + 160 + #define MT_PHYCTRL_BASE MT_WF_PHY(0x4100) 161 + #define MT_PHYCTRL(n) (MT_PHYCTRL_BASE + ((n) * 4)) 162 + 163 + #define MT_PHYCTRL_2_STATUS_RESET BIT(6) 164 + #define MT_PHYCTRL_2_STATUS_EN BIT(7) 165 + 166 + #define MT_PHYCTRL_STAT_PD MT_PHYCTRL(3) 167 + #define MT_PHYCTRL_STAT_PD_OFDM GENMASK(31, 16) 168 + #define MT_PHYCTRL_STAT_PD_CCK GENMASK(15, 0) 169 + 170 + #define MT_PHYCTRL_STAT_MDRDY MT_PHYCTRL(8) 171 + #define MT_PHYCTRL_STAT_MDRDY_OFDM GENMASK(31, 16) 172 + 
#define MT_PHYCTRL_STAT_MDRDY_CCK GENMASK(15, 0) 173 + 174 + #define MT_WF_AGG_BASE 0x21200 175 + #define MT_WF_AGG(ofs) (MT_WF_AGG_BASE + (ofs)) 176 + 177 + #define MT_AGG_ARCR MT_WF_AGG(0x010) 178 + #define MT_AGG_ARCR_INIT_RATE1 BIT(0) 179 + #define MT_AGG_ARCR_FB_SGI_DISABLE BIT(1) 180 + #define MT_AGG_ARCR_RATE8_DOWN_WRAP BIT(2) 181 + #define MT_AGG_ARCR_RTS_RATE_THR GENMASK(12, 8) 182 + #define MT_AGG_ARCR_RATE_DOWN_RATIO GENMASK(17, 16) 183 + #define MT_AGG_ARCR_RATE_DOWN_RATIO_EN BIT(19) 184 + #define MT_AGG_ARCR_RATE_UP_EXTRA_TH GENMASK(22, 20) 185 + #define MT_AGG_ARCR_SPE_DIS_TH GENMASK(27, 24) 186 + 187 + #define MT_AGG_ARUCR MT_WF_AGG(0x014) 188 + #define MT_AGG_ARDCR MT_WF_AGG(0x018) 189 + #define MT_AGG_ARxCR_LIMIT_SHIFT(_n) (4 * (_n)) 190 + #define MT_AGG_ARxCR_LIMIT(_n) GENMASK(2 + \ 191 + MT_AGG_ARxCR_LIMIT_SHIFT(_n), \ 192 + MT_AGG_ARxCR_LIMIT_SHIFT(_n)) 193 + 194 + #define MT_AGG_LIMIT MT_WF_AGG(0x040) 195 + #define MT_AGG_LIMIT_1 MT_WF_AGG(0x044) 196 + #define MT_AGG_LIMIT_AC(_n) GENMASK(((_n) + 1) * 8 - 1, (_n) * 8) 197 + 198 + #define MT_AGG_BA_SIZE_LIMIT_0 MT_WF_AGG(0x048) 199 + #define MT_AGG_BA_SIZE_LIMIT_1 MT_WF_AGG(0x04c) 200 + #define MT_AGG_BA_SIZE_LIMIT_SHIFT 8 201 + 202 + #define MT_AGG_PCR MT_WF_AGG(0x050) 203 + #define MT_AGG_PCR_MM BIT(16) 204 + #define MT_AGG_PCR_GF BIT(17) 205 + #define MT_AGG_PCR_BW40 BIT(18) 206 + #define MT_AGG_PCR_RIFS BIT(19) 207 + #define MT_AGG_PCR_BW80 BIT(20) 208 + #define MT_AGG_PCR_BW160 BIT(21) 209 + #define MT_AGG_PCR_ERP BIT(22) 210 + 211 + #define MT_AGG_PCR_RTS MT_WF_AGG(0x054) 212 + #define MT_AGG_PCR_RTS_THR GENMASK(19, 0) 213 + #define MT_AGG_PCR_RTS_PKT_THR GENMASK(31, 25) 214 + 215 + #define MT_AGG_CONTROL MT_WF_AGG(0x070) 216 + #define MT_AGG_CONTROL_NO_BA_RULE BIT(0) 217 + #define MT_AGG_CONTROL_NO_BA_AR_RULE BIT(1) 218 + #define MT_AGG_CONTROL_CFEND_SPE_EN BIT(3) 219 + #define MT_AGG_CONTROL_CFEND_RATE GENMASK(15, 4) 220 + #define MT_AGG_CONTROL_BAR_SPE_EN BIT(19) 221 + #define 
MT_AGG_CONTROL_BAR_RATE GENMASK(31, 20) 222 + 223 + #define MT_AGG_TMP MT_WF_AGG(0x0d8) 224 + 225 + #define MT_AGG_BWCR MT_WF_AGG(0x0ec) 226 + #define MT_AGG_BWCR_BW GENMASK(3, 2) 227 + 228 + #define MT_AGG_RETRY_CONTROL MT_WF_AGG(0x0f4) 229 + #define MT_AGG_RETRY_CONTROL_RTS_LIMIT GENMASK(11, 7) 230 + #define MT_AGG_RETRY_CONTROL_BAR_LIMIT GENMASK(15, 12) 231 + 232 + #define MT_WF_DMA_BASE 0x21c00 233 + #define MT_WF_DMA(ofs) (MT_WF_DMA_BASE + (ofs)) 234 + 235 + #define MT_DMA_DCR0 MT_WF_DMA(0x000) 236 + #define MT_DMA_DCR1 MT_WF_DMA(0x004) 237 + 238 + #define MT_DMA_FQCR0 MT_WF_DMA(0x008) 239 + #define MT_DMA_FQCR0_TARGET_WCID GENMASK(7, 0) 240 + #define MT_DMA_FQCR0_TARGET_BSS GENMASK(13, 8) 241 + #define MT_DMA_FQCR0_TARGET_QID GENMASK(20, 16) 242 + #define MT_DMA_FQCR0_DEST_PORT_ID GENMASK(23, 22) 243 + #define MT_DMA_FQCR0_DEST_QUEUE_ID GENMASK(28, 24) 244 + #define MT_DMA_FQCR0_MODE BIT(29) 245 + #define MT_DMA_FQCR0_STATUS BIT(30) 246 + #define MT_DMA_FQCR0_BUSY BIT(31) 247 + 248 + #define MT_DMA_RCFR0 MT_WF_DMA(0x070) 249 + #define MT_DMA_VCFR0 MT_WF_DMA(0x07c) 250 + 251 + #define MT_DMA_TCFR0 MT_WF_DMA(0x080) 252 + #define MT_DMA_TCFR1 MT_WF_DMA(0x084) 253 + #define MT_DMA_TCFR_TXS_AGGR_TIMEOUT GENMASK(27, 16) 254 + #define MT_DMA_TCFR_TXS_QUEUE BIT(14) 255 + #define MT_DMA_TCFR_TXS_AGGR_COUNT GENMASK(12, 8) 256 + #define MT_DMA_TCFR_TXS_BIT_MAP GENMASK(6, 0) 257 + 258 + #define MT_DMA_TMCFR0 MT_WF_DMA(0x088) 259 + 260 + #define MT_WF_ARB_BASE 0x21400 261 + #define MT_WF_ARB(ofs) (MT_WF_ARB_BASE + (ofs)) 262 + 263 + #define MT_WMM_AIFSN MT_WF_ARB(0x020) 264 + #define MT_WMM_AIFSN_MASK GENMASK(3, 0) 265 + #define MT_WMM_AIFSN_SHIFT(_n) ((_n) * 4) 266 + 267 + #define MT_WMM_CWMAX_BASE MT_WF_ARB(0x028) 268 + #define MT_WMM_CWMAX(_n) (MT_WMM_CWMAX_BASE + (((_n) / 2) << 2)) 269 + #define MT_WMM_CWMAX_SHIFT(_n) (((_n) & 1) * 16) 270 + #define MT_WMM_CWMAX_MASK GENMASK(15, 0) 271 + 272 + #define MT_WMM_CWMIN MT_WF_ARB(0x040) 273 + #define MT_WMM_CWMIN_MASK 
GENMASK(7, 0) 274 + #define MT_WMM_CWMIN_SHIFT(_n) ((_n) * 8) 275 + 276 + #define MT_WF_ARB_RQCR MT_WF_ARB(0x070) 277 + #define MT_WF_ARB_RQCR_RX_START BIT(0) 278 + #define MT_WF_ARB_RQCR_RXV_START BIT(4) 279 + #define MT_WF_ARB_RQCR_RXV_R_EN BIT(7) 280 + #define MT_WF_ARB_RQCR_RXV_T_EN BIT(8) 281 + 282 + #define MT_ARB_SCR MT_WF_ARB(0x080) 283 + #define MT_ARB_SCR_BCNQ_OPMODE_MASK GENMASK(1, 0) 284 + #define MT_ARB_SCR_BCNQ_OPMODE_SHIFT(n) ((n) * 2) 285 + #define MT_ARB_SCR_TX_DISABLE BIT(8) 286 + #define MT_ARB_SCR_RX_DISABLE BIT(9) 287 + #define MT_ARB_SCR_BCNQ_EMPTY_SKIP BIT(28) 288 + #define MT_ARB_SCR_TTTT_BTIM_PRIO BIT(29) 289 + #define MT_ARB_SCR_TBTT_BCN_PRIO BIT(30) 290 + #define MT_ARB_SCR_TBTT_BCAST_PRIO BIT(31) 291 + 292 + enum { 293 + MT_BCNQ_OPMODE_STA = 0, 294 + MT_BCNQ_OPMODE_AP = 1, 295 + MT_BCNQ_OPMODE_ADHOC = 2, 296 + }; 297 + 298 + #define MT_WF_ARB_TX_START_0 MT_WF_ARB(0x100) 299 + #define MT_WF_ARB_TX_START_1 MT_WF_ARB(0x104) 300 + #define MT_WF_ARB_TX_FLUSH_0 MT_WF_ARB(0x108) 301 + #define MT_WF_ARB_TX_FLUSH_1 MT_WF_ARB(0x10c) 302 + #define MT_WF_ARB_TX_STOP_0 MT_WF_ARB(0x110) 303 + #define MT_WF_ARB_TX_STOP_1 MT_WF_ARB(0x114) 304 + 305 + #define MT_WF_ARB_BCN_START MT_WF_ARB(0x118) 306 + #define MT_WF_ARB_BCN_START_BSSn(n) BIT(0 + (n)) 307 + #define MT_WF_ARB_BCN_START_T_PRE_TTTT BIT(10) 308 + #define MT_WF_ARB_BCN_START_T_TTTT BIT(11) 309 + #define MT_WF_ARB_BCN_START_T_PRE_TBTT BIT(12) 310 + #define MT_WF_ARB_BCN_START_T_TBTT BIT(13) 311 + #define MT_WF_ARB_BCN_START_T_SLOT_IDLE BIT(14) 312 + #define MT_WF_ARB_BCN_START_T_TX_START BIT(15) 313 + #define MT_WF_ARB_BCN_START_BSS0n(n) BIT((n) ? 16 + ((n) - 1) : 0) 314 + 315 + #define MT_WF_ARB_BCN_FLUSH MT_WF_ARB(0x11c) 316 + #define MT_WF_ARB_BCN_FLUSH_BSSn(n) BIT(0 + (n)) 317 + #define MT_WF_ARB_BCN_FLUSH_BSS0n(n) BIT((n) ? 
16 + ((n) - 1) : 0) 318 + 319 + #define MT_WF_ARB_CAB_START MT_WF_ARB(0x120) 320 + #define MT_WF_ARB_CAB_START_BSSn(n) BIT(0 + (n)) 321 + #define MT_WF_ARB_CAB_START_BSS0n(n) BIT((n) ? 16 + ((n) - 1) : 0) 322 + 323 + #define MT_WF_ARB_CAB_FLUSH MT_WF_ARB(0x124) 324 + #define MT_WF_ARB_CAB_FLUSH_BSSn(n) BIT(0 + (n)) 325 + #define MT_WF_ARB_CAB_FLUSH_BSS0n(n) BIT((n) ? 16 + ((n) - 1) : 0) 326 + 327 + #define MT_WF_ARB_CAB_COUNT(n) MT_WF_ARB(0x128 + (n) * 4) 328 + #define MT_WF_ARB_CAB_COUNT_SHIFT 4 329 + #define MT_WF_ARB_CAB_COUNT_MASK GENMASK(3, 0) 330 + #define MT_WF_ARB_CAB_COUNT_B0_REG(n) MT_WF_ARB_CAB_COUNT(((n) > 12 ? 2 : \ 331 + ((n) > 4 ? 1 : 0))) 332 + #define MT_WF_ARB_CAB_COUNT_B0_SHIFT(n) (((n) > 12 ? (n) - 12 : \ 333 + ((n) > 4 ? (n) - 4 : \ 334 + (n) ? (n) + 3 : 0)) * 4) 335 + 336 + #define MT_TX_ABORT MT_WF_ARB(0x134) 337 + #define MT_TX_ABORT_EN BIT(0) 338 + #define MT_TX_ABORT_WCID GENMASK(15, 8) 339 + 340 + #define MT_WF_TMAC_BASE 0x21600 341 + #define MT_WF_TMAC(ofs) (MT_WF_TMAC_BASE + (ofs)) 342 + 343 + #define MT_TMAC_TCR MT_WF_TMAC(0x000) 344 + #define MT_TMAC_TCR_BLINK_SEL GENMASK(7, 6) 345 + #define MT_TMAC_TCR_PRE_RTS_GUARD GENMASK(11, 8) 346 + #define MT_TMAC_TCR_PRE_RTS_SEC_IDLE GENMASK(13, 12) 347 + #define MT_TMAC_TCR_RTS_SIGTA BIT(14) 348 + #define MT_TMAC_TCR_LDPC_OFS BIT(15) 349 + #define MT_TMAC_TCR_TX_STREAMS GENMASK(17, 16) 350 + #define MT_TMAC_TCR_SCH_IDLE_SEL GENMASK(19, 18) 351 + #define MT_TMAC_TCR_SCH_DET_PER_IOD BIT(20) 352 + #define MT_TMAC_TCR_DCH_DET_DISABLE BIT(21) 353 + #define MT_TMAC_TCR_TX_RIFS BIT(22) 354 + #define MT_TMAC_TCR_RX_RIFS_MODE BIT(23) 355 + #define MT_TMAC_TCR_TXOP_TBTT_CTL BIT(24) 356 + #define MT_TMAC_TCR_TBTT_TX_STOP_CTL BIT(25) 357 + #define MT_TMAC_TCR_TXOP_BURST_STOP BIT(26) 358 + #define MT_TMAC_TCR_RDG_RA_MODE BIT(27) 359 + #define MT_TMAC_TCR_RDG_RESP BIT(29) 360 + #define MT_TMAC_TCR_RDG_NO_PENDING BIT(30) 361 + #define MT_TMAC_TCR_SMOOTHING BIT(31) 362 + 363 + #define MT_WMM_TXOP_BASE 
MT_WF_TMAC(0x010) 364 + #define MT_WMM_TXOP(_n) (MT_WMM_TXOP_BASE + \ 365 + ((((_n) / 2) ^ 0x1) << 2)) 366 + #define MT_WMM_TXOP_SHIFT(_n) (((_n) & 1) * 16) 367 + #define MT_WMM_TXOP_MASK GENMASK(15, 0) 368 + 369 + #define MT_TIMEOUT_CCK MT_WF_TMAC(0x090) 370 + #define MT_TIMEOUT_OFDM MT_WF_TMAC(0x094) 371 + #define MT_TIMEOUT_VAL_PLCP GENMASK(15, 0) 372 + #define MT_TIMEOUT_VAL_CCA GENMASK(31, 16) 373 + 374 + #define MT_TXREQ MT_WF_TMAC(0x09c) 375 + #define MT_TXREQ_CCA_SRC_SEL GENMASK(31, 30) 376 + 377 + #define MT_RXREQ MT_WF_TMAC(0x0a0) 378 + #define MT_RXREQ_DELAY GENMASK(8, 0) 379 + 380 + #define MT_IFS MT_WF_TMAC(0x0a4) 381 + #define MT_IFS_EIFS GENMASK(8, 0) 382 + #define MT_IFS_RIFS GENMASK(14, 10) 383 + #define MT_IFS_SIFS GENMASK(22, 16) 384 + #define MT_IFS_SLOT GENMASK(30, 24) 385 + 386 + #define MT_TMAC_PCR MT_WF_TMAC(0x0b4) 387 + #define MT_TMAC_PCR_RATE GENMASK(8, 0) 388 + #define MT_TMAC_PCR_RATE_FIXED BIT(15) 389 + #define MT_TMAC_PCR_ANT_ID GENMASK(21, 16) 390 + #define MT_TMAC_PCR_ANT_ID_SEL BIT(22) 391 + #define MT_TMAC_PCR_SPE_EN BIT(23) 392 + #define MT_TMAC_PCR_ANT_PRI GENMASK(26, 24) 393 + #define MT_TMAC_PCR_ANT_PRI_SEL GENMASK(27) 394 + 395 + #define MT_WF_RMAC_BASE 0x21800 396 + #define MT_WF_RMAC(ofs) (MT_WF_RMAC_BASE + (ofs)) 397 + 398 + #define MT_WF_RFCR MT_WF_RMAC(0x000) 399 + #define MT_WF_RFCR_DROP_STBC_MULTI BIT(0) 400 + #define MT_WF_RFCR_DROP_FCSFAIL BIT(1) 401 + #define MT_WF_RFCR_DROP_VERSION BIT(3) 402 + #define MT_WF_RFCR_DROP_PROBEREQ BIT(4) 403 + #define MT_WF_RFCR_DROP_MCAST BIT(5) 404 + #define MT_WF_RFCR_DROP_BCAST BIT(6) 405 + #define MT_WF_RFCR_DROP_MCAST_FILTERED BIT(7) 406 + #define MT_WF_RFCR_DROP_A3_MAC BIT(8) 407 + #define MT_WF_RFCR_DROP_A3_BSSID BIT(9) 408 + #define MT_WF_RFCR_DROP_A2_BSSID BIT(10) 409 + #define MT_WF_RFCR_DROP_OTHER_BEACON BIT(11) 410 + #define MT_WF_RFCR_DROP_FRAME_REPORT BIT(12) 411 + #define MT_WF_RFCR_DROP_CTL_RSV BIT(13) 412 + #define MT_WF_RFCR_DROP_CTS BIT(14) 413 + #define 
MT_WF_RFCR_DROP_RTS BIT(15) 414 + #define MT_WF_RFCR_DROP_DUPLICATE BIT(16) 415 + #define MT_WF_RFCR_DROP_OTHER_BSS BIT(17) 416 + #define MT_WF_RFCR_DROP_OTHER_UC BIT(18) 417 + #define MT_WF_RFCR_DROP_OTHER_TIM BIT(19) 418 + #define MT_WF_RFCR_DROP_NDPA BIT(20) 419 + #define MT_WF_RFCR_DROP_UNWANTED_CTL BIT(21) 420 + 421 + #define MT_BSSID0(idx) MT_WF_RMAC(0x004 + (idx) * 8) 422 + #define MT_BSSID1(idx) MT_WF_RMAC(0x008 + (idx) * 8) 423 + #define MT_BSSID1_VALID BIT(16) 424 + 425 + #define MT_MAC_ADDR0(idx) MT_WF_RMAC(0x024 + (idx) * 8) 426 + #define MT_MAC_ADDR1(idx) MT_WF_RMAC(0x028 + (idx) * 8) 427 + #define MT_MAC_ADDR1_ADDR GENMASK(15, 0) 428 + #define MT_MAC_ADDR1_VALID BIT(16) 429 + 430 + #define MT_BA_CONTROL_0 MT_WF_RMAC(0x068) 431 + #define MT_BA_CONTROL_1 MT_WF_RMAC(0x06c) 432 + #define MT_BA_CONTROL_1_ADDR GENMASK(15, 0) 433 + #define MT_BA_CONTROL_1_TID GENMASK(19, 16) 434 + #define MT_BA_CONTROL_1_IGNORE_TID BIT(20) 435 + #define MT_BA_CONTROL_1_IGNORE_ALL BIT(21) 436 + #define MT_BA_CONTROL_1_RESET BIT(22) 437 + 438 + #define MT_WF_RMACDR MT_WF_RMAC(0x078) 439 + #define MT_WF_RMACDR_TSF_PROBERSP_DIS BIT(0) 440 + #define MT_WF_RMACDR_TSF_TIM BIT(4) 441 + #define MT_WF_RMACDR_MBSSID_MASK GENMASK(25, 24) 442 + #define MT_WF_RMACDR_CHECK_HTC_BY_RATE BIT(26) 443 + #define MT_WF_RMACDR_MAXLEN_20BIT BIT(30) 444 + 445 + #define MT_WF_RMAC_RMCR MT_WF_RMAC(0x080) 446 + #define MT_WF_RMAC_RMCR_SMPS_MODE GENMASK(21, 20) 447 + #define MT_WF_RMAC_RMCR_RX_STREAMS GENMASK(24, 22) 448 + #define MT_WF_RMAC_RMCR_SMPS_RTS BIT(25) 449 + 450 + #define MT_WF_RMAC_CH_FREQ MT_WF_RMAC(0x090) 451 + #define MT_WF_RMAC_MAXMINLEN MT_WF_RMAC(0x098) 452 + #define MT_WF_RFCR1 MT_WF_RMAC(0x0a4) 453 + #define MT_WF_RMAC_TMR_PA MT_WF_RMAC(0x0e0) 454 + 455 + #define MT_WF_SEC_BASE 0x21a00 456 + #define MT_WF_SEC(ofs) (MT_WF_SEC_BASE + (ofs)) 457 + 458 + #define MT_SEC_SCR MT_WF_SEC(0x004) 459 + #define MT_SEC_SCR_MASK_ORDER GENMASK(1, 0) 460 + 461 + #define MT_WTBL_OFF_BASE 0x23000 462 
+ #define MT_WTBL_OFF(n) (MT_WTBL_OFF_BASE + (n)) 463 + 464 + #define MT_WTBL_UPDATE MT_WTBL_OFF(0x000) 465 + #define MT_WTBL_UPDATE_WLAN_IDX GENMASK(7, 0) 466 + #define MT_WTBL_UPDATE_WTBL2 BIT(11) 467 + #define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(12) 468 + #define MT_WTBL_UPDATE_RATE_UPDATE BIT(13) 469 + #define MT_WTBL_UPDATE_TX_COUNT_CLEAR BIT(14) 470 + #define MT_WTBL_UPDATE_RX_COUNT_CLEAR BIT(15) 471 + #define MT_WTBL_UPDATE_BUSY BIT(16) 472 + 473 + #define MT_WTBL_RMVTCR MT_WTBL_OFF(0x008) 474 + #define MT_WTBL_RMVTCR_RX_MV_MODE BIT(23) 475 + 476 + #define MT_LPON_BASE 0x24000 477 + #define MT_LPON(n) (MT_LPON_BASE + (n)) 478 + 479 + #define MT_LPON_BTEIR MT_LPON(0x020) 480 + #define MT_LPON_BTEIR_MBSS_MODE GENMASK(31, 29) 481 + 482 + #define MT_PRE_TBTT MT_LPON(0x030) 483 + #define MT_PRE_TBTT_MASK GENMASK(7, 0) 484 + #define MT_PRE_TBTT_SHIFT 8 485 + 486 + #define MT_TBTT MT_LPON(0x034) 487 + #define MT_TBTT_PERIOD GENMASK(15, 0) 488 + #define MT_TBTT_DTIM_PERIOD GENMASK(23, 16) 489 + #define MT_TBTT_TBTT_WAKE_PERIOD GENMASK(27, 24) 490 + #define MT_TBTT_DTIM_WAKE_PERIOD GENMASK(30, 28) 491 + #define MT_TBTT_CAL_ENABLE BIT(31) 492 + 493 + #define MT_TBTT_TIMER_CFG MT_LPON(0x05c) 494 + 495 + #define MT_LPON_SBTOR(n) MT_LPON(0x0a0) 496 + #define MT_LPON_SBTOR_SUB_BSS_EN BIT(29) 497 + #define MT_LPON_SBTOR_TIME_OFFSET GENMASK(19, 0) 498 + 499 + #define MT_INT_WAKEUP_BASE 0x24400 500 + #define MT_INT_WAKEUP(n) (MT_INT_WAKEUP_BASE + (n)) 501 + 502 + #define MT_HW_INT_STATUS(n) MT_INT_WAKEUP(0x3c + (n) * 8) 503 + #define MT_HW_INT_MASK(n) MT_INT_WAKEUP(0x40 + (n) * 8) 504 + 505 + #define MT_HW_INT3_TBTT0 BIT(15) 506 + #define MT_HW_INT3_PRE_TBTT0 BIT(31) 507 + 508 + #define MT_WTBL1_BASE 0x28000 509 + 510 + #define MT_WTBL_ON_BASE (MT_WTBL1_BASE + 0x2000) 511 + #define MT_WTBL_ON(_n) (MT_WTBL_ON_BASE + (_n)) 512 + 513 + #define MT_WTBL_RIUCR0 MT_WTBL_ON(0x200) 514 + 515 + #define MT_WTBL_RIUCR1 MT_WTBL_ON(0x204) 516 + #define MT_WTBL_RIUCR1_RATE0 GENMASK(11, 0) 
517 + #define MT_WTBL_RIUCR1_RATE1 GENMASK(23, 12) 518 + #define MT_WTBL_RIUCR1_RATE2_LO GENMASK(31, 24) 519 + 520 + #define MT_WTBL_RIUCR2 MT_WTBL_ON(0x208) 521 + #define MT_WTBL_RIUCR2_RATE2_HI GENMASK(3, 0) 522 + #define MT_WTBL_RIUCR2_RATE3 GENMASK(15, 4) 523 + #define MT_WTBL_RIUCR2_RATE4 GENMASK(27, 16) 524 + #define MT_WTBL_RIUCR2_RATE5_LO GENMASK(31, 28) 525 + 526 + #define MT_WTBL_RIUCR3 MT_WTBL_ON(0x20c) 527 + #define MT_WTBL_RIUCR3_RATE5_HI GENMASK(7, 0) 528 + #define MT_WTBL_RIUCR3_RATE6 GENMASK(19, 8) 529 + #define MT_WTBL_RIUCR3_RATE7 GENMASK(31, 20) 530 + 531 + #define MT_MIB_BASE 0x2c000 532 + #define MT_MIB(_n) (MT_MIB_BASE + (_n)) 533 + 534 + #define MT_MIB_CTL MT_MIB(0x00) 535 + #define MT_MIB_CTL_PSCCA_TIME GENMASK(13, 11) 536 + #define MT_MIB_CTL_CCA_NAV_TX GENMASK(16, 14) 537 + #define MT_MIB_CTL_ED_TIME GENMASK(30, 28) 538 + #define MT_MIB_CTL_READ_CLR_DIS BIT(31) 539 + 540 + #define MT_MIB_STAT(_n) MT_MIB(0x08 + (_n) * 4) 541 + 542 + #define MT_MIB_STAT_CCA MT_MIB_STAT(9) 543 + #define MT_MIB_STAT_CCA_MASK GENMASK(23, 0) 544 + 545 + #define MT_MIB_STAT_PSCCA MT_MIB_STAT(16) 546 + #define MT_MIB_STAT_PSCCA_MASK GENMASK(23, 0) 547 + 548 + #define MT_MIB_STAT_ED MT_MIB_STAT(18) 549 + #define MT_MIB_STAT_ED_MASK GENMASK(23, 0) 550 + 551 + #define MT_PCIE_REMAP_BASE_1 0x40000 552 + #define MT_PCIE_REMAP_BASE_2 0x80000 553 + 554 + #define MT_TX_HW_QUEUE_MGMT 4 555 + #define MT_TX_HW_QUEUE_MCU 5 556 + #define MT_TX_HW_QUEUE_BCN 7 557 + #define MT_TX_HW_QUEUE_BMC 8 558 + 559 + #define MT_LED_BASE_PHYS 0x80024000 560 + #define MT_LED_PHYS(_n) (MT_LED_BASE_PHYS + (_n)) 561 + 562 + #define MT_LED_CTRL MT_LED_PHYS(0x00) 563 + 564 + #define MT_LED_CTRL_REPLAY(_n) BIT(0 + (8 * (_n))) 565 + #define MT_LED_CTRL_POLARITY(_n) BIT(1 + (8 * (_n))) 566 + #define MT_LED_CTRL_TX_BLINK_MODE(_n) BIT(2 + (8 * (_n))) 567 + #define MT_LED_CTRL_TX_MANUAL_BLINK(_n) BIT(3 + (8 * (_n))) 568 + #define MT_LED_CTRL_TX_OVER_BLINK(_n) BIT(5 + (8 * (_n))) 569 + #define 
MT_LED_CTRL_KICK(_n) BIT(7 + (8 * (_n))) 570 + 571 + #define MT_LED_STATUS_0(_n) MT_LED_PHYS(0x10 + ((_n) * 8)) 572 + #define MT_LED_STATUS_1(_n) MT_LED_PHYS(0x14 + ((_n) * 8)) 573 + #define MT_LED_STATUS_OFF_MASK GENMASK(31, 24) 574 + #define MT_LED_STATUS_OFF(_v) (((_v) << \ 575 + __ffs(MT_LED_STATUS_OFF_MASK)) & \ 576 + MT_LED_STATUS_OFF_MASK) 577 + #define MT_LED_STATUS_ON_MASK GENMASK(23, 16) 578 + #define MT_LED_STATUS_ON(_v) (((_v) << \ 579 + __ffs(MT_LED_STATUS_ON_MASK)) & \ 580 + MT_LED_STATUS_ON_MASK) 581 + #define MT_LED_STATUS_DURATION_MASK GENMASK(15, 0) 582 + #define MT_LED_STATUS_DURATION(_v) (((_v) << \ 583 + __ffs(MT_LED_STATUS_DURATION_MASK)) &\ 584 + MT_LED_STATUS_DURATION_MASK) 585 + 586 + #define MT_CLIENT_BASE_PHYS_ADDR 0x800c0000 587 + 588 + #define MT_CLIENT_TMAC_INFO_TEMPLATE 0x040 589 + 590 + #define MT_CLIENT_STATUS 0x06c 591 + 592 + #define MT_CLIENT_RESET_TX 0x070 593 + #define MT_CLIENT_RESET_TX_R_E_1 BIT(16) 594 + #define MT_CLIENT_RESET_TX_R_E_2 BIT(17) 595 + #define MT_CLIENT_RESET_TX_R_E_1_S BIT(20) 596 + #define MT_CLIENT_RESET_TX_R_E_2_S BIT(21) 597 + 598 + #define MT_EFUSE_BASE 0x81070000 599 + 600 + #define MT_EFUSE_BASE_CTRL 0x000 601 + #define MT_EFUSE_BASE_CTRL_EMPTY BIT(30) 602 + 603 + #define MT_EFUSE_CTRL 0x008 604 + #define MT_EFUSE_CTRL_AOUT GENMASK(5, 0) 605 + #define MT_EFUSE_CTRL_MODE GENMASK(7, 6) 606 + #define MT_EFUSE_CTRL_LDO_OFF_TIME GENMASK(13, 8) 607 + #define MT_EFUSE_CTRL_LDO_ON_TIME GENMASK(15, 14) 608 + #define MT_EFUSE_CTRL_AIN GENMASK(25, 16) 609 + #define MT_EFUSE_CTRL_VALID BIT(29) 610 + #define MT_EFUSE_CTRL_KICK BIT(30) 611 + #define MT_EFUSE_CTRL_SEL BIT(31) 612 + 613 + #define MT_EFUSE_WDATA(_i) (0x010 + ((_i) * 4)) 614 + #define MT_EFUSE_RDATA(_i) (0x030 + ((_i) * 4)) 615 + 616 + #define MT_CLIENT_RXINF 0x068 617 + #define MT_CLIENT_RXINF_RXSH_GROUPS GENMASK(2, 0) 618 + 619 + #define MT_PSE_BASE_PHYS_ADDR 0xa0000000 620 + 621 + #define MT_PSE_WTBL_2_PHYS_ADDR 0xa5000000 622 + 623 + #define 
MT_WTBL1_SIZE (8 * 4) 624 + #define MT_WTBL2_SIZE (16 * 4) 625 + #define MT_WTBL3_OFFSET (MT7603_WTBL_SIZE * MT_WTBL2_SIZE) 626 + #define MT_WTBL3_SIZE (16 * 4) 627 + #define MT_WTBL4_OFFSET (MT7603_WTBL_SIZE * MT_WTBL3_SIZE + \ 628 + MT_WTBL3_OFFSET) 629 + #define MT_WTBL4_SIZE (8 * 4) 630 + 631 + #define MT_WTBL1_W0_ADDR_HI GENMASK(15, 0) 632 + #define MT_WTBL1_W0_MUAR_IDX GENMASK(21, 16) 633 + #define MT_WTBL1_W0_RX_CHECK_A1 BIT(22) 634 + #define MT_WTBL1_W0_KEY_IDX GENMASK(24, 23) 635 + #define MT_WTBL1_W0_RX_CHECK_KEY_IDX BIT(25) 636 + #define MT_WTBL1_W0_RX_KEY_VALID BIT(26) 637 + #define MT_WTBL1_W0_RX_IK_VALID BIT(27) 638 + #define MT_WTBL1_W0_RX_VALID BIT(28) 639 + #define MT_WTBL1_W0_RX_CHECK_A2 BIT(29) 640 + #define MT_WTBL1_W0_RX_DATA_VALID BIT(30) 641 + #define MT_WTBL1_W0_WRITE_BURST BIT(31) 642 + 643 + #define MT_WTBL1_W1_ADDR_LO GENMASK(31, 0) 644 + 645 + #define MT_WTBL1_W2_MPDU_DENSITY GENMASK(2, 0) 646 + #define MT_WTBL1_W2_KEY_TYPE GENMASK(6, 3) 647 + #define MT_WTBL1_W2_EVEN_PN BIT(7) 648 + #define MT_WTBL1_W2_TO_DS BIT(8) 649 + #define MT_WTBL1_W2_FROM_DS BIT(9) 650 + #define MT_WTBL1_W2_HEADER_TRANS BIT(10) 651 + #define MT_WTBL1_W2_AMPDU_FACTOR GENMASK(13, 11) 652 + #define MT_WTBL1_W2_PWR_MGMT BIT(14) 653 + #define MT_WTBL1_W2_RDG BIT(15) 654 + #define MT_WTBL1_W2_RTS BIT(16) 655 + #define MT_WTBL1_W2_CFACK BIT(17) 656 + #define MT_WTBL1_W2_RDG_BA BIT(18) 657 + #define MT_WTBL1_W2_SMPS BIT(19) 658 + #define MT_WTBL1_W2_TXS_BAF_REPORT BIT(20) 659 + #define MT_WTBL1_W2_DYN_BW BIT(21) 660 + #define MT_WTBL1_W2_LDPC BIT(22) 661 + #define MT_WTBL1_W2_ITXBF BIT(23) 662 + #define MT_WTBL1_W2_ETXBF BIT(24) 663 + #define MT_WTBL1_W2_TXOP_PS BIT(25) 664 + #define MT_WTBL1_W2_MESH BIT(26) 665 + #define MT_WTBL1_W2_QOS BIT(27) 666 + #define MT_WTBL1_W2_HT BIT(28) 667 + #define MT_WTBL1_W2_VHT BIT(29) 668 + #define MT_WTBL1_W2_ADMISSION_CONTROL BIT(30) 669 + #define MT_WTBL1_W2_GROUP_ID BIT(31) 670 + 671 + #define MT_WTBL1_W3_WTBL2_FRAME_ID GENMASK(10, 
0) 672 + #define MT_WTBL1_W3_WTBL2_ENTRY_ID GENMASK(15, 11) 673 + #define MT_WTBL1_W3_WTBL4_FRAME_ID GENMASK(26, 16) 674 + #define MT_WTBL1_W3_CHECK_PER BIT(27) 675 + #define MT_WTBL1_W3_KEEP_I_PSM BIT(28) 676 + #define MT_WTBL1_W3_I_PSM BIT(29) 677 + #define MT_WTBL1_W3_POWER_SAVE BIT(30) 678 + #define MT_WTBL1_W3_SKIP_TX BIT(31) 679 + 680 + #define MT_WTBL1_W4_WTBL3_FRAME_ID GENMASK(10, 0) 681 + #define MT_WTBL1_W4_WTBL3_ENTRY_ID GENMASK(16, 11) 682 + #define MT_WTBL1_W4_WTBL4_ENTRY_ID GENMASK(22, 17) 683 + #define MT_WTBL1_W4_PARTIAL_AID GENMASK(31, 23) 684 + 685 + #define MT_WTBL2_W0_PN_LO GENMASK(31, 0) 686 + 687 + #define MT_WTBL2_W1_PN_HI GENMASK(15, 0) 688 + #define MT_WTBL2_W1_NON_QOS_SEQNO GENMASK(27, 16) 689 + 690 + #define MT_WTBL2_W2_TID0_SN GENMASK(11, 0) 691 + #define MT_WTBL2_W2_TID1_SN GENMASK(23, 12) 692 + #define MT_WTBL2_W2_TID2_SN_LO GENMASK(31, 24) 693 + 694 + #define MT_WTBL2_W3_TID2_SN_HI GENMASK(3, 0) 695 + #define MT_WTBL2_W3_TID3_SN GENMASK(15, 4) 696 + #define MT_WTBL2_W3_TID4_SN GENMASK(27, 16) 697 + #define MT_WTBL2_W3_TID5_SN_LO GENMASK(31, 28) 698 + 699 + #define MT_WTBL2_W4_TID5_SN_HI GENMASK(7, 0) 700 + #define MT_WTBL2_W4_TID6_SN GENMASK(19, 8) 701 + #define MT_WTBL2_W4_TID7_SN GENMASK(31, 20) 702 + 703 + #define MT_WTBL2_W5_TX_COUNT_RATE1 GENMASK(15, 0) 704 + #define MT_WTBL2_W5_FAIL_COUNT_RATE1 GENAMSK(31, 16) 705 + 706 + #define MT_WTBL2_W6_TX_COUNT_RATE2 GENMASK(7, 0) 707 + #define MT_WTBL2_W6_TX_COUNT_RATE3 GENMASK(15, 8) 708 + #define MT_WTBL2_W6_TX_COUNT_RATE4 GENMASK(23, 16) 709 + #define MT_WTBL2_W6_TX_COUNT_RATE5 GENMASK(31, 24) 710 + 711 + #define MT_WTBL2_W7_TX_COUNT_CUR_BW GENMASK(15, 0) 712 + #define MT_WTBL2_W7_FAIL_COUNT_CUR_BW GENMASK(31, 16) 713 + 714 + #define MT_WTBL2_W8_TX_COUNT_OTHER_BW GENMASK(15, 0) 715 + #define MT_WTBL2_W8_FAIL_COUNT_OTHER_BW GENMASK(31, 16) 716 + 717 + #define MT_WTBL2_W9_POWER_OFFSET GENMASK(4, 0) 718 + #define MT_WTBL2_W9_SPATIAL_EXT BIT(5) 719 + #define MT_WTBL2_W9_ANT_PRIORITY 
GENMASK(8, 6) 720 + #define MT_WTBL2_W9_CC_BW_SEL GENMASK(10, 9) 721 + #define MT_WTBL2_W9_CHANGE_BW_RATE GENMASK(13, 11) 722 + #define MT_WTBL2_W9_BW_CAP GENMASK(15, 14) 723 + #define MT_WTBL2_W9_SHORT_GI_20 BIT(16) 724 + #define MT_WTBL2_W9_SHORT_GI_40 BIT(17) 725 + #define MT_WTBL2_W9_SHORT_GI_80 BIT(18) 726 + #define MT_WTBL2_W9_SHORT_GI_160 BIT(19) 727 + #define MT_WTBL2_W9_MPDU_FAIL_COUNT GENMASK(25, 23) 728 + #define MT_WTBL2_W9_MPDU_OK_COUNT GENMASK(28, 26) 729 + #define MT_WTBL2_W9_RATE_IDX GENMASK(31, 29) 730 + 731 + #define MT_WTBL2_W10_RATE1 GENMASK(11, 0) 732 + #define MT_WTBL2_W10_RATE2 GENMASK(23, 12) 733 + #define MT_WTBL2_W10_RATE3_LO GENMASK(31, 24) 734 + 735 + #define MT_WTBL2_W11_RATE3_HI GENMASK(3, 0) 736 + #define MT_WTBL2_W11_RATE4 GENMASK(15, 4) 737 + #define MT_WTBL2_W11_RATE5 GENMASK(27, 16) 738 + #define MT_WTBL2_W11_RATE6_LO GENMASK(31, 28) 739 + 740 + #define MT_WTBL2_W12_RATE6_HI GENMASK(7, 0) 741 + #define MT_WTBL2_W12_RATE7 GENMASK(19, 8) 742 + #define MT_WTBL2_W12_RATE8 GENMASK(31, 20) 743 + 744 + #define MT_WTBL2_W13_AVG_RCPI0 GENMASK(7, 0) 745 + #define MT_WTBL2_W13_AVG_RCPI1 GENMASK(15, 8) 746 + #define MT_WTBL2_W13_AVG_RCPI2 GENAMSK(23, 16) 747 + 748 + #define MT_WTBL2_W14_CC_NOISE_1S GENMASK(6, 0) 749 + #define MT_WTBL2_W14_CC_NOISE_2S GENMASK(13, 7) 750 + #define MT_WTBL2_W14_CC_NOISE_3S GENMASK(20, 14) 751 + #define MT_WTBL2_W14_CHAN_EST_RMS GENMASK(24, 21) 752 + #define MT_WTBL2_W14_CC_NOISE_SEL BIT(15) 753 + #define MT_WTBL2_W14_ANT_SEL GENMASK(31, 26) 754 + 755 + #define MT_WTBL2_W15_BA_WIN_SIZE GENMASK(2, 0) 756 + #define MT_WTBL2_W15_BA_WIN_SIZE_SHIFT 3 757 + #define MT_WTBL2_W15_BA_EN_TIDS GENMASK(31, 24) 758 + 759 + #define MT_WTBL1_OR (MT_WTBL1_BASE + 0x2300) 760 + #define MT_WTBL1_OR_PSM_WRITE BIT(31) 761 + 762 + enum mt7603_cipher_type { 763 + MT_CIPHER_NONE, 764 + MT_CIPHER_WEP40, 765 + MT_CIPHER_TKIP, 766 + MT_CIPHER_TKIP_NO_MIC, 767 + MT_CIPHER_AES_CCMP, 768 + MT_CIPHER_WEP104, 769 + MT_CIPHER_BIP_CMAC_128, 770 + 
MT_CIPHER_WEP128, 771 + MT_CIPHER_WAPI, 772 + }; 773 + 774 + #endif
+85
drivers/net/wireless/mediatek/mt76/mt7603/soc.c
··· 1 + /* SPDX-License-Identifier: ISC */ 2 + 3 + #include <linux/kernel.h> 4 + #include <linux/module.h> 5 + #include <linux/platform_device.h> 6 + 7 + #include "mt7603.h" 8 + 9 + static int 10 + mt76_wmac_probe(struct platform_device *pdev) 11 + { 12 + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 13 + struct mt7603_dev *dev; 14 + void __iomem *mem_base; 15 + struct mt76_dev *mdev; 16 + int irq; 17 + int ret; 18 + 19 + irq = platform_get_irq(pdev, 0); 20 + if (irq < 0) { 21 + dev_err(&pdev->dev, "Failed to get device IRQ\n"); 22 + return irq; 23 + } 24 + 25 + mem_base = devm_ioremap_resource(&pdev->dev, res); 26 + if (!mem_base) { 27 + dev_err(&pdev->dev, "Failed to get memory resource\n"); 28 + return -EINVAL; 29 + } 30 + 31 + mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7603_ops, 32 + &mt7603_drv_ops); 33 + if (!mdev) 34 + return -ENOMEM; 35 + 36 + dev = container_of(mdev, struct mt7603_dev, mt76); 37 + mt76_mmio_init(mdev, mem_base); 38 + 39 + mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) | 40 + (mt76_rr(dev, MT_HW_REV) & 0xff); 41 + dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev); 42 + 43 + ret = devm_request_irq(mdev->dev, irq, mt7603_irq_handler, 44 + IRQF_SHARED, KBUILD_MODNAME, dev); 45 + if (ret) 46 + goto error; 47 + 48 + ret = mt7603_register_device(dev); 49 + if (ret) 50 + goto error; 51 + 52 + return 0; 53 + error: 54 + ieee80211_free_hw(mt76_hw(dev)); 55 + return ret; 56 + } 57 + 58 + static int 59 + mt76_wmac_remove(struct platform_device *pdev) 60 + { 61 + struct mt76_dev *mdev = platform_get_drvdata(pdev); 62 + struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76); 63 + 64 + mt7603_unregister_device(dev); 65 + 66 + return 0; 67 + } 68 + 69 + static const struct of_device_id of_wmac_match[] = { 70 + { .compatible = "mediatek,mt7628-wmac" }, 71 + {}, 72 + }; 73 + 74 + MODULE_DEVICE_TABLE(of, of_wmac_match); 75 + MODULE_FIRMWARE(MT7628_FIRMWARE_E1); 76 + MODULE_FIRMWARE(MT7628_FIRMWARE_E2); 
77 + 78 + struct platform_driver mt76_wmac_driver = { 79 + .probe = mt76_wmac_probe, 80 + .remove = mt76_wmac_remove, 81 + .driver = { 82 + .name = "mt76_wmac", 83 + .of_match_table = of_wmac_match, 84 + }, 85 + };
+2
drivers/net/wireless/mediatek/mt76/mt76x0/init.c
··· 187 187 { 188 188 int i = 200, ok = 0; 189 189 190 + mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN); 191 + 190 192 /* Page count on TxQ */ 191 193 while (i-- && ((mt76_rr(dev, 0x0438) & 0xffffffff) || 192 194 (mt76_rr(dev, 0x0a30) & 0x000000ff) ||
+2
drivers/net/wireless/mediatek/mt76/mt76x0/main.c
··· 34 34 mt76_rr(dev, MT_CH_IDLE); 35 35 mt76_rr(dev, MT_CH_BUSY); 36 36 37 + mt76x02_edcca_init(dev, true); 38 + 37 39 if (mt76_is_mmio(dev)) { 38 40 mt76x02_dfs_init_params(dev); 39 41 tasklet_enable(&dev->pre_tbtt_tasklet);
+1 -5
drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
··· 1007 1007 1008 1008 /* enable vco */ 1009 1009 mt76x0_rf_set(dev, MT_RF(0, 4), BIT(7)); 1010 - if (scan) { 1011 - mt76x02_edcca_init(dev, false); 1010 + if (scan) 1012 1011 return 0; 1013 - } 1014 1012 1015 1013 mt76x02_init_agc_gain(dev); 1016 1014 mt76x0_phy_calibrate(dev, false); 1017 1015 mt76x0_phy_set_txpower(dev); 1018 - 1019 - mt76x02_edcca_init(dev, true); 1020 1016 1021 1017 ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work, 1022 1018 MT_CALIBRATE_INTERVAL);
-16
drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
··· 79 79 clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state); 80 80 mt76x0_chip_onoff(dev, false, false); 81 81 mt76u_queues_deinit(&dev->mt76); 82 - mt76u_mcu_deinit(&dev->mt76); 83 82 } 84 83 85 84 static void mt76x0u_mac_stop(struct mt76x02_dev *dev) ··· 189 190 int err; 190 191 191 192 err = mt76u_alloc_queues(&dev->mt76); 192 - if (err < 0) 193 - goto out_err; 194 - 195 - err = mt76u_mcu_init_rx(&dev->mt76); 196 193 if (err < 0) 197 194 goto out_err; 198 195 ··· 306 311 pm_message_t state) 307 312 { 308 313 struct mt76x02_dev *dev = usb_get_intfdata(usb_intf); 309 - struct mt76_usb *usb = &dev->mt76.usb; 310 314 311 315 mt76u_stop_queues(&dev->mt76); 312 316 mt76x0u_mac_stop(dev); 313 317 clear_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state); 314 318 mt76x0_chip_onoff(dev, false, false); 315 - usb_kill_urb(usb->mcu.res.urb); 316 319 317 320 return 0; 318 321 } ··· 320 327 struct mt76x02_dev *dev = usb_get_intfdata(usb_intf); 321 328 struct mt76_usb *usb = &dev->mt76.usb; 322 329 int ret; 323 - 324 - reinit_completion(&usb->mcu.cmpl); 325 - ret = mt76u_submit_buf(&dev->mt76, USB_DIR_IN, 326 - MT_EP_IN_CMD_RESP, 327 - &usb->mcu.res, GFP_KERNEL, 328 - mt76u_mcu_complete_urb, 329 - &usb->mcu.cmpl); 330 - if (ret < 0) 331 - goto err; 332 330 333 331 ret = mt76u_submit_rx_buffers(&dev->mt76); 334 332 if (ret < 0)
+1
drivers/net/wireless/mediatek/mt76/mt76x02.h
··· 98 98 99 99 u32 tx_hang_reset; 100 100 u8 tx_hang_check; 101 + u8 mcu_timeout; 101 102 102 103 struct mt76x02_calibration cal; 103 104
+6 -3
drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
··· 905 905 mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN); 906 906 mt76_rmw(dev, MT_BBP(AGC, 2), GENMASK(15, 0), 907 907 ed_th << 8 | ed_th); 908 - if (!is_mt76x2(dev)) 909 - mt76_set(dev, MT_TXOP_HLDR_ET, 910 - MT_TXOP_HLDR_TX40M_BLK_EN); 908 + mt76_set(dev, MT_TXOP_HLDR_ET, MT_TXOP_HLDR_TX40M_BLK_EN); 911 909 } else { 912 910 mt76_set(dev, MT_TX_LINK_CFG, MT_TX_CFACK_EN); 913 911 mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN); 914 912 if (is_mt76x2(dev)) { 915 913 mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070); 914 + mt76_set(dev, MT_TXOP_HLDR_ET, 915 + MT_TXOP_HLDR_TX40M_BLK_EN); 916 916 } else { 917 917 mt76_wr(dev, MT_BBP(AGC, 2), 0x003a6464); 918 918 mt76_clear(dev, MT_TXOP_HLDR_ET, ··· 1125 1125 tasklet_disable(&dev->pre_tbtt_tasklet); 1126 1126 else if (val) 1127 1127 skb = ieee80211_beacon_get(mt76_hw(dev), vif); 1128 + 1129 + if (!dev->beacon_mask) 1130 + dev->tbtt_count = 0; 1128 1131 1129 1132 __mt76x02_mac_set_beacon_enable(dev, vif_idx, val, skb); 1130 1133
+1
drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c
··· 61 61 "MCU message %d (seq %d) timed out\n", cmd, 62 62 seq); 63 63 ret = -ETIMEDOUT; 64 + dev->mcu_timeout = 1; 64 65 break; 65 66 } 66 67
+26 -16
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
··· 79 79 * Beacon timer drifts by 1us every tick, the timer is configured 80 80 * in 1/16 TU (64us) units. 81 81 */ 82 - if (dev->tbtt_count < 62) 82 + if (dev->tbtt_count < 63) 83 83 return; 84 - 85 - if (dev->tbtt_count >= 64) { 86 - dev->tbtt_count = 0; 87 - return; 88 - } 89 84 90 85 /* 91 86 * The updated beacon interval takes effect after two TBTT, because 92 87 * at this point the original interval has already been loaded into 93 88 * the next TBTT_TIMER value 94 89 */ 95 - if (dev->tbtt_count == 62) 90 + if (dev->tbtt_count == 63) 96 91 timer_val -= 1; 97 92 98 93 mt76_rmw_field(dev, MT_BEACON_TIME_CFG, 99 94 MT_BEACON_TIME_CFG_INTVAL, timer_val); 95 + 96 + if (dev->tbtt_count >= 64) { 97 + dev->tbtt_count = 0; 98 + return; 99 + } 100 100 } 101 101 102 102 static void mt76x02_pre_tbtt_tasklet(unsigned long arg) ··· 494 494 static void mt76x02_check_tx_hang(struct mt76x02_dev *dev) 495 495 { 496 496 if (mt76x02_tx_hang(dev)) { 497 - if (++dev->tx_hang_check < MT_TX_HANG_TH) 498 - return; 499 - 500 - mt76x02_watchdog_reset(dev); 501 - 502 - dev->tx_hang_reset++; 503 - dev->tx_hang_check = 0; 504 - memset(dev->mt76.tx_dma_idx, 0xff, 505 - sizeof(dev->mt76.tx_dma_idx)); 497 + if (++dev->tx_hang_check >= MT_TX_HANG_TH) 498 + goto restart; 506 499 } else { 507 500 dev->tx_hang_check = 0; 508 501 } 502 + 503 + if (dev->mcu_timeout) 504 + goto restart; 505 + 506 + return; 507 + 508 + restart: 509 + mt76x02_watchdog_reset(dev); 510 + 511 + mutex_lock(&dev->mt76.mmio.mcu.mutex); 512 + dev->mcu_timeout = 0; 513 + mutex_unlock(&dev->mt76.mmio.mcu.mutex); 514 + 515 + dev->tx_hang_reset++; 516 + dev->tx_hang_check = 0; 517 + memset(dev->mt76.tx_dma_idx, 0xff, 518 + sizeof(dev->mt76.tx_dma_idx)); 509 519 } 510 520 511 521 void mt76x02_wdt_work(struct work_struct *work)
+12 -24
drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
··· 61 61 static int mt76x02u_mcu_wait_resp(struct mt76_dev *dev, u8 seq) 62 62 { 63 63 struct mt76_usb *usb = &dev->usb; 64 - struct mt76u_buf *buf = &usb->mcu.res; 65 - struct urb *urb = buf->urb; 66 - u8 *data = buf->buf; 67 - int i, ret; 64 + u8 *data = usb->mcu.data; 65 + int i, len, ret; 68 66 u32 rxfce; 69 67 70 68 for (i = 0; i < 5; i++) { 71 - if (!wait_for_completion_timeout(&usb->mcu.cmpl, 72 - msecs_to_jiffies(300))) 69 + ret = mt76u_bulk_msg(dev, data, MCU_RESP_URB_SIZE, &len, 300); 70 + if (ret == -ETIMEDOUT) 73 71 continue; 74 - 75 - if (urb->status) 76 - return -EIO; 72 + if (ret) 73 + goto out; 77 74 78 75 if (usb->mcu.rp) 79 - mt76x02u_multiple_mcu_reads(dev, data + 4, 80 - urb->actual_length - 8); 76 + mt76x02u_multiple_mcu_reads(dev, data + 4, len - 8); 81 77 82 78 rxfce = get_unaligned_le32(data); 83 - ret = mt76u_submit_buf(dev, USB_DIR_IN, 84 - MT_EP_IN_CMD_RESP, 85 - buf, GFP_KERNEL, 86 - mt76u_mcu_complete_urb, 87 - &usb->mcu.cmpl); 88 - if (ret) 89 - return ret; 90 - 91 79 if (seq == FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce) && 92 80 FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce) == EVT_CMD_DONE) 93 81 return 0; ··· 84 96 FIELD_GET(MT_RX_FCE_INFO_EVT_TYPE, rxfce), 85 97 seq, FIELD_GET(MT_RX_FCE_INFO_CMD_SEQ, rxfce)); 86 98 } 87 - 88 - dev_err(dev->dev, "error: %s timed out\n", __func__); 89 - return -ETIMEDOUT; 99 + out: 100 + dev_err(dev->dev, "error: %s failed with %d\n", __func__, ret); 101 + return ret; 90 102 } 91 103 92 104 static int ··· 114 126 if (ret) 115 127 return ret; 116 128 117 - ret = mt76u_bulk_msg(dev, skb->data, skb->len, 500); 129 + ret = mt76u_bulk_msg(dev, skb->data, skb->len, NULL, 500); 118 130 if (ret) 119 131 return ret; 120 132 ··· 259 271 260 272 data_len = MT_CMD_HDR_LEN + len + sizeof(info); 261 273 262 - err = mt76u_bulk_msg(&dev->mt76, data, data_len, 1000); 274 + err = mt76u_bulk_msg(&dev->mt76, data, data_len, NULL, 1000); 263 275 if (err) { 264 276 dev_err(dev->mt76.dev, "firmware upload failed: %d\n", err); 
265 277 return err;
+4 -5
drivers/net/wireless/mediatek/mt76/mt76x02_util.c
··· 679 679 } 680 680 681 681 mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN | 682 - MT_BEACON_TIME_CFG_SYNC_MODE | 683 682 MT_BEACON_TIME_CFG_TBTT_EN | 684 683 MT_BEACON_TIME_CFG_BEACON_TX)); 684 + mt76_set(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_SYNC_MODE); 685 685 mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xffff); 686 686 687 687 for (i = 0; i < 8; i++) ··· 704 704 if (changed & BSS_CHANGED_BSSID) 705 705 mt76x02_mac_set_bssid(dev, mvif->idx, info->bssid); 706 706 707 - if (changed & BSS_CHANGED_BEACON_ENABLED) 708 - mt76x02_mac_set_beacon_enable(dev, vif, info->enable_beacon); 709 - 710 707 if (changed & BSS_CHANGED_HT || changed & BSS_CHANGED_ERP_CTS_PROT) 711 708 mt76x02_mac_set_tx_protection(dev, info->use_cts_prot, 712 709 info->ht_operation_mode); ··· 713 716 MT_BEACON_TIME_CFG_INTVAL, 714 717 info->beacon_int << 4); 715 718 dev->beacon_int = info->beacon_int; 716 - dev->tbtt_count = 0; 717 719 } 720 + 721 + if (changed & BSS_CHANGED_BEACON_ENABLED) 722 + mt76x02_mac_set_beacon_enable(dev, vif, info->enable_beacon); 718 723 719 724 if (changed & BSS_CHANGED_ERP_PREAMBLE) 720 725 mt76x02_mac_set_short_preamble(dev, info->use_short_preamble);
+3
drivers/net/wireless/mediatek/mt76/mt76x2/mac.c
··· 23 23 u32 rts_cfg; 24 24 int i; 25 25 26 + mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN); 27 + mt76_clear(dev, MT_TXOP_HLDR_ET, MT_TXOP_HLDR_TX40M_BLK_EN); 28 + 26 29 mt76_wr(dev, MT_MAC_SYS_CTRL, 0); 27 30 28 31 rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG);
+7 -1
drivers/net/wireless/mediatek/mt76/mt76x2/mac.h
··· 25 25 26 26 int mt76x2_mac_start(struct mt76x02_dev *dev); 27 27 void mt76x2_mac_stop(struct mt76x02_dev *dev, bool force); 28 - void mt76x2_mac_resume(struct mt76x02_dev *dev); 28 + 29 + static inline void mt76x2_mac_resume(struct mt76x02_dev *dev) 30 + { 31 + mt76_wr(dev, MT_MAC_SYS_CTRL, 32 + MT_MAC_SYS_CTRL_ENABLE_TX | 33 + MT_MAC_SYS_CTRL_ENABLE_RX); 34 + } 29 35 30 36 #endif
-1
drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h
··· 35 35 void mt76x2u_stop_hw(struct mt76x02_dev *dev); 36 36 37 37 int mt76x2u_mac_reset(struct mt76x02_dev *dev); 38 - void mt76x2u_mac_resume(struct mt76x02_dev *dev); 39 38 int mt76x2u_mac_start(struct mt76x02_dev *dev); 40 39 int mt76x2u_mac_stop(struct mt76x02_dev *dev); 41 40
-7
drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
··· 173 173 return 0; 174 174 } 175 175 176 - void mt76x2_mac_resume(struct mt76x02_dev *dev) 177 - { 178 - mt76_wr(dev, MT_MAC_SYS_CTRL, 179 - MT_MAC_SYS_CTRL_ENABLE_TX | 180 - MT_MAC_SYS_CTRL_ENABLE_RX); 181 - } 182 - 183 176 static void 184 177 mt76x2_power_on_rf_patch(struct mt76x02_dev *dev) 185 178 {
+2 -5
drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c
··· 74 74 mt76x2_mac_resume(dev); 75 75 76 76 mt76x2_apply_gain_adj(dev); 77 + mt76x02_edcca_init(dev, true); 77 78 78 79 dev->cal.channel_cal_done = true; 79 80 } ··· 241 240 mt76_wr(dev, MT_BBP(AGC, 2), 0x00007070); 242 241 mt76_wr(dev, MT_TXOP_CTRL_CFG, 0x04101B3F); 243 242 244 - if (scan) { 245 - mt76x02_edcca_init(dev, false); 243 + if (scan) 246 244 return 0; 247 - } 248 245 249 246 mt76x2_phy_channel_calibrate(dev, true); 250 247 mt76x02_init_agc_gain(dev); ··· 254 255 mt76_rmw_field(dev, MT_TX_ALC_CFG_2, MT_TX_ALC_CFG_2_TEMP_COMP, 255 256 0x38); 256 257 } 257 - 258 - mt76x02_edcca_init(dev, true); 259 258 260 259 ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work, 261 260 MT_CALIBRATE_INTERVAL);
-11
drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
··· 100 100 pm_message_t state) 101 101 { 102 102 struct mt76x02_dev *dev = usb_get_intfdata(intf); 103 - struct mt76_usb *usb = &dev->mt76.usb; 104 103 105 104 mt76u_stop_queues(&dev->mt76); 106 105 mt76x2u_stop_hw(dev); 107 - usb_kill_urb(usb->mcu.res.urb); 108 106 109 107 return 0; 110 108 } ··· 112 114 struct mt76x02_dev *dev = usb_get_intfdata(intf); 113 115 struct mt76_usb *usb = &dev->mt76.usb; 114 116 int err; 115 - 116 - reinit_completion(&usb->mcu.cmpl); 117 - err = mt76u_submit_buf(&dev->mt76, USB_DIR_IN, 118 - MT_EP_IN_CMD_RESP, 119 - &usb->mcu.res, GFP_KERNEL, 120 - mt76u_mcu_complete_urb, 121 - &usb->mcu.cmpl); 122 - if (err < 0) 123 - goto err; 124 117 125 118 err = mt76u_submit_rx_buffers(&dev->mt76); 126 119 if (err < 0)
-5
drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c
··· 214 214 if (err < 0) 215 215 goto fail; 216 216 217 - err = mt76u_mcu_init_rx(&dev->mt76); 218 - if (err < 0) 219 - goto fail; 220 - 221 217 err = mt76x2u_init_hardware(dev); 222 218 if (err < 0) 223 219 goto fail; ··· 255 259 mt76x02_mcu_set_radio_state(dev, false); 256 260 mt76x2u_stop_hw(dev); 257 261 mt76u_queues_deinit(&dev->mt76); 258 - mt76u_mcu_deinit(&dev->mt76); 259 262 }
+2 -11
drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
··· 143 143 rts_cfg = mt76_rr(dev, MT_TX_RTS_CFG); 144 144 mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg & ~MT_TX_RTS_CFG_RETRY_LIMIT); 145 145 146 - mt76_clear(dev, MT_TXOP_CTRL_CFG, BIT(20)); 147 - mt76_clear(dev, MT_TXOP_HLDR_ET, BIT(1)); 146 + mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN); 147 + mt76_clear(dev, MT_TXOP_HLDR_ET, MT_TXOP_HLDR_TX40M_BLK_EN); 148 148 149 149 /* wait tx dma to stop */ 150 150 for (i = 0; i < 2000; i++) { ··· 210 210 mt76_wr(dev, MT_TX_RTS_CFG, rts_cfg); 211 211 212 212 return 0; 213 - } 214 - 215 - void mt76x2u_mac_resume(struct mt76x02_dev *dev) 216 - { 217 - mt76_wr(dev, MT_MAC_SYS_CTRL, 218 - MT_MAC_SYS_CTRL_ENABLE_TX | 219 - MT_MAC_SYS_CTRL_ENABLE_RX); 220 - mt76_set(dev, MT_TXOP_CTRL_CFG, BIT(20)); 221 - mt76_set(dev, MT_TXOP_HLDR_ET, BIT(1)); 222 213 }
+2 -3
drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
··· 57 57 58 58 mt76_set_channel(&dev->mt76); 59 59 60 - mt76_clear(dev, MT_TXOP_CTRL_CFG, BIT(20)); 61 - mt76_clear(dev, MT_TXOP_HLDR_ET, BIT(1)); 62 60 mt76x2_mac_stop(dev, false); 63 61 64 62 err = mt76x2u_phy_set_channel(dev, chandef); 65 63 66 - mt76x2u_mac_resume(dev); 64 + mt76x2_mac_resume(dev); 65 + mt76x02_edcca_init(dev, true); 67 66 68 67 clear_bit(MT76_RESET, &dev->mt76.state); 69 68 mt76_txq_schedule_all(&dev->mt76);
+2 -1
drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c
··· 43 43 mt76x02_mcu_calibrate(dev, MCU_CAL_TX_SHAPING, 0); 44 44 45 45 if (!mac_stopped) 46 - mt76x2u_mac_resume(dev); 46 + mt76x2_mac_resume(dev); 47 47 mt76x2_apply_gain_adj(dev); 48 + mt76x02_edcca_init(dev, true); 48 49 49 50 dev->cal.channel_cal_done = true; 50 51 }
+63 -91
drivers/net/wireless/mediatek/mt76/usb.c
··· 324 324 } 325 325 326 326 static int 327 - mt76u_buf_alloc_sg(struct mt76_dev *dev, struct mt76u_buf *buf, 328 - int nsgs, int len, int sglen, gfp_t gfp) 327 + mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q, 328 + struct mt76u_buf *buf, int nsgs, gfp_t gfp) 329 329 { 330 - buf->urb = usb_alloc_urb(0, gfp); 331 - if (!buf->urb) 332 - return -ENOMEM; 333 - 334 - buf->urb->sg = devm_kcalloc(dev->dev, nsgs, sizeof(*buf->urb->sg), 335 - gfp); 336 - if (!buf->urb->sg) 337 - return -ENOMEM; 338 - 339 - sg_init_table(buf->urb->sg, nsgs); 340 - buf->dev = dev; 341 - 342 - return mt76u_fill_rx_sg(dev, buf, nsgs, len, sglen); 330 + if (dev->usb.sg_en) { 331 + return mt76u_fill_rx_sg(dev, buf, nsgs, q->buf_size, 332 + SKB_WITH_OVERHEAD(q->buf_size)); 333 + } else { 334 + buf->buf = page_frag_alloc(&q->rx_page, q->buf_size, gfp); 335 + return buf->buf ? 0 : -ENOMEM; 336 + } 343 337 } 344 338 345 - int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf, 346 - int len, int data_len, gfp_t gfp) 339 + static int 340 + mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf) 347 341 { 348 342 struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; 349 343 350 - buf->urb = usb_alloc_urb(0, gfp); 344 + buf->len = SKB_WITH_OVERHEAD(q->buf_size); 345 + buf->dev = dev; 346 + 347 + buf->urb = usb_alloc_urb(0, GFP_KERNEL); 351 348 if (!buf->urb) 352 349 return -ENOMEM; 353 350 354 - buf->buf = page_frag_alloc(&q->rx_page, len, gfp); 355 - if (!buf->buf) 356 - return -ENOMEM; 351 + if (dev->usb.sg_en) { 352 + buf->urb->sg = devm_kcalloc(dev->dev, MT_SG_MAX_SIZE, 353 + sizeof(*buf->urb->sg), 354 + GFP_KERNEL); 355 + if (!buf->urb->sg) 356 + return -ENOMEM; 357 357 358 - buf->len = data_len; 359 - buf->dev = dev; 358 + sg_init_table(buf->urb->sg, MT_SG_MAX_SIZE); 359 + } 360 360 361 - return 0; 361 + return mt76u_refill_rx(dev, q, buf, MT_SG_MAX_SIZE, GFP_KERNEL); 362 362 } 363 363 364 - void mt76u_buf_free(struct mt76u_buf *buf) 364 + static void mt76u_buf_free(struct 
mt76u_buf *buf) 365 365 { 366 366 struct urb *urb = buf->urb; 367 - struct scatterlist *sg; 368 367 int i; 369 368 370 - for (i = 0; i < urb->num_sgs; i++) { 371 - sg = &urb->sg[i]; 372 - if (!sg) 373 - continue; 369 + for (i = 0; i < urb->num_sgs; i++) 370 + skb_free_frag(sg_virt(&urb->sg[i])); 374 371 375 - skb_free_frag(sg_virt(sg)); 376 - } 377 372 if (buf->buf) 378 373 skb_free_frag(buf->buf); 379 374 380 375 usb_free_urb(buf->urb); 381 376 } 382 - EXPORT_SYMBOL_GPL(mt76u_buf_free); 383 377 384 - int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index, 385 - struct mt76u_buf *buf, gfp_t gfp, 386 - usb_complete_t complete_fn, void *context) 378 + static void 379 + mt76u_fill_bulk_urb(struct mt76_dev *dev, int dir, int index, 380 + struct mt76u_buf *buf, usb_complete_t complete_fn, 381 + void *context) 387 382 { 388 383 struct usb_interface *intf = to_usb_interface(dev->dev); 389 384 struct usb_device *udev = interface_to_usbdev(intf); ··· 392 397 393 398 usb_fill_bulk_urb(buf->urb, udev, pipe, data, buf->len, 394 399 complete_fn, context); 400 + } 401 + 402 + static int 403 + mt76u_submit_buf(struct mt76_dev *dev, int dir, int index, 404 + struct mt76u_buf *buf, gfp_t gfp, 405 + usb_complete_t complete_fn, void *context) 406 + { 407 + mt76u_fill_bulk_urb(dev, dir, index, buf, complete_fn, 408 + context); 395 409 trace_submit_urb(dev, buf->urb); 396 410 397 411 return usb_submit_urb(buf->urb, gfp); 398 412 } 399 - EXPORT_SYMBOL_GPL(mt76u_submit_buf); 400 413 401 414 static inline struct mt76u_buf 402 415 *mt76u_get_next_rx_entry(struct mt76_queue *q) ··· 467 464 __skb_put(skb, data_len); 468 465 len -= data_len; 469 466 470 - while (len > 0 && urb->num_sgs) { 467 + while (len > 0 && nsgs < urb->num_sgs) { 471 468 data_len = min_t(int, len, urb->sg[nsgs].length); 472 469 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, 473 470 sg_page(&urb->sg[nsgs]), ··· 513 510 spin_unlock_irqrestore(&q->lock, flags); 514 511 } 515 512 516 - static int 517 - 
mt76u_refill_rx(struct mt76_dev *dev, struct mt76_queue *q, 518 - struct mt76u_buf *buf, int nsgs) 519 - { 520 - if (dev->usb.sg_en) { 521 - return mt76u_fill_rx_sg(dev, buf, nsgs, q->buf_size, 522 - SKB_WITH_OVERHEAD(q->buf_size)); 523 - } else { 524 - buf->buf = page_frag_alloc(&q->rx_page, q->buf_size, 525 - GFP_ATOMIC); 526 - return buf->buf ? 0 : -ENOMEM; 527 - } 528 - } 529 - 530 513 static void mt76u_rx_tasklet(unsigned long data) 531 514 { 532 515 struct mt76_dev *dev = (struct mt76_dev *)data; ··· 529 540 530 541 count = mt76u_process_rx_entry(dev, buf); 531 542 if (count > 0) { 532 - err = mt76u_refill_rx(dev, q, buf, count); 543 + err = mt76u_refill_rx(dev, q, buf, count, 544 + GFP_ATOMIC); 533 545 if (err < 0) 534 546 break; 535 547 } ··· 567 577 568 578 static int mt76u_alloc_rx(struct mt76_dev *dev) 569 579 { 580 + struct mt76_usb *usb = &dev->usb; 570 581 struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; 571 582 int i, err; 583 + 584 + usb->mcu.data = devm_kmalloc(dev->dev, MCU_RESP_URB_SIZE, GFP_KERNEL); 585 + if (!usb->mcu.data) 586 + return -ENOMEM; 572 587 573 588 spin_lock_init(&q->rx_page_lock); 574 589 spin_lock_init(&q->lock); ··· 586 591 q->buf_size = dev->usb.sg_en ? 
MT_RX_BUF_SIZE : PAGE_SIZE; 587 592 q->ndesc = MT_NUM_RX_ENTRIES; 588 593 for (i = 0; i < q->ndesc; i++) { 589 - if (dev->usb.sg_en) 590 - err = mt76u_buf_alloc_sg(dev, &q->entry[i].ubuf, 591 - MT_SG_MAX_SIZE, q->buf_size, 592 - SKB_WITH_OVERHEAD(q->buf_size), 593 - GFP_KERNEL); 594 - else 595 - err = mt76u_buf_alloc(dev, &q->entry[i].ubuf, 596 - q->buf_size, 597 - SKB_WITH_OVERHEAD(q->buf_size), 598 - GFP_KERNEL); 594 + err = mt76u_buf_alloc(dev, &q->entry[i].ubuf); 599 595 if (err < 0) 600 596 return err; 601 597 } ··· 710 724 } 711 725 712 726 static int 713 - mt76u_tx_build_sg(struct sk_buff *skb, struct urb *urb) 727 + mt76u_tx_build_sg(struct mt76_dev *dev, struct sk_buff *skb, 728 + struct urb *urb) 714 729 { 715 - int nsgs = 1 + skb_shinfo(skb)->nr_frags; 716 - struct sk_buff *iter; 730 + if (!dev->usb.sg_en) 731 + return 0; 717 732 718 - skb_walk_frags(skb, iter) 719 - nsgs += 1 + skb_shinfo(iter)->nr_frags; 720 - 721 - memset(urb->sg, 0, sizeof(*urb->sg) * MT_SG_MAX_SIZE); 722 - 723 - nsgs = min_t(int, MT_SG_MAX_SIZE, nsgs); 724 - sg_init_marker(urb->sg, nsgs); 725 - urb->num_sgs = nsgs; 726 - 727 - return skb_to_sgvec_nomark(skb, urb->sg, 0, skb->len); 733 + sg_init_table(urb->sg, MT_SG_MAX_SIZE); 734 + urb->num_sgs = skb_to_sgvec(skb, urb->sg, 0, skb->len); 735 + return urb->num_sgs; 728 736 } 729 737 730 738 static int ··· 726 746 struct sk_buff *skb, struct mt76_wcid *wcid, 727 747 struct ieee80211_sta *sta) 728 748 { 729 - struct usb_interface *intf = to_usb_interface(dev->dev); 730 - struct usb_device *udev = interface_to_usbdev(intf); 731 - u8 *data = NULL, ep = q2ep(q->hw_idx); 732 749 struct mt76u_buf *buf; 733 750 u16 idx = q->tail; 734 - unsigned int pipe; 735 751 int err; 736 752 737 753 if (q->queued == q->ndesc) ··· 739 763 return err; 740 764 741 765 buf = &q->entry[idx].ubuf; 766 + buf->buf = skb->data; 767 + buf->len = skb->len; 742 768 buf->done = false; 743 769 744 - if (dev->usb.sg_en) { 745 - err = mt76u_tx_build_sg(skb, buf->urb); 
746 - if (err < 0) 747 - return err; 748 - } else { 749 - data = skb->data; 750 - } 770 + err = mt76u_tx_build_sg(dev, skb, buf->urb); 771 + if (err < 0) 772 + return err; 751 773 752 - pipe = usb_sndbulkpipe(udev, dev->usb.out_ep[ep]); 753 - usb_fill_bulk_urb(buf->urb, udev, pipe, data, skb->len, 754 - mt76u_complete_tx, buf); 774 + mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx), 775 + buf, mt76u_complete_tx, buf); 755 776 756 777 q->tail = (q->tail + 1) % q->ndesc; 757 778 q->entry[idx].skb = skb; ··· 906 933 INIT_DELAYED_WORK(&usb->stat_work, mt76u_tx_status_data); 907 934 skb_queue_head_init(&dev->rx_skb[MT_RXQ_MAIN]); 908 935 909 - init_completion(&usb->mcu.cmpl); 910 936 mutex_init(&usb->mcu.mutex); 911 937 912 938 mutex_init(&usb->usb_ctrl_mtx);
-57
drivers/net/wireless/mediatek/mt76/usb_mcu.c
··· 1 - /* 2 - * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> 3 - * 4 - * Permission to use, copy, modify, and/or distribute this software for any 5 - * purpose with or without fee is hereby granted, provided that the above 6 - * copyright notice and this permission notice appear in all copies. 7 - * 8 - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 9 - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 10 - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 11 - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 12 - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 13 - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 14 - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 - */ 16 - 17 - #include "mt76.h" 18 - 19 - void mt76u_mcu_complete_urb(struct urb *urb) 20 - { 21 - struct completion *cmpl = urb->context; 22 - 23 - complete(cmpl); 24 - } 25 - EXPORT_SYMBOL_GPL(mt76u_mcu_complete_urb); 26 - 27 - int mt76u_mcu_init_rx(struct mt76_dev *dev) 28 - { 29 - struct mt76_usb *usb = &dev->usb; 30 - int err; 31 - 32 - err = mt76u_buf_alloc(dev, &usb->mcu.res, MCU_RESP_URB_SIZE, 33 - MCU_RESP_URB_SIZE, GFP_KERNEL); 34 - if (err < 0) 35 - return err; 36 - 37 - err = mt76u_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP, 38 - &usb->mcu.res, GFP_KERNEL, 39 - mt76u_mcu_complete_urb, 40 - &usb->mcu.cmpl); 41 - if (err < 0) 42 - mt76u_buf_free(&usb->mcu.res); 43 - 44 - return err; 45 - } 46 - EXPORT_SYMBOL_GPL(mt76u_mcu_init_rx); 47 - 48 - void mt76u_mcu_deinit(struct mt76_dev *dev) 49 - { 50 - struct mt76u_buf *buf = &dev->usb.mcu.res; 51 - 52 - if (buf->urb) { 53 - usb_kill_urb(buf->urb); 54 - mt76u_buf_free(buf); 55 - } 56 - } 57 - EXPORT_SYMBOL_GPL(mt76u_mcu_deinit);
+5 -2
drivers/net/wireless/realtek/rtlwifi/base.c
··· 430 430 SET_IEEE80211_PERM_ADDR(hw, rtlefuse->dev_addr); 431 431 } else { 432 432 u8 rtlmac1[] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0x00 }; 433 + 433 434 get_random_bytes((rtlmac1 + (ETH_ALEN - 1)), 1); 434 435 SET_IEEE80211_PERM_ADDR(hw, rtlmac1); 435 436 } ··· 460 459 (void *)rtl_fwevt_wq_callback); 461 460 INIT_DELAYED_WORK(&rtlpriv->works.c2hcmd_wq, 462 461 (void *)rtl_c2hcmd_wq_callback); 463 - 464 462 } 465 463 466 464 void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq) ··· 618 618 u8 rate_flag = info->control.rates[0].flags; 619 619 u8 sgi_40 = 0, sgi_20 = 0, bw_40 = 0; 620 620 u8 sgi_80 = 0, bw_80 = 0; 621 + 621 622 tcb_desc->use_shortgi = false; 622 623 623 624 if (sta == NULL) ··· 1851 1850 1852 1851 return 0; 1853 1852 } 1853 + 1854 1854 int rtl_tx_agg_oper(struct ieee80211_hw *hw, 1855 1855 struct ieee80211_sta *sta, u16 tid) 1856 1856 { ··· 2075 2073 * busytraffic we don't change channel 2076 2074 */ 2077 2075 if (mac->link_state >= MAC80211_LINKED) { 2078 - 2079 2076 /* (1) get aver_rx_cnt_inperiod & aver_tx_cnt_inperiod */ 2080 2077 for (idx = 0; idx <= 2; idx++) { 2081 2078 rtlpriv->link_info.num_rx_in4period[idx] = ··· 2234 2233 mod_timer(&rtlpriv->works.watchdog_timer, 2235 2234 jiffies + MSECS(RTL_WATCH_DOG_TIME)); 2236 2235 } 2236 + 2237 2237 void rtl_fwevt_wq_callback(void *data) 2238 2238 { 2239 2239 struct rtl_works *rtlworks = ··· 2386 2384 2387 2385 rtlpriv->cfg->ops->dualmac_easy_concurrent(hw); 2388 2386 } 2387 + 2389 2388 /********************************************************* 2390 2389 * 2391 2390 * frame process functions
+8
drivers/net/wireless/realtek/rtlwifi/core.c
··· 188 188 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 189 189 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); 190 190 struct rtl_tcb_desc tcb_desc; 191 + 191 192 memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); 192 193 193 194 if (unlikely(is_hal_stop(rtlhal) || ppsc->rfpwr_state != ERFON)) ··· 347 346 348 347 mutex_unlock(&rtlpriv->locks.conf_mutex); 349 348 } 349 + 350 350 static int rtl_op_change_interface(struct ieee80211_hw *hw, 351 351 struct ieee80211_vif *vif, 352 352 enum nl80211_iftype new_type, bool p2p) 353 353 { 354 354 struct rtl_priv *rtlpriv = rtl_priv(hw); 355 355 int ret; 356 + 356 357 rtl_op_remove_interface(hw, vif); 357 358 358 359 vif->type = new_type; ··· 884 881 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, 885 882 (u8 *)(&mac->rx_conf)); 886 883 } 884 + 887 885 static int rtl_op_sta_add(struct ieee80211_hw *hw, 888 886 struct ieee80211_vif *vif, 889 887 struct ieee80211_sta *sta) ··· 937 933 { 938 934 struct rtl_priv *rtlpriv = rtl_priv(hw); 939 935 struct rtl_sta_info *sta_entry; 936 + 940 937 if (sta) { 941 938 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG, 942 939 "Remove sta addr is %pM\n", sta->addr); ··· 950 945 } 951 946 return 0; 952 947 } 948 + 953 949 static int _rtl_get_hal_qnum(u16 queue) 954 950 { 955 951 int qnum; ··· 1072 1066 /*TODO: reference to enum ieee80211_bss_change */ 1073 1067 if (changed & BSS_CHANGED_ASSOC) { 1074 1068 u8 mstatus; 1069 + 1075 1070 if (bss_conf->assoc) { 1076 1071 struct ieee80211_sta *sta = NULL; 1077 1072 u8 keep_alive = 10; ··· 1301 1294 * set in sta_add, and will be NULL here */ 1302 1295 if (vif->type == NL80211_IFTYPE_STATION) { 1303 1296 struct rtl_sta_info *sta_entry; 1297 + 1304 1298 sta_entry = (struct rtl_sta_info *)sta->drv_priv; 1305 1299 sta_entry->wireless_mode = mac->mode; 1306 1300 }
+5 -3
drivers/net/wireless/realtek/rtlwifi/efuse.c
··· 474 474 475 475 if (word_en != 0x0F) { 476 476 u8 tmpdata[8]; 477 + 477 478 memcpy(tmpdata, 478 479 &rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base], 479 480 8); ··· 488 487 break; 489 488 } 490 489 } 491 - 492 490 } 493 491 494 492 efuse_power_switch(hw, true, false); ··· 662 662 static void efuse_read_all_map(struct ieee80211_hw *hw, u8 *efuse) 663 663 { 664 664 struct rtl_priv *rtlpriv = rtl_priv(hw); 665 + 665 666 efuse_power_switch(hw, false, true); 666 667 read_efuse(hw, 0, rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE], efuse); 667 668 efuse_power_switch(hw, false, false); ··· 813 812 if (0x0F != (badworden & 0x0F)) { 814 813 u8 reorg_offset = offset; 815 814 u8 reorg_worden = badworden; 815 + 816 816 efuse_pg_packet_write(hw, reorg_offset, 817 817 reorg_worden, 818 818 originaldata); ··· 903 901 if (0x0F != (badworden & 0x0F)) { 904 902 u8 reorg_offset = tmp_pkt.offset; 905 903 u8 reorg_worden = badworden; 904 + 906 905 efuse_pg_packet_write(hw, reorg_offset, 907 906 reorg_worden, 908 907 originaldata); ··· 960 957 961 958 while (continual && (efuse_addr < (EFUSE_MAX_SIZE - 962 959 rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN]))) { 963 - 964 960 if (write_state == PG_STATE_HEADER) { 965 961 dataempty = true; 966 962 badworden = 0x0F; ··· 1116 1114 u16 tmpv16; 1117 1115 1118 1116 if (pwrstate && (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE)) { 1119 - 1120 1117 if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192CE && 1121 1118 rtlhal->hw_type != HARDWARE_TYPE_RTL8192DE) { 1122 1119 rtl_write_byte(rtlpriv, ··· 1220 1219 static u8 efuse_calculate_word_cnts(u8 word_en) 1221 1220 { 1222 1221 u8 word_cnts = 0; 1222 + 1223 1223 if (!(word_en & BIT(0))) 1224 1224 word_cnts++; 1225 1225 if (!(word_en & BIT(1)))
+1
drivers/net/wireless/realtek/rtlwifi/ps.c
··· 718 718 static u8 p2p_oui_ie_type[4] = {0x50, 0x6f, 0x9a, 0x09}; 719 719 u8 noa_num, index , i, noa_index = 0; 720 720 bool find_p2p_ie = false , find_p2p_ps_ie = false; 721 + 721 722 pos = (u8 *)mgmt->u.beacon.variable; 722 723 end = data + len; 723 724 ie = NULL;
+2
drivers/net/wireless/realtek/rtlwifi/rc.c
··· 236 236 !(skb->protocol == cpu_to_be16(ETH_P_PAE))) { 237 237 if (ieee80211_is_data_qos(fc)) { 238 238 u8 tid = rtl_get_tid(skb); 239 + 239 240 if (_rtl_tx_aggr_check(rtlpriv, sta_entry, 240 241 tid)) { 241 242 sta_entry->tids[tid].agg.agg_state = ··· 294 293 struct ieee80211_sta *sta, void *priv_sta) 295 294 { 296 295 struct rtl_rate_priv *rate_priv = priv_sta; 296 + 297 297 kfree(rate_priv); 298 298 } 299 299
-1
drivers/net/wireless/realtek/rtlwifi/regd.c
··· 41 41 NL80211_RRF_PASSIVE_SCAN | \ 42 42 NL80211_RRF_NO_OFDM) 43 43 44 - 45 44 /* 5G chan 36 - chan 64*/ 46 45 #define RTL819x_5GHZ_5150_5350 \ 47 46 REG_RULE(5150-10, 5350+10, 80, 0, 30, 0)
+2 -4
drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
··· 425 425 if (dm_digtable->presta_cstate == dm_digtable->cursta_cstate || 426 426 dm_digtable->cursta_cstate == DIG_STA_BEFORE_CONNECT || 427 427 dm_digtable->cursta_cstate == DIG_STA_CONNECT) { 428 - 429 428 if (dm_digtable->cursta_cstate != DIG_STA_DISCONNECT) { 430 429 dm_digtable->rssi_val_min = 431 430 rtl92c_dm_initial_gain_min_pwdb(hw); ··· 503 504 rtl92c_dm_cck_packet_detection_thresh(hw); 504 505 505 506 dm_digtable->presta_cstate = dm_digtable->cursta_cstate; 506 - 507 507 } 508 508 509 509 static void rtl92c_dm_dig(struct ieee80211_hw *hw) ··· 605 607 void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw) 606 608 { 607 609 struct rtl_priv *rtlpriv = rtl_priv(hw); 610 + 608 611 rtlpriv->dm.current_turbo_edca = false; 609 612 rtlpriv->dm.is_any_nonbepkts = false; 610 613 rtlpriv->dm.is_cur_rdlstate = false; ··· 659 660 660 661 if ((bt_change_edca) || ((!rtlpriv->dm.is_any_nonbepkts) && 661 662 (!rtlpriv->dm.disable_framebursting))) { 662 - 663 663 cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt; 664 664 cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt; 665 665 ··· 683 685 } else { 684 686 if (rtlpriv->dm.current_turbo_edca) { 685 687 u8 tmp = AC0_BE; 688 + 686 689 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM, 687 690 &tmp); 688 691 rtlpriv->dm.current_turbo_edca = false; ··· 1633 1634 static void rtl92c_bt_ant_isolation(struct ieee80211_hw *hw, u8 tmp1byte) 1634 1635 { 1635 1636 struct rtl_priv *rtlpriv = rtl_priv(hw); 1636 - 1637 1637 1638 1638 /* Only enable HW BT coexist when BT in "Busy" state. */ 1639 1639 if (rtlpriv->mac80211.vendor == PEER_CISCO &&
+2 -3
drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
··· 18 18 19 19 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CU) { 20 20 u32 value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL); 21 + 21 22 if (enable) 22 23 value32 |= MCUFWDL_EN; 23 24 else ··· 26 25 rtl_write_dword(rtlpriv, REG_MCUFWDL, value32); 27 26 } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CE) { 28 27 u8 tmp; 29 - if (enable) { 30 28 29 + if (enable) { 31 30 tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); 32 31 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, 33 32 tmp | 0x04); ··· 38 37 tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2); 39 38 rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7); 40 39 } else { 41 - 42 40 tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL); 43 41 rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe); 44 42 ··· 621 621 RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, 622 622 "rtl92c_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n", 623 623 u1rsvdpageloc, 3); 624 - 625 624 626 625 skb = dev_alloc_skb(totalpacketlen); 627 626 skb_put_data(skb, &reserved_page_packet, totalpacketlen);
-1
drivers/net/wireless/realtek/rtlwifi/rtl8192c/main.c
··· 4 4 #include "../wifi.h" 5 5 #include <linux/module.h> 6 6 7 - 8 7 MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>"); 9 8 MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>"); 10 9 MODULE_AUTHOR("Georgia <georgia@realtek.com>");
+1
drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
··· 747 747 struct rtl_priv *rtlpriv = rtl_priv(hw); 748 748 struct rtl_phy *rtlphy = &(rtlpriv->phy); 749 749 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 750 + 750 751 if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version)) { 751 752 if (channel == 6 && 752 753 rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) {
+6 -1
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
··· 144 144 case HW_VAR_BASIC_RATE:{ 145 145 u16 rate_cfg = ((u16 *) val)[0]; 146 146 u8 rate_index = 0; 147 + 147 148 rate_cfg &= 0x15f; 148 149 rate_cfg |= 0x01; 149 150 rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff); ··· 198 197 case HW_VAR_ACK_PREAMBLE:{ 199 198 u8 reg_tmp; 200 199 u8 short_preamble = (bool)*val; 200 + 201 201 reg_tmp = (mac->cur_40_prime_sc) << 5; 202 202 if (short_preamble) 203 203 reg_tmp |= 0x80; ··· 295 293 } 296 294 case HW_VAR_AC_PARAM:{ 297 295 u8 e_aci = *(val); 296 + 298 297 rtl92c_dm_init_edca_turbo(hw); 299 298 300 299 if (rtlpci->acm_method != EACMWAY2_SW) ··· 459 456 break; 460 457 case HW_VAR_AID:{ 461 458 u16 u2btmp; 459 + 462 460 u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT); 463 461 u2btmp &= 0xC000; 464 462 rtl_write_word(rtlpriv, REG_BCN_PSR_RPT, (u2btmp | ··· 665 661 rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00); 666 662 if (rtlpriv->btcoexist.bt_coexistence) { 667 663 u32 value32; 664 + 668 665 value32 = rtl_read_dword(rtlpriv, REG_APS_FSMCO); 669 666 value32 |= (SOP_ABG | SOP_AMB | XOP_BTCK); 670 667 rtl_write_dword(rtlpriv, REG_APS_FSMCO, value32); ··· 1250 1245 void rtl92ce_set_qos(struct ieee80211_hw *hw, int aci) 1251 1246 { 1252 1247 struct rtl_priv *rtlpriv = rtl_priv(hw); 1248 + 1253 1249 rtl92c_dm_init_edca_turbo(hw); 1254 1250 switch (aci) { 1255 1251 case AC1_BK: ··· 2284 2278 /* 0:Disable BT control A-MPDU, 1:Enable BT control A-MPDU. */ 2285 2279 rtlpriv->btcoexist.reg_bt_sco = 0; 2286 2280 } 2287 - 2288 2281 2289 2282 void rtl8192ce_bt_hw_init(struct ieee80211_hw *hw) 2290 2283 {
+1
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
··· 443 443 RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) { 444 444 bool rtstatus; 445 445 u32 initializecount = 0; 446 + 446 447 do { 447 448 initializecount++; 448 449 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
-1
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/table.c
··· 3 3 4 4 #include "table.h" 5 5 6 - 7 6 u32 RTL8192CEPHY_REG_2TARRAY[PHY_REG_2TARRAY_LENGTH] = { 8 7 0x024, 0x0011800f, 9 8 0x028, 0x00ffdb83,
+8
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
··· 36 36 static u8 _rtl92c_evm_db_to_percentage(s8 value) 37 37 { 38 38 s8 ret_val; 39 + 39 40 ret_val = value; 40 41 41 42 if (ret_val >= 0) ··· 110 109 111 110 if (is_cck_rate) { 112 111 u8 report, cck_highpwr; 112 + 113 113 cck_buf = (struct phy_sts_cck_8192s_t *)p_drvinfo; 114 114 115 115 if (ppsc->rfpwr_state == ERFON) ··· 122 120 123 121 if (!cck_highpwr) { 124 122 u8 cck_agc_rpt = cck_buf->cck_agc_rpt; 123 + 125 124 report = cck_buf->cck_agc_rpt & 0xc0; 126 125 report = report >> 6; 127 126 switch (report) { ··· 141 138 } 142 139 } else { 143 140 u8 cck_agc_rpt = cck_buf->cck_agc_rpt; 141 + 144 142 report = p_drvinfo->cfosho[0] & 0x60; 145 143 report = report >> 5; 146 144 switch (report) { ··· 186 182 /* (3) Get Signal Quality (EVM) */ 187 183 if (packet_match_bssid) { 188 184 u8 sq; 185 + 189 186 if (pstats->rx_pwdb_all > 40) 190 187 sq = 100; 191 188 else { ··· 323 318 struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc; 324 319 struct ieee80211_hdr *hdr; 325 320 u32 phystatus = GET_RX_DESC_PHYST(pdesc); 321 + 326 322 stats->length = (u16) GET_RX_DESC_PKT_LEN(pdesc); 327 323 stats->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) * 328 324 RX_DRV_INFO_SIZE_UNIT; ··· 503 497 504 498 if (sta) { 505 499 u8 ampdu_density = sta->ht_cap.ampdu_density; 500 + 506 501 SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density); 507 502 } 508 503 ··· 740 733 void rtl92ce_tx_polling(struct ieee80211_hw *hw, u8 hw_queue) 741 734 { 742 735 struct rtl_priv *rtlpriv = rtl_priv(hw); 736 + 743 737 if (hw_queue == BEACON_QUEUE) { 744 738 rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG, BIT(4)); 745 739 } else {
+8 -1
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
··· 747 747 u8 queue_sel) 748 748 { 749 749 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 750 + 750 751 if (IS_NORMAL_CHIP(rtlhal->version)) 751 752 _rtl92cu_init_chipn_queue_priority(hw, wmm_enable, out_ep_num, 752 753 queue_sel); ··· 814 813 u8 wmm_enable = false; /* TODO */ 815 814 u8 out_ep_nums = rtlusb->out_ep_nums; 816 815 u8 queue_sel = rtlusb->out_queue_sel; 816 + 817 817 err = _rtl92cu_init_power_on(hw); 818 818 819 819 if (err) { ··· 1015 1013 e. SYS_FUNC_EN 0x02[7:0] = 0x14 reset BB state machine 1016 1014 ***************************************/ 1017 1015 u8 erfpath = 0, value8 = 0; 1016 + 1018 1017 rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF); 1019 1018 rtl_set_rfreg(hw, (enum radio_path)erfpath, 0x0, MASKBYTE0, 0x0); 1020 1019 ··· 1207 1204 struct rtl_priv *rtlpriv = rtl_priv(hw); 1208 1205 struct rtl_hal *rtlhal = rtl_hal(rtlpriv); 1209 1206 u8 tmp1byte = 0; 1207 + 1210 1208 if (IS_NORMAL_CHIP(rtlhal->version)) { 1211 1209 tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2); 1212 1210 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, ··· 1357 1353 1358 1354 if (check_bssid) { 1359 1355 u8 tmp; 1356 + 1360 1357 if (IS_NORMAL_CHIP(rtlhal->version)) { 1361 1358 reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN); 1362 1359 tmp = BIT(4); ··· 1370 1365 _rtl92cu_set_bcn_ctrl_reg(hw, 0, tmp); 1371 1366 } else { 1372 1367 u8 tmp; 1368 + 1373 1369 if (IS_NORMAL_CHIP(rtlhal->version)) { 1374 1370 reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN); 1375 1371 tmp = BIT(4); ··· 1637 1631 case HW_VAR_ACK_PREAMBLE:{ 1638 1632 u8 reg_tmp; 1639 1633 u8 short_preamble = (bool)*val; 1634 + 1640 1635 reg_tmp = 0; 1641 1636 if (short_preamble) 1642 1637 reg_tmp |= 0x80; ··· 1888 1881 break; 1889 1882 case HW_VAR_KEEP_ALIVE:{ 1890 1883 u8 array[2]; 1884 + 1891 1885 array[0] = 0xff; 1892 1886 array[1] = *((u8 *)val); 1893 1887 rtl92c_fill_h2c_cmd(hw, H2C_92C_KEEP_ALIVE_CTRL, 2, ··· 1971 1963 if (nmode && ((curtxbw_40mhz && 1972 1964 curshortgi_40mhz) || (!curtxbw_40mhz && 1973 
1965 curshortgi_20mhz))) { 1974 - 1975 1966 ratr_value |= 0x10000000; 1976 1967 tmp_ratr_value = (ratr_value >> 12); 1977 1968
-4
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.h
··· 14 14 #define TX_TOTAL_PAGE_NUMBER 0xF8 15 15 #define TX_PAGE_BOUNDARY (TX_TOTAL_PAGE_NUMBER + 1) 16 16 17 - 18 17 #define CHIP_B_PAGE_NUM_PUBQ 0xE7 19 18 20 19 /* For Test Chip Setting 21 20 * (HPQ + LPQ + PUBQ) shall be TX_TOTAL_PAGE_NUMBER */ 22 21 #define CHIP_A_PAGE_NUM_PUBQ 0x7E 23 - 24 22 25 23 /* For Chip A Setting */ 26 24 #define WMM_CHIP_A_TX_TOTAL_PAGE_NUMBER 0xF5 ··· 28 30 #define WMM_CHIP_A_PAGE_NUM_PUBQ 0xA3 29 31 #define WMM_CHIP_A_PAGE_NUM_HPQ 0x29 30 32 #define WMM_CHIP_A_PAGE_NUM_LPQ 0x29 31 - 32 - 33 33 34 34 /* Note: For Chip B Setting ,modify later */ 35 35 #define WMM_CHIP_B_TX_TOTAL_PAGE_NUMBER 0xF5
+7 -1
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
··· 24 24 #define RX_EVM rx_evm_percentage 25 25 #define RX_SIGQ rx_mimo_sig_qual 26 26 27 - 28 27 void rtl92c_read_chip_version(struct ieee80211_hw *hw) 29 28 { 30 29 struct rtl_priv *rtlpriv = rtl_priv(hw); ··· 142 143 } while (++count); 143 144 return status; 144 145 } 146 + 145 147 /** 146 148 * rtl92c_init_LLT_table - Init LLT table 147 149 * @io: io callback ··· 189 189 } 190 190 return rst; 191 191 } 192 + 192 193 void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index, 193 194 u8 *p_macaddr, bool is_group, u8 enc_algo, 194 195 bool is_wepkey, bool clear_all) ··· 371 370 void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size) 372 371 { 373 372 struct rtl_priv *rtlpriv = rtl_priv(hw); 373 + 374 374 rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, size); 375 375 } 376 376 ··· 649 647 pstats->RX_SIGQ[1] = -1; 650 648 if (is_cck_rate) { 651 649 u8 report, cck_highpwr; 650 + 652 651 cck_buf = (struct phy_sts_cck_8192s_t *)p_drvinfo; 653 652 if (!in_powersavemode) 654 653 cck_highpwr = rtlphy->cck_high_power; ··· 657 654 cck_highpwr = false; 658 655 if (!cck_highpwr) { 659 656 u8 cck_agc_rpt = cck_buf->cck_agc_rpt; 657 + 660 658 report = cck_buf->cck_agc_rpt & 0xc0; 661 659 report = report >> 6; 662 660 switch (report) { ··· 676 672 } 677 673 } else { 678 674 u8 cck_agc_rpt = cck_buf->cck_agc_rpt; 675 + 679 676 report = p_drvinfo->cfosho[0] & 0x60; 680 677 report = report >> 5; 681 678 switch (report) { ··· 699 694 pstats->recvsignalpower = rx_pwr_all; 700 695 if (packet_match_bssid) { 701 696 u8 sq; 697 + 702 698 if (pstats->rx_pwdb_all > 40) 703 699 sq = 100; 704 700 else {
-3
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.h
··· 18 18 void rtl92c_disable_interrupt(struct ieee80211_hw *hw); 19 19 void rtl92c_set_qos(struct ieee80211_hw *hw, int aci); 20 20 21 - 22 21 /*--------------------------------------------------------------- 23 22 * Hardware init functions 24 23 *---------------------------------------------------------------*/ ··· 128 129 /*--------------------------------------------------------------- 129 130 * Card disable functions 130 131 *---------------------------------------------------------------*/ 131 - 132 - 133 132 134 133 #endif
+7
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
··· 108 108 struct rtl_ep_map *ep_map) 109 109 { 110 110 struct rtl_priv *rtlpriv = rtl_priv(hw); 111 + 111 112 if (bwificfg) { /* for WMM */ 112 113 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, 113 114 "USB 3EP Setting for WMM.....\n"); ··· 142 141 ep_map->ep_mapping[RTL_TXQ_BCN] = 2; 143 142 ep_map->ep_mapping[RTL_TXQ_HI] = 2; 144 143 } 144 + 145 145 static int _out_ep_mapping(struct ieee80211_hw *hw) 146 146 { 147 147 int err = 0; ··· 176 174 return err; 177 175 178 176 } 177 + 179 178 /* endpoint mapping */ 180 179 int rtl8192cu_endpoint_mapping(struct ieee80211_hw *hw) 181 180 { 182 181 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 183 182 int error = 0; 183 + 184 184 if (likely(IS_NORMAL_CHIP(rtlhal->version))) 185 185 error = configvernoutep(hw); 186 186 else ··· 446 442 SET_TX_DESC_LAST_SEG(txdesc, 1); 447 443 SET_TX_DESC_FIRST_SEG(txdesc, 1); 448 444 } 445 + 449 446 /** 450 447 * For HW recovery information 451 448 */ ··· 536 531 sta = ieee80211_find_sta(mac->vif, mac->bssid); 537 532 if (sta) { 538 533 u8 ampdu_density = sta->ht_cap.ampdu_density; 534 + 539 535 SET_TX_DESC_AMPDU_DENSITY(txdesc, ampdu_density); 540 536 } 541 537 rcu_read_unlock(); 542 538 if (info->control.hw_key) { 543 539 struct ieee80211_key_conf *keyconf = info->control.hw_key; 540 + 544 541 switch (keyconf->cipher) { 545 542 case WLAN_CIPHER_SUITE_WEP40: 546 543 case WLAN_CIPHER_SUITE_WEP104:
-2
drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h
··· 198 198 #define SET_TX_DESC_OWN(__txdesc, __value) \ 199 199 SET_BITS_TO_LE_4BYTE(__txdesc, 31, 1, __value) 200 200 201 - 202 201 /* Dword 1 */ 203 202 #define SET_TX_DESC_MACID(__txdesc, __value) \ 204 203 SET_BITS_TO_LE_4BYTE(__txdesc + 4, 0, 5, __value) ··· 353 354 SET_BITS_TO_LE_4BYTE(__txdesc + 28, 24, 4, __value) 354 355 #define SET_TX_DESC_MCSG15_MAX_LEN(__txdesc, __value) \ 355 356 SET_BITS_TO_LE_4BYTE(__txdesc + 28, 28, 4, __value) 356 - 357 357 358 358 int rtl8192cu_endpoint_mapping(struct ieee80211_hw *hw); 359 359 u16 rtl8192cu_mq_to_hwq(__le16 fc, u16 mac80211_queue_index);
+2 -4
drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
··· 136 136 struct rtl_priv *rtlpriv = rtl_priv(hw); 137 137 struct sk_buff *skb; 138 138 struct rtl_tcb_desc *tcb_desc; 139 - unsigned char *seg_ptr; 140 139 u16 frag_threshold = MAX_FIRMWARE_CODE_SIZE; 141 140 u16 frag_length, frag_offset = 0; 142 141 u16 extra_descoffset = 0; ··· 165 166 if (!skb) 166 167 return false; 167 168 skb_reserve(skb, extra_descoffset); 168 - seg_ptr = skb_put_data(skb, 169 - code_virtual_address + frag_offset, 170 - (u32)(frag_length - extra_descoffset)); 169 + skb_put_data(skb, code_virtual_address + frag_offset, 170 + (u32)(frag_length - extra_descoffset)); 171 171 172 172 tcb_desc = (struct rtl_tcb_desc *)(skb->cb); 173 173 tcb_desc->queue_index = TXCMD_QUEUE;
-2
drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
··· 1405 1405 static void _rtl8723e_dm_bt_coexist_2_ant(struct ieee80211_hw *hw) 1406 1406 { 1407 1407 struct rtl_priv *rtlpriv = rtl_priv(hw); 1408 - u8 bt_retry_cnt; 1409 1408 u8 bt_info_original; 1410 1409 RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, 1411 1410 "[BTCoex] Get bt info by fw!!\n"); ··· 1416 1417 "[BTCoex] c2h for bt_info not rcvd yet!!\n"); 1417 1418 } 1418 1419 1419 - bt_retry_cnt = hal_coex_8723.bt_retry_cnt; 1420 1420 bt_info_original = hal_coex_8723.c2h_bt_info_original; 1421 1421 1422 1422 /* when bt inquiry or page scan, we have to set h2c 0x25 */
-3
drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c
··· 995 995 u32 edca_be = 0x5ea42b; 996 996 u32 iot_peer = 0; 997 997 bool b_is_cur_rdlstate; 998 - bool b_last_is_cur_rdlstate = false; 999 998 bool b_bias_on_rx = false; 1000 999 bool b_edca_turbo_on = false; 1001 - 1002 - b_last_is_cur_rdlstate = rtlpriv->dm.is_cur_rdlstate; 1003 1000 1004 1001 cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt; 1005 1002 cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt;
+3
drivers/net/wireless/realtek/rtlwifi/usb.c
··· 267 267 268 268 for (i = 0; i < __RTL_TXQ_NUM; i++) { 269 269 u32 ep_num = rtlusb->ep_map.ep_mapping[i]; 270 + 270 271 if (!ep_num) { 271 272 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, 272 273 "Invalid endpoint map setting!\n"); ··· 332 331 rtlusb->out_ep_nums = rtlusb->in_ep_nums = 0; 333 332 for (epidx = 0; epidx < epnums; epidx++) { 334 333 struct usb_endpoint_descriptor *pep_desc; 334 + 335 335 pep_desc = &usb_intf->cur_altsetting->endpoint[epidx].desc; 336 336 337 337 if (usb_endpoint_dir_in(pep_desc)) ··· 755 753 756 754 return err; 757 755 } 756 + 758 757 /** 759 758 * 760 759 *
+1 -4
drivers/net/wireless/realtek/rtlwifi/usb.h
··· 17 17 #define USB_HIGH_SPEED_BULK_SIZE 512 18 18 #define USB_FULL_SPEED_BULK_SIZE 64 19 19 20 - 21 20 #define RTL_USB_MAX_TXQ_NUM 4 /* max tx queue */ 22 21 #define RTL_USB_MAX_EP_NUM 6 /* max ep number */ 23 22 #define RTL_USB_MAX_TX_URBS_NUM 8 ··· 52 53 u32 ep_num) 53 54 { 54 55 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 56 + 55 57 info->rate_driver_data[0] = rtlusb; 56 58 info->rate_driver_data[1] = (void *)(__kernel_size_t)ep_num; 57 59 } 58 - 59 60 60 61 /* Add suspend/resume later */ 61 62 enum rtl_usb_state { ··· 131 132 132 133 #define rtl_usbpriv(hw) (((struct rtl_usb_priv *)(rtl_priv(hw))->priv)) 133 134 #define rtl_usbdev(usbpriv) (&((usbpriv)->dev)) 134 - 135 - 136 135 137 136 int rtl_usb_probe(struct usb_interface *intf, 138 137 const struct usb_device_id *id,
+178 -173
drivers/net/wireless/realtek/rtlwifi/wifi.h
··· 413 413 HW_VAR_MULTICAST_REG = 0x1, 414 414 HW_VAR_BASIC_RATE = 0x2, 415 415 HW_VAR_BSSID = 0x3, 416 - HW_VAR_MEDIA_STATUS= 0x4, 417 - HW_VAR_SECURITY_CONF= 0x5, 416 + HW_VAR_MEDIA_STATUS = 0x4, 417 + HW_VAR_SECURITY_CONF = 0x5, 418 418 HW_VAR_BEACON_INTERVAL = 0x6, 419 419 HW_VAR_ATIM_WINDOW = 0x7, 420 420 HW_VAR_LISTEN_INTERVAL = 0x8, ··· 431 431 HW_VAR_ACK_PREAMBLE = 0x13, 432 432 HW_VAR_CW_CONFIG = 0x14, 433 433 HW_VAR_CW_VALUES = 0x15, 434 - HW_VAR_RATE_FALLBACK_CONTROL= 0x16, 434 + HW_VAR_RATE_FALLBACK_CONTROL = 0x16, 435 435 HW_VAR_CONTENTION_WINDOW = 0x17, 436 436 HW_VAR_RETRY_COUNT = 0x18, 437 437 HW_VAR_TR_SWITCH = 0x19, ··· 598 598 }; 599 599 600 600 /* Ref: 802.11i sepc D10.0 7.3.2.25.1 601 - Cipher Suites Encryption Algorithms */ 601 + * Cipher Suites Encryption Algorithms 602 + */ 602 603 enum rt_enc_alg { 603 604 NO_ENCRYPTION = 0, 604 605 WEP40_ENCRYPTION = 1, ··· 749 748 RTL_IMR_ROK, /*Receive DMA OK Interrupt */ 750 749 RTL_IMR_HSISR_IND, /*HSISR Interrupt*/ 751 750 RTL_IBSS_INT_MASKS, /*(RTL_IMR_BCNINT | RTL_IMR_TBDOK | 752 - * RTL_IMR_TBDER) */ 751 + * RTL_IMR_TBDER) 752 + */ 753 753 RTL_IMR_C2HCMD, /*fw interrupt*/ 754 754 755 755 /*CCK Rates, TxHT = 0 */ ··· 847 845 BANDMAX 848 846 }; 849 847 850 - /*aci/aifsn Field. 851 - Ref: WMM spec 2.2.2: WME Parameter Element, p.12.*/ 848 + /* aci/aifsn Field. 849 + * Ref: WMM spec 2.2.2: WME Parameter Element, p.12. 850 + */ 852 851 union aci_aifsn { 853 852 u8 char_data; 854 853 ··· 1065 1062 __le16 beacon_interval; 1066 1063 __le16 capability; 1067 1064 /*SSID, supported rates, FH params, DS params, 1068 - CF params, IBSS params, TIM (if beacon), RSN */ 1065 + * CF params, IBSS params, TIM (if beacon), RSN 1066 + */ 1069 1067 struct rtl_info_element info_element[0]; 1070 1068 } __packed; 1071 1069 ··· 1140 1136 1141 1137 long rx_snr_db[4]; 1142 1138 /*Correct smoothed ss in Dbm, only used 1143 - in driver to report real power now. */ 1139 + * in driver to report real power now. 
1140 + */ 1144 1141 long recv_signal_power; 1145 1142 long signal_quality; 1146 1143 long last_sigstrength_inpercent; ··· 1149 1144 u32 rssi_calculate_cnt; 1150 1145 u32 pwdb_all_cnt; 1151 1146 1152 - /*Transformed, in dbm. Beautified signal 1153 - strength for UI, not correct. */ 1147 + /* Transformed, in dbm. Beautified signal 1148 + * strength for UI, not correct. 1149 + */ 1154 1150 long signal_strength; 1155 1151 1156 1152 u8 rx_rssi_percentage[4]; ··· 1462 1456 /*PCI IO map */ 1463 1457 unsigned long pci_base_addr; /*device I/O address */ 1464 1458 1465 - void (*write8_async) (struct rtl_priv *rtlpriv, u32 addr, u8 val); 1466 - void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, u16 val); 1467 - void (*write32_async) (struct rtl_priv *rtlpriv, u32 addr, u32 val); 1459 + void (*write8_async)(struct rtl_priv *rtlpriv, u32 addr, u8 val); 1460 + void (*write16_async)(struct rtl_priv *rtlpriv, u32 addr, u16 val); 1461 + void (*write32_async)(struct rtl_priv *rtlpriv, u32 addr, u32 val); 1468 1462 void (*writen_sync)(struct rtl_priv *rtlpriv, u32 addr, void *buf, 1469 - u16 len); 1463 + u16 len); 1470 1464 1471 - u8(*read8_sync) (struct rtl_priv *rtlpriv, u32 addr); 1472 - u16(*read16_sync) (struct rtl_priv *rtlpriv, u32 addr); 1473 - u32(*read32_sync) (struct rtl_priv *rtlpriv, u32 addr); 1465 + u8 (*read8_sync)(struct rtl_priv *rtlpriv, u32 addr); 1466 + u16 (*read16_sync)(struct rtl_priv *rtlpriv, u32 addr); 1467 + u32 (*read32_sync)(struct rtl_priv *rtlpriv, u32 addr); 1474 1468 1475 1469 }; 1476 1470 ··· 1695 1689 bool during_mac1init_radioa; 1696 1690 bool reloadtxpowerindex; 1697 1691 /* True if IMR or IQK have done 1698 - for 2.4G in scan progress */ 1692 + * for 2.4G in scan progress 1693 + */ 1699 1694 bool load_imrandiqk_setting_for2g; 1700 1695 1701 1696 bool disable_amsdu_8k; ··· 1735 1728 u32 hwsec_cam_bitmap; 1736 1729 u8 hwsec_cam_sta_addr[TOTAL_CAM_ENTRY][ETH_ALEN]; 1737 1730 /*local Key buffer, indx 0 is for 1738 - pairwise key 1-4 is for 
agoup key. */ 1731 + * pairwise key 1-4 is for agoup key. 1732 + */ 1739 1733 u8 key_buf[KEY_BUF_SIZE][MAX_KEY_LEN]; 1740 1734 u8 key_len[KEY_BUF_SIZE]; 1741 1735 1742 1736 /*The pointer of Pairwise Key, 1743 - it always points to KeyBuf[4] */ 1737 + * it always points to KeyBuf[4] 1738 + */ 1744 1739 u8 *pairwise_key; 1745 1740 }; 1746 1741 ··· 2006 1997 bool rfchange_inprogress; 2007 1998 bool swrf_processing; 2008 1999 bool hwradiooff; 2009 - /* 2010 - * just for PCIE ASPM 2000 + /* just for PCIE ASPM 2011 2001 * If it supports ASPM, Offset[560h] = 0x40, 2012 2002 * otherwise Offset[560h] = 0x00. 2013 - * */ 2003 + */ 2014 2004 bool support_aspm; 2015 2005 bool support_backdoor; 2016 2006 ··· 2089 2081 u8 nic_type; 2090 2082 u16 length; 2091 2083 u8 signalquality; /*in 0-100 index. */ 2092 - /* 2093 - * Real power in dBm for this packet, 2084 + /* Real power in dBm for this packet, 2094 2085 * no beautification and aggregation. 2095 - * */ 2086 + */ 2096 2087 s32 recvsignalpower; 2097 2088 s8 rxpower; /*in dBm Translate from PWdB */ 2098 2089 u8 signalstrength; /*in 0-100 index. 
*/ ··· 2142 2135 u32 bt_rx_rssi_percentage; 2143 2136 u32 macid_valid_entry[2]; 2144 2137 }; 2145 - 2146 2138 2147 2139 struct rt_link_detect { 2148 2140 /* count for roaming */ ··· 2216 2210 }; 2217 2211 2218 2212 struct rtl_hal_ops { 2219 - int (*init_sw_vars) (struct ieee80211_hw *hw); 2220 - void (*deinit_sw_vars) (struct ieee80211_hw *hw); 2213 + int (*init_sw_vars)(struct ieee80211_hw *hw); 2214 + void (*deinit_sw_vars)(struct ieee80211_hw *hw); 2221 2215 void (*read_chip_version)(struct ieee80211_hw *hw); 2222 - void (*read_eeprom_info) (struct ieee80211_hw *hw); 2223 - void (*interrupt_recognized) (struct ieee80211_hw *hw, 2224 - struct rtl_int *intvec); 2225 - int (*hw_init) (struct ieee80211_hw *hw); 2226 - void (*hw_disable) (struct ieee80211_hw *hw); 2227 - void (*hw_suspend) (struct ieee80211_hw *hw); 2228 - void (*hw_resume) (struct ieee80211_hw *hw); 2229 - void (*enable_interrupt) (struct ieee80211_hw *hw); 2230 - void (*disable_interrupt) (struct ieee80211_hw *hw); 2231 - int (*set_network_type) (struct ieee80211_hw *hw, 2232 - enum nl80211_iftype type); 2216 + void (*read_eeprom_info)(struct ieee80211_hw *hw); 2217 + void (*interrupt_recognized)(struct ieee80211_hw *hw, 2218 + struct rtl_int *intvec); 2219 + int (*hw_init)(struct ieee80211_hw *hw); 2220 + void (*hw_disable)(struct ieee80211_hw *hw); 2221 + void (*hw_suspend)(struct ieee80211_hw *hw); 2222 + void (*hw_resume)(struct ieee80211_hw *hw); 2223 + void (*enable_interrupt)(struct ieee80211_hw *hw); 2224 + void (*disable_interrupt)(struct ieee80211_hw *hw); 2225 + int (*set_network_type)(struct ieee80211_hw *hw, 2226 + enum nl80211_iftype type); 2233 2227 void (*set_chk_bssid)(struct ieee80211_hw *hw, 2234 - bool check_bssid); 2235 - void (*set_bw_mode) (struct ieee80211_hw *hw, 2236 - enum nl80211_channel_type ch_type); 2237 - u8(*switch_channel) (struct ieee80211_hw *hw); 2238 - void (*set_qos) (struct ieee80211_hw *hw, int aci); 2239 - void (*set_bcn_reg) (struct ieee80211_hw *hw); 2240 
- void (*set_bcn_intv) (struct ieee80211_hw *hw); 2241 - void (*update_interrupt_mask) (struct ieee80211_hw *hw, 2242 - u32 add_msr, u32 rm_msr); 2243 - void (*get_hw_reg) (struct ieee80211_hw *hw, u8 variable, u8 *val); 2244 - void (*set_hw_reg) (struct ieee80211_hw *hw, u8 variable, u8 *val); 2245 - void (*update_rate_tbl) (struct ieee80211_hw *hw, 2246 - struct ieee80211_sta *sta, u8 rssi_leve, 2247 - bool update_bw); 2228 + bool check_bssid); 2229 + void (*set_bw_mode)(struct ieee80211_hw *hw, 2230 + enum nl80211_channel_type ch_type); 2231 + u8 (*switch_channel)(struct ieee80211_hw *hw); 2232 + void (*set_qos)(struct ieee80211_hw *hw, int aci); 2233 + void (*set_bcn_reg)(struct ieee80211_hw *hw); 2234 + void (*set_bcn_intv)(struct ieee80211_hw *hw); 2235 + void (*update_interrupt_mask)(struct ieee80211_hw *hw, 2236 + u32 add_msr, u32 rm_msr); 2237 + void (*get_hw_reg)(struct ieee80211_hw *hw, u8 variable, u8 *val); 2238 + void (*set_hw_reg)(struct ieee80211_hw *hw, u8 variable, u8 *val); 2239 + void (*update_rate_tbl)(struct ieee80211_hw *hw, 2240 + struct ieee80211_sta *sta, u8 rssi_leve, 2241 + bool update_bw); 2248 2242 void (*pre_fill_tx_bd_desc)(struct ieee80211_hw *hw, u8 *tx_bd_desc, 2249 2243 u8 *desc, u8 queue_index, 2250 2244 struct sk_buff *skb, dma_addr_t addr); 2251 - void (*update_rate_mask) (struct ieee80211_hw *hw, u8 rssi_level); 2245 + void (*update_rate_mask)(struct ieee80211_hw *hw, u8 rssi_level); 2252 2246 u16 (*rx_desc_buff_remained_cnt)(struct ieee80211_hw *hw, 2253 2247 u8 queue_index); 2254 2248 void (*rx_check_dma_ok)(struct ieee80211_hw *hw, u8 *header_desc, 2255 2249 u8 queue_index); 2256 - void (*fill_tx_desc) (struct ieee80211_hw *hw, 2257 - struct ieee80211_hdr *hdr, u8 *pdesc_tx, 2258 - u8 *pbd_desc_tx, 2259 - struct ieee80211_tx_info *info, 2260 - struct ieee80211_sta *sta, 2261 - struct sk_buff *skb, u8 hw_queue, 2262 - struct rtl_tcb_desc *ptcb_desc); 2250 + void (*fill_tx_desc)(struct ieee80211_hw *hw, 2251 + struct 
ieee80211_hdr *hdr, u8 *pdesc_tx, 2252 + u8 *pbd_desc_tx, 2253 + struct ieee80211_tx_info *info, 2254 + struct ieee80211_sta *sta, 2255 + struct sk_buff *skb, u8 hw_queue, 2256 + struct rtl_tcb_desc *ptcb_desc); 2263 2257 void (*fill_fake_txdesc)(struct ieee80211_hw *hw, u8 *pdesc, 2264 2258 u32 buffer_len, bool bsspspoll); 2265 - void (*fill_tx_cmddesc) (struct ieee80211_hw *hw, u8 *pdesc, 2266 - bool firstseg, bool lastseg, 2267 - struct sk_buff *skb); 2259 + void (*fill_tx_cmddesc)(struct ieee80211_hw *hw, u8 *pdesc, 2260 + bool firstseg, bool lastseg, 2261 + struct sk_buff *skb); 2268 2262 void (*fill_tx_special_desc)(struct ieee80211_hw *hw, 2269 2263 u8 *pdesc, u8 *pbd_desc, 2270 2264 struct sk_buff *skb, u8 hw_queue); 2271 - bool (*query_rx_desc) (struct ieee80211_hw *hw, 2272 - struct rtl_stats *stats, 2273 - struct ieee80211_rx_status *rx_status, 2274 - u8 *pdesc, struct sk_buff *skb); 2275 - void (*set_channel_access) (struct ieee80211_hw *hw); 2276 - bool (*radio_onoff_checking) (struct ieee80211_hw *hw, u8 *valid); 2277 - void (*dm_watchdog) (struct ieee80211_hw *hw); 2278 - void (*scan_operation_backup) (struct ieee80211_hw *hw, u8 operation); 2279 - bool (*set_rf_power_state) (struct ieee80211_hw *hw, 2280 - enum rf_pwrstate rfpwr_state); 2281 - void (*led_control) (struct ieee80211_hw *hw, 2282 - enum led_ctl_mode ledaction); 2265 + bool (*query_rx_desc)(struct ieee80211_hw *hw, 2266 + struct rtl_stats *stats, 2267 + struct ieee80211_rx_status *rx_status, 2268 + u8 *pdesc, struct sk_buff *skb); 2269 + void (*set_channel_access)(struct ieee80211_hw *hw); 2270 + bool (*radio_onoff_checking)(struct ieee80211_hw *hw, u8 *valid); 2271 + void (*dm_watchdog)(struct ieee80211_hw *hw); 2272 + void (*scan_operation_backup)(struct ieee80211_hw *hw, u8 operation); 2273 + bool (*set_rf_power_state)(struct ieee80211_hw *hw, 2274 + enum rf_pwrstate rfpwr_state); 2275 + void (*led_control)(struct ieee80211_hw *hw, 2276 + enum led_ctl_mode ledaction); 2283 2277 void 
(*set_desc)(struct ieee80211_hw *hw, u8 *pdesc, bool istx, 2284 2278 u8 desc_name, u8 *val); 2285 2279 u64 (*get_desc)(struct ieee80211_hw *hw, u8 *pdesc, bool istx, 2286 2280 u8 desc_name); 2287 - bool (*is_tx_desc_closed) (struct ieee80211_hw *hw, 2288 - u8 hw_queue, u16 index); 2289 - void (*tx_polling) (struct ieee80211_hw *hw, u8 hw_queue); 2290 - void (*enable_hw_sec) (struct ieee80211_hw *hw); 2291 - void (*set_key) (struct ieee80211_hw *hw, u32 key_index, 2292 - u8 *macaddr, bool is_group, u8 enc_algo, 2293 - bool is_wepkey, bool clear_all); 2294 - void (*init_sw_leds) (struct ieee80211_hw *hw); 2295 - void (*deinit_sw_leds) (struct ieee80211_hw *hw); 2296 - u32 (*get_bbreg) (struct ieee80211_hw *hw, u32 regaddr, u32 bitmask); 2297 - void (*set_bbreg) (struct ieee80211_hw *hw, u32 regaddr, u32 bitmask, 2298 - u32 data); 2299 - u32 (*get_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath, 2300 - u32 regaddr, u32 bitmask); 2301 - void (*set_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath, 2302 - u32 regaddr, u32 bitmask, u32 data); 2303 - void (*linked_set_reg) (struct ieee80211_hw *hw); 2304 - void (*chk_switch_dmdp) (struct ieee80211_hw *hw); 2305 - void (*dualmac_easy_concurrent) (struct ieee80211_hw *hw); 2306 - void (*dualmac_switch_to_dmdp) (struct ieee80211_hw *hw); 2307 - bool (*phy_rf6052_config) (struct ieee80211_hw *hw); 2308 - void (*phy_rf6052_set_cck_txpower) (struct ieee80211_hw *hw, 2309 - u8 *powerlevel); 2310 - void (*phy_rf6052_set_ofdm_txpower) (struct ieee80211_hw *hw, 2311 - u8 *ppowerlevel, u8 channel); 2312 - bool (*config_bb_with_headerfile) (struct ieee80211_hw *hw, 2313 - u8 configtype); 2314 - bool (*config_bb_with_pgheaderfile) (struct ieee80211_hw *hw, 2315 - u8 configtype); 2316 - void (*phy_lc_calibrate) (struct ieee80211_hw *hw, bool is2t); 2317 - void (*phy_set_bw_mode_callback) (struct ieee80211_hw *hw); 2318 - void (*dm_dynamic_txpower) (struct ieee80211_hw *hw); 2319 - void (*c2h_command_handle) (struct 
ieee80211_hw *hw); 2320 - void (*bt_wifi_media_status_notify) (struct ieee80211_hw *hw, 2321 - bool mstate); 2322 - void (*bt_coex_off_before_lps) (struct ieee80211_hw *hw); 2323 - void (*fill_h2c_cmd) (struct ieee80211_hw *hw, u8 element_id, 2324 - u32 cmd_len, u8 *p_cmdbuffer); 2281 + bool (*is_tx_desc_closed)(struct ieee80211_hw *hw, 2282 + u8 hw_queue, u16 index); 2283 + void (*tx_polling)(struct ieee80211_hw *hw, u8 hw_queue); 2284 + void (*enable_hw_sec)(struct ieee80211_hw *hw); 2285 + void (*set_key)(struct ieee80211_hw *hw, u32 key_index, 2286 + u8 *macaddr, bool is_group, u8 enc_algo, 2287 + bool is_wepkey, bool clear_all); 2288 + void (*init_sw_leds)(struct ieee80211_hw *hw); 2289 + void (*deinit_sw_leds)(struct ieee80211_hw *hw); 2290 + u32 (*get_bbreg)(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask); 2291 + void (*set_bbreg)(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask, 2292 + u32 data); 2293 + u32 (*get_rfreg)(struct ieee80211_hw *hw, enum radio_path rfpath, 2294 + u32 regaddr, u32 bitmask); 2295 + void (*set_rfreg)(struct ieee80211_hw *hw, enum radio_path rfpath, 2296 + u32 regaddr, u32 bitmask, u32 data); 2297 + void (*linked_set_reg)(struct ieee80211_hw *hw); 2298 + void (*chk_switch_dmdp)(struct ieee80211_hw *hw); 2299 + void (*dualmac_easy_concurrent)(struct ieee80211_hw *hw); 2300 + void (*dualmac_switch_to_dmdp)(struct ieee80211_hw *hw); 2301 + bool (*phy_rf6052_config)(struct ieee80211_hw *hw); 2302 + void (*phy_rf6052_set_cck_txpower)(struct ieee80211_hw *hw, 2303 + u8 *powerlevel); 2304 + void (*phy_rf6052_set_ofdm_txpower)(struct ieee80211_hw *hw, 2305 + u8 *ppowerlevel, u8 channel); 2306 + bool (*config_bb_with_headerfile)(struct ieee80211_hw *hw, 2307 + u8 configtype); 2308 + bool (*config_bb_with_pgheaderfile)(struct ieee80211_hw *hw, 2309 + u8 configtype); 2310 + void (*phy_lc_calibrate)(struct ieee80211_hw *hw, bool is2t); 2311 + void (*phy_set_bw_mode_callback)(struct ieee80211_hw *hw); 2312 + void (*dm_dynamic_txpower)(struct 
ieee80211_hw *hw); 2313 + void (*c2h_command_handle)(struct ieee80211_hw *hw); 2314 + void (*bt_wifi_media_status_notify)(struct ieee80211_hw *hw, 2315 + bool mstate); 2316 + void (*bt_coex_off_before_lps)(struct ieee80211_hw *hw); 2317 + void (*fill_h2c_cmd)(struct ieee80211_hw *hw, u8 element_id, 2318 + u32 cmd_len, u8 *p_cmdbuffer); 2325 2319 void (*set_default_port_id_cmd)(struct ieee80211_hw *hw); 2326 - bool (*get_btc_status) (void); 2320 + bool (*get_btc_status)(void); 2327 2321 bool (*is_fw_header)(struct rtlwifi_firmware_header *hdr); 2328 2322 void (*add_wowlan_pattern)(struct ieee80211_hw *hw, 2329 2323 struct rtl_wow_pattern *rtl_pattern, ··· 2336 2330 struct rtl_intf_ops { 2337 2331 /*com */ 2338 2332 void (*read_efuse_byte)(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf); 2339 - int (*adapter_start) (struct ieee80211_hw *hw); 2340 - void (*adapter_stop) (struct ieee80211_hw *hw); 2333 + int (*adapter_start)(struct ieee80211_hw *hw); 2334 + void (*adapter_stop)(struct ieee80211_hw *hw); 2341 2335 bool (*check_buddy_priv)(struct ieee80211_hw *hw, 2342 2336 struct rtl_priv **buddy_priv); 2343 2337 2344 - int (*adapter_tx) (struct ieee80211_hw *hw, 2345 - struct ieee80211_sta *sta, 2346 - struct sk_buff *skb, 2347 - struct rtl_tcb_desc *ptcb_desc); 2338 + int (*adapter_tx)(struct ieee80211_hw *hw, 2339 + struct ieee80211_sta *sta, 2340 + struct sk_buff *skb, 2341 + struct rtl_tcb_desc *ptcb_desc); 2348 2342 void (*flush)(struct ieee80211_hw *hw, u32 queues, bool drop); 2349 - int (*reset_trx_ring) (struct ieee80211_hw *hw); 2350 - bool (*waitq_insert) (struct ieee80211_hw *hw, 2351 - struct ieee80211_sta *sta, 2352 - struct sk_buff *skb); 2343 + int (*reset_trx_ring)(struct ieee80211_hw *hw); 2344 + bool (*waitq_insert)(struct ieee80211_hw *hw, 2345 + struct ieee80211_sta *sta, 2346 + struct sk_buff *skb); 2353 2347 2354 2348 /*pci */ 2355 - void (*disable_aspm) (struct ieee80211_hw *hw); 2356 - void (*enable_aspm) (struct ieee80211_hw *hw); 2349 + void 
(*disable_aspm)(struct ieee80211_hw *hw); 2350 + void (*enable_aspm)(struct ieee80211_hw *hw); 2357 2351 2358 2352 /*usb */ 2359 2353 }; ··· 2431 2425 enum rtl_spec_ver spec_ver; 2432 2426 2433 2427 /*this map used for some registers or vars 2434 - defined int HAL but used in MAIN */ 2428 + * defined int HAL but used in MAIN 2429 + */ 2435 2430 u32 maps[RTL_VAR_MAP_MAX]; 2436 2431 2437 2432 }; ··· 2594 2587 2595 2588 struct rtl_global_var { 2596 2589 /* from this list we can get 2597 - * other adapter's rtl_priv */ 2590 + * other adapter's rtl_priv 2591 + */ 2598 2592 struct list_head glb_priv_list; 2599 2593 spinlock_t glb_list_lock; 2600 2594 }; ··· 2674 2666 }; 2675 2667 2676 2668 struct rtl_btc_ops { 2677 - void (*btc_init_variables) (struct rtl_priv *rtlpriv); 2669 + void (*btc_init_variables)(struct rtl_priv *rtlpriv); 2678 2670 void (*btc_init_variables_wifi_only)(struct rtl_priv *rtlpriv); 2679 2671 void (*btc_deinit_variables)(struct rtl_priv *rtlpriv); 2680 - void (*btc_init_hal_vars) (struct rtl_priv *rtlpriv); 2672 + void (*btc_init_hal_vars)(struct rtl_priv *rtlpriv); 2681 2673 void (*btc_power_on_setting)(struct rtl_priv *rtlpriv); 2682 - void (*btc_init_hw_config) (struct rtl_priv *rtlpriv); 2674 + void (*btc_init_hw_config)(struct rtl_priv *rtlpriv); 2683 2675 void (*btc_init_hw_config_wifi_only)(struct rtl_priv *rtlpriv); 2684 - void (*btc_ips_notify) (struct rtl_priv *rtlpriv, u8 type); 2676 + void (*btc_ips_notify)(struct rtl_priv *rtlpriv, u8 type); 2685 2677 void (*btc_lps_notify)(struct rtl_priv *rtlpriv, u8 type); 2686 - void (*btc_scan_notify) (struct rtl_priv *rtlpriv, u8 scantype); 2678 + void (*btc_scan_notify)(struct rtl_priv *rtlpriv, u8 scantype); 2687 2679 void (*btc_scan_notify_wifi_only)(struct rtl_priv *rtlpriv, 2688 2680 u8 scantype); 2689 - void (*btc_connect_notify) (struct rtl_priv *rtlpriv, u8 action); 2690 - void (*btc_mediastatus_notify) (struct rtl_priv *rtlpriv, 2691 - enum rt_media_status mstatus); 2692 - void 
(*btc_periodical) (struct rtl_priv *rtlpriv); 2681 + void (*btc_connect_notify)(struct rtl_priv *rtlpriv, u8 action); 2682 + void (*btc_mediastatus_notify)(struct rtl_priv *rtlpriv, 2683 + enum rt_media_status mstatus); 2684 + void (*btc_periodical)(struct rtl_priv *rtlpriv); 2693 2685 void (*btc_halt_notify)(struct rtl_priv *rtlpriv); 2694 - void (*btc_btinfo_notify) (struct rtl_priv *rtlpriv, 2695 - u8 *tmp_buf, u8 length); 2686 + void (*btc_btinfo_notify)(struct rtl_priv *rtlpriv, 2687 + u8 *tmp_buf, u8 length); 2696 2688 void (*btc_btmpinfo_notify)(struct rtl_priv *rtlpriv, 2697 2689 u8 *tmp_buf, u8 length); 2698 - bool (*btc_is_limited_dig) (struct rtl_priv *rtlpriv); 2699 - bool (*btc_is_disable_edca_turbo) (struct rtl_priv *rtlpriv); 2700 - bool (*btc_is_bt_disabled) (struct rtl_priv *rtlpriv); 2690 + bool (*btc_is_limited_dig)(struct rtl_priv *rtlpriv); 2691 + bool (*btc_is_disable_edca_turbo)(struct rtl_priv *rtlpriv); 2692 + bool (*btc_is_bt_disabled)(struct rtl_priv *rtlpriv); 2701 2693 void (*btc_special_packet_notify)(struct rtl_priv *rtlpriv, 2702 2694 u8 pkt_type); 2703 2695 void (*btc_switch_band_notify)(struct rtl_priv *rtlpriv, u8 type, ··· 2783 2775 struct rtl_debug dbg; 2784 2776 int max_fw_size; 2785 2777 2786 - /* 2787 - *hal_cfg : for diff cards 2788 - *intf_ops : for diff interrface usb/pcie 2778 + /* hal_cfg : for diff cards 2779 + * intf_ops : for diff interrface usb/pcie 2789 2780 */ 2790 2781 struct rtl_hal_cfg *cfg; 2791 2782 const struct rtl_intf_ops *intf_ops; 2792 2783 2793 - /*this var will be set by set_bit, 2794 - and was used to indicate status of 2795 - interface or hardware */ 2784 + /* this var will be set by set_bit, 2785 + * and was used to indicate status of 2786 + * interface or hardware 2787 + */ 2796 2788 unsigned long status; 2797 2789 2798 2790 /* tables for dm */ ··· 2828 2820 #ifdef CONFIG_PM 2829 2821 struct wiphy_wowlan_support wowlan; 2830 2822 #endif 2831 - /*This must be the last item so 2832 - that it points to 
the data allocated 2833 - beyond this structure like: 2834 - rtl_pci_priv or rtl_usb_priv */ 2823 + /* This must be the last item so 2824 + * that it points to the data allocated 2825 + * beyond this structure like: 2826 + * rtl_pci_priv or rtl_usb_priv 2827 + */ 2835 2828 u8 priv[0] __aligned(sizeof(void *)); 2836 2829 }; 2837 2830 ··· 2842 2833 #define rtl_efuse(rtlpriv) (&((rtlpriv)->efuse)) 2843 2834 #define rtl_psc(rtlpriv) (&((rtlpriv)->psc)) 2844 2835 2845 - 2846 - /*************************************** 2847 - Bluetooth Co-existence Related 2848 - ****************************************/ 2836 + /* Bluetooth Co-existence Related */ 2849 2837 2850 2838 enum bt_ant_num { 2851 2839 ANT_X2 = 0, ··· 2891 2885 BT_RADIO_INDIVIDUAL = 1, 2892 2886 }; 2893 2887 2894 - 2895 2888 /**************************************** 2896 - mem access macro define start 2897 - Call endian free function when 2898 - 1. Read/write packet content. 2899 - 2. Before write integer to IO. 2900 - 3. After read integer from IO. 2901 - ****************************************/ 2889 + * mem access macro define start 2890 + * Call endian free function when 2891 + * 1. Read/write packet content. 2892 + * 2. Before write integer to IO. 2893 + * 3. After read integer from IO. 2894 + ****************************************/ 2902 2895 /* Convert little data endian to host ordering */ 2903 2896 #define EF1BYTE(_val) \ 2904 2897 ((u8)(_val)) ··· 2953 2948 (EF1BYTE(*((u8 *)(__pstart)))) 2954 2949 2955 2950 /*Description: 2956 - Translate subfield (continuous bits in little-endian) of 4-byte 2957 - value to host byte ordering.*/ 2951 + * Translate subfield (continuous bits in little-endian) of 4-byte 2952 + * value to host byte ordering. 2953 + */ 2958 2954 #define LE_BITS_TO_4BYTE(__pstart, __bitoffset, __bitlen) \ 2959 2955 ( \ 2960 2956 (LE_P4BYTE_TO_HOST_4BYTE(__pstart) >> (__bitoffset)) & \ ··· 3017 3011 #define N_BYTE_ALIGMENT(__value, __aligment) ((__aligment == 1) ? 
\ 3018 3012 (__value) : (((__value + __aligment - 1) / __aligment) * __aligment)) 3019 3013 3020 - /**************************************** 3021 - mem access macro define end 3022 - ****************************************/ 3014 + /* mem access macro define end */ 3023 3015 3024 3016 #define byte(x, n) ((x >> (8 * n)) & 0xff) 3025 3017 ··· 3152 3148 } 3153 3149 3154 3150 static inline void rtl_set_bbreg_with_dwmask(struct ieee80211_hw *hw, 3155 - u32 regaddr, u32 data) 3151 + u32 regaddr, u32 data) 3156 3152 { 3157 3153 rtl_set_bbreg(hw, regaddr, 0xffffffff, data); 3158 3154 } ··· 3223 3219 } 3224 3220 3225 3221 static inline struct ieee80211_sta *rtl_find_sta(struct ieee80211_hw *hw, 3226 - u8 *mac_addr) 3222 + u8 *mac_addr) 3227 3223 { 3228 3224 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 3225 + 3229 3226 return ieee80211_find_sta(mac->vif, mac_addr); 3230 3227 } 3231 3228
+4
drivers/net/wireless/rsi/rsi_91x_mgmt.c
··· 1726 1726 1727 1727 probereq_skb = ieee80211_probereq_get(common->priv->hw, vif->addr, ssid, 1728 1728 ssid_len, scan_req->ie_len); 1729 + if (!probereq_skb) { 1730 + dev_kfree_skb(skb); 1731 + return -ENOMEM; 1732 + } 1729 1733 1730 1734 memcpy(&skb->data[frame_len], probereq_skb->data, probereq_skb->len); 1731 1735