// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/ieee80211.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <crypto/hash.h>
#include "core.h"
#include "debug.h"
#include "hal_desc.h"
#include "hw.h"
#include "dp_rx.h"
#include "hal_rx.h"
#include "dp_tx.h"
#include "peer.h"
#include "dp_mon.h"
#include "debugfs_htt_stats.h"

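/* Fragment reassembly timeout. Note: (2 * HZ) is a jiffies interval
 * (two seconds), not a millisecond count, despite the _MS suffix.
 */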
#define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)

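/* The accessors below are thin wrappers around the chip-specific
 * hal_rx_ops callbacks, so the common RX path can read hal_rx_desc
 * fields without knowing the per-chip descriptor layout.
 */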
static enum hal_encrypt_type ath12k_dp_rx_h_enctype(struct ath12k_base *ab,
						    struct hal_rx_desc *desc)
{
	if (!ab->hal_rx_ops->rx_desc_encrypt_valid(desc))
		return HAL_ENCRYPT_TYPE_OPEN;

	return ab->hal_rx_ops->rx_desc_get_encrypt_type(desc);
}

u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab,
			     struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_decap_type(desc);
}

static u8 ath12k_dp_rx_h_mesh_ctl_present(struct ath12k_base *ab,
					  struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mesh_ctl(desc);
}

static bool ath12k_dp_rx_h_seq_ctrl_valid(struct ath12k_base *ab,
					  struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
}

static bool ath12k_dp_rx_h_fc_valid(struct ath12k_base *ab,
				    struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mpdu_fc_valid(desc);
}

static bool ath12k_dp_rx_h_more_frags(struct ath12k_base *ab,
				      struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz);
	return ieee80211_has_morefrags(hdr->frame_control);
}

static u16 ath12k_dp_rx_h_frag_no(struct ath12k_base *ab,
				  struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz);
	return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
}

static u16 ath12k_dp_rx_h_seq_no(struct ath12k_base *ab,
				 struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mpdu_start_seq_no(desc);
}

static bool ath12k_dp_rx_h_msdu_done(struct ath12k_base *ab,
				     struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->dp_rx_h_msdu_done(desc);
}

static bool ath12k_dp_rx_h_l4_cksum_fail(struct ath12k_base *ab,
					 struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->dp_rx_h_l4_cksum_fail(desc);
}

static bool ath12k_dp_rx_h_ip_cksum_fail(struct ath12k_base *ab,
					 struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->dp_rx_h_ip_cksum_fail(desc);
}

static bool ath12k_dp_rx_h_is_decrypted(struct ath12k_base *ab,
					struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->dp_rx_h_is_decrypted(desc);
}

u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab,
			    struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->dp_rx_h_mpdu_err(desc);
}

static u16 ath12k_dp_rx_h_msdu_len(struct ath12k_base *ab,
				   struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_len(desc);
}

static u8 ath12k_dp_rx_h_sgi(struct ath12k_base *ab,
			     struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_sgi(desc);
}

static u8 ath12k_dp_rx_h_rate_mcs(struct ath12k_base *ab,
				  struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_rate_mcs(desc);
}

static u8 ath12k_dp_rx_h_rx_bw(struct ath12k_base *ab,
			       struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_rx_bw(desc);
}

static u32 ath12k_dp_rx_h_freq(struct ath12k_base *ab,
			       struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_freq(desc);
}

static u8 ath12k_dp_rx_h_pkt_type(struct ath12k_base *ab,
				  struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_pkt_type(desc);
}

static u8 ath12k_dp_rx_h_nss(struct ath12k_base *ab,
			     struct hal_rx_desc *desc)
{
	return hweight8(ab->hal_rx_ops->rx_desc_get_msdu_nss(desc));
}

static u8 ath12k_dp_rx_h_tid(struct ath12k_base *ab,
			     struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mpdu_tid(desc);
}

static u16 ath12k_dp_rx_h_peer_id(struct ath12k_base *ab,
				  struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_mpdu_peer_id(desc);
}

u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab,
			struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_l3_pad_bytes(desc);
}

static bool ath12k_dp_rx_h_first_msdu(struct ath12k_base *ab,
				      struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_first_msdu(desc);
}

static bool ath12k_dp_rx_h_last_msdu(struct ath12k_base *ab,
				     struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_last_msdu(desc);
}

static void ath12k_dp_rx_desc_end_tlv_copy(struct ath12k_base *ab,
					   struct hal_rx_desc *fdesc,
					   struct hal_rx_desc *ldesc)
{
	ab->hal_rx_ops->rx_desc_copy_end_tlv(fdesc, ldesc);
}

static void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_base *ab,
					  struct hal_rx_desc *desc,
					  u16 len)
{
	ab->hal_rx_ops->rx_desc_set_msdu_len(desc, len);
}

u32 ath12k_dp_rxdesc_get_ppduid(struct ath12k_base *ab,
				struct hal_rx_desc *rx_desc)
{
	return ab->hal_rx_ops->rx_desc_get_mpdu_ppdu_id(rx_desc);
}

bool ath12k_dp_rxdesc_mpdu_valid(struct ath12k_base *ab,
				 struct hal_rx_desc *rx_desc)
{
	u32 tlv_tag;

	tlv_tag = ab->hal_rx_ops->rx_desc_get_mpdu_start_tag(rx_desc);

	return tlv_tag == HAL_RX_MPDU_START;
}

static bool ath12k_dp_rx_h_is_da_mcbc(struct ath12k_base *ab,
				      struct hal_rx_desc *desc)
{
	return (ath12k_dp_rx_h_first_msdu(ab, desc) &&
		ab->hal_rx_ops->rx_desc_is_da_mcbc(desc));
}

static bool ath12k_dp_rxdesc_mac_addr2_valid(struct ath12k_base *ab,
					     struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_mac_addr2_valid(desc);
}

static u8 *ath12k_dp_rxdesc_get_mpdu_start_addr2(struct ath12k_base *ab,
						 struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_mpdu_start_addr2(desc);
}

static void ath12k_dp_rx_desc_get_dot11_hdr(struct ath12k_base *ab,
					    struct hal_rx_desc *desc,
					    struct ieee80211_hdr *hdr)
{
	ab->hal_rx_ops->rx_desc_get_dot11_hdr(desc, hdr);
}

static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab,
						struct hal_rx_desc *desc,
						u8 *crypto_hdr,
						enum hal_encrypt_type enctype)
{
	ab->hal_rx_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);
}

static inline u8 ath12k_dp_rx_get_msdu_src_link(struct ath12k_base *ab,
						struct hal_rx_desc *desc)
{
	return ab->hal_rx_ops->rx_desc_get_msdu_src_link_id(desc);
}

static void ath12k_dp_clean_up_skb_list(struct sk_buff_head *skb_list)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(skb_list)))
		dev_kfree_skb_any(skb);
}

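/* Detach up to @count descriptors from the front of @head into @list,
 * marking each one in_use. Returns the number of nodes actually moved,
 * which may be fewer than @count if @head runs out of entries.
 */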
static size_t ath12k_dp_list_cut_nodes(struct list_head *list,
				       struct list_head *head,
				       size_t count)
{
	struct list_head *cur;
	struct ath12k_rx_desc_info *rx_desc;
	size_t nodes = 0;

	if (!count) {
		INIT_LIST_HEAD(list);
		goto out;
	}

	list_for_each(cur, head) {
		if (!count)
			break;

		rx_desc = list_entry(cur, struct ath12k_rx_desc_info, list);
		rx_desc->in_use = true;

		count--;
		nodes++;
	}

	list_cut_before(list, head, cur);
out:
	return nodes;
}

static void ath12k_dp_rx_enqueue_free(struct ath12k_dp *dp,
				      struct list_head *used_list)
{
	struct ath12k_rx_desc_info *rx_desc, *safe;

	/* Reset the use flag */
	list_for_each_entry_safe(rx_desc, safe, used_list, list)
		rx_desc->in_use = false;

	spin_lock_bh(&dp->rx_desc_lock);
	list_splice_tail(used_list, &dp->rx_desc_free_list);
	spin_unlock_bh(&dp->rx_desc_lock);
}

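/* Refill the RXDMA buffer ring: take rx descriptors from @used_list
 * (or from the free list when @used_list is empty), back each one with
 * a freshly allocated, DMA-mapped skb, and publish the buffer address
 * together with the descriptor cookie to the HAL source ring.
 */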
/* Returns number of Rx buffers replenished */
int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
				struct dp_rxdma_ring *rx_ring,
				struct list_head *used_list,
				int req_entries)
{
	struct ath12k_buffer_addr *desc;
	struct hal_srng *srng;
	struct sk_buff *skb;
	int num_free;
	int num_remain;
	u32 cookie;
	dma_addr_t paddr;
	struct ath12k_dp *dp = &ab->dp;
	struct ath12k_rx_desc_info *rx_desc;
	enum hal_rx_buf_return_buf_manager mgr = ab->hw_params->hal_params->rx_buf_rbm;

	req_entries = min(req_entries, rx_ring->bufs_max);

	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	num_free = ath12k_hal_srng_src_num_free(ab, srng, true);
	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
		req_entries = num_free;

	req_entries = min(num_free, req_entries);
	num_remain = req_entries;

	if (!num_remain)
		goto out;

	/* Get the descriptor from free list */
	if (list_empty(used_list)) {
		spin_lock_bh(&dp->rx_desc_lock);
		req_entries = ath12k_dp_list_cut_nodes(used_list,
						       &dp->rx_desc_free_list,
						       num_remain);
		spin_unlock_bh(&dp->rx_desc_lock);
		num_remain = req_entries;
	}

	while (num_remain > 0) {
		skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
				    DP_RX_BUFFER_ALIGN_SIZE);
		if (!skb)
			break;

		if (!IS_ALIGNED((unsigned long)skb->data,
				DP_RX_BUFFER_ALIGN_SIZE)) {
			skb_pull(skb,
				 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
				 skb->data);
		}

		paddr = dma_map_single(ab->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(ab->dev, paddr))
			goto fail_free_skb;

		rx_desc = list_first_entry_or_null(used_list,
						   struct ath12k_rx_desc_info,
						   list);
		if (!rx_desc)
			goto fail_dma_unmap;

		rx_desc->skb = skb;
		cookie = rx_desc->cookie;

		desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
		if (!desc)
			goto fail_dma_unmap;

		list_del(&rx_desc->list);
		ATH12K_SKB_RXCB(skb)->paddr = paddr;

		num_remain--;

		ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
	}

	goto out;

fail_dma_unmap:
	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_FROM_DEVICE);
fail_free_skb:
	dev_kfree_skb_any(skb);
out:
	ath12k_hal_srng_access_end(ab, srng);

	if (!list_empty(used_list))
		ath12k_dp_rx_enqueue_free(dp, used_list);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;
}

static int ath12k_dp_rxdma_mon_buf_ring_free(struct ath12k_base *ab,
					     struct dp_rxdma_mon_ring *rx_ring)
{
	struct sk_buff *skb;
	int buf_id;

	spin_lock_bh(&rx_ring->idr_lock);
	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
		idr_remove(&rx_ring->bufs_idr, buf_id);
		/* TODO: Understand where internal driver does this dma_unmap
		 * of rxdma_buffer.
		 */
		dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	idr_destroy(&rx_ring->bufs_idr);
	spin_unlock_bh(&rx_ring->idr_lock);

	return 0;
}

static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	int i;

	ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->rxdma_mon_buf_ring);

	if (ab->hw_params->rxdma1_enable)
		return 0;

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
		ath12k_dp_rxdma_mon_buf_ring_free(ab,
						  &dp->rx_mon_status_refill_ring[i]);

	return 0;
}

static int ath12k_dp_rxdma_mon_ring_buf_setup(struct ath12k_base *ab,
					      struct dp_rxdma_mon_ring *rx_ring,
					      u32 ringtype)
{
	int num_entries;

	num_entries = rx_ring->refill_buf_ring.size /
		ath12k_hal_srng_get_entrysize(ab, ringtype);

	rx_ring->bufs_max = num_entries;

	if (ringtype == HAL_RXDMA_MONITOR_STATUS)
		ath12k_dp_mon_status_bufs_replenish(ab, rx_ring,
						    num_entries);
	else
		ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries);

	return 0;
}

static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab,
					  struct dp_rxdma_ring *rx_ring)
{
	LIST_HEAD(list);

	rx_ring->bufs_max = rx_ring->refill_buf_ring.size /
			ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_BUF);

	ath12k_dp_rx_bufs_replenish(ab, rx_ring, &list, 0);

	return 0;
}

static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct dp_rxdma_mon_ring *mon_ring;
	int ret, i;

	ret = ath12k_dp_rxdma_ring_buf_setup(ab, &dp->rx_refill_buf_ring);
	if (ret) {
		ath12k_warn(ab,
			    "failed to setup HAL_RXDMA_BUF\n");
		return ret;
	}

	if (ab->hw_params->rxdma1_enable) {
		ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab,
							 &dp->rxdma_mon_buf_ring,
							 HAL_RXDMA_MONITOR_BUF);
		if (ret)
			ath12k_warn(ab,
				    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
		return ret;
	}

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
		mon_ring = &dp->rx_mon_status_refill_ring[i];
		ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab, mon_ring,
							 HAL_RXDMA_MONITOR_STATUS);
		if (ret) {
			ath12k_warn(ab,
				    "failed to setup HAL_RXDMA_MONITOR_STATUS\n");
			return ret;
		}
	}

	return 0;
}

static void ath12k_dp_rx_pdev_srng_free(struct ath12k *ar)
{
	struct ath12k_pdev_dp *dp = &ar->dp;
	struct ath12k_base *ab = ar->ab;
	int i;

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
		ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_dst_ring[i]);
}

void ath12k_dp_rx_pdev_reo_cleanup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	int i;

	for (i = 0; i < DP_REO_DST_RING_MAX; i++)
		ath12k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
}

int ath12k_dp_rx_pdev_reo_setup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	int ret;
	int i;

	for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
		ret = ath12k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
					   HAL_REO_DST, i, 0,
					   DP_REO_DST_RING_SIZE);
		if (ret) {
			ath12k_warn(ab, "failed to setup reo_dst_ring\n");
			goto err_reo_cleanup;
		}
	}

	return 0;

err_reo_cleanup:
	ath12k_dp_rx_pdev_reo_cleanup(ab);

	return ret;
}

static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar)
{
	struct ath12k_pdev_dp *dp = &ar->dp;
	struct ath12k_base *ab = ar->ab;
	int i;
	int ret;
	u32 mac_id = dp->mac_id;

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
		ret = ath12k_dp_srng_setup(ar->ab,
					   &dp->rxdma_mon_dst_ring[i],
					   HAL_RXDMA_MONITOR_DST,
					   0, mac_id + i,
					   DP_RXDMA_MONITOR_DST_RING_SIZE);
		if (ret) {
			ath12k_warn(ar->ab,
				    "failed to setup HAL_RXDMA_MONITOR_DST\n");
			return ret;
		}
	}

	return 0;
}

void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
	struct ath12k_dp_rx_reo_cache_flush_elem *cmd_cache, *tmp_cache;

	spin_lock_bh(&dp->reo_cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
		list_del(&cmd->list);
		dma_unmap_single(ab->dev, cmd->data.qbuf.paddr_aligned,
				 cmd->data.qbuf.size, DMA_BIDIRECTIONAL);
		kfree(cmd->data.qbuf.vaddr);
		kfree(cmd);
	}

	list_for_each_entry_safe(cmd_cache, tmp_cache,
				 &dp->reo_cmd_cache_flush_list, list) {
		list_del(&cmd_cache->list);
		dp->reo_cmd_cache_flush_count--;
		dma_unmap_single(ab->dev, cmd_cache->data.qbuf.paddr_aligned,
				 cmd_cache->data.qbuf.size, DMA_BIDIRECTIONAL);
		kfree(cmd_cache->data.qbuf.vaddr);
		kfree(cmd_cache);
	}
	spin_unlock_bh(&dp->reo_cmd_lock);
}

static void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx,
				   enum hal_reo_cmd_status status)
{
	struct ath12k_dp_rx_tid *rx_tid = ctx;

	if (status != HAL_REO_CMD_SUCCESS)
		ath12k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
			    rx_tid->tid, status);

	dma_unmap_single(dp->ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->qbuf.vaddr);
	rx_tid->qbuf.vaddr = NULL;
}

static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab, struct ath12k_dp_rx_tid *rx_tid,
				  enum hal_reo_cmd_type type,
				  struct ath12k_hal_reo_cmd *cmd,
				  void (*cb)(struct ath12k_dp *dp, void *ctx,
					     enum hal_reo_cmd_status status))
{
	struct ath12k_dp *dp = &ab->dp;
	struct ath12k_dp_rx_reo_cmd *dp_cmd;
	struct hal_srng *cmd_ring;
	int cmd_num;

	cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
	cmd_num = ath12k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);

	/* cmd_num starts from 1; on failure the error code is returned */
	if (cmd_num < 0)
		return cmd_num;

	/* reo cmd ring descriptors have cmd_num starting from 1 */
	if (cmd_num == 0)
		return -EINVAL;

	if (!cb)
		return 0;

	/* Can this be optimized so that we keep the pending command list only
	 * for tid delete command to free up the resource on the command status
	 * indication?
	 */
	dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);

	if (!dp_cmd)
		return -ENOMEM;

	memcpy(&dp_cmd->data, rx_tid, sizeof(*rx_tid));
	dp_cmd->cmd_num = cmd_num;
	dp_cmd->handler = cb;

	spin_lock_bh(&dp->reo_cmd_lock);
	list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
	spin_unlock_bh(&dp->reo_cmd_lock);

	return 0;
}

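/* Flush a tid's (possibly multi-descriptor) REO queue from the HW
 * cache. The queue is flushed back to front in qdesc-sized chunks;
 * only the final command, for the base address, requests a status
 * callback, which unmaps and frees the queue buffer on completion.
 */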
static void ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
				      struct ath12k_dp_rx_tid *rx_tid)
{
	struct ath12k_hal_reo_cmd cmd = {0};
	unsigned long tot_desc_sz, desc_sz;
	int ret;

	tot_desc_sz = rx_tid->qbuf.size;
	desc_sz = ath12k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);

	while (tot_desc_sz > desc_sz) {
		tot_desc_sz -= desc_sz;
		cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned + tot_desc_sz);
		cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
		ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
					     HAL_REO_CMD_FLUSH_CACHE, &cmd,
					     NULL);
		if (ret)
			ath12k_warn(ab,
				    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
				    rx_tid->tid, ret);
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
	cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
				     HAL_REO_CMD_FLUSH_CACHE,
				     &cmd, ath12k_dp_reo_cmd_free);
	if (ret) {
		ath12k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
			   rx_tid->tid, ret);
		dma_unmap_single(ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size,
				 DMA_BIDIRECTIONAL);
		kfree(rx_tid->qbuf.vaddr);
		rx_tid->qbuf.vaddr = NULL;
	}
}

static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
				      enum hal_reo_cmd_status status)
{
	struct ath12k_base *ab = dp->ab;
	struct ath12k_dp_rx_tid *rx_tid = ctx;
	struct ath12k_dp_rx_reo_cache_flush_elem *elem, *tmp;

	if (status == HAL_REO_CMD_DRAIN) {
		goto free_desc;
	} else if (status != HAL_REO_CMD_SUCCESS) {
		/* Shouldn't happen! Cleanup in case of other failure? */
		ath12k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
			    rx_tid->tid, status);
		return;
	}

	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
	if (!elem)
		goto free_desc;

	elem->ts = jiffies;
	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));

	spin_lock_bh(&dp->reo_cmd_lock);
	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
	dp->reo_cmd_cache_flush_count++;

	/* Flush and invalidate aged REO desc from HW cache */
	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
				 list) {
		if (dp->reo_cmd_cache_flush_count > ATH12K_DP_RX_REO_DESC_FREE_THRES ||
		    time_after(jiffies, elem->ts +
			       msecs_to_jiffies(ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS))) {
			list_del(&elem->list);
			dp->reo_cmd_cache_flush_count--;

			/* Unlock the reo_cmd_lock before using ath12k_dp_reo_cmd_send()
			 * within ath12k_dp_reo_cache_flush. The reo_cmd_cache_flush_list
			 * is used in only two contexts, one is in this function called
			 * from napi and the other in ath12k_dp_free during core destroy.
			 * Before dp_free, the irqs would be disabled and would wait to
			 * synchronize. Hence there wouldn't be any race against add or
			 * delete to this list. Hence unlock-lock is safe here.
			 */
			spin_unlock_bh(&dp->reo_cmd_lock);

			ath12k_dp_reo_cache_flush(ab, &elem->data);
			kfree(elem);
			spin_lock_bh(&dp->reo_cmd_lock);
		}
	}
	spin_unlock_bh(&dp->reo_cmd_lock);

	return;
free_desc:
	dma_unmap_single(ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size,
			 DMA_BIDIRECTIONAL);
	kfree(rx_tid->qbuf.vaddr);
	rx_tid->qbuf.vaddr = NULL;
}

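/* Program the REO queue reference LUT entry for a peer/tid with the
 * queue descriptor address. The LUT is indexed linearly; for example,
 * peer_id 2, tid 3 maps to slot 2 * (IEEE80211_NUM_TIDS + 1) + 3.
 * ML peers are flagged via ATH12K_PEER_ML_ID_VALID and use a separate
 * LUT (ml_reoq_lut).
 */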
static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u16 tid,
					  dma_addr_t paddr)
{
	struct ath12k_reo_queue_ref *qref;
	struct ath12k_dp *dp = &ab->dp;
	bool ml_peer = false;

	if (!ab->hw_params->reoq_lut_support)
		return;

	if (peer_id & ATH12K_PEER_ML_ID_VALID) {
		peer_id &= ~ATH12K_PEER_ML_ID_VALID;
		ml_peer = true;
	}

	if (ml_peer)
		qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
			(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
	else
		qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
			(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);

	qref->info0 = u32_encode_bits(lower_32_bits(paddr),
				      BUFFER_ADDR_INFO0_ADDR);
	qref->info1 = u32_encode_bits(upper_32_bits(paddr),
				      BUFFER_ADDR_INFO1_ADDR) |
		      u32_encode_bits(tid, DP_REO_QREF_NUM);
	ath12k_hal_reo_shared_qaddr_cache_clear(ab);
}

static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u16 tid)
{
	struct ath12k_reo_queue_ref *qref;
	struct ath12k_dp *dp = &ab->dp;
	bool ml_peer = false;

	if (!ab->hw_params->reoq_lut_support)
		return;

	if (peer_id & ATH12K_PEER_ML_ID_VALID) {
		peer_id &= ~ATH12K_PEER_ML_ID_VALID;
		ml_peer = true;
	}

	if (ml_peer)
		qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
			(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
	else
		qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
			(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);

	qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR);
	qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) |
		      u32_encode_bits(tid, DP_REO_QREF_NUM);
}

void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
				  struct ath12k_peer *peer, u8 tid)
{
	struct ath12k_hal_reo_cmd cmd = {0};
	struct ath12k_dp_rx_tid *rx_tid = &peer->rx_tid[tid];
	int ret;

	if (!rx_tid->active)
		return;

	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
	cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
	cmd.upd0 = HAL_REO_CMD_UPD0_VLD;
	ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
				     HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
				     ath12k_dp_rx_tid_del_func);
	if (ret) {
		ath12k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
			   tid, ret);
		dma_unmap_single(ar->ab->dev, rx_tid->qbuf.paddr_aligned,
				 rx_tid->qbuf.size, DMA_BIDIRECTIONAL);
		kfree(rx_tid->qbuf.vaddr);
		rx_tid->qbuf.vaddr = NULL;
	}

	if (peer->mlo)
		ath12k_peer_rx_tid_qref_reset(ar->ab, peer->ml_id, tid);
	else
		ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid);

	rx_tid->active = false;
}

int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
				  struct ath12k_buffer_addr *buf_addr_info,
				  enum hal_wbm_rel_bm_act action)
{
	struct hal_wbm_release_ring *desc;
	struct ath12k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	int ret = 0;

	srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOBUFS;
		goto exit;
	}

	ath12k_hal_rx_msdu_link_desc_set(ab, desc, buf_addr_info, action);

exit:
	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return ret;
}

static void ath12k_dp_rx_frags_cleanup(struct ath12k_dp_rx_tid *rx_tid,
				       bool rel_link_desc)
{
	struct ath12k_buffer_addr *buf_addr_info;
	struct ath12k_base *ab = rx_tid->ab;

	lockdep_assert_held(&ab->base_lock);

	if (rx_tid->dst_ring_desc) {
		if (rel_link_desc) {
			buf_addr_info = &rx_tid->dst_ring_desc->buf_addr_info;
			ath12k_dp_rx_link_desc_return(ab, buf_addr_info,
						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
		}
		kfree(rx_tid->dst_ring_desc);
		rx_tid->dst_ring_desc = NULL;
	}

	rx_tid->cur_sn = 0;
	rx_tid->last_frag_no = 0;
	rx_tid->rx_frag_bitmap = 0;
	__skb_queue_purge(&rx_tid->rx_frags);
}

void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_peer *peer)
{
	struct ath12k_dp_rx_tid *rx_tid;
	int i;

	lockdep_assert_held(&ar->ab->base_lock);

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
		rx_tid = &peer->rx_tid[i];

		ath12k_dp_rx_peer_tid_delete(ar, peer, i);
		ath12k_dp_rx_frags_cleanup(rx_tid, true);

		spin_unlock_bh(&ar->ab->base_lock);
		timer_delete_sync(&rx_tid->frag_timer);
		spin_lock_bh(&ar->ab->base_lock);
	}
}

static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar,
					 struct ath12k_peer *peer,
					 struct ath12k_dp_rx_tid *rx_tid,
					 u32 ba_win_sz, u16 ssn,
					 bool update_ssn)
{
	struct ath12k_hal_reo_cmd cmd = {0};
	int ret;

	cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
	cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
	cmd.ba_window_size = ba_win_sz;

	if (update_ssn) {
		cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
		cmd.upd2 = u32_encode_bits(ssn, HAL_REO_CMD_UPD2_SSN);
	}

	ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
				     HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
				     NULL);
	if (ret) {
		ath12k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
			    rx_tid->tid, ret);
		return ret;
	}

	rx_tid->ba_win_sz = ba_win_sz;

	return 0;
}

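/* Attach a REO queue descriptor buffer to @rx_tid, allocating one on
 * first use. The allocation is padded by HAL_LINK_DESC_ALIGN - 1 bytes
 * so the descriptor can be placed at an aligned address; the aligned
 * virtual/DMA addresses are what hardware is given. The buffer is
 * cached in ahsta->reoq_bufs[tid] and reused on later setups.
 */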
static int ath12k_dp_rx_assign_reoq(struct ath12k_base *ab,
				    struct ath12k_sta *ahsta,
				    struct ath12k_dp_rx_tid *rx_tid,
				    u16 ssn, enum hal_pn_type pn_type)
{
	u32 ba_win_sz = rx_tid->ba_win_sz;
	struct ath12k_reoq_buf *buf;
	void *vaddr, *vaddr_aligned;
	dma_addr_t paddr_aligned;
	u8 tid = rx_tid->tid;
	u32 hw_desc_sz;
	int ret;

	buf = &ahsta->reoq_bufs[tid];
	if (!buf->vaddr) {
		/* TODO: Optimize the memory allocation for qos tid based on
		 * the actual BA window size in REO tid update path.
		 */
		if (tid == HAL_DESC_REO_NON_QOS_TID)
			hw_desc_sz = ath12k_hal_reo_qdesc_size(ba_win_sz, tid);
		else
			hw_desc_sz = ath12k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);

		vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
		if (!vaddr)
			return -ENOMEM;

		vaddr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);

		ath12k_hal_reo_qdesc_setup(vaddr_aligned, tid, ba_win_sz,
					   ssn, pn_type);

		paddr_aligned = dma_map_single(ab->dev, vaddr_aligned, hw_desc_sz,
					       DMA_BIDIRECTIONAL);
		ret = dma_mapping_error(ab->dev, paddr_aligned);
		if (ret) {
			kfree(vaddr);
			return ret;
		}

		buf->vaddr = vaddr;
		buf->paddr_aligned = paddr_aligned;
		buf->size = hw_desc_sz;
	}

	rx_tid->qbuf = *buf;
	rx_tid->active = true;

	return 0;
}

int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id,
				u8 tid, u32 ba_win_sz, u16 ssn,
				enum hal_pn_type pn_type)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp *dp = &ab->dp;
	struct ath12k_peer *peer;
	struct ath12k_sta *ahsta;
	struct ath12k_dp_rx_tid *rx_tid;
	dma_addr_t paddr_aligned;
	int ret;

	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find(ab, vdev_id, peer_mac);
	if (!peer) {
		spin_unlock_bh(&ab->base_lock);
		ath12k_warn(ab, "failed to find the peer to set up rx tid\n");
		return -ENOENT;
	}

	if (ab->hw_params->dp_primary_link_only &&
	    !peer->primary_link) {
		spin_unlock_bh(&ab->base_lock);
		return 0;
	}

	if (ab->hw_params->reoq_lut_support &&
	    (!dp->reoq_lut.vaddr || !dp->ml_reoq_lut.vaddr)) {
		spin_unlock_bh(&ab->base_lock);
		ath12k_warn(ab, "reo qref table is not setup\n");
		return -EINVAL;
	}

	if (peer->peer_id > DP_MAX_PEER_ID || tid > IEEE80211_NUM_TIDS) {
		ath12k_warn(ab, "peer id of peer %d or tid %d doesn't allow reoq setup\n",
			    peer->peer_id, tid);
		spin_unlock_bh(&ab->base_lock);
		return -EINVAL;
	}

	rx_tid = &peer->rx_tid[tid];
	/* Update the tid queue if it is already setup */
	if (rx_tid->active) {
		ret = ath12k_peer_rx_tid_reo_update(ar, peer, rx_tid,
						    ba_win_sz, ssn, true);
		spin_unlock_bh(&ab->base_lock);
		if (ret) {
			ath12k_warn(ab, "failed to update reo for rx tid %d\n", tid);
			return ret;
		}

		if (!ab->hw_params->reoq_lut_support) {
			paddr_aligned = rx_tid->qbuf.paddr_aligned;
			ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
								     peer_mac,
								     paddr_aligned, tid,
								     1, ba_win_sz);
			if (ret) {
				ath12k_warn(ab, "failed to setup peer rx reorder queue for tid %d: %d\n",
					    tid, ret);
				return ret;
			}
		}

		return 0;
	}

	rx_tid->tid = tid;

	rx_tid->ba_win_sz = ba_win_sz;

	ahsta = ath12k_sta_to_ahsta(peer->sta);
	ret = ath12k_dp_rx_assign_reoq(ab, ahsta, rx_tid, ssn, pn_type);
	if (ret) {
		spin_unlock_bh(&ab->base_lock);
		ath12k_warn(ab, "failed to assign reoq buf for rx tid %u\n", tid);
		return ret;
	}

	paddr_aligned = rx_tid->qbuf.paddr_aligned;
	if (ab->hw_params->reoq_lut_support) {
		/* Update the REO queue LUT at the corresponding peer id
		 * and tid with qaddr.
		 */
		if (peer->mlo)
			ath12k_peer_rx_tid_qref_setup(ab, peer->ml_id, tid,
						      paddr_aligned);
		else
			ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid,
						      paddr_aligned);

		spin_unlock_bh(&ab->base_lock);
	} else {
		spin_unlock_bh(&ab->base_lock);
		ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
							     paddr_aligned, tid, 1,
							     ba_win_sz);
	}

	return ret;
}

int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
			     struct ieee80211_ampdu_params *params,
			     u8 link_id)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
	struct ath12k_link_sta *arsta;
	int vdev_id;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
				  ahsta->link[link_id]);
	if (!arsta)
		return -ENOLINK;

	vdev_id = arsta->arvif->vdev_id;

	ret = ath12k_dp_rx_peer_tid_setup(ar, arsta->addr, vdev_id,
					  params->tid, params->buf_size,
					  params->ssn, arsta->ahsta->pn_type);
	if (ret)
		ath12k_warn(ab, "failed to setup rx tid %d\n", ret);

	return ret;
}

int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
			    struct ieee80211_ampdu_params *params,
			    u8 link_id)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_peer *peer;
	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
	struct ath12k_link_sta *arsta;
	int vdev_id;
	bool active;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
				  ahsta->link[link_id]);
	if (!arsta)
		return -ENOLINK;

	vdev_id = arsta->arvif->vdev_id;

	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find(ab, vdev_id, arsta->addr);
	if (!peer) {
		spin_unlock_bh(&ab->base_lock);
		ath12k_warn(ab, "failed to find the peer to stop rx aggregation\n");
		return -ENOENT;
	}

	active = peer->rx_tid[params->tid].active;

	if (!active) {
		spin_unlock_bh(&ab->base_lock);
		return 0;
	}

	ret = ath12k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
	spin_unlock_bh(&ab->base_lock);
	if (ret) {
		ath12k_warn(ab, "failed to update reo for rx tid %d: %d\n",
			    params->tid, ret);
		return ret;
	}

	return ret;
}

int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif,
				       const u8 *peer_addr,
				       enum set_key_cmd key_cmd,
				       struct ieee80211_key_conf *key)
{
	struct ath12k *ar = arvif->ar;
	struct ath12k_base *ab = ar->ab;
	struct ath12k_hal_reo_cmd cmd = {0};
	struct ath12k_peer *peer;
	struct ath12k_dp_rx_tid *rx_tid;
	u8 tid;
	int ret = 0;

	/* NOTE: Enable PN/TSC replay check offload only for unicast frames.
	 * We use mac80211 PN/TSC replay check functionality for bcast/mcast
	 * for now.
	 */
	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return 0;

	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
	cmd.upd0 = HAL_REO_CMD_UPD0_PN |
		   HAL_REO_CMD_UPD0_PN_SIZE |
		   HAL_REO_CMD_UPD0_PN_VALID |
		   HAL_REO_CMD_UPD0_PN_CHECK |
		   HAL_REO_CMD_UPD0_SVLD;

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_CCMP_256:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		if (key_cmd == SET_KEY) {
			cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
			cmd.pn_size = 48;
		}
		break;
	default:
		break;
	}

	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr);
	if (!peer) {
		spin_unlock_bh(&ab->base_lock);
		ath12k_warn(ab, "failed to find the peer %pM to configure pn replay detection\n",
			    peer_addr);
		return -ENOENT;
	}

	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
		rx_tid = &peer->rx_tid[tid];
		if (!rx_tid->active)
			continue;
		cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
		cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
		ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
					     HAL_REO_CMD_UPDATE_RX_QUEUE,
					     &cmd, NULL);
		if (ret) {
			ath12k_warn(ab, "failed to configure rx tid %d queue of peer %pM for pn replay detection %d\n",
				    tid, peer_addr, ret);
			break;
		}
	}

	spin_unlock_bh(&ab->base_lock);

	return ret;
}

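/* Return the user_stats slot to use for @peer_id: either the slot that
 * already holds this peer id or the first slot without a valid peer id.
 * -EINVAL means every slot is taken by other peers.
 */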
static int ath12k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
				      u16 peer_id)
{
	int i;

	for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
		if (ppdu_stats->user_stats[i].is_valid_peer_id) {
			if (peer_id == ppdu_stats->user_stats[i].peer_id)
				return i;
		} else {
			return i;
		}
	}

	return -EINVAL;
}

static int ath12k_htt_tlv_ppdu_stats_parse(struct ath12k_base *ab,
					   u16 tag, u16 len, const void *ptr,
					   void *data)
{
	const struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *ba_status;
	const struct htt_ppdu_stats_usr_cmpltn_cmn *cmplt_cmn;
	const struct htt_ppdu_stats_user_rate *user_rate;
	struct htt_ppdu_stats_info *ppdu_info;
	struct htt_ppdu_user_stats *user_stats;
	int cur_user;
	u16 peer_id;

	ppdu_info = data;

	switch (tag) {
	case HTT_PPDU_STATS_TAG_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_common)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		memcpy(&ppdu_info->ppdu_stats.common, ptr,
		       sizeof(struct htt_ppdu_stats_common));
		break;
	case HTT_PPDU_STATS_TAG_USR_RATE:
		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}
		user_rate = ptr;
		peer_id = le16_to_cpu(user_rate->sw_peer_id);
		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy(&user_stats->rate, ptr,
		       sizeof(struct htt_ppdu_stats_user_rate));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		cmplt_cmn = ptr;
		peer_id = le16_to_cpu(cmplt_cmn->sw_peer_id);
		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy(&user_stats->cmpltn_cmn, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
		user_stats->tlv_flags |= BIT(tag);
		break;
	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
		if (len <
		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
				    len, tag);
			return -EINVAL;
		}

		ba_status = ptr;
		peer_id = le16_to_cpu(ba_status->sw_peer_id);
		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
						      peer_id);
		if (cur_user < 0)
			return -EINVAL;
		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
		user_stats->peer_id = peer_id;
		user_stats->is_valid_peer_id = true;
		memcpy(&user_stats->ack_ba, ptr,
		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
		user_stats->tlv_flags |= BIT(tag);
		break;
	}
	return 0;
}

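/* Walk a buffer of HTT TLVs and call @iter for each one. Every TLV
 * starts with a 32-bit header encoding its tag and payload length;
 * parsing aborts on a truncated header or payload.
 */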
int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
			   int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
				       const void *ptr, void *data),
			   void *data)
{
	const struct htt_tlv *tlv;
	const void *begin = ptr;
	u16 tlv_tag, tlv_len;
	int ret = -EINVAL;

	while (len > 0) {
		if (len < sizeof(*tlv)) {
			ath12k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
				   ptr - begin, len, sizeof(*tlv));
			return -EINVAL;
		}
		tlv = (struct htt_tlv *)ptr;
		tlv_tag = le32_get_bits(tlv->header, HTT_TLV_TAG);
		tlv_len = le32_get_bits(tlv->header, HTT_TLV_LEN);
		ptr += sizeof(*tlv);
		len -= sizeof(*tlv);

		if (tlv_len > len) {
			ath12k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
				   tlv_tag, ptr - begin, len, tlv_len);
			return -EINVAL;
		}
		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
		if (ret == -ENOMEM)
			return ret;

		ptr += tlv_len;
		len -= tlv_len;
	}
	return 0;
}

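/* Translate the per-user rate and completion TLVs of one PPDU into
 * mac80211 rate_info and driver tx statistics for the corresponding
 * station.
 */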
static void
ath12k_update_per_peer_tx_stats(struct ath12k *ar,
				struct htt_ppdu_stats *ppdu_stats, u8 user)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_peer *peer;
	struct ieee80211_sta *sta;
	struct ath12k_sta *ahsta;
	struct ath12k_link_sta *arsta;
	struct htt_ppdu_stats_user_rate *user_rate;
	struct ath12k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
	int ret;
	u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
	u32 v, succ_bytes = 0;
	u16 tones, rate = 0, succ_pkts = 0;
	u32 tx_duration = 0;
	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
	bool is_ampdu = false;

	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
		return;

	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
		is_ampdu =
			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);

	if (usr_stats->tlv_flags &
	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
		succ_bytes = le32_to_cpu(usr_stats->ack_ba.success_bytes);
		succ_pkts = le32_get_bits(usr_stats->ack_ba.info,
					  HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M);
		tid = le32_get_bits(usr_stats->ack_ba.info,
				    HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM);
	}

	if (common->fes_duration_us)
		tx_duration = le32_to_cpu(common->fes_duration_us);

	user_rate = &usr_stats->rate;
	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);

	/* Note: if the host configured fixed rates, or in some other special
	 * cases, broadcast/management frames are sent at different rates.
	 * Should firmware rate control be skipped for these?
	 */

	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH12K_HE_MCS_MAX) {
		ath12k_warn(ab, "Invalid HE mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH12K_VHT_MCS_MAX) {
		ath12k_warn(ab, "Invalid VHT mcs %d peer stats", mcs);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH12K_HT_MCS_MAX || nss < 1)) {
		ath12k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
			    mcs, nss);
		return;
	}

	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
		ret = ath12k_mac_hw_ratecode_to_legacy_rate(mcs,
							    flags,
							    &rate_idx,
							    &rate);
		if (ret < 0)
			return;
	}

	rcu_read_lock();
	spin_lock_bh(&ab->base_lock);
	peer = ath12k_peer_find_by_id(ab, usr_stats->peer_id);

	if (!peer || !peer->sta) {
		spin_unlock_bh(&ab->base_lock);
		rcu_read_unlock();
		return;
	}

	sta = peer->sta;
	ahsta = ath12k_sta_to_ahsta(sta);
	arsta = &ahsta->deflink;

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));

	switch (flags) {
	case WMI_RATE_PREAMBLE_OFDM:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_CCK:
		arsta->txrate.legacy = rate;
		break;
	case WMI_RATE_PREAMBLE_HT:
		arsta->txrate.mcs = mcs + 8 * (nss - 1);
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_VHT:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
		break;
	case WMI_RATE_PREAMBLE_HE:
		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
		arsta->txrate.he_dcm = dcm;
		arsta->txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
		tones = le16_to_cpu(user_rate->ru_end) -
			le16_to_cpu(user_rate->ru_start) + 1;
		v = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(tones);
		arsta->txrate.he_ru_alloc = v;
		break;
	}

	arsta->txrate.nss = nss;
	arsta->txrate.bw = ath12k_mac_bw_to_mac80211_bw(bw);
	arsta->tx_duration += tx_duration;
	memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));

	/* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
	 * So skip peer stats update for mgmt packets.
	 */
	if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
		memset(peer_stats, 0, sizeof(*peer_stats));
		peer_stats->succ_pkts = succ_pkts;
		peer_stats->succ_bytes = succ_bytes;
		peer_stats->is_ampdu = is_ampdu;
		peer_stats->duration = tx_duration;
		peer_stats->ba_fails =
			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
	}

	spin_unlock_bh(&ab->base_lock);
	rcu_read_unlock();
}

static void ath12k_htt_update_ppdu_stats(struct ath12k *ar,
					 struct htt_ppdu_stats *ppdu_stats)
{
	u8 user;

	for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
		ath12k_update_per_peer_tx_stats(ar, ppdu_stats, user);
}

static
struct htt_ppdu_stats_info *ath12k_dp_htt_get_ppdu_desc(struct ath12k *ar,
							u32 ppdu_id)
{
	struct htt_ppdu_stats_info *ppdu_info;

	lockdep_assert_held(&ar->data_lock);
	if (!list_empty(&ar->ppdu_stats_info)) {
		list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
			if (ppdu_info->ppdu_id == ppdu_id)
				return ppdu_info;
		}

		if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
			ppdu_info = list_first_entry(&ar->ppdu_stats_info,
						     typeof(*ppdu_info), list);
			list_del(&ppdu_info->list);
			ar->ppdu_stat_list_depth--;
			ath12k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
			kfree(ppdu_info);
		}
	}

	ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
	if (!ppdu_info)
		return NULL;

	list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
	ar->ppdu_stat_list_depth++;

	return ppdu_info;
}

static void ath12k_copy_to_delay_stats(struct ath12k_peer *peer,
				       struct htt_ppdu_user_stats *usr_stats)
{
	peer->ppdu_stats_delayba.sw_peer_id = le16_to_cpu(usr_stats->rate.sw_peer_id);
	peer->ppdu_stats_delayba.info0 = le32_to_cpu(usr_stats->rate.info0);
	peer->ppdu_stats_delayba.ru_end = le16_to_cpu(usr_stats->rate.ru_end);
	peer->ppdu_stats_delayba.ru_start = le16_to_cpu(usr_stats->rate.ru_start);
	peer->ppdu_stats_delayba.info1 = le32_to_cpu(usr_stats->rate.info1);
	peer->ppdu_stats_delayba.rate_flags = le32_to_cpu(usr_stats->rate.rate_flags);
	peer->ppdu_stats_delayba.resp_rate_flags =
		le32_to_cpu(usr_stats->rate.resp_rate_flags);

	peer->delayba_flag = true;
}

static void ath12k_copy_to_bar(struct ath12k_peer *peer,
			       struct htt_ppdu_user_stats *usr_stats)
{
	usr_stats->rate.sw_peer_id = cpu_to_le16(peer->ppdu_stats_delayba.sw_peer_id);
	usr_stats->rate.info0 = cpu_to_le32(peer->ppdu_stats_delayba.info0);
	usr_stats->rate.ru_end = cpu_to_le16(peer->ppdu_stats_delayba.ru_end);
	usr_stats->rate.ru_start = cpu_to_le16(peer->ppdu_stats_delayba.ru_start);
	usr_stats->rate.info1 = cpu_to_le32(peer->ppdu_stats_delayba.info1);
	usr_stats->rate.rate_flags = cpu_to_le32(peer->ppdu_stats_delayba.rate_flags);
	usr_stats->rate.resp_rate_flags =
		cpu_to_le32(peer->ppdu_stats_delayba.resp_rate_flags);

	peer->delayba_flag = false;
}

static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab,
				      struct sk_buff *skb)
{
	struct ath12k_htt_ppdu_stats_msg *msg;
	struct htt_ppdu_stats_info *ppdu_info;
	struct ath12k_peer *peer = NULL;
	struct htt_ppdu_user_stats *usr_stats = NULL;
	u32 peer_id = 0;
	struct ath12k *ar;
	int ret, i;
	u8 pdev_id;
	u32 ppdu_id, len;

	msg = (struct ath12k_htt_ppdu_stats_msg *)skb->data;
	len = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE);
	if (len > (skb->len - struct_size(msg, data, 0))) {
		ath12k_warn(ab,
			    "HTT PPDU STATS event has unexpected payload size %u, should be smaller than %u\n",
			    len, skb->len);
		return -EINVAL;
	}

	pdev_id = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PDEV_ID);
	ppdu_id = le32_to_cpu(msg->ppdu_id);

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		ret = -EINVAL;
		goto exit;
	}

	spin_lock_bh(&ar->data_lock);
	ppdu_info = ath12k_dp_htt_get_ppdu_desc(ar, ppdu_id);
	if (!ppdu_info) {
		spin_unlock_bh(&ar->data_lock);
		ret = -EINVAL;
		goto exit;
	}

	ppdu_info->ppdu_id = ppdu_id;
	ret = ath12k_dp_htt_tlv_iter(ab, msg->data, len,
				     ath12k_htt_tlv_ppdu_stats_parse,
				     (void *)ppdu_info);
	if (ret) {
		spin_unlock_bh(&ar->data_lock);
		ath12k_warn(ab, "Failed to parse tlv %d\n", ret);
		goto exit;
	}

	if (ppdu_info->ppdu_stats.common.num_users >= HTT_PPDU_STATS_MAX_USERS) {
		spin_unlock_bh(&ar->data_lock);
		ath12k_warn(ab,
			    "HTT PPDU STATS event has unexpected num_users %u, should be smaller than %u\n",
			    ppdu_info->ppdu_stats.common.num_users,
			    HTT_PPDU_STATS_MAX_USERS);
		ret = -EINVAL;
		goto exit;
	}

	/* back up data rate tlv for all peers */
	if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_DATA &&
	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON)) &&
	    ppdu_info->delay_ba) {
		for (i = 0; i < ppdu_info->ppdu_stats.common.num_users; i++) {
			peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
			spin_lock_bh(&ab->base_lock);
			peer = ath12k_peer_find_by_id(ab, peer_id);
			if (!peer) {
				spin_unlock_bh(&ab->base_lock);
				continue;
			}

			usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
			if (usr_stats->delay_ba)
				ath12k_copy_to_delay_stats(peer, usr_stats);
			spin_unlock_bh(&ab->base_lock);
		}
	}

	/* restore all peers' data rate tlv to mu-bar tlv */
	if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_BAR &&
	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON))) {
		for (i = 0; i < ppdu_info->bar_num_users; i++) {
			peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
			spin_lock_bh(&ab->base_lock);
			peer = ath12k_peer_find_by_id(ab, peer_id);
			if (!peer) {
				spin_unlock_bh(&ab->base_lock);
				continue;
			}

			usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
			if (peer->delayba_flag)
				ath12k_copy_to_bar(peer, usr_stats);
			spin_unlock_bh(&ab->base_lock);
		}
	}

	spin_unlock_bh(&ar->data_lock);

exit:
	rcu_read_unlock();

	return ret;
}

static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab,
						struct sk_buff *skb)
{
	struct ath12k_htt_mlo_offset_msg *msg;
	struct ath12k_pdev *pdev;
	struct ath12k *ar;
	u8 pdev_id;

	msg = (struct ath12k_htt_mlo_offset_msg *)skb->data;
	pdev_id = u32_get_bits(__le32_to_cpu(msg->info),
			       HTT_T2H_MLO_OFFSET_INFO_PDEV_ID);

	rcu_read_lock();
	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
	if (!ar) {
		/* It is possible that the ar is not yet active (started).
		 * The above function will only look for the active pdev
		 * and hence %NULL return is possible. Just silently
		 * discard this message
		 */
		goto exit;
	}

	spin_lock_bh(&ar->data_lock);
	pdev = ar->pdev;

	pdev->timestamp.info = __le32_to_cpu(msg->info);
	pdev->timestamp.sync_timestamp_lo_us = __le32_to_cpu(msg->sync_timestamp_lo_us);
	pdev->timestamp.sync_timestamp_hi_us = __le32_to_cpu(msg->sync_timestamp_hi_us);
	pdev->timestamp.mlo_offset_lo = __le32_to_cpu(msg->mlo_offset_lo);
	pdev->timestamp.mlo_offset_hi = __le32_to_cpu(msg->mlo_offset_hi);
	pdev->timestamp.mlo_offset_clks = __le32_to_cpu(msg->mlo_offset_clks);
	pdev->timestamp.mlo_comp_clks = __le32_to_cpu(msg->mlo_comp_clks);
	pdev->timestamp.mlo_comp_timer = __le32_to_cpu(msg->mlo_comp_timer);

	spin_unlock_bh(&ar->data_lock);
exit:
	rcu_read_unlock();
}

void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
				       struct sk_buff *skb)
{
	struct ath12k_dp *dp = &ab->dp;
	struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
	enum htt_t2h_msg_type type;
	u16 peer_id;
	u8 vdev_id;
	u8 mac_addr[ETH_ALEN];
	u16 peer_mac_h16;
	u16 ast_hash = 0;
	u16 hw_peer_id;

	type = le32_get_bits(resp->version_msg.version, HTT_T2H_MSG_TYPE);

	ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type);

	switch (type) {
	case HTT_T2H_MSG_TYPE_VERSION_CONF:
		dp->htt_tgt_ver_major = le32_get_bits(resp->version_msg.version,
						      HTT_T2H_VERSION_CONF_MAJOR);
		dp->htt_tgt_ver_minor = le32_get_bits(resp->version_msg.version,
						      HTT_T2H_VERSION_CONF_MINOR);
		complete(&dp->htt_tgt_version_received);
		break;
	/* TODO: remove unused peer map versions after testing */
	case HTT_T2H_MSG_TYPE_PEER_MAP:
		vdev_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
		peer_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_PEER_ID);
		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
				       peer_mac_h16, mac_addr);
		ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP2:
		vdev_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
		peer_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_PEER_ID);
		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
				       peer_mac_h16, mac_addr);
		ast_hash = le32_get_bits(resp->peer_map_ev.info2,
					 HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL);
		hw_peer_id = le32_get_bits(resp->peer_map_ev.info1,
					   HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID);
		ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
				      hw_peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PEER_MAP3:
		vdev_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
		peer_id = le32_get_bits(resp->peer_map_ev.info,
					HTT_T2H_PEER_MAP_INFO_PEER_ID);
		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
				       peer_mac_h16, mac_addr);
		ast_hash = le32_get_bits(resp->peer_map_ev.info2,
					 HTT_T2H_PEER_MAP3_INFO2_AST_HASH_VAL);
		hw_peer_id = le32_get_bits(resp->peer_map_ev.info2,
					   HTT_T2H_PEER_MAP3_INFO2_HW_PEER_ID);
		ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
				      hw_peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
	case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
		peer_id = le32_get_bits(resp->peer_unmap_ev.info,
					HTT_T2H_PEER_UNMAP_INFO_PEER_ID);
		ath12k_peer_unmap_event(ab, peer_id);
		break;
	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
		ath12k_htt_pull_ppdu_stats(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
		ath12k_debugfs_htt_ext_stats_handler(ab, skb);
		break;
	case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
		ath12k_htt_mlo_offset_event_handler(ab, skb);
		break;
	default:
		ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt event %d not handled\n",
			   type);
		break;
	}

	dev_kfree_skb_any(skb);
}

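/* Coalesce an MSDU that RXDMA scattered across several ring buffers
 * into @first: payloads of the continuation buffers are appended
 * (growing tailroom via pskb_expand_head() when needed), and the
 * MSDU_END TLVs, which are valid only in the last buffer, are copied
 * back into the first buffer's descriptor.
 */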
1875static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar,
1876 struct sk_buff_head *msdu_list,
1877 struct sk_buff *first, struct sk_buff *last,
1878 u8 l3pad_bytes, int msdu_len)
1879{
1880 struct ath12k_base *ab = ar->ab;
1881 struct sk_buff *skb;
1882 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);
1883 int buf_first_hdr_len, buf_first_len;
1884 struct hal_rx_desc *ldesc;
1885 int space_extra, rem_len, buf_len;
1886 u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
1887 bool is_continuation;
1888
1889 /* As the msdu is spread across multiple rx buffers,
1890 * find the offset to the start of msdu for computing
1891 * the length of the msdu in the first buffer.
1892 */
1893 buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
1894 buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;
1895
1896 if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
1897 skb_put(first, buf_first_hdr_len + msdu_len);
1898 skb_pull(first, buf_first_hdr_len);
1899 return 0;
1900 }
1901
1902 ldesc = (struct hal_rx_desc *)last->data;
1903 rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, ldesc);
1904 rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, ldesc);
1905
1906 /* The MSDU spans multiple buffers because its length exceeds
1907 * DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data in the
1908 * first buffer is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
1909 */
1910 skb_put(first, DP_RX_BUFFER_SIZE);
1911 skb_pull(first, buf_first_hdr_len);
1912
1913 /* When an MSDU is spread over multiple buffers, the MSDU_END
1914 * TLVs are valid only in the last buffer. Copy those TLVs.
1915 */
1916 ath12k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);
1917
1918 space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
1919 if (space_extra > 0 &&
1920 (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
1921 /* Free up all buffers of the MSDU */
1922 while ((skb = __skb_dequeue(msdu_list)) != NULL) {
1923 rxcb = ATH12K_SKB_RXCB(skb);
1924 if (!rxcb->is_continuation) {
1925 dev_kfree_skb_any(skb);
1926 break;
1927 }
1928 dev_kfree_skb_any(skb);
1929 }
1930 return -ENOMEM;
1931 }
1932
1933 rem_len = msdu_len - buf_first_len;
1934 while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
1935 rxcb = ATH12K_SKB_RXCB(skb);
1936 is_continuation = rxcb->is_continuation;
1937 if (is_continuation)
1938 buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
1939 else
1940 buf_len = rem_len;
1941
1942 if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
1943 WARN_ON_ONCE(1);
1944 dev_kfree_skb_any(skb);
1945 return -EINVAL;
1946 }
1947
1948 skb_put(skb, buf_len + hal_rx_desc_sz);
1949 skb_pull(skb, hal_rx_desc_sz);
1950 skb_copy_from_linear_data(skb, skb_put(first, buf_len),
1951 buf_len);
1952 dev_kfree_skb_any(skb);
1953
1954 rem_len -= buf_len;
1955 if (!is_continuation)
1956 break;
1957 }
1958
1959 return 0;
1960}
1961
1962static struct sk_buff *ath12k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
1963 struct sk_buff *first)
1964{
1965 struct sk_buff *skb;
1966 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);
1967
1968 if (!rxcb->is_continuation)
1969 return first;
1970
1971 skb_queue_walk(msdu_list, skb) {
1972 rxcb = ATH12K_SKB_RXCB(skb);
1973 if (!rxcb->is_continuation)
1974 return skb;
1975 }
1976
1977 return NULL;
1978}
1979
1980static void ath12k_dp_rx_h_csum_offload(struct sk_buff *msdu,
1981 struct ath12k_dp_rx_info *rx_info)
1982{
1983 msdu->ip_summed = (rx_info->ip_csum_fail || rx_info->l4_csum_fail) ?
1984 CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
1985}
1986
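/* A protected 802.11 payload is laid out as
 *
 *   [crypto params (IV/PN)][data][MIC][ICV]
 *
 * The helpers below return the per-cipher sizes of these regions
 * (zero where a cipher has no such field) so that the undecap code
 * can strip or rebuild them.
 */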
1987int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar, enum hal_encrypt_type enctype)
1988{
1989 switch (enctype) {
1990 case HAL_ENCRYPT_TYPE_OPEN:
1991 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1992 case HAL_ENCRYPT_TYPE_TKIP_MIC:
1993 return 0;
1994 case HAL_ENCRYPT_TYPE_CCMP_128:
1995 return IEEE80211_CCMP_MIC_LEN;
1996 case HAL_ENCRYPT_TYPE_CCMP_256:
1997 return IEEE80211_CCMP_256_MIC_LEN;
1998 case HAL_ENCRYPT_TYPE_GCMP_128:
1999 case HAL_ENCRYPT_TYPE_AES_GCMP_256:
2000 return IEEE80211_GCMP_MIC_LEN;
2001 case HAL_ENCRYPT_TYPE_WEP_40:
2002 case HAL_ENCRYPT_TYPE_WEP_104:
2003 case HAL_ENCRYPT_TYPE_WEP_128:
2004 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
2005 case HAL_ENCRYPT_TYPE_WAPI:
2006 break;
2007 }
2008
2009 ath12k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
2010 return 0;
2011}
2012
2013static int ath12k_dp_rx_crypto_param_len(struct ath12k *ar,
2014 enum hal_encrypt_type enctype)
2015{
2016 switch (enctype) {
2017 case HAL_ENCRYPT_TYPE_OPEN:
2018 return 0;
2019 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
2020 case HAL_ENCRYPT_TYPE_TKIP_MIC:
2021 return IEEE80211_TKIP_IV_LEN;
2022 case HAL_ENCRYPT_TYPE_CCMP_128:
2023 return IEEE80211_CCMP_HDR_LEN;
2024 case HAL_ENCRYPT_TYPE_CCMP_256:
2025 return IEEE80211_CCMP_256_HDR_LEN;
2026 case HAL_ENCRYPT_TYPE_GCMP_128:
2027 case HAL_ENCRYPT_TYPE_AES_GCMP_256:
2028 return IEEE80211_GCMP_HDR_LEN;
2029 case HAL_ENCRYPT_TYPE_WEP_40:
2030 case HAL_ENCRYPT_TYPE_WEP_104:
2031 case HAL_ENCRYPT_TYPE_WEP_128:
2032 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
2033 case HAL_ENCRYPT_TYPE_WAPI:
2034 break;
2035 }
2036
2037 ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
2038 return 0;
2039}
2040
2041static int ath12k_dp_rx_crypto_icv_len(struct ath12k *ar,
2042 enum hal_encrypt_type enctype)
2043{
2044 switch (enctype) {
2045 case HAL_ENCRYPT_TYPE_OPEN:
2046 case HAL_ENCRYPT_TYPE_CCMP_128:
2047 case HAL_ENCRYPT_TYPE_CCMP_256:
2048 case HAL_ENCRYPT_TYPE_GCMP_128:
2049 case HAL_ENCRYPT_TYPE_AES_GCMP_256:
2050 return 0;
2051 case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
2052 case HAL_ENCRYPT_TYPE_TKIP_MIC:
2053 return IEEE80211_TKIP_ICV_LEN;
2054 case HAL_ENCRYPT_TYPE_WEP_40:
2055 case HAL_ENCRYPT_TYPE_WEP_104:
2056 case HAL_ENCRYPT_TYPE_WEP_128:
2057 case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
2058 case HAL_ENCRYPT_TYPE_WAPI:
2059 break;
2060 }
2061
2062 ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
2063 return 0;
2064}
2065
2066static void ath12k_dp_rx_h_undecap_nwifi(struct ath12k *ar,
2067 struct sk_buff *msdu,
2068 enum hal_encrypt_type enctype,
2069 struct ieee80211_rx_status *status)
2070{
2071 struct ath12k_base *ab = ar->ab;
2072 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
2073 u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
2074 struct ieee80211_hdr *hdr;
2075 size_t hdr_len;
2076 u8 *crypto_hdr;
2077 u16 qos_ctl;
2078
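	/* Native wifi decap leaves an 802.11 header without the QoS
	 * control field. Rebuild a QoS data frame for mac80211: pull the
	 * decapped header, then push back the crypto params (when the IV
	 * was not stripped), the QoS control field and the header, in
	 * reverse order.
	 */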
2079 /* pull decapped header */
2080 hdr = (struct ieee80211_hdr *)msdu->data;
2081 hdr_len = ieee80211_hdrlen(hdr->frame_control);
2082 skb_pull(msdu, hdr_len);
2083
2084 /* Rebuild qos header */
2085 hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
2086
2087 /* Reset the order bit as the HT_Control header is stripped */
2088 hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));
2089
2090 qos_ctl = rxcb->tid;
2091
2092 if (ath12k_dp_rx_h_mesh_ctl_present(ab, rxcb->rx_desc))
2093 qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
2094
2095 /* TODO: Add other QoS ctl fields when required */
2096
2097 /* copy decap header before overwriting for reuse below */
2098 memcpy(decap_hdr, hdr, hdr_len);
2099
2100 /* Rebuild crypto header for mac80211 use */
2101 if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
2102 crypto_hdr = skb_push(msdu, ath12k_dp_rx_crypto_param_len(ar, enctype));
2103 ath12k_dp_rx_desc_get_crypto_header(ar->ab,
2104 rxcb->rx_desc, crypto_hdr,
2105 enctype);
2106 }
2107
2108 memcpy(skb_push(msdu,
2109 IEEE80211_QOS_CTL_LEN), &qos_ctl,
2110 IEEE80211_QOS_CTL_LEN);
2111 memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
2112}
2113
2114static void ath12k_dp_rx_h_undecap_raw(struct ath12k *ar, struct sk_buff *msdu,
2115 enum hal_encrypt_type enctype,
2116 struct ieee80211_rx_status *status,
2117 bool decrypted)
2118{
2119 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
2120 struct ieee80211_hdr *hdr;
2121 size_t hdr_len;
2122 size_t crypto_len;
2123
2124 if (!rxcb->is_first_msdu || !rxcb->is_last_msdu) {
2126 WARN_ON_ONCE(1);
2127 return;
2128 }
2129
2130 skb_trim(msdu, msdu->len - FCS_LEN);
2131
2132 if (!decrypted)
2133 return;
2134
2135 hdr = (void *)msdu->data;
2136
2137 /* Tail */
2138 if (status->flag & RX_FLAG_IV_STRIPPED) {
2139 skb_trim(msdu, msdu->len -
2140 ath12k_dp_rx_crypto_mic_len(ar, enctype));
2141
2142 skb_trim(msdu, msdu->len -
2143 ath12k_dp_rx_crypto_icv_len(ar, enctype));
2144 } else {
2145 /* MIC */
2146 if (status->flag & RX_FLAG_MIC_STRIPPED)
2147 skb_trim(msdu, msdu->len -
2148 ath12k_dp_rx_crypto_mic_len(ar, enctype));
2149
2150 /* ICV */
2151 if (status->flag & RX_FLAG_ICV_STRIPPED)
2152 skb_trim(msdu, msdu->len -
2153 ath12k_dp_rx_crypto_icv_len(ar, enctype));
2154 }
2155
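	/* The Michael MIC is 8 bytes, the same size as the CCMP MIC,
	 * hence the use of IEEE80211_CCMP_MIC_LEN below.
	 */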
2156 /* MMIC */
2157 if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
2158 !ieee80211_has_morefrags(hdr->frame_control) &&
2159 enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
2160 skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);
2161
2162 /* Head */
2163 if (status->flag & RX_FLAG_IV_STRIPPED) {
2164 hdr_len = ieee80211_hdrlen(hdr->frame_control);
2165 crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
2166
2167 memmove(msdu->data + crypto_len, msdu->data, hdr_len);
2168 skb_pull(msdu, crypto_len);
2169 }
2170}
2171
2172static void ath12k_get_dot11_hdr_from_rx_desc(struct ath12k *ar,
2173 struct sk_buff *msdu,
2174 struct ath12k_skb_rxcb *rxcb,
2175 struct ieee80211_rx_status *status,
2176 enum hal_encrypt_type enctype)
2177{
2178 struct hal_rx_desc *rx_desc = rxcb->rx_desc;
2179 struct ath12k_base *ab = ar->ab;
2180 size_t hdr_len, crypto_len;
2181 struct ieee80211_hdr hdr;
2182 __le16 qos_ctl;
2183 u8 *crypto_hdr, mesh_ctrl;
2184
2185 ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, &hdr);
2186 hdr_len = ieee80211_hdrlen(hdr.frame_control);
2187 mesh_ctrl = ath12k_dp_rx_h_mesh_ctl_present(ab, rx_desc);
2188
2189 if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
2190 crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
2191 crypto_hdr = skb_push(msdu, crypto_len);
2192 ath12k_dp_rx_desc_get_crypto_header(ab, rx_desc, crypto_hdr, enctype);
2193 }
2194
2195 skb_push(msdu, hdr_len);
2196 memcpy(msdu->data, &hdr, min(hdr_len, sizeof(hdr)));
2197
2198 if (rxcb->is_mcbc)
2199 status->flag &= ~RX_FLAG_PN_VALIDATED;
2200
2201 /* Add QOS header */
2202 if (ieee80211_is_data_qos(hdr.frame_control)) {
2203 struct ieee80211_hdr *qos_ptr = (struct ieee80211_hdr *)msdu->data;
2204
2205 qos_ctl = cpu_to_le16(rxcb->tid & IEEE80211_QOS_CTL_TID_MASK);
2206 if (mesh_ctrl)
2207 qos_ctl |= cpu_to_le16(IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT);
2208
2209 memcpy(ieee80211_get_qos_ctl(qos_ptr), &qos_ctl, IEEE80211_QOS_CTL_LEN);
2210 }
2211}
2212
2213static void ath12k_dp_rx_h_undecap_eth(struct ath12k *ar,
2214 struct sk_buff *msdu,
2215 enum hal_encrypt_type enctype,
2216 struct ieee80211_rx_status *status)
2217{
2218 struct ieee80211_hdr *hdr;
2219 struct ethhdr *eth;
2220 u8 da[ETH_ALEN];
2221 u8 sa[ETH_ALEN];
2222 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
2223 struct ath12k_dp_rx_rfc1042_hdr rfc = {0xaa, 0xaa, 0x03, {0x00, 0x00, 0x00}};
2224
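	/* Convert the ethernet frame back to 802.11: save DA/SA, replace
	 * the ethernet header with an RFC 1042 SNAP header carrying the
	 * original ethertype, then prepend the 802.11 (and crypto) header
	 * rebuilt from the rx descriptor.
	 */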
2225 eth = (struct ethhdr *)msdu->data;
2226 ether_addr_copy(da, eth->h_dest);
2227 ether_addr_copy(sa, eth->h_source);
2228 rfc.snap_type = eth->h_proto;
2229 skb_pull(msdu, sizeof(*eth));
2230 memcpy(skb_push(msdu, sizeof(rfc)), &rfc,
2231 sizeof(rfc));
2232 ath12k_get_dot11_hdr_from_rx_desc(ar, msdu, rxcb, status, enctype);
2233
2234 /* The original 802.11 header has a different DA, and in
2235 * the 4addr case it may also have a different SA.
2236 */
2237 hdr = (struct ieee80211_hdr *)msdu->data;
2238 ether_addr_copy(ieee80211_get_DA(hdr), da);
2239 ether_addr_copy(ieee80211_get_SA(hdr), sa);
2240}
2241
2242static void ath12k_dp_rx_h_undecap(struct ath12k *ar, struct sk_buff *msdu,
2243 struct hal_rx_desc *rx_desc,
2244 enum hal_encrypt_type enctype,
2245 struct ieee80211_rx_status *status,
2246 bool decrypted)
2247{
2248 struct ath12k_base *ab = ar->ab;
2249 u8 decap;
2250 struct ethhdr *ehdr;
2251
2252 decap = ath12k_dp_rx_h_decap_type(ab, rx_desc);
2253
2254 switch (decap) {
2255 case DP_RX_DECAP_TYPE_NATIVE_WIFI:
2256 ath12k_dp_rx_h_undecap_nwifi(ar, msdu, enctype, status);
2257 break;
2258 case DP_RX_DECAP_TYPE_RAW:
2259 ath12k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
2260 decrypted);
2261 break;
2262 case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
2263 ehdr = (struct ethhdr *)msdu->data;
2264
2265 /* mac80211 allows fast path only for authorized STA */
2266 if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
2267 ATH12K_SKB_RXCB(msdu)->is_eapol = true;
2268 ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status);
2269 break;
2270 }
2271
2272 /* PN for mcast packets will be validated in mac80211;
2273 * remove eth header and add 802.11 header.
2274 */
2275 if (ATH12K_SKB_RXCB(msdu)->is_mcbc && decrypted)
2276 ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status);
2277 break;
2278 case DP_RX_DECAP_TYPE_8023:
2279 /* TODO: Handle undecap for these formats */
2280 break;
2281 }
2282}
2283
2284struct ath12k_peer *
2285ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu,
2286 struct ath12k_dp_rx_info *rx_info)
2287{
2288 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
2289 struct ath12k_peer *peer = NULL;
2290
2291 lockdep_assert_held(&ab->base_lock);
2292
2293 if (rxcb->peer_id)
2294 peer = ath12k_peer_find_by_id(ab, rxcb->peer_id);
2295
2296 if (peer)
2297 return peer;
2298
2299 if (rx_info->addr2_present)
2300 peer = ath12k_peer_find_by_addr(ab, rx_info->addr2);
2301
2302 return peer;
2303}
2304
2305static void ath12k_dp_rx_h_mpdu(struct ath12k *ar,
2306 struct sk_buff *msdu,
2307 struct hal_rx_desc *rx_desc,
2308 struct ath12k_dp_rx_info *rx_info)
2309{
2310 struct ath12k_base *ab = ar->ab;
2311 struct ath12k_skb_rxcb *rxcb;
2312 enum hal_encrypt_type enctype;
2313 bool is_decrypted = false;
2314 struct ieee80211_hdr *hdr;
2315 struct ath12k_peer *peer;
2316 struct ieee80211_rx_status *rx_status = rx_info->rx_status;
2317 u32 err_bitmap;
2318
2319 /* PN for multicast packets will be checked in mac80211 */
2320 rxcb = ATH12K_SKB_RXCB(msdu);
2321 rxcb->is_mcbc = rx_info->is_mcbc;
2322
2323 if (rxcb->is_mcbc)
2324 rxcb->peer_id = rx_info->peer_id;
2325
2326 spin_lock_bh(&ar->ab->base_lock);
2327 peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu, rx_info);
2328 if (peer) {
2329 /* Clear the mcbc bit for peers marked ucast_ra_only (e.g. a STA
2330 * towards an AP): their frames are handled as unicast.
2331 */
2332 rxcb->is_mcbc = rxcb->is_mcbc && !peer->ucast_ra_only;
2333
2334 if (rxcb->is_mcbc)
2335 enctype = peer->sec_type_grp;
2336 else
2337 enctype = peer->sec_type;
2338 } else {
2339 enctype = HAL_ENCRYPT_TYPE_OPEN;
2340 }
2341 spin_unlock_bh(&ar->ab->base_lock);
2342
2343 err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
2344 if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
2345 is_decrypted = ath12k_dp_rx_h_is_decrypted(ab, rx_desc);
2346
2347 /* Clear per-MPDU flags while leaving per-PPDU flags intact */
2348 rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
2349 RX_FLAG_MMIC_ERROR |
2350 RX_FLAG_DECRYPTED |
2351 RX_FLAG_IV_STRIPPED |
2352 RX_FLAG_MMIC_STRIPPED);
2353
2354 if (err_bitmap & HAL_RX_MPDU_ERR_FCS)
2355 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
2356 if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC)
2357 rx_status->flag |= RX_FLAG_MMIC_ERROR;
2358
2359 if (is_decrypted) {
2360 rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;
2361
2362 if (rx_info->is_mcbc)
2363 rx_status->flag |= RX_FLAG_MIC_STRIPPED |
2364 RX_FLAG_ICV_STRIPPED;
2365 else
2366 rx_status->flag |= RX_FLAG_IV_STRIPPED |
2367 RX_FLAG_PN_VALIDATED;
2368 }
2369
2370 ath12k_dp_rx_h_csum_offload(msdu, rx_info);
2371 ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
2372 enctype, rx_status, is_decrypted);
2373
2374 if (!is_decrypted || rx_info->is_mcbc)
2375 return;
2376
2377 if (rx_info->decap_type != DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
2378 hdr = (void *)msdu->data;
2379 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2380 }
2381}
2382
2383static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info)
2384{
2385 struct ieee80211_supported_band *sband;
2386 struct ieee80211_rx_status *rx_status = rx_info->rx_status;
2387 enum rx_msdu_start_pkt_type pkt_type = rx_info->pkt_type;
2388 u8 bw = rx_info->bw, sgi = rx_info->sgi;
2389 u8 rate_mcs = rx_info->rate_mcs, nss = rx_info->nss;
2390 bool is_cck;
2391
2392 switch (pkt_type) {
2393 case RX_MSDU_START_PKT_TYPE_11A:
2394 case RX_MSDU_START_PKT_TYPE_11B:
2395 is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
2396 sband = &ar->mac.sbands[rx_status->band];
2397 rx_status->rate_idx = ath12k_mac_hw_rate_to_idx(sband, rate_mcs,
2398 is_cck);
2399 break;
2400 case RX_MSDU_START_PKT_TYPE_11N:
2401 rx_status->encoding = RX_ENC_HT;
2402 if (rate_mcs > ATH12K_HT_MCS_MAX) {
2403 ath12k_warn(ar->ab,
2404 "Received with invalid mcs in HT mode %d\n",
2405 rate_mcs);
2406 break;
2407 }
2408 rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
2409 if (sgi)
2410 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2411 rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
2412 break;
2413 case RX_MSDU_START_PKT_TYPE_11AC:
2414 rx_status->encoding = RX_ENC_VHT;
2415 rx_status->rate_idx = rate_mcs;
2416 if (rate_mcs > ATH12K_VHT_MCS_MAX) {
2417 ath12k_warn(ar->ab,
2418 "Received with invalid mcs in VHT mode %d\n",
2419 rate_mcs);
2420 break;
2421 }
2422 rx_status->nss = nss;
2423 if (sgi)
2424 rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2425 rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
2426 break;
2427 case RX_MSDU_START_PKT_TYPE_11AX:
2428 rx_status->rate_idx = rate_mcs;
2429 if (rate_mcs > ATH12K_HE_MCS_MAX) {
2430 ath12k_warn(ar->ab,
2431 "Received with invalid mcs in HE mode %d\n",
2432 rate_mcs);
2433 break;
2434 }
2435 rx_status->encoding = RX_ENC_HE;
2436 rx_status->nss = nss;
2437 rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
2438 rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
2439 break;
2440 case RX_MSDU_START_PKT_TYPE_11BE:
2441 rx_status->rate_idx = rate_mcs;
2442
2443 if (rate_mcs > ATH12K_EHT_MCS_MAX) {
2444 ath12k_warn(ar->ab,
2445 "Received with invalid mcs in EHT mode %d\n",
2446 rate_mcs);
2447 break;
2448 }
2449
2450 rx_status->encoding = RX_ENC_EHT;
2451 rx_status->nss = nss;
2452 rx_status->eht.gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(sgi);
2453 rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
2454 break;
2455 default:
2456 break;
2457 }
2458}
2459
2460void ath12k_dp_rx_h_fetch_info(struct ath12k_base *ab, struct hal_rx_desc *rx_desc,
2461 struct ath12k_dp_rx_info *rx_info)
2462{
2463 rx_info->ip_csum_fail = ath12k_dp_rx_h_ip_cksum_fail(ab, rx_desc);
2464 rx_info->l4_csum_fail = ath12k_dp_rx_h_l4_cksum_fail(ab, rx_desc);
2465 rx_info->is_mcbc = ath12k_dp_rx_h_is_da_mcbc(ab, rx_desc);
2466 rx_info->decap_type = ath12k_dp_rx_h_decap_type(ab, rx_desc);
2467 rx_info->pkt_type = ath12k_dp_rx_h_pkt_type(ab, rx_desc);
2468 rx_info->sgi = ath12k_dp_rx_h_sgi(ab, rx_desc);
2469 rx_info->rate_mcs = ath12k_dp_rx_h_rate_mcs(ab, rx_desc);
2470 rx_info->bw = ath12k_dp_rx_h_rx_bw(ab, rx_desc);
2471 rx_info->nss = ath12k_dp_rx_h_nss(ab, rx_desc);
2472 rx_info->tid = ath12k_dp_rx_h_tid(ab, rx_desc);
2473 rx_info->peer_id = ath12k_dp_rx_h_peer_id(ab, rx_desc);
2474 rx_info->phy_meta_data = ath12k_dp_rx_h_freq(ab, rx_desc);
2475
2476 if (ath12k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)) {
2477 ether_addr_copy(rx_info->addr2,
2478 ath12k_dp_rxdesc_get_mpdu_start_addr2(ab, rx_desc));
2479 rx_info->addr2_present = true;
2480 }
2481
2482 ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "rx_desc: ",
2483 rx_desc, sizeof(*rx_desc));
2484}
2485
2486void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info)
2487{
2488 struct ieee80211_rx_status *rx_status = rx_info->rx_status;
2489 u8 channel_num;
2490 u32 center_freq, meta_data;
2491 struct ieee80211_channel *channel;
2492
2493 rx_status->freq = 0;
2494 rx_status->rate_idx = 0;
2495 rx_status->nss = 0;
2496 rx_status->encoding = RX_ENC_LEGACY;
2497 rx_status->bw = RATE_INFO_BW_20;
2498 rx_status->enc_flags = 0;
2499
2500 rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
2501
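	/* phy_meta_data packs the primary channel number in the low
	 * 16 bits and the center frequency in MHz in the high 16 bits.
	 */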
2502 meta_data = rx_info->phy_meta_data;
2503 channel_num = meta_data;
2504 center_freq = meta_data >> 16;
2505
2506 if (center_freq >= ATH12K_MIN_6GHZ_FREQ &&
2507 center_freq <= ATH12K_MAX_6GHZ_FREQ) {
2508 rx_status->band = NL80211_BAND_6GHZ;
2509 rx_status->freq = center_freq;
2510 } else if (channel_num >= 1 && channel_num <= 14) {
2511 rx_status->band = NL80211_BAND_2GHZ;
2512 } else if (channel_num >= 36 && channel_num <= 173) {
2513 rx_status->band = NL80211_BAND_5GHZ;
2514 } else {
2515 spin_lock_bh(&ar->data_lock);
2516 channel = ar->rx_channel;
2517 if (channel) {
2518 rx_status->band = channel->band;
2519 channel_num =
2520 ieee80211_frequency_to_channel(channel->center_freq);
2521 }
2522 spin_unlock_bh(&ar->data_lock);
2523 }
2524
2525 if (rx_status->band != NL80211_BAND_6GHZ)
2526 rx_status->freq = ieee80211_channel_to_frequency(channel_num,
2527 rx_status->band);
2528
2529 ath12k_dp_rx_h_rate(ar, rx_info);
2530}
2531
2532static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi,
2533 struct sk_buff *msdu,
2534 struct ath12k_dp_rx_info *rx_info)
2535{
2536 struct ath12k_base *ab = ar->ab;
2537 static const struct ieee80211_radiotap_he known = {
2538 .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
2539 IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
2540 .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
2541 };
2542 struct ieee80211_radiotap_he *he;
2543 struct ieee80211_rx_status *rx_status;
2544 struct ieee80211_sta *pubsta;
2545 struct ath12k_peer *peer;
2546 struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
2547 struct ieee80211_rx_status *status = rx_info->rx_status;
2548 u8 decap = DP_RX_DECAP_TYPE_RAW;
2549 bool is_mcbc = rxcb->is_mcbc;
2550 bool is_eapol = rxcb->is_eapol;
2551
2552 if (status->encoding == RX_ENC_HE && !(status->flag & RX_FLAG_RADIOTAP_HE) &&
2553 !(status->flag & RX_FLAG_SKIP_MONITOR)) {
2554 he = skb_push(msdu, sizeof(known));
2555 memcpy(he, &known, sizeof(known));
2556 status->flag |= RX_FLAG_RADIOTAP_HE;
2557 }
2558
2559 if (!(status->flag & RX_FLAG_ONLY_MONITOR))
2560 decap = rx_info->decap_type;
2561
2562 spin_lock_bh(&ab->base_lock);
2563 peer = ath12k_dp_rx_h_find_peer(ab, msdu, rx_info);
2564
2565 pubsta = peer ? peer->sta : NULL;
2566
2567 if (pubsta && pubsta->valid_links) {
2568 status->link_valid = 1;
2569 status->link_id = peer->link_id;
2570 }
2571
2572 spin_unlock_bh(&ab->base_lock);
2573
2574 ath12k_dbg(ab, ATH12K_DBG_DATA,
2575 "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
2576 msdu,
2577 msdu->len,
2578 peer ? peer->addr : NULL,
2579 rxcb->tid,
2580 is_mcbc ? "mcast" : "ucast",
2581 ath12k_dp_rx_h_seq_no(ab, rxcb->rx_desc),
2582 (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
2583 (status->encoding == RX_ENC_HT) ? "ht" : "",
2584 (status->encoding == RX_ENC_VHT) ? "vht" : "",
2585 (status->encoding == RX_ENC_HE) ? "he" : "",
2586 (status->encoding == RX_ENC_EHT) ? "eht" : "",
2587 (status->bw == RATE_INFO_BW_40) ? "40" : "",
2588 (status->bw == RATE_INFO_BW_80) ? "80" : "",
2589 (status->bw == RATE_INFO_BW_160) ? "160" : "",
2590 (status->bw == RATE_INFO_BW_320) ? "320" : "",
2591 status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
2592 status->rate_idx,
2593 status->nss,
2594 status->freq,
2595 status->band, status->flag,
2596 !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
2597 !!(status->flag & RX_FLAG_MMIC_ERROR),
2598 !!(status->flag & RX_FLAG_AMSDU_MORE));
2599
2600 ath12k_dbg_dump(ab, ATH12K_DBG_DP_RX, NULL, "dp rx msdu: ",
2601 msdu->data, msdu->len);
2602
2603 rx_status = IEEE80211_SKB_RXCB(msdu);
2604 *rx_status = *status;
2605
2606 /* TODO: trace rx packet */
2607
2608 /* The PN for multicast packets is not validated in HW,
2609 * so skip the 802.3 rx path for them.
2610 * Also, fast_rx expects the STA to be authorized, hence
2611 * EAPOL packets are sent via the slow path.
2612 */
2613 if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
2614 !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
2615 rx_status->flag |= RX_FLAG_8023;
2616
2617 ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi);
2618}
2619
2620static bool ath12k_dp_rx_check_nwifi_hdr_len_valid(struct ath12k_base *ab,
2621 struct hal_rx_desc *rx_desc,
2622 struct sk_buff *msdu)
2623{
2624 struct ieee80211_hdr *hdr;
2625 u8 decap_type;
2626 u32 hdr_len;
2627
2628 decap_type = ath12k_dp_rx_h_decap_type(ab, rx_desc);
2629 if (decap_type != DP_RX_DECAP_TYPE_NATIVE_WIFI)
2630 return true;
2631
2632 hdr = (struct ieee80211_hdr *)msdu->data;
2633 hdr_len = ieee80211_hdrlen(hdr->frame_control);
2634
2635 if (likely(hdr_len <= DP_MAX_NWIFI_HDR_LEN))
2636 return true;
2637
2638 ab->device_stats.invalid_rbm++;
2639 WARN_ON_ONCE(1);
2640 return false;
2641}
2642
2643static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
2644 struct sk_buff *msdu,
2645 struct sk_buff_head *msdu_list,
2646 struct ath12k_dp_rx_info *rx_info)
2647{
2648 struct ath12k_base *ab = ar->ab;
2649 struct hal_rx_desc *rx_desc, *lrx_desc;
2650 struct ath12k_skb_rxcb *rxcb;
2651 struct sk_buff *last_buf;
2652 u8 l3_pad_bytes;
2653 u16 msdu_len;
2654 int ret;
2655 u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
2656
2657 last_buf = ath12k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
2658 if (!last_buf) {
2659 ath12k_warn(ab,
2660 "No valid Rx buffer to access MSDU_END tlv\n");
2661 ret = -EIO;
2662 goto free_out;
2663 }
2664
2665 rx_desc = (struct hal_rx_desc *)msdu->data;
2666 lrx_desc = (struct hal_rx_desc *)last_buf->data;
2667 if (!ath12k_dp_rx_h_msdu_done(ab, lrx_desc)) {
2668 ath12k_warn(ab, "msdu_done bit in msdu_end is not set\n");
2669 ret = -EIO;
2670 goto free_out;
2671 }
2672
2673 rxcb = ATH12K_SKB_RXCB(msdu);
2674 rxcb->rx_desc = rx_desc;
2675 msdu_len = ath12k_dp_rx_h_msdu_len(ab, lrx_desc);
2676 l3_pad_bytes = ath12k_dp_rx_h_l3pad(ab, lrx_desc);
2677
2678 if (rxcb->is_frag) {
2679 skb_pull(msdu, hal_rx_desc_sz);
2680 } else if (!rxcb->is_continuation) {
2681 if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
2682 ret = -EINVAL;
2683 ath12k_warn(ab, "invalid msdu len %u\n", msdu_len);
2684 ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
2685 sizeof(*rx_desc));
2686 goto free_out;
2687 }
2688 skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len);
2689 skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes);
2690 } else {
2691 ret = ath12k_dp_rx_msdu_coalesce(ar, msdu_list,
2692 msdu, last_buf,
2693 l3_pad_bytes, msdu_len);
2694 if (ret) {
2695 ath12k_warn(ab,
2696 "failed to coalesce msdu rx buffer%d\n", ret);
2697 goto free_out;
2698 }
2699 }
2700
2701 if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu))) {
2702 ret = -EINVAL;
2703 goto free_out;
2704 }
2705
2706 ath12k_dp_rx_h_fetch_info(ab, rx_desc, rx_info);
2707 ath12k_dp_rx_h_ppdu(ar, rx_info);
2708 ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_info);
2709
2710 rx_info->rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
2711
2712 return 0;
2713
2714free_out:
2715 return ret;
2716}
2717
2718static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab,
2719 struct napi_struct *napi,
2720 struct sk_buff_head *msdu_list,
2721 int ring_id)
2722{
2723 struct ath12k_hw_group *ag = ab->ag;
2724 struct ieee80211_rx_status rx_status = {0};
2725 struct ath12k_skb_rxcb *rxcb;
2726 struct sk_buff *msdu;
2727 struct ath12k *ar;
2728 struct ath12k_hw_link *hw_links = ag->hw_links;
2729 struct ath12k_base *partner_ab;
2730 struct ath12k_dp_rx_info rx_info;
2731 u8 hw_link_id, pdev_id;
2732 int ret;
2733
2734 if (skb_queue_empty(msdu_list))
2735 return;
2736
2737 rx_info.addr2_present = false;
2738 rx_info.rx_status = &rx_status;
2739
2740 rcu_read_lock();
2741
2742 while ((msdu = __skb_dequeue(msdu_list))) {
2743 rxcb = ATH12K_SKB_RXCB(msdu);
2744 hw_link_id = rxcb->hw_link_id;
2745 partner_ab = ath12k_ag_to_ab(ag,
2746 hw_links[hw_link_id].device_id);
2747 pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params,
2748 hw_links[hw_link_id].pdev_idx);
2749 ar = partner_ab->pdevs[pdev_id].ar;
2750 if (!rcu_dereference(partner_ab->pdevs_active[pdev_id])) {
2751 dev_kfree_skb_any(msdu);
2752 continue;
2753 }
2754
2755 if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
2756 dev_kfree_skb_any(msdu);
2757 continue;
2758 }
2759
2760 ret = ath12k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_info);
2761 if (ret) {
2762 ath12k_dbg(ab, ATH12K_DBG_DATA,
2763 "Unable to process msdu %d", ret);
2764 dev_kfree_skb_any(msdu);
2765 continue;
2766 }
2767
2768 ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_info);
2769 }
2770
2771 rcu_read_unlock();
2772}
2773
2774static u16 ath12k_dp_rx_get_peer_id(struct ath12k_base *ab,
2775 enum ath12k_peer_metadata_version ver,
2776 __le32 peer_metadata)
2777{
2778 switch (ver) {
2779 default:
2780 ath12k_warn(ab, "Unknown peer metadata version: %d\n", ver);
2781 fallthrough;
2782 case ATH12K_PEER_METADATA_V0:
2783 return le32_get_bits(peer_metadata,
2784 RX_MPDU_DESC_META_DATA_V0_PEER_ID);
2785 case ATH12K_PEER_METADATA_V1:
2786 return le32_get_bits(peer_metadata,
2787 RX_MPDU_DESC_META_DATA_V1_PEER_ID);
2788 case ATH12K_PEER_METADATA_V1A:
2789 return le32_get_bits(peer_metadata,
2790 RX_MPDU_DESC_META_DATA_V1A_PEER_ID);
2791 case ATH12K_PEER_METADATA_V1B:
2792 return le32_get_bits(peer_metadata,
2793 RX_MPDU_DESC_META_DATA_V1B_PEER_ID);
2794 }
2795}
2796
2797int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
2798 struct napi_struct *napi, int budget)
2799{
2800 struct ath12k_hw_group *ag = ab->ag;
2801 struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
2802 struct ath12k_hw_link *hw_links = ag->hw_links;
2803 int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
2804 struct ath12k_rx_desc_info *desc_info;
2805 struct ath12k_dp *dp = &ab->dp;
2806 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
2807 struct hal_reo_dest_ring *desc;
2808 struct ath12k_base *partner_ab;
2809 struct sk_buff_head msdu_list;
2810 struct ath12k_skb_rxcb *rxcb;
2811 int total_msdu_reaped = 0;
2812 u8 hw_link_id, device_id;
2813 struct hal_srng *srng;
2814 struct sk_buff *msdu;
2815 bool done = false;
2816 u64 desc_va;
2817
2818 __skb_queue_head_init(&msdu_list);
2819
2820 for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
2821 INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
2822
2823 srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
2824
2825 spin_lock_bh(&srng->lock);
2826
2827try_again:
2828 ath12k_hal_srng_access_begin(ab, srng);
2829
2830 while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
2831 struct rx_mpdu_desc *mpdu_info;
2832 struct rx_msdu_desc *msdu_info;
2833 enum hal_reo_dest_ring_push_reason push_reason;
2834 u32 cookie;
2835
2836 cookie = le32_get_bits(desc->buf_addr_info.info1,
2837 BUFFER_ADDR_INFO1_SW_COOKIE);
2838
2839 hw_link_id = le32_get_bits(desc->info0,
2840 HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
2841
2842 desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |
2843 le32_to_cpu(desc->buf_va_lo));
2844 desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);
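		/* The virtual address of the SW rx descriptor is carried in
		 * the ring entry itself (buf_va_hi/lo); if HW cookie
		 * conversion did not fill it in, fall back to the
		 * cookie-based lookup below.
		 */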
2845
2846 device_id = hw_links[hw_link_id].device_id;
2847 partner_ab = ath12k_ag_to_ab(ag, device_id);
2848 if (unlikely(!partner_ab)) {
2849 if (desc_info->skb) {
2850 dev_kfree_skb_any(desc_info->skb);
2851 desc_info->skb = NULL;
2852 }
2853
2854 continue;
2855 }
2856
2857 /* retry manual desc retrieval */
2858 if (!desc_info) {
2859 desc_info = ath12k_dp_get_rx_desc(partner_ab, cookie);
2860 if (!desc_info) {
2861 ath12k_warn(partner_ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n",
2862 cookie);
2863 continue;
2864 }
2865 }
2866
2867 if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
2868 ath12k_warn(ab, "Check HW CC implementation");
2869
2870 msdu = desc_info->skb;
2871 desc_info->skb = NULL;
2872
2873 list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]);
2874
2875 rxcb = ATH12K_SKB_RXCB(msdu);
2876 dma_unmap_single(partner_ab->dev, rxcb->paddr,
2877 msdu->len + skb_tailroom(msdu),
2878 DMA_FROM_DEVICE);
2879
2880 num_buffs_reaped[device_id]++;
2881 ab->device_stats.reo_rx[ring_id][ab->device_id]++;
2882
2883 push_reason = le32_get_bits(desc->info0,
2884 HAL_REO_DEST_RING_INFO0_PUSH_REASON);
2885 if (push_reason !=
2886 HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
2887 dev_kfree_skb_any(msdu);
2888 ab->device_stats.hal_reo_error[ring_id]++;
2889 continue;
2890 }
2891
2892 msdu_info = &desc->rx_msdu_info;
2893 mpdu_info = &desc->rx_mpdu_info;
2894
2895 rxcb->is_first_msdu = !!(le32_to_cpu(msdu_info->info0) &
2896 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
2897 rxcb->is_last_msdu = !!(le32_to_cpu(msdu_info->info0) &
2898 RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
2899 rxcb->is_continuation = !!(le32_to_cpu(msdu_info->info0) &
2900 RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
2901 rxcb->hw_link_id = hw_link_id;
2902 rxcb->peer_id = ath12k_dp_rx_get_peer_id(ab, dp->peer_metadata_ver,
2903 mpdu_info->peer_meta_data);
2904 rxcb->tid = le32_get_bits(mpdu_info->info0,
2905 RX_MPDU_DESC_INFO0_TID);
2906
2907 __skb_queue_tail(&msdu_list, msdu);
2908
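		/* Count an MSDU against the budget only when its final,
		 * non-continuation buffer is reaped; "done" then records
		 * whether the loop stopped on an MPDU boundary.
		 */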
2909 if (!rxcb->is_continuation) {
2910 total_msdu_reaped++;
2911 done = true;
2912 } else {
2913 done = false;
2914 }
2915
2916 if (total_msdu_reaped >= budget)
2917 break;
2918 }
2919
2920 /* HW might have updated the head pointer after we cached it.
2921 * In that case, even though there are entries in the ring, we'll
2922 * get a NULL rx_desc. Retry the read with the updated cached
2923 * head pointer so that the complete MPDU can be reaped in the
2924 * current rx processing pass.
2925 */
2926 if (!done && ath12k_hal_srng_dst_num_free(ab, srng, true)) {
2927 ath12k_hal_srng_access_end(ab, srng);
2928 goto try_again;
2929 }
2930
2931 ath12k_hal_srng_access_end(ab, srng);
2932
2933 spin_unlock_bh(&srng->lock);
2934
2935 if (!total_msdu_reaped)
2936 goto exit;
2937
2938 for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
2939 if (!num_buffs_reaped[device_id])
2940 continue;
2941
2942 partner_ab = ath12k_ag_to_ab(ag, device_id);
2943 rx_ring = &partner_ab->dp.rx_refill_buf_ring;
2944
2945 ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring,
2946 &rx_desc_used_list[device_id],
2947 num_buffs_reaped[device_id]);
2948 }
2949
2950 ath12k_dp_rx_process_received_packets(ab, napi, &msdu_list,
2951 ring_id);
2952
2953exit:
2954 return total_msdu_reaped;
2955}
2956
2957static void ath12k_dp_rx_frag_timer(struct timer_list *timer)
2958{
2959 struct ath12k_dp_rx_tid *rx_tid = timer_container_of(rx_tid, timer,
2960 frag_timer);
2961
2962 spin_lock_bh(&rx_tid->ab->base_lock);
2963 if (rx_tid->last_frag_no &&
2964 rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
2965 spin_unlock_bh(&rx_tid->ab->base_lock);
2966 return;
2967 }
2968 ath12k_dp_rx_frags_cleanup(rx_tid, true);
2969 spin_unlock_bh(&rx_tid->ab->base_lock);
2970}
2971
2972int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id)
2973{
2974 struct ath12k_base *ab = ar->ab;
2975 struct crypto_shash *tfm;
2976 struct ath12k_peer *peer;
2977 struct ath12k_dp_rx_tid *rx_tid;
2978 int i;
2979
2980 tfm = crypto_alloc_shash("michael_mic", 0, 0);
2981 if (IS_ERR(tfm))
2982 return PTR_ERR(tfm);
2983
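	/* The Michael MIC transform is shared by all TIDs of this peer
	 * and is used by the defrag path to verify the TKIP MIC in
	 * software.
	 */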
2984 spin_lock_bh(&ab->base_lock);
2985
2986 peer = ath12k_peer_find(ab, vdev_id, peer_mac);
2987 if (!peer) {
2988 spin_unlock_bh(&ab->base_lock);
2989 crypto_free_shash(tfm);
2990 ath12k_warn(ab, "failed to find the peer to set up fragment info\n");
2991 return -ENOENT;
2992 }
2993
2994 if (!peer->primary_link) {
2995 spin_unlock_bh(&ab->base_lock);
2996 crypto_free_shash(tfm);
2997 return 0;
2998 }
2999
3000 for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
3001 rx_tid = &peer->rx_tid[i];
3002 rx_tid->ab = ab;
3003 timer_setup(&rx_tid->frag_timer, ath12k_dp_rx_frag_timer, 0);
3004 skb_queue_head_init(&rx_tid->rx_frags);
3005 }
3006
3007 peer->tfm_mmic = tfm;
3008 peer->dp_setup_done = true;
3009 spin_unlock_bh(&ab->base_lock);
3010
3011 return 0;
3012}
3013
3014static int ath12k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
3015 struct ieee80211_hdr *hdr, u8 *data,
3016 size_t data_len, u8 *mic)
3017{
3018 SHASH_DESC_ON_STACK(desc, tfm);
3019 u8 mic_hdr[16] = {0};
3020 u8 tid = 0;
3021 int ret;
3022
3023 if (!tfm)
3024 return -EINVAL;
3025
3026 desc->tfm = tfm;
3027
3028 ret = crypto_shash_setkey(tfm, key, 8);
3029 if (ret)
3030 goto out;
3031
3032 ret = crypto_shash_init(desc);
3033 if (ret)
3034 goto out;
3035
3036 /* TKIP MIC header */
3037 memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
3038 memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
3039 if (ieee80211_is_data_qos(hdr->frame_control))
3040 tid = ieee80211_get_tid(hdr);
3041 mic_hdr[12] = tid;
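	/* mic_hdr now holds DA | SA | priority | 3 zero pad bytes: the
	 * 16-byte pseudo-header over which the Michael MIC is computed.
	 */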
3042
3043 ret = crypto_shash_update(desc, mic_hdr, 16);
3044 if (ret)
3045 goto out;
3046 ret = crypto_shash_update(desc, data, data_len);
3047 if (ret)
3048 goto out;
3049 ret = crypto_shash_final(desc, mic);
3050out:
3051 shash_desc_zero(desc);
3052 return ret;
3053}
3054
3055static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k *ar, struct ath12k_peer *peer,
3056 struct sk_buff *msdu)
3057{
3058 struct ath12k_base *ab = ar->ab;
3059 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
3060 struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
3061 struct ieee80211_key_conf *key_conf;
3062 struct ieee80211_hdr *hdr;
3063 struct ath12k_dp_rx_info rx_info;
3064 u8 mic[IEEE80211_CCMP_MIC_LEN];
3065 int head_len, tail_len, ret;
3066 size_t data_len;
3067 u32 hdr_len, hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
3068 u8 *key, *data;
3069 u8 key_idx;
3070
3071 if (ath12k_dp_rx_h_enctype(ab, rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC)
3072 return 0;
3073
3074 rx_info.addr2_present = false;
3075 rx_info.rx_status = rxs;
3076
3077 hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
3078 hdr_len = ieee80211_hdrlen(hdr->frame_control);
3079 head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
3080 tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;
3081
3082 if (!is_multicast_ether_addr(hdr->addr1))
3083 key_idx = peer->ucast_keyidx;
3084 else
3085 key_idx = peer->mcast_keyidx;
3086
3087 key_conf = peer->keys[key_idx];
3088
3089 data = msdu->data + head_len;
3090 data_len = msdu->len - head_len - tail_len;
3091 key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
3092
3093 ret = ath12k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
3094 if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
3095 goto mic_fail;
3096
3097 return 0;
3098
3099mic_fail:
3100 (ATH12K_SKB_RXCB(msdu))->is_first_msdu = true;
3101 (ATH12K_SKB_RXCB(msdu))->is_last_msdu = true;
3102
3103 ath12k_dp_rx_h_fetch_info(ab, rx_desc, &rx_info);
3104
3105 rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
3106 RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
3107 skb_pull(msdu, hal_rx_desc_sz);
3108
3109 if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu)))
3110 return -EINVAL;
3111
3112 ath12k_dp_rx_h_ppdu(ar, &rx_info);
3113 ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
3114 HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
3115 ieee80211_rx(ath12k_ar_to_hw(ar), msdu);
3116 return -EINVAL;
3117}
3118
3119static void ath12k_dp_rx_h_undecap_frag(struct ath12k *ar, struct sk_buff *msdu,
3120 enum hal_encrypt_type enctype, u32 flags)
3121{
3122 struct ieee80211_hdr *hdr;
3123 size_t hdr_len;
3124 size_t crypto_len;
3125 u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
3126
3127 if (!flags)
3128 return;
3129
3130 hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
3131
3132 if (flags & RX_FLAG_MIC_STRIPPED)
3133 skb_trim(msdu, msdu->len -
3134 ath12k_dp_rx_crypto_mic_len(ar, enctype));
3135
3136 if (flags & RX_FLAG_ICV_STRIPPED)
3137 skb_trim(msdu, msdu->len -
3138 ath12k_dp_rx_crypto_icv_len(ar, enctype));
3139
3140 if (flags & RX_FLAG_IV_STRIPPED) {
3141 hdr_len = ieee80211_hdrlen(hdr->frame_control);
3142 crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
3143
3144 memmove(msdu->data + hal_rx_desc_sz + crypto_len,
3145 msdu->data + hal_rx_desc_sz, hdr_len);
3146 skb_pull(msdu, crypto_len);
3147 }
3148}
3149
3150static int ath12k_dp_rx_h_defrag(struct ath12k *ar,
3151 struct ath12k_peer *peer,
3152 struct ath12k_dp_rx_tid *rx_tid,
3153 struct sk_buff **defrag_skb)
3154{
3155 struct ath12k_base *ab = ar->ab;
3156 struct hal_rx_desc *rx_desc;
3157 struct sk_buff *skb, *first_frag, *last_frag;
3158 struct ieee80211_hdr *hdr;
3159 enum hal_encrypt_type enctype;
3160 bool is_decrypted = false;
3161 int msdu_len = 0;
3162 int extra_space;
3163 u32 flags, hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
3164
3165 first_frag = skb_peek(&rx_tid->rx_frags);
3166 last_frag = skb_peek_tail(&rx_tid->rx_frags);
3167
3168 skb_queue_walk(&rx_tid->rx_frags, skb) {
3169 flags = 0;
3170 rx_desc = (struct hal_rx_desc *)skb->data;
3171 hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
3172
3173 enctype = ath12k_dp_rx_h_enctype(ab, rx_desc);
3174 if (enctype != HAL_ENCRYPT_TYPE_OPEN)
3175 is_decrypted = ath12k_dp_rx_h_is_decrypted(ab,
3176 rx_desc);
3177
3178 if (is_decrypted) {
3179 if (skb != first_frag)
3180 flags |= RX_FLAG_IV_STRIPPED;
3181 if (skb != last_frag)
3182 flags |= RX_FLAG_ICV_STRIPPED |
3183 RX_FLAG_MIC_STRIPPED;
3184 }
3185
3186 /* RX fragments are always raw packets */
3187 if (skb != last_frag)
3188 skb_trim(skb, skb->len - FCS_LEN);
3189 ath12k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);
3190
3191 if (skb != first_frag)
3192 skb_pull(skb, hal_rx_desc_sz +
3193 ieee80211_hdrlen(hdr->frame_control));
3194 msdu_len += skb->len;
3195 }
3196
3197 extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
3198 if (extra_space > 0 &&
3199 (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
3200 return -ENOMEM;
3201
3202 __skb_unlink(first_frag, &rx_tid->rx_frags);
3203 while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
3204 skb_put_data(first_frag, skb->data, skb->len);
3205 dev_kfree_skb_any(skb);
3206 }
3207
3208 hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz);
3209 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
3210 ATH12K_SKB_RXCB(first_frag)->is_frag = 1;
3211
3212 if (ath12k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
3213 first_frag = NULL;
3214
3215 *defrag_skb = first_frag;
3216 return 0;
3217}
3218
3219static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
3220 struct ath12k_dp_rx_tid *rx_tid,
3221 struct sk_buff *defrag_skb)
3222{
3223 struct ath12k_base *ab = ar->ab;
3224 struct ath12k_dp *dp = &ab->dp;
3225 struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
3226 struct hal_reo_entrance_ring *reo_ent_ring;
3227 struct hal_reo_dest_ring *reo_dest_ring;
3228 struct dp_link_desc_bank *link_desc_banks;
3229 struct hal_rx_msdu_link *msdu_link;
3230 struct hal_rx_msdu_details *msdu0;
3231 struct hal_srng *srng;
3232 dma_addr_t link_paddr, buf_paddr;
3233 u32 desc_bank, msdu_info, msdu_ext_info, mpdu_info;
3234 u32 cookie, hal_rx_desc_sz, dest_ring_info0, queue_addr_hi;
3235 int ret;
3236 struct ath12k_rx_desc_info *desc_info;
3237 enum hal_rx_buf_return_buf_manager idle_link_rbm = dp->idle_link_rbm;
3238 u8 dst_ind;
3239
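	/* Reinject the reassembled MSDU into the REO entrance ring so
	 * that HW can perform the usual PN check and reordering before
	 * the frame comes back on the regular rx path: reuse the link
	 * descriptor saved from fragment 0, rewrite its first MSDU entry
	 * to point at defrag_skb and queue it with the VALID_PN bit set.
	 */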
3240 hal_rx_desc_sz = ab->hal.hal_desc_sz;
3241 link_desc_banks = dp->link_desc_banks;
3242 reo_dest_ring = rx_tid->dst_ring_desc;
3243
3244 ath12k_hal_rx_reo_ent_paddr_get(ab, &reo_dest_ring->buf_addr_info,
3245 &link_paddr, &cookie);
3246 desc_bank = u32_get_bits(cookie, DP_LINK_DESC_BANK_MASK);
3247
3248 msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
3249 (link_paddr - link_desc_banks[desc_bank].paddr));
3250 msdu0 = &msdu_link->msdu_link[0];
3251 msdu_ext_info = le32_to_cpu(msdu0->rx_msdu_ext_info.info0);
3252 dst_ind = u32_get_bits(msdu_ext_info, RX_MSDU_EXT_DESC_INFO0_REO_DEST_IND);
3253
3254 memset(msdu0, 0, sizeof(*msdu0));
3255
3256 msdu_info = u32_encode_bits(1, RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU) |
3257 u32_encode_bits(1, RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU) |
3258 u32_encode_bits(0, RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) |
3259 u32_encode_bits(defrag_skb->len - hal_rx_desc_sz,
3260 RX_MSDU_DESC_INFO0_MSDU_LENGTH) |
3261 u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_SA) |
3262 u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_DA);
3263 msdu0->rx_msdu_info.info0 = cpu_to_le32(msdu_info);
3264 msdu0->rx_msdu_ext_info.info0 = cpu_to_le32(msdu_ext_info);
3265
3266 /* change msdu len in hal rx desc */
3267 ath12k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz);
3268
3269 buf_paddr = dma_map_single(ab->dev, defrag_skb->data,
3270 defrag_skb->len + skb_tailroom(defrag_skb),
3271 DMA_TO_DEVICE);
3272 if (dma_mapping_error(ab->dev, buf_paddr))
3273 return -ENOMEM;
3274
3275 spin_lock_bh(&dp->rx_desc_lock);
3276 desc_info = list_first_entry_or_null(&dp->rx_desc_free_list,
3277 struct ath12k_rx_desc_info,
3278 list);
3279 if (!desc_info) {
3280 spin_unlock_bh(&dp->rx_desc_lock);
3281 ath12k_warn(ab, "failed to find rx desc for reinject\n");
3282 ret = -ENOMEM;
3283 goto err_unmap_dma;
3284 }
3285
3286 desc_info->skb = defrag_skb;
3287 desc_info->in_use = true;
3288
3289 list_del(&desc_info->list);
3290 spin_unlock_bh(&dp->rx_desc_lock);
3291
3292 ATH12K_SKB_RXCB(defrag_skb)->paddr = buf_paddr;
3293
3294 ath12k_hal_rx_buf_addr_info_set(&msdu0->buf_addr_info, buf_paddr,
3295 desc_info->cookie,
3296 HAL_RX_BUF_RBM_SW3_BM);
3297
3298 /* Fill mpdu details into reo entrance ring */
3299 srng = &ab->hal.srng_list[dp->reo_reinject_ring.ring_id];
3300
3301 spin_lock_bh(&srng->lock);
3302 ath12k_hal_srng_access_begin(ab, srng);
3303
3304 reo_ent_ring = ath12k_hal_srng_src_get_next_entry(ab, srng);
3305 if (!reo_ent_ring) {
3306 ath12k_hal_srng_access_end(ab, srng);
3307 spin_unlock_bh(&srng->lock);
3308 ret = -ENOSPC;
3309 goto err_free_desc;
3310 }
3311 memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));
3312
3313 ath12k_hal_rx_buf_addr_info_set(&reo_ent_ring->buf_addr_info, link_paddr,
3314 cookie,
3315 idle_link_rbm);
3316
3317 mpdu_info = u32_encode_bits(1, RX_MPDU_DESC_INFO0_MSDU_COUNT) |
3318 u32_encode_bits(0, RX_MPDU_DESC_INFO0_FRAG_FLAG) |
3319 u32_encode_bits(1, RX_MPDU_DESC_INFO0_RAW_MPDU) |
3320 u32_encode_bits(1, RX_MPDU_DESC_INFO0_VALID_PN) |
3321 u32_encode_bits(rx_tid->tid, RX_MPDU_DESC_INFO0_TID);
3322
3323 reo_ent_ring->rx_mpdu_info.info0 = cpu_to_le32(mpdu_info);
3324 reo_ent_ring->rx_mpdu_info.peer_meta_data =
3325 reo_dest_ring->rx_mpdu_info.peer_meta_data;
3326
3327 if (ab->hw_params->reoq_lut_support) {
3328 reo_ent_ring->queue_addr_lo = reo_dest_ring->rx_mpdu_info.peer_meta_data;
3329 queue_addr_hi = 0;
3330 } else {
3331 reo_ent_ring->queue_addr_lo =
3332 cpu_to_le32(lower_32_bits(rx_tid->qbuf.paddr_aligned));
3333 queue_addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
3334 }
3335
3336 reo_ent_ring->info0 = le32_encode_bits(queue_addr_hi,
3337 HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI) |
3338 le32_encode_bits(dst_ind,
3339 HAL_REO_ENTR_RING_INFO0_DEST_IND);
3340
3341 reo_ent_ring->info1 = le32_encode_bits(rx_tid->cur_sn,
3342 HAL_REO_ENTR_RING_INFO1_MPDU_SEQ_NUM);
3343 dest_ring_info0 = le32_get_bits(reo_dest_ring->info0,
3344 HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
3345 reo_ent_ring->info2 =
3346 cpu_to_le32(u32_get_bits(dest_ring_info0,
3347 HAL_REO_ENTR_RING_INFO2_SRC_LINK_ID));
3348
3349 ath12k_hal_srng_access_end(ab, srng);
3350 spin_unlock_bh(&srng->lock);
3351
3352 return 0;
3353
3354err_free_desc:
3355 spin_lock_bh(&dp->rx_desc_lock);
3356 desc_info->in_use = false;
3357 desc_info->skb = NULL;
3358 list_add_tail(&desc_info->list, &dp->rx_desc_free_list);
3359 spin_unlock_bh(&dp->rx_desc_lock);
3360err_unmap_dma:
3361 dma_unmap_single(ab->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb),
3362 DMA_TO_DEVICE);
3363 return ret;
3364}
3365
3366static int ath12k_dp_rx_h_cmp_frags(struct ath12k_base *ab,
3367 struct sk_buff *a, struct sk_buff *b)
3368{
3369 int frag1, frag2;
3370
3371 frag1 = ath12k_dp_rx_h_frag_no(ab, a);
3372 frag2 = ath12k_dp_rx_h_frag_no(ab, b);
3373
3374 return frag1 - frag2;
3375}
3376
3377static void ath12k_dp_rx_h_sort_frags(struct ath12k_base *ab,
3378 struct sk_buff_head *frag_list,
3379 struct sk_buff *cur_frag)
3380{
3381 struct sk_buff *skb;
3382 int cmp;
3383
3384 skb_queue_walk(frag_list, skb) {
3385 cmp = ath12k_dp_rx_h_cmp_frags(ab, skb, cur_frag);
3386 if (cmp < 0)
3387 continue;
3388 __skb_queue_before(frag_list, skb, cur_frag);
3389 return;
3390 }
3391 __skb_queue_tail(frag_list, cur_frag);
3392}
3393
3394static u64 ath12k_dp_rx_h_get_pn(struct ath12k *ar, struct sk_buff *skb)
3395{
3396 struct ieee80211_hdr *hdr;
3397 u64 pn = 0;
3398 u8 *ehdr;
3399 u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
3400
3401 hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
3402 ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);
3403
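	/* CCMP/GCMP IV layout: bytes 0-1 carry PN0/PN1, byte 2 is
	 * reserved, byte 3 holds the key id, and bytes 4-7 carry
	 * PN2-PN5.
	 */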
3404 pn = ehdr[0];
3405 pn |= (u64)ehdr[1] << 8;
3406 pn |= (u64)ehdr[4] << 16;
3407 pn |= (u64)ehdr[5] << 24;
3408 pn |= (u64)ehdr[6] << 32;
3409 pn |= (u64)ehdr[7] << 40;
3410
3411 return pn;
3412}
3413
3414static bool
3415ath12k_dp_rx_h_defrag_validate_incr_pn(struct ath12k *ar, struct ath12k_dp_rx_tid *rx_tid)
3416{
3417 struct ath12k_base *ab = ar->ab;
3418 enum hal_encrypt_type encrypt_type;
3419 struct sk_buff *first_frag, *skb;
3420 struct hal_rx_desc *desc;
3421 u64 last_pn;
3422 u64 cur_pn;
3423
3424 first_frag = skb_peek(&rx_tid->rx_frags);
3425 desc = (struct hal_rx_desc *)first_frag->data;
3426
3427 encrypt_type = ath12k_dp_rx_h_enctype(ab, desc);
3428 if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
3429 encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
3430 encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
3431 encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
3432 return true;
3433
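	/* For PN-based ciphers every fragment of an MPDU must carry a
	 * strictly incrementing PN; any gap or repeat indicates replayed
	 * or injected fragments, and the caller drops the whole
	 * sequence.
	 */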
3434 last_pn = ath12k_dp_rx_h_get_pn(ar, first_frag);
3435 skb_queue_walk(&rx_tid->rx_frags, skb) {
3436 if (skb == first_frag)
3437 continue;
3438
3439 cur_pn = ath12k_dp_rx_h_get_pn(ar, skb);
3440 if (cur_pn != last_pn + 1)
3441 return false;
3442 last_pn = cur_pn;
3443 }
3444 return true;
3445}
3446
3447static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar,
3448 struct sk_buff *msdu,
3449 struct hal_reo_dest_ring *ring_desc)
3450{
3451 struct ath12k_base *ab = ar->ab;
3452 struct hal_rx_desc *rx_desc;
3453 struct ath12k_peer *peer;
3454 struct ath12k_dp_rx_tid *rx_tid;
3455 struct sk_buff *defrag_skb = NULL;
3456 u32 peer_id;
3457 u16 seqno, frag_no;
3458 u8 tid;
3459 int ret = 0;
3460 bool more_frags;
3461
3462 rx_desc = (struct hal_rx_desc *)msdu->data;
3463 peer_id = ath12k_dp_rx_h_peer_id(ab, rx_desc);
3464 tid = ath12k_dp_rx_h_tid(ab, rx_desc);
3465 seqno = ath12k_dp_rx_h_seq_no(ab, rx_desc);
3466 frag_no = ath12k_dp_rx_h_frag_no(ab, msdu);
3467 more_frags = ath12k_dp_rx_h_more_frags(ab, msdu);
3468
3469 if (!ath12k_dp_rx_h_seq_ctrl_valid(ab, rx_desc) ||
3470 !ath12k_dp_rx_h_fc_valid(ab, rx_desc) ||
3471 tid > IEEE80211_NUM_TIDS)
3472 return -EINVAL;
3473
3474 /* Received an unfragmented packet in the REO
3475 * exception ring; this shouldn't happen, as
3476 * such packets typically come from the
3477 * reo2sw srngs.
3478 */
3479 if (WARN_ON_ONCE(!frag_no && !more_frags))
3480 return -EINVAL;
3481
3482 spin_lock_bh(&ab->base_lock);
3483 peer = ath12k_peer_find_by_id(ab, peer_id);
3484 if (!peer) {
3485 ath12k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n",
3486 peer_id);
3487 ret = -ENOENT;
3488 goto out_unlock;
3489 }
3490
3491 if (!peer->dp_setup_done) {
3492 ath12k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n",
3493 peer->addr, peer_id);
3494 ret = -ENOENT;
3495 goto out_unlock;
3496 }
3497
3498 rx_tid = &peer->rx_tid[tid];
3499
3500 if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
3501 skb_queue_empty(&rx_tid->rx_frags)) {
3502 /* Flush stored fragments and start a new sequence */
3503 ath12k_dp_rx_frags_cleanup(rx_tid, true);
3504 rx_tid->cur_sn = seqno;
3505 }
3506
3507 if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
3508 /* Fragment already present */
3509 ret = -EINVAL;
3510 goto out_unlock;
3511 }
3512
3513 if (!rx_tid->rx_frag_bitmap || frag_no > __fls(rx_tid->rx_frag_bitmap))
3514 __skb_queue_tail(&rx_tid->rx_frags, msdu);
3515 else
3516 ath12k_dp_rx_h_sort_frags(ab, &rx_tid->rx_frags, msdu);
3517
3518 rx_tid->rx_frag_bitmap |= BIT(frag_no);
3519 if (!more_frags)
3520 rx_tid->last_frag_no = frag_no;
3521
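	/* Keep the ring descriptor of fragment 0 so that its link
	 * descriptor can be reused when the reassembled MPDU is
	 * reinjected; descriptors of later fragments are returned to the
	 * idle list right away.
	 */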
3522 if (frag_no == 0) {
3523 rx_tid->dst_ring_desc = kmemdup(ring_desc,
3524 sizeof(*rx_tid->dst_ring_desc),
3525 GFP_ATOMIC);
3526 if (!rx_tid->dst_ring_desc) {
3527 ret = -ENOMEM;
3528 goto out_unlock;
3529 }
3530 } else {
3531 ath12k_dp_rx_link_desc_return(ab, &ring_desc->buf_addr_info,
3532 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3533 }
3534
3535 if (!rx_tid->last_frag_no ||
3536 rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
3537 mod_timer(&rx_tid->frag_timer, jiffies +
3538 ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS);
3539 goto out_unlock;
3540 }
3541
3542 spin_unlock_bh(&ab->base_lock);
3543 timer_delete_sync(&rx_tid->frag_timer);
3544 spin_lock_bh(&ab->base_lock);
3545
3546 peer = ath12k_peer_find_by_id(ab, peer_id);
3547 if (!peer)
3548 goto err_frags_cleanup;
3549
3550 if (!ath12k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))
3551 goto err_frags_cleanup;
3552
3553 if (ath12k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))
3554 goto err_frags_cleanup;
3555
3556 if (!defrag_skb)
3557 goto err_frags_cleanup;
3558
3559 if (ath12k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))
3560 goto err_frags_cleanup;
3561
3562 ath12k_dp_rx_frags_cleanup(rx_tid, false);
3563 goto out_unlock;
3564
3565err_frags_cleanup:
3566 dev_kfree_skb_any(defrag_skb);
3567 ath12k_dp_rx_frags_cleanup(rx_tid, true);
3568out_unlock:
3569 spin_unlock_bh(&ab->base_lock);
3570 return ret;
3571}
3572
3573static int
3574ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
3575 struct list_head *used_list,
3576 bool drop, u32 cookie)
3577{
3578 struct ath12k_base *ab = ar->ab;
3579 struct sk_buff *msdu;
3580 struct ath12k_skb_rxcb *rxcb;
3581 struct hal_rx_desc *rx_desc;
3582 u16 msdu_len;
3583 u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
3584 struct ath12k_rx_desc_info *desc_info;
3585 u64 desc_va;
3586
3587 desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |
3588 le32_to_cpu(desc->buf_va_lo));
3589 desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);
3590
3591 /* retry manual desc retrieval */
3592 if (!desc_info) {
3593 desc_info = ath12k_dp_get_rx_desc(ab, cookie);
3594 if (!desc_info) {
3595 ath12k_warn(ab, "Invalid cookie in DP rx error descriptor retrieval: 0x%x\n",
3596 cookie);
3597 return -EINVAL;
3598 }
3599 }
3600
3601 if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
3602 ath12k_warn(ab, "RX exception, check HW CC implementation");
3603
3604 msdu = desc_info->skb;
3605 desc_info->skb = NULL;
3606
3607 list_add_tail(&desc_info->list, used_list);
3608
3609 rxcb = ATH12K_SKB_RXCB(msdu);
3610 dma_unmap_single(ar->ab->dev, rxcb->paddr,
3611 msdu->len + skb_tailroom(msdu),
3612 DMA_FROM_DEVICE);
3613
3614 if (drop) {
3615 dev_kfree_skb_any(msdu);
3616 return 0;
3617 }
3618
3619 rcu_read_lock();
3620 if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
3621 dev_kfree_skb_any(msdu);
3622 goto exit;
3623 }
3624
3625 if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
3626 dev_kfree_skb_any(msdu);
3627 goto exit;
3628 }
3629
3630 rx_desc = (struct hal_rx_desc *)msdu->data;
3631 msdu_len = ath12k_dp_rx_h_msdu_len(ar->ab, rx_desc);
3632 if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
3633 ath12k_warn(ar->ab, "invalid msdu len %u\n", msdu_len);
3634 ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
3635 sizeof(*rx_desc));
3636 dev_kfree_skb_any(msdu);
3637 goto exit;
3638 }
3639
3640 skb_put(msdu, hal_rx_desc_sz + msdu_len);
3641
3642 if (ath12k_dp_rx_frag_h_mpdu(ar, msdu, desc)) {
3643 dev_kfree_skb_any(msdu);
3644 ath12k_dp_rx_link_desc_return(ar->ab, &desc->buf_addr_info,
3645 HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3646 }
3647exit:
3648 rcu_read_unlock();
3649 return 0;
3650}
3651
3652int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
3653 int budget)
3654{
3655 struct ath12k_hw_group *ag = ab->ag;
3656 struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
3657 u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
3658 int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
3659 struct dp_link_desc_bank *link_desc_banks;
3660 enum hal_rx_buf_return_buf_manager rbm;
3661 struct hal_rx_msdu_link *link_desc_va;
3662 int tot_n_bufs_reaped, quota, ret, i;
3663 struct hal_reo_dest_ring *reo_desc;
3664 struct dp_rxdma_ring *rx_ring;
3665 struct dp_srng *reo_except;
3666 struct ath12k_hw_link *hw_links = ag->hw_links;
3667 struct ath12k_base *partner_ab;
3668 u8 hw_link_id, device_id;
3669 u32 desc_bank, num_msdus;
3670 struct hal_srng *srng;
3671 struct ath12k *ar;
3672 dma_addr_t paddr;
3673 bool is_frag;
3674 bool drop;
3675 int pdev_id;
3676
3677 tot_n_bufs_reaped = 0;
3678 quota = budget;
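	/* "quota" keeps the original NAPI budget; "budget" shrinks as
	 * buffers are reaped so the loop below stops once the allowance
	 * is used up.
	 */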
3679
3680 for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
3681 INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
3682
3683 reo_except = &ab->dp.reo_except_ring;
3684
3685 srng = &ab->hal.srng_list[reo_except->ring_id];
3686
3687 spin_lock_bh(&srng->lock);
3688
3689 ath12k_hal_srng_access_begin(ab, srng);
3690
3691 while (budget &&
3692 (reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
3693 drop = false;
3694 ab->device_stats.err_ring_pkts++;
3695
3696 ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr,
3697 &desc_bank);
3698 if (ret) {
3699 ath12k_warn(ab, "failed to parse error reo desc %d\n",
3700 ret);
3701 continue;
3702 }
3703
3704 hw_link_id = le32_get_bits(reo_desc->info0,
3705 HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
3706 device_id = hw_links[hw_link_id].device_id;
3707 partner_ab = ath12k_ag_to_ab(ag, device_id);
3708
3709 pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params,
3710 hw_links[hw_link_id].pdev_idx);
3711 ar = partner_ab->pdevs[pdev_id].ar;
3712
3713 link_desc_banks = partner_ab->dp.link_desc_banks;
3714 link_desc_va = link_desc_banks[desc_bank].vaddr +
3715 (paddr - link_desc_banks[desc_bank].paddr);
3716 ath12k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
3717 &rbm);
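		/* Sanity-check the return buffer manager: anything other
		 * than the idle link RBM or the SW buffer RBMs indicates a
		 * corrupted link descriptor, which is handed back to HW.
		 */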
		if (rbm != partner_ab->dp.idle_link_rbm &&
		    rbm != HAL_RX_BUF_RBM_SW3_BM &&
		    rbm != partner_ab->hw_params->hal_params->rx_buf_rbm) {
			ab->device_stats.invalid_rbm++;
			ath12k_warn(ab, "invalid return buffer manager %d\n", rbm);
			ath12k_dp_rx_link_desc_return(partner_ab,
						      &reo_desc->buf_addr_info,
						      HAL_WBM_REL_BM_ACT_REL_MSDU);
			continue;
		}

		is_frag = !!(le32_to_cpu(reo_desc->rx_mpdu_info.info0) &
			     RX_MPDU_DESC_INFO0_FRAG_FLAG);

		/* Only rx fragments with one MSDU per link descriptor are
		 * processed below; MSDUs flagged due to error reasons are
		 * dropped. Dynamic fragmentation is not supported for
		 * multi-link clients, so partner device buffers are dropped
		 * as well.
		 */
		if (!is_frag || num_msdus > 1 ||
		    partner_ab->device_id != ab->device_id) {
			drop = true;

			/* Return the link desc back to wbm idle list */
			ath12k_dp_rx_link_desc_return(partner_ab,
						      &reo_desc->buf_addr_info,
						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
		}

		for (i = 0; i < num_msdus; i++) {
			if (!ath12k_dp_process_rx_err_buf(ar, reo_desc,
							  &rx_desc_used_list[device_id],
							  drop,
							  msdu_cookies[i])) {
				num_buffs_reaped[device_id]++;
				tot_n_bufs_reaped++;
			}
		}

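		/* Clamp to the NAPI quota; the remaining budget bounds the
		 * next iteration over the exception ring.
		 */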
		if (tot_n_bufs_reaped >= quota) {
			tot_n_bufs_reaped = quota;
			goto exit;
		}

		budget = quota - tot_n_bufs_reaped;
	}

exit:
	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
		if (!num_buffs_reaped[device_id])
			continue;

		partner_ab = ath12k_ag_to_ab(ag, device_id);
		rx_ring = &partner_ab->dp.rx_refill_buf_ring;

		ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring,
					    &rx_desc_used_list[device_id],
					    num_buffs_reaped[device_id]);
	}

	return tot_n_bufs_reaped;
}

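/* A NULL queue descriptor error on an oversized MSDU leaves the remaining
 * scatter buffers of that MSDU queued in the list: work out how many rx
 * buffers the leftover length spans and free exactly that many matching
 * entries.
 */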
static void ath12k_dp_rx_null_q_desc_sg_drop(struct ath12k *ar,
					     int msdu_len,
					     struct sk_buff_head *msdu_list)
{
	struct sk_buff *skb, *tmp;
	struct ath12k_skb_rxcb *rxcb;
	int n_buffs;

	n_buffs = DIV_ROUND_UP(msdu_len,
			       (DP_RX_BUFFER_SIZE - ar->ab->hal.hal_desc_sz));

	skb_queue_walk_safe(msdu_list, skb, tmp) {
		rxcb = ATH12K_SKB_RXCB(skb);
		if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
		    rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
			if (!n_buffs)
				break;
			__skb_unlink(skb, msdu_list);
			dev_kfree_skb_any(skb);
			n_buffs--;
		}
	}
}

static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu,
				      struct ath12k_dp_rx_info *rx_info,
				      struct sk_buff_head *msdu_list)
{
	struct ath12k_base *ab = ar->ab;
	u16 msdu_len;
	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
	u8 l3pad_bytes;
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;

	msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);

	if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
		/* First buffer will be freed by the caller, so deduct its length */
		msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
		ath12k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
		return -EINVAL;
	}

	/* Even after the sg buffers are cleaned up by the check above, any
	 * msdu received with the continuation flag still set must be dropped
	 * as invalid; this guards against stray error frames carrying the
	 * continuation flag.
	 */
	if (rxcb->is_continuation)
		return -EINVAL;

	if (!ath12k_dp_rx_h_msdu_done(ab, desc)) {
		ath12k_warn(ar->ab,
			    "msdu_done bit not set in null_q_des processing\n");
		__skb_queue_purge(msdu_list);
		return -EIO;
	}

	/* Handle NULL queue descriptor violations arising out of a missing
	 * REO queue for a given peer or a given TID. This typically happens
	 * if a packet is received on a QoS-enabled TID before the ADDBA
	 * negotiation that sets up the TID queue has completed. It may also
	 * happen for MC/BC frames if they are not routed to the non-QoS TID
	 * queue, in the absence of any other default TID queue. This error
	 * can show up in both the REO destination and WBM release rings.
	 */

	if (rxcb->is_frag) {
		skb_pull(msdu, hal_rx_desc_sz);
	} else {
		l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);

		if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
			return -EINVAL;

		skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
		skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
	}
	if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu)))
		return -EINVAL;

	ath12k_dp_rx_h_fetch_info(ab, desc, rx_info);
	ath12k_dp_rx_h_ppdu(ar, rx_info);
	ath12k_dp_rx_h_mpdu(ar, msdu, desc, rx_info);

	rxcb->tid = rx_info->tid;

	/* Note that the caller still has access to the msdu and completes
	 * rx with mac80211, so there is no need to clean up amsdu_list here.
	 */

	return 0;
}

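/* Dispatch on the REO error code recorded in the skb control block; only
 * NULL queue descriptor errors are recovered today, everything else is
 * dropped.
 */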
static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu,
				   struct ath12k_dp_rx_info *rx_info,
				   struct sk_buff_head *msdu_list)
{
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	bool drop = false;

	ar->ab->device_stats.reo_error[rxcb->err_code]++;

	switch (rxcb->err_code) {
	case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
		if (ath12k_dp_rx_h_null_q_desc(ar, msdu, rx_info, msdu_list))
			drop = true;
		break;
	case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
		/* TODO: Do not drop PN failed packets in the driver;
		 * instead, it is better to drop such packets in mac80211
		 * after incrementing the replay counters.
		 */
		fallthrough;
	default:
		/* TODO: Review other errors and process them to mac80211
		 * as appropriate.
		 */
		drop = true;
		break;
	}

	return drop;
}

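/* A TKIP MIC failure still carries a usable payload: strip the rx
 * descriptor and L3 padding, flag the frame as decrypted with the Michael
 * MIC stripped, and let mac80211 run its TKIP countermeasures.
 */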
static bool ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
					struct ath12k_dp_rx_info *rx_info)
{
	struct ath12k_base *ab = ar->ab;
	u16 msdu_len;
	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
	u8 l3pad_bytes;
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;

	rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, desc);
	rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, desc);

	l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);
	msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);

	if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) {
		ath12k_dbg(ab, ATH12K_DBG_DATA,
			   "invalid msdu len in tkip mic err %u\n", msdu_len);
		ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", desc,
				sizeof(*desc));
		return true;
	}

	skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
	skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);

	if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu)))
		return true;

	ath12k_dp_rx_h_ppdu(ar, rx_info);

	rx_info->rx_status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
				     RX_FLAG_DECRYPTED);

	ath12k_dp_rx_h_undecap(ar, msdu, desc,
			       HAL_ENCRYPT_TYPE_TKIP_MIC, rx_info->rx_status, false);
	return false;
}

static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu,
				     struct ath12k_dp_rx_info *rx_info)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
	bool drop = false;
	u32 err_bitmap;

	ar->ab->device_stats.rxdma_error[rxcb->err_code]++;

	switch (rxcb->err_code) {
	case HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR:
	case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
		err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
		if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) {
			ath12k_dp_rx_h_fetch_info(ab, rx_desc, rx_info);
			drop = ath12k_dp_rx_h_tkip_mic_err(ar, msdu, rx_info);
			break;
		}
		fallthrough;
	default:
		/* TODO: Review the other rxdma error codes to check if
		 * anything is worth reporting to mac80211.
		 */
		drop = true;
		break;
	}

	return drop;
}

static void ath12k_dp_rx_wbm_err(struct ath12k *ar,
				 struct napi_struct *napi,
				 struct sk_buff *msdu,
				 struct sk_buff_head *msdu_list)
{
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	struct ieee80211_rx_status rxs = {0};
	struct ath12k_dp_rx_info rx_info;
	bool drop = true;

	rx_info.addr2_present = false;
	rx_info.rx_status = &rxs;

	switch (rxcb->err_rel_src) {
	case HAL_WBM_REL_SRC_MODULE_REO:
		drop = ath12k_dp_rx_h_reo_err(ar, msdu, &rx_info, msdu_list);
		break;
	case HAL_WBM_REL_SRC_MODULE_RXDMA:
		drop = ath12k_dp_rx_h_rxdma_err(ar, msdu, &rx_info);
		break;
	default:
		/* msdu will get freed */
		break;
	}

	if (drop) {
		dev_kfree_skb_any(msdu);
		return;
	}

	ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_info);
}

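/* Drain the WBM error release ring. An MSDU spanning several rx buffers
 * arrives as a run of "continuation" entries; those are parked on
 * scatter_msdu_list until the final buffer shows up, then the whole run is
 * tagged with the source hw link and moved to msdu_list for delivery.
 */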
int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
				 struct napi_struct *napi, int budget)
{
	struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
	struct ath12k_hw_group *ag = ab->ag;
	struct ath12k *ar;
	struct ath12k_dp *dp = &ab->dp;
	struct dp_rxdma_ring *rx_ring;
	struct hal_rx_wbm_rel_info err_info;
	struct hal_srng *srng;
	struct sk_buff *msdu;
	struct sk_buff_head msdu_list, scatter_msdu_list;
	struct ath12k_skb_rxcb *rxcb;
	void *rx_desc;
	int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
	int total_num_buffs_reaped = 0;
	struct ath12k_rx_desc_info *desc_info;
	struct ath12k_device_dp_stats *device_stats = &ab->device_stats;
	struct ath12k_hw_link *hw_links = ag->hw_links;
	struct ath12k_base *partner_ab;
	u8 hw_link_id, device_id;
	int ret, pdev_id;
	struct hal_rx_desc *msdu_data;

	__skb_queue_head_init(&msdu_list);
	__skb_queue_head_init(&scatter_msdu_list);

	for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
		INIT_LIST_HEAD(&rx_desc_used_list[device_id]);

	srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	while (budget) {
		rx_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng);
		if (!rx_desc)
			break;

		ret = ath12k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
		if (ret) {
			ath12k_warn(ab,
				    "failed to parse rx error in wbm_rel ring desc %d\n",
				    ret);
			continue;
		}

		desc_info = err_info.rx_desc;

		/* retry manual desc retrieval if hw cc is not done */
		if (!desc_info) {
			desc_info = ath12k_dp_get_rx_desc(ab, err_info.cookie);
			if (!desc_info) {
				ath12k_warn(ab, "Invalid cookie in DP WBM rx error descriptor retrieval: 0x%x\n",
					    err_info.cookie);
				continue;
			}
		}

		if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
			ath12k_warn(ab, "WBM RX err, check HW CC implementation");

		msdu = desc_info->skb;
		desc_info->skb = NULL;

		device_id = desc_info->device_id;
		partner_ab = ath12k_ag_to_ab(ag, device_id);
		if (unlikely(!partner_ab)) {
			dev_kfree_skb_any(msdu);

			/* In case the continuation bit was set in the
			 * previous record, clean up scatter_msdu_list.
			 */
			ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
			continue;
		}

		list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]);

		rxcb = ATH12K_SKB_RXCB(msdu);
		dma_unmap_single(partner_ab->dev, rxcb->paddr,
				 msdu->len + skb_tailroom(msdu),
				 DMA_FROM_DEVICE);

		num_buffs_reaped[device_id]++;
		total_num_buffs_reaped++;

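		/* Only a completed MSDU counts against the NAPI budget;
		 * continuation buffers belong to the same frame.
		 */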
		if (!err_info.continuation)
			budget--;

		if (err_info.push_reason !=
		    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
			dev_kfree_skb_any(msdu);
			continue;
		}

		msdu_data = (struct hal_rx_desc *)msdu->data;
		rxcb->err_rel_src = err_info.err_rel_src;
		rxcb->err_code = err_info.err_code;
		rxcb->is_first_msdu = err_info.first_msdu;
		rxcb->is_last_msdu = err_info.last_msdu;
		rxcb->is_continuation = err_info.continuation;
		rxcb->rx_desc = msdu_data;

		if (err_info.continuation) {
			__skb_queue_tail(&scatter_msdu_list, msdu);
			continue;
		}

		hw_link_id = ath12k_dp_rx_get_msdu_src_link(partner_ab,
							    msdu_data);
		if (hw_link_id >= ATH12K_GROUP_MAX_RADIO) {
			dev_kfree_skb_any(msdu);

			/* In case the continuation bit was set in the
			 * previous record, clean up scatter_msdu_list.
			 */
			ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
			continue;
		}

		if (!skb_queue_empty(&scatter_msdu_list)) {
			struct sk_buff *msdu;

			skb_queue_walk(&scatter_msdu_list, msdu) {
				rxcb = ATH12K_SKB_RXCB(msdu);
				rxcb->hw_link_id = hw_link_id;
			}

			skb_queue_splice_tail_init(&scatter_msdu_list,
						   &msdu_list);
		}

		rxcb = ATH12K_SKB_RXCB(msdu);
		rxcb->hw_link_id = hw_link_id;
		__skb_queue_tail(&msdu_list, msdu);
	}

	/* In case the continuation bit was set in the last record,
	 * clean up scatter_msdu_list.
	 */
	ath12k_dp_clean_up_skb_list(&scatter_msdu_list);

	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	if (!total_num_buffs_reaped)
		goto done;

	for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
		if (!num_buffs_reaped[device_id])
			continue;

		partner_ab = ath12k_ag_to_ab(ag, device_id);
		rx_ring = &partner_ab->dp.rx_refill_buf_ring;

		ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring,
					    &rx_desc_used_list[device_id],
					    num_buffs_reaped[device_id]);
	}

	rcu_read_lock();
	while ((msdu = __skb_dequeue(&msdu_list))) {
		rxcb = ATH12K_SKB_RXCB(msdu);
		hw_link_id = rxcb->hw_link_id;

		device_id = hw_links[hw_link_id].device_id;
		partner_ab = ath12k_ag_to_ab(ag, device_id);
		if (unlikely(!partner_ab)) {
			ath12k_dbg(ab, ATH12K_DBG_DATA,
				   "Unable to process WBM error msdu due to invalid hw link id %d device id %d\n",
				   hw_link_id, device_id);
			dev_kfree_skb_any(msdu);
			continue;
		}

		pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params,
						      hw_links[hw_link_id].pdev_idx);
		ar = partner_ab->pdevs[pdev_id].ar;

		if (!ar || !rcu_dereference(ar->ab->pdevs_active[pdev_id])) {
			dev_kfree_skb_any(msdu);
			continue;
		}

		if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
			dev_kfree_skb_any(msdu);
			continue;
		}

		if (rxcb->err_rel_src < HAL_WBM_REL_SRC_MODULE_MAX) {
			device_id = ar->ab->device_id;
			device_stats->rx_wbm_rel_source[rxcb->err_rel_src][device_id]++;
		}

		ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list);
	}
	rcu_read_unlock();
done:
	return total_num_buffs_reaped;
}

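/* Match each REO status TLV against the list of pending REO commands and
 * invoke the completion handler that was registered when the command was
 * issued.
 */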
void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct hal_tlv_64_hdr *hdr;
	struct hal_srng *srng;
	struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
	bool found = false;
	u16 tag;
	struct hal_reo_status reo_status;

	srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];

	memset(&reo_status, 0, sizeof(reo_status));

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	while ((hdr = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
		tag = le64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG);

		switch (tag) {
		case HAL_REO_GET_QUEUE_STATS_STATUS:
			ath12k_hal_reo_status_queue_stats(ab, hdr,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_QUEUE_STATUS:
			ath12k_hal_reo_flush_queue_status(ab, hdr,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_CACHE_STATUS:
			ath12k_hal_reo_flush_cache_status(ab, hdr,
							  &reo_status);
			break;
		case HAL_REO_UNBLOCK_CACHE_STATUS:
			ath12k_hal_reo_unblk_cache_status(ab, hdr,
							  &reo_status);
			break;
		case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
			ath12k_hal_reo_flush_timeout_list_status(ab, hdr,
								 &reo_status);
			break;
		case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
			ath12k_hal_reo_desc_thresh_reached_status(ab, hdr,
								  &reo_status);
			break;
		case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
			ath12k_hal_reo_update_rx_reo_queue_status(ab, hdr,
								  &reo_status);
			break;
		default:
			ath12k_warn(ab, "Unknown reo status type %d\n", tag);
			continue;
		}

		spin_lock_bh(&dp->reo_cmd_lock);
		list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
			if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
				found = true;
				list_del(&cmd->list);
				break;
			}
		}
		spin_unlock_bh(&dp->reo_cmd_lock);

		if (found) {
			cmd->handler(dp, (void *)&cmd->data,
				     reo_status.uniform_hdr.cmd_status);
			kfree(cmd);
		}

		found = false;
	}

	ath12k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);
}

void ath12k_dp_rx_free(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct dp_srng *srng;
	int i;

	ath12k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
		if (ab->hw_params->rx_mac_buf_ring)
			ath12k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
		if (!ab->hw_params->rxdma1_enable) {
			srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
			ath12k_dp_srng_cleanup(ab, srng);
		}
	}

	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++)
		ath12k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);

	ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);

	ath12k_dp_rxdma_buf_free(ab);
}

void ath12k_dp_rx_pdev_free(struct ath12k_base *ab, int mac_id)
{
	struct ath12k *ar = ab->pdevs[mac_id].ar;

	ath12k_dp_rx_pdev_srng_free(ar);
}

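/* Program the rxdma refill ring TLV filter for QCN9274-class chips:
 * subscribe only to the TLVs the rx data path consumes and, where
 * supported, use compact word-masked mpdu_start/msdu_end TLVs to shrink
 * the rx descriptor.
 */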
int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct htt_rx_ring_tlv_filter tlv_filter = {0};
	u32 ring_id;
	int ret;
	u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;

	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;

	tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
	tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
	tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
				       HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
				       HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
	tlv_filter.offset_valid = true;
	tlv_filter.rx_packet_offset = hal_rx_desc_sz;

	tlv_filter.rx_mpdu_start_offset =
		ab->hal_rx_ops->rx_desc_get_mpdu_start_offset();
	tlv_filter.rx_msdu_end_offset =
		ab->hal_rx_ops->rx_desc_get_msdu_end_offset();

	if (ath12k_dp_wmask_compaction_rx_tlv_supported(ab)) {
		tlv_filter.rx_mpdu_start_wmask =
			ab->hw_params->hal_ops->rxdma_ring_wmask_rx_mpdu_start();
		tlv_filter.rx_msdu_end_wmask =
			ab->hw_params->hal_ops->rxdma_ring_wmask_rx_msdu_end();
		ath12k_dbg(ab, ATH12K_DBG_DATA,
			   "Configuring compact tlv masks rx_mpdu_start_wmask 0x%x rx_msdu_end_wmask 0x%x\n",
			   tlv_filter.rx_mpdu_start_wmask, tlv_filter.rx_msdu_end_wmask);
	}

	ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, 0,
					       HAL_RXDMA_BUF,
					       DP_RXDMA_REFILL_RING_SIZE,
					       &tlv_filter);

	return ret;
}

int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct htt_rx_ring_tlv_filter tlv_filter = {0};
	u32 ring_id;
	int ret = 0;
	u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
	int i;

	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;

	tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
	tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
	tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
				       HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
				       HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
	tlv_filter.offset_valid = true;
	tlv_filter.rx_packet_offset = hal_rx_desc_sz;

	tlv_filter.rx_header_offset = offsetof(struct hal_rx_desc_wcn7850, pkt_hdr_tlv);

	tlv_filter.rx_mpdu_start_offset =
		ab->hal_rx_ops->rx_desc_get_mpdu_start_offset();
	tlv_filter.rx_msdu_end_offset =
		ab->hal_rx_ops->rx_desc_get_msdu_end_offset();

	/* TODO: Selectively subscribe to the required qwords within
	 * msdu_end and mpdu_start, set up the mask in the message below
	 * and modify the rx_desc struct accordingly.
	 */

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
		ring_id = dp->rx_mac_buf_ring[i].ring_id;
		ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, i,
						       HAL_RXDMA_BUF,
						       DP_RXDMA_REFILL_RING_SIZE,
						       &tlv_filter);
	}

	return ret;
}

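/* Hand each DP srng over to the firmware via HTT so rxdma knows which
 * host rings to serve; the exact set depends on whether the chip has a
 * dedicated rxdma1 monitor engine.
 */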
int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	u32 ring_id;
	int i, ret;

	/* TODO: Need to verify the HTT setup for QCN9224 */
	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
	ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 0, HAL_RXDMA_BUF);
	if (ret) {
		ath12k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
			    ret);
		return ret;
	}

	if (ab->hw_params->rx_mac_buf_ring) {
		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
			ring_id = dp->rx_mac_buf_ring[i].ring_id;
			ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
							  i, HAL_RXDMA_BUF);
			if (ret) {
				ath12k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
					    i, ret);
				return ret;
			}
		}
	}

	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) {
		ring_id = dp->rxdma_err_dst_ring[i].ring_id;
		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
						  i, HAL_RXDMA_DST);
		if (ret) {
			ath12k_warn(ab, "failed to configure rxdma_err_dst_ring%d %d\n",
				    i, ret);
			return ret;
		}
	}

	if (ab->hw_params->rxdma1_enable) {
		ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
						  0, HAL_RXDMA_MONITOR_BUF);
		if (ret) {
			ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
				    ret);
			return ret;
		}
	} else {
		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
			ring_id =
				dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
			ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, i,
							  HAL_RXDMA_MONITOR_STATUS);
			if (ret) {
				ath12k_warn(ab,
					    "failed to configure mon_status_refill_ring%d %d\n",
					    i, ret);
				return ret;
			}
		}
	}

	ret = ab->hw_params->hw_ops->rxdma_ring_sel_config(ab);
	if (ret) {
		ath12k_warn(ab, "failed to setup rxdma ring selection config\n");
		return ret;
	}

	return 0;
}

int ath12k_dp_rx_alloc(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct dp_srng *srng;
	int i, ret;

	idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
	spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);

	ret = ath12k_dp_srng_setup(ab,
				   &dp->rx_refill_buf_ring.refill_buf_ring,
				   HAL_RXDMA_BUF, 0, 0,
				   DP_RXDMA_BUF_RING_SIZE);
	if (ret) {
		ath12k_warn(ab, "failed to setup rx_refill_buf_ring\n");
		return ret;
	}

	if (ab->hw_params->rx_mac_buf_ring) {
		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
			ret = ath12k_dp_srng_setup(ab,
						   &dp->rx_mac_buf_ring[i],
						   HAL_RXDMA_BUF, 1,
						   i, DP_RX_MAC_BUF_RING_SIZE);
			if (ret) {
				ath12k_warn(ab, "failed to setup rx_mac_buf_ring %d\n",
					    i);
				return ret;
			}
		}
	}

	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) {
		ret = ath12k_dp_srng_setup(ab, &dp->rxdma_err_dst_ring[i],
					   HAL_RXDMA_DST, 0, i,
					   DP_RXDMA_ERR_DST_RING_SIZE);
		if (ret) {
			ath12k_warn(ab, "failed to setup rxdma_err_dst_ring %d\n", i);
			return ret;
		}
	}

	if (ab->hw_params->rxdma1_enable) {
		ret = ath12k_dp_srng_setup(ab,
					   &dp->rxdma_mon_buf_ring.refill_buf_ring,
					   HAL_RXDMA_MONITOR_BUF, 0, 0,
					   DP_RXDMA_MONITOR_BUF_RING_SIZE);
		if (ret) {
			ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n");
			return ret;
		}
	} else {
		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
			idr_init(&dp->rx_mon_status_refill_ring[i].bufs_idr);
			spin_lock_init(&dp->rx_mon_status_refill_ring[i].idr_lock);
		}

		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
			srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
			ret = ath12k_dp_srng_setup(ab, srng,
						   HAL_RXDMA_MONITOR_STATUS, 0, i,
						   DP_RXDMA_MON_STATUS_RING_SIZE);
			if (ret) {
				ath12k_warn(ab, "failed to setup mon status ring %d\n",
					    i);
				return ret;
			}
		}
	}

	ret = ath12k_dp_rxdma_buf_setup(ab);
	if (ret) {
		ath12k_warn(ab, "failed to setup rxdma ring\n");
		return ret;
	}

	return 0;
}

int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id)
{
	struct ath12k *ar = ab->pdevs[mac_id].ar;
	struct ath12k_pdev_dp *dp = &ar->dp;
	u32 ring_id;
	int i;
	int ret;

	if (!ab->hw_params->rxdma1_enable)
		goto out;

	ret = ath12k_dp_rx_pdev_srng_alloc(ar);
	if (ret) {
		ath12k_warn(ab, "failed to setup rx srngs\n");
		return ret;
	}

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
		ring_id = dp->rxdma_mon_dst_ring[i].ring_id;
		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
						  mac_id + i,
						  HAL_RXDMA_MONITOR_DST);
		if (ret) {
			ath12k_warn(ab,
				    "failed to configure rxdma_mon_dst_ring %d %d\n",
				    i, ret);
			return ret;
		}
	}
out:
	return 0;
}

static int ath12k_dp_rx_pdev_mon_status_attach(struct ath12k *ar)
{
	struct ath12k_pdev_dp *dp = &ar->dp;
	struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&dp->mon_data;

	skb_queue_head_init(&pmon->rx_status_q);

	pmon->mon_ppdu_status = DP_PPDU_STATUS_START;

	memset(&pmon->rx_mon_stats, 0,
	       sizeof(pmon->rx_mon_stats));
	return 0;
}

int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar)
{
	struct ath12k_pdev_dp *dp = &ar->dp;
	struct ath12k_mon_data *pmon = &dp->mon_data;
	int ret = 0;

	ret = ath12k_dp_rx_pdev_mon_status_attach(ar);
	if (ret) {
		ath12k_warn(ar->ab, "pdev_mon_status_attach() failed");
		return ret;
	}

	pmon->mon_last_linkdesc_paddr = 0;
	pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
	spin_lock_init(&pmon->mon_lock);

	if (!ar->ab->hw_params->rxdma1_enable)
		return 0;

	INIT_LIST_HEAD(&pmon->dp_rx_mon_mpdu_list);
	pmon->mon_mpdu = NULL;

	return 0;
}