// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */

#include "dp_rx.h"
#include "debug.h"
#include "hif.h"

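/* Post a single receive buffer to the destination ring of a pipe. The
 * caller must hold ab->ce.ce_lock (checked by the lockdep assertion
 * below); the SRNG lock is taken here. Returns -ENOSPC when no free
 * destination descriptor is available.
 */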
static int ath12k_ce_rx_buf_enqueue_pipe(struct ath12k_ce_pipe *pipe,
                                         struct sk_buff *skb, dma_addr_t paddr)
{
        struct ath12k_base *ab = pipe->ab;
        struct ath12k_ce_ring *ring = pipe->dest_ring;
        struct hal_srng *srng;
        unsigned int write_index;
        unsigned int nentries_mask = ring->nentries_mask;
        struct hal_ce_srng_dest_desc *desc;
        int ret;

        lockdep_assert_held(&ab->ce.ce_lock);

        write_index = ring->write_index;

        srng = &ab->hal.srng_list[ring->hal_ring_id];

        spin_lock_bh(&srng->lock);

        ath12k_hal_srng_access_begin(ab, srng);

        if (unlikely(ath12k_hal_srng_src_num_free(ab, srng, false) < 1)) {
                ret = -ENOSPC;
                goto exit;
        }

        desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
        if (!desc) {
                ret = -ENOSPC;
                goto exit;
        }

        ath12k_hal_ce_dst_set_desc(&ab->hal, desc, paddr);

        ring->skb[write_index] = skb;
        write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
        ring->write_index = write_index;

        pipe->rx_buf_needed--;

        ret = 0;
exit:
        ath12k_hal_srng_access_end(ab, srng);

        spin_unlock_bh(&srng->lock);

        return ret;
}

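/* Refill the destination ring of a pipe: allocate, DMA-map and enqueue
 * skbs of pipe->buf_sz bytes until pipe->rx_buf_needed is drained. On
 * failure the current skb is unwound and the error is returned so the
 * caller can schedule a retry.
 */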
static int ath12k_ce_rx_post_pipe(struct ath12k_ce_pipe *pipe)
{
        struct ath12k_base *ab = pipe->ab;
        struct sk_buff *skb;
        dma_addr_t paddr;
        int ret = 0;

        if (!(pipe->dest_ring || pipe->status_ring))
                return 0;

        spin_lock_bh(&ab->ce.ce_lock);
        while (pipe->rx_buf_needed) {
                skb = dev_alloc_skb(pipe->buf_sz);
                if (!skb) {
                        ret = -ENOMEM;
                        goto exit;
                }

                WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));

                paddr = dma_map_single(ab->dev, skb->data,
                                       skb->len + skb_tailroom(skb),
                                       DMA_FROM_DEVICE);
                if (unlikely(dma_mapping_error(ab->dev, paddr))) {
                        ath12k_warn(ab, "failed to dma map ce rx buf\n");
                        dev_kfree_skb_any(skb);
                        ret = -EIO;
                        goto exit;
                }

                ATH12K_SKB_RXCB(skb)->paddr = paddr;

                ret = ath12k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);
                if (ret) {
                        ath12k_dbg(ab, ATH12K_DBG_CE, "failed to enqueue rx buf: %d\n",
                                   ret);
                        dma_unmap_single(ab->dev, paddr,
                                         skb->len + skb_tailroom(skb),
                                         DMA_FROM_DEVICE);
                        dev_kfree_skb_any(skb);
                        goto exit;
                }
        }

exit:
        spin_unlock_bh(&ab->ce.ce_lock);
        return ret;
}

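/* Reap one receive completion: pop the next entry from the status
 * ring, read the completed byte count from it and hand back the skb
 * that was posted at the destination ring's sw_index. rx_buf_needed is
 * bumped so the slot gets refilled later.
 */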
static int ath12k_ce_completed_recv_next(struct ath12k_ce_pipe *pipe,
                                         struct sk_buff **skb, int *nbytes)
{
        struct ath12k_base *ab = pipe->ab;
        struct hal_ce_srng_dst_status_desc *desc;
        struct hal_srng *srng;
        unsigned int sw_index;
        unsigned int nentries_mask;
        int ret = 0;

        spin_lock_bh(&ab->ce.ce_lock);

        sw_index = pipe->dest_ring->sw_index;
        nentries_mask = pipe->dest_ring->nentries_mask;

        srng = &ab->hal.srng_list[pipe->status_ring->hal_ring_id];

        spin_lock_bh(&srng->lock);

        ath12k_hal_srng_access_begin(ab, srng);

        desc = ath12k_hal_srng_dst_get_next_entry(ab, srng);
        if (!desc) {
                ret = -EIO;
                goto err;
        }

        *nbytes = ath12k_hal_ce_dst_status_get_length(&ab->hal, desc);

        *skb = pipe->dest_ring->skb[sw_index];
        pipe->dest_ring->skb[sw_index] = NULL;

        sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
        pipe->dest_ring->sw_index = sw_index;

        pipe->rx_buf_needed++;
err:
        ath12k_hal_srng_access_end(ab, srng);

        spin_unlock_bh(&srng->lock);

        spin_unlock_bh(&ab->ce.ce_lock);

        return ret;
}

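/* Receive-side service loop of a pipe, done in two phases: completed
 * skbs are first collected onto a local list (the CE and SRNG locks
 * are taken inside ath12k_ce_completed_recv_next()), then dispatched
 * to the registered recv_cb without those locks held. Afterwards fresh
 * buffers are reposted, arming the replenish timer on failure.
 */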
static void ath12k_ce_recv_process_cb(struct ath12k_ce_pipe *pipe)
{
        struct ath12k_base *ab = pipe->ab;
        struct sk_buff *skb;
        struct sk_buff_head list;
        unsigned int nbytes, max_nbytes;
        int ret;

        __skb_queue_head_init(&list);
        while (ath12k_ce_completed_recv_next(pipe, &skb, &nbytes) == 0) {
                max_nbytes = skb->len + skb_tailroom(skb);
                dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
                                 max_nbytes, DMA_FROM_DEVICE);

                if (unlikely(max_nbytes < nbytes || nbytes == 0)) {
                        ath12k_warn(ab, "unexpected rx length (nbytes %d, max %d)",
                                    nbytes, max_nbytes);
                        dev_kfree_skb_any(skb);
                        continue;
                }

                skb_put(skb, nbytes);
                __skb_queue_tail(&list, skb);
        }

        while ((skb = __skb_dequeue(&list))) {
                ath12k_dbg(ab, ATH12K_DBG_CE, "rx ce pipe %d len %d\n",
                           pipe->pipe_num, skb->len);
                pipe->recv_cb(ab, skb);
        }

        ret = ath12k_ce_rx_post_pipe(pipe);
        if (ret && ret != -ENOSPC) {
                ath12k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
                            pipe->pipe_num, ret);
                mod_timer(&ab->rx_replenish_retry,
                          jiffies + ATH12K_CE_RX_POST_RETRY_JIFFIES);
        }
}

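/* Reap one completed transmit descriptor from the source ring and
 * return the skb that was queued at sw_index, or ERR_PTR(-EIO) when
 * nothing has completed yet. No ath12k_hal_srng_access_end() is called
 * here, presumably because reaping only advances the software index
 * and does not need to sync the hardware pointer.
 */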
static struct sk_buff *ath12k_ce_completed_send_next(struct ath12k_ce_pipe *pipe)
{
        struct ath12k_base *ab = pipe->ab;
        struct hal_ce_srng_src_desc *desc;
        struct hal_srng *srng;
        unsigned int sw_index;
        unsigned int nentries_mask;
        struct sk_buff *skb;

        spin_lock_bh(&ab->ce.ce_lock);

        sw_index = pipe->src_ring->sw_index;
        nentries_mask = pipe->src_ring->nentries_mask;

        srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

        spin_lock_bh(&srng->lock);

        ath12k_hal_srng_access_begin(ab, srng);

        desc = ath12k_hal_srng_src_reap_next(ab, srng);
        if (!desc) {
                skb = ERR_PTR(-EIO);
                goto err_unlock;
        }

        skb = pipe->src_ring->skb[sw_index];

        pipe->src_ring->skb[sw_index] = NULL;

        sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
        pipe->src_ring->sw_index = sw_index;

err_unlock:
        spin_unlock_bh(&srng->lock);

        spin_unlock_bh(&ab->ce.ce_lock);

        return skb;
}

static void ath12k_ce_send_done_cb(struct ath12k_ce_pipe *pipe)
{
        struct ath12k_base *ab = pipe->ab;
        struct sk_buff *skb;

        while (!IS_ERR(skb = ath12k_ce_completed_send_next(pipe))) {
                if (!skb)
                        continue;

                dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr, skb->len,
                                 DMA_TO_DEVICE);
                dev_kfree_skb_any(skb);
        }
}

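/* Fill in the MSI address/data of the ring params so the ring can fire
 * a per-ring MSI. If no MSI vectors were assigned for the "CE" block,
 * the params are left untouched and HAL_SRNG_FLAGS_MSI_INTR stays
 * clear.
 */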
static void ath12k_ce_srng_msi_ring_params_setup(struct ath12k_base *ab, u32 ce_id,
                                                 struct hal_srng_params *ring_params)
{
        u32 msi_data_start;
        u32 msi_data_count, msi_data_idx;
        u32 msi_irq_start;
        u32 addr_lo;
        u32 addr_hi;
        int ret;

        ret = ath12k_hif_get_user_msi_vector(ab, "CE",
                                             &msi_data_count, &msi_data_start,
                                             &msi_irq_start);

        if (ret)
                return;

        ath12k_hif_get_msi_address(ab, &addr_lo, &addr_hi);
        ath12k_hif_get_ce_msi_idx(ab, ce_id, &msi_data_idx);

        ring_params->msi_addr = addr_lo;
        ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
        ring_params->msi_data = (msi_data_idx % msi_data_count) + msi_data_start;
        ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}

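/* Configure and set up the HAL SRNG backing a CE ring. Interrupt
 * thresholds are chosen per ring type unless the CE has interrupts
 * disabled via CE_ATTR_DIS_INTR. On success the HAL ring id returned
 * by ath12k_hal_srng_setup() is stored in ce_ring->hal_ring_id.
 */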
static int ath12k_ce_init_ring(struct ath12k_base *ab,
                               struct ath12k_ce_ring *ce_ring,
                               int ce_id, enum hal_ring_type type)
{
        struct hal_srng_params params = {};
        int ret;

        params.ring_base_paddr = ce_ring->base_addr_ce_space;
        params.ring_base_vaddr = ce_ring->base_addr_owner_space;
        params.num_entries = ce_ring->nentries;

        if (!(CE_ATTR_DIS_INTR & ab->hw_params->host_ce_config[ce_id].flags))
                ath12k_ce_srng_msi_ring_params_setup(ab, ce_id, &params);

        switch (type) {
        case HAL_CE_SRC:
                if (!(CE_ATTR_DIS_INTR & ab->hw_params->host_ce_config[ce_id].flags))
                        params.intr_batch_cntr_thres_entries = 1;
                break;
        case HAL_CE_DST:
                params.max_buffer_len = ab->hw_params->host_ce_config[ce_id].src_sz_max;
                if (!(ab->hw_params->host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
                        params.intr_timer_thres_us = 1024;
                        params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
                        params.low_threshold = ce_ring->nentries - 3;
                }
                break;
        case HAL_CE_DST_STATUS:
                if (!(ab->hw_params->host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
                        params.intr_batch_cntr_thres_entries = 1;
                        params.intr_timer_thres_us = 0x1000;
                }
                break;
        default:
                ath12k_warn(ab, "Invalid CE ring type %d\n", type);
                return -EINVAL;
        }

        /* TODO: Init other params needed by HAL to init the ring */

        ret = ath12k_hal_srng_setup(ab, type, ce_id, 0, &params);
        if (ret < 0) {
                ath12k_warn(ab, "failed to setup srng: %d ring_id %d\n",
                            ret, ce_id);
                return ret;
        }

        ce_ring->hal_ring_id = ret;

        return 0;
}

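/* Allocate a CE ring and its coherent descriptor memory. nentries must
 * be a power of two since indices wrap via an AND with nentries_mask;
 * callers guarantee this with roundup_pow_of_two().
 */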
static struct ath12k_ce_ring *
ath12k_ce_alloc_ring(struct ath12k_base *ab, int nentries, int desc_sz)
{
        struct ath12k_ce_ring *ce_ring;
        dma_addr_t base_addr;

        ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), GFP_KERNEL);
        if (!ce_ring)
                return ERR_PTR(-ENOMEM);

        ce_ring->nentries = nentries;
        ce_ring->nentries_mask = nentries - 1;

        /* Legacy platforms that do not support cache
         * coherent DMA are unsupported
         */
        ce_ring->base_addr_owner_space_unaligned =
                dma_alloc_coherent(ab->dev,
                                   nentries * desc_sz + CE_DESC_RING_ALIGN,
                                   &base_addr, GFP_KERNEL);
        if (!ce_ring->base_addr_owner_space_unaligned) {
                kfree(ce_ring);
                return ERR_PTR(-ENOMEM);
        }

        ce_ring->base_addr_ce_space_unaligned = base_addr;

        ce_ring->base_addr_owner_space =
                PTR_ALIGN(ce_ring->base_addr_owner_space_unaligned,
                          CE_DESC_RING_ALIGN);

        ce_ring->base_addr_ce_space = ALIGN(ce_ring->base_addr_ce_space_unaligned,
                                            CE_DESC_RING_ALIGN);

        return ce_ring;
}

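/* Allocate the rings of one pipe according to its ce_attr: a source
 * ring when src_nentries is set, and a destination ring plus a
 * matching status ring when dest_nentries is set.
 */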
static int ath12k_ce_alloc_pipe(struct ath12k_base *ab, int ce_id)
{
        struct ath12k_hal *hal = &ab->hal;
        struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
        const struct ce_attr *attr = &ab->hw_params->host_ce_config[ce_id];
        struct ath12k_ce_ring *ring;
        int nentries;
        int desc_sz;

        pipe->attr_flags = attr->flags;

        if (attr->src_nentries) {
                pipe->send_cb = ath12k_ce_send_done_cb;
                nentries = roundup_pow_of_two(attr->src_nentries);
                desc_sz = ath12k_hal_ce_get_desc_size(hal, HAL_CE_DESC_SRC);
                ring = ath12k_ce_alloc_ring(ab, nentries, desc_sz);
                if (IS_ERR(ring))
                        return PTR_ERR(ring);
                pipe->src_ring = ring;
        }

        if (attr->dest_nentries) {
                pipe->recv_cb = attr->recv_cb;
                nentries = roundup_pow_of_two(attr->dest_nentries);
                desc_sz = ath12k_hal_ce_get_desc_size(hal, HAL_CE_DESC_DST);
                ring = ath12k_ce_alloc_ring(ab, nentries, desc_sz);
                if (IS_ERR(ring))
                        return PTR_ERR(ring);
                pipe->dest_ring = ring;

                desc_sz = ath12k_hal_ce_get_desc_size(hal, HAL_CE_DESC_DST_STATUS);
                ring = ath12k_ce_alloc_ring(ab, nentries, desc_sz);
                if (IS_ERR(ring))
                        return PTR_ERR(ring);
                pipe->status_ring = ring;
        }

        return 0;
}

void ath12k_ce_per_engine_service(struct ath12k_base *ab, u16 ce_id)
{
        struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];

        if (pipe->send_cb)
                pipe->send_cb(pipe);

        if (pipe->recv_cb)
                ath12k_ce_recv_process_cb(pipe);
}

void ath12k_ce_poll_send_completed(struct ath12k_base *ab, u8 pipe_id)
{
        struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];

        if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && pipe->send_cb)
                pipe->send_cb(pipe);
}

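/* Enqueue an skb for transmission on a copy engine pipe. The buffer
 * must already be DMA-mapped, with the mapping stored in the skb
 * control block. An illustrative caller sketch (the local variable
 * names here are hypothetical):
 *
 *      paddr = dma_map_single(ab->dev, skb->data, skb->len,
 *                             DMA_TO_DEVICE);
 *      if (dma_mapping_error(ab->dev, paddr))
 *              return -ENOMEM;
 *      ATH12K_SKB_CB(skb)->paddr = paddr;
 *      ret = ath12k_ce_send(ab, skb, pipe_id, transfer_id);
 *
 * Returns -ENOBUFS when the source ring is full and -ESHUTDOWN while a
 * crash flush is in progress.
 */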
int ath12k_ce_send(struct ath12k_base *ab, struct sk_buff *skb, u8 pipe_id,
                   u16 transfer_id)
{
        struct ath12k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
        struct hal_ce_srng_src_desc *desc;
        struct hal_srng *srng;
        unsigned int write_index, sw_index;
        unsigned int nentries_mask;
        int ret = 0;
        u8 byte_swap_data = 0;
        int num_used;

        /* Check if some entries could be regained by handling tx completion if
         * the CE has interrupts disabled and the number of used entries is
         * more than the defined usage threshold.
         */
        if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
                spin_lock_bh(&ab->ce.ce_lock);
                write_index = pipe->src_ring->write_index;

                sw_index = pipe->src_ring->sw_index;

                if (write_index >= sw_index)
                        num_used = write_index - sw_index;
                else
                        num_used = pipe->src_ring->nentries - sw_index +
                                   write_index;

                spin_unlock_bh(&ab->ce.ce_lock);

                if (num_used > ATH12K_CE_USAGE_THRESHOLD)
                        ath12k_ce_poll_send_completed(ab, pipe->pipe_num);
        }

        if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
                return -ESHUTDOWN;

        spin_lock_bh(&ab->ce.ce_lock);

        write_index = pipe->src_ring->write_index;
        nentries_mask = pipe->src_ring->nentries_mask;

        srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];

        spin_lock_bh(&srng->lock);

        ath12k_hal_srng_access_begin(ab, srng);

        if (unlikely(ath12k_hal_srng_src_num_free(ab, srng, false) < 1)) {
                ath12k_hal_srng_access_end(ab, srng);
                ret = -ENOBUFS;
                goto unlock;
        }

        desc = ath12k_hal_srng_src_get_next_reaped(ab, srng);
        if (!desc) {
                ath12k_hal_srng_access_end(ab, srng);
                ret = -ENOBUFS;
                goto unlock;
        }

        if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
                byte_swap_data = 1;

        ath12k_hal_ce_src_set_desc(&ab->hal, desc, ATH12K_SKB_CB(skb)->paddr,
                                   skb->len, transfer_id, byte_swap_data);

        pipe->src_ring->skb[write_index] = skb;
        pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
                                                       write_index);

        ath12k_hal_srng_access_end(ab, srng);

unlock:
        spin_unlock_bh(&srng->lock);

        spin_unlock_bh(&ab->ce.ce_lock);

        return ret;
}

static void ath12k_ce_rx_pipe_cleanup(struct ath12k_ce_pipe *pipe)
{
        struct ath12k_base *ab = pipe->ab;
        struct ath12k_ce_ring *ring = pipe->dest_ring;
        struct sk_buff *skb;
        int i;

        if (!(ring && pipe->buf_sz))
                return;

        for (i = 0; i < ring->nentries; i++) {
                skb = ring->skb[i];
                if (!skb)
                        continue;

                ring->skb[i] = NULL;
                dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
                                 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
                dev_kfree_skb_any(skb);
        }
}

void ath12k_ce_cleanup_pipes(struct ath12k_base *ab)
{
        struct ath12k_ce_pipe *pipe;
        int pipe_num;

        for (pipe_num = 0; pipe_num < ab->hw_params->ce_count; pipe_num++) {
                pipe = &ab->ce.ce_pipe[pipe_num];
                ath12k_ce_rx_pipe_cleanup(pipe);

                /* Cleanup any src CE's which have interrupts disabled */
                ath12k_ce_poll_send_completed(ab, pipe_num);

                /* NOTE: Should we also clean up tx buffer in all pipes? */
        }
}

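/* Repost receive buffers on all pipes. -ENOSPC (ring already full) is
 * tolerated; any other failure arms rx_replenish_retry so the refill
 * is retried from timer context.
 */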
void ath12k_ce_rx_post_buf(struct ath12k_base *ab)
{
        struct ath12k_ce_pipe *pipe;
        int i;
        int ret;

        for (i = 0; i < ab->hw_params->ce_count; i++) {
                pipe = &ab->ce.ce_pipe[i];
                ret = ath12k_ce_rx_post_pipe(pipe);
                if (ret) {
                        if (ret == -ENOSPC)
                                continue;

                        ath12k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
                                    i, ret);
                        mod_timer(&ab->rx_replenish_retry,
                                  jiffies + ATH12K_CE_RX_POST_RETRY_JIFFIES);

                        return;
                }
        }
}

void ath12k_ce_rx_replenish_retry(struct timer_list *t)
{
        struct ath12k_base *ab = timer_container_of(ab, t, rx_replenish_retry);

        ath12k_ce_rx_post_buf(ab);
}

static void ath12k_ce_shadow_config(struct ath12k_base *ab)
{
        int i;

        for (i = 0; i < ab->hw_params->ce_count; i++) {
                if (ab->hw_params->host_ce_config[i].src_nentries)
                        ath12k_hal_srng_update_shadow_config(ab, HAL_CE_SRC, i);

                if (ab->hw_params->host_ce_config[i].dest_nentries) {
                        ath12k_hal_srng_update_shadow_config(ab, HAL_CE_DST, i);
                        ath12k_hal_srng_update_shadow_config(ab, HAL_CE_DST_STATUS, i);
                }
        }
}

void ath12k_ce_get_shadow_config(struct ath12k_base *ab,
                                 u32 **shadow_cfg, u32 *shadow_cfg_len)
{
        if (!ab->hw_params->supports_shadow_regs)
                return;

        ath12k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);

        /* shadow is already configured */
        if (*shadow_cfg_len)
                return;

        /* shadow isn't configured yet, configure now.
         * non-CE srngs are configured first, then
         * all CE srngs.
         */
        ath12k_hal_srng_shadow_config(ab);
        ath12k_ce_shadow_config(ab);

        /* get the shadow configuration */
        ath12k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);
}

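/* Initialize the HAL SRNGs for all previously allocated pipe rings and
 * reset the software indices. Two destination entries are held back
 * (nentries - 2), presumably so the write index can never wrap onto
 * the read index while refilling.
 */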
int ath12k_ce_init_pipes(struct ath12k_base *ab)
{
        struct ath12k_ce_pipe *pipe;
        int i;
        int ret;

        ath12k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v3,
                                    &ab->qmi.ce_cfg.shadow_reg_v3_len);

        for (i = 0; i < ab->hw_params->ce_count; i++) {
                pipe = &ab->ce.ce_pipe[i];

                if (pipe->src_ring) {
                        ret = ath12k_ce_init_ring(ab, pipe->src_ring, i,
                                                  HAL_CE_SRC);
                        if (ret) {
                                ath12k_warn(ab, "failed to init src ring: %d\n",
                                            ret);
                                /* Should we clear any partial init */
                                return ret;
                        }

                        pipe->src_ring->write_index = 0;
                        pipe->src_ring->sw_index = 0;
                }

                if (pipe->dest_ring) {
                        ret = ath12k_ce_init_ring(ab, pipe->dest_ring, i,
                                                  HAL_CE_DST);
                        if (ret) {
                                ath12k_warn(ab, "failed to init dest ring: %d\n",
                                            ret);
                                /* Should we clear any partial init */
                                return ret;
                        }

                        pipe->rx_buf_needed = pipe->dest_ring->nentries ?
                                              pipe->dest_ring->nentries - 2 : 0;

                        pipe->dest_ring->write_index = 0;
                        pipe->dest_ring->sw_index = 0;
                }

                if (pipe->status_ring) {
                        ret = ath12k_ce_init_ring(ab, pipe->status_ring, i,
                                                  HAL_CE_DST_STATUS);
                        if (ret) {
                                ath12k_warn(ab, "failed to init dest status ring: %d\n",
                                            ret);
                                /* Should we clear any partial init */
                                return ret;
                        }

                        pipe->status_ring->write_index = 0;
                        pipe->status_ring->sw_index = 0;
                }
        }

        return 0;
}

void ath12k_ce_free_pipes(struct ath12k_base *ab)
{
        struct ath12k_hal *hal = &ab->hal;
        struct ath12k_ce_pipe *pipe;
        int desc_sz;
        int i;

        for (i = 0; i < ab->hw_params->ce_count; i++) {
                pipe = &ab->ce.ce_pipe[i];

                if (pipe->src_ring) {
                        desc_sz = ath12k_hal_ce_get_desc_size(hal,
                                                              HAL_CE_DESC_SRC);
                        dma_free_coherent(ab->dev,
                                          pipe->src_ring->nentries * desc_sz +
                                          CE_DESC_RING_ALIGN,
                                          pipe->src_ring->base_addr_owner_space_unaligned,
                                          pipe->src_ring->base_addr_ce_space_unaligned);
                        kfree(pipe->src_ring);
                        pipe->src_ring = NULL;
                }

                if (pipe->dest_ring) {
                        desc_sz = ath12k_hal_ce_get_desc_size(hal,
                                                              HAL_CE_DESC_DST);
                        dma_free_coherent(ab->dev,
                                          pipe->dest_ring->nentries * desc_sz +
                                          CE_DESC_RING_ALIGN,
                                          pipe->dest_ring->base_addr_owner_space_unaligned,
                                          pipe->dest_ring->base_addr_ce_space_unaligned);
                        kfree(pipe->dest_ring);
                        pipe->dest_ring = NULL;
                }

                if (pipe->status_ring) {
                        desc_sz = ath12k_hal_ce_get_desc_size(hal,
                                                              HAL_CE_DESC_DST_STATUS);
                        dma_free_coherent(ab->dev,
                                          pipe->status_ring->nentries * desc_sz +
                                          CE_DESC_RING_ALIGN,
                                          pipe->status_ring->base_addr_owner_space_unaligned,
                                          pipe->status_ring->base_addr_ce_space_unaligned);
                        kfree(pipe->status_ring);
                        pipe->status_ring = NULL;
                }
        }
}

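/* Allocate every pipe described in hw_params->host_ce_config. On the
 * first failure all partially allocated pipes are freed again and the
 * error is returned.
 */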
int ath12k_ce_alloc_pipes(struct ath12k_base *ab)
{
        struct ath12k_ce_pipe *pipe;
        int i;
        int ret;
        const struct ce_attr *attr;

        spin_lock_init(&ab->ce.ce_lock);

        for (i = 0; i < ab->hw_params->ce_count; i++) {
                attr = &ab->hw_params->host_ce_config[i];
                pipe = &ab->ce.ce_pipe[i];
                pipe->pipe_num = i;
                pipe->ab = ab;
                pipe->buf_sz = attr->src_sz_max;

                ret = ath12k_ce_alloc_pipe(ab, i);
                if (ret) {
                        /* Free any partial successful allocation */
                        ath12k_ce_free_pipes(ab);
                        return ret;
                }
        }

        return 0;
}

int ath12k_ce_get_attr_flags(struct ath12k_base *ab, int ce_id)
{
        if (ce_id >= ab->hw_params->ce_count)
                return -EINVAL;

        return ab->hw_params->host_ce_config[ce_id].flags;
}