Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: ISC
2/*
3 * Copyright (c) 2005-2011 Atheros Communications Inc.
4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
5 * Copyright (c) 2018 The Linux Foundation. All rights reserved.
6 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
7 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
8 */
9
10#include <linux/export.h>
11#include "hif.h"
12#include "ce.h"
13#include "debug.h"
14
15/*
16 * Support for Copy Engine hardware, which is mainly used for
17 * communication between Host and Target over a PCIe interconnect.
18 */
19
20/*
21 * A single CopyEngine (CE) comprises two "rings":
22 * a source ring
23 * a destination ring
24 *
25 * Each ring consists of a number of descriptors which specify
26 * an address, length, and meta-data.
27 *
28 * Typically, one side of the PCIe/AHB/SNOC interconnect (Host or Target)
29 * controls one ring and the other side controls the other ring.
30 * The source side chooses when to initiate a transfer and it
31 * chooses what to send (buffer address, length). The destination
32 * side keeps a supply of "anonymous receive buffers" available and
33 * it handles incoming data as it arrives (when the destination
34 * receives an interrupt).
35 *
36 * The sender may send a simple buffer (address/length) or it may
37 * send a small list of buffers. When a small list is sent, hardware
38 * "gathers" these and they end up in a single destination buffer
39 * with a single interrupt.
40 *
41 * There are several "contexts" managed by this layer -- more, it
42 * may seem -- than should be needed. These are provided mainly for
43 * maximum flexibility and especially to facilitate a simpler HIF
44 * implementation. There are per-CopyEngine recv, send, and watermark
45 * contexts. These are supplied by the caller when a recv, send,
46 * or watermark handler is established and they are echoed back to
47 * the caller when the respective callbacks are invoked. There is
48 * also a per-transfer context supplied by the caller when a buffer
49 * (or sendlist) is sent and when a buffer is enqueued for recv.
50 * These per-transfer contexts are echoed back to the caller when
51 * the buffer is sent/received.
52 */
53
54static inline u32 shadow_sr_wr_ind_addr(struct ath10k *ar,
55 struct ath10k_ce_pipe *ce_state)
56{
57 u32 ce_id = ce_state->id;
58 u32 addr = 0;
59
60 switch (ce_id) {
61 case 0:
62 addr = 0x00032000;
63 break;
64 case 3:
65 addr = 0x0003200C;
66 break;
67 case 4:
68 addr = 0x00032010;
69 break;
70 case 5:
71 addr = 0x00032014;
72 break;
73 case 7:
74 addr = 0x0003201C;
75 break;
76 default:
77 ath10k_warn(ar, "invalid CE id: %d", ce_id);
78 break;
79 }
80 return addr;
81}
82
83static inline unsigned int
84ath10k_set_ring_byte(unsigned int offset,
85 const struct ath10k_hw_ce_regs_addr_map *addr_map)
86{
87 return ((offset << addr_map->lsb) & addr_map->mask);
88}
89
/* Read a CE register through the bus-specific backend ops. */
static inline u32 ath10k_ce_read32(struct ath10k *ar, u32 offset)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	return ce->bus_ops->read32(ar, offset);
}
96
/* Write a CE register through the bus-specific backend ops. */
static inline void ath10k_ce_write32(struct ath10k *ar, u32 offset, u32 value)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	ce->bus_ops->write32(ar, offset, value);
}
103
/* Publish a new destination ring write index to the hardware. */
static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
						       u32 ce_ctrl_addr,
						       unsigned int n)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->dst_wr_index_addr, n);
}
111
/* Read back the destination ring write index from the hardware. */
static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
						      u32 ce_ctrl_addr)
{
	return ath10k_ce_read32(ar, ce_ctrl_addr +
				ar->hw_ce_regs->dst_wr_index_addr);
}
118
/* Publish a new source ring write index to the hardware. */
static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
						      u32 ce_ctrl_addr,
						      unsigned int n)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->sr_wr_index_addr, n);
}
126
/* Read back the source ring write index from the hardware. */
static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	return ath10k_ce_read32(ar, ce_ctrl_addr +
				ar->hw_ce_regs->sr_wr_index_addr);
}
133
/*
 * Read the source ring read index from the firmware-maintained
 * RRI (ring read index) copy in host DDR, avoiding a register access.
 */
static inline u32 ath10k_ce_src_ring_read_index_from_ddr(struct ath10k *ar,
							 u32 ce_id)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	return ce->vaddr_rri[ce_id] & CE_DDR_RRI_MASK;
}
141
142static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
143 u32 ce_ctrl_addr)
144{
145 struct ath10k_ce *ce = ath10k_ce_priv(ar);
146 u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr);
147 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
148 u32 index;
149
150 if (ar->hw_params.rri_on_ddr &&
151 (ce_state->attr_flags & CE_ATTR_DIS_INTR))
152 index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_id);
153 else
154 index = ath10k_ce_read32(ar, ce_ctrl_addr +
155 ar->hw_ce_regs->current_srri_addr);
156
157 return index;
158}
159
/* Publish the source ring write index via the CE's shadow register. */
static inline void
ath10k_ce_shadow_src_ring_write_index_set(struct ath10k *ar,
					  struct ath10k_ce_pipe *ce_state,
					  unsigned int value)
{
	ath10k_ce_write32(ar, shadow_sr_wr_ind_addr(ar, ce_state), value);
}
167
/*
 * Program the source ring base address: always the low 32 bits,
 * and the high bits too on targets that provide the extended-address
 * op (64-bit descriptor targets).
 */
static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
						    u32 ce_id,
						    u64 addr)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	u32 ce_ctrl_addr = ath10k_ce_base_address(ar, ce_id);
	u32 addr_lo = lower_32_bits(addr);

	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->sr_base_addr_lo, addr_lo);

	if (ce_state->ops->ce_set_src_ring_base_addr_hi) {
		ce_state->ops->ce_set_src_ring_base_addr_hi(ar, ce_ctrl_addr,
							    addr);
	}
}
185
/* Write the upper source ring base address bits (masked to the
 * supported high-address width).
 */
static void ath10k_ce_set_src_ring_base_addr_hi(struct ath10k *ar,
						u32 ce_ctrl_addr,
						u64 addr)
{
	u32 addr_hi = upper_32_bits(addr) & CE_DESC_ADDR_HI_MASK;

	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->sr_base_addr_hi, addr_hi);
}
195
/* Program the number of entries in the source ring. */
static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
					       u32 ce_ctrl_addr,
					       unsigned int n)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->sr_size_addr, n);
}
203
/*
 * Read-modify-write the CTRL1 register to set the dmax field
 * (maximum transfer size) without disturbing the other fields.
 */
static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
					       u32 ce_ctrl_addr,
					       unsigned int n)
{
	const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;

	u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					  ctrl_regs->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
			  (ctrl1_addr & ~(ctrl_regs->dmax->mask)) |
			  ath10k_set_ring_byte(n, ctrl_regs->dmax));
}
217
/*
 * Read-modify-write the CTRL1 register to set the source ring
 * byte-swap field, preserving the other fields.
 */
static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int n)
{
	const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;

	u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					  ctrl_regs->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
			  (ctrl1_addr & ~(ctrl_regs->src_ring->mask)) |
			  ath10k_set_ring_byte(n, ctrl_regs->src_ring));
}
231
/*
 * Read-modify-write the CTRL1 register to set the destination ring
 * byte-swap field, preserving the other fields.
 */
static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int n)
{
	const struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;

	u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					  ctrl_regs->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
			  (ctrl1_addr & ~(ctrl_regs->dst_ring->mask)) |
			  ath10k_set_ring_byte(n, ctrl_regs->dst_ring));
}
245
/*
 * Read the destination ring read index from the DDR-resident RRI
 * copy; the DRRI shares a word with the SRRI, hence the shift.
 */
static inline
	u32 ath10k_ce_dest_ring_read_index_from_ddr(struct ath10k *ar, u32 ce_id)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);

	return (ce->vaddr_rri[ce_id] >> CE_DDR_DRRI_SHIFT) &
		CE_DDR_RRI_MASK;
}
254
255static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
256 u32 ce_ctrl_addr)
257{
258 struct ath10k_ce *ce = ath10k_ce_priv(ar);
259 u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr);
260 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
261 u32 index;
262
263 if (ar->hw_params.rri_on_ddr &&
264 (ce_state->attr_flags & CE_ATTR_DIS_INTR))
265 index = ath10k_ce_dest_ring_read_index_from_ddr(ar, ce_id);
266 else
267 index = ath10k_ce_read32(ar, ce_ctrl_addr +
268 ar->hw_ce_regs->current_drri_addr);
269
270 return index;
271}
272
/*
 * Program the destination ring base address: always the low 32 bits,
 * and the high bits too on targets that provide the extended-address
 * op (64-bit descriptor targets).
 */
static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
						     u32 ce_id,
						     u64 addr)
{
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
	u32 ce_ctrl_addr = ath10k_ce_base_address(ar, ce_id);
	u32 addr_lo = lower_32_bits(addr);

	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->dr_base_addr_lo, addr_lo);

	if (ce_state->ops->ce_set_dest_ring_base_addr_hi) {
		ce_state->ops->ce_set_dest_ring_base_addr_hi(ar, ce_ctrl_addr,
							     addr);
	}
}
290
/*
 * Write the upper destination ring base address bits.  Unlike the
 * source ring variant this is a read-modify-write: only the
 * high-address field of the register is replaced.
 */
static void ath10k_ce_set_dest_ring_base_addr_hi(struct ath10k *ar,
						 u32 ce_ctrl_addr,
						 u64 addr)
{
	u32 addr_hi = upper_32_bits(addr) & CE_DESC_ADDR_HI_MASK;
	u32 reg_value;

	reg_value = ath10k_ce_read32(ar, ce_ctrl_addr +
				     ar->hw_ce_regs->dr_base_addr_hi);
	reg_value &= ~CE_DESC_ADDR_HI_MASK;
	reg_value |= addr_hi;
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->dr_base_addr_hi, reg_value);
}
305
/* Program the number of entries in the destination ring. */
static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
						u32 ce_ctrl_addr,
						unsigned int n)
{
	ath10k_ce_write32(ar, ce_ctrl_addr +
			  ar->hw_ce_regs->dr_size_addr, n);
}
313
/* Set the source ring high watermark field (read-modify-write). */
static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
						   u32 ce_ctrl_addr,
						   unsigned int n)
{
	const struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
	u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
			  (addr & ~(srcr_wm->wm_high->mask)) |
			  (ath10k_set_ring_byte(n, srcr_wm->wm_high)));
}
325
/* Set the source ring low watermark field (read-modify-write). */
static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
						  u32 ce_ctrl_addr,
						  unsigned int n)
{
	const struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
	u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
			  (addr & ~(srcr_wm->wm_low->mask)) |
			  (ath10k_set_ring_byte(n, srcr_wm->wm_low)));
}
337
/* Set the destination ring high watermark field (read-modify-write). */
static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int n)
{
	const struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
	u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
			  (addr & ~(dstr_wm->wm_high->mask)) |
			  (ath10k_set_ring_byte(n, dstr_wm->wm_high)));
}
349
/* Set the destination ring low watermark field (read-modify-write). */
static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
						   u32 ce_ctrl_addr,
						   unsigned int n)
{
	const struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
	u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
			  (addr & ~(dstr_wm->wm_low->mask)) |
			  (ath10k_set_ring_byte(n, dstr_wm->wm_low)));
}
361
/* Enable the copy-complete interrupt for this CE.
 * (Historic "inter" spelling kept: external callers use this name.)
 */
static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
							u32 ce_ctrl_addr)
{
	const struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;

	u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					    ar->hw_ce_regs->host_ie_addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
			  host_ie_addr | host_ie->copy_complete->mask);
}
373
/* Disable the copy-complete interrupt for this CE. */
static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
							u32 ce_ctrl_addr)
{
	const struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;

	u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					    ar->hw_ce_regs->host_ie_addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
			  host_ie_addr & ~(host_ie->copy_complete->mask));
}
385
/* Disable all watermark interrupts for this CE. */
static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
						    u32 ce_ctrl_addr)
{
	const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;

	u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
					    ar->hw_ce_regs->host_ie_addr);

	ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
			  host_ie_addr & ~(wm_regs->wm_mask));
}
397
/* Disable error interrupts for this CE. */
static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
						u32 ce_ctrl_addr)
{
	const struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;

	u32 misc_ie_addr = ath10k_ce_read32(ar,
			ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr);

	ath10k_ce_write32(ar,
			  ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr,
			  misc_ie_addr & ~(misc_regs->err_mask));
}
410
/* Acknowledge (clear) the given CE interrupt status bits; the
 * status register is write-one-to-clear via the watermark address.
 */
static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int mask)
{
	const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;

	ath10k_ce_write32(ar, ce_ctrl_addr + wm_regs->addr, mask);
}
419
420/*
421 * Guts of ath10k_ce_send.
422 * The caller takes responsibility for any needed locking.
423 */
424static int _ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
425 void *per_transfer_context,
426 dma_addr_t buffer,
427 unsigned int nbytes,
428 unsigned int transfer_id,
429 unsigned int flags)
430{
431 struct ath10k *ar = ce_state->ar;
432 struct ath10k_ce_ring *src_ring = ce_state->src_ring;
433 struct ce_desc *desc, sdesc;
434 unsigned int nentries_mask = src_ring->nentries_mask;
435 unsigned int sw_index = src_ring->sw_index;
436 unsigned int write_index = src_ring->write_index;
437 u32 ctrl_addr = ce_state->ctrl_addr;
438 u32 desc_flags = 0;
439 int ret = 0;
440
441 if (nbytes > ce_state->src_sz_max)
442 ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n",
443 __func__, nbytes, ce_state->src_sz_max);
444
445 if (unlikely(CE_RING_DELTA(nentries_mask,
446 write_index, sw_index - 1) <= 0)) {
447 ret = -ENOSR;
448 goto exit;
449 }
450
451 desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
452 write_index);
453
454 desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);
455
456 if (flags & CE_SEND_FLAG_GATHER)
457 desc_flags |= CE_DESC_FLAGS_GATHER;
458 if (flags & CE_SEND_FLAG_BYTE_SWAP)
459 desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;
460
461 sdesc.addr = __cpu_to_le32(buffer);
462 sdesc.nbytes = __cpu_to_le16(nbytes);
463 sdesc.flags = __cpu_to_le16(desc_flags);
464
465 *desc = sdesc;
466
467 src_ring->per_transfer_context[write_index] = per_transfer_context;
468
469 /* Update Source Ring Write Index */
470 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
471
472 /* WORKAROUND */
473 if (!(flags & CE_SEND_FLAG_GATHER))
474 ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
475
476 src_ring->write_index = write_index;
477exit:
478 return ret;
479}
480
481static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state,
482 void *per_transfer_context,
483 dma_addr_t buffer,
484 unsigned int nbytes,
485 unsigned int transfer_id,
486 unsigned int flags)
487{
488 struct ath10k *ar = ce_state->ar;
489 struct ath10k_ce_ring *src_ring = ce_state->src_ring;
490 struct ce_desc_64 *desc, sdesc;
491 unsigned int nentries_mask = src_ring->nentries_mask;
492 unsigned int sw_index;
493 unsigned int write_index = src_ring->write_index;
494 u32 ctrl_addr = ce_state->ctrl_addr;
495 __le32 *addr;
496 u32 desc_flags = 0;
497 int ret = 0;
498
499 if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
500 return -ESHUTDOWN;
501
502 if (nbytes > ce_state->src_sz_max)
503 ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n",
504 __func__, nbytes, ce_state->src_sz_max);
505
506 if (ar->hw_params.rri_on_ddr)
507 sw_index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_state->id);
508 else
509 sw_index = src_ring->sw_index;
510
511 if (unlikely(CE_RING_DELTA(nentries_mask,
512 write_index, sw_index - 1) <= 0)) {
513 ret = -ENOSR;
514 goto exit;
515 }
516
517 desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space,
518 write_index);
519
520 desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);
521
522 if (flags & CE_SEND_FLAG_GATHER)
523 desc_flags |= CE_DESC_FLAGS_GATHER;
524
525 if (flags & CE_SEND_FLAG_BYTE_SWAP)
526 desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;
527
528 addr = (__le32 *)&sdesc.addr;
529
530 flags |= upper_32_bits(buffer) & CE_DESC_ADDR_HI_MASK;
531 addr[0] = __cpu_to_le32(buffer);
532 addr[1] = __cpu_to_le32(flags);
533 if (flags & CE_SEND_FLAG_GATHER)
534 addr[1] |= __cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER);
535 else
536 addr[1] &= ~(__cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER));
537
538 sdesc.nbytes = __cpu_to_le16(nbytes);
539 sdesc.flags = __cpu_to_le16(desc_flags);
540
541 *desc = sdesc;
542
543 src_ring->per_transfer_context[write_index] = per_transfer_context;
544
545 /* Update Source Ring Write Index */
546 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
547
548 if (!(flags & CE_SEND_FLAG_GATHER)) {
549 if (ar->hw_params.shadow_reg_support)
550 ath10k_ce_shadow_src_ring_write_index_set(ar, ce_state,
551 write_index);
552 else
553 ath10k_ce_src_ring_write_index_set(ar, ctrl_addr,
554 write_index);
555 }
556
557 src_ring->write_index = write_index;
558exit:
559 return ret;
560}
561
/* Dispatch to the 32- or 64-bit descriptor send implementation
 * chosen for this pipe.  Caller must hold the CE lock.
 */
int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
			  void *per_transfer_context,
			  dma_addr_t buffer,
			  unsigned int nbytes,
			  unsigned int transfer_id,
			  unsigned int flags)
{
	return ce_state->ops->ce_send_nolock(ce_state, per_transfer_context,
					     buffer, nbytes, transfer_id, flags);
}
EXPORT_SYMBOL(ath10k_ce_send_nolock);
573
/* Undo the most recent (not yet hardware-visible) enqueue on the
 * source ring, used to clean up an aborted scatter-gather send.
 */
void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_ring *src_ring = pipe->src_ring;
	u32 ctrl_addr = pipe->ctrl_addr;

	lockdep_assert_held(&ce->ce_lock);

	/*
	 * This function must be called only if there is an incomplete
	 * scatter-gather transfer (before index register is updated)
	 * that needs to be cleaned up.
	 */
	if (WARN_ON_ONCE(src_ring->write_index == src_ring->sw_index))
		return;

	if (WARN_ON_ONCE(src_ring->write_index ==
			 ath10k_ce_src_ring_write_index_get(ar, ctrl_addr)))
		return;

	src_ring->write_index--;
	src_ring->write_index &= src_ring->nentries_mask;

	src_ring->per_transfer_context[src_ring->write_index] = NULL;
}
EXPORT_SYMBOL(__ath10k_ce_send_revert);
601
602int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
603 void *per_transfer_context,
604 dma_addr_t buffer,
605 unsigned int nbytes,
606 unsigned int transfer_id,
607 unsigned int flags)
608{
609 struct ath10k *ar = ce_state->ar;
610 struct ath10k_ce *ce = ath10k_ce_priv(ar);
611 int ret;
612
613 spin_lock_bh(&ce->ce_lock);
614 ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
615 buffer, nbytes, transfer_id, flags);
616 spin_unlock_bh(&ce->ce_lock);
617
618 return ret;
619}
620EXPORT_SYMBOL(ath10k_ce_send);
621
622int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
623{
624 struct ath10k *ar = pipe->ar;
625 struct ath10k_ce *ce = ath10k_ce_priv(ar);
626 int delta;
627
628 spin_lock_bh(&ce->ce_lock);
629 delta = CE_RING_DELTA(pipe->src_ring->nentries_mask,
630 pipe->src_ring->write_index,
631 pipe->src_ring->sw_index - 1);
632 spin_unlock_bh(&ce->ce_lock);
633
634 return delta;
635}
636EXPORT_SYMBOL(ath10k_ce_num_free_src_entries);
637
/* Return the number of free destination ring entries.
 * Caller must hold the CE lock.
 */
int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	unsigned int sw_index = dest_ring->sw_index;

	lockdep_assert_held(&ce->ce_lock);

	return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
}
EXPORT_SYMBOL(__ath10k_ce_rx_num_free_bufs);
652
/* Post one receive buffer (32-bit descriptor format) to the
 * destination ring and push the write index to hardware.
 * Caller must hold the CE lock.  Returns 0 or -ENOSPC.
 */
static int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
				   dma_addr_t paddr)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	unsigned int sw_index = dest_ring->sw_index;
	struct ce_desc *base = dest_ring->base_addr_owner_space;
	struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);
	u32 ctrl_addr = pipe->ctrl_addr;

	lockdep_assert_held(&ce->ce_lock);

	/* CE5 (HTT Rx) is allowed to overwrite: its buffers are reused
	 * in place, so the full-ring check is skipped for it.
	 */
	if ((pipe->id != 5) &&
	    CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
		return -ENOSPC;

	desc->addr = __cpu_to_le32(paddr);
	desc->nbytes = 0;

	dest_ring->per_transfer_context[write_index] = ctx;
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
	dest_ring->write_index = write_index;

	return 0;
}
682
/* Post one receive buffer (64-bit descriptor format) to the
 * destination ring and push the write index to hardware.
 * Caller must hold the CE lock.  Returns 0 or -ENOSPC.
 */
static int __ath10k_ce_rx_post_buf_64(struct ath10k_ce_pipe *pipe,
				      void *ctx,
				      dma_addr_t paddr)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	unsigned int sw_index = dest_ring->sw_index;
	struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
	struct ce_desc_64 *desc =
			CE_DEST_RING_TO_DESC_64(base, write_index);
	u32 ctrl_addr = pipe->ctrl_addr;

	lockdep_assert_held(&ce->ce_lock);

	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
		return -ENOSPC;

	/* Mask the address to the descriptor's supported width */
	desc->addr = __cpu_to_le64(paddr);
	desc->addr &= __cpu_to_le64(CE_DESC_ADDR_MASK);

	desc->nbytes = 0;

	dest_ring->per_transfer_context[write_index] = ctx;
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
	dest_ring->write_index = write_index;

	return 0;
}
715
/* Advance the destination ring write index by @nentries and push it
 * to hardware, clamping so the ring is never marked completely full.
 */
void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index = dest_ring->write_index;
	u32 ctrl_addr = pipe->ctrl_addr;
	u32 cur_write_idx = ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);

	/* Prevent CE ring stuck issue that will occur when ring is full.
	 * Make sure that write index is 1 less than read index.
	 */
	if (((cur_write_idx + nentries) & nentries_mask) == dest_ring->sw_index)
		nentries -= 1;

	write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries);
	ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
	dest_ring->write_index = write_index;
}
EXPORT_SYMBOL(ath10k_ce_rx_update_write_idx);
736
/* Locked wrapper: post one receive buffer via the pipe's
 * descriptor-format-specific implementation.
 */
int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
			  dma_addr_t paddr)
{
	struct ath10k *ar = pipe->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ret;

	spin_lock_bh(&ce->ce_lock);
	ret = pipe->ops->ce_rx_post_buf(pipe, ctx, paddr);
	spin_unlock_bh(&ce->ce_lock);

	return ret;
}
EXPORT_SYMBOL(ath10k_ce_rx_post_buf);
751
/*
 * Guts of ath10k_ce_completed_recv_next (32-bit descriptors):
 * pop the next completed destination descriptor, returning its
 * byte count and per-transfer context.  Returns -EIO when the
 * descriptor at sw_index is not yet complete.
 * The caller takes responsibility for any necessary locking.
 */
static int
	 _ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
					       void **per_transfer_contextp,
					       unsigned int *nbytesp)
{
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;

	struct ce_desc *base = dest_ring->base_addr_owner_space;
	struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
	struct ce_desc sdesc;
	u16 nbytes;

	/* Copy in one go for performance reasons */
	sdesc = *desc;

	nbytes = __le16_to_cpu(sdesc.nbytes);
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		return -EIO;
	}

	desc->nbytes = 0;

	/* Return data from completed destination descriptor */
	*nbytesp = nbytes;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	/* Copy engine 5 (HTT Rx) will reuse the same transfer context.
	 * So update transfer context all CEs except CE5.
	 */
	if (ce_state->id != 5)
		dest_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;

	return 0;
}
805
/*
 * 64-bit descriptor variant of _ath10k_ce_completed_recv_next_nolock:
 * pop the next completed destination descriptor.  Returns -EIO when
 * the descriptor at sw_index is not yet complete.
 * The caller takes responsibility for any necessary locking.
 */
static int
_ath10k_ce_completed_recv_next_nolock_64(struct ath10k_ce_pipe *ce_state,
					 void **per_transfer_contextp,
					 unsigned int *nbytesp)
{
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;
	struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
	struct ce_desc_64 *desc =
		CE_DEST_RING_TO_DESC_64(base, sw_index);
	struct ce_desc_64 sdesc;
	u16 nbytes;

	/* Copy in one go for performance reasons */
	sdesc = *desc;

	nbytes = __le16_to_cpu(sdesc.nbytes);
	if (nbytes == 0) {
		/* This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		return -EIO;
	}

	desc->nbytes = 0;

	/* Return data from completed destination descriptor */
	*nbytesp = nbytes;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	/* Copy engine 5 (HTT Rx) will reuse the same transfer context.
	 * So update transfer context all CEs except CE5.
	 */
	if (ce_state->id != 5)
		dest_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;

	return 0;
}
854
/* Dispatch to the pipe's descriptor-format-specific completed-recv
 * implementation.  Caller must hold the CE lock.
 */
int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
					 void **per_transfer_ctx,
					 unsigned int *nbytesp)
{
	return ce_state->ops->ce_completed_recv_next_nolock(ce_state,
							    per_transfer_ctx,
							    nbytesp);
}
EXPORT_SYMBOL(ath10k_ce_completed_recv_next_nolock);
864
/* Locked wrapper around the completed-recv implementation. */
int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  unsigned int *nbytesp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce *ce = ath10k_ce_priv(ar);
	int ret;

	spin_lock_bh(&ce->ce_lock);
	ret = ce_state->ops->ce_completed_recv_next_nolock(ce_state,
						   per_transfer_contextp,
						   nbytesp);

	spin_unlock_bh(&ce->ce_lock);

	return ret;
}
EXPORT_SYMBOL(ath10k_ce_completed_recv_next);
883
/*
 * Reclaim the next outstanding (posted but not completed) receive
 * buffer, 32-bit descriptor format.  Used during teardown to hand
 * buffers back to the caller.  Returns -EIO when nothing is pending.
 */
static int _ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
				       void **per_transfer_contextp,
				       dma_addr_t *bufferp)
{
	struct ath10k_ce_ring *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_ce *ce;

	dest_ring = ce_state->dest_ring;

	if (!dest_ring)
		return -EIO;

	ar = ce_state->ar;
	ce = ath10k_ce_priv(ar);

	spin_lock_bh(&ce->ce_lock);

	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	if (write_index != sw_index) {
		struct ce_desc *base = dest_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);

		/* Return data from completed destination descriptor */
		*bufferp = __le32_to_cpu(desc->addr);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				dest_ring->per_transfer_context[sw_index];

		/* sanity */
		dest_ring->per_transfer_context[sw_index] = NULL;
		desc->nbytes = 0;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ce->ce_lock);

	return ret;
}
936
/*
 * 64-bit descriptor variant of _ath10k_ce_revoke_recv_next: reclaim
 * the next outstanding receive buffer.  Returns -EIO when nothing is
 * pending.
 */
static int _ath10k_ce_revoke_recv_next_64(struct ath10k_ce_pipe *ce_state,
					  void **per_transfer_contextp,
					  dma_addr_t *bufferp)
{
	struct ath10k_ce_ring *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_ce *ce;

	dest_ring = ce_state->dest_ring;

	if (!dest_ring)
		return -EIO;

	ar = ce_state->ar;
	ce = ath10k_ce_priv(ar);

	spin_lock_bh(&ce->ce_lock);

	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	if (write_index != sw_index) {
		struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
		struct ce_desc_64 *desc =
			CE_DEST_RING_TO_DESC_64(base, sw_index);

		/* Return data from completed destination descriptor */
		*bufferp = __le64_to_cpu(desc->addr);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				dest_ring->per_transfer_context[sw_index];

		/* sanity */
		dest_ring->per_transfer_context[sw_index] = NULL;
		desc->nbytes = 0;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ce->ce_lock);

	return ret;
}
990
/* Dispatch to the pipe's descriptor-format-specific revoke
 * implementation (which takes the CE lock itself).
 */
int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       dma_addr_t *bufferp)
{
	return ce_state->ops->ce_revoke_recv_next(ce_state,
						  per_transfer_contextp,
						  bufferp);
}
EXPORT_SYMBOL(ath10k_ce_revoke_recv_next);
1000
/*
 * Guts of ath10k_ce_completed_send_next (32-bit descriptors):
 * reap the next completed source descriptor and return its
 * per-transfer context.  Returns -EIO when nothing has completed,
 * -ENODEV when the read-index register reads back all-ones
 * (device gone).  The caller takes responsibility for any
 * necessary locking.
 */
static int _ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
						 void **per_transfer_contextp)
{
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int read_index;
	struct ce_desc *desc;

	if (src_ring->hw_index == sw_index) {
		/*
		 * The SW completion index has caught up with the cached
		 * version of the HW completion index.
		 * Update the cached HW completion index to see whether
		 * the SW has really caught up to the HW, or if the cached
		 * value of the HW index has become stale.
		 */

		read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
		if (read_index == 0xffffffff)
			return -ENODEV;

		read_index &= nentries_mask;
		src_ring->hw_index = read_index;
	}

	if (ar->hw_params.rri_on_ddr)
		read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
	else
		read_index = src_ring->hw_index;

	if (read_index == sw_index)
		return -EIO;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			src_ring->per_transfer_context[sw_index];

	/* sanity */
	src_ring->per_transfer_context[sw_index] = NULL;
	desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
				   sw_index);
	desc->nbytes = 0;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	src_ring->sw_index = sw_index;

	return 0;
}
1057
1058static int _ath10k_ce_completed_send_next_nolock_64(struct ath10k_ce_pipe *ce_state,
1059 void **per_transfer_contextp)
1060{
1061 struct ath10k_ce_ring *src_ring = ce_state->src_ring;
1062 u32 ctrl_addr = ce_state->ctrl_addr;
1063 struct ath10k *ar = ce_state->ar;
1064 unsigned int nentries_mask = src_ring->nentries_mask;
1065 unsigned int sw_index = src_ring->sw_index;
1066 unsigned int read_index;
1067 struct ce_desc_64 *desc;
1068
1069 if (src_ring->hw_index == sw_index) {
1070 /*
1071 * The SW completion index has caught up with the cached
1072 * version of the HW completion index.
1073 * Update the cached HW completion index to see whether
1074 * the SW has really caught up to the HW, or if the cached
1075 * value of the HW index has become stale.
1076 */
1077
1078 read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
1079 if (read_index == 0xffffffff)
1080 return -ENODEV;
1081
1082 read_index &= nentries_mask;
1083 src_ring->hw_index = read_index;
1084 }
1085
1086 if (ar->hw_params.rri_on_ddr)
1087 read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
1088 else
1089 read_index = src_ring->hw_index;
1090
1091 if (read_index == sw_index)
1092 return -EIO;
1093
1094 if (per_transfer_contextp)
1095 *per_transfer_contextp =
1096 src_ring->per_transfer_context[sw_index];
1097
1098 /* sanity */
1099 src_ring->per_transfer_context[sw_index] = NULL;
1100 desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space,
1101 sw_index);
1102 desc->nbytes = 0;
1103
1104 /* Update sw_index */
1105 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1106 src_ring->sw_index = sw_index;
1107
1108 return 0;
1109}
1110
1111int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
1112 void **per_transfer_contextp)
1113{
1114 return ce_state->ops->ce_completed_send_next_nolock(ce_state,
1115 per_transfer_contextp);
1116}
1117EXPORT_SYMBOL(ath10k_ce_completed_send_next_nolock);
1118
1119static void ath10k_ce_extract_desc_data(struct ath10k *ar,
1120 struct ath10k_ce_ring *src_ring,
1121 u32 sw_index,
1122 dma_addr_t *bufferp,
1123 u32 *nbytesp,
1124 u32 *transfer_idp)
1125{
1126 struct ce_desc *base = src_ring->base_addr_owner_space;
1127 struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);
1128
1129 /* Return data from completed source descriptor */
1130 *bufferp = __le32_to_cpu(desc->addr);
1131 *nbytesp = __le16_to_cpu(desc->nbytes);
1132 *transfer_idp = MS(__le16_to_cpu(desc->flags),
1133 CE_DESC_FLAGS_META_DATA);
1134}
1135
1136static void ath10k_ce_extract_desc_data_64(struct ath10k *ar,
1137 struct ath10k_ce_ring *src_ring,
1138 u32 sw_index,
1139 dma_addr_t *bufferp,
1140 u32 *nbytesp,
1141 u32 *transfer_idp)
1142{
1143 struct ce_desc_64 *base = src_ring->base_addr_owner_space;
1144 struct ce_desc_64 *desc =
1145 CE_SRC_RING_TO_DESC_64(base, sw_index);
1146
1147 /* Return data from completed source descriptor */
1148 *bufferp = __le64_to_cpu(desc->addr);
1149 *nbytesp = __le16_to_cpu(desc->nbytes);
1150 *transfer_idp = MS(__le16_to_cpu(desc->flags),
1151 CE_DESC_FLAGS_META_DATA);
1152}
1153
1154/* NB: Modeled after ath10k_ce_completed_send_next */
1155int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
1156 void **per_transfer_contextp,
1157 dma_addr_t *bufferp,
1158 unsigned int *nbytesp,
1159 unsigned int *transfer_idp)
1160{
1161 struct ath10k_ce_ring *src_ring;
1162 unsigned int nentries_mask;
1163 unsigned int sw_index;
1164 unsigned int write_index;
1165 int ret;
1166 struct ath10k *ar;
1167 struct ath10k_ce *ce;
1168
1169 src_ring = ce_state->src_ring;
1170
1171 if (!src_ring)
1172 return -EIO;
1173
1174 ar = ce_state->ar;
1175 ce = ath10k_ce_priv(ar);
1176
1177 spin_lock_bh(&ce->ce_lock);
1178
1179 nentries_mask = src_ring->nentries_mask;
1180 sw_index = src_ring->sw_index;
1181 write_index = src_ring->write_index;
1182
1183 if (write_index != sw_index) {
1184 ce_state->ops->ce_extract_desc_data(ar, src_ring, sw_index,
1185 bufferp, nbytesp,
1186 transfer_idp);
1187
1188 if (per_transfer_contextp)
1189 *per_transfer_contextp =
1190 src_ring->per_transfer_context[sw_index];
1191
1192 /* sanity */
1193 src_ring->per_transfer_context[sw_index] = NULL;
1194
1195 /* Update sw_index */
1196 sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
1197 src_ring->sw_index = sw_index;
1198 ret = 0;
1199 } else {
1200 ret = -EIO;
1201 }
1202
1203 spin_unlock_bh(&ce->ce_lock);
1204
1205 return ret;
1206}
1207EXPORT_SYMBOL(ath10k_ce_cancel_send_next);
1208
1209int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
1210 void **per_transfer_contextp)
1211{
1212 struct ath10k *ar = ce_state->ar;
1213 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1214 int ret;
1215
1216 spin_lock_bh(&ce->ce_lock);
1217 ret = ath10k_ce_completed_send_next_nolock(ce_state,
1218 per_transfer_contextp);
1219 spin_unlock_bh(&ce->ce_lock);
1220
1221 return ret;
1222}
1223EXPORT_SYMBOL(ath10k_ce_completed_send_next);
1224
1225/*
1226 * Guts of interrupt handler for per-engine interrupts on a particular CE.
1227 *
1228 * Invokes registered callbacks for recv_complete,
1229 * send_complete, and watermarks.
1230 */
1231void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
1232{
1233 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1234 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
1235 const struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
1236 u32 ctrl_addr = ce_state->ctrl_addr;
1237
1238 /*
1239 * Clear before handling
1240 *
1241 * Misc CE interrupts are not being handled, but still need
1242 * to be cleared.
1243 *
1244 * NOTE: When the last copy engine interrupt is cleared the
1245 * hardware will go to sleep. Once this happens any access to
1246 * the CE registers can cause a hardware fault.
1247 */
1248 ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
1249 wm_regs->cc_mask | wm_regs->wm_mask);
1250
1251 if (ce_state->recv_cb)
1252 ce_state->recv_cb(ce_state);
1253
1254 if (ce_state->send_cb)
1255 ce_state->send_cb(ce_state);
1256}
1257EXPORT_SYMBOL(ath10k_ce_per_engine_service);
1258
1259/*
1260 * Handler for per-engine interrupts on ALL active CEs.
1261 * This is used in cases where the system is sharing a
1262 * single interrupt for all CEs
1263 */
1264
1265void ath10k_ce_per_engine_service_any(struct ath10k *ar)
1266{
1267 int ce_id;
1268 u32 intr_summary;
1269
1270 intr_summary = ath10k_ce_interrupt_summary(ar);
1271
1272 for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {
1273 if (intr_summary & (1 << ce_id))
1274 intr_summary &= ~(1 << ce_id);
1275 else
1276 /* no intr pending on this CE */
1277 continue;
1278
1279 ath10k_ce_per_engine_service(ar, ce_id);
1280 }
1281}
1282EXPORT_SYMBOL(ath10k_ce_per_engine_service_any);
1283
1284/*
1285 * Adjust interrupts for the copy complete handler.
1286 * If it's needed for either send or recv, then unmask
1287 * this interrupt; otherwise, mask it.
1288 *
1289 * Called with ce_lock held.
1290 */
1291static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state)
1292{
1293 u32 ctrl_addr = ce_state->ctrl_addr;
1294 struct ath10k *ar = ce_state->ar;
1295 bool disable_copy_compl_intr = ce_state->attr_flags & CE_ATTR_DIS_INTR;
1296
1297 if ((!disable_copy_compl_intr) &&
1298 (ce_state->send_cb || ce_state->recv_cb))
1299 ath10k_ce_copy_complete_inter_enable(ar, ctrl_addr);
1300 else
1301 ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
1302
1303 ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
1304}
1305
1306void ath10k_ce_disable_interrupt(struct ath10k *ar, int ce_id)
1307{
1308 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1309 struct ath10k_ce_pipe *ce_state;
1310 u32 ctrl_addr;
1311
1312 ce_state = &ce->ce_states[ce_id];
1313 if (ce_state->attr_flags & CE_ATTR_POLL)
1314 return;
1315
1316 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
1317
1318 ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
1319 ath10k_ce_error_intr_disable(ar, ctrl_addr);
1320 ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
1321}
1322EXPORT_SYMBOL(ath10k_ce_disable_interrupt);
1323
1324void ath10k_ce_disable_interrupts(struct ath10k *ar)
1325{
1326 int ce_id;
1327
1328 for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
1329 ath10k_ce_disable_interrupt(ar, ce_id);
1330}
1331EXPORT_SYMBOL(ath10k_ce_disable_interrupts);
1332
1333void ath10k_ce_enable_interrupt(struct ath10k *ar, int ce_id)
1334{
1335 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1336 struct ath10k_ce_pipe *ce_state;
1337
1338 ce_state = &ce->ce_states[ce_id];
1339 if (ce_state->attr_flags & CE_ATTR_POLL)
1340 return;
1341
1342 ath10k_ce_per_engine_handler_adjust(ce_state);
1343}
1344EXPORT_SYMBOL(ath10k_ce_enable_interrupt);
1345
1346void ath10k_ce_enable_interrupts(struct ath10k *ar)
1347{
1348 int ce_id;
1349
1350 /* Enable interrupts for copy engine that
1351 * are not using polling mode.
1352 */
1353 for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
1354 ath10k_ce_enable_interrupt(ar, ce_id);
1355}
1356EXPORT_SYMBOL(ath10k_ce_enable_interrupts);
1357
1358static int ath10k_ce_init_src_ring(struct ath10k *ar,
1359 unsigned int ce_id,
1360 const struct ce_attr *attr)
1361{
1362 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1363 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
1364 struct ath10k_ce_ring *src_ring = ce_state->src_ring;
1365 u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);
1366
1367 nentries = roundup_pow_of_two(attr->src_nentries);
1368
1369 if (ar->hw_params.target_64bit)
1370 memset(src_ring->base_addr_owner_space, 0,
1371 nentries * sizeof(struct ce_desc_64));
1372 else
1373 memset(src_ring->base_addr_owner_space, 0,
1374 nentries * sizeof(struct ce_desc));
1375
1376 src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
1377 src_ring->sw_index &= src_ring->nentries_mask;
1378 src_ring->hw_index = src_ring->sw_index;
1379
1380 src_ring->write_index =
1381 ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
1382 src_ring->write_index &= src_ring->nentries_mask;
1383
1384 ath10k_ce_src_ring_base_addr_set(ar, ce_id,
1385 src_ring->base_addr_ce_space);
1386 ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
1387 ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
1388 ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
1389 ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
1390 ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
1391
1392 ath10k_dbg(ar, ATH10K_DBG_BOOT,
1393 "boot init ce src ring id %d entries %d base_addr %p\n",
1394 ce_id, nentries, src_ring->base_addr_owner_space);
1395
1396 return 0;
1397}
1398
1399static int ath10k_ce_init_dest_ring(struct ath10k *ar,
1400 unsigned int ce_id,
1401 const struct ce_attr *attr)
1402{
1403 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1404 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
1405 struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
1406 u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);
1407
1408 nentries = roundup_pow_of_two(attr->dest_nentries);
1409
1410 if (ar->hw_params.target_64bit)
1411 memset(dest_ring->base_addr_owner_space, 0,
1412 nentries * sizeof(struct ce_desc_64));
1413 else
1414 memset(dest_ring->base_addr_owner_space, 0,
1415 nentries * sizeof(struct ce_desc));
1416
1417 dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
1418 dest_ring->sw_index &= dest_ring->nentries_mask;
1419 dest_ring->write_index =
1420 ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
1421 dest_ring->write_index &= dest_ring->nentries_mask;
1422
1423 ath10k_ce_dest_ring_base_addr_set(ar, ce_id,
1424 dest_ring->base_addr_ce_space);
1425 ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
1426 ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
1427 ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
1428 ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
1429
1430 ath10k_dbg(ar, ATH10K_DBG_BOOT,
1431 "boot ce dest ring id %d entries %d base_addr %p\n",
1432 ce_id, nentries, dest_ring->base_addr_owner_space);
1433
1434 return 0;
1435}
1436
1437static int ath10k_ce_alloc_shadow_base(struct ath10k *ar,
1438 struct ath10k_ce_ring *src_ring,
1439 u32 nentries)
1440{
1441 src_ring->shadow_base_unaligned = kcalloc(nentries,
1442 sizeof(struct ce_desc_64),
1443 GFP_KERNEL);
1444 if (!src_ring->shadow_base_unaligned)
1445 return -ENOMEM;
1446
1447 src_ring->shadow_base = (struct ce_desc_64 *)
1448 PTR_ALIGN(src_ring->shadow_base_unaligned,
1449 CE_DESC_RING_ALIGN);
1450 return 0;
1451}
1452
1453static struct ath10k_ce_ring *
1454ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
1455 const struct ce_attr *attr)
1456{
1457 struct ath10k_ce_ring *src_ring;
1458 u32 nentries = attr->src_nentries;
1459 dma_addr_t base_addr;
1460 int ret;
1461
1462 nentries = roundup_pow_of_two(nentries);
1463
1464 src_ring = kzalloc_flex(*src_ring, per_transfer_context, nentries);
1465 if (src_ring == NULL)
1466 return ERR_PTR(-ENOMEM);
1467
1468 src_ring->nentries = nentries;
1469 src_ring->nentries_mask = nentries - 1;
1470
1471 /*
1472 * Legacy platforms that do not support cache
1473 * coherent DMA are unsupported
1474 */
1475 src_ring->base_addr_owner_space_unaligned =
1476 dma_alloc_coherent(ar->dev,
1477 (nentries * sizeof(struct ce_desc) +
1478 CE_DESC_RING_ALIGN),
1479 &base_addr, GFP_KERNEL);
1480 if (!src_ring->base_addr_owner_space_unaligned) {
1481 kfree(src_ring);
1482 return ERR_PTR(-ENOMEM);
1483 }
1484
1485 src_ring->base_addr_ce_space_unaligned = base_addr;
1486
1487 src_ring->base_addr_owner_space =
1488 PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
1489 CE_DESC_RING_ALIGN);
1490 src_ring->base_addr_ce_space =
1491 ALIGN(src_ring->base_addr_ce_space_unaligned,
1492 CE_DESC_RING_ALIGN);
1493
1494 if (ar->hw_params.shadow_reg_support) {
1495 ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
1496 if (ret) {
1497 dma_free_coherent(ar->dev,
1498 (nentries * sizeof(struct ce_desc) +
1499 CE_DESC_RING_ALIGN),
1500 src_ring->base_addr_owner_space_unaligned,
1501 base_addr);
1502 kfree(src_ring);
1503 return ERR_PTR(ret);
1504 }
1505 }
1506
1507 return src_ring;
1508}
1509
1510static struct ath10k_ce_ring *
1511ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id,
1512 const struct ce_attr *attr)
1513{
1514 struct ath10k_ce_ring *src_ring;
1515 u32 nentries = attr->src_nentries;
1516 dma_addr_t base_addr;
1517 int ret;
1518
1519 nentries = roundup_pow_of_two(nentries);
1520
1521 src_ring = kzalloc_flex(*src_ring, per_transfer_context, nentries);
1522 if (!src_ring)
1523 return ERR_PTR(-ENOMEM);
1524
1525 src_ring->nentries = nentries;
1526 src_ring->nentries_mask = nentries - 1;
1527
1528 /* Legacy platforms that do not support cache
1529 * coherent DMA are unsupported
1530 */
1531 src_ring->base_addr_owner_space_unaligned =
1532 dma_alloc_coherent(ar->dev,
1533 (nentries * sizeof(struct ce_desc_64) +
1534 CE_DESC_RING_ALIGN),
1535 &base_addr, GFP_KERNEL);
1536 if (!src_ring->base_addr_owner_space_unaligned) {
1537 kfree(src_ring);
1538 return ERR_PTR(-ENOMEM);
1539 }
1540
1541 src_ring->base_addr_ce_space_unaligned = base_addr;
1542
1543 src_ring->base_addr_owner_space =
1544 PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
1545 CE_DESC_RING_ALIGN);
1546 src_ring->base_addr_ce_space =
1547 ALIGN(src_ring->base_addr_ce_space_unaligned,
1548 CE_DESC_RING_ALIGN);
1549
1550 if (ar->hw_params.shadow_reg_support) {
1551 ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
1552 if (ret) {
1553 dma_free_coherent(ar->dev,
1554 (nentries * sizeof(struct ce_desc_64) +
1555 CE_DESC_RING_ALIGN),
1556 src_ring->base_addr_owner_space_unaligned,
1557 base_addr);
1558 kfree(src_ring);
1559 return ERR_PTR(ret);
1560 }
1561 }
1562
1563 return src_ring;
1564}
1565
1566static struct ath10k_ce_ring *
1567ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
1568 const struct ce_attr *attr)
1569{
1570 struct ath10k_ce_ring *dest_ring;
1571 u32 nentries;
1572 dma_addr_t base_addr;
1573
1574 nentries = roundup_pow_of_two(attr->dest_nentries);
1575
1576 dest_ring = kzalloc_flex(*dest_ring, per_transfer_context, nentries);
1577 if (dest_ring == NULL)
1578 return ERR_PTR(-ENOMEM);
1579
1580 dest_ring->nentries = nentries;
1581 dest_ring->nentries_mask = nentries - 1;
1582
1583 /*
1584 * Legacy platforms that do not support cache
1585 * coherent DMA are unsupported
1586 */
1587 dest_ring->base_addr_owner_space_unaligned =
1588 dma_alloc_coherent(ar->dev,
1589 (nentries * sizeof(struct ce_desc) +
1590 CE_DESC_RING_ALIGN),
1591 &base_addr, GFP_KERNEL);
1592 if (!dest_ring->base_addr_owner_space_unaligned) {
1593 kfree(dest_ring);
1594 return ERR_PTR(-ENOMEM);
1595 }
1596
1597 dest_ring->base_addr_ce_space_unaligned = base_addr;
1598
1599 dest_ring->base_addr_owner_space =
1600 PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
1601 CE_DESC_RING_ALIGN);
1602 dest_ring->base_addr_ce_space =
1603 ALIGN(dest_ring->base_addr_ce_space_unaligned,
1604 CE_DESC_RING_ALIGN);
1605
1606 return dest_ring;
1607}
1608
1609static struct ath10k_ce_ring *
1610ath10k_ce_alloc_dest_ring_64(struct ath10k *ar, unsigned int ce_id,
1611 const struct ce_attr *attr)
1612{
1613 struct ath10k_ce_ring *dest_ring;
1614 u32 nentries;
1615 dma_addr_t base_addr;
1616
1617 nentries = roundup_pow_of_two(attr->dest_nentries);
1618
1619 dest_ring = kzalloc_flex(*dest_ring, per_transfer_context, nentries);
1620 if (!dest_ring)
1621 return ERR_PTR(-ENOMEM);
1622
1623 dest_ring->nentries = nentries;
1624 dest_ring->nentries_mask = nentries - 1;
1625
1626 /* Legacy platforms that do not support cache
1627 * coherent DMA are unsupported
1628 */
1629 dest_ring->base_addr_owner_space_unaligned =
1630 dma_alloc_coherent(ar->dev,
1631 (nentries * sizeof(struct ce_desc_64) +
1632 CE_DESC_RING_ALIGN),
1633 &base_addr, GFP_KERNEL);
1634 if (!dest_ring->base_addr_owner_space_unaligned) {
1635 kfree(dest_ring);
1636 return ERR_PTR(-ENOMEM);
1637 }
1638
1639 dest_ring->base_addr_ce_space_unaligned = base_addr;
1640
1641 /* Correctly initialize memory to 0 to prevent garbage
1642 * data crashing system when download firmware
1643 */
1644 dest_ring->base_addr_owner_space =
1645 PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
1646 CE_DESC_RING_ALIGN);
1647 dest_ring->base_addr_ce_space =
1648 ALIGN(dest_ring->base_addr_ce_space_unaligned,
1649 CE_DESC_RING_ALIGN);
1650
1651 return dest_ring;
1652}
1653
1654/*
1655 * Initialize a Copy Engine based on caller-supplied attributes.
1656 * This may be called once to initialize both source and destination
1657 * rings or it may be called twice for separate source and destination
1658 * initialization. It may be that only one side or the other is
1659 * initialized by software/firmware.
1660 */
1661int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
1662 const struct ce_attr *attr)
1663{
1664 int ret;
1665
1666 if (attr->src_nentries) {
1667 ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
1668 if (ret) {
1669 ath10k_err(ar, "Failed to initialize CE src ring for ID: %d (%d)\n",
1670 ce_id, ret);
1671 return ret;
1672 }
1673 }
1674
1675 if (attr->dest_nentries) {
1676 ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
1677 if (ret) {
1678 ath10k_err(ar, "Failed to initialize CE dest ring for ID: %d (%d)\n",
1679 ce_id, ret);
1680 return ret;
1681 }
1682 }
1683
1684 return 0;
1685}
1686EXPORT_SYMBOL(ath10k_ce_init_pipe);
1687
/* Quiesce the source side of a CE in hardware: detach the ring base
 * address and zero out its size, dmax and high watermark so the engine
 * can no longer fetch source descriptors. Ring memory is not freed
 * here (see _ath10k_ce_free_pipe()).
 */
static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
{
	u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	ath10k_ce_src_ring_base_addr_set(ar, ce_id, 0);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
}
1697
/* Quiesce the destination side of a CE in hardware: detach the ring
 * base address and zero out its size and high watermark. Ring memory
 * is not freed here (see _ath10k_ce_free_pipe()).
 */
static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
{
	u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);

	ath10k_ce_dest_ring_base_addr_set(ar, ce_id, 0);
	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
}
1706
/* Detach both rings of a CE from the hardware. The ring allocations
 * themselves stay valid; release them with ath10k_ce_free_pipe().
 */
void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
{
	ath10k_ce_deinit_src_ring(ar, ce_id);
	ath10k_ce_deinit_dest_ring(ar, ce_id);
}
EXPORT_SYMBOL(ath10k_ce_deinit_pipe);
1713
1714static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
1715{
1716 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1717 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
1718
1719 if (ce_state->src_ring) {
1720 if (ar->hw_params.shadow_reg_support)
1721 kfree(ce_state->src_ring->shadow_base_unaligned);
1722 dma_free_coherent(ar->dev,
1723 (ce_state->src_ring->nentries *
1724 sizeof(struct ce_desc) +
1725 CE_DESC_RING_ALIGN),
1726 ce_state->src_ring->base_addr_owner_space_unaligned,
1727 ce_state->src_ring->base_addr_ce_space_unaligned);
1728 kfree(ce_state->src_ring);
1729 }
1730
1731 if (ce_state->dest_ring) {
1732 dma_free_coherent(ar->dev,
1733 (ce_state->dest_ring->nentries *
1734 sizeof(struct ce_desc) +
1735 CE_DESC_RING_ALIGN),
1736 ce_state->dest_ring->base_addr_owner_space_unaligned,
1737 ce_state->dest_ring->base_addr_ce_space_unaligned);
1738 kfree(ce_state->dest_ring);
1739 }
1740
1741 ce_state->src_ring = NULL;
1742 ce_state->dest_ring = NULL;
1743}
1744
1745static void _ath10k_ce_free_pipe_64(struct ath10k *ar, int ce_id)
1746{
1747 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1748 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
1749
1750 if (ce_state->src_ring) {
1751 if (ar->hw_params.shadow_reg_support)
1752 kfree(ce_state->src_ring->shadow_base_unaligned);
1753 dma_free_coherent(ar->dev,
1754 (ce_state->src_ring->nentries *
1755 sizeof(struct ce_desc_64) +
1756 CE_DESC_RING_ALIGN),
1757 ce_state->src_ring->base_addr_owner_space_unaligned,
1758 ce_state->src_ring->base_addr_ce_space_unaligned);
1759 kfree(ce_state->src_ring);
1760 }
1761
1762 if (ce_state->dest_ring) {
1763 dma_free_coherent(ar->dev,
1764 (ce_state->dest_ring->nentries *
1765 sizeof(struct ce_desc_64) +
1766 CE_DESC_RING_ALIGN),
1767 ce_state->dest_ring->base_addr_owner_space_unaligned,
1768 ce_state->dest_ring->base_addr_ce_space_unaligned);
1769 kfree(ce_state->dest_ring);
1770 }
1771
1772 ce_state->src_ring = NULL;
1773 ce_state->dest_ring = NULL;
1774}
1775
1776void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
1777{
1778 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1779 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
1780
1781 ce_state->ops->ce_free_pipe(ar, ce_id);
1782}
1783EXPORT_SYMBOL(ath10k_ce_free_pipe);
1784
1785void ath10k_ce_dump_registers(struct ath10k *ar,
1786 struct ath10k_fw_crash_data *crash_data)
1787{
1788 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1789 struct ath10k_ce_crash_data ce_data;
1790 u32 addr, id;
1791
1792 lockdep_assert_held(&ar->dump_mutex);
1793
1794 ath10k_err(ar, "Copy Engine register dump:\n");
1795
1796 spin_lock_bh(&ce->ce_lock);
1797 for (id = 0; id < CE_COUNT; id++) {
1798 addr = ath10k_ce_base_address(ar, id);
1799 ce_data.base_addr = cpu_to_le32(addr);
1800
1801 ce_data.src_wr_idx =
1802 cpu_to_le32(ath10k_ce_src_ring_write_index_get(ar, addr));
1803 ce_data.src_r_idx =
1804 cpu_to_le32(ath10k_ce_src_ring_read_index_get(ar, addr));
1805 ce_data.dst_wr_idx =
1806 cpu_to_le32(ath10k_ce_dest_ring_write_index_get(ar, addr));
1807 ce_data.dst_r_idx =
1808 cpu_to_le32(ath10k_ce_dest_ring_read_index_get(ar, addr));
1809
1810 if (crash_data)
1811 crash_data->ce_crash_data[id] = ce_data;
1812
1813 ath10k_err(ar, "[%02d]: 0x%08x %3u %3u %3u %3u", id,
1814 le32_to_cpu(ce_data.base_addr),
1815 le32_to_cpu(ce_data.src_wr_idx),
1816 le32_to_cpu(ce_data.src_r_idx),
1817 le32_to_cpu(ce_data.dst_wr_idx),
1818 le32_to_cpu(ce_data.dst_r_idx));
1819 }
1820
1821 spin_unlock_bh(&ce->ce_lock);
1822}
1823EXPORT_SYMBOL(ath10k_ce_dump_registers);
1824
/* Ops table for targets using 32-bit CE descriptors (struct ce_desc).
 * These targets carry the whole ring base address in one register, so
 * the *_base_addr_hi hooks are left NULL.
 */
static const struct ath10k_ce_ops ce_ops = {
	.ce_alloc_src_ring = ath10k_ce_alloc_src_ring,
	.ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring,
	.ce_rx_post_buf = __ath10k_ce_rx_post_buf,
	.ce_completed_recv_next_nolock = _ath10k_ce_completed_recv_next_nolock,
	.ce_revoke_recv_next = _ath10k_ce_revoke_recv_next,
	.ce_extract_desc_data = ath10k_ce_extract_desc_data,
	.ce_free_pipe = _ath10k_ce_free_pipe,
	.ce_send_nolock = _ath10k_ce_send_nolock,
	.ce_set_src_ring_base_addr_hi = NULL,
	.ce_set_dest_ring_base_addr_hi = NULL,
	.ce_completed_send_next_nolock = _ath10k_ce_completed_send_next_nolock,
};
1838
/* Ops table for targets using 64-bit CE descriptors (struct
 * ce_desc_64), currently selected for WCN3990 in ath10k_ce_set_ops().
 * Includes hooks to program the high halves of the ring base
 * addresses.
 */
static const struct ath10k_ce_ops ce_64_ops = {
	.ce_alloc_src_ring = ath10k_ce_alloc_src_ring_64,
	.ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring_64,
	.ce_rx_post_buf = __ath10k_ce_rx_post_buf_64,
	.ce_completed_recv_next_nolock =
		_ath10k_ce_completed_recv_next_nolock_64,
	.ce_revoke_recv_next = _ath10k_ce_revoke_recv_next_64,
	.ce_extract_desc_data = ath10k_ce_extract_desc_data_64,
	.ce_free_pipe = _ath10k_ce_free_pipe_64,
	.ce_send_nolock = _ath10k_ce_send_nolock_64,
	.ce_set_src_ring_base_addr_hi = ath10k_ce_set_src_ring_base_addr_hi,
	.ce_set_dest_ring_base_addr_hi = ath10k_ce_set_dest_ring_base_addr_hi,
	.ce_completed_send_next_nolock = _ath10k_ce_completed_send_next_nolock_64,
};
1853
1854static void ath10k_ce_set_ops(struct ath10k *ar,
1855 struct ath10k_ce_pipe *ce_state)
1856{
1857 switch (ar->hw_rev) {
1858 case ATH10K_HW_WCN3990:
1859 ce_state->ops = &ce_64_ops;
1860 break;
1861 default:
1862 ce_state->ops = &ce_ops;
1863 break;
1864 }
1865}
1866
1867int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
1868 const struct ce_attr *attr)
1869{
1870 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1871 struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
1872 int ret;
1873
1874 ath10k_ce_set_ops(ar, ce_state);
1875 /* Make sure there's enough CE ringbuffer entries for HTT TX to avoid
1876 * additional TX locking checks.
1877 *
1878 * For the lack of a better place do the check here.
1879 */
1880 BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
1881 (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
1882 BUILD_BUG_ON(2 * TARGET_10_4_NUM_MSDU_DESC_PFC >
1883 (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
1884 BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
1885 (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
1886
1887 ce_state->ar = ar;
1888 ce_state->id = ce_id;
1889 ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
1890 ce_state->attr_flags = attr->flags;
1891 ce_state->src_sz_max = attr->src_sz_max;
1892
1893 if (attr->src_nentries)
1894 ce_state->send_cb = attr->send_cb;
1895
1896 if (attr->dest_nentries)
1897 ce_state->recv_cb = attr->recv_cb;
1898
1899 if (attr->src_nentries) {
1900 ce_state->src_ring =
1901 ce_state->ops->ce_alloc_src_ring(ar, ce_id, attr);
1902 if (IS_ERR(ce_state->src_ring)) {
1903 ret = PTR_ERR(ce_state->src_ring);
1904 ath10k_err(ar, "failed to alloc CE src ring %d: %d\n",
1905 ce_id, ret);
1906 ce_state->src_ring = NULL;
1907 return ret;
1908 }
1909 }
1910
1911 if (attr->dest_nentries) {
1912 ce_state->dest_ring = ce_state->ops->ce_alloc_dst_ring(ar,
1913 ce_id,
1914 attr);
1915 if (IS_ERR(ce_state->dest_ring)) {
1916 ret = PTR_ERR(ce_state->dest_ring);
1917 ath10k_err(ar, "failed to alloc CE dest ring %d: %d\n",
1918 ce_id, ret);
1919 ce_state->dest_ring = NULL;
1920 return ret;
1921 }
1922 }
1923
1924 return 0;
1925}
1926EXPORT_SYMBOL(ath10k_ce_alloc_pipe);
1927
1928void ath10k_ce_alloc_rri(struct ath10k *ar)
1929{
1930 int i;
1931 u32 value;
1932 u32 ctrl1_regs;
1933 u32 ce_base_addr;
1934 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1935
1936 ce->vaddr_rri = dma_alloc_coherent(ar->dev,
1937 (CE_COUNT * sizeof(u32)),
1938 &ce->paddr_rri, GFP_KERNEL);
1939
1940 if (!ce->vaddr_rri)
1941 return;
1942
1943 ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_low,
1944 lower_32_bits(ce->paddr_rri));
1945 ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_high,
1946 (upper_32_bits(ce->paddr_rri) &
1947 CE_DESC_ADDR_HI_MASK));
1948
1949 for (i = 0; i < CE_COUNT; i++) {
1950 ctrl1_regs = ar->hw_ce_regs->ctrl1_regs->addr;
1951 ce_base_addr = ath10k_ce_base_address(ar, i);
1952 value = ath10k_ce_read32(ar, ce_base_addr + ctrl1_regs);
1953 value |= ar->hw_ce_regs->upd->mask;
1954 ath10k_ce_write32(ar, ce_base_addr + ctrl1_regs, value);
1955 }
1956}
1957EXPORT_SYMBOL(ath10k_ce_alloc_rri);
1958
1959void ath10k_ce_free_rri(struct ath10k *ar)
1960{
1961 struct ath10k_ce *ce = ath10k_ce_priv(ar);
1962
1963 dma_free_coherent(ar->dev, (CE_COUNT * sizeof(u32)),
1964 ce->vaddr_rri,
1965 ce->paddr_rri);
1966}
1967EXPORT_SYMBOL(ath10k_ce_free_rri);