/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 */
#ifndef IOATDMA_H
#define IOATDMA_H

#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/dmapool.h>
#include <linux/cache.h>
#include <linux/pci_ids.h>
#include <linux/circ_buf.h>
#include <linux/interrupt.h>
#include "registers.h"
#include "hw.h"

#define IOAT_DMA_VERSION "5.00"

#define IOAT_DMA_DCA_ANY_CPU ~0

#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, dma_dev)
#define to_dev(ioat_chan) (&(ioat_chan)->ioat_dma->pdev->dev)
#define to_pdev(ioat_chan) ((ioat_chan)->ioat_dma->pdev)

#define chan_num(ch) ((int)((ch)->reg_base - (ch)->ioat_dma->reg_base) / 0x80)
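
/*
 * Channel register windows are spaced 0x80 bytes apart in the device's MMIO
 * region, so dividing the distance between a channel's reg_base and the
 * device reg_base by that stride recovers the channel number.
 */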

/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)
#define ndest_to_sw(x) ((x) + 1)
#define ndest_to_hw(x) ((x) - 1)
#define src16_cnt_to_sw(x) ((x) + 9)
#define src16_cnt_to_hw(x) ((x) - 9)
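
/*
 * The hardware descriptor fields store these counts biased: a hardware
 * source count of 0 means two sources (and a destination count of 0 means
 * one destination), while the 16-source extended format is biased by 9.
 * So, e.g., src_cnt_to_hw(8) encodes an 8-source operation as 6.
 */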

/*
 * workaround for IOAT ver.3.0 null descriptor issue
 * (channel returns error when size is 0)
 */
#define NULL_DESC_BUFFER_SIZE 1

enum ioat_irq_mode {
	IOAT_NOIRQ = 0,
	IOAT_MSIX,
	IOAT_MSI,
	IOAT_INTX
};

/**
 * struct ioatdma_device - internal representation of an IOAT device
 * @pdev: PCI-Express device
 * @reg_base: MMIO register space base address
 * @completion_pool: DMA buffers for completion ops
 * @sed_hw_pool: DMA super descriptor pools
 * @dma_dev: embedded struct dma_device
 * @version: version of ioatdma device
 * @msix_entries: irq handlers
 * @idx: per channel data
 * @dca: direct cache access context
 * @irq_mode: interrupt mode (INTX, MSI, MSIX)
 * @cap: read DMA capabilities register
 */
struct ioatdma_device {
	struct pci_dev *pdev;
	void __iomem *reg_base;
	struct dma_pool *completion_pool;
#define MAX_SED_POOLS 5
	struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
	struct dma_device dma_dev;
	u8 version;
#define IOAT_MAX_CHANS 4
	struct msix_entry msix_entries[IOAT_MAX_CHANS];
	struct ioatdma_chan *idx[IOAT_MAX_CHANS];
	struct dca_provider *dca;
	enum ioat_irq_mode irq_mode;
	u32 cap;

	/* shadow version for CB3.3 chan reset errata workaround */
	u64 msixtba0;
	u64 msixdata0;
	u32 msixpba;
};

#define IOAT_MAX_ORDER 16
#define IOAT_MAX_DESCS (1 << IOAT_MAX_ORDER)
#define IOAT_CHUNK_SIZE (SZ_512K)
#define IOAT_DESCS_PER_CHUNK (IOAT_CHUNK_SIZE / IOAT_DESC_SZ)
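
/*
 * Ring memory is carved out of 512 KiB chunks; with the 64-byte descriptor
 * size (IOAT_DESC_SZ, defined in hw.h) that is 8192 descriptors per chunk,
 * so a maximal 2^16-entry ring needs 8 chunks, which is the size of the
 * descs[] array in struct ioatdma_chan below.
 */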

struct ioat_descs {
	void *virt;
	dma_addr_t hw;
};

struct ioatdma_chan {
	struct dma_chan dma_chan;
	void __iomem *reg_base;
	dma_addr_t last_completion;
	spinlock_t cleanup_lock;
	unsigned long state;
	#define IOAT_CHAN_DOWN 0
	#define IOAT_COMPLETION_ACK 1
	#define IOAT_RESET_PENDING 2
	#define IOAT_KOBJ_INIT_FAIL 3
	#define IOAT_RUN 5
	#define IOAT_CHAN_ACTIVE 6
	struct timer_list timer;
	#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
	#define IDLE_TIMEOUT msecs_to_jiffies(2000)
	#define RESET_DELAY msecs_to_jiffies(100)
	struct ioatdma_device *ioat_dma;
	dma_addr_t completion_dma;
	u64 *completion;
	struct tasklet_struct cleanup_task;
	struct kobject kobj;

/* ioat v2 / v3 channel attributes
 * @xfercap_log: log2 of channel max transfer length (for fast division)
 * @head: allocated index
 * @issued: hardware notification point
 * @tail: cleanup index
 * @dmacount: identical to 'head' except for occasionally resetting to zero
 * @alloc_order: log2 of the number of allocated descriptors
 * @produce: number of descriptors to produce at submit time
 * @ring: software ring buffer implementation of hardware ring
 * @prep_lock: serializes descriptor preparation (producers)
 */
	size_t xfercap_log;
	u16 head;
	u16 issued;
	u16 tail;
	u16 dmacount;
	u16 alloc_order;
	u16 produce;
	struct ioat_ring_ent **ring;
	spinlock_t prep_lock;
	struct ioat_descs descs[IOAT_MAX_DESCS / IOAT_DESCS_PER_CHUNK];
	int desc_chunks;
	int intr_coalesce;
	int prev_intr_coalesce;
};
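
/*
 * Ring bookkeeping: head advances as descriptors are prepared, issued marks
 * how far the hardware has been told to fetch, and tail advances as
 * completions are cleaned up, so tail <= issued <= head modulo the ring
 * size. The u16 indices wrap freely and are only masked by (ring size - 1)
 * when the ring array is dereferenced.
 */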

struct ioat_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct dma_chan *, char *);
	ssize_t (*store)(struct dma_chan *, const char *, size_t);
};

/**
 * struct ioat_sed_ent - wrapper around super extended hardware descriptor
 * @hw: hardware SED
 * @dma: dma address for the SED
 * @parent: pointer to the parent dma descriptor
 * @hw_pool: descriptor pool index
 */
struct ioat_sed_ent {
	struct ioat_sed_raw_descriptor *hw;
	dma_addr_t dma;
	struct ioat_ring_ent *parent;
	unsigned int hw_pool;
};

/**
 * struct ioat_ring_ent - wrapper around hardware descriptor
 * @hw: hardware DMA descriptor (for memcpy)
 * @xor: hardware xor descriptor
 * @xor_ex: hardware xor extension descriptor
 * @pq: hardware pq descriptor
 * @pq_ex: hardware pq extension descriptor
 * @pqu: hardware pq update descriptor
 * @raw: hardware raw (un-typed) descriptor
 * @txd: the generic software descriptor for all engines
 * @len: total transaction length for unmap
 * @result: asynchronous result of validate operations
 * @id: identifier for debug
 * @sed: pointer to super extended descriptor sw desc
 */
struct ioat_ring_ent {
	union {
		struct ioat_dma_descriptor *hw;
		struct ioat_xor_descriptor *xor;
		struct ioat_xor_ext_descriptor *xor_ex;
		struct ioat_pq_descriptor *pq;
		struct ioat_pq_ext_descriptor *pq_ex;
		struct ioat_pq_update_descriptor *pqu;
		struct ioat_raw_descriptor *raw;
	};
	size_t len;
	struct dma_async_tx_descriptor txd;
	enum sum_check_flags *result;
	#ifdef DEBUG
	int id;
	#endif
	struct ioat_sed_ent *sed;
};

extern const struct sysfs_ops ioat_sysfs_ops;
extern struct ioat_sysfs_entry ioat_version_attr;
extern struct ioat_sysfs_entry ioat_cap_attr;
extern int ioat_pending_level;
extern int ioat_ring_alloc_order;
extern struct kobj_type ioat_ktype;
extern struct kmem_cache *ioat_cache;
extern int ioat_ring_max_alloc_order;
extern struct kmem_cache *ioat_sed_cache;

static inline struct ioatdma_chan *to_ioat_chan(struct dma_chan *c)
{
	return container_of(c, struct ioatdma_chan, dma_chan);
}

/* wrapper around hardware descriptor format + additional software fields */
#ifdef DEBUG
#define set_desc_id(desc, i) ((desc)->id = (i))
#define desc_id(desc) ((desc)->id)
#else
#define set_desc_id(desc, i)
#define desc_id(desc) (0)
#endif

static inline void
__dump_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_dma_descriptor *hw,
		struct dma_async_tx_descriptor *tx, int id)
{
	struct device *dev = to_dev(ioat_chan);

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x"
		" ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n", id,
		(unsigned long long) tx->phys,
		(unsigned long long) hw->next, tx->cookie, tx->flags,
		hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write);
}

#define dump_desc_dbg(c, d) \
	({ if (d) __dump_desc_dbg(c, d->hw, &d->txd, desc_id(d)); 0; })
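
/*
 * dump_desc_dbg() is a GNU statement expression so it can appear where an
 * expression is expected; it evaluates to 0 whether or not a descriptor
 * was dumped.
 */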

static inline struct ioatdma_chan *
ioat_chan_by_index(struct ioatdma_device *ioat_dma, int index)
{
	return ioat_dma->idx[index];
}

static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan)
{
	return readq(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET);
}

static inline u64 ioat_chansts_to_addr(u64 status)
{
	return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
}
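
/*
 * CHANSTS packs two things into one 64-bit register: the low bits hold the
 * channel status (masked by IOAT_CHANSTS_STATUS in the helpers below) and
 * the remaining bits hold the bus address of the last completed descriptor,
 * which ioat_chansts_to_addr() extracts.
 */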

static inline u32 ioat_chanerr(struct ioatdma_chan *ioat_chan)
{
	return readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
}

static inline void ioat_suspend(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;

	writeb(IOAT_CHANCMD_SUSPEND,
	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline void ioat_reset(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;

	writeb(IOAT_CHANCMD_RESET,
	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline bool ioat_reset_pending(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;
	u8 cmd;

	cmd = readb(ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
	return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
}

static inline bool is_ioat_active(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE);
}

static inline bool is_ioat_idle(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_DONE);
}

static inline bool is_ioat_halted(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED);
}

static inline bool is_ioat_suspended(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED);
}

/* channel was fatally programmed */
static inline bool is_ioat_bug(unsigned long err)
{
	return !!err;
}

static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
{
	return 1 << ioat_chan->alloc_order;
}

/* count of descriptors in flight with the engine */
static inline u16 ioat_ring_active(struct ioatdma_chan *ioat_chan)
{
	return CIRC_CNT(ioat_chan->head, ioat_chan->tail,
			ioat_ring_size(ioat_chan));
}

/* count of descriptors pending submission to hardware */
static inline u16 ioat_ring_pending(struct ioatdma_chan *ioat_chan)
{
	return CIRC_CNT(ioat_chan->head, ioat_chan->issued,
			ioat_ring_size(ioat_chan));
}

static inline u32 ioat_ring_space(struct ioatdma_chan *ioat_chan)
{
	return ioat_ring_size(ioat_chan) - ioat_ring_active(ioat_chan);
}
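
/*
 * A worked example of the circular accounting: with alloc_order = 16
 * (ring size 65536), head = 10, issued = 7 and tail = 3, CIRC_CNT gives
 * ioat_ring_active() = 7 descriptors still owned by the engine,
 * ioat_ring_pending() = 3 prepared but not yet issued, and
 * ioat_ring_space() = 65529 free slots.
 */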

static inline u16
ioat_xferlen_to_descs(struct ioatdma_chan *ioat_chan, size_t len)
{
	u16 num_descs = len >> ioat_chan->xfercap_log;

	num_descs += !!(len & ((1 << ioat_chan->xfercap_log) - 1));
	return num_descs;
}
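
/*
 * This is a ceiling division by the channel's maximum transfer size:
 * e.g. with xfercap_log = 20 (1 MiB per descriptor), a 2.5 MiB request
 * needs 2 full descriptors plus 1 partial, so 3 in total.
 */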

static inline struct ioat_ring_ent *
ioat_get_ring_ent(struct ioatdma_chan *ioat_chan, u16 idx)
{
	return ioat_chan->ring[idx & (ioat_ring_size(ioat_chan) - 1)];
}
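
/*
 * The ring size is always a power of two, so masking with (size - 1) wraps
 * a monotonically increasing index into the ring without a modulo.
 */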

static inline void
ioat_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr)
{
	writel(addr & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
	writel(addr >> 32,
	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
}
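
/*
 * The 64-bit descriptor chain address is programmed as two 32-bit MMIO
 * writes, low word first, then high word.
 */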

/* IOAT Prep functions */
struct dma_async_tx_descriptor *
ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			  dma_addr_t dma_src, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
	      unsigned int src_cnt, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
		  unsigned int src_cnt, size_t len,
		  enum sum_check_flags *result, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
	     unsigned int src_cnt, const unsigned char *scf, size_t len,
	     unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		 unsigned int src_cnt, const unsigned char *scf, size_t len,
		 enum sum_check_flags *pqres, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
		    unsigned int src_cnt, size_t len,
		    enum sum_check_flags *result, unsigned long flags);

/* IOAT Operation functions */
irqreturn_t ioat_dma_do_interrupt(int irq, void *data);
irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data);
struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags);
void ioat_start_null_desc(struct ioatdma_chan *ioat_chan);
void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan);
int ioat_reset_hw(struct ioatdma_chan *ioat_chan);
enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
	       struct dma_tx_state *txstate);
void ioat_cleanup_event(unsigned long data);
void ioat_timer_event(struct timer_list *t);
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs);
void ioat_issue_pending(struct dma_chan *chan);
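
/*
 * A minimal sketch of how a dmaengine consumer would reach these routines
 * through the generic API, assuming the driver registered
 * ioat_dma_prep_memcpy_lock as its device_prep_dma_memcpy hook (channel
 * acquisition, DMA mapping and error handling elided):
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						   DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);	 (backed by ioat_issue_pending)
 *	dma_sync_wait(chan, cookie);
 */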

/* IOAT Init functions */
bool is_bwd_ioat(struct pci_dev *pdev);
struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type);
void ioat_kobject_del(struct ioatdma_device *ioat_dma);
int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma);
void ioat_stop(struct ioatdma_chan *ioat_chan);
#endif /* IOATDMA_H */