// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"

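/*
 * Resolve the idxd workqueue backing a DMA channel. The struct dma_chan is
 * embedded in struct idxd_dma_chan, so container_of() recovers the wrapper
 * and the wq pointer stored at channel registration time.
 */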
static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
{
	struct idxd_dma_chan *idxd_chan;

	idxd_chan = container_of(c, struct idxd_dma_chan, chan);
	return idxd_chan->wq;
}

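/*
 * Complete a descriptor on behalf of the dmaengine client: translate the
 * DSA completion status into a dmaengine_result, complete the cookie and
 * invoke the client callback, then optionally return the descriptor to the
 * wq free list. Descriptors that failed with DSA_COMP_INT_HANDLE_INVAL may
 * be queued for resubmission instead of being completed.
 */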
void idxd_dma_complete_txd(struct idxd_desc *desc,
			   enum idxd_complete_type comp_type,
			   bool free_desc)
{
	struct idxd_device *idxd = desc->wq->idxd;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_result res;
	int complete = 1;

	if (desc->completion->status == DSA_COMP_SUCCESS) {
		res.result = DMA_TRANS_NOERROR;
	} else if (desc->completion->status) {
		if (idxd->request_int_handles && comp_type != IDXD_COMPLETE_ABORT &&
		    desc->completion->status == DSA_COMP_INT_HANDLE_INVAL &&
		    idxd_queue_int_handle_resubmit(desc))
			return;
		res.result = DMA_TRANS_WRITE_FAILED;
	} else if (comp_type == IDXD_COMPLETE_ABORT) {
		res.result = DMA_TRANS_ABORTED;
	} else {
		complete = 0;
	}

	tx = &desc->txd;
	if (complete && tx->cookie) {
		dma_cookie_complete(tx);
		dma_descriptor_unmap(tx);
		dmaengine_desc_get_callback_invoke(tx, &res);
		tx->callback = NULL;
		tx->callback_result = NULL;
	}

	if (free_desc)
		idxd_free_desc(desc->wq, desc);
}

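/*
 * Build the descriptor flags: IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR always
 * request a completion record, and IDXD_OP_FLAG_RCI additionally requests a
 * completion interrupt when the client passed DMA_PREP_INTERRUPT.
 */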
static void op_flag_setup(unsigned long flags, u32 *desc_flags)
{
	*desc_flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR;
	if (flags & DMA_PREP_INTERRUPT)
		*desc_flags |= IDXD_OP_FLAG_RCI;
}

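/*
 * Fill the fields shared by all hardware descriptors submitted by this
 * driver: opcode, source/destination addresses, transfer size, completion
 * record address and operation flags.
 */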
static inline void idxd_prep_desc_common(struct idxd_wq *wq,
					  struct dsa_hw_desc *hw, char opcode,
					  u64 addr_f1, u64 addr_f2, u64 len,
					  u64 compl, u32 flags)
{
	hw->flags = flags;
	hw->opcode = opcode;
	hw->src_addr = addr_f1;
	hw->dst_addr = addr_f2;
	hw->xfer_size = len;
	/*
	 * For dedicated WQ, this field is ignored and HW will use the WQCFG.priv
	 * field instead. This field should be set to 1 for kernel descriptors.
	 */
	hw->priv = 1;
	hw->completion_addr = compl;
}

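/*
 * Prepare an interrupt-only transaction by queueing a DSA no-op descriptor
 * whose completion interrupt signals the client. Returns NULL if the wq is
 * not enabled or no descriptor can be allocated.
 */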
static struct dma_async_tx_descriptor *
idxd_dma_prep_interrupt(struct dma_chan *c, unsigned long flags)
{
	struct idxd_wq *wq = to_idxd_wq(c);
	u32 desc_flags;
	struct idxd_desc *desc;

	if (wq->state != IDXD_WQ_ENABLED)
		return NULL;

	op_flag_setup(flags, &desc_flags);
	desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
	if (IS_ERR(desc))
		return NULL;

	idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_NOOP,
			      0, 0, 0, desc->compl_dma, desc_flags);
	desc->txd.flags = flags;
	return &desc->txd;
}

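/*
 * Prepare a memcpy transaction as a DSA memory move descriptor. Transfers
 * larger than the device's max_xfer_bytes are rejected rather than split.
 */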
static struct dma_async_tx_descriptor *
idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
		       dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct idxd_wq *wq = to_idxd_wq(c);
	u32 desc_flags;
	struct idxd_device *idxd = wq->idxd;
	struct idxd_desc *desc;

	if (wq->state != IDXD_WQ_ENABLED)
		return NULL;

	if (len > idxd->max_xfer_bytes)
		return NULL;

	op_flag_setup(flags, &desc_flags);
	desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
	if (IS_ERR(desc))
		return NULL;

	idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_MEMMOVE,
			      dma_src, dma_dest, len, desc->compl_dma,
			      desc_flags);

	desc->txd.flags = flags;

	return &desc->txd;
}

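/*
 * Channel resource hooks only track the wq reference count; descriptor
 * allocation is handled by the wq itself.
 */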
static int idxd_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct idxd_wq *wq = to_idxd_wq(chan);
	struct device *dev = &wq->idxd->pdev->dev;

	idxd_wq_get(wq);
	dev_dbg(dev, "%s: client_count: %d\n", __func__,
		idxd_wq_refcount(wq));
	return 0;
}

static void idxd_dma_free_chan_resources(struct dma_chan *chan)
{
	struct idxd_wq *wq = to_idxd_wq(chan);
	struct device *dev = &wq->idxd->pdev->dev;

	idxd_wq_put(wq);
	dev_dbg(dev, "%s: client_count: %d\n", __func__,
		idxd_wq_refcount(wq));
}

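/*
 * Completions are reported out of submission order, so per-cookie progress
 * is not tracked; clients must rely on completion callbacks instead
 * (DMA_COMPLETION_NO_ORDER is set on the device below).
 */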
static enum dma_status idxd_dma_tx_status(struct dma_chan *dma_chan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	return DMA_OUT_OF_ORDER;
}

/*
 * issue_pending() does not need to do anything since tx_submit() does the job
 * already.
 */
static void idxd_dma_issue_pending(struct dma_chan *dma_chan)
{
}

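/*
 * Assign a cookie and hand the descriptor to the hardware immediately. On
 * submission failure the descriptor is freed and the negative error code is
 * returned in place of a cookie.
 */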
static dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct idxd_wq *wq = to_idxd_wq(c);
	dma_cookie_t cookie;
	int rc;
	struct idxd_desc *desc = container_of(tx, struct idxd_desc, txd);

	cookie = dma_cookie_assign(tx);

	rc = idxd_submit_desc(wq, desc);
	if (rc < 0) {
		idxd_free_desc(wq, desc);
		return rc;
	}

	return cookie;
}

static void idxd_dma_release(struct dma_device *device)
{
	struct idxd_dma_dev *idxd_dma = container_of(device, struct idxd_dma_dev, dma);

	kfree(idxd_dma);
}

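/*
 * Allocate and register the dmaengine device for an idxd device. The
 * interrupt capability is always advertised; memcpy is only advertised when
 * the hardware operation capability reports memory move support.
 */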
int idxd_register_dma_device(struct idxd_device *idxd)
{
	struct idxd_dma_dev *idxd_dma;
	struct dma_device *dma;
	struct device *dev = &idxd->pdev->dev;
	int rc;

	idxd_dma = kzalloc_node(sizeof(*idxd_dma), GFP_KERNEL, dev_to_node(dev));
	if (!idxd_dma)
		return -ENOMEM;

	dma = &idxd_dma->dma;
	INIT_LIST_HEAD(&dma->channels);
	dma->dev = dev;

	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
	dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
	dma->device_release = idxd_dma_release;

	dma->device_prep_dma_interrupt = idxd_dma_prep_interrupt;
	if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) {
		dma_cap_set(DMA_MEMCPY, dma->cap_mask);
		dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy;
	}

	dma->device_tx_status = idxd_dma_tx_status;
	dma->device_issue_pending = idxd_dma_issue_pending;
	dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
	dma->device_free_chan_resources = idxd_dma_free_chan_resources;

	rc = dma_async_device_register(dma);
	if (rc < 0) {
		kfree(idxd_dma);
		return rc;
	}

	idxd_dma->idxd = idxd;
	/*
	 * This pointer is protected by the refs taken by the dma_chan. It will remain valid
	 * as long as there are outstanding channels.
	 */
	idxd->idxd_dma = idxd_dma;
	return 0;
}

void idxd_unregister_dma_device(struct idxd_device *idxd)
{
	dma_async_device_unregister(&idxd->idxd_dma->dma);
}

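/*
 * Create a DMA channel for a wq: initialize each preallocated descriptor's
 * dma_async_tx_descriptor against the channel, register the channel with the
 * dmaengine core, and pin the wq's conf device while the channel lives.
 */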
static int idxd_register_dma_channel(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct dma_device *dma = &idxd->idxd_dma->dma;
	struct device *dev = &idxd->pdev->dev;
	struct idxd_dma_chan *idxd_chan;
	struct dma_chan *chan;
	int rc, i;

	idxd_chan = kzalloc_node(sizeof(*idxd_chan), GFP_KERNEL, dev_to_node(dev));
	if (!idxd_chan)
		return -ENOMEM;

	chan = &idxd_chan->chan;
	chan->device = dma;
	list_add_tail(&chan->device_node, &dma->channels);

	for (i = 0; i < wq->num_descs; i++) {
		struct idxd_desc *desc = wq->descs[i];

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = idxd_dma_tx_submit;
	}

	rc = dma_async_device_channel_register(dma, chan);
	if (rc < 0) {
		kfree(idxd_chan);
		return rc;
	}

	wq->idxd_chan = idxd_chan;
	idxd_chan->wq = wq;
	get_device(wq_confdev(wq));

	return 0;
}

static void idxd_unregister_dma_channel(struct idxd_wq *wq)
{
	struct idxd_dma_chan *idxd_chan = wq->idxd_chan;
	struct dma_chan *chan = &idxd_chan->chan;
	struct idxd_dma_dev *idxd_dma = wq->idxd->idxd_dma;

	dma_async_device_channel_unregister(&idxd_dma->dma, chan);
	list_del(&chan->device_node);
	kfree(wq->idxd_chan);
	wq->idxd_chan = NULL;
	put_device(wq_confdev(wq));
}

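/*
 * Device driver probe for the "dmaengine" wq type: mark the wq as a kernel
 * wq, enable it, and expose it as a DMA channel. Errors unwind under the wq
 * lock so the wq is left disabled and untyped.
 */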
static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
{
	struct device *dev = &idxd_dev->conf_dev;
	struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
	struct idxd_device *idxd = wq->idxd;
	int rc;

	if (idxd->state != IDXD_DEV_ENABLED)
		return -ENXIO;

	mutex_lock(&wq->wq_lock);
	wq->type = IDXD_WQT_KERNEL;

	rc = drv_enable_wq(wq);
	if (rc < 0) {
		dev_dbg(dev, "Enable wq %d failed: %d\n", wq->id, rc);
		rc = -ENXIO;
		goto err;
	}

	rc = idxd_register_dma_channel(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_DMA_CHAN_ERR;
		dev_dbg(dev, "Failed to register dma channel\n");
		goto err_dma;
	}

	idxd->cmd_status = 0;
	mutex_unlock(&wq->wq_lock);
	return 0;

err_dma:
	drv_disable_wq(wq);
err:
	wq->type = IDXD_WQT_NONE;
	mutex_unlock(&wq->wq_lock);
	return rc;
}

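/*
 * Remove path: quiesce outstanding descriptors, tear down the DMA channel,
 * then disable the wq, all under the wq lock.
 */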
static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev)
{
	struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);

	mutex_lock(&wq->wq_lock);
	__idxd_wq_quiesce(wq);
	idxd_unregister_dma_channel(wq);
	drv_disable_wq(wq);
	mutex_unlock(&wq->wq_lock);
}

static enum idxd_dev_type dev_types[] = {
	IDXD_DEV_WQ,
	IDXD_DEV_NONE,
};

struct idxd_device_driver idxd_dmaengine_drv = {
	.probe = idxd_dmaengine_drv_probe,
	.remove = idxd_dmaengine_drv_remove,
	.name = "dmaengine",
	.type = dev_types,
};
EXPORT_SYMBOL_GPL(idxd_dmaengine_drv);