Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "registers.h"
#include "idxd.h"

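/* Resolve the idxd workqueue behind a generic dma_chan via its idxd_dma_chan wrapper. */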
static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
{
        struct idxd_dma_chan *idxd_chan;

        idxd_chan = container_of(c, struct idxd_dma_chan, chan);
        return idxd_chan->wq;
}

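/*
 * Translate the DSA completion record status into a dmaengine result and,
 * when the descriptor has actually finished (success, error or abort),
 * complete its cookie, unmap the transfer and fire the client callback once.
 */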
void idxd_dma_complete_txd(struct idxd_desc *desc,
                           enum idxd_complete_type comp_type)
{
        struct dma_async_tx_descriptor *tx;
        struct dmaengine_result res;
        int complete = 1;

        if (desc->completion->status == DSA_COMP_SUCCESS)
                res.result = DMA_TRANS_NOERROR;
        else if (desc->completion->status)
                res.result = DMA_TRANS_WRITE_FAILED;
        else if (comp_type == IDXD_COMPLETE_ABORT)
                res.result = DMA_TRANS_ABORTED;
        else
                complete = 0;

        tx = &desc->txd;
        if (complete && tx->cookie) {
                dma_cookie_complete(tx);
                dma_descriptor_unmap(tx);
                dmaengine_desc_get_callback_invoke(tx, &res);
                tx->callback = NULL;
                tx->callback_result = NULL;
        }
}

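/*
 * Build the hardware descriptor flags: always request a completion record
 * at a valid address (RCR | CRAV), and additionally a completion interrupt
 * (RCI) when the client passed DMA_PREP_INTERRUPT.
 */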
static void op_flag_setup(unsigned long flags, u32 *desc_flags)
{
        *desc_flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR;
        if (flags & DMA_PREP_INTERRUPT)
                *desc_flags |= IDXD_OP_FLAG_RCI;
}

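/* The completion record lives at the descriptor's preallocated DMA address. */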
static inline void set_completion_address(struct idxd_desc *desc,
                                          u64 *compl_addr)
{
        *compl_addr = desc->compl_dma;
}

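/*
 * Fill the fields common to all DSA descriptors. Descriptors issued on a
 * kernel-owned workqueue are marked privileged via the priv bit.
 */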
static inline void idxd_prep_desc_common(struct idxd_wq *wq,
                                         struct dsa_hw_desc *hw, char opcode,
                                         u64 addr_f1, u64 addr_f2, u64 len,
                                         u64 compl, u32 flags)
{
        hw->flags = flags;
        hw->opcode = opcode;
        hw->src_addr = addr_f1;
        hw->dst_addr = addr_f2;
        hw->xfer_size = len;
        hw->priv = !!(wq->type == IDXD_WQT_KERNEL);
        hw->completion_addr = compl;
}

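/*
 * Prepare, but do not submit, a DSA MEMMOVE descriptor for a memcpy request.
 * Returns NULL if the workqueue is not enabled, the length exceeds the
 * device's transfer limit, or no descriptor can be allocated.
 */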
static struct dma_async_tx_descriptor *
idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
                       dma_addr_t dma_src, size_t len, unsigned long flags)
{
        struct idxd_wq *wq = to_idxd_wq(c);
        u32 desc_flags;
        struct idxd_device *idxd = wq->idxd;
        struct idxd_desc *desc;

        if (wq->state != IDXD_WQ_ENABLED)
                return NULL;

        if (len > idxd->max_xfer_bytes)
                return NULL;

        op_flag_setup(flags, &desc_flags);
        desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
        if (IS_ERR(desc))
                return NULL;

        idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_MEMMOVE,
                              dma_src, dma_dest, len, desc->compl_dma,
                              desc_flags);

        desc->txd.flags = flags;

        return &desc->txd;
}

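/* A dmaengine client grabbed the channel: take a reference on the wq. */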
static int idxd_dma_alloc_chan_resources(struct dma_chan *chan)
{
        struct idxd_wq *wq = to_idxd_wq(chan);
        struct device *dev = &wq->idxd->pdev->dev;

        idxd_wq_get(wq);
        dev_dbg(dev, "%s: client_count: %d\n", __func__,
                idxd_wq_refcount(wq));
        return 0;
}

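/* The client released the channel: drop the wq reference. */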
static void idxd_dma_free_chan_resources(struct dma_chan *chan)
{
        struct idxd_wq *wq = to_idxd_wq(chan);
        struct device *dev = &wq->idxd->pdev->dev;

        idxd_wq_put(wq);
        dev_dbg(dev, "%s: client_count: %d\n", __func__,
                idxd_wq_refcount(wq));
}

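/*
 * The device may complete descriptors out of order, so per-cookie progress
 * cannot be reported; advertise DMA_OUT_OF_ORDER instead.
 */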
static enum dma_status idxd_dma_tx_status(struct dma_chan *dma_chan,
                                          dma_cookie_t cookie,
                                          struct dma_tx_state *txstate)
{
        return DMA_OUT_OF_ORDER;
}

/*
 * issue_pending() does not need to do anything since tx_submit() does the job
 * already.
 */
static void idxd_dma_issue_pending(struct dma_chan *dma_chan)
{
}

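/*
 * Assign a cookie and hand the descriptor straight to the device; there is
 * no software pending queue, which is why issue_pending() is a no-op.
 */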
static dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct dma_chan *c = tx->chan;
        struct idxd_wq *wq = to_idxd_wq(c);
        dma_cookie_t cookie;
        int rc;
        struct idxd_desc *desc = container_of(tx, struct idxd_desc, txd);

        cookie = dma_cookie_assign(tx);

        rc = idxd_submit_desc(wq, desc);
        if (rc < 0) {
                idxd_free_desc(wq, desc);
                return rc;
        }

        return cookie;
}

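/* Called by the dmaengine core once the last reference on the device drops. */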
static void idxd_dma_release(struct dma_device *device)
{
        struct idxd_dma_dev *idxd_dma = container_of(device, struct idxd_dma_dev, dma);

        kfree(idxd_dma);
}

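/*
 * Allocate and register the dmaengine device for this idxd device. Channels
 * are private and complete out of order; memcpy support is advertised only
 * if the hardware opcode capability includes MEMMOVE.
 */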
int idxd_register_dma_device(struct idxd_device *idxd)
{
        struct idxd_dma_dev *idxd_dma;
        struct dma_device *dma;
        struct device *dev = &idxd->pdev->dev;
        int rc;

        idxd_dma = kzalloc_node(sizeof(*idxd_dma), GFP_KERNEL, dev_to_node(dev));
        if (!idxd_dma)
                return -ENOMEM;

        dma = &idxd_dma->dma;
        INIT_LIST_HEAD(&dma->channels);
        dma->dev = dev;

        dma_cap_set(DMA_PRIVATE, dma->cap_mask);
        dma_cap_set(DMA_COMPLETION_NO_ORDER, dma->cap_mask);
        dma->device_release = idxd_dma_release;

        if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) {
                dma_cap_set(DMA_MEMCPY, dma->cap_mask);
                dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy;
        }

        dma->device_tx_status = idxd_dma_tx_status;
        dma->device_issue_pending = idxd_dma_issue_pending;
        dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources;
        dma->device_free_chan_resources = idxd_dma_free_chan_resources;

        rc = dma_async_device_register(dma);
        if (rc < 0) {
                kfree(idxd_dma);
                return rc;
        }

        idxd_dma->idxd = idxd;
        /*
         * This pointer is protected by the refs taken by the dma_chan. It will remain valid
         * as long as there are outstanding channels.
         */
        idxd->idxd_dma = idxd_dma;
        return 0;
}

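/* Tear down the dmaengine device registration set up above. */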
void idxd_unregister_dma_device(struct idxd_device *idxd)
{
        dma_async_device_unregister(&idxd->idxd_dma->dma);
}

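/*
 * Expose a workqueue as a DMA channel: point every preallocated descriptor
 * at the channel, register it with the dmaengine core and pin the wq's
 * conf_dev so the underlying device stays alive while the channel exists.
 */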
int idxd_register_dma_channel(struct idxd_wq *wq)
{
        struct idxd_device *idxd = wq->idxd;
        struct dma_device *dma = &idxd->idxd_dma->dma;
        struct device *dev = &idxd->pdev->dev;
        struct idxd_dma_chan *idxd_chan;
        struct dma_chan *chan;
        int rc, i;

        idxd_chan = kzalloc_node(sizeof(*idxd_chan), GFP_KERNEL, dev_to_node(dev));
        if (!idxd_chan)
                return -ENOMEM;

        chan = &idxd_chan->chan;
        chan->device = dma;
        list_add_tail(&chan->device_node, &dma->channels);

        for (i = 0; i < wq->num_descs; i++) {
                struct idxd_desc *desc = wq->descs[i];

                dma_async_tx_descriptor_init(&desc->txd, chan);
                desc->txd.tx_submit = idxd_dma_tx_submit;
        }

        rc = dma_async_device_channel_register(dma, chan);
        if (rc < 0) {
                kfree(idxd_chan);
                return rc;
        }

        wq->idxd_chan = idxd_chan;
        idxd_chan->wq = wq;
        get_device(&wq->conf_dev);

        return 0;
}

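/* Undo idxd_register_dma_channel(): unregister, unlink and release the wq. */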
void idxd_unregister_dma_channel(struct idxd_wq *wq)
{
        struct idxd_dma_chan *idxd_chan = wq->idxd_chan;
        struct dma_chan *chan = &idxd_chan->chan;
        struct idxd_dma_dev *idxd_dma = wq->idxd->idxd_dma;

        dma_async_device_channel_unregister(&idxd_dma->dma, chan);
        list_del(&chan->device_node);
        kfree(wq->idxd_chan);
        wq->idxd_chan = NULL;
        put_device(&wq->conf_dev);
}