Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/sched/task.h>
#include <linux/intel-svm.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

struct idxd_cdev_context {
	const char *name;
	dev_t devt;
	struct ida minor_ida;
};

/*
 * ictx is an array of per-accelerator-type contexts; enum idxd_type is
 * used as the index.
 */
static struct idxd_cdev_context ictx[IDXD_TYPE_MAX] = {
	{ .name = "dsa" },
};

struct idxd_user_context {
	struct idxd_wq *wq;
	struct task_struct *task;
	unsigned int flags;
};

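/*
 * Cleanup mode for a workqueue's char device: CDEV_NORMAL tears down a
 * fully added cdev, while CDEV_FAILED unwinds a setup that failed before
 * cdev_add() succeeded.
 */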
enum idxd_cdev_cleanup {
	CDEV_NORMAL = 0,
	CDEV_FAILED,
};

static void idxd_cdev_dev_release(struct device *dev)
{
	dev_dbg(dev, "releasing cdev device\n");
	kfree(dev);
}

static struct device_type idxd_cdev_device_type = {
	.name = "idxd_cdev",
	.release = idxd_cdev_dev_release,
};

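/*
 * Helpers to walk from an opened inode back to the owning workqueue:
 * inode->i_cdev is embedded in struct idxd_cdev, which in turn is
 * embedded in struct idxd_wq.
 */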
static inline struct idxd_cdev *inode_idxd_cdev(struct inode *inode)
{
	struct cdev *cdev = inode->i_cdev;

	return container_of(cdev, struct idxd_cdev, cdev);
}

static inline struct idxd_wq *idxd_cdev_wq(struct idxd_cdev *idxd_cdev)
{
	return container_of(idxd_cdev, struct idxd_wq, idxd_cdev);
}

static inline struct idxd_wq *inode_wq(struct inode *inode)
{
	return idxd_cdev_wq(inode_idxd_cdev(inode));
}

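/*
 * open(): allocate a per-open user context and take a reference on the
 * workqueue. A dedicated workqueue admits only a single opener; opening
 * it again while it is in use fails with -EBUSY.
 */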
static int idxd_cdev_open(struct inode *inode, struct file *filp)
{
	struct idxd_user_context *ctx;
	struct idxd_device *idxd;
	struct idxd_wq *wq;
	struct device *dev;
	int rc = 0;

	wq = inode_wq(inode);
	idxd = wq->idxd;
	dev = &idxd->pdev->dev;

	dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq));

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&wq->wq_lock);

	if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq)) {
		rc = -EBUSY;
		goto failed;
	}

	ctx->wq = wq;
	filp->private_data = ctx;
	idxd_wq_get(wq);
	mutex_unlock(&wq->wq_lock);
	return 0;

 failed:
	mutex_unlock(&wq->wq_lock);
	kfree(ctx);
	return rc;
}

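/*
 * release(): undo open() - free the user context and drop the workqueue
 * reference taken when the file was opened.
 */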
static int idxd_cdev_release(struct inode *node, struct file *filep)
{
	struct idxd_user_context *ctx = filep->private_data;
	struct idxd_wq *wq = ctx->wq;
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;

	dev_dbg(dev, "%s called\n", __func__);
	filep->private_data = NULL;

	kfree(ctx);
	mutex_lock(&wq->wq_lock);
	idxd_wq_put(wq);
	mutex_unlock(&wq->wq_lock);
	return 0;
}

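/*
 * The only thing user space may map is a single page of the workqueue
 * portal; reject any larger request.
 */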
static int check_vma(struct idxd_wq *wq, struct vm_area_struct *vma,
		     const char *func)
{
	struct device *dev = &wq->idxd->pdev->dev;

	if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
		dev_info_ratelimited(dev,
				     "%s: %s: mapping too large: %lu\n",
				     current->comm, func,
				     vma->vm_end - vma->vm_start);
		return -EINVAL;
	}

	return 0;
}

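/*
 * mmap(): map one page of the workqueue's "limited" portal from the
 * device BAR into user space, uncached and excluded from fork copies
 * (VM_DONTCOPY), so descriptors can be submitted directly from user space.
 */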
static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct idxd_user_context *ctx = filp->private_data;
	struct idxd_wq *wq = ctx->wq;
	struct idxd_device *idxd = wq->idxd;
	struct pci_dev *pdev = idxd->pdev;
	phys_addr_t base = pci_resource_start(pdev, IDXD_WQ_BAR);
	unsigned long pfn;
	int rc;

	dev_dbg(&pdev->dev, "%s called\n", __func__);
	rc = check_vma(wq, vma, __func__);
	if (rc < 0)
		return rc;

	vma->vm_flags |= VM_DONTCOPY;
	pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
				IDXD_PORTAL_LIMITED)) >> PAGE_SHIFT;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_private_data = ctx;

	return io_remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
			vma->vm_page_prot);
}

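/*
 * poll(): report the file as readable when the device has latched a
 * software error; user space waits on the per-cdev error queue.
 */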
static __poll_t idxd_cdev_poll(struct file *filp,
			       struct poll_table_struct *wait)
{
	struct idxd_user_context *ctx = filp->private_data;
	struct idxd_wq *wq = ctx->wq;
	struct idxd_device *idxd = wq->idxd;
	struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
	unsigned long flags;
	__poll_t out = 0;

	poll_wait(filp, &idxd_cdev->err_queue, wait);
	spin_lock_irqsave(&idxd->dev_lock, flags);
	if (idxd->sw_err.valid)
		out = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	return out;
}

static const struct file_operations idxd_cdev_fops = {
	.owner = THIS_MODULE,
	.open = idxd_cdev_open,
	.release = idxd_cdev_release,
	.mmap = idxd_cdev_mmap,
	.poll = idxd_cdev_poll,
};

int idxd_cdev_get_major(struct idxd_device *idxd)
{
	return MAJOR(ictx[idxd->type].devt);
}

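/*
 * Allocate and register the struct device backing a workqueue's char
 * device node: name it "<dev name>/wq<device id>.<wq id>", pull a minor
 * from the per-type IDA, and pair it with the major reserved at module
 * init.
 */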
static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
	struct idxd_cdev_context *cdev_ctx;
	struct device *dev;
	int minor, rc;

	idxd_cdev->dev = kzalloc(sizeof(*idxd_cdev->dev), GFP_KERNEL);
	if (!idxd_cdev->dev)
		return -ENOMEM;

	dev = idxd_cdev->dev;
	dev->parent = &idxd->pdev->dev;
	dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd),
		     idxd->id, wq->id);
	dev->bus = idxd_get_bus_type(idxd);

	cdev_ctx = &ictx[wq->idxd->type];
	minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
	if (minor < 0) {
		rc = minor;
		kfree(dev);
		goto ida_err;
	}

	dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);
	dev->type = &idxd_cdev_device_type;
	rc = device_register(dev);
	if (rc < 0) {
		dev_err(&idxd->pdev->dev, "device register failed\n");
		goto dev_reg_err;
	}
	idxd_cdev->minor = minor;

	return 0;

 dev_reg_err:
	ida_simple_remove(&cdev_ctx->minor_ida, MINOR(dev->devt));
	put_device(dev);
 ida_err:
	idxd_cdev->dev = NULL;
	return rc;
}

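/*
 * Tear down a workqueue's char device. Only a fully added cdev
 * (CDEV_NORMAL) needs cdev_del(); in both cases the struct device is
 * unregistered and the minor number is returned to the IDA.
 */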
static void idxd_wq_cdev_cleanup(struct idxd_wq *wq,
				 enum idxd_cdev_cleanup cdev_state)
{
	struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
	struct idxd_cdev_context *cdev_ctx;

	cdev_ctx = &ictx[wq->idxd->type];
	if (cdev_state == CDEV_NORMAL)
		cdev_del(&idxd_cdev->cdev);
	device_unregister(idxd_cdev->dev);
	/*
	 * The device_type->release() will be called on the device and free
	 * the allocated struct device. We can just forget it.
	 */
	ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor);
	idxd_cdev->dev = NULL;
	idxd_cdev->minor = -1;
}

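/*
 * Create the char device for a workqueue: set up the backing struct
 * device, then initialize and add the cdev with that device as parent.
 * If cdev_add() fails, the partial setup is unwound with CDEV_FAILED.
 */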
int idxd_wq_add_cdev(struct idxd_wq *wq)
{
	struct idxd_cdev *idxd_cdev = &wq->idxd_cdev;
	struct cdev *cdev = &idxd_cdev->cdev;
	struct device *dev;
	int rc;

	rc = idxd_wq_cdev_dev_setup(wq);
	if (rc < 0)
		return rc;

	dev = idxd_cdev->dev;
	cdev_init(cdev, &idxd_cdev_fops);
	cdev_set_parent(cdev, &dev->kobj);
	rc = cdev_add(cdev, dev->devt, 1);
	if (rc) {
		dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc);
		idxd_wq_cdev_cleanup(wq, CDEV_FAILED);
		return rc;
	}

	init_waitqueue_head(&idxd_cdev->err_queue);
	return 0;
}

void idxd_wq_del_cdev(struct idxd_wq *wq)
{
	idxd_wq_cdev_cleanup(wq, CDEV_NORMAL);
}

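/*
 * Module init: for each accelerator type, initialize the minor IDA and
 * reserve a char device region (one major, MINORMASK minors) under the
 * type's name.
 */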
int idxd_cdev_register(void)
{
	int rc, i;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		ida_init(&ictx[i].minor_ida);
		rc = alloc_chrdev_region(&ictx[i].devt, 0, MINORMASK,
					 ictx[i].name);
		if (rc)
			return rc;
	}

	return 0;
}

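/*
 * Module exit: release the char device regions and minor IDAs reserved
 * in idxd_cdev_register().
 */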
void idxd_cdev_remove(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		unregister_chrdev_region(ictx[i].devt, MINORMASK);
		ida_destroy(&ictx[i].minor_ida);
	}
}
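
For reference, a minimal user-space sketch of how a client might exercise the interface exposed by idxd_cdev_fops above: open a workqueue node, mmap one page of its limited portal, and poll for device errors. The device node path is illustrative only (the actual name comes from dev_set_name() plus udev policy), and real descriptor submission through the portal (MOVDIR64B/ENQCMD) is out of scope here.

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical node; the driver names it "<dev name>/wq<id>.<id>". */
	int fd = open("/dev/dsa/wq0.0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Exactly one page is allowed; larger requests fail check_vma(). */
	void *portal = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED, fd, 0);
	if (portal == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	/* Wait briefly for a latched software error from the device. */
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	if (poll(&pfd, 1, 100) > 0 && (pfd.revents & POLLIN))
		fprintf(stderr, "device reported a software error\n");

	munmap(portal, 4096);
	close(fd);
	return 0;
}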