// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Intel Corporation.
 * Copyright (C) 2020 Red Hat, Inc.
 *
 * Author: Tiwei Bie <tiwei.bie@intel.com>
 *         Jason Wang <jasowang@redhat.com>
 *
 * Thanks to Michael S. Tsirkin for the valuable comments and
 * suggestions, and to Cunming Liang and Zhihong Wang for all
 * their support.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/iommu.h>
#include <linux/uuid.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>

#include "vhost.h"

enum {
	VHOST_VDPA_BACKEND_FEATURES =
	(1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
	(1ULL << VHOST_BACKEND_F_IOTLB_BATCH),
};

#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)

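/*
 * Per-device state for a vhost-vdpa character device. One instance is
 * created for each vDPA bus device bound to this driver and exposed to
 * userspace as /dev/vhost-vdpa-<minor>.
 */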
struct vhost_vdpa {
	struct vhost_dev vdev;
	struct iommu_domain *domain;
	struct vhost_virtqueue *vqs;
	struct completion completion;
	struct vdpa_device *vdpa;
	struct device dev;
	struct cdev cdev;
	atomic_t opened;
	int nvqs;
	int virtio_id;
	int minor;
	struct eventfd_ctx *config_ctx;
	int in_batch;
	struct vdpa_iova_range range;
};

static DEFINE_IDA(vhost_vdpa_ida);

static dev_t vhost_vdpa_major;

static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
	const struct vdpa_config_ops *ops = v->vdpa->config;

	ops->kick_vq(v->vdpa, vq - v->vqs);
}

static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
{
	struct vhost_virtqueue *vq = private;
	struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;

	if (call_ctx)
		eventfd_signal(call_ctx, 1);

	return IRQ_HANDLED;
}

static irqreturn_t vhost_vdpa_config_cb(void *private)
{
	struct vhost_vdpa *v = private;
	struct eventfd_ctx *config_ctx = v->config_ctx;

	if (config_ctx)
		eventfd_signal(config_ctx, 1);

	return IRQ_HANDLED;
}

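/*
 * Best-effort interrupt bypass: wire the device's per-vq interrupt
 * directly to the call eventfd through the irq bypass manager. If
 * registration fails we only log it; signalling then falls back to
 * the ordinary vhost_vdpa_virtqueue_cb() path.
 */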
static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];
	const struct vdpa_config_ops *ops = v->vdpa->config;
	struct vdpa_device *vdpa = v->vdpa;
	int ret, irq;

	if (!ops->get_vq_irq)
		return;

	irq = ops->get_vq_irq(vdpa, qid);
	irq_bypass_unregister_producer(&vq->call_ctx.producer);
	if (!vq->call_ctx.ctx || irq < 0)
		return;

	vq->call_ctx.producer.token = vq->call_ctx.ctx;
	vq->call_ctx.producer.irq = irq;
	ret = irq_bypass_register_producer(&vq->call_ctx.producer);
	if (unlikely(ret))
		dev_info(&v->dev, "vq %u, irq bypass producer (token %p) registration fails, ret = %d\n",
			 qid, vq->call_ctx.producer.token, ret);
}

static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];

	irq_bypass_unregister_producer(&vq->call_ctx.producer);
}

static void vhost_vdpa_reset(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;

	vdpa_reset(vdpa);
	v->in_batch = 0;
}

static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 device_id;

	device_id = ops->get_device_id(vdpa);

	if (copy_to_user(argp, &device_id, sizeof(device_id)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status;

	status = ops->get_status(vdpa);

	if (copy_to_user(statusp, &status, sizeof(status)))
		return -EFAULT;

	return 0;
}

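/*
 * VHOST_VDPA_SET_STATUS handler. Besides forwarding the new status to
 * the device, this registers the per-vq irq bypass producers when
 * DRIVER_OK becomes set and unregisters them again when it is cleared.
 */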
static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status, status_old;
	int nvqs = v->nvqs;
	u16 i;

	if (copy_from_user(&status, statusp, sizeof(status)))
		return -EFAULT;

	status_old = ops->get_status(vdpa);

	/*
	 * Userspace shouldn't remove status bits unless it resets the
	 * status to 0.
	 */
	if (status != 0 && (status_old & ~status) != 0)
		return -EINVAL;

	ops->set_status(vdpa, status);

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_setup_vq_irq(v, i);

	if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_unsetup_vq_irq(v, i);

	return 0;
}

static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
				      struct vhost_vdpa_config *c)
{
	long size = 0;

	switch (v->virtio_id) {
	case VIRTIO_ID_NET:
		size = sizeof(struct virtio_net_config);
		break;
	}

	if (c->len == 0)
		return -EINVAL;

	if (c->len > size - c->off)
		return -E2BIG;

	return 0;
}

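/*
 * Config space accessors (VHOST_VDPA_GET_CONFIG/SET_CONFIG). The
 * payload is bounced through a kernel buffer so the vDPA driver never
 * dereferences user pointers itself.
 */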
static long vhost_vdpa_get_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	vdpa_get_config(vdpa, config.off, buf, config.len);

	if (copy_to_user(c->buf, buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_set_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;

	buf = vmemdup_user(c->buf, config.len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	ops->set_config(vdpa, config.off, buf, config.len);

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	features = ops->get_features(vdpa);

	if (copy_to_user(featurep, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	/*
	 * It's not allowed to change the features after they have
	 * been negotiated.
	 */
	if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
		return -EBUSY;

	if (copy_from_user(&features, featurep, sizeof(features)))
		return -EFAULT;

	if (vdpa_set_features(vdpa, features))
		return -EINVAL;

	return 0;
}

static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u16 num;

	num = ops->get_vq_num_max(vdpa);

	if (copy_to_user(argp, &num, sizeof(num)))
		return -EFAULT;

	return 0;
}

static void vhost_vdpa_config_put(struct vhost_vdpa *v)
{
	if (v->config_ctx) {
		eventfd_ctx_put(v->config_ctx);
		v->config_ctx = NULL;
	}
}

static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vdpa_callback cb;
	int fd;
	struct eventfd_ctx *ctx;

	cb.callback = vhost_vdpa_config_cb;
	cb.private = v->vdpa;
	if (copy_from_user(&fd, argp, sizeof(fd)))
		return -EFAULT;

	ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
	swap(ctx, v->config_ctx);

	if (!IS_ERR_OR_NULL(ctx))
		eventfd_ctx_put(ctx);

	if (IS_ERR(v->config_ctx)) {
		long ret = PTR_ERR(v->config_ctx);

		v->config_ctx = NULL;
		return ret;
	}

	v->vdpa->config->set_config_cb(v->vdpa, &cb);

	return 0;
}

static long vhost_vdpa_get_iova_range(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vhost_vdpa_iova_range range = {
		.first = v->range.first,
		.last = v->range.last,
	};

	if (copy_to_user(argp, &range, sizeof(range)))
		return -EFAULT;
	return 0;
}

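/*
 * Virtqueue ioctls. The common vhost_vring_ioctl() updates the vhost
 * view of the ring; afterwards the relevant state (address, base, call
 * eventfd, size) is propagated to the vDPA device. Two commands need
 * special handling up front: VHOST_VDPA_SET_VRING_ENABLE is handled
 * entirely here, and VHOST_GET_VRING_BASE must first fetch the current
 * avail index from the device.
 */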
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
				   void __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_vq_state vq_state;
	struct vdpa_callback cb;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	u32 idx;
	long r;

	r = get_user(idx, (u32 __user *)argp);
	if (r < 0)
		return r;

	if (idx >= v->nvqs)
		return -ENOBUFS;

	idx = array_index_nospec(idx, v->nvqs);
	vq = &v->vqs[idx];

	switch (cmd) {
	case VHOST_VDPA_SET_VRING_ENABLE:
		if (copy_from_user(&s, argp, sizeof(s)))
			return -EFAULT;
		ops->set_vq_ready(vdpa, idx, s.num);
		return 0;
	case VHOST_GET_VRING_BASE:
		r = ops->get_vq_state(v->vdpa, idx, &vq_state);
		if (r)
			return r;

		vq->last_avail_idx = vq_state.avail_index;
		break;
	}

	r = vhost_vring_ioctl(&v->vdev, cmd, argp);
	if (r)
		return r;

	switch (cmd) {
	case VHOST_SET_VRING_ADDR:
		if (ops->set_vq_address(vdpa, idx,
					(u64)(uintptr_t)vq->desc,
					(u64)(uintptr_t)vq->avail,
					(u64)(uintptr_t)vq->used))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_BASE:
		vq_state.avail_index = vq->last_avail_idx;
		if (ops->set_vq_state(vdpa, idx, &vq_state))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_CALL:
		if (vq->call_ctx.ctx) {
			cb.callback = vhost_vdpa_virtqueue_cb;
			cb.private = vq;
		} else {
			cb.callback = NULL;
			cb.private = NULL;
		}
		ops->set_vq_cb(vdpa, idx, &cb);
		vhost_vdpa_setup_vq_irq(v, idx);
		break;

	case VHOST_SET_VRING_NUM:
		ops->set_vq_num(vdpa, idx, vq->num);
		break;
	}

	return r;
}

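/*
 * Main ioctl dispatcher. VHOST_SET_BACKEND_FEATURES is handled before
 * taking the device mutex; everything else runs under vdev.mutex and
 * falls back first to the generic vhost ioctls and then to the vring
 * ioctls for commands not handled here.
 */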
static long vhost_vdpa_unlocked_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u64 features;
	long r = 0;

	if (cmd == VHOST_SET_BACKEND_FEATURES) {
		if (copy_from_user(&features, featurep, sizeof(features)))
			return -EFAULT;
		if (features & ~VHOST_VDPA_BACKEND_FEATURES)
			return -EOPNOTSUPP;
		vhost_set_backend_features(&v->vdev, features);
		return 0;
	}

	mutex_lock(&d->mutex);

	switch (cmd) {
	case VHOST_VDPA_GET_DEVICE_ID:
		r = vhost_vdpa_get_device_id(v, argp);
		break;
	case VHOST_VDPA_GET_STATUS:
		r = vhost_vdpa_get_status(v, argp);
		break;
	case VHOST_VDPA_SET_STATUS:
		r = vhost_vdpa_set_status(v, argp);
		break;
	case VHOST_VDPA_GET_CONFIG:
		r = vhost_vdpa_get_config(v, argp);
		break;
	case VHOST_VDPA_SET_CONFIG:
		r = vhost_vdpa_set_config(v, argp);
		break;
	case VHOST_GET_FEATURES:
		r = vhost_vdpa_get_features(v, argp);
		break;
	case VHOST_SET_FEATURES:
		r = vhost_vdpa_set_features(v, argp);
		break;
	case VHOST_VDPA_GET_VRING_NUM:
		r = vhost_vdpa_get_vring_num(v, argp);
		break;
	case VHOST_SET_LOG_BASE:
	case VHOST_SET_LOG_FD:
		r = -ENOIOCTLCMD;
		break;
	case VHOST_VDPA_SET_CONFIG_CALL:
		r = vhost_vdpa_set_config_call(v, argp);
		break;
	case VHOST_GET_BACKEND_FEATURES:
		features = VHOST_VDPA_BACKEND_FEATURES;
		if (copy_to_user(featurep, &features, sizeof(features)))
			r = -EFAULT;
		break;
	case VHOST_VDPA_GET_IOVA_RANGE:
		r = vhost_vdpa_get_iova_range(v, argp);
		break;
	default:
		r = vhost_dev_ioctl(&v->vdev, cmd, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vdpa_vring_ioctl(v, cmd, argp);
		break;
	}

	mutex_unlock(&d->mutex);
	return r;
}

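/*
 * Tear down the IOTLB mappings that intersect [start, last]: mark the
 * pages dirty if the device may have written to them, unpin them, and
 * drop the pinned-pages accounting charged in vhost_vdpa_map().
 */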
static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct vhost_iotlb_map *map;
	struct page *page;
	unsigned long pfn, pinned;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		pinned = map->size >> PAGE_SHIFT;
		for (pfn = map->addr >> PAGE_SHIFT;
		     pinned > 0; pfn++, pinned--) {
			page = pfn_to_page(pfn);
			if (map->perm & VHOST_ACCESS_WO)
				set_page_dirty_lock(page);
			unpin_user_page(page);
		}
		atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
{
	struct vhost_dev *dev = &v->vdev;

	vhost_vdpa_iotlb_unmap(v, 0ULL, 0ULL - 1);
	kfree(dev->iotlb);
	dev->iotlb = NULL;
}

static int perm_to_iommu_flags(u32 perm)
{
	int flags = 0;

	switch (perm) {
	case VHOST_ACCESS_WO:
		flags |= IOMMU_WRITE;
		break;
	case VHOST_ACCESS_RO:
		flags |= IOMMU_READ;
		break;
	case VHOST_ACCESS_RW:
		flags |= (IOMMU_WRITE | IOMMU_READ);
		break;
	default:
		WARN(1, "invalid vhost IOTLB permission\n");
		break;
	}

	return flags | IOMMU_CACHE;
}

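/*
 * Establish a single IOVA->PA mapping. Three backends are possible, in
 * order of preference: a device that translates DMA itself (->dma_map),
 * a device that accepts a whole translation table (->set_map, deferred
 * while an IOTLB batch is in flight), or the platform IOMMU domain set
 * up in vhost_vdpa_alloc_domain().
 */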
static int vhost_vdpa_map(struct vhost_vdpa *v,
			  u64 iova, u64 size, u64 pa, u32 perm)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int r = 0;

	r = vhost_iotlb_add_range(dev->iotlb, iova, iova + size - 1,
				  pa, perm);
	if (r)
		return r;

	if (ops->dma_map) {
		r = ops->dma_map(vdpa, iova, size, pa, perm);
	} else if (ops->set_map) {
		if (!v->in_batch)
			r = ops->set_map(vdpa, dev->iotlb);
	} else {
		r = iommu_map(v->domain, iova, pa, size,
			      perm_to_iommu_flags(perm));
	}

	if (r)
		vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
	else
		atomic64_add(size >> PAGE_SHIFT, &dev->mm->pinned_vm);

	return r;
}

static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);

	if (ops->dma_map) {
		ops->dma_unmap(vdpa, iova, size);
	} else if (ops->set_map) {
		if (!v->in_batch)
			ops->set_map(vdpa, dev->iotlb);
	} else {
		iommu_unmap(v->domain, iova, size);
	}
}

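/*
 * Handle a VHOST_IOTLB_UPDATE message: pin the userspace range with
 * pin_user_pages() one page-list chunk at a time, coalesce physically
 * contiguous runs of pages, and map each run with vhost_vdpa_map().
 * On failure, every page that was pinned but not yet mapped is
 * unpinned, and already-mapped ranges are torn down again.
 */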
static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
					   struct vhost_iotlb_msg *msg)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb *iotlb = dev->iotlb;
	struct page **page_list;
	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
	unsigned int gup_flags = FOLL_LONGTERM;
	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
	unsigned long lock_limit, sz2pin, nchunks, i;
	u64 iova = msg->iova;
	long pinned;
	int ret = 0;

	if (msg->iova < v->range.first ||
	    msg->iova + msg->size - 1 > v->range.last)
		return -EINVAL;

	if (vhost_iotlb_itree_first(iotlb, msg->iova,
				    msg->iova + msg->size - 1))
		return -EEXIST;

	/* Limit the use of memory for bookkeeping */
	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	if (msg->perm & VHOST_ACCESS_WO)
		gup_flags |= FOLL_WRITE;

	npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
	if (!npages) {
		ret = -EINVAL;
		goto free;
	}

	mmap_read_lock(dev->mm);

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
		ret = -ENOMEM;
		goto unlock;
	}

	cur_base = msg->uaddr & PAGE_MASK;
	iova &= PAGE_MASK;
	nchunks = 0;

	while (npages) {
		sz2pin = min_t(unsigned long, npages, list_size);
		pinned = pin_user_pages(cur_base, sz2pin,
					gup_flags, page_list, NULL);
		if (sz2pin != pinned) {
			if (pinned < 0) {
				ret = pinned;
			} else {
				unpin_user_pages(page_list, pinned);
				ret = -ENOMEM;
			}
			goto out;
		}
		nchunks++;

		if (!last_pfn)
			map_pfn = page_to_pfn(page_list[0]);

		for (i = 0; i < pinned; i++) {
			unsigned long this_pfn = page_to_pfn(page_list[i]);
			u64 csize;

			if (last_pfn && (this_pfn != last_pfn + 1)) {
				/* Map the contiguous chunk that just ended */
				csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
				ret = vhost_vdpa_map(v, iova, csize,
						     map_pfn << PAGE_SHIFT,
						     msg->perm);
				if (ret) {
					/*
					 * Unpin the pages that are left unmapped
					 * from this point on in the current
					 * page_list. The remaining outstanding
					 * ones which may stride across several
					 * chunks will be covered in the common
					 * error path subsequently.
					 */
					unpin_user_pages(&page_list[i],
							 pinned - i);
					goto out;
				}

				map_pfn = this_pfn;
				iova += csize;
				nchunks = 0;
			}

			last_pfn = this_pfn;
		}

		cur_base += pinned << PAGE_SHIFT;
		npages -= pinned;
	}

	/* Map the remaining contiguous chunk */
	ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
			     map_pfn << PAGE_SHIFT, msg->perm);
out:
	if (ret) {
		if (nchunks) {
			unsigned long pfn;

			/*
			 * Unpin the outstanding pages which are yet to be
			 * mapped but haven't due to vdpa_map() or
			 * pin_user_pages() failure.
			 *
			 * Mapped pages are accounted in vdpa_map(), hence
			 * the corresponding unpinning will be handled by
			 * vdpa_unmap().
			 */
			WARN_ON(!last_pfn);
			for (pfn = map_pfn; pfn <= last_pfn; pfn++)
				unpin_user_page(pfn_to_page(pfn));
		}
		vhost_vdpa_unmap(v, msg->iova, msg->size);
	}
unlock:
	mmap_read_unlock(dev->mm);
free:
	free_page((unsigned long)page_list);
	return ret;
}

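/*
 * Entry point for IOTLB messages written through the vhost chardev.
 * BATCH_BEGIN/BATCH_END let userspace stage many updates and push them
 * to a ->set_map device in a single call.
 */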
static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
					struct vhost_iotlb_msg *msg)
{
	struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int r = 0;

	r = vhost_dev_check_owner(dev);
	if (r)
		return r;

	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		r = vhost_vdpa_process_iotlb_update(v, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		vhost_vdpa_unmap(v, msg->iova, msg->size);
		break;
	case VHOST_IOTLB_BATCH_BEGIN:
		v->in_batch = true;
		break;
	case VHOST_IOTLB_BATCH_END:
		if (v->in_batch && ops->set_map)
			ops->set_map(vdpa, dev->iotlb);
		v->in_batch = false;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vdpa *v = file->private_data;
	struct vhost_dev *dev = &v->vdev;

	return vhost_chr_write_iter(dev, from);
}

static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
	struct bus_type *bus;
	int ret;

	/* Device wants to do DMA by itself */
	if (ops->set_map || ops->dma_map)
		return 0;

	bus = dma_dev->bus;
	if (!bus)
		return -EFAULT;

	if (!iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
		return -ENOTSUPP;

	v->domain = iommu_domain_alloc(bus);
	if (!v->domain)
		return -EIO;

	ret = iommu_attach_device(v->domain, dma_dev);
	if (ret)
		goto err_attach;

	return 0;

err_attach:
	iommu_domain_free(v->domain);
	return ret;
}

static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);

	if (v->domain) {
		iommu_detach_device(v->domain, dma_dev);
		iommu_domain_free(v->domain);
	}

	v->domain = NULL;
}

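/*
 * Determine the usable IOVA window: prefer what the device reports,
 * then the IOMMU aperture, and finally fall back to the whole 64-bit
 * space.
 */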
static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v)
{
	struct vdpa_iova_range *range = &v->range;
	struct iommu_domain_geometry geo;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (ops->get_iova_range) {
		*range = ops->get_iova_range(vdpa);
	} else if (v->domain &&
		   !iommu_domain_get_attr(v->domain,
					  DOMAIN_ATTR_GEOMETRY, &geo) &&
		   geo.force_aperture) {
		range->first = geo.aperture_start;
		range->last = geo.aperture_end;
	} else {
		range->first = 0;
		range->last = ULLONG_MAX;
	}
}

static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	int nvqs, i, r, opened;

	v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);

	opened = atomic_cmpxchg(&v->opened, 0, 1);
	if (opened)
		return -EBUSY;

	nvqs = v->nvqs;
	vhost_vdpa_reset(v);

	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		r = -ENOMEM;
		goto err;
	}

	dev = &v->vdev;
	for (i = 0; i < nvqs; i++) {
		vqs[i] = &v->vqs[i];
		vqs[i]->handle_kick = handle_vq_kick;
	}
	vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
		       vhost_vdpa_process_iotlb_msg);

	dev->iotlb = vhost_iotlb_alloc(0, 0);
	if (!dev->iotlb) {
		r = -ENOMEM;
		goto err_init_iotlb;
	}

	r = vhost_vdpa_alloc_domain(v);
	if (r)
		goto err_init_iotlb;

	vhost_vdpa_set_iova_range(v);

	filep->private_data = v;

	return 0;

err_init_iotlb:
	vhost_dev_cleanup(&v->vdev);
	kfree(vqs);
err:
	atomic_dec(&v->opened);
	return r;
}

static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
{
	int i;

	for (i = 0; i < v->nvqs; i++)
		vhost_vdpa_unsetup_vq_irq(v, i);
}

static int vhost_vdpa_release(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;

	mutex_lock(&d->mutex);
	filep->private_data = NULL;
	vhost_vdpa_reset(v);
	vhost_dev_stop(&v->vdev);
	vhost_vdpa_iotlb_free(v);
	vhost_vdpa_free_domain(v);
	vhost_vdpa_config_put(v);
	vhost_vdpa_clean_irq(v);
	vhost_dev_cleanup(&v->vdev);
	kfree(v->vdev.vqs);
	mutex_unlock(&d->mutex);

	atomic_dec(&v->opened);
	complete(&v->completion);

	return 0;
}

#ifdef CONFIG_MMU
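/*
 * Doorbell mapping: userspace may mmap() one page per virtqueue
 * (vm_pgoff selects the queue) to write notifications straight to the
 * device instead of issuing a kick through the kernel.
 */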
static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
{
	struct vhost_vdpa *v = vmf->vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	struct vm_area_struct *vma = vmf->vma;
	u16 index = vma->vm_pgoff;

	notify = ops->get_vq_notification(vdpa, index);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
			    notify.addr >> PAGE_SHIFT, PAGE_SIZE,
			    vma->vm_page_prot))
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct vhost_vdpa_vm_ops = {
	.fault = vhost_vdpa_fault,
};

static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct vhost_vdpa *v = vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	unsigned long index = vma->vm_pgoff;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (vma->vm_flags & VM_READ)
		return -EINVAL;
	if (index > 65535)
		return -EINVAL;
	if (!ops->get_vq_notification)
		return -ENOTSUPP;

	/*
	 * To be safe and easily modelled by userspace, we only
	 * support the doorbell which sits on the page boundary and
	 * does not share the page with other registers.
	 */
	notify = ops->get_vq_notification(vdpa, index);
	if (notify.addr & (PAGE_SIZE - 1))
		return -EINVAL;
	if (vma->vm_end - vma->vm_start != notify.size)
		return -ENOTSUPP;

	vma->vm_ops = &vhost_vdpa_vm_ops;
	return 0;
}
#endif /* CONFIG_MMU */

static const struct file_operations vhost_vdpa_fops = {
	.owner		= THIS_MODULE,
	.open		= vhost_vdpa_open,
	.release	= vhost_vdpa_release,
	.write_iter	= vhost_vdpa_chr_write_iter,
	.unlocked_ioctl	= vhost_vdpa_unlocked_ioctl,
#ifdef CONFIG_MMU
	.mmap		= vhost_vdpa_mmap,
#endif /* CONFIG_MMU */
	.compat_ioctl	= compat_ptr_ioctl,
};

static void vhost_vdpa_release_dev(struct device *device)
{
	struct vhost_vdpa *v =
	       container_of(device, struct vhost_vdpa, dev);

	ida_simple_remove(&vhost_vdpa_ida, v->minor);
	kfree(v->vqs);
	kfree(v);
}

static int vhost_vdpa_probe(struct vdpa_device *vdpa)
{
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa *v;
	int minor;
	int r;

	/* Currently, we only accept network devices. */
	if (ops->get_device_id(vdpa) != VIRTIO_ID_NET)
		return -ENOTSUPP;

	v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!v)
		return -ENOMEM;

	minor = ida_simple_get(&vhost_vdpa_ida, 0,
			       VHOST_VDPA_DEV_MAX, GFP_KERNEL);
	if (minor < 0) {
		kfree(v);
		return minor;
	}

	atomic_set(&v->opened, 0);
	v->minor = minor;
	v->vdpa = vdpa;
	v->nvqs = vdpa->nvqs;
	v->virtio_id = ops->get_device_id(vdpa);

	device_initialize(&v->dev);
	v->dev.release = vhost_vdpa_release_dev;
	v->dev.parent = &vdpa->dev;
	v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
	v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue),
			       GFP_KERNEL);
	if (!v->vqs) {
		r = -ENOMEM;
		goto err;
	}

	r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
	if (r)
		goto err;

	cdev_init(&v->cdev, &vhost_vdpa_fops);
	v->cdev.owner = THIS_MODULE;

	r = cdev_device_add(&v->cdev, &v->dev);
	if (r)
		goto err;

	init_completion(&v->completion);
	vdpa_set_drvdata(vdpa, v);

	return 0;

err:
	put_device(&v->dev);
	return r;
}

static void vhost_vdpa_remove(struct vdpa_device *vdpa)
{
	struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
	int opened;

	cdev_device_del(&v->cdev, &v->dev);

	do {
		opened = atomic_cmpxchg(&v->opened, 0, 1);
		if (!opened)
			break;
		wait_for_completion(&v->completion);
	} while (1);

	put_device(&v->dev);
}

static struct vdpa_driver vhost_vdpa_driver = {
	.driver = {
		.name	= "vhost_vdpa",
	},
	.probe	= vhost_vdpa_probe,
	.remove	= vhost_vdpa_remove,
};

static int __init vhost_vdpa_init(void)
{
	int r;

	r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
				"vhost-vdpa");
	if (r)
		goto err_alloc_chrdev;

	r = vdpa_register_driver(&vhost_vdpa_driver);
	if (r)
		goto err_vdpa_register_driver;

	return 0;

err_vdpa_register_driver:
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
err_alloc_chrdev:
	return r;
}
module_init(vhost_vdpa_init);

static void __exit vhost_vdpa_exit(void)
{
	vdpa_unregister_driver(&vhost_vdpa_driver);
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
}
module_exit(vhost_vdpa_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");