// SPDX-License-Identifier: GPL-2.0-only
/*
 * VIRTIO based driver for vDPA device
 *
 * Copyright (c) 2020, Red Hat. All rights reserved.
 * Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/virtio.h>
#include <linux/vdpa.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#define MOD_VERSION "0.1"
#define MOD_AUTHOR "Jason Wang <jasowang@redhat.com>"
#define MOD_DESC "vDPA bus driver for virtio devices"
#define MOD_LICENSE "GPL v2"

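/*
 * Bridge state: one virtio device instance layered on top of a vDPA
 * device, plus the bookkeeping needed to track its virtqueues.
 */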
struct virtio_vdpa_device {
	struct virtio_device vdev;
	struct vdpa_device *vdpa;
	u64 features;

	/* The lock to protect virtqueue list */
	spinlock_t lock;
	/* List of virtio_vdpa_vq_info */
	struct list_head virtqueues;
};

struct virtio_vdpa_vq_info {
	/* the actual virtqueue */
	struct virtqueue *vq;

	/* the list node for the virtqueues list */
	struct list_head node;
};

static inline struct virtio_vdpa_device *
to_virtio_vdpa_device(struct virtio_device *dev)
{
	return container_of(dev, struct virtio_vdpa_device, vdev);
}

static struct vdpa_device *vd_get_vdpa(struct virtio_device *vdev)
{
	return to_virtio_vdpa_device(vdev)->vdpa;
}

static void virtio_vdpa_get(struct virtio_device *vdev, unsigned offset,
			    void *buf, unsigned len)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);

	vdpa_get_config(vdpa, offset, buf, len);
}

static void virtio_vdpa_set(struct virtio_device *vdev, unsigned offset,
			    const void *buf, unsigned len)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);

	vdpa_set_config(vdpa, offset, buf, len);
}

static u32 virtio_vdpa_generation(struct virtio_device *vdev)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);
	const struct vdpa_config_ops *ops = vdpa->config;

	if (ops->get_generation)
		return ops->get_generation(vdpa);

	return 0;
}

static u8 virtio_vdpa_get_status(struct virtio_device *vdev)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);
	const struct vdpa_config_ops *ops = vdpa->config;

	return ops->get_status(vdpa);
}

static void virtio_vdpa_set_status(struct virtio_device *vdev, u8 status)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);
	const struct vdpa_config_ops *ops = vdpa->config;

	return ops->set_status(vdpa, status);
}

static void virtio_vdpa_reset(struct virtio_device *vdev)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);

	vdpa_reset(vdpa);
}

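/*
 * Notify callback handed to the vring: called by the virtio core after
 * new buffers have been added, it forwards the doorbell to the vDPA
 * device by kicking the corresponding queue.
 */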
static bool virtio_vdpa_notify(struct virtqueue *vq)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vq->vdev);
	const struct vdpa_config_ops *ops = vdpa->config;

	ops->kick_vq(vdpa, vq->index);

	return true;
}

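/* Config-change callback invoked by the vDPA parent driver */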
static irqreturn_t virtio_vdpa_config_cb(void *private)
{
	struct virtio_vdpa_device *vd_dev = private;

	virtio_config_changed(&vd_dev->vdev);

	return IRQ_HANDLED;
}

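/*
 * Per-virtqueue callback invoked by the vDPA parent driver; dispatch
 * to the vring interrupt handler for this queue.
 */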
static irqreturn_t virtio_vdpa_virtqueue_cb(void *private)
{
	struct virtio_vdpa_vq_info *info = private;

	return vring_interrupt(0, info->vq);
}

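/*
 * Set up one virtqueue: allocate the vring through
 * vring_create_virtqueue(), then program the vDPA device with the ring
 * addresses, size, callback and initial queue state before marking the
 * queue ready.
 */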
static struct virtqueue *
virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
		     void (*callback)(struct virtqueue *vq),
		     const char *name, bool ctx)
{
	struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);
	const struct vdpa_config_ops *ops = vdpa->config;
	struct virtio_vdpa_vq_info *info;
	struct vdpa_callback cb;
	struct virtqueue *vq;
	u64 desc_addr, driver_addr, device_addr;
	/* Assume split virtqueue, switch to packed if necessary */
	struct vdpa_vq_state state = {0};
	unsigned long flags;
	u32 align, max_num, min_num = 1;
	bool may_reduce_num = true;
	int err;

	if (!name)
		return NULL;

	if (index >= vdpa->nvqs)
		return ERR_PTR(-ENOENT);

	/* Queue shouldn't already be set up. */
	if (ops->get_vq_ready(vdpa, index))
		return ERR_PTR(-ENOENT);

	/* Allocate and fill out our active queue description */
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	max_num = ops->get_vq_num_max(vdpa);
	if (max_num == 0) {
		err = -ENOENT;
		goto error_new_virtqueue;
	}

	if (ops->get_vq_num_min)
		min_num = ops->get_vq_num_min(vdpa);

	may_reduce_num = (max_num == min_num) ? false : true;

	/* Create the vring */
	align = ops->get_vq_align(vdpa);
	vq = vring_create_virtqueue(index, max_num, align, vdev,
				    true, may_reduce_num, ctx,
				    virtio_vdpa_notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto error_new_virtqueue;
	}

	/* Setup virtqueue callback */
	cb.callback = virtio_vdpa_virtqueue_cb;
	cb.private = info;
	ops->set_vq_cb(vdpa, index, &cb);
	ops->set_vq_num(vdpa, index, virtqueue_get_vring_size(vq));

	desc_addr = virtqueue_get_desc_addr(vq);
	driver_addr = virtqueue_get_avail_addr(vq);
	device_addr = virtqueue_get_used_addr(vq);

	if (ops->set_vq_address(vdpa, index,
				desc_addr, driver_addr,
				device_addr)) {
		err = -EINVAL;
		goto err_vq;
	}

	/* reset virtqueue state index */
	if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
		struct vdpa_vq_state_packed *s = &state.packed;

		s->last_avail_counter = 1;
		s->last_avail_idx = 0;
		s->last_used_counter = 1;
		s->last_used_idx = 0;
	}
	err = ops->set_vq_state(vdpa, index, &state);
	if (err)
		goto err_vq;

	ops->set_vq_ready(vdpa, index, 1);

	vq->priv = info;
	info->vq = vq;

	spin_lock_irqsave(&vd_dev->lock, flags);
	list_add(&info->node, &vd_dev->virtqueues);
	spin_unlock_irqrestore(&vd_dev->lock, flags);

	return vq;

err_vq:
	vring_del_virtqueue(vq);
error_new_virtqueue:
	ops->set_vq_ready(vdpa, index, 0);
	/* VDPA driver should make sure vq is stopped here */
	WARN_ON(ops->get_vq_ready(vdpa, index));
	kfree(info);
	return ERR_PTR(err);
}

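/*
 * Tear down one virtqueue: unlink it from the device list, ask the
 * vDPA device to deactivate it, then free the vring and the tracking
 * info.
 */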
static void virtio_vdpa_del_vq(struct virtqueue *vq)
{
	struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vq->vdev);
	struct vdpa_device *vdpa = vd_dev->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct virtio_vdpa_vq_info *info = vq->priv;
	unsigned int index = vq->index;
	unsigned long flags;

	spin_lock_irqsave(&vd_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vd_dev->lock, flags);

	/* Select and deactivate the queue (best effort) */
	ops->set_vq_ready(vdpa, index, 0);

	vring_del_virtqueue(vq);

	kfree(info);
}

static void virtio_vdpa_del_vqs(struct virtio_device *vdev)
{
	struct virtqueue *vq, *n;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		virtio_vdpa_del_vq(vq);
}

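/*
 * Create every requested virtqueue (entries with a NULL name are
 * skipped) and register the config-change callback with the vDPA
 * device. On failure, all queues created so far are deleted.
 */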
static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned nvqs,
				struct virtqueue *vqs[],
				vq_callback_t *callbacks[],
				const char * const names[],
				const bool *ctx,
				struct irq_affinity *desc)
{
	struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_callback cb;
	int i, err, queue_idx = 0;

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = virtio_vdpa_setup_vq(vdev, queue_idx++,
					      callbacks[i], names[i], ctx ?
					      ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto err_setup_vq;
		}
	}

	cb.callback = virtio_vdpa_config_cb;
	cb.private = vd_dev;
	ops->set_config_cb(vdpa, &cb);

	return 0;

err_setup_vq:
	virtio_vdpa_del_vqs(vdev);
	return err;
}

static u64 virtio_vdpa_get_features(struct virtio_device *vdev)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);
	const struct vdpa_config_ops *ops = vdpa->config;

	return ops->get_features(vdpa);
}

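/*
 * Filter the negotiated feature bits through the ring transport
 * features and commit the result to the vDPA device.
 */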
static int virtio_vdpa_finalize_features(struct virtio_device *vdev)
{
	struct vdpa_device *vdpa = vd_get_vdpa(vdev);

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	return vdpa_set_features(vdpa, vdev->features);
}

static const char *virtio_vdpa_bus_name(struct virtio_device *vdev)
{
	struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
	struct vdpa_device *vdpa = vd_dev->vdpa;

	return dev_name(&vdpa->dev);
}

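/* virtio config ops, each implemented on top of the vDPA device */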
static const struct virtio_config_ops virtio_vdpa_config_ops = {
	.get = virtio_vdpa_get,
	.set = virtio_vdpa_set,
	.generation = virtio_vdpa_generation,
	.get_status = virtio_vdpa_get_status,
	.set_status = virtio_vdpa_set_status,
	.reset = virtio_vdpa_reset,
	.find_vqs = virtio_vdpa_find_vqs,
	.del_vqs = virtio_vdpa_del_vqs,
	.get_features = virtio_vdpa_get_features,
	.finalize_features = virtio_vdpa_finalize_features,
	.bus_name = virtio_vdpa_bus_name,
};

static void virtio_vdpa_release_dev(struct device *_d)
{
	struct virtio_device *vdev =
	       container_of(_d, struct virtio_device, dev);
	struct virtio_vdpa_device *vd_dev =
	       container_of(vdev, struct virtio_vdpa_device, vdev);

	kfree(vd_dev);
}

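/*
 * Probe: allocate the bridge device, wire it up to the vDPA device and
 * register it on the virtio bus so a matching virtio driver can bind.
 */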
static int virtio_vdpa_probe(struct vdpa_device *vdpa)
{
	const struct vdpa_config_ops *ops = vdpa->config;
	struct virtio_vdpa_device *vd_dev, *reg_dev = NULL;
	int ret = -EINVAL;

	vd_dev = kzalloc(sizeof(*vd_dev), GFP_KERNEL);
	if (!vd_dev)
		return -ENOMEM;

	vd_dev->vdev.dev.parent = vdpa_get_dma_dev(vdpa);
	vd_dev->vdev.dev.release = virtio_vdpa_release_dev;
	vd_dev->vdev.config = &virtio_vdpa_config_ops;
	vd_dev->vdpa = vdpa;
	INIT_LIST_HEAD(&vd_dev->virtqueues);
	spin_lock_init(&vd_dev->lock);

	vd_dev->vdev.id.device = ops->get_device_id(vdpa);
	if (vd_dev->vdev.id.device == 0)
		goto err;

	vd_dev->vdev.id.vendor = ops->get_vendor_id(vdpa);
	ret = register_virtio_device(&vd_dev->vdev);
	reg_dev = vd_dev;
	if (ret)
		goto err;

	vdpa_set_drvdata(vdpa, vd_dev);

	return 0;

err:
	if (reg_dev)
		put_device(&vd_dev->vdev.dev);
	else
		kfree(vd_dev);
	return ret;
}

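/*
 * Remove: unregister the virtio device; the final put on its struct
 * device ends up in virtio_vdpa_release_dev(), which frees the bridge
 * state.
 */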
static void virtio_vdpa_remove(struct vdpa_device *vdpa)
{
	struct virtio_vdpa_device *vd_dev = vdpa_get_drvdata(vdpa);

	unregister_virtio_device(&vd_dev->vdev);
}

static struct vdpa_driver virtio_vdpa_driver = {
	.driver = {
		.name = "virtio_vdpa",
	},
	.probe = virtio_vdpa_probe,
	.remove = virtio_vdpa_remove,
};

module_vdpa_driver(virtio_vdpa_driver);

MODULE_VERSION(MOD_VERSION);
MODULE_LICENSE(MOD_LICENSE);
MODULE_AUTHOR(MOD_AUTHOR);
MODULE_DESCRIPTION(MOD_DESC);