Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

virtio-mmio: Remove virtqueue list from mmio device

The MMIO transport implementation creates a list of virtqueues for a
virtio device, while the same is already available in the struct
virtio_device.

Don't create a duplicate list, and use the other one instead.

While at it, fix the virtio_device_for_each_vq() macro to accept an
argument like "&vm_dev->vdev" (which currently fails to build).

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Message-Id: <3e56c6f74002987e22f364d883cbad177cd9ad9c.1747827066.git.viresh.kumar@linaro.org>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>

Authored by Viresh Kumar and committed by Michael S. Tsirkin.
564a69ad 482bd84f

+4 -50
+3 -49
drivers/virtio/virtio_mmio.c
··· 65 65 #include <linux/platform_device.h> 66 66 #include <linux/pm.h> 67 67 #include <linux/slab.h> 68 - #include <linux/spinlock.h> 69 68 #include <linux/virtio.h> 70 69 #include <linux/virtio_config.h> 71 70 #include <uapi/linux/virtio_mmio.h> ··· 87 88 88 89 void __iomem *base; 89 90 unsigned long version; 90 - 91 - /* a list of queues so we can dispatch IRQs */ 92 - spinlock_t lock; 93 - struct list_head virtqueues; 94 91 }; 95 - 96 - struct virtio_mmio_vq_info { 97 - /* the actual virtqueue */ 98 - struct virtqueue *vq; 99 - 100 - /* the list node for the virtqueues list */ 101 - struct list_head node; 102 - }; 103 - 104 - 105 92 106 93 /* Configuration interface */ 107 94 ··· 285 300 static irqreturn_t vm_interrupt(int irq, void *opaque) 286 301 { 287 302 struct virtio_mmio_device *vm_dev = opaque; 288 - struct virtio_mmio_vq_info *info; 303 + struct virtqueue *vq; 289 304 unsigned long status; 290 - unsigned long flags; 291 305 irqreturn_t ret = IRQ_NONE; 292 306 293 307 /* Read and acknowledge interrupts */ ··· 299 315 } 300 316 301 317 if (likely(status & VIRTIO_MMIO_INT_VRING)) { 302 - spin_lock_irqsave(&vm_dev->lock, flags); 303 - list_for_each_entry(info, &vm_dev->virtqueues, node) 304 - ret |= vring_interrupt(irq, info->vq); 305 - spin_unlock_irqrestore(&vm_dev->lock, flags); 318 + virtio_device_for_each_vq(&vm_dev->vdev, vq) 319 + ret |= vring_interrupt(irq, vq); 306 320 } 307 321 308 322 return ret; ··· 311 329 static void vm_del_vq(struct virtqueue *vq) 312 330 { 313 331 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev); 314 - struct virtio_mmio_vq_info *info = vq->priv; 315 - unsigned long flags; 316 332 unsigned int index = vq->index; 317 - 318 - spin_lock_irqsave(&vm_dev->lock, flags); 319 - list_del(&info->node); 320 - spin_unlock_irqrestore(&vm_dev->lock, flags); 321 333 322 334 /* Select and deactivate the queue */ 323 335 writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL); ··· 323 347 } 324 348 325 349 
vring_del_virtqueue(vq); 326 - 327 - kfree(info); 328 350 } 329 351 330 352 static void vm_del_vqs(struct virtio_device *vdev) ··· 349 375 { 350 376 struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 351 377 bool (*notify)(struct virtqueue *vq); 352 - struct virtio_mmio_vq_info *info; 353 378 struct virtqueue *vq; 354 - unsigned long flags; 355 379 unsigned int num; 356 380 int err; 357 381 ··· 369 397 VIRTIO_MMIO_QUEUE_PFN : VIRTIO_MMIO_QUEUE_READY))) { 370 398 err = -ENOENT; 371 399 goto error_available; 372 - } 373 - 374 - /* Allocate and fill out our active queue description */ 375 - info = kmalloc(sizeof(*info), GFP_KERNEL); 376 - if (!info) { 377 - err = -ENOMEM; 378 - goto error_kmalloc; 379 400 } 380 401 381 402 num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX); ··· 428 463 writel(1, vm_dev->base + VIRTIO_MMIO_QUEUE_READY); 429 464 } 430 465 431 - vq->priv = info; 432 - info->vq = vq; 433 - 434 - spin_lock_irqsave(&vm_dev->lock, flags); 435 - list_add(&info->node, &vm_dev->virtqueues); 436 - spin_unlock_irqrestore(&vm_dev->lock, flags); 437 - 438 466 return vq; 439 467 440 468 error_bad_pfn: ··· 439 481 writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY); 440 482 WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY)); 441 483 } 442 - kfree(info); 443 - error_kmalloc: 444 484 error_available: 445 485 return ERR_PTR(err); 446 486 } ··· 583 627 vm_dev->vdev.dev.release = virtio_mmio_release_dev; 584 628 vm_dev->vdev.config = &virtio_mmio_config_ops; 585 629 vm_dev->pdev = pdev; 586 - INIT_LIST_HEAD(&vm_dev->virtqueues); 587 - spin_lock_init(&vm_dev->lock); 588 630 589 631 vm_dev->base = devm_platform_ioremap_resource(pdev, 0); 590 632 if (IS_ERR(vm_dev->base)) {
+1 -1
include/linux/virtio.h
··· 196 196 size_t virtio_max_dma_size(const struct virtio_device *vdev); 197 197 198 198 #define virtio_device_for_each_vq(vdev, vq) \ 199 - list_for_each_entry(vq, &vdev->vqs, list) 199 + list_for_each_entry(vq, &(vdev)->vqs, list) 200 200 201 201 /** 202 202 * struct virtio_driver - operations for a virtio I/O driver