Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

virtio_mmio: Use the DMA API if enabled

This switches to vring_create_virtqueue, simplifying the driver and
adding DMA API support.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

Authored by Andy Lutomirski; committed by Michael S. Tsirkin.
b4211138 2a2d1382

+15 -52
drivers/virtio/virtio_mmio.c
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ struct virtio_mmio_vq_info @@
 	/* the actual virtqueue */
 	struct virtqueue *vq;
 
-	/* the number of entries in the queue */
-	unsigned int num;
-
-	/* the virtual address of the ring queue */
-	void *queue;
-
 	/* the list node for the virtqueues list */
 	struct list_head node;
 };
@@ vm_del_vq @@
 {
 	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
 	struct virtio_mmio_vq_info *info = vq->priv;
-	unsigned long flags, size;
+	unsigned long flags;
 	unsigned int index = vq->index;
 
 	spin_lock_irqsave(&vm_dev->lock, flags);
 	list_del(&info->node);
 	spin_unlock_irqrestore(&vm_dev->lock, flags);
-
-	vring_del_virtqueue(vq);
 
 	/* Select and deactivate the queue */
 	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
@@ vm_del_vq (continued) @@
 		WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
 	}
 
-	size = PAGE_ALIGN(vring_size(info->num, VIRTIO_MMIO_VRING_ALIGN));
-	free_pages_exact(info->queue, size);
+	vring_del_virtqueue(vq);
+
 	kfree(info);
 }
@@ before vm_setup_vq @@
 		free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev);
 }
 
-
-
 static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
 				  void (*callback)(struct virtqueue *vq),
 				  const char *name)
@@ vm_setup_vq @@
 	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
 	struct virtio_mmio_vq_info *info;
 	struct virtqueue *vq;
-	unsigned long flags, size;
+	unsigned long flags;
+	unsigned int num;
 	int err;
 
 	if (!name)
@@ vm_setup_vq (continued) @@
 		goto error_kmalloc;
 	}
 
-	/* Allocate pages for the queue - start with a queue as big as
-	 * possible (limited by maximum size allowed by device), drop down
-	 * to a minimal size, just big enough to fit descriptor table
-	 * and two rings (which makes it "alignment_size * 2")
-	 */
-	info->num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
-
-	/* If the device reports a 0 entry queue, we won't be able to
-	 * use it to perform I/O, and vring_new_virtqueue() can't create
-	 * empty queues anyway, so don't bother to set up the device.
-	 */
-	if (info->num == 0) {
+	num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
+	if (num == 0) {
 		err = -ENOENT;
-		goto error_alloc_pages;
-	}
-
-	while (1) {
-		size = PAGE_ALIGN(vring_size(info->num,
-				VIRTIO_MMIO_VRING_ALIGN));
-		/* Did the last iter shrink the queue below minimum size? */
-		if (size < VIRTIO_MMIO_VRING_ALIGN * 2) {
-			err = -ENOMEM;
-			goto error_alloc_pages;
-		}
-
-		info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
-		if (info->queue)
-			break;
-
-		info->num /= 2;
+		goto error_new_virtqueue;
 	}
 
 	/* Create the vring */
-	vq = vring_new_virtqueue(index, info->num, VIRTIO_MMIO_VRING_ALIGN, vdev,
-				 true, info->queue, vm_notify, callback, name);
+	vq = vring_create_virtqueue(index, num, VIRTIO_MMIO_VRING_ALIGN, vdev,
+				 true, true, vm_notify, callback, name);
 	if (!vq) {
 		err = -ENOMEM;
 		goto error_new_virtqueue;
 	}
 
 	/* Activate the queue */
-	writel(info->num, vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
+	writel(virtqueue_get_vring_size(vq), vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
 	if (vm_dev->version == 1) {
 		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
-		writel(virt_to_phys(info->queue) >> PAGE_SHIFT,
+		writel(virtqueue_get_desc_addr(vq) >> PAGE_SHIFT,
 			vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
 	} else {
 		u64 addr;
 
-		addr = virt_to_phys(info->queue);
+		addr = virtqueue_get_desc_addr(vq);
 		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_LOW);
 		writel((u32)(addr >> 32),
 				vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_HIGH);
 
-		addr = virt_to_phys(virtqueue_get_avail(vq));
+		addr = virtqueue_get_avail_addr(vq);
 		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_LOW);
 		writel((u32)(addr >> 32),
 				vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH);
 
-		addr = virt_to_phys(virtqueue_get_used(vq));
+		addr = virtqueue_get_used_addr(vq);
 		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_USED_LOW);
 		writel((u32)(addr >> 32),
 				vm_dev->base + VIRTIO_MMIO_QUEUE_USED_HIGH);
@@ vm_setup_vq error path @@
 		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
 		WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
 	}
-	free_pages_exact(info->queue, size);
-error_alloc_pages:
 	kfree(info);
 error_kmalloc:
 error_available: