Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

virtio: Add platform bus driver for memory mapped virtio device

This patch, based on virtio PCI driver, adds support for memory
mapped (platform) virtio device. This should allow environments
like qemu to use virtio-based block & network devices even on
platforms without PCI support.

One can define and register a platform device whose resources
will describe memory mapped control registers and a "mailbox"
interrupt. Such a device can also be instantiated using a Device
Tree node with the compatible property equal to "virtio,mmio".

Cc: Anthony Liguori <aliguori@us.ibm.com>
Cc: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Pawel Moll <pawel.moll@arm.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>

authored by

Pawel Moll and committed by
Rusty Russell
edfd52e6 005b20a8

+619
+17
Documentation/devicetree/bindings/virtio/mmio.txt
··· 1 + * virtio memory mapped device 2 + 3 + See http://ozlabs.org/~rusty/virtio-spec/ for more details. 4 + 5 + Required properties: 6 + 7 + - compatible: "virtio,mmio" compatibility string 8 + - reg: control registers base address and size including configuration space 9 + - interrupts: interrupt generated by the device 10 + 11 + Example: 12 + 13 + virtio_block@3000 { 14 + compatible = "virtio,mmio"; 15 + reg = <0x3000 0x100>; 16 + interrupts = <41>; 17 + };
+11
drivers/virtio/Kconfig
··· 35 35 36 36 If unsure, say M. 37 37 38 + config VIRTIO_MMIO 39 + tristate "Platform bus driver for memory mapped virtio devices (EXPERIMENTAL)" 40 + depends on EXPERIMENTAL 41 + select VIRTIO 42 + select VIRTIO_RING 43 + ---help--- 44 + This driver provides support for memory mapped virtio 45 + platform devices. 46 + 47 + If unsure, say N. 48 + endmenu
+1
drivers/virtio/Makefile
··· 1 1 obj-$(CONFIG_VIRTIO) += virtio.o 2 2 obj-$(CONFIG_VIRTIO_RING) += virtio_ring.o 3 + obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o 3 4 obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o 4 5 obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o
+479
drivers/virtio/virtio_mmio.c
··· 1 + /* 2 + * Virtio memory mapped device driver 3 + * 4 + * Copyright 2011, ARM Ltd. 5 + * 6 + * This module allows virtio devices to be used over a virtual, memory mapped 7 + * platform device. 8 + * 9 + * Registers layout (all 32-bit wide): 10 + * 11 + * offset d. name description 12 + * ------ -- ---------------- ----------------- 13 + * 14 + * 0x000 R MagicValue Magic value "virt" 15 + * 0x004 R Version Device version (current max. 1) 16 + * 0x008 R DeviceID Virtio device ID 17 + * 0x00c R VendorID Virtio vendor ID 18 + * 19 + * 0x010 R HostFeatures Features supported by the host 20 + * 0x014 W HostFeaturesSel Set of host features to access via HostFeatures 21 + * 22 + * 0x020 W GuestFeatures Features activated by the guest 23 + * 0x024 W GuestFeaturesSel Set of activated features to set via GuestFeatures 24 + * 0x028 W GuestPageSize Size of guest's memory page in bytes 25 + * 26 + * 0x030 W QueueSel Queue selector 27 + * 0x034 R QueueNumMax Maximum size of the currently selected queue 28 + * 0x038 W QueueNum Queue size for the currently selected queue 29 + * 0x03c W QueueAlign Used Ring alignment for the current queue 30 + * 0x040 RW QueuePFN PFN for the currently selected queue 31 + * 32 + * 0x050 W QueueNotify Queue notifier 33 + * 0x060 R InterruptStatus Interrupt status register 34 + * 0x064 W InterruptACK Interrupt acknowledge register 35 + * 0x070 RW Status Device status register 36 + * 37 + * 0x100+ RW Device-specific configuration space 38 + * 39 + * Based on Virtio PCI driver by Anthony Liguori, copyright IBM Corp. 2007 40 + * 41 + * This work is licensed under the terms of the GNU GPL, version 2 or later. 42 + * See the COPYING file in the top-level directory. 
43 + */ 44 + 45 + #include <linux/highmem.h> 46 + #include <linux/interrupt.h> 47 + #include <linux/io.h> 48 + #include <linux/list.h> 49 + #include <linux/module.h> 50 + #include <linux/platform_device.h> 51 + #include <linux/slab.h> 52 + #include <linux/spinlock.h> 53 + #include <linux/virtio.h> 54 + #include <linux/virtio_config.h> 55 + #include <linux/virtio_mmio.h> 56 + #include <linux/virtio_ring.h> 57 + 58 + 59 + 60 + /* The alignment to use between consumer and producer parts of vring. 61 + * Currently hardcoded to the page size. */ 62 + #define VIRTIO_MMIO_VRING_ALIGN PAGE_SIZE 63 + 64 + 65 + 66 + #define to_virtio_mmio_device(_plat_dev) \ 67 + container_of(_plat_dev, struct virtio_mmio_device, vdev) 68 + 69 + struct virtio_mmio_device { 70 + struct virtio_device vdev; 71 + struct platform_device *pdev; 72 + 73 + void __iomem *base; 74 + unsigned long version; 75 + 76 + /* a list of queues so we can dispatch IRQs */ 77 + spinlock_t lock; 78 + struct list_head virtqueues; 79 + }; 80 + 81 + struct virtio_mmio_vq_info { 82 + /* the actual virtqueue */ 83 + struct virtqueue *vq; 84 + 85 + /* the number of entries in the queue */ 86 + unsigned int num; 87 + 88 + /* the index of the queue */ 89 + int queue_index; 90 + 91 + /* the virtual address of the ring queue */ 92 + void *queue; 93 + 94 + /* the list node for the virtqueues list */ 95 + struct list_head node; 96 + }; 97 + 98 + 99 + 100 + /* Configuration interface */ 101 + 102 + static u32 vm_get_features(struct virtio_device *vdev) 103 + { 104 + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 105 + 106 + /* TODO: Features > 32 bits */ 107 + writel(0, vm_dev->base + VIRTIO_MMIO_HOST_FEATURES_SEL); 108 + 109 + return readl(vm_dev->base + VIRTIO_MMIO_HOST_FEATURES); 110 + } 111 + 112 + static void vm_finalize_features(struct virtio_device *vdev) 113 + { 114 + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 115 + int i; 116 + 117 + /* Give virtio_ring a chance to accept 
features. */ 118 + vring_transport_features(vdev); 119 + 120 + for (i = 0; i < ARRAY_SIZE(vdev->features); i++) { 121 + writel(i, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SET); 122 + writel(vdev->features[i], 123 + vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES); 124 + } 125 + } 126 + 127 + static void vm_get(struct virtio_device *vdev, unsigned offset, 128 + void *buf, unsigned len) 129 + { 130 + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 131 + u8 *ptr = buf; 132 + int i; 133 + 134 + for (i = 0; i < len; i++) 135 + ptr[i] = readb(vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i); 136 + } 137 + 138 + static void vm_set(struct virtio_device *vdev, unsigned offset, 139 + const void *buf, unsigned len) 140 + { 141 + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 142 + const u8 *ptr = buf; 143 + int i; 144 + 145 + for (i = 0; i < len; i++) 146 + writeb(ptr[i], vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i); 147 + } 148 + 149 + static u8 vm_get_status(struct virtio_device *vdev) 150 + { 151 + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 152 + 153 + return readl(vm_dev->base + VIRTIO_MMIO_STATUS) & 0xff; 154 + } 155 + 156 + static void vm_set_status(struct virtio_device *vdev, u8 status) 157 + { 158 + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 159 + 160 + /* We should never be setting status to 0. */ 161 + BUG_ON(status == 0); 162 + 163 + writel(status, vm_dev->base + VIRTIO_MMIO_STATUS); 164 + } 165 + 166 + static void vm_reset(struct virtio_device *vdev) 167 + { 168 + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 169 + 170 + /* 0 status means a reset. 
*/ 171 + writel(0, vm_dev->base + VIRTIO_MMIO_STATUS); 172 + } 173 + 174 + 175 + 176 + /* Transport interface */ 177 + 178 + /* the notify function used when creating a virt queue */ 179 + static void vm_notify(struct virtqueue *vq) 180 + { 181 + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev); 182 + struct virtio_mmio_vq_info *info = vq->priv; 183 + 184 + /* We write the queue's selector into the notification register to 185 + * signal the other end */ 186 + writel(info->queue_index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY); 187 + } 188 + 189 + /* Notify all virtqueues on an interrupt. */ 190 + static irqreturn_t vm_interrupt(int irq, void *opaque) 191 + { 192 + struct virtio_mmio_device *vm_dev = opaque; 193 + struct virtio_mmio_vq_info *info; 194 + struct virtio_driver *vdrv = container_of(vm_dev->vdev.dev.driver, 195 + struct virtio_driver, driver); 196 + unsigned long status; 197 + unsigned long flags; 198 + irqreturn_t ret = IRQ_NONE; 199 + 200 + /* Read and acknowledge interrupts */ 201 + status = readl(vm_dev->base + VIRTIO_MMIO_INTERRUPT_STATUS); 202 + writel(status, vm_dev->base + VIRTIO_MMIO_INTERRUPT_ACK); 203 + 204 + if (unlikely(status & VIRTIO_MMIO_INT_CONFIG) 205 + && vdrv && vdrv->config_changed) { 206 + vdrv->config_changed(&vm_dev->vdev); 207 + ret = IRQ_HANDLED; 208 + } 209 + 210 + if (likely(status & VIRTIO_MMIO_INT_VRING)) { 211 + spin_lock_irqsave(&vm_dev->lock, flags); 212 + list_for_each_entry(info, &vm_dev->virtqueues, node) 213 + ret |= vring_interrupt(irq, info->vq); 214 + spin_unlock_irqrestore(&vm_dev->lock, flags); 215 + } 216 + 217 + return ret; 218 + } 219 + 220 + 221 + 222 + static void vm_del_vq(struct virtqueue *vq) 223 + { 224 + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev); 225 + struct virtio_mmio_vq_info *info = vq->priv; 226 + unsigned long flags, size; 227 + 228 + spin_lock_irqsave(&vm_dev->lock, flags); 229 + list_del(&info->node); 230 + spin_unlock_irqrestore(&vm_dev->lock, 
flags); 231 + 232 + vring_del_virtqueue(vq); 233 + 234 + /* Select and deactivate the queue */ 235 + writel(info->queue_index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL); 236 + writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN); 237 + 238 + size = PAGE_ALIGN(vring_size(info->num, VIRTIO_MMIO_VRING_ALIGN)); 239 + free_pages_exact(info->queue, size); 240 + kfree(info); 241 + } 242 + 243 + static void vm_del_vqs(struct virtio_device *vdev) 244 + { 245 + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 246 + struct virtqueue *vq, *n; 247 + 248 + list_for_each_entry_safe(vq, n, &vdev->vqs, list) 249 + vm_del_vq(vq); 250 + 251 + free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev); 252 + } 253 + 254 + 255 + 256 + static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index, 257 + void (*callback)(struct virtqueue *vq), 258 + const char *name) 259 + { 260 + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 261 + struct virtio_mmio_vq_info *info; 262 + struct virtqueue *vq; 263 + unsigned long flags, size; 264 + int err; 265 + 266 + /* Select the queue we're interested in */ 267 + writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL); 268 + 269 + /* Queue shouldn't already be set up. 
*/ 270 + if (readl(vm_dev->base + VIRTIO_MMIO_QUEUE_PFN)) { 271 + err = -ENOENT; 272 + goto error_available; 273 + } 274 + 275 + /* Allocate and fill out our active queue description */ 276 + info = kmalloc(sizeof(*info), GFP_KERNEL); 277 + if (!info) { 278 + err = -ENOMEM; 279 + goto error_kmalloc; 280 + } 281 + info->queue_index = index; 282 + 283 + /* Allocate pages for the queue - start with a queue as big as 284 + * possible (limited by maximum size allowed by device), drop down 285 + * to a minimal size, just big enough to fit descriptor table 286 + * and two rings (which makes it "alignment_size * 2") 287 + */ 288 + info->num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX); 289 + while (1) { 290 + size = PAGE_ALIGN(vring_size(info->num, 291 + VIRTIO_MMIO_VRING_ALIGN)); 292 + /* Already smallest possible allocation? */ 293 + if (size <= VIRTIO_MMIO_VRING_ALIGN * 2) { 294 + err = -ENOMEM; 295 + goto error_alloc_pages; 296 + } 297 + 298 + info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO); 299 + if (info->queue) 300 + break; 301 + 302 + info->num /= 2; 303 + } 304 + 305 + /* Activate the queue */ 306 + writel(info->num, vm_dev->base + VIRTIO_MMIO_QUEUE_NUM); 307 + writel(VIRTIO_MMIO_VRING_ALIGN, 308 + vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN); 309 + writel(virt_to_phys(info->queue) >> PAGE_SHIFT, 310 + vm_dev->base + VIRTIO_MMIO_QUEUE_PFN); 311 + 312 + /* Create the vring */ 313 + vq = vring_new_virtqueue(info->num, VIRTIO_MMIO_VRING_ALIGN, 314 + vdev, info->queue, vm_notify, callback, name); 315 + if (!vq) { 316 + err = -ENOMEM; 317 + goto error_new_virtqueue; 318 + } 319 + 320 + vq->priv = info; 321 + info->vq = vq; 322 + 323 + spin_lock_irqsave(&vm_dev->lock, flags); 324 + list_add(&info->node, &vm_dev->virtqueues); 325 + spin_unlock_irqrestore(&vm_dev->lock, flags); 326 + 327 + return vq; 328 + 329 + error_new_virtqueue: 330 + writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN); 331 + free_pages_exact(info->queue, size); 332 + error_alloc_pages: 333 
+ kfree(info); 334 + error_kmalloc: 335 + error_available: 336 + return ERR_PTR(err); 337 + } 338 + 339 + static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, 340 + struct virtqueue *vqs[], 341 + vq_callback_t *callbacks[], 342 + const char *names[]) 343 + { 344 + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); 345 + unsigned int irq = platform_get_irq(vm_dev->pdev, 0); 346 + int i, err; 347 + 348 + err = request_irq(irq, vm_interrupt, IRQF_SHARED, 349 + dev_name(&vdev->dev), vm_dev); 350 + if (err) 351 + return err; 352 + 353 + for (i = 0; i < nvqs; ++i) { 354 + vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i]); 355 + if (IS_ERR(vqs[i])) { 356 + vm_del_vqs(vdev); 357 + return PTR_ERR(vqs[i]); 358 + } 359 + } 360 + 361 + return 0; 362 + } 363 + 364 + 365 + 366 + static struct virtio_config_ops virtio_mmio_config_ops = { 367 + .get = vm_get, 368 + .set = vm_set, 369 + .get_status = vm_get_status, 370 + .set_status = vm_set_status, 371 + .reset = vm_reset, 372 + .find_vqs = vm_find_vqs, 373 + .del_vqs = vm_del_vqs, 374 + .get_features = vm_get_features, 375 + .finalize_features = vm_finalize_features, 376 + }; 377 + 378 + 379 + 380 + /* Platform device */ 381 + 382 + static int __devinit virtio_mmio_probe(struct platform_device *pdev) 383 + { 384 + struct virtio_mmio_device *vm_dev; 385 + struct resource *mem; 386 + unsigned long magic; 387 + 388 + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 389 + if (!mem) 390 + return -EINVAL; 391 + 392 + if (!devm_request_mem_region(&pdev->dev, mem->start, 393 + resource_size(mem), pdev->name)) 394 + return -EBUSY; 395 + 396 + vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL); 397 + if (!vm_dev) 398 + return -ENOMEM; 399 + 400 + vm_dev->vdev.dev.parent = &pdev->dev; 401 + vm_dev->vdev.config = &virtio_mmio_config_ops; 402 + vm_dev->pdev = pdev; 403 + INIT_LIST_HEAD(&vm_dev->virtqueues); 404 + spin_lock_init(&vm_dev->lock); 405 + 406 + vm_dev->base = devm_ioremap(&pdev->dev, 
mem->start, resource_size(mem)); 407 + if (vm_dev->base == NULL) 408 + return -EFAULT; 409 + 410 + /* Check magic value */ 411 + magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE); 412 + if (memcmp(&magic, "virt", 4) != 0) { 413 + dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic); 414 + return -ENODEV; 415 + } 416 + 417 + /* Check device version */ 418 + vm_dev->version = readl(vm_dev->base + VIRTIO_MMIO_VERSION); 419 + if (vm_dev->version != 1) { 420 + dev_err(&pdev->dev, "Version %ld not supported!\n", 421 + vm_dev->version); 422 + return -ENXIO; 423 + } 424 + 425 + vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID); 426 + vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID); 427 + 428 + writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE); 429 + 430 + platform_set_drvdata(pdev, vm_dev); 431 + 432 + return register_virtio_device(&vm_dev->vdev); 433 + } 434 + 435 + static int __devexit virtio_mmio_remove(struct platform_device *pdev) 436 + { 437 + struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev); 438 + 439 + unregister_virtio_device(&vm_dev->vdev); 440 + 441 + return 0; 442 + } 443 + 444 + 445 + 446 + /* Platform driver */ 447 + 448 + static struct of_device_id virtio_mmio_match[] = { 449 + { .compatible = "virtio,mmio", }, 450 + {}, 451 + }; 452 + MODULE_DEVICE_TABLE(of, virtio_mmio_match); 453 + 454 + static struct platform_driver virtio_mmio_driver = { 455 + .probe = virtio_mmio_probe, 456 + .remove = __devexit_p(virtio_mmio_remove), 457 + .driver = { 458 + .name = "virtio-mmio", 459 + .owner = THIS_MODULE, 460 + .of_match_table = virtio_mmio_match, 461 + }, 462 + }; 463 + 464 + static int __init virtio_mmio_init(void) 465 + { 466 + return platform_driver_register(&virtio_mmio_driver); 467 + } 468 + 469 + static void __exit virtio_mmio_exit(void) 470 + { 471 + platform_driver_unregister(&virtio_mmio_driver); 472 + } 473 + 474 + module_init(virtio_mmio_init); 475 + 
module_exit(virtio_mmio_exit); 476 + 477 + MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>"); 478 + MODULE_DESCRIPTION("Platform bus driver for memory mapped virtio devices"); 479 + MODULE_LICENSE("GPL");
+111
include/linux/virtio_mmio.h
··· 1 + /* 2 + * Virtio platform device driver 3 + * 4 + * Copyright 2011, ARM Ltd. 5 + * 6 + * Based on Virtio PCI driver by Anthony Liguori, copyright IBM Corp. 2007 7 + * 8 + * This header is BSD licensed so anyone can use the definitions to implement 9 + * compatible drivers/servers. 10 + * 11 + * Redistribution and use in source and binary forms, with or without 12 + * modification, are permitted provided that the following conditions 13 + * are met: 14 + * 1. Redistributions of source code must retain the above copyright 15 + * notice, this list of conditions and the following disclaimer. 16 + * 2. Redistributions in binary form must reproduce the above copyright 17 + * notice, this list of conditions and the following disclaimer in the 18 + * documentation and/or other materials provided with the distribution. 19 + * 3. Neither the name of IBM nor the names of its contributors 20 + * may be used to endorse or promote products derived from this software 21 + * without specific prior written permission. 22 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND 23 + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 + * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE 26 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 + * SUCH DAMAGE. 
33 + */ 34 + 35 + #ifndef _LINUX_VIRTIO_MMIO_H 36 + #define _LINUX_VIRTIO_MMIO_H 37 + 38 + /* 39 + * Control registers 40 + */ 41 + 42 + /* Magic value ("virt" string) - Read Only */ 43 + #define VIRTIO_MMIO_MAGIC_VALUE 0x000 44 + 45 + /* Virtio device version - Read Only */ 46 + #define VIRTIO_MMIO_VERSION 0x004 47 + 48 + /* Virtio device ID - Read Only */ 49 + #define VIRTIO_MMIO_DEVICE_ID 0x008 50 + 51 + /* Virtio vendor ID - Read Only */ 52 + #define VIRTIO_MMIO_VENDOR_ID 0x00c 53 + 54 + /* Bitmask of the features supported by the host 55 + * (32 bits per set) - Read Only */ 56 + #define VIRTIO_MMIO_HOST_FEATURES 0x010 57 + 58 + /* Host features set selector - Write Only */ 59 + #define VIRTIO_MMIO_HOST_FEATURES_SEL 0x014 60 + 61 + /* Bitmask of features activated by the guest 62 + * (32 bits per set) - Write Only */ 63 + #define VIRTIO_MMIO_GUEST_FEATURES 0x020 64 + 65 + /* Activated features set selector - Write Only */ 66 + #define VIRTIO_MMIO_GUEST_FEATURES_SET 0x024 67 + 68 + /* Guest's memory page size in bytes - Write Only */ 69 + #define VIRTIO_MMIO_GUEST_PAGE_SIZE 0x028 70 + 71 + /* Queue selector - Write Only */ 72 + #define VIRTIO_MMIO_QUEUE_SEL 0x030 73 + 74 + /* Maximum size of the currently selected queue - Read Only */ 75 + #define VIRTIO_MMIO_QUEUE_NUM_MAX 0x034 76 + 77 + /* Queue size for the currently selected queue - Write Only */ 78 + #define VIRTIO_MMIO_QUEUE_NUM 0x038 79 + 80 + /* Used Ring alignment for the currently selected queue - Write Only */ 81 + #define VIRTIO_MMIO_QUEUE_ALIGN 0x03c 82 + 83 + /* Guest's PFN for the currently selected queue - Read Write */ 84 + #define VIRTIO_MMIO_QUEUE_PFN 0x040 85 + 86 + /* Queue notifier - Write Only */ 87 + #define VIRTIO_MMIO_QUEUE_NOTIFY 0x050 88 + 89 + /* Interrupt status - Read Only */ 90 + #define VIRTIO_MMIO_INTERRUPT_STATUS 0x060 91 + 92 + /* Interrupt acknowledge - Write Only */ 93 + #define VIRTIO_MMIO_INTERRUPT_ACK 0x064 94 + 95 + /* Device status register - Read Write */ 96 + #define 
VIRTIO_MMIO_STATUS 0x070 97 + 98 + /* The config space is defined by each driver as 99 + * the per-driver configuration space - Read Write */ 100 + #define VIRTIO_MMIO_CONFIG 0x100 101 + 102 + 103 + 104 + /* 105 + * Interrupt flags (re: interrupt status & acknowledge registers) 106 + */ 107 + 108 + #define VIRTIO_MMIO_INT_VRING (1 << 0) 109 + #define VIRTIO_MMIO_INT_CONFIG (1 << 1) 110 + 111 + #endif