Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

um: virt-pci: Refactor virtio_pcidev into its own module

Decouple virt-pci and virtio_pcidev, refactoring virtio_pcidev into
its own module. Define a set of APIs for virt-pci. This allows for
future addition of more PCI emulation implementations.

Signed-off-by: Tiwei Bie <tiwei.btw@antgroup.com>
Link: https://patch.msgid.link/20250315161910.4082396-3-tiwei.btw@antgroup.com
Signed-off-by: Johannes Berg <johannes.berg@intel.com>

Authored by: Tiwei Bie
Committed by: Johannes Berg
24ffa71b 887c5c12

Total diff: +816 -611 lines changed across 5 files.

+8 -4
arch/um/drivers/Kconfig
··· 345 345 by providing a fake RTC clock that causes a wakeup at the right 346 346 time. 347 347 348 - config UML_PCI_OVER_VIRTIO 349 - bool "Enable PCI over VIRTIO device simulation" 350 - # in theory, just VIRTIO is enough, but that causes recursion 351 - depends on VIRTIO_UML 348 + config UML_PCI 349 + bool 352 350 select FORCE_PCI 353 351 select UML_IOMEM_EMULATION 354 352 select UML_DMA_EMULATION 355 353 select PCI_MSI 356 354 select PCI_LOCKLESS_CONFIG 355 + 356 + config UML_PCI_OVER_VIRTIO 357 + bool "Enable PCI over VIRTIO device simulation" 358 + # in theory, just VIRTIO is enough, but that causes recursion 359 + depends on VIRTIO_UML 360 + select UML_PCI 357 361 358 362 config UML_PCI_OVER_VIRTIO_DEVICE_ID 359 363 int "set the virtio device ID for PCI emulation"
+2 -1
arch/um/drivers/Makefile
··· 60 60 obj-$(CONFIG_UML_RANDOM) += random.o 61 61 obj-$(CONFIG_VIRTIO_UML) += virtio_uml.o 62 62 obj-$(CONFIG_UML_RTC) += rtc.o 63 - obj-$(CONFIG_UML_PCI_OVER_VIRTIO) += virt-pci.o 63 + obj-$(CONFIG_UML_PCI) += virt-pci.o 64 + obj-$(CONFIG_UML_PCI_OVER_VIRTIO) += virtio_pcidev.o 64 65 65 66 # pcap_user.o must be added explicitly. 66 67 USER_OBJS := fd.o null.o pty.o tty.o xterm.o slip_common.o vde_user.o vector_user.o
+137 -606
arch/um/drivers/virt-pci.c
··· 5 5 */ 6 6 #include <linux/module.h> 7 7 #include <linux/pci.h> 8 - #include <linux/virtio.h> 9 - #include <linux/virtio_config.h> 10 8 #include <linux/logic_iomem.h> 11 9 #include <linux/of_platform.h> 12 10 #include <linux/irqdomain.h> 13 - #include <linux/virtio_pcidev.h> 14 - #include <linux/virtio-uml.h> 15 - #include <linux/delay.h> 16 11 #include <linux/msi.h> 17 12 #include <linux/unaligned.h> 18 13 #include <irq_kern.h> 19 14 15 + #include "virt-pci.h" 16 + 20 17 #define MAX_DEVICES 8 21 18 #define MAX_MSI_VECTORS 32 22 19 #define CFG_SPACE_SIZE 4096 23 - 24 - /* for MSI-X we have a 32-bit payload */ 25 - #define MAX_IRQ_MSG_SIZE (sizeof(struct virtio_pcidev_msg) + sizeof(u32)) 26 - #define NUM_IRQ_MSGS 10 27 - 28 - struct um_pci_message_buffer { 29 - struct virtio_pcidev_msg hdr; 30 - u8 data[8]; 31 - }; 32 - 33 - struct um_pci_device { 34 - struct virtio_device *vdev; 35 - 36 - /* for now just standard BARs */ 37 - u8 resptr[PCI_STD_NUM_BARS]; 38 - 39 - struct virtqueue *cmd_vq, *irq_vq; 40 - 41 - #define UM_PCI_WRITE_BUFS 20 42 - struct um_pci_message_buffer bufs[UM_PCI_WRITE_BUFS + 1]; 43 - void *extra_ptrs[UM_PCI_WRITE_BUFS + 1]; 44 - DECLARE_BITMAP(used_bufs, UM_PCI_WRITE_BUFS); 45 - 46 - #define UM_PCI_STAT_WAITING 0 47 - unsigned long status; 48 - 49 - int irq; 50 - 51 - bool platform; 52 - }; 53 20 54 21 struct um_pci_device_reg { 55 22 struct um_pci_device *dev; ··· 32 65 static struct irq_domain *um_pci_msi_domain; 33 66 static unsigned long um_pci_msi_used[BITS_TO_LONGS(MAX_MSI_VECTORS)]; 34 67 35 - static unsigned int um_pci_max_delay_us = 40000; 36 - module_param_named(max_delay_us, um_pci_max_delay_us, uint, 0644); 37 - 38 - static int um_pci_get_buf(struct um_pci_device *dev, bool *posted) 39 - { 40 - int i; 41 - 42 - for (i = 0; i < UM_PCI_WRITE_BUFS; i++) { 43 - if (!test_and_set_bit(i, dev->used_bufs)) 44 - return i; 45 - } 46 - 47 - *posted = false; 48 - return UM_PCI_WRITE_BUFS; 49 - } 50 - 51 - static void um_pci_free_buf(struct 
um_pci_device *dev, void *buf) 52 - { 53 - int i; 54 - 55 - if (buf == &dev->bufs[UM_PCI_WRITE_BUFS]) { 56 - kfree(dev->extra_ptrs[UM_PCI_WRITE_BUFS]); 57 - dev->extra_ptrs[UM_PCI_WRITE_BUFS] = NULL; 58 - return; 59 - } 60 - 61 - for (i = 0; i < UM_PCI_WRITE_BUFS; i++) { 62 - if (buf == &dev->bufs[i]) { 63 - kfree(dev->extra_ptrs[i]); 64 - dev->extra_ptrs[i] = NULL; 65 - WARN_ON(!test_and_clear_bit(i, dev->used_bufs)); 66 - return; 67 - } 68 - } 69 - 70 - WARN_ON(1); 71 - } 72 - 73 - static int um_pci_send_cmd(struct um_pci_device *dev, 74 - struct virtio_pcidev_msg *cmd, 75 - unsigned int cmd_size, 76 - const void *extra, unsigned int extra_size, 77 - void *out, unsigned int out_size) 78 - { 79 - struct scatterlist out_sg, extra_sg, in_sg; 80 - struct scatterlist *sgs_list[] = { 81 - [0] = &out_sg, 82 - [1] = extra ? &extra_sg : &in_sg, 83 - [2] = extra ? &in_sg : NULL, 84 - }; 85 - struct um_pci_message_buffer *buf; 86 - int delay_count = 0; 87 - bool bounce_out; 88 - int ret, len; 89 - int buf_idx; 90 - bool posted; 91 - 92 - if (WARN_ON(cmd_size < sizeof(*cmd) || cmd_size > sizeof(*buf))) 93 - return -EINVAL; 94 - 95 - switch (cmd->op) { 96 - case VIRTIO_PCIDEV_OP_CFG_WRITE: 97 - case VIRTIO_PCIDEV_OP_MMIO_WRITE: 98 - case VIRTIO_PCIDEV_OP_MMIO_MEMSET: 99 - /* in PCI, writes are posted, so don't wait */ 100 - posted = !out; 101 - WARN_ON(!posted); 102 - break; 103 - default: 104 - posted = false; 105 - break; 106 - } 107 - 108 - bounce_out = !posted && cmd_size <= sizeof(*cmd) && 109 - out && out_size <= sizeof(buf->data); 110 - 111 - buf_idx = um_pci_get_buf(dev, &posted); 112 - buf = &dev->bufs[buf_idx]; 113 - memcpy(buf, cmd, cmd_size); 114 - 115 - if (posted && extra && extra_size > sizeof(buf) - cmd_size) { 116 - dev->extra_ptrs[buf_idx] = kmemdup(extra, extra_size, 117 - GFP_ATOMIC); 118 - 119 - if (!dev->extra_ptrs[buf_idx]) { 120 - um_pci_free_buf(dev, buf); 121 - return -ENOMEM; 122 - } 123 - extra = dev->extra_ptrs[buf_idx]; 124 - } else if (extra && 
extra_size <= sizeof(buf) - cmd_size) { 125 - memcpy((u8 *)buf + cmd_size, extra, extra_size); 126 - cmd_size += extra_size; 127 - extra_size = 0; 128 - extra = NULL; 129 - cmd = (void *)buf; 130 - } else { 131 - cmd = (void *)buf; 132 - } 133 - 134 - sg_init_one(&out_sg, cmd, cmd_size); 135 - if (extra) 136 - sg_init_one(&extra_sg, extra, extra_size); 137 - /* allow stack for small buffers */ 138 - if (bounce_out) 139 - sg_init_one(&in_sg, buf->data, out_size); 140 - else if (out) 141 - sg_init_one(&in_sg, out, out_size); 142 - 143 - /* add to internal virtio queue */ 144 - ret = virtqueue_add_sgs(dev->cmd_vq, sgs_list, 145 - extra ? 2 : 1, 146 - out ? 1 : 0, 147 - cmd, GFP_ATOMIC); 148 - if (ret) { 149 - um_pci_free_buf(dev, buf); 150 - return ret; 151 - } 152 - 153 - if (posted) { 154 - virtqueue_kick(dev->cmd_vq); 155 - return 0; 156 - } 157 - 158 - /* kick and poll for getting a response on the queue */ 159 - set_bit(UM_PCI_STAT_WAITING, &dev->status); 160 - virtqueue_kick(dev->cmd_vq); 161 - ret = 0; 162 - 163 - while (1) { 164 - void *completed = virtqueue_get_buf(dev->cmd_vq, &len); 165 - 166 - if (completed == buf) 167 - break; 168 - 169 - if (completed) 170 - um_pci_free_buf(dev, completed); 171 - 172 - if (WARN_ONCE(virtqueue_is_broken(dev->cmd_vq) || 173 - ++delay_count > um_pci_max_delay_us, 174 - "um virt-pci delay: %d", delay_count)) { 175 - ret = -EIO; 176 - break; 177 - } 178 - udelay(1); 179 - } 180 - clear_bit(UM_PCI_STAT_WAITING, &dev->status); 181 - 182 - if (bounce_out) 183 - memcpy(out, buf->data, out_size); 184 - 185 - um_pci_free_buf(dev, buf); 186 - 187 - return ret; 188 - } 189 - 190 68 static unsigned long um_pci_cfgspace_read(void *priv, unsigned int offset, 191 69 int size) 192 70 { 193 71 struct um_pci_device_reg *reg = priv; 194 72 struct um_pci_device *dev = reg->dev; 195 - struct virtio_pcidev_msg hdr = { 196 - .op = VIRTIO_PCIDEV_OP_CFG_READ, 197 - .size = size, 198 - .addr = offset, 199 - }; 200 - /* max 8, we might not use it 
all */ 201 - u8 data[8]; 202 73 203 74 if (!dev) 204 75 return ULONG_MAX; 205 - 206 - memset(data, 0xff, sizeof(data)); 207 76 208 77 switch (size) { 209 78 case 1: ··· 54 251 return ULONG_MAX; 55 252 } 56 253 57 - if (um_pci_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, data, size)) 58 - return ULONG_MAX; 59 - 60 - switch (size) { 61 - case 1: 62 - return data[0]; 63 - case 2: 64 - return le16_to_cpup((void *)data); 65 - case 4: 66 - return le32_to_cpup((void *)data); 67 - #ifdef CONFIG_64BIT 68 - case 8: 69 - return le64_to_cpup((void *)data); 70 - #endif 71 - default: 72 - return ULONG_MAX; 73 - } 254 + return dev->ops->cfgspace_read(dev, offset, size); 74 255 } 75 256 76 257 static void um_pci_cfgspace_write(void *priv, unsigned int offset, int size, ··· 62 275 { 63 276 struct um_pci_device_reg *reg = priv; 64 277 struct um_pci_device *dev = reg->dev; 65 - struct { 66 - struct virtio_pcidev_msg hdr; 67 - /* maximum size - we may only use parts of it */ 68 - u8 data[8]; 69 - } msg = { 70 - .hdr = { 71 - .op = VIRTIO_PCIDEV_OP_CFG_WRITE, 72 - .size = size, 73 - .addr = offset, 74 - }, 75 - }; 76 278 77 279 if (!dev) 78 280 return; 79 281 80 282 switch (size) { 81 283 case 1: 82 - msg.data[0] = (u8)val; 83 - break; 84 284 case 2: 85 - put_unaligned_le16(val, (void *)msg.data); 86 - break; 87 285 case 4: 88 - put_unaligned_le32(val, (void *)msg.data); 89 - break; 90 286 #ifdef CONFIG_64BIT 91 287 case 8: 92 - put_unaligned_le64(val, (void *)msg.data); 93 - break; 94 288 #endif 289 + break; 95 290 default: 96 291 WARN(1, "invalid config space write size %d\n", size); 97 292 return; 98 293 } 99 294 100 - WARN_ON(um_pci_send_cmd(dev, &msg.hdr, sizeof(msg), NULL, 0, NULL, 0)); 295 + dev->ops->cfgspace_write(dev, offset, size, val); 101 296 } 102 297 103 298 static const struct logic_iomem_ops um_pci_device_cfgspace_ops = { 104 299 .read = um_pci_cfgspace_read, 105 300 .write = um_pci_cfgspace_write, 106 301 }; 302 + 303 + static unsigned long um_pci_bar_read(void *priv, 
unsigned int offset, 304 + int size) 305 + { 306 + u8 *resptr = priv; 307 + struct um_pci_device *dev = container_of(resptr - *resptr, 308 + struct um_pci_device, 309 + resptr[0]); 310 + u8 bar = *resptr; 311 + 312 + switch (size) { 313 + case 1: 314 + case 2: 315 + case 4: 316 + #ifdef CONFIG_64BIT 317 + case 8: 318 + #endif 319 + break; 320 + default: 321 + WARN(1, "invalid bar read size %d\n", size); 322 + return ULONG_MAX; 323 + } 324 + 325 + return dev->ops->bar_read(dev, bar, offset, size); 326 + } 327 + 328 + static void um_pci_bar_write(void *priv, unsigned int offset, int size, 329 + unsigned long val) 330 + { 331 + u8 *resptr = priv; 332 + struct um_pci_device *dev = container_of(resptr - *resptr, 333 + struct um_pci_device, 334 + resptr[0]); 335 + u8 bar = *resptr; 336 + 337 + switch (size) { 338 + case 1: 339 + case 2: 340 + case 4: 341 + #ifdef CONFIG_64BIT 342 + case 8: 343 + #endif 344 + break; 345 + default: 346 + WARN(1, "invalid bar write size %d\n", size); 347 + return; 348 + } 349 + 350 + dev->ops->bar_write(dev, bar, offset, size, val); 351 + } 107 352 108 353 static void um_pci_bar_copy_from(void *priv, void *buffer, 109 354 unsigned int offset, int size) ··· 144 325 struct um_pci_device *dev = container_of(resptr - *resptr, 145 326 struct um_pci_device, 146 327 resptr[0]); 147 - struct virtio_pcidev_msg hdr = { 148 - .op = VIRTIO_PCIDEV_OP_MMIO_READ, 149 - .bar = *resptr, 150 - .size = size, 151 - .addr = offset, 152 - }; 328 + u8 bar = *resptr; 153 329 154 - memset(buffer, 0xff, size); 155 - 156 - um_pci_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, buffer, size); 157 - } 158 - 159 - static unsigned long um_pci_bar_read(void *priv, unsigned int offset, 160 - int size) 161 - { 162 - /* 8 is maximum size - we may only use parts of it */ 163 - u8 data[8]; 164 - 165 - switch (size) { 166 - case 1: 167 - case 2: 168 - case 4: 169 - #ifdef CONFIG_64BIT 170 - case 8: 171 - #endif 172 - break; 173 - default: 174 - WARN(1, "invalid config space read size 
%d\n", size); 175 - return ULONG_MAX; 176 - } 177 - 178 - um_pci_bar_copy_from(priv, data, offset, size); 179 - 180 - switch (size) { 181 - case 1: 182 - return data[0]; 183 - case 2: 184 - return le16_to_cpup((void *)data); 185 - case 4: 186 - return le32_to_cpup((void *)data); 187 - #ifdef CONFIG_64BIT 188 - case 8: 189 - return le64_to_cpup((void *)data); 190 - #endif 191 - default: 192 - return ULONG_MAX; 193 - } 330 + dev->ops->bar_copy_from(dev, bar, buffer, offset, size); 194 331 } 195 332 196 333 static void um_pci_bar_copy_to(void *priv, unsigned int offset, ··· 156 381 struct um_pci_device *dev = container_of(resptr - *resptr, 157 382 struct um_pci_device, 158 383 resptr[0]); 159 - struct virtio_pcidev_msg hdr = { 160 - .op = VIRTIO_PCIDEV_OP_MMIO_WRITE, 161 - .bar = *resptr, 162 - .size = size, 163 - .addr = offset, 164 - }; 384 + u8 bar = *resptr; 165 385 166 - um_pci_send_cmd(dev, &hdr, sizeof(hdr), buffer, size, NULL, 0); 167 - } 168 - 169 - static void um_pci_bar_write(void *priv, unsigned int offset, int size, 170 - unsigned long val) 171 - { 172 - /* maximum size - we may only use parts of it */ 173 - u8 data[8]; 174 - 175 - switch (size) { 176 - case 1: 177 - data[0] = (u8)val; 178 - break; 179 - case 2: 180 - put_unaligned_le16(val, (void *)data); 181 - break; 182 - case 4: 183 - put_unaligned_le32(val, (void *)data); 184 - break; 185 - #ifdef CONFIG_64BIT 186 - case 8: 187 - put_unaligned_le64(val, (void *)data); 188 - break; 189 - #endif 190 - default: 191 - WARN(1, "invalid config space write size %d\n", size); 192 - return; 193 - } 194 - 195 - um_pci_bar_copy_to(priv, offset, data, size); 386 + dev->ops->bar_copy_to(dev, bar, offset, buffer, size); 196 387 } 197 388 198 389 static void um_pci_bar_set(void *priv, unsigned int offset, u8 value, int size) ··· 167 426 struct um_pci_device *dev = container_of(resptr - *resptr, 168 427 struct um_pci_device, 169 428 resptr[0]); 170 - struct { 171 - struct virtio_pcidev_msg hdr; 172 - u8 data; 173 - 
} msg = { 174 - .hdr = { 175 - .op = VIRTIO_PCIDEV_OP_CFG_WRITE, 176 - .bar = *resptr, 177 - .size = size, 178 - .addr = offset, 179 - }, 180 - .data = value, 181 - }; 429 + u8 bar = *resptr; 182 430 183 - um_pci_send_cmd(dev, &msg.hdr, sizeof(msg), NULL, 0, NULL, 0); 431 + dev->ops->bar_set(dev, bar, offset, value, size); 184 432 } 185 433 186 434 static const struct logic_iomem_ops um_pci_device_bar_ops = { ··· 216 486 pci_unlock_rescan_remove(); 217 487 } 218 488 219 - static void um_pci_irq_vq_addbuf(struct virtqueue *vq, void *buf, bool kick) 220 - { 221 - struct scatterlist sg[1]; 222 - 223 - sg_init_one(sg, buf, MAX_IRQ_MSG_SIZE); 224 - if (virtqueue_add_inbuf(vq, sg, 1, buf, GFP_ATOMIC)) 225 - kfree(buf); 226 - else if (kick) 227 - virtqueue_kick(vq); 228 - } 229 - 230 - static void um_pci_handle_irq_message(struct virtqueue *vq, 231 - struct virtio_pcidev_msg *msg) 232 - { 233 - struct virtio_device *vdev = vq->vdev; 234 - struct um_pci_device *dev = vdev->priv; 235 - 236 - if (!dev->irq) 237 - return; 238 - 239 - /* we should properly chain interrupts, but on ARCH=um we don't care */ 240 - 241 - switch (msg->op) { 242 - case VIRTIO_PCIDEV_OP_INT: 243 - generic_handle_irq(dev->irq); 244 - break; 245 - case VIRTIO_PCIDEV_OP_MSI: 246 - /* our MSI message is just the interrupt number */ 247 - if (msg->size == sizeof(u32)) 248 - generic_handle_irq(le32_to_cpup((void *)msg->data)); 249 - else 250 - generic_handle_irq(le16_to_cpup((void *)msg->data)); 251 - break; 252 - case VIRTIO_PCIDEV_OP_PME: 253 - /* nothing to do - we already woke up due to the message */ 254 - break; 255 - default: 256 - dev_err(&vdev->dev, "unexpected virt-pci message %d\n", msg->op); 257 - break; 258 - } 259 - } 260 - 261 - static void um_pci_cmd_vq_cb(struct virtqueue *vq) 262 - { 263 - struct virtio_device *vdev = vq->vdev; 264 - struct um_pci_device *dev = vdev->priv; 265 - void *cmd; 266 - int len; 267 - 268 - if (test_bit(UM_PCI_STAT_WAITING, &dev->status)) 269 - return; 270 - 271 
- while ((cmd = virtqueue_get_buf(vq, &len))) 272 - um_pci_free_buf(dev, cmd); 273 - } 274 - 275 - static void um_pci_irq_vq_cb(struct virtqueue *vq) 276 - { 277 - struct virtio_pcidev_msg *msg; 278 - int len; 279 - 280 - while ((msg = virtqueue_get_buf(vq, &len))) { 281 - if (len >= sizeof(*msg)) 282 - um_pci_handle_irq_message(vq, msg); 283 - 284 - /* recycle the message buffer */ 285 - um_pci_irq_vq_addbuf(vq, msg, true); 286 - } 287 - } 288 - 289 489 #ifdef CONFIG_OF 290 490 /* Copied from arch/x86/kernel/devicetree.c */ 291 491 struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus) ··· 236 576 return NULL; 237 577 } 238 578 #endif 239 - 240 - static int um_pci_init_vqs(struct um_pci_device *dev) 241 - { 242 - struct virtqueue_info vqs_info[] = { 243 - { "cmd", um_pci_cmd_vq_cb }, 244 - { "irq", um_pci_irq_vq_cb }, 245 - }; 246 - struct virtqueue *vqs[2]; 247 - int err, i; 248 - 249 - err = virtio_find_vqs(dev->vdev, 2, vqs, vqs_info, NULL); 250 - if (err) 251 - return err; 252 - 253 - dev->cmd_vq = vqs[0]; 254 - dev->irq_vq = vqs[1]; 255 - 256 - virtio_device_ready(dev->vdev); 257 - 258 - for (i = 0; i < NUM_IRQ_MSGS; i++) { 259 - void *msg = kzalloc(MAX_IRQ_MSG_SIZE, GFP_KERNEL); 260 - 261 - if (msg) 262 - um_pci_irq_vq_addbuf(dev->irq_vq, msg, false); 263 - } 264 - 265 - virtqueue_kick(dev->irq_vq); 266 - 267 - return 0; 268 - } 269 - 270 - static void __um_pci_virtio_platform_remove(struct virtio_device *vdev, 271 - struct um_pci_device *dev) 272 - { 273 - virtio_reset_device(vdev); 274 - vdev->config->del_vqs(vdev); 275 - 276 - mutex_lock(&um_pci_mtx); 277 - um_pci_platform_device = NULL; 278 - mutex_unlock(&um_pci_mtx); 279 - 280 - kfree(dev); 281 - } 282 - 283 - static int um_pci_virtio_platform_probe(struct virtio_device *vdev, 284 - struct um_pci_device *dev) 285 - { 286 - int ret; 287 - 288 - dev->platform = true; 289 - 290 - mutex_lock(&um_pci_mtx); 291 - 292 - if (um_pci_platform_device) { 293 - mutex_unlock(&um_pci_mtx); 294 - ret = 
-EBUSY; 295 - goto out_free; 296 - } 297 - 298 - ret = um_pci_init_vqs(dev); 299 - if (ret) { 300 - mutex_unlock(&um_pci_mtx); 301 - goto out_free; 302 - } 303 - 304 - um_pci_platform_device = dev; 305 - 306 - mutex_unlock(&um_pci_mtx); 307 - 308 - ret = of_platform_default_populate(vdev->dev.of_node, NULL, &vdev->dev); 309 - if (ret) 310 - __um_pci_virtio_platform_remove(vdev, dev); 311 - 312 - return ret; 313 - 314 - out_free: 315 - kfree(dev); 316 - return ret; 317 - } 318 - 319 - static int um_pci_virtio_probe(struct virtio_device *vdev) 320 - { 321 - struct um_pci_device *dev; 322 - int i, free = -1; 323 - int err = -ENOSPC; 324 - 325 - dev = kzalloc(sizeof(*dev), GFP_KERNEL); 326 - if (!dev) 327 - return -ENOMEM; 328 - 329 - dev->vdev = vdev; 330 - vdev->priv = dev; 331 - 332 - if (of_device_is_compatible(vdev->dev.of_node, "simple-bus")) 333 - return um_pci_virtio_platform_probe(vdev, dev); 334 - 335 - mutex_lock(&um_pci_mtx); 336 - for (i = 0; i < MAX_DEVICES; i++) { 337 - if (um_pci_devices[i].dev) 338 - continue; 339 - free = i; 340 - break; 341 - } 342 - 343 - if (free < 0) 344 - goto error; 345 - 346 - err = um_pci_init_vqs(dev); 347 - if (err) 348 - goto error; 349 - 350 - dev->irq = irq_alloc_desc(numa_node_id()); 351 - if (dev->irq < 0) { 352 - err = dev->irq; 353 - goto err_reset; 354 - } 355 - um_pci_devices[free].dev = dev; 356 - vdev->priv = dev; 357 - 358 - mutex_unlock(&um_pci_mtx); 359 - 360 - device_set_wakeup_enable(&vdev->dev, true); 361 - 362 - /* 363 - * In order to do suspend-resume properly, don't allow VQs 364 - * to be suspended. 
365 - */ 366 - virtio_uml_set_no_vq_suspend(vdev, true); 367 - 368 - um_pci_rescan(); 369 - return 0; 370 - err_reset: 371 - virtio_reset_device(vdev); 372 - vdev->config->del_vqs(vdev); 373 - error: 374 - mutex_unlock(&um_pci_mtx); 375 - kfree(dev); 376 - return err; 377 - } 378 - 379 - static void um_pci_virtio_remove(struct virtio_device *vdev) 380 - { 381 - struct um_pci_device *dev = vdev->priv; 382 - int i; 383 - 384 - if (dev->platform) { 385 - of_platform_depopulate(&vdev->dev); 386 - __um_pci_virtio_platform_remove(vdev, dev); 387 - return; 388 - } 389 - 390 - device_set_wakeup_enable(&vdev->dev, false); 391 - 392 - mutex_lock(&um_pci_mtx); 393 - for (i = 0; i < MAX_DEVICES; i++) { 394 - if (um_pci_devices[i].dev != dev) 395 - continue; 396 - 397 - um_pci_devices[i].dev = NULL; 398 - irq_free_desc(dev->irq); 399 - 400 - break; 401 - } 402 - mutex_unlock(&um_pci_mtx); 403 - 404 - if (i < MAX_DEVICES) { 405 - struct pci_dev *pci_dev; 406 - 407 - pci_dev = pci_get_slot(bridge->bus, i); 408 - if (pci_dev) 409 - pci_stop_and_remove_bus_device_locked(pci_dev); 410 - } 411 - 412 - /* Stop all virtqueues */ 413 - virtio_reset_device(vdev); 414 - dev->cmd_vq = NULL; 415 - dev->irq_vq = NULL; 416 - vdev->config->del_vqs(vdev); 417 - 418 - kfree(dev); 419 - } 420 - 421 - static struct virtio_device_id id_table[] = { 422 - { CONFIG_UML_PCI_OVER_VIRTIO_DEVICE_ID, VIRTIO_DEV_ANY_ID }, 423 - { 0 }, 424 - }; 425 - MODULE_DEVICE_TABLE(virtio, id_table); 426 - 427 - static struct virtio_driver um_pci_virtio_driver = { 428 - .driver.name = "virtio-pci", 429 - .id_table = id_table, 430 - .probe = um_pci_virtio_probe, 431 - .remove = um_pci_virtio_remove, 432 - }; 433 579 434 580 static struct resource virt_cfgspace_resource = { 435 581 .name = "PCI config space", ··· 355 889 } 356 890 357 891 static struct irq_chip um_pci_msi_bottom_irq_chip = { 358 - .name = "UM virtio MSI", 892 + .name = "UM virtual MSI", 359 893 .irq_compose_msi_msg = um_pci_compose_msi_msg, 360 894 }; 361 
895 ··· 405 939 }; 406 940 407 941 static struct irq_chip um_pci_msi_irq_chip = { 408 - .name = "UM virtio PCIe MSI", 942 + .name = "UM virtual PCIe MSI", 409 943 .irq_mask = pci_msi_mask_irq, 410 944 .irq_unmask = pci_msi_unmask_irq, 411 945 }; ··· 464 998 .flags = IORESOURCE_MEM, 465 999 }; 466 1000 1001 + int um_pci_device_register(struct um_pci_device *dev) 1002 + { 1003 + int i, free = -1; 1004 + int err = 0; 1005 + 1006 + mutex_lock(&um_pci_mtx); 1007 + for (i = 0; i < MAX_DEVICES; i++) { 1008 + if (um_pci_devices[i].dev) 1009 + continue; 1010 + free = i; 1011 + break; 1012 + } 1013 + 1014 + if (free < 0) { 1015 + err = -ENOSPC; 1016 + goto out; 1017 + } 1018 + 1019 + dev->irq = irq_alloc_desc(numa_node_id()); 1020 + if (dev->irq < 0) { 1021 + err = dev->irq; 1022 + goto out; 1023 + } 1024 + 1025 + um_pci_devices[free].dev = dev; 1026 + 1027 + out: 1028 + mutex_unlock(&um_pci_mtx); 1029 + if (!err) 1030 + um_pci_rescan(); 1031 + return err; 1032 + } 1033 + 1034 + void um_pci_device_unregister(struct um_pci_device *dev) 1035 + { 1036 + int i; 1037 + 1038 + mutex_lock(&um_pci_mtx); 1039 + for (i = 0; i < MAX_DEVICES; i++) { 1040 + if (um_pci_devices[i].dev != dev) 1041 + continue; 1042 + um_pci_devices[i].dev = NULL; 1043 + irq_free_desc(dev->irq); 1044 + break; 1045 + } 1046 + mutex_unlock(&um_pci_mtx); 1047 + 1048 + if (i < MAX_DEVICES) { 1049 + struct pci_dev *pci_dev; 1050 + 1051 + pci_dev = pci_get_slot(bridge->bus, i); 1052 + if (pci_dev) 1053 + pci_stop_and_remove_bus_device_locked(pci_dev); 1054 + } 1055 + } 1056 + 1057 + int um_pci_platform_device_register(struct um_pci_device *dev) 1058 + { 1059 + guard(mutex)(&um_pci_mtx); 1060 + if (um_pci_platform_device) 1061 + return -EBUSY; 1062 + um_pci_platform_device = dev; 1063 + return 0; 1064 + } 1065 + 1066 + void um_pci_platform_device_unregister(struct um_pci_device *dev) 1067 + { 1068 + guard(mutex)(&um_pci_mtx); 1069 + if (um_pci_platform_device == dev) 1070 + um_pci_platform_device = NULL; 1071 + } 
1072 + 467 1073 static int __init um_pci_init(void) 468 1074 { 469 1075 struct irq_domain_info inner_domain_info = { ··· 551 1013 &um_pci_iomem_ops)); 552 1014 WARN_ON(logic_iomem_add_region(&virt_platform_resource, 553 1015 &um_pci_platform_ops)); 554 - 555 - if (WARN(CONFIG_UML_PCI_OVER_VIRTIO_DEVICE_ID < 0, 556 - "No virtio device ID configured for PCI - no PCI support\n")) 557 - return 0; 558 1016 559 1017 bridge = pci_alloc_host_bridge(0); 560 1018 if (!bridge) { ··· 599 1065 if (err) 600 1066 goto free; 601 1067 602 - err = register_virtio_driver(&um_pci_virtio_driver); 603 - if (err) 604 - goto free; 605 1068 return 0; 1069 + 606 1070 free: 607 1071 if (!IS_ERR_OR_NULL(um_pci_inner_domain)) 608 1072 irq_domain_remove(um_pci_inner_domain); ··· 612 1080 } 613 1081 return err; 614 1082 } 615 - module_init(um_pci_init); 1083 + device_initcall(um_pci_init); 616 1084 617 1085 static void __exit um_pci_exit(void) 618 1086 { 619 - unregister_virtio_driver(&um_pci_virtio_driver); 620 1087 irq_domain_remove(um_pci_msi_domain); 621 1088 irq_domain_remove(um_pci_inner_domain); 622 1089 pci_free_resource_list(&bridge->windows);
+41
arch/um/drivers/virt-pci.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef __UM_VIRT_PCI_H 3 + #define __UM_VIRT_PCI_H 4 + 5 + #include <linux/pci.h> 6 + 7 + struct um_pci_device { 8 + const struct um_pci_ops *ops; 9 + 10 + /* for now just standard BARs */ 11 + u8 resptr[PCI_STD_NUM_BARS]; 12 + 13 + int irq; 14 + }; 15 + 16 + struct um_pci_ops { 17 + unsigned long (*cfgspace_read)(struct um_pci_device *dev, 18 + unsigned int offset, int size); 19 + void (*cfgspace_write)(struct um_pci_device *dev, unsigned int offset, 20 + int size, unsigned long val); 21 + 22 + unsigned long (*bar_read)(struct um_pci_device *dev, int bar, 23 + unsigned int offset, int size); 24 + void (*bar_write)(struct um_pci_device *dev, int bar, 25 + unsigned int offset, int size, unsigned long val); 26 + 27 + void (*bar_copy_from)(struct um_pci_device *dev, int bar, void *buffer, 28 + unsigned int offset, int size); 29 + void (*bar_copy_to)(struct um_pci_device *dev, int bar, 30 + unsigned int offset, const void *buffer, int size); 31 + void (*bar_set)(struct um_pci_device *dev, int bar, 32 + unsigned int offset, u8 value, int size); 33 + }; 34 + 35 + int um_pci_device_register(struct um_pci_device *dev); 36 + void um_pci_device_unregister(struct um_pci_device *dev); 37 + 38 + int um_pci_platform_device_register(struct um_pci_device *dev); 39 + void um_pci_platform_device_unregister(struct um_pci_device *dev); 40 + 41 + #endif /* __UM_VIRT_PCI_H */
+628
arch/um/drivers/virtio_pcidev.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Copyright (C) 2020 Intel Corporation 4 + * Author: Johannes Berg <johannes@sipsolutions.net> 5 + */ 6 + #include <linux/module.h> 7 + #include <linux/pci.h> 8 + #include <linux/virtio.h> 9 + #include <linux/virtio_config.h> 10 + #include <linux/logic_iomem.h> 11 + #include <linux/of_platform.h> 12 + #include <linux/irqdomain.h> 13 + #include <linux/virtio_pcidev.h> 14 + #include <linux/virtio-uml.h> 15 + #include <linux/delay.h> 16 + #include <linux/msi.h> 17 + #include <linux/unaligned.h> 18 + #include <irq_kern.h> 19 + 20 + #include "virt-pci.h" 21 + 22 + #define to_virtio_pcidev(_pdev) \ 23 + container_of(_pdev, struct virtio_pcidev_device, pdev) 24 + 25 + /* for MSI-X we have a 32-bit payload */ 26 + #define MAX_IRQ_MSG_SIZE (sizeof(struct virtio_pcidev_msg) + sizeof(u32)) 27 + #define NUM_IRQ_MSGS 10 28 + 29 + struct virtio_pcidev_message_buffer { 30 + struct virtio_pcidev_msg hdr; 31 + u8 data[8]; 32 + }; 33 + 34 + struct virtio_pcidev_device { 35 + struct um_pci_device pdev; 36 + struct virtio_device *vdev; 37 + 38 + struct virtqueue *cmd_vq, *irq_vq; 39 + 40 + #define VIRTIO_PCIDEV_WRITE_BUFS 20 41 + struct virtio_pcidev_message_buffer bufs[VIRTIO_PCIDEV_WRITE_BUFS + 1]; 42 + void *extra_ptrs[VIRTIO_PCIDEV_WRITE_BUFS + 1]; 43 + DECLARE_BITMAP(used_bufs, VIRTIO_PCIDEV_WRITE_BUFS); 44 + 45 + #define UM_PCI_STAT_WAITING 0 46 + unsigned long status; 47 + 48 + bool platform; 49 + }; 50 + 51 + static unsigned int virtio_pcidev_max_delay_us = 40000; 52 + module_param_named(max_delay_us, virtio_pcidev_max_delay_us, uint, 0644); 53 + 54 + static int virtio_pcidev_get_buf(struct virtio_pcidev_device *dev, bool *posted) 55 + { 56 + int i; 57 + 58 + for (i = 0; i < VIRTIO_PCIDEV_WRITE_BUFS; i++) { 59 + if (!test_and_set_bit(i, dev->used_bufs)) 60 + return i; 61 + } 62 + 63 + *posted = false; 64 + return VIRTIO_PCIDEV_WRITE_BUFS; 65 + } 66 + 67 + static void virtio_pcidev_free_buf(struct virtio_pcidev_device *dev, 
void *buf) 68 + { 69 + int i; 70 + 71 + if (buf == &dev->bufs[VIRTIO_PCIDEV_WRITE_BUFS]) { 72 + kfree(dev->extra_ptrs[VIRTIO_PCIDEV_WRITE_BUFS]); 73 + dev->extra_ptrs[VIRTIO_PCIDEV_WRITE_BUFS] = NULL; 74 + return; 75 + } 76 + 77 + for (i = 0; i < VIRTIO_PCIDEV_WRITE_BUFS; i++) { 78 + if (buf == &dev->bufs[i]) { 79 + kfree(dev->extra_ptrs[i]); 80 + dev->extra_ptrs[i] = NULL; 81 + WARN_ON(!test_and_clear_bit(i, dev->used_bufs)); 82 + return; 83 + } 84 + } 85 + 86 + WARN_ON(1); 87 + } 88 + 89 + static int virtio_pcidev_send_cmd(struct virtio_pcidev_device *dev, 90 + struct virtio_pcidev_msg *cmd, 91 + unsigned int cmd_size, 92 + const void *extra, unsigned int extra_size, 93 + void *out, unsigned int out_size) 94 + { 95 + struct scatterlist out_sg, extra_sg, in_sg; 96 + struct scatterlist *sgs_list[] = { 97 + [0] = &out_sg, 98 + [1] = extra ? &extra_sg : &in_sg, 99 + [2] = extra ? &in_sg : NULL, 100 + }; 101 + struct virtio_pcidev_message_buffer *buf; 102 + int delay_count = 0; 103 + bool bounce_out; 104 + int ret, len; 105 + int buf_idx; 106 + bool posted; 107 + 108 + if (WARN_ON(cmd_size < sizeof(*cmd) || cmd_size > sizeof(*buf))) 109 + return -EINVAL; 110 + 111 + switch (cmd->op) { 112 + case VIRTIO_PCIDEV_OP_CFG_WRITE: 113 + case VIRTIO_PCIDEV_OP_MMIO_WRITE: 114 + case VIRTIO_PCIDEV_OP_MMIO_MEMSET: 115 + /* in PCI, writes are posted, so don't wait */ 116 + posted = !out; 117 + WARN_ON(!posted); 118 + break; 119 + default: 120 + posted = false; 121 + break; 122 + } 123 + 124 + bounce_out = !posted && cmd_size <= sizeof(*cmd) && 125 + out && out_size <= sizeof(buf->data); 126 + 127 + buf_idx = virtio_pcidev_get_buf(dev, &posted); 128 + buf = &dev->bufs[buf_idx]; 129 + memcpy(buf, cmd, cmd_size); 130 + 131 + if (posted && extra && extra_size > sizeof(buf) - cmd_size) { 132 + dev->extra_ptrs[buf_idx] = kmemdup(extra, extra_size, 133 + GFP_ATOMIC); 134 + 135 + if (!dev->extra_ptrs[buf_idx]) { 136 + virtio_pcidev_free_buf(dev, buf); 137 + return -ENOMEM; 138 + } 139 + 
extra = dev->extra_ptrs[buf_idx]; 140 + } else if (extra && extra_size <= sizeof(buf) - cmd_size) { 141 + memcpy((u8 *)buf + cmd_size, extra, extra_size); 142 + cmd_size += extra_size; 143 + extra_size = 0; 144 + extra = NULL; 145 + cmd = (void *)buf; 146 + } else { 147 + cmd = (void *)buf; 148 + } 149 + 150 + sg_init_one(&out_sg, cmd, cmd_size); 151 + if (extra) 152 + sg_init_one(&extra_sg, extra, extra_size); 153 + /* allow stack for small buffers */ 154 + if (bounce_out) 155 + sg_init_one(&in_sg, buf->data, out_size); 156 + else if (out) 157 + sg_init_one(&in_sg, out, out_size); 158 + 159 + /* add to internal virtio queue */ 160 + ret = virtqueue_add_sgs(dev->cmd_vq, sgs_list, 161 + extra ? 2 : 1, 162 + out ? 1 : 0, 163 + cmd, GFP_ATOMIC); 164 + if (ret) { 165 + virtio_pcidev_free_buf(dev, buf); 166 + return ret; 167 + } 168 + 169 + if (posted) { 170 + virtqueue_kick(dev->cmd_vq); 171 + return 0; 172 + } 173 + 174 + /* kick and poll for getting a response on the queue */ 175 + set_bit(UM_PCI_STAT_WAITING, &dev->status); 176 + virtqueue_kick(dev->cmd_vq); 177 + ret = 0; 178 + 179 + while (1) { 180 + void *completed = virtqueue_get_buf(dev->cmd_vq, &len); 181 + 182 + if (completed == buf) 183 + break; 184 + 185 + if (completed) 186 + virtio_pcidev_free_buf(dev, completed); 187 + 188 + if (WARN_ONCE(virtqueue_is_broken(dev->cmd_vq) || 189 + ++delay_count > virtio_pcidev_max_delay_us, 190 + "um virt-pci delay: %d", delay_count)) { 191 + ret = -EIO; 192 + break; 193 + } 194 + udelay(1); 195 + } 196 + clear_bit(UM_PCI_STAT_WAITING, &dev->status); 197 + 198 + if (bounce_out) 199 + memcpy(out, buf->data, out_size); 200 + 201 + virtio_pcidev_free_buf(dev, buf); 202 + 203 + return ret; 204 + } 205 + 206 + static unsigned long virtio_pcidev_cfgspace_read(struct um_pci_device *pdev, 207 + unsigned int offset, int size) 208 + { 209 + struct virtio_pcidev_device *dev = to_virtio_pcidev(pdev); 210 + struct virtio_pcidev_msg hdr = { 211 + .op = VIRTIO_PCIDEV_OP_CFG_READ, 212 + 
.size = size, 213 + .addr = offset, 214 + }; 215 + /* max 8, we might not use it all */ 216 + u8 data[8]; 217 + 218 + memset(data, 0xff, sizeof(data)); 219 + 220 + /* size has been checked in um_pci_cfgspace_read() */ 221 + if (virtio_pcidev_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, data, size)) 222 + return ULONG_MAX; 223 + 224 + switch (size) { 225 + case 1: 226 + return data[0]; 227 + case 2: 228 + return le16_to_cpup((void *)data); 229 + case 4: 230 + return le32_to_cpup((void *)data); 231 + #ifdef CONFIG_64BIT 232 + case 8: 233 + return le64_to_cpup((void *)data); 234 + #endif 235 + default: 236 + return ULONG_MAX; 237 + } 238 + } 239 + 240 + static void virtio_pcidev_cfgspace_write(struct um_pci_device *pdev, 241 + unsigned int offset, int size, 242 + unsigned long val) 243 + { 244 + struct virtio_pcidev_device *dev = to_virtio_pcidev(pdev); 245 + struct { 246 + struct virtio_pcidev_msg hdr; 247 + /* maximum size - we may only use parts of it */ 248 + u8 data[8]; 249 + } msg = { 250 + .hdr = { 251 + .op = VIRTIO_PCIDEV_OP_CFG_WRITE, 252 + .size = size, 253 + .addr = offset, 254 + }, 255 + }; 256 + 257 + /* size has been checked in um_pci_cfgspace_write() */ 258 + switch (size) { 259 + case 1: 260 + msg.data[0] = (u8)val; 261 + break; 262 + case 2: 263 + put_unaligned_le16(val, (void *)msg.data); 264 + break; 265 + case 4: 266 + put_unaligned_le32(val, (void *)msg.data); 267 + break; 268 + #ifdef CONFIG_64BIT 269 + case 8: 270 + put_unaligned_le64(val, (void *)msg.data); 271 + break; 272 + #endif 273 + } 274 + 275 + WARN_ON(virtio_pcidev_send_cmd(dev, &msg.hdr, sizeof(msg), NULL, 0, NULL, 0)); 276 + } 277 + 278 + static void virtio_pcidev_bar_copy_from(struct um_pci_device *pdev, 279 + int bar, void *buffer, 280 + unsigned int offset, int size) 281 + { 282 + struct virtio_pcidev_device *dev = to_virtio_pcidev(pdev); 283 + struct virtio_pcidev_msg hdr = { 284 + .op = VIRTIO_PCIDEV_OP_MMIO_READ, 285 + .bar = bar, 286 + .size = size, 287 + .addr = offset, 288 + 
}; 289 + 290 + memset(buffer, 0xff, size); 291 + 292 + virtio_pcidev_send_cmd(dev, &hdr, sizeof(hdr), NULL, 0, buffer, size); 293 + } 294 + 295 + static unsigned long virtio_pcidev_bar_read(struct um_pci_device *pdev, int bar, 296 + unsigned int offset, int size) 297 + { 298 + /* 8 is maximum size - we may only use parts of it */ 299 + u8 data[8]; 300 + 301 + /* size has been checked in um_pci_bar_read() */ 302 + virtio_pcidev_bar_copy_from(pdev, bar, data, offset, size); 303 + 304 + switch (size) { 305 + case 1: 306 + return data[0]; 307 + case 2: 308 + return le16_to_cpup((void *)data); 309 + case 4: 310 + return le32_to_cpup((void *)data); 311 + #ifdef CONFIG_64BIT 312 + case 8: 313 + return le64_to_cpup((void *)data); 314 + #endif 315 + default: 316 + return ULONG_MAX; 317 + } 318 + } 319 + 320 + static void virtio_pcidev_bar_copy_to(struct um_pci_device *pdev, 321 + int bar, unsigned int offset, 322 + const void *buffer, int size) 323 + { 324 + struct virtio_pcidev_device *dev = to_virtio_pcidev(pdev); 325 + struct virtio_pcidev_msg hdr = { 326 + .op = VIRTIO_PCIDEV_OP_MMIO_WRITE, 327 + .bar = bar, 328 + .size = size, 329 + .addr = offset, 330 + }; 331 + 332 + virtio_pcidev_send_cmd(dev, &hdr, sizeof(hdr), buffer, size, NULL, 0); 333 + } 334 + 335 + static void virtio_pcidev_bar_write(struct um_pci_device *pdev, int bar, 336 + unsigned int offset, int size, 337 + unsigned long val) 338 + { 339 + /* maximum size - we may only use parts of it */ 340 + u8 data[8]; 341 + 342 + /* size has been checked in um_pci_bar_write() */ 343 + switch (size) { 344 + case 1: 345 + data[0] = (u8)val; 346 + break; 347 + case 2: 348 + put_unaligned_le16(val, (void *)data); 349 + break; 350 + case 4: 351 + put_unaligned_le32(val, (void *)data); 352 + break; 353 + #ifdef CONFIG_64BIT 354 + case 8: 355 + put_unaligned_le64(val, (void *)data); 356 + break; 357 + #endif 358 + } 359 + 360 + virtio_pcidev_bar_copy_to(pdev, bar, offset, data, size); 361 + } 362 + 363 + static void 
virtio_pcidev_bar_set(struct um_pci_device *pdev, int bar, 364 + unsigned int offset, u8 value, int size) 365 + { 366 + struct virtio_pcidev_device *dev = to_virtio_pcidev(pdev); 367 + struct { 368 + struct virtio_pcidev_msg hdr; 369 + u8 data; 370 + } msg = { 371 + .hdr = { 372 + .op = VIRTIO_PCIDEV_OP_CFG_WRITE, 373 + .bar = bar, 374 + .size = size, 375 + .addr = offset, 376 + }, 377 + .data = value, 378 + }; 379 + 380 + virtio_pcidev_send_cmd(dev, &msg.hdr, sizeof(msg), NULL, 0, NULL, 0); 381 + } 382 + 383 + static const struct um_pci_ops virtio_pcidev_um_pci_ops = { 384 + .cfgspace_read = virtio_pcidev_cfgspace_read, 385 + .cfgspace_write = virtio_pcidev_cfgspace_write, 386 + .bar_read = virtio_pcidev_bar_read, 387 + .bar_write = virtio_pcidev_bar_write, 388 + .bar_copy_from = virtio_pcidev_bar_copy_from, 389 + .bar_copy_to = virtio_pcidev_bar_copy_to, 390 + .bar_set = virtio_pcidev_bar_set, 391 + }; 392 + 393 + static void virtio_pcidev_irq_vq_addbuf(struct virtqueue *vq, void *buf, bool kick) 394 + { 395 + struct scatterlist sg[1]; 396 + 397 + sg_init_one(sg, buf, MAX_IRQ_MSG_SIZE); 398 + if (virtqueue_add_inbuf(vq, sg, 1, buf, GFP_ATOMIC)) 399 + kfree(buf); 400 + else if (kick) 401 + virtqueue_kick(vq); 402 + } 403 + 404 + static void virtio_pcidev_handle_irq_message(struct virtqueue *vq, 405 + struct virtio_pcidev_msg *msg) 406 + { 407 + struct virtio_device *vdev = vq->vdev; 408 + struct virtio_pcidev_device *dev = vdev->priv; 409 + 410 + if (!dev->pdev.irq) 411 + return; 412 + 413 + /* we should properly chain interrupts, but on ARCH=um we don't care */ 414 + 415 + switch (msg->op) { 416 + case VIRTIO_PCIDEV_OP_INT: 417 + generic_handle_irq(dev->pdev.irq); 418 + break; 419 + case VIRTIO_PCIDEV_OP_MSI: 420 + /* our MSI message is just the interrupt number */ 421 + if (msg->size == sizeof(u32)) 422 + generic_handle_irq(le32_to_cpup((void *)msg->data)); 423 + else 424 + generic_handle_irq(le16_to_cpup((void *)msg->data)); 425 + break; 426 + case 
VIRTIO_PCIDEV_OP_PME: 427 + /* nothing to do - we already woke up due to the message */ 428 + break; 429 + default: 430 + dev_err(&vdev->dev, "unexpected virt-pci message %d\n", msg->op); 431 + break; 432 + } 433 + } 434 + 435 + static void virtio_pcidev_cmd_vq_cb(struct virtqueue *vq) 436 + { 437 + struct virtio_device *vdev = vq->vdev; 438 + struct virtio_pcidev_device *dev = vdev->priv; 439 + void *cmd; 440 + int len; 441 + 442 + if (test_bit(UM_PCI_STAT_WAITING, &dev->status)) 443 + return; 444 + 445 + while ((cmd = virtqueue_get_buf(vq, &len))) 446 + virtio_pcidev_free_buf(dev, cmd); 447 + } 448 + 449 + static void virtio_pcidev_irq_vq_cb(struct virtqueue *vq) 450 + { 451 + struct virtio_pcidev_msg *msg; 452 + int len; 453 + 454 + while ((msg = virtqueue_get_buf(vq, &len))) { 455 + if (len >= sizeof(*msg)) 456 + virtio_pcidev_handle_irq_message(vq, msg); 457 + 458 + /* recycle the message buffer */ 459 + virtio_pcidev_irq_vq_addbuf(vq, msg, true); 460 + } 461 + } 462 + 463 + static int virtio_pcidev_init_vqs(struct virtio_pcidev_device *dev) 464 + { 465 + struct virtqueue_info vqs_info[] = { 466 + { "cmd", virtio_pcidev_cmd_vq_cb }, 467 + { "irq", virtio_pcidev_irq_vq_cb }, 468 + }; 469 + struct virtqueue *vqs[2]; 470 + int err, i; 471 + 472 + err = virtio_find_vqs(dev->vdev, 2, vqs, vqs_info, NULL); 473 + if (err) 474 + return err; 475 + 476 + dev->cmd_vq = vqs[0]; 477 + dev->irq_vq = vqs[1]; 478 + 479 + virtio_device_ready(dev->vdev); 480 + 481 + for (i = 0; i < NUM_IRQ_MSGS; i++) { 482 + void *msg = kzalloc(MAX_IRQ_MSG_SIZE, GFP_KERNEL); 483 + 484 + if (msg) 485 + virtio_pcidev_irq_vq_addbuf(dev->irq_vq, msg, false); 486 + } 487 + 488 + virtqueue_kick(dev->irq_vq); 489 + 490 + return 0; 491 + } 492 + 493 + static void __virtio_pcidev_virtio_platform_remove(struct virtio_device *vdev, 494 + struct virtio_pcidev_device *dev) 495 + { 496 + um_pci_platform_device_unregister(&dev->pdev); 497 + 498 + virtio_reset_device(vdev); 499 + vdev->config->del_vqs(vdev); 
500 + 501 + kfree(dev); 502 + } 503 + 504 + static int virtio_pcidev_virtio_platform_probe(struct virtio_device *vdev, 505 + struct virtio_pcidev_device *dev) 506 + { 507 + int err; 508 + 509 + dev->platform = true; 510 + 511 + err = virtio_pcidev_init_vqs(dev); 512 + if (err) 513 + goto err_free; 514 + 515 + err = um_pci_platform_device_register(&dev->pdev); 516 + if (err) 517 + goto err_reset; 518 + 519 + err = of_platform_default_populate(vdev->dev.of_node, NULL, &vdev->dev); 520 + if (err) 521 + goto err_unregister; 522 + 523 + return 0; 524 + 525 + err_unregister: 526 + um_pci_platform_device_unregister(&dev->pdev); 527 + err_reset: 528 + virtio_reset_device(vdev); 529 + vdev->config->del_vqs(vdev); 530 + err_free: 531 + kfree(dev); 532 + return err; 533 + } 534 + 535 + static int virtio_pcidev_virtio_probe(struct virtio_device *vdev) 536 + { 537 + struct virtio_pcidev_device *dev; 538 + int err; 539 + 540 + dev = kzalloc(sizeof(*dev), GFP_KERNEL); 541 + if (!dev) 542 + return -ENOMEM; 543 + 544 + dev->vdev = vdev; 545 + vdev->priv = dev; 546 + 547 + dev->pdev.ops = &virtio_pcidev_um_pci_ops; 548 + 549 + if (of_device_is_compatible(vdev->dev.of_node, "simple-bus")) 550 + return virtio_pcidev_virtio_platform_probe(vdev, dev); 551 + 552 + err = virtio_pcidev_init_vqs(dev); 553 + if (err) 554 + goto err_free; 555 + 556 + err = um_pci_device_register(&dev->pdev); 557 + if (err) 558 + goto err_reset; 559 + 560 + device_set_wakeup_enable(&vdev->dev, true); 561 + 562 + /* 563 + * In order to do suspend-resume properly, don't allow VQs 564 + * to be suspended. 
565 + */ 566 + virtio_uml_set_no_vq_suspend(vdev, true); 567 + 568 + return 0; 569 + 570 + err_reset: 571 + virtio_reset_device(vdev); 572 + vdev->config->del_vqs(vdev); 573 + err_free: 574 + kfree(dev); 575 + return err; 576 + } 577 + 578 + static void virtio_pcidev_virtio_remove(struct virtio_device *vdev) 579 + { 580 + struct virtio_pcidev_device *dev = vdev->priv; 581 + 582 + if (dev->platform) { 583 + of_platform_depopulate(&vdev->dev); 584 + __virtio_pcidev_virtio_platform_remove(vdev, dev); 585 + return; 586 + } 587 + 588 + device_set_wakeup_enable(&vdev->dev, false); 589 + 590 + um_pci_device_unregister(&dev->pdev); 591 + 592 + /* Stop all virtqueues */ 593 + virtio_reset_device(vdev); 594 + dev->cmd_vq = NULL; 595 + dev->irq_vq = NULL; 596 + vdev->config->del_vqs(vdev); 597 + 598 + kfree(dev); 599 + } 600 + 601 + static struct virtio_device_id id_table[] = { 602 + { CONFIG_UML_PCI_OVER_VIRTIO_DEVICE_ID, VIRTIO_DEV_ANY_ID }, 603 + { 0 }, 604 + }; 605 + MODULE_DEVICE_TABLE(virtio, id_table); 606 + 607 + static struct virtio_driver virtio_pcidev_virtio_driver = { 608 + .driver.name = "virtio-pci", 609 + .id_table = id_table, 610 + .probe = virtio_pcidev_virtio_probe, 611 + .remove = virtio_pcidev_virtio_remove, 612 + }; 613 + 614 + static int __init virtio_pcidev_init(void) 615 + { 616 + if (WARN(CONFIG_UML_PCI_OVER_VIRTIO_DEVICE_ID < 0, 617 + "No virtio device ID configured for PCI - no PCI support\n")) 618 + return 0; 619 + 620 + return register_virtio_driver(&virtio_pcidev_virtio_driver); 621 + } 622 + late_initcall(virtio_pcidev_init); 623 + 624 + static void __exit virtio_pcidev_exit(void) 625 + { 626 + unregister_virtio_driver(&virtio_pcidev_virtio_driver); 627 + } 628 + module_exit(virtio_pcidev_exit);