Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

virtio-pci: introduce modern device module

Signed-off-by: Jason Wang <jasowang@redhat.com>
Link: https://lore.kernel.org/r/20210104065503.199631-17-jasowang@redhat.com

Including a bugfix:

virtio: don't prompt CONFIG_VIRTIO_PCI_MODERN

Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Anders Roxell <anders.roxell@linaro.org>
Cc: Guenter Roeck <linux@roeck-us.net>
Reported-by: Naresh Kamboju <naresh.kamboju@linaro.org>
Fixes: 86b87c9d858b6 ("virtio-pci: introduce modern device module")
Signed-off-by: Jason Wang <jasowang@redhat.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Link: https://lore.kernel.org/r/20210223061905.422659-2-jasowang@redhat.com
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

Authored by Jason Wang; committed by Michael S. Tsirkin.
fd502729 8000a6b6

+721 -643
+9
drivers/virtio/Kconfig
··· 12 12 This option is selected if the architecture may need to enforce 13 13 VIRTIO_F_ACCESS_PLATFORM 14 14 15 + config VIRTIO_PCI_LIB 16 + tristate 17 + help 18 + Modern PCI device implementation. This module implements the 19 + basic probe and control for devices which are based on modern 20 + PCI device with possible vendor specific extensions. Any 21 + module that selects this module must depend on PCI. 22 + 15 23 menuconfig VIRTIO_MENU 16 24 bool "Virtio drivers" 17 25 default y ··· 29 21 config VIRTIO_PCI 30 22 tristate "PCI driver for virtio devices" 31 23 depends on PCI 24 + select VIRTIO_PCI_LIB 32 25 select VIRTIO 33 26 help 34 27 This driver provides support for virtio based paravirtual device
+1
drivers/virtio/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0 2 2 obj-$(CONFIG_VIRTIO) += virtio.o virtio_ring.o 3 + obj-$(CONFIG_VIRTIO_PCI_LIB) += virtio_pci_modern_dev.o 3 4 obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o 4 5 obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o 5 6 virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o
+1 -26
drivers/virtio/virtio_pci_common.h
··· 25 25 #include <linux/virtio_config.h> 26 26 #include <linux/virtio_ring.h> 27 27 #include <linux/virtio_pci.h> 28 + #include <linux/virtio_pci_modern.h> 28 29 #include <linux/highmem.h> 29 30 #include <linux/spinlock.h> 30 31 ··· 38 37 39 38 /* MSI-X vector (or none) */ 40 39 unsigned msix_vector; 41 - }; 42 - 43 - struct virtio_pci_modern_device { 44 - struct pci_dev *pci_dev; 45 - 46 - struct virtio_pci_common_cfg __iomem *common; 47 - /* Device-specific data (non-legacy mode) */ 48 - void __iomem *device; 49 - /* Base of vq notifications (non-legacy mode). */ 50 - void __iomem *notify_base; 51 - /* Where to read and clear interrupt */ 52 - u8 __iomem *isr; 53 - 54 - /* So we can sanity-check accesses. */ 55 - size_t notify_len; 56 - size_t device_len; 57 - 58 - /* Capability for when we need to map notifications per-vq. */ 59 - int notify_map_cap; 60 - 61 - /* Multiply queue_notify_off by this value. (non-legacy mode). */ 62 - u32 notify_offset_multiplier; 63 - 64 - int modern_bars; 65 - 66 - struct virtio_device_id id; 67 40 }; 68 41 69 42 /* Our device structure */
-617
drivers/virtio/virtio_pci_modern.c
··· 19 19 #define VIRTIO_RING_NO_LEGACY 20 20 #include "virtio_pci_common.h" 21 21 22 - /* 23 - * Type-safe wrappers for io accesses. 24 - * Use these to enforce at compile time the following spec requirement: 25 - * 26 - * The driver MUST access each field using the “natural” access 27 - * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses 28 - * for 16-bit fields and 8-bit accesses for 8-bit fields. 29 - */ 30 - static inline u8 vp_ioread8(const u8 __iomem *addr) 31 - { 32 - return ioread8(addr); 33 - } 34 - static inline u16 vp_ioread16 (const __le16 __iomem *addr) 35 - { 36 - return ioread16(addr); 37 - } 38 - 39 - static inline u32 vp_ioread32(const __le32 __iomem *addr) 40 - { 41 - return ioread32(addr); 42 - } 43 - 44 - static inline void vp_iowrite8(u8 value, u8 __iomem *addr) 45 - { 46 - iowrite8(value, addr); 47 - } 48 - 49 - static inline void vp_iowrite16(u16 value, __le16 __iomem *addr) 50 - { 51 - iowrite16(value, addr); 52 - } 53 - 54 - static inline void vp_iowrite32(u32 value, __le32 __iomem *addr) 55 - { 56 - iowrite32(value, addr); 57 - } 58 - 59 - static void vp_iowrite64_twopart(u64 val, 60 - __le32 __iomem *lo, __le32 __iomem *hi) 61 - { 62 - vp_iowrite32((u32)val, lo); 63 - vp_iowrite32(val >> 32, hi); 64 - } 65 - 66 - /* 67 - * vp_modern_map_capability - map a part of virtio pci capability 68 - * @mdev: the modern virtio-pci device 69 - * @off: offset of the capability 70 - * @minlen: minimal length of the capability 71 - * @align: align requirement 72 - * @start: start from the capability 73 - * @size: map size 74 - * @len: the length that is actually mapped 75 - * 76 - * Returns the io address of for the part of the capability 77 - */ 78 - void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off, 79 - size_t minlen, 80 - u32 align, 81 - u32 start, u32 size, 82 - size_t *len) 83 - { 84 - struct pci_dev *dev = mdev->pci_dev; 85 - u8 bar; 86 - u32 offset, length; 87 - void __iomem *p; 88 - 89 - 
pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap, 90 - bar), 91 - &bar); 92 - pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset), 93 - &offset); 94 - pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length), 95 - &length); 96 - 97 - if (length <= start) { 98 - dev_err(&dev->dev, 99 - "virtio_pci: bad capability len %u (>%u expected)\n", 100 - length, start); 101 - return NULL; 102 - } 103 - 104 - if (length - start < minlen) { 105 - dev_err(&dev->dev, 106 - "virtio_pci: bad capability len %u (>=%zu expected)\n", 107 - length, minlen); 108 - return NULL; 109 - } 110 - 111 - length -= start; 112 - 113 - if (start + offset < offset) { 114 - dev_err(&dev->dev, 115 - "virtio_pci: map wrap-around %u+%u\n", 116 - start, offset); 117 - return NULL; 118 - } 119 - 120 - offset += start; 121 - 122 - if (offset & (align - 1)) { 123 - dev_err(&dev->dev, 124 - "virtio_pci: offset %u not aligned to %u\n", 125 - offset, align); 126 - return NULL; 127 - } 128 - 129 - if (length > size) 130 - length = size; 131 - 132 - if (len) 133 - *len = length; 134 - 135 - if (minlen + offset < minlen || 136 - minlen + offset > pci_resource_len(dev, bar)) { 137 - dev_err(&dev->dev, 138 - "virtio_pci: map virtio %zu@%u " 139 - "out of range on bar %i length %lu\n", 140 - minlen, offset, 141 - bar, (unsigned long)pci_resource_len(dev, bar)); 142 - return NULL; 143 - } 144 - 145 - p = pci_iomap_range(dev, bar, offset, length); 146 - if (!p) 147 - dev_err(&dev->dev, 148 - "virtio_pci: unable to map virtio %u@%u on bar %i\n", 149 - length, offset, bar); 150 - return p; 151 - } 152 - 153 - /* 154 - * vp_modern_get_features - get features from device 155 - * @mdev: the modern virtio-pci device 156 - * 157 - * Returns the features read from the device 158 - */ 159 - static u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev) 160 - { 161 - struct virtio_pci_common_cfg __iomem *cfg = mdev->common; 162 - 163 - u64 features; 164 - 165 - 
vp_iowrite32(0, &cfg->device_feature_select); 166 - features = vp_ioread32(&cfg->device_feature); 167 - vp_iowrite32(1, &cfg->device_feature_select); 168 - features |= ((u64)vp_ioread32(&cfg->device_feature) << 32); 169 - 170 - return features; 171 - } 172 - 173 - /* virtio config->get_features() implementation */ 174 22 static u64 vp_get_features(struct virtio_device *vdev) 175 23 { 176 24 struct virtio_pci_device *vp_dev = to_vp_device(vdev); ··· 34 186 if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) && 35 187 pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV)) 36 188 __virtio_set_bit(vdev, VIRTIO_F_SR_IOV); 37 - } 38 - 39 - /* 40 - * vp_modern_set_features - set features to device 41 - * @mdev: the modern virtio-pci device 42 - * @features: the features set to device 43 - */ 44 - static void vp_modern_set_features(struct virtio_pci_modern_device *mdev, 45 - u64 features) 46 - { 47 - struct virtio_pci_common_cfg __iomem *cfg = mdev->common; 48 - 49 - vp_iowrite32(0, &cfg->guest_feature_select); 50 - vp_iowrite32((u32)features, &cfg->guest_feature); 51 - vp_iowrite32(1, &cfg->guest_feature_select); 52 - vp_iowrite32(features >> 32, &cfg->guest_feature); 53 - } 54 - 55 - /* 56 - * vp_modern_queue_vector - set the MSIX vector for a specific virtqueue 57 - * @mdev: the modern virtio-pci device 58 - * @index: queue index 59 - * @vector: the config vector 60 - * 61 - * Returns the config vector read from the device 62 - */ 63 - static u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev, 64 - u16 index, u16 vector) 65 - { 66 - struct virtio_pci_common_cfg __iomem *cfg = mdev->common; 67 - 68 - vp_iowrite16(index, &cfg->queue_select); 69 - vp_iowrite16(vector, &cfg->queue_msix_vector); 70 - /* Flush the write out to device */ 71 - return vp_ioread16(&cfg->queue_msix_vector); 72 - } 73 - 74 - /* 75 - * vp_modern_queue_address - set the virtqueue address 76 - * @mdev: the modern virtio-pci device 77 - * @index: the queue index 78 - * @desc_addr: address of the 
descriptor area 79 - * @driver_addr: address of the driver area 80 - * @device_addr: address of the device area 81 - */ 82 - static void vp_modern_queue_address(struct virtio_pci_modern_device *mdev, 83 - u16 index, u64 desc_addr, u64 driver_addr, 84 - u64 device_addr) 85 - { 86 - struct virtio_pci_common_cfg __iomem *cfg = mdev->common; 87 - 88 - vp_iowrite16(index, &cfg->queue_select); 89 - 90 - vp_iowrite64_twopart(desc_addr, &cfg->queue_desc_lo, 91 - &cfg->queue_desc_hi); 92 - vp_iowrite64_twopart(driver_addr, &cfg->queue_avail_lo, 93 - &cfg->queue_avail_hi); 94 - vp_iowrite64_twopart(device_addr, &cfg->queue_used_lo, 95 - &cfg->queue_used_hi); 96 - } 97 - 98 - /* 99 - * vp_modern_set_queue_enable - enable a virtqueue 100 - * @mdev: the modern virtio-pci device 101 - * @index: the queue index 102 - * @enable: whether the virtqueue is enable or not 103 - */ 104 - static void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev, 105 - u16 index, bool enable) 106 - { 107 - vp_iowrite16(index, &mdev->common->queue_select); 108 - vp_iowrite16(enable, &mdev->common->queue_enable); 109 - } 110 - 111 - /* 112 - * vp_modern_get_queue_enable - enable a virtqueue 113 - * @mdev: the modern virtio-pci device 114 - * @index: the queue index 115 - * 116 - * Returns whether a virtqueue is enabled or not 117 - */ 118 - static bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev, 119 - u16 index) 120 - { 121 - vp_iowrite16(index, &mdev->common->queue_select); 122 - 123 - return vp_ioread16(&mdev->common->queue_enable); 124 - } 125 - 126 - /* 127 - * vp_modern_set_queue_size - set size for a virtqueue 128 - * @mdev: the modern virtio-pci device 129 - * @index: the queue index 130 - * @size: the size of the virtqueue 131 - */ 132 - static void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev, 133 - u16 index, u16 size) 134 - { 135 - vp_iowrite16(index, &mdev->common->queue_select); 136 - vp_iowrite16(size, &mdev->common->queue_size); 137 
- 138 - } 139 - 140 - /* 141 - * vp_modern_get_queue_size - get size for a virtqueue 142 - * @mdev: the modern virtio-pci device 143 - * @index: the queue index 144 - * 145 - * Returns the size of the virtqueue 146 - */ 147 - static u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev, 148 - u16 index) 149 - { 150 - vp_iowrite16(index, &mdev->common->queue_select); 151 - 152 - return vp_ioread16(&mdev->common->queue_size); 153 - 154 - } 155 - 156 - /* 157 - * vp_modern_get_num_queues - get the number of virtqueues 158 - * @mdev: the modern virtio-pci device 159 - * 160 - * Returns the number of virtqueues 161 - */ 162 - static u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev) 163 - { 164 - return vp_ioread16(&mdev->common->num_queues); 165 - } 166 - 167 - /* 168 - * vp_modern_get_queue_notify_off - get notification offset for a virtqueue 169 - * @mdev: the modern virtio-pci device 170 - * @index: the queue index 171 - * 172 - * Returns the notification offset for a virtqueue 173 - */ 174 - static u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev, 175 - u16 index) 176 - { 177 - vp_iowrite16(index, &mdev->common->queue_select); 178 - 179 - return vp_ioread16(&mdev->common->queue_notify_off); 180 189 } 181 190 182 191 /* virtio config->finalize_features() implementation */ ··· 134 429 } 135 430 } 136 431 137 - /* 138 - * vp_modern_generation - get the device genreation 139 - * @mdev: the modern virtio-pci device 140 - * 141 - * Returns the genreation read from device 142 - */ 143 - static u32 vp_modern_generation(struct virtio_pci_modern_device *mdev) 144 - { 145 - struct virtio_pci_common_cfg __iomem *cfg = mdev->common; 146 - 147 - return vp_ioread8(&cfg->config_generation); 148 - } 149 - 150 432 static u32 vp_generation(struct virtio_device *vdev) 151 433 { 152 434 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 153 435 154 436 return vp_modern_generation(&vp_dev->mdev); 155 - } 156 - 157 - /* 158 - * 
vp_modern_get_status - get the device status 159 - * @mdev: the modern virtio-pci device 160 - * 161 - * Returns the status read from device 162 - */ 163 - static u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev) 164 - { 165 - struct virtio_pci_common_cfg __iomem *cfg = mdev->common; 166 - 167 - return vp_ioread8(&cfg->device_status); 168 437 } 169 438 170 439 /* config->{get,set}_status() implementations */ ··· 147 468 struct virtio_pci_device *vp_dev = to_vp_device(vdev); 148 469 149 470 return vp_modern_get_status(&vp_dev->mdev); 150 - } 151 - 152 - /* 153 - * vp_modern_set_status - set status to device 154 - * @mdev: the modern virtio-pci device 155 - * @status: the status set to device 156 - */ 157 - static void vp_modern_set_status(struct virtio_pci_modern_device *mdev, 158 - u8 status) 159 - { 160 - struct virtio_pci_common_cfg __iomem *cfg = mdev->common; 161 - 162 - vp_iowrite8(status, &cfg->device_status); 163 471 } 164 472 165 473 static void vp_set_status(struct virtio_device *vdev, u8 status) ··· 174 508 msleep(1); 175 509 /* Flush pending VQ/configuration callbacks. 
*/ 176 510 vp_synchronize_vectors(vdev); 177 - } 178 - 179 - /* 180 - * vp_modern_config_vector - set the vector for config interrupt 181 - * @mdev: the modern virtio-pci device 182 - * @vector: the config vector 183 - * 184 - * Returns the config vector read from the device 185 - */ 186 - static u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev, 187 - u16 vector) 188 - { 189 - struct virtio_pci_common_cfg __iomem *cfg = mdev->common; 190 - 191 - /* Setup the vector used for configuration events */ 192 - vp_iowrite16(vector, &cfg->msix_config); 193 - /* Verify we had enough resources to assign the vector */ 194 - /* Will also flush the write out to device */ 195 - return vp_ioread16(&cfg->msix_config); 196 511 } 197 512 198 513 static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector) ··· 436 789 .get_shm_region = vp_get_shm_region, 437 790 }; 438 791 439 - /** 440 - * virtio_pci_find_capability - walk capabilities to find device info. 441 - * @dev: the pci device 442 - * @cfg_type: the VIRTIO_PCI_CAP_* value we seek 443 - * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO. 444 - * @bars: the bitmask of BARs 445 - * 446 - * Returns offset of the capability, or 0. 
447 - */ 448 - static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type, 449 - u32 ioresource_types, int *bars) 450 - { 451 - int pos; 452 - 453 - for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR); 454 - pos > 0; 455 - pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) { 456 - u8 type, bar; 457 - pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap, 458 - cfg_type), 459 - &type); 460 - pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap, 461 - bar), 462 - &bar); 463 - 464 - /* Ignore structures with reserved BAR values */ 465 - if (bar > 0x5) 466 - continue; 467 - 468 - if (type == cfg_type) { 469 - if (pci_resource_len(dev, bar) && 470 - pci_resource_flags(dev, bar) & ioresource_types) { 471 - *bars |= (1 << bar); 472 - return pos; 473 - } 474 - } 475 - } 476 - return 0; 477 - } 478 - 479 - /* This is part of the ABI. Don't screw with it. */ 480 - static inline void check_offsets(void) 481 - { 482 - /* Note: disk space was harmed in compilation of this function. 
*/ 483 - BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR != 484 - offsetof(struct virtio_pci_cap, cap_vndr)); 485 - BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT != 486 - offsetof(struct virtio_pci_cap, cap_next)); 487 - BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN != 488 - offsetof(struct virtio_pci_cap, cap_len)); 489 - BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE != 490 - offsetof(struct virtio_pci_cap, cfg_type)); 491 - BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR != 492 - offsetof(struct virtio_pci_cap, bar)); 493 - BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET != 494 - offsetof(struct virtio_pci_cap, offset)); 495 - BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH != 496 - offsetof(struct virtio_pci_cap, length)); 497 - BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT != 498 - offsetof(struct virtio_pci_notify_cap, 499 - notify_off_multiplier)); 500 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT != 501 - offsetof(struct virtio_pci_common_cfg, 502 - device_feature_select)); 503 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF != 504 - offsetof(struct virtio_pci_common_cfg, device_feature)); 505 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT != 506 - offsetof(struct virtio_pci_common_cfg, 507 - guest_feature_select)); 508 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF != 509 - offsetof(struct virtio_pci_common_cfg, guest_feature)); 510 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX != 511 - offsetof(struct virtio_pci_common_cfg, msix_config)); 512 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ != 513 - offsetof(struct virtio_pci_common_cfg, num_queues)); 514 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS != 515 - offsetof(struct virtio_pci_common_cfg, device_status)); 516 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION != 517 - offsetof(struct virtio_pci_common_cfg, config_generation)); 518 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT != 519 - offsetof(struct virtio_pci_common_cfg, queue_select)); 520 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE != 521 - offsetof(struct virtio_pci_common_cfg, queue_size)); 522 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX != 523 - offsetof(struct virtio_pci_common_cfg, queue_msix_vector)); 524 
- BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE != 525 - offsetof(struct virtio_pci_common_cfg, queue_enable)); 526 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF != 527 - offsetof(struct virtio_pci_common_cfg, queue_notify_off)); 528 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO != 529 - offsetof(struct virtio_pci_common_cfg, queue_desc_lo)); 530 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI != 531 - offsetof(struct virtio_pci_common_cfg, queue_desc_hi)); 532 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO != 533 - offsetof(struct virtio_pci_common_cfg, queue_avail_lo)); 534 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI != 535 - offsetof(struct virtio_pci_common_cfg, queue_avail_hi)); 536 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO != 537 - offsetof(struct virtio_pci_common_cfg, queue_used_lo)); 538 - BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI != 539 - offsetof(struct virtio_pci_common_cfg, queue_used_hi)); 540 - } 541 - 542 - /* 543 - * vp_modern_probe: probe the modern virtio pci device, note that the 544 - * caller is required to enable PCI device before calling this function. 545 - * @mdev: the modern virtio-pci device 546 - * 547 - * Return 0 on succeed otherwise fail 548 - */ 549 - static int vp_modern_probe(struct virtio_pci_modern_device *mdev) 550 - { 551 - struct pci_dev *pci_dev = mdev->pci_dev; 552 - int err, common, isr, notify, device; 553 - u32 notify_length; 554 - u32 notify_offset; 555 - 556 - check_offsets(); 557 - 558 - mdev->pci_dev = pci_dev; 559 - 560 - /* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */ 561 - if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f) 562 - return -ENODEV; 563 - 564 - if (pci_dev->device < 0x1040) { 565 - /* Transitional devices: use the PCI subsystem device id as 566 - * virtio device id, same as legacy driver always did. 567 - */ 568 - mdev->id.device = pci_dev->subsystem_device; 569 - } else { 570 - /* Modern devices: simply use PCI device id, but start from 0x1040. 
*/ 571 - mdev->id.device = pci_dev->device - 0x1040; 572 - } 573 - mdev->id.vendor = pci_dev->subsystem_vendor; 574 - 575 - /* check for a common config: if not, use legacy mode (bar 0). */ 576 - common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG, 577 - IORESOURCE_IO | IORESOURCE_MEM, 578 - &mdev->modern_bars); 579 - if (!common) { 580 - dev_info(&pci_dev->dev, 581 - "virtio_pci: leaving for legacy driver\n"); 582 - return -ENODEV; 583 - } 584 - 585 - /* If common is there, these should be too... */ 586 - isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG, 587 - IORESOURCE_IO | IORESOURCE_MEM, 588 - &mdev->modern_bars); 589 - notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG, 590 - IORESOURCE_IO | IORESOURCE_MEM, 591 - &mdev->modern_bars); 592 - if (!isr || !notify) { 593 - dev_err(&pci_dev->dev, 594 - "virtio_pci: missing capabilities %i/%i/%i\n", 595 - common, isr, notify); 596 - return -EINVAL; 597 - } 598 - 599 - err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64)); 600 - if (err) 601 - err = dma_set_mask_and_coherent(&pci_dev->dev, 602 - DMA_BIT_MASK(32)); 603 - if (err) 604 - dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n"); 605 - 606 - /* Device capability is only mandatory for devices that have 607 - * device-specific configuration. 
608 - */ 609 - device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG, 610 - IORESOURCE_IO | IORESOURCE_MEM, 611 - &mdev->modern_bars); 612 - 613 - err = pci_request_selected_regions(pci_dev, mdev->modern_bars, 614 - "virtio-pci-modern"); 615 - if (err) 616 - return err; 617 - 618 - err = -EINVAL; 619 - mdev->common = vp_modern_map_capability(mdev, common, 620 - sizeof(struct virtio_pci_common_cfg), 4, 621 - 0, sizeof(struct virtio_pci_common_cfg), 622 - NULL); 623 - if (!mdev->common) 624 - goto err_map_common; 625 - mdev->isr = vp_modern_map_capability(mdev, isr, sizeof(u8), 1, 626 - 0, 1, 627 - NULL); 628 - if (!mdev->isr) 629 - goto err_map_isr; 630 - 631 - /* Read notify_off_multiplier from config space. */ 632 - pci_read_config_dword(pci_dev, 633 - notify + offsetof(struct virtio_pci_notify_cap, 634 - notify_off_multiplier), 635 - &mdev->notify_offset_multiplier); 636 - /* Read notify length and offset from config space. */ 637 - pci_read_config_dword(pci_dev, 638 - notify + offsetof(struct virtio_pci_notify_cap, 639 - cap.length), 640 - &notify_length); 641 - 642 - pci_read_config_dword(pci_dev, 643 - notify + offsetof(struct virtio_pci_notify_cap, 644 - cap.offset), 645 - &notify_offset); 646 - 647 - /* We don't know how many VQs we'll map, ahead of the time. 648 - * If notify length is small, map it all now. 649 - * Otherwise, map each VQ individually later. 650 - */ 651 - if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) { 652 - mdev->notify_base = vp_modern_map_capability(mdev, notify, 653 - 2, 2, 654 - 0, notify_length, 655 - &mdev->notify_len); 656 - if (!mdev->notify_base) 657 - goto err_map_notify; 658 - } else { 659 - mdev->notify_map_cap = notify; 660 - } 661 - 662 - /* Again, we don't know how much we should map, but PAGE_SIZE 663 - * is more than enough for all existing devices. 
664 - */ 665 - if (device) { 666 - mdev->device = vp_modern_map_capability(mdev, device, 0, 4, 667 - 0, PAGE_SIZE, 668 - &mdev->device_len); 669 - if (!mdev->device) 670 - goto err_map_device; 671 - } 672 - 673 - return 0; 674 - 675 - err_map_device: 676 - if (mdev->notify_base) 677 - pci_iounmap(pci_dev, mdev->notify_base); 678 - err_map_notify: 679 - pci_iounmap(pci_dev, mdev->isr); 680 - err_map_isr: 681 - pci_iounmap(pci_dev, mdev->common); 682 - err_map_common: 683 - return err; 684 - } 685 - 686 792 /* the PCI probing function */ 687 793 int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev) 688 794 { ··· 461 1061 vp_dev->vdev.id = mdev->id; 462 1062 463 1063 return 0; 464 - } 465 - 466 - /* 467 - * vp_modern_probe: remove and cleanup the modern virtio pci device 468 - * @mdev: the modern virtio-pci device 469 - */ 470 - static void vp_modern_remove(struct virtio_pci_modern_device *mdev) 471 - { 472 - struct pci_dev *pci_dev = mdev->pci_dev; 473 - 474 - if (mdev->device) 475 - pci_iounmap(pci_dev, mdev->device); 476 - if (mdev->notify_base) 477 - pci_iounmap(pci_dev, mdev->notify_base); 478 - pci_iounmap(pci_dev, mdev->isr); 479 - pci_iounmap(pci_dev, mdev->common); 480 - pci_release_selected_regions(pci_dev, mdev->modern_bars); 481 1064 } 482 1065 483 1066 void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev)
+599
drivers/virtio/virtio_pci_modern_dev.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-or-later 2 + 3 + #include <linux/virtio_pci_modern.h> 4 + #include <linux/module.h> 5 + #include <linux/pci.h> 6 + 7 + /* 8 + * vp_modern_map_capability - map a part of virtio pci capability 9 + * @mdev: the modern virtio-pci device 10 + * @off: offset of the capability 11 + * @minlen: minimal length of the capability 12 + * @align: align requirement 13 + * @start: start from the capability 14 + * @size: map size 15 + * @len: the length that is actually mapped 16 + * 17 + * Returns the io address of for the part of the capability 18 + */ 19 + void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off, 20 + size_t minlen, 21 + u32 align, 22 + u32 start, u32 size, 23 + size_t *len) 24 + { 25 + struct pci_dev *dev = mdev->pci_dev; 26 + u8 bar; 27 + u32 offset, length; 28 + void __iomem *p; 29 + 30 + pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap, 31 + bar), 32 + &bar); 33 + pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset), 34 + &offset); 35 + pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length), 36 + &length); 37 + 38 + if (length <= start) { 39 + dev_err(&dev->dev, 40 + "virtio_pci: bad capability len %u (>%u expected)\n", 41 + length, start); 42 + return NULL; 43 + } 44 + 45 + if (length - start < minlen) { 46 + dev_err(&dev->dev, 47 + "virtio_pci: bad capability len %u (>=%zu expected)\n", 48 + length, minlen); 49 + return NULL; 50 + } 51 + 52 + length -= start; 53 + 54 + if (start + offset < offset) { 55 + dev_err(&dev->dev, 56 + "virtio_pci: map wrap-around %u+%u\n", 57 + start, offset); 58 + return NULL; 59 + } 60 + 61 + offset += start; 62 + 63 + if (offset & (align - 1)) { 64 + dev_err(&dev->dev, 65 + "virtio_pci: offset %u not aligned to %u\n", 66 + offset, align); 67 + return NULL; 68 + } 69 + 70 + if (length > size) 71 + length = size; 72 + 73 + if (len) 74 + *len = length; 75 + 76 + if (minlen + offset < minlen || 77 + minlen + 
offset > pci_resource_len(dev, bar)) { 78 + dev_err(&dev->dev, 79 + "virtio_pci: map virtio %zu@%u " 80 + "out of range on bar %i length %lu\n", 81 + minlen, offset, 82 + bar, (unsigned long)pci_resource_len(dev, bar)); 83 + return NULL; 84 + } 85 + 86 + p = pci_iomap_range(dev, bar, offset, length); 87 + if (!p) 88 + dev_err(&dev->dev, 89 + "virtio_pci: unable to map virtio %u@%u on bar %i\n", 90 + length, offset, bar); 91 + return p; 92 + } 93 + EXPORT_SYMBOL_GPL(vp_modern_map_capability); 94 + 95 + /** 96 + * virtio_pci_find_capability - walk capabilities to find device info. 97 + * @dev: the pci device 98 + * @cfg_type: the VIRTIO_PCI_CAP_* value we seek 99 + * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO. 100 + * @bars: the bitmask of BARs 101 + * 102 + * Returns offset of the capability, or 0. 103 + */ 104 + static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type, 105 + u32 ioresource_types, int *bars) 106 + { 107 + int pos; 108 + 109 + for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR); 110 + pos > 0; 111 + pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) { 112 + u8 type, bar; 113 + pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap, 114 + cfg_type), 115 + &type); 116 + pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap, 117 + bar), 118 + &bar); 119 + 120 + /* Ignore structures with reserved BAR values */ 121 + if (bar > 0x5) 122 + continue; 123 + 124 + if (type == cfg_type) { 125 + if (pci_resource_len(dev, bar) && 126 + pci_resource_flags(dev, bar) & ioresource_types) { 127 + *bars |= (1 << bar); 128 + return pos; 129 + } 130 + } 131 + } 132 + return 0; 133 + } 134 + 135 + /* This is part of the ABI. Don't screw with it. */ 136 + static inline void check_offsets(void) 137 + { 138 + /* Note: disk space was harmed in compilation of this function. 
*/ 139 + BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR != 140 + offsetof(struct virtio_pci_cap, cap_vndr)); 141 + BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT != 142 + offsetof(struct virtio_pci_cap, cap_next)); 143 + BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN != 144 + offsetof(struct virtio_pci_cap, cap_len)); 145 + BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE != 146 + offsetof(struct virtio_pci_cap, cfg_type)); 147 + BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR != 148 + offsetof(struct virtio_pci_cap, bar)); 149 + BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET != 150 + offsetof(struct virtio_pci_cap, offset)); 151 + BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH != 152 + offsetof(struct virtio_pci_cap, length)); 153 + BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT != 154 + offsetof(struct virtio_pci_notify_cap, 155 + notify_off_multiplier)); 156 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT != 157 + offsetof(struct virtio_pci_common_cfg, 158 + device_feature_select)); 159 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF != 160 + offsetof(struct virtio_pci_common_cfg, device_feature)); 161 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT != 162 + offsetof(struct virtio_pci_common_cfg, 163 + guest_feature_select)); 164 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF != 165 + offsetof(struct virtio_pci_common_cfg, guest_feature)); 166 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX != 167 + offsetof(struct virtio_pci_common_cfg, msix_config)); 168 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ != 169 + offsetof(struct virtio_pci_common_cfg, num_queues)); 170 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS != 171 + offsetof(struct virtio_pci_common_cfg, device_status)); 172 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION != 173 + offsetof(struct virtio_pci_common_cfg, config_generation)); 174 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT != 175 + offsetof(struct virtio_pci_common_cfg, queue_select)); 176 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE != 177 + offsetof(struct virtio_pci_common_cfg, queue_size)); 178 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX != 179 + offsetof(struct virtio_pci_common_cfg, queue_msix_vector)); 180 
+ BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE != 181 + offsetof(struct virtio_pci_common_cfg, queue_enable)); 182 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF != 183 + offsetof(struct virtio_pci_common_cfg, queue_notify_off)); 184 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO != 185 + offsetof(struct virtio_pci_common_cfg, queue_desc_lo)); 186 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI != 187 + offsetof(struct virtio_pci_common_cfg, queue_desc_hi)); 188 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO != 189 + offsetof(struct virtio_pci_common_cfg, queue_avail_lo)); 190 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI != 191 + offsetof(struct virtio_pci_common_cfg, queue_avail_hi)); 192 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO != 193 + offsetof(struct virtio_pci_common_cfg, queue_used_lo)); 194 + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI != 195 + offsetof(struct virtio_pci_common_cfg, queue_used_hi)); 196 + } 197 + 198 + /* 199 + * vp_modern_probe: probe the modern virtio pci device, note that the 200 + * caller is required to enable PCI device before calling this function. 201 + * @mdev: the modern virtio-pci device 202 + * 203 + * Return 0 on succeed otherwise fail 204 + */ 205 + int vp_modern_probe(struct virtio_pci_modern_device *mdev) 206 + { 207 + struct pci_dev *pci_dev = mdev->pci_dev; 208 + int err, common, isr, notify, device; 209 + u32 notify_length; 210 + u32 notify_offset; 211 + 212 + check_offsets(); 213 + 214 + mdev->pci_dev = pci_dev; 215 + 216 + /* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */ 217 + if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f) 218 + return -ENODEV; 219 + 220 + if (pci_dev->device < 0x1040) { 221 + /* Transitional devices: use the PCI subsystem device id as 222 + * virtio device id, same as legacy driver always did. 223 + */ 224 + mdev->id.device = pci_dev->subsystem_device; 225 + } else { 226 + /* Modern devices: simply use PCI device id, but start from 0x1040. 
*/ 227 + mdev->id.device = pci_dev->device - 0x1040; 228 + } 229 + mdev->id.vendor = pci_dev->subsystem_vendor; 230 + 231 + /* check for a common config: if not, use legacy mode (bar 0). */ 232 + common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG, 233 + IORESOURCE_IO | IORESOURCE_MEM, 234 + &mdev->modern_bars); 235 + if (!common) { 236 + dev_info(&pci_dev->dev, 237 + "virtio_pci: leaving for legacy driver\n"); 238 + return -ENODEV; 239 + } 240 + 241 + /* If common is there, these should be too... */ 242 + isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG, 243 + IORESOURCE_IO | IORESOURCE_MEM, 244 + &mdev->modern_bars); 245 + notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG, 246 + IORESOURCE_IO | IORESOURCE_MEM, 247 + &mdev->modern_bars); 248 + if (!isr || !notify) { 249 + dev_err(&pci_dev->dev, 250 + "virtio_pci: missing capabilities %i/%i/%i\n", 251 + common, isr, notify); 252 + return -EINVAL; 253 + } 254 + 255 + err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64)); 256 + if (err) 257 + err = dma_set_mask_and_coherent(&pci_dev->dev, 258 + DMA_BIT_MASK(32)); 259 + if (err) 260 + dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n"); 261 + 262 + /* Device capability is only mandatory for devices that have 263 + * device-specific configuration. 
264 + */ 265 + device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG, 266 + IORESOURCE_IO | IORESOURCE_MEM, 267 + &mdev->modern_bars); 268 + 269 + err = pci_request_selected_regions(pci_dev, mdev->modern_bars, 270 + "virtio-pci-modern"); 271 + if (err) 272 + return err; 273 + 274 + err = -EINVAL; 275 + mdev->common = vp_modern_map_capability(mdev, common, 276 + sizeof(struct virtio_pci_common_cfg), 4, 277 + 0, sizeof(struct virtio_pci_common_cfg), 278 + NULL); 279 + if (!mdev->common) 280 + goto err_map_common; 281 + mdev->isr = vp_modern_map_capability(mdev, isr, sizeof(u8), 1, 282 + 0, 1, 283 + NULL); 284 + if (!mdev->isr) 285 + goto err_map_isr; 286 + 287 + /* Read notify_off_multiplier from config space. */ 288 + pci_read_config_dword(pci_dev, 289 + notify + offsetof(struct virtio_pci_notify_cap, 290 + notify_off_multiplier), 291 + &mdev->notify_offset_multiplier); 292 + /* Read notify length and offset from config space. */ 293 + pci_read_config_dword(pci_dev, 294 + notify + offsetof(struct virtio_pci_notify_cap, 295 + cap.length), 296 + &notify_length); 297 + 298 + pci_read_config_dword(pci_dev, 299 + notify + offsetof(struct virtio_pci_notify_cap, 300 + cap.offset), 301 + &notify_offset); 302 + 303 + /* We don't know how many VQs we'll map, ahead of the time. 304 + * If notify length is small, map it all now. 305 + * Otherwise, map each VQ individually later. 306 + */ 307 + if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) { 308 + mdev->notify_base = vp_modern_map_capability(mdev, notify, 309 + 2, 2, 310 + 0, notify_length, 311 + &mdev->notify_len); 312 + if (!mdev->notify_base) 313 + goto err_map_notify; 314 + } else { 315 + mdev->notify_map_cap = notify; 316 + } 317 + 318 + /* Again, we don't know how much we should map, but PAGE_SIZE 319 + * is more than enough for all existing devices. 
320 + */ 321 + if (device) { 322 + mdev->device = vp_modern_map_capability(mdev, device, 0, 4, 323 + 0, PAGE_SIZE, 324 + &mdev->device_len); 325 + if (!mdev->device) 326 + goto err_map_device; 327 + } 328 + 329 + return 0; 330 + 331 + err_map_device: 332 + if (mdev->notify_base) 333 + pci_iounmap(pci_dev, mdev->notify_base); 334 + err_map_notify: 335 + pci_iounmap(pci_dev, mdev->isr); 336 + err_map_isr: 337 + pci_iounmap(pci_dev, mdev->common); 338 + err_map_common: 339 + return err; 340 + } 341 + EXPORT_SYMBOL_GPL(vp_modern_probe); 342 + 343 + /* 344 + * vp_modern_probe: remove and cleanup the modern virtio pci device 345 + * @mdev: the modern virtio-pci device 346 + */ 347 + void vp_modern_remove(struct virtio_pci_modern_device *mdev) 348 + { 349 + struct pci_dev *pci_dev = mdev->pci_dev; 350 + 351 + if (mdev->device) 352 + pci_iounmap(pci_dev, mdev->device); 353 + if (mdev->notify_base) 354 + pci_iounmap(pci_dev, mdev->notify_base); 355 + pci_iounmap(pci_dev, mdev->isr); 356 + pci_iounmap(pci_dev, mdev->common); 357 + pci_release_selected_regions(pci_dev, mdev->modern_bars); 358 + } 359 + EXPORT_SYMBOL_GPL(vp_modern_remove); 360 + 361 + /* 362 + * vp_modern_get_features - get features from device 363 + * @mdev: the modern virtio-pci device 364 + * 365 + * Returns the features read from the device 366 + */ 367 + u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev) 368 + { 369 + struct virtio_pci_common_cfg __iomem *cfg = mdev->common; 370 + 371 + u64 features; 372 + 373 + vp_iowrite32(0, &cfg->device_feature_select); 374 + features = vp_ioread32(&cfg->device_feature); 375 + vp_iowrite32(1, &cfg->device_feature_select); 376 + features |= ((u64)vp_ioread32(&cfg->device_feature) << 32); 377 + 378 + return features; 379 + } 380 + EXPORT_SYMBOL_GPL(vp_modern_get_features); 381 + 382 + /* 383 + * vp_modern_set_features - set features to device 384 + * @mdev: the modern virtio-pci device 385 + * @features: the features set to device 386 + */ 387 + void 
vp_modern_set_features(struct virtio_pci_modern_device *mdev, 388 + u64 features) 389 + { 390 + struct virtio_pci_common_cfg __iomem *cfg = mdev->common; 391 + 392 + vp_iowrite32(0, &cfg->guest_feature_select); 393 + vp_iowrite32((u32)features, &cfg->guest_feature); 394 + vp_iowrite32(1, &cfg->guest_feature_select); 395 + vp_iowrite32(features >> 32, &cfg->guest_feature); 396 + } 397 + EXPORT_SYMBOL_GPL(vp_modern_set_features); 398 + 399 + /* 400 + * vp_modern_generation - get the device genreation 401 + * @mdev: the modern virtio-pci device 402 + * 403 + * Returns the genreation read from device 404 + */ 405 + u32 vp_modern_generation(struct virtio_pci_modern_device *mdev) 406 + { 407 + struct virtio_pci_common_cfg __iomem *cfg = mdev->common; 408 + 409 + return vp_ioread8(&cfg->config_generation); 410 + } 411 + EXPORT_SYMBOL_GPL(vp_modern_generation); 412 + 413 + /* 414 + * vp_modern_get_status - get the device status 415 + * @mdev: the modern virtio-pci device 416 + * 417 + * Returns the status read from device 418 + */ 419 + u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev) 420 + { 421 + struct virtio_pci_common_cfg __iomem *cfg = mdev->common; 422 + 423 + return vp_ioread8(&cfg->device_status); 424 + } 425 + EXPORT_SYMBOL_GPL(vp_modern_get_status); 426 + 427 + /* 428 + * vp_modern_set_status - set status to device 429 + * @mdev: the modern virtio-pci device 430 + * @status: the status set to device 431 + */ 432 + void vp_modern_set_status(struct virtio_pci_modern_device *mdev, 433 + u8 status) 434 + { 435 + struct virtio_pci_common_cfg __iomem *cfg = mdev->common; 436 + 437 + vp_iowrite8(status, &cfg->device_status); 438 + } 439 + EXPORT_SYMBOL_GPL(vp_modern_set_status); 440 + 441 + /* 442 + * vp_modern_queue_vector - set the MSIX vector for a specific virtqueue 443 + * @mdev: the modern virtio-pci device 444 + * @index: queue index 445 + * @vector: the config vector 446 + * 447 + * Returns the config vector read from the device 448 + */ 449 + u16 
vp_modern_queue_vector(struct virtio_pci_modern_device *mdev, 450 + u16 index, u16 vector) 451 + { 452 + struct virtio_pci_common_cfg __iomem *cfg = mdev->common; 453 + 454 + vp_iowrite16(index, &cfg->queue_select); 455 + vp_iowrite16(vector, &cfg->queue_msix_vector); 456 + /* Flush the write out to device */ 457 + return vp_ioread16(&cfg->queue_msix_vector); 458 + } 459 + EXPORT_SYMBOL_GPL(vp_modern_queue_vector); 460 + 461 + /* 462 + * vp_modern_config_vector - set the vector for config interrupt 463 + * @mdev: the modern virtio-pci device 464 + * @vector: the config vector 465 + * 466 + * Returns the config vector read from the device 467 + */ 468 + u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev, 469 + u16 vector) 470 + { 471 + struct virtio_pci_common_cfg __iomem *cfg = mdev->common; 472 + 473 + /* Setup the vector used for configuration events */ 474 + vp_iowrite16(vector, &cfg->msix_config); 475 + /* Verify we had enough resources to assign the vector */ 476 + /* Will also flush the write out to device */ 477 + return vp_ioread16(&cfg->msix_config); 478 + } 479 + EXPORT_SYMBOL_GPL(vp_modern_config_vector); 480 + 481 + /* 482 + * vp_modern_queue_address - set the virtqueue address 483 + * @mdev: the modern virtio-pci device 484 + * @index: the queue index 485 + * @desc_addr: address of the descriptor area 486 + * @driver_addr: address of the driver area 487 + * @device_addr: address of the device area 488 + */ 489 + void vp_modern_queue_address(struct virtio_pci_modern_device *mdev, 490 + u16 index, u64 desc_addr, u64 driver_addr, 491 + u64 device_addr) 492 + { 493 + struct virtio_pci_common_cfg __iomem *cfg = mdev->common; 494 + 495 + vp_iowrite16(index, &cfg->queue_select); 496 + 497 + vp_iowrite64_twopart(desc_addr, &cfg->queue_desc_lo, 498 + &cfg->queue_desc_hi); 499 + vp_iowrite64_twopart(driver_addr, &cfg->queue_avail_lo, 500 + &cfg->queue_avail_hi); 501 + vp_iowrite64_twopart(device_addr, &cfg->queue_used_lo, 502 + 
&cfg->queue_used_hi); 503 + } 504 + EXPORT_SYMBOL_GPL(vp_modern_queue_address); 505 + 506 + /* 507 + * vp_modern_set_queue_enable - enable a virtqueue 508 + * @mdev: the modern virtio-pci device 509 + * @index: the queue index 510 + * @enable: whether the virtqueue is enable or not 511 + */ 512 + void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev, 513 + u16 index, bool enable) 514 + { 515 + vp_iowrite16(index, &mdev->common->queue_select); 516 + vp_iowrite16(enable, &mdev->common->queue_enable); 517 + } 518 + EXPORT_SYMBOL_GPL(vp_modern_set_queue_enable); 519 + 520 + /* 521 + * vp_modern_get_queue_enable - enable a virtqueue 522 + * @mdev: the modern virtio-pci device 523 + * @index: the queue index 524 + * 525 + * Returns whether a virtqueue is enabled or not 526 + */ 527 + bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev, 528 + u16 index) 529 + { 530 + vp_iowrite16(index, &mdev->common->queue_select); 531 + 532 + return vp_ioread16(&mdev->common->queue_enable); 533 + } 534 + EXPORT_SYMBOL_GPL(vp_modern_get_queue_enable); 535 + 536 + /* 537 + * vp_modern_set_queue_size - set size for a virtqueue 538 + * @mdev: the modern virtio-pci device 539 + * @index: the queue index 540 + * @size: the size of the virtqueue 541 + */ 542 + void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev, 543 + u16 index, u16 size) 544 + { 545 + vp_iowrite16(index, &mdev->common->queue_select); 546 + vp_iowrite16(size, &mdev->common->queue_size); 547 + 548 + } 549 + EXPORT_SYMBOL_GPL(vp_modern_set_queue_size); 550 + 551 + /* 552 + * vp_modern_get_queue_size - get size for a virtqueue 553 + * @mdev: the modern virtio-pci device 554 + * @index: the queue index 555 + * 556 + * Returns the size of the virtqueue 557 + */ 558 + u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev, 559 + u16 index) 560 + { 561 + vp_iowrite16(index, &mdev->common->queue_select); 562 + 563 + return vp_ioread16(&mdev->common->queue_size); 564 + 565 + 
} 566 + EXPORT_SYMBOL_GPL(vp_modern_get_queue_size); 567 + 568 + /* 569 + * vp_modern_get_num_queues - get the number of virtqueues 570 + * @mdev: the modern virtio-pci device 571 + * 572 + * Returns the number of virtqueues 573 + */ 574 + u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev) 575 + { 576 + return vp_ioread16(&mdev->common->num_queues); 577 + } 578 + EXPORT_SYMBOL_GPL(vp_modern_get_num_queues); 579 + 580 + /* 581 + * vp_modern_get_queue_notify_off - get notification offset for a virtqueue 582 + * @mdev: the modern virtio-pci device 583 + * @index: the queue index 584 + * 585 + * Returns the notification offset for a virtqueue 586 + */ 587 + u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev, 588 + u16 index) 589 + { 590 + vp_iowrite16(index, &mdev->common->queue_select); 591 + 592 + return vp_ioread16(&mdev->common->queue_notify_off); 593 + } 594 + EXPORT_SYMBOL_GPL(vp_modern_get_queue_notify_off); 595 + 596 + MODULE_VERSION("0.1"); 597 + MODULE_DESCRIPTION("Modern Virtio PCI Device"); 598 + MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>"); 599 + MODULE_LICENSE("GPL");
+111
include/linux/virtio_pci_modern.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + #ifndef _LINUX_VIRTIO_PCI_MODERN_H 3 + #define _LINUX_VIRTIO_PCI_MODERN_H 4 + 5 + #include <linux/pci.h> 6 + #include <linux/virtio_pci.h> 7 + 8 + struct virtio_pci_modern_device { 9 + struct pci_dev *pci_dev; 10 + 11 + struct virtio_pci_common_cfg __iomem *common; 12 + /* Device-specific data (non-legacy mode) */ 13 + void __iomem *device; 14 + /* Base of vq notifications (non-legacy mode). */ 15 + void __iomem *notify_base; 16 + /* Where to read and clear interrupt */ 17 + u8 __iomem *isr; 18 + 19 + /* So we can sanity-check accesses. */ 20 + size_t notify_len; 21 + size_t device_len; 22 + 23 + /* Capability for when we need to map notifications per-vq. */ 24 + int notify_map_cap; 25 + 26 + /* Multiply queue_notify_off by this value. (non-legacy mode). */ 27 + u32 notify_offset_multiplier; 28 + 29 + int modern_bars; 30 + 31 + struct virtio_device_id id; 32 + }; 33 + 34 + /* 35 + * Type-safe wrappers for io accesses. 36 + * Use these to enforce at compile time the following spec requirement: 37 + * 38 + * The driver MUST access each field using the “natural” access 39 + * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses 40 + * for 16-bit fields and 8-bit accesses for 8-bit fields. 
41 + */ 42 + static inline u8 vp_ioread8(const u8 __iomem *addr) 43 + { 44 + return ioread8(addr); 45 + } 46 + static inline u16 vp_ioread16 (const __le16 __iomem *addr) 47 + { 48 + return ioread16(addr); 49 + } 50 + 51 + static inline u32 vp_ioread32(const __le32 __iomem *addr) 52 + { 53 + return ioread32(addr); 54 + } 55 + 56 + static inline void vp_iowrite8(u8 value, u8 __iomem *addr) 57 + { 58 + iowrite8(value, addr); 59 + } 60 + 61 + static inline void vp_iowrite16(u16 value, __le16 __iomem *addr) 62 + { 63 + iowrite16(value, addr); 64 + } 65 + 66 + static inline void vp_iowrite32(u32 value, __le32 __iomem *addr) 67 + { 68 + iowrite32(value, addr); 69 + } 70 + 71 + static inline void vp_iowrite64_twopart(u64 val, 72 + __le32 __iomem *lo, 73 + __le32 __iomem *hi) 74 + { 75 + vp_iowrite32((u32)val, lo); 76 + vp_iowrite32(val >> 32, hi); 77 + } 78 + 79 + u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev); 80 + void vp_modern_set_features(struct virtio_pci_modern_device *mdev, 81 + u64 features); 82 + u32 vp_modern_generation(struct virtio_pci_modern_device *mdev); 83 + u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev); 84 + void vp_modern_set_status(struct virtio_pci_modern_device *mdev, 85 + u8 status); 86 + u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev, 87 + u16 idx, u16 vector); 88 + u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev, 89 + u16 vector); 90 + void vp_modern_queue_address(struct virtio_pci_modern_device *mdev, 91 + u16 index, u64 desc_addr, u64 driver_addr, 92 + u64 device_addr); 93 + void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev, 94 + u16 idx, bool enable); 95 + bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev, 96 + u16 idx); 97 + void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev, 98 + u16 idx, u16 size); 99 + u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev, 100 + u16 idx); 101 + u16 
vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev); 102 + u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev, 103 + u16 idx); 104 + void __iomem *vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off, 105 + size_t minlen, 106 + u32 align, 107 + u32 start, u32 size, 108 + size_t *len); 109 + int vp_modern_probe(struct virtio_pci_modern_device *mdev); 110 + void vp_modern_remove(struct virtio_pci_modern_device *mdev); 111 + #endif