/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * VFIO API definition
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */
#ifndef VFIO_H
#define VFIO_H


#include <linux/iommu.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/poll.h>
#include <uapi/linux/vfio.h>

/*
 * VFIO devices can be placed in a set, this allows all devices to share this
 * structure and the VFIO core will provide a lock that is held around
 * open_device()/close_device() for all devices in the set.
 */
struct vfio_device_set {
	void *set_id;
	struct mutex lock;
	struct list_head device_list;
	unsigned int device_count;
};

struct vfio_device {
	struct device *dev;
	const struct vfio_device_ops *ops;
	struct vfio_group *group;
	struct vfio_device_set *dev_set;
	struct list_head dev_set_list;

	/* Members below here are private, not for driver use */
	refcount_t refcount;
	unsigned int open_count;
	struct completion comp;
	struct list_head group_next;
};

/**
 * struct vfio_device_ops - VFIO bus driver device callbacks
 *
 * @open_device: Called when the first file descriptor is opened for this device
 * @close_device: Opposite of open_device
 * @read: Perform read(2) on device file descriptor
 * @write: Perform write(2) on device file descriptor
 * @ioctl: Perform ioctl(2) on device file descriptor, supporting VFIO_DEVICE_*
 *         operations documented below
 * @mmap: Perform mmap(2) on a region of the device file descriptor
 * @request: Request for the bus driver to release the device
 * @match: Optional device name match callback (return: 0 for no-match, >0 for
 *         match, -errno for abort (ex. match with insufficient or incorrect
 *         additional args)
 */
struct vfio_device_ops {
	char	*name;
	int	(*open_device)(struct vfio_device *vdev);
	void	(*close_device)(struct vfio_device *vdev);
	ssize_t	(*read)(struct vfio_device *vdev, char __user *buf,
			size_t count, loff_t *ppos);
	ssize_t	(*write)(struct vfio_device *vdev, const char __user *buf,
			 size_t count, loff_t *size);
	long	(*ioctl)(struct vfio_device *vdev, unsigned int cmd,
			 unsigned long arg);
	int	(*mmap)(struct vfio_device *vdev, struct vm_area_struct *vma);
	void	(*request)(struct vfio_device *vdev, unsigned int count);
	int	(*match)(struct vfio_device *vdev, char *buf);
};

extern struct iommu_group *vfio_iommu_group_get(struct device *dev);
extern void vfio_iommu_group_put(struct iommu_group *group, struct device *dev);

void vfio_init_group_dev(struct vfio_device *device, struct device *dev,
			 const struct vfio_device_ops *ops);
void vfio_uninit_group_dev(struct vfio_device *device);
int vfio_register_group_dev(struct vfio_device *device);
void vfio_unregister_group_dev(struct vfio_device *device);
extern struct vfio_device *vfio_device_get_from_dev(struct device *dev);
extern void vfio_device_put(struct vfio_device *device);

int vfio_assign_device_set(struct vfio_device *device, void *set_id);

/* events for the backend driver notify callback */
enum vfio_iommu_notify_type {
	VFIO_IOMMU_CONTAINER_CLOSE = 0,
};

/**
 * struct vfio_iommu_driver_ops - VFIO IOMMU driver callbacks
 */
struct vfio_iommu_driver_ops {
	char		*name;
	struct module	*owner;
	void		*(*open)(unsigned long arg);
	void		(*release)(void *iommu_data);
	ssize_t		(*read)(void *iommu_data, char __user *buf,
				size_t count, loff_t *ppos);
	ssize_t		(*write)(void *iommu_data, const char __user *buf,
				 size_t count, loff_t *size);
	long		(*ioctl)(void *iommu_data, unsigned int cmd,
				 unsigned long arg);
	int		(*mmap)(void *iommu_data, struct vm_area_struct *vma);
	int		(*attach_group)(void *iommu_data,
					struct iommu_group *group);
	void		(*detach_group)(void *iommu_data,
					struct iommu_group *group);
	int		(*pin_pages)(void *iommu_data,
				     struct iommu_group *group,
				     unsigned long *user_pfn,
				     int npage, int prot,
				     unsigned long *phys_pfn);
	int		(*unpin_pages)(void *iommu_data,
				       unsigned long *user_pfn, int npage);
	int		(*register_notifier)(void *iommu_data,
					     unsigned long *events,
					     struct notifier_block *nb);
	int		(*unregister_notifier)(void *iommu_data,
					       struct notifier_block *nb);
	int		(*dma_rw)(void *iommu_data, dma_addr_t user_iova,
				  void *data, size_t count, bool write);
	struct iommu_domain *(*group_iommu_domain)(void *iommu_data,
						   struct iommu_group *group);
	void		(*notify)(void *iommu_data,
				  enum vfio_iommu_notify_type event);
};

extern int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops);

extern void vfio_unregister_iommu_driver(
				const struct vfio_iommu_driver_ops *ops);

/*
 * External user API
 */
extern struct vfio_group *vfio_group_get_external_user(struct file *filep);
extern void vfio_group_put_external_user(struct vfio_group *group);
extern struct vfio_group *vfio_group_get_external_user_from_dev(struct device
								*dev);
extern bool vfio_external_group_match_file(struct vfio_group *group,
					   struct file *filep);
extern int vfio_external_user_iommu_id(struct vfio_group *group);
extern long vfio_external_check_extension(struct vfio_group *group,
					  unsigned long arg);

#define VFIO_PIN_PAGES_MAX_ENTRIES	(PAGE_SIZE/sizeof(unsigned long))

extern int vfio_pin_pages(struct device *dev, unsigned long *user_pfn,
			  int npage, int prot, unsigned long *phys_pfn);
extern int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn,
			    int npage);

extern int vfio_group_pin_pages(struct vfio_group *group,
				unsigned long *user_iova_pfn, int npage,
				int prot, unsigned long *phys_pfn);
extern int vfio_group_unpin_pages(struct vfio_group *group,
				  unsigned long *user_iova_pfn, int npage);

extern int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova,
		       void *data, size_t len, bool write);

extern struct iommu_domain *vfio_group_iommu_domain(struct vfio_group *group);

/* each type has independent events */
enum vfio_notify_type {
	VFIO_IOMMU_NOTIFY = 0,
	VFIO_GROUP_NOTIFY = 1,
};

/* events for VFIO_IOMMU_NOTIFY */
#define VFIO_IOMMU_NOTIFY_DMA_UNMAP	BIT(0)

/* events for VFIO_GROUP_NOTIFY */
#define VFIO_GROUP_NOTIFY_SET_KVM	BIT(0)

extern int vfio_register_notifier(struct device *dev,
				  enum vfio_notify_type type,
				  unsigned long *required_events,
				  struct notifier_block *nb);
extern int vfio_unregister_notifier(struct device *dev,
				    enum vfio_notify_type type,
				    struct notifier_block *nb);

struct kvm;
extern void vfio_group_set_kvm(struct vfio_group *group, struct kvm *kvm);

/*
 * Sub-module helpers
 */
struct vfio_info_cap {
	struct vfio_info_cap_header *buf;
	size_t size;
};
extern struct vfio_info_cap_header *vfio_info_cap_add(
		struct vfio_info_cap *caps, size_t size, u16 id, u16 version);
extern void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset);

extern int vfio_info_add_capability(struct vfio_info_cap *caps,
				    struct vfio_info_cap_header *cap,
				    size_t size);

extern int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr,
					      int num_irqs, int max_irq_type,
					      size_t *data_size);

struct pci_dev;
#if IS_ENABLED(CONFIG_VFIO_SPAPR_EEH)
extern void vfio_spapr_pci_eeh_open(struct pci_dev *pdev);
extern void vfio_spapr_pci_eeh_release(struct pci_dev *pdev);
extern long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group,
				       unsigned int cmd,
				       unsigned long arg);
#else
static inline void vfio_spapr_pci_eeh_open(struct pci_dev *pdev)
{
}

static inline void vfio_spapr_pci_eeh_release(struct pci_dev *pdev)
{
}

static inline long vfio_spapr_iommu_eeh_ioctl(struct iommu_group *group,
					      unsigned int cmd,
					      unsigned long arg)
{
	return -ENOTTY;
}
#endif /* CONFIG_VFIO_SPAPR_EEH */

/*
 * IRQfd - generic
 */
struct virqfd {
	void			*opaque;
	struct eventfd_ctx	*eventfd;
	int			(*handler)(void *, void *);
	void			(*thread)(void *, void *);
	void			*data;
	struct work_struct	inject;
	wait_queue_entry_t	wait;
	poll_table		pt;
	struct work_struct	shutdown;
	struct virqfd		**pvirqfd;
};

extern int vfio_virqfd_enable(void *opaque,
			      int (*handler)(void *, void *),
			      void (*thread)(void *, void *),
			      void *data, struct virqfd **pvirqfd, int fd);
extern void vfio_virqfd_disable(struct virqfd **pvirqfd);

#endif /* VFIO_H */
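For orientation, the sketch below shows one way a bus driver could use the device registration API declared in this v5.15 header: embed struct vfio_device in a driver-private structure, fill in a struct vfio_device_ops, then call vfio_init_group_dev()/vfio_register_group_dev() on probe and the unregister/uninit pair on remove. This is an illustrative sketch, not code from the kernel tree; all my_* names are invented for the example and error handling is minimal.

/*
 * Illustrative sketch only -- not part of the header above.
 * Hypothetical bus driver glue using the v5.15 registration API.
 */
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/vfio.h>

struct my_vfio_device {
	struct vfio_device vdev;	/* embedded; the VFIO core operates on this */
	/* driver-private state would follow here */
};

static int my_open_device(struct vfio_device *vdev)
{
	/* first file descriptor opened for this device: enable access */
	return 0;
}

static void my_close_device(struct vfio_device *vdev)
{
	/* last file descriptor closed: quiesce the device */
}

static const struct vfio_device_ops my_vfio_ops = {
	.name		= "my-vfio-driver",
	.open_device	= my_open_device,
	.close_device	= my_close_device,
};

/* called from the bus driver's probe path; 'dev' is the physical device */
static int my_probe(struct device *dev)
{
	struct my_vfio_device *mydev;
	int ret;

	mydev = kzalloc(sizeof(*mydev), GFP_KERNEL);
	if (!mydev)
		return -ENOMEM;

	vfio_init_group_dev(&mydev->vdev, dev, &my_vfio_ops);

	ret = vfio_register_group_dev(&mydev->vdev);
	if (ret) {
		vfio_uninit_group_dev(&mydev->vdev);
		kfree(mydev);
		return ret;
	}

	dev_set_drvdata(dev, mydev);
	return 0;
}

/* called from the bus driver's remove path */
static void my_remove(struct device *dev)
{
	struct my_vfio_device *mydev = dev_get_drvdata(dev);

	vfio_unregister_group_dev(&mydev->vdev);
	vfio_uninit_group_dev(&mydev->vdev);
	kfree(mydev);
}

Drivers whose devices cannot be reset in isolation could additionally call vfio_assign_device_set(&mydev->vdev, set_id) after vfio_init_group_dev(), so that the core serializes open_device()/close_device() across the whole set under the shared vfio_device_set lock described at the top of the header.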