/* include/linux/iommu.h — Linux kernel IOMMU API header (as of v5.8). */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 */

#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H

#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/ioasid.h>
#include <uapi/linux/iommu.h>

/* Page protection flags for iommu_map() and friends */
#define IOMMU_READ	(1 << 0)
#define IOMMU_WRITE	(1 << 1)
#define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC	(1 << 3)
#define IOMMU_MMIO	(1 << 4) /* e.g. things like MSI doorbells */
/*
 * Where the bus hardware includes a privilege level as part of its access type
 * markings, and certain devices are capable of issuing transactions marked as
 * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
 * given permission flags only apply to accesses at the higher privilege level,
 * and that unprivileged transactions should have as little access as possible.
 * This would usually imply the same permissions as kernel mappings on the CPU,
 * if the IOMMU page table format is equivalent.
 */
#define IOMMU_PRIV	(1 << 5)
/*
 * Non-coherent masters can use this page protection flag to set cacheable
 * memory attributes for only a transparent outer level of cache, also known as
 * the last-level or system cache.
 */
#define IOMMU_SYS_CACHE_ONLY	(1 << 6)

struct iommu_ops;
struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct notifier_block;
struct iommu_sva;
struct iommu_fault_event;

/* iommu fault flags */
#define IOMMU_FAULT_READ	0x0
#define IOMMU_FAULT_WRITE	0x1

typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
			struct device *, unsigned long, int, void *);
typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);

struct iommu_domain_geometry {
	dma_addr_t aperture_start; /* First address that can be mapped    */
	dma_addr_t aperture_end;   /* Last address that can be mapped     */
	bool force_aperture;       /* DMA only allowed in mappable range? */
};

/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING	(1U << 0)  /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API	(1U << 1)  /* Domain for use in DMA-API
					      implementation              */
#define __IOMMU_DOMAIN_PT	(1U << 2)  /* Domain is identity mapped   */

/*
 * These are the possible domain types:
 *
 *	IOMMU_DOMAIN_BLOCKED	- All DMA is blocked, can be used to isolate
 *				  devices
 *	IOMMU_DOMAIN_IDENTITY	- DMA addresses are system physical addresses
 *	IOMMU_DOMAIN_UNMANAGED	- DMA mappings managed by IOMMU-API user, used
 *				  for VMs
 *	IOMMU_DOMAIN_DMA	- Internally used for DMA-API implementations.
 *				  This flag allows IOMMU drivers to implement
 *				  certain optimizations for these domains
 */
#define IOMMU_DOMAIN_BLOCKED	(0U)
#define IOMMU_DOMAIN_IDENTITY	(__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED	(__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API)

struct iommu_domain {
	unsigned type;
	const struct iommu_ops *ops;
	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	iommu_fault_handler_t handler;
	void *handler_token;
	struct iommu_domain_geometry geometry;
	void *iova_cookie;
};

enum iommu_cap {
	IOMMU_CAP_CACHE_COHERENCY,	/* IOMMU can enforce cache coherent DMA
					   transactions */
	IOMMU_CAP_INTR_REMAP,		/* IOMMU supports interrupt isolation */
	IOMMU_CAP_NOEXEC,		/* IOMMU_NOEXEC flag */
};

/*
 * Following constraints are specific to FSL_PAMUV1:
 *  -aperture must be power of 2, and naturally aligned
 *  -number of windows must be power of 2, and address space size
 *   of each window is determined by aperture size / # of windows
 *  -the actual size of the mapped region of a window must be power
 *   of 2 starting with 4KB and physical address must be naturally
 *   aligned.
 * DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above mentioned constraints.
 * The caller can invoke iommu_domain_get_attr to check if the underlying
 * iommu implementation supports these constraints.
 */

enum iommu_attr {
	DOMAIN_ATTR_GEOMETRY,
	DOMAIN_ATTR_PAGING,
	DOMAIN_ATTR_WINDOWS,
	DOMAIN_ATTR_FSL_PAMU_STASH,
	DOMAIN_ATTR_FSL_PAMU_ENABLE,
	DOMAIN_ATTR_FSL_PAMUV1,
	DOMAIN_ATTR_NESTING,	/* two stages of translation */
	DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
	DOMAIN_ATTR_MAX,
};

/* These are the possible reserved region types */
enum iommu_resv_type {
	/* Memory regions which must be mapped 1:1 at all times */
	IOMMU_RESV_DIRECT,
	/*
	 * Memory regions which are advertised to be 1:1 but are
	 * commonly considered relaxable in some conditions,
	 * for instance in device assignment use case (USB, Graphics)
	 */
	IOMMU_RESV_DIRECT_RELAXABLE,
	/* Arbitrary "never map this or give it to a device" address ranges */
	IOMMU_RESV_RESERVED,
	/* Hardware MSI region (untranslated) */
	IOMMU_RESV_MSI,
	/* Software-managed MSI translation window */
	IOMMU_RESV_SW_MSI,
};

/**
 * struct iommu_resv_region - descriptor for a reserved memory region
 * @list: Linked list pointers
 * @start: System physical start address of the region
 * @length: Length of the region in bytes
 * @prot: IOMMU Protection flags (READ/WRITE/...)
 * @type: Type of the reserved region
 */
struct iommu_resv_region {
	struct list_head list;
	phys_addr_t start;
	size_t length;
	int prot;
	enum iommu_resv_type type;
};

/* Per device IOMMU features */
enum iommu_dev_features {
	IOMMU_DEV_FEAT_AUX,	/* Aux-domain feature */
	IOMMU_DEV_FEAT_SVA,	/* Shared Virtual Addresses */
};

#define IOMMU_PASID_INVALID	(-1U)

#ifdef CONFIG_IOMMU_API

/**
 * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
 *
 * @start: IOVA representing the start of the range to be flushed
 * @end: IOVA representing the end of the range to be flushed (exclusive)
 * @pgsize: The interval at which to perform the flush
 *
 * This structure is intended to be updated by multiple calls to the
 * ->unmap() function in struct iommu_ops before eventually being passed
 * into ->iotlb_sync().
 */
struct iommu_iotlb_gather {
	unsigned long		start;
	unsigned long		end;
	size_t			pgsize;
};

/**
 * struct iommu_ops - iommu ops and capabilities
 * @capable: check capability
 * @domain_alloc: allocate iommu domain
 * @domain_free: free iommu domain
 * @attach_dev: attach device to an iommu domain
 * @detach_dev: detach device from an iommu domain
 * @map: map a physically contiguous memory region to an iommu domain
 * @unmap: unmap a physically contiguous memory region from an iommu domain
 * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
 * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
 * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
 *              queue
 * @iova_to_phys: translate iova to physical address
 * @probe_device: Add device to iommu driver handling
 * @release_device: Remove device from iommu driver handling
 * @probe_finalize: Do final setup work after the device is added to an IOMMU
 *                  group and attached to the groups domain
 * @device_group: find iommu group for a particular device
 * @domain_get_attr: Query domain attributes
 * @domain_set_attr: Change domain attributes
 * @get_resv_regions: Request list of reserved regions for a device
 * @put_resv_regions: Free list of reserved regions for a device
 * @apply_resv_region: Temporary helper call-back for iova reserved ranges
 * @domain_window_enable: Configure and enable a particular window for a domain
 * @domain_window_disable: Disable a particular window for a domain
 * @of_xlate: add OF master IDs to iommu grouping
 * @is_attach_deferred: Check if domain attach should be deferred from iommu
 *                      driver init to device driver init (default no)
 * @dev_has/enable/disable_feat: per device entries to check/enable/disable
 *                               iommu specific features.
 * @dev_feat_enabled: check enabled feature
 * @aux_attach/detach_dev: aux-domain specific attach/detach entries.
 * @aux_get_pasid: get the pasid given an aux-domain
 * @sva_bind: Bind process address space to device
 * @sva_unbind: Unbind process address space from device
 * @sva_get_pasid: Get PASID associated to a SVA handle
 * @page_response: handle page request response
 * @cache_invalidate: invalidate translation caches
 * @sva_bind_gpasid: bind guest pasid and mm
 * @sva_unbind_gpasid: unbind guest pasid and mm
 * @def_domain_type: device default domain type, return value:
 *		- IOMMU_DOMAIN_IDENTITY: must use an identity domain
 *		- IOMMU_DOMAIN_DMA: must use a dma domain
 *		- 0: use the default setting
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 * @owner: Driver module providing these ops
 */
struct iommu_ops {
	bool (*capable)(enum iommu_cap);

	/* Domain allocation and freeing by the iommu driver */
	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
	void (*domain_free)(struct iommu_domain *);

	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
	void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*map)(struct iommu_domain *domain, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
	size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
			size_t size, struct iommu_iotlb_gather *iotlb_gather);
	void (*flush_iotlb_all)(struct iommu_domain *domain);
	void (*iotlb_sync_map)(struct iommu_domain *domain);
	void (*iotlb_sync)(struct iommu_domain *domain,
			   struct iommu_iotlb_gather *iotlb_gather);
	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
	struct iommu_device *(*probe_device)(struct device *dev);
	void (*release_device)(struct device *dev);
	void (*probe_finalize)(struct device *dev);
	struct iommu_group *(*device_group)(struct device *dev);
	int (*domain_get_attr)(struct iommu_domain *domain,
			       enum iommu_attr attr, void *data);
	int (*domain_set_attr)(struct iommu_domain *domain,
			       enum iommu_attr attr, void *data);

	/* Request/Free a list of reserved regions for a device */
	void (*get_resv_regions)(struct device *dev, struct list_head *list);
	void (*put_resv_regions)(struct device *dev, struct list_head *list);
	void (*apply_resv_region)(struct device *dev,
				  struct iommu_domain *domain,
				  struct iommu_resv_region *region);

	/* Window handling functions */
	int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
				    phys_addr_t paddr, u64 size, int prot);
	void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr);

	int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
	bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev);

	/* Per device IOMMU features */
	bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f);
	bool (*dev_feat_enabled)(struct device *dev, enum iommu_dev_features f);
	int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
	int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);

	/* Aux-domain specific attach/detach entries */
	int (*aux_attach_dev)(struct iommu_domain *domain, struct device *dev);
	void (*aux_detach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*aux_get_pasid)(struct iommu_domain *domain, struct device *dev);

	struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm,
				      void *drvdata);
	void (*sva_unbind)(struct iommu_sva *handle);
	int (*sva_get_pasid)(struct iommu_sva *handle);

	int (*page_response)(struct device *dev,
			     struct iommu_fault_event *evt,
			     struct iommu_page_response *msg);
	int (*cache_invalidate)(struct iommu_domain *domain, struct device *dev,
				struct iommu_cache_invalidate_info *inv_info);
	int (*sva_bind_gpasid)(struct iommu_domain *domain,
			struct device *dev, struct iommu_gpasid_bind_data *data);

	int (*sva_unbind_gpasid)(struct device *dev, int pasid);

	int (*def_domain_type)(struct device *dev);

	unsigned long pgsize_bitmap;
	struct module *owner;
};

/**
 * struct iommu_device - IOMMU core representation of one IOMMU hardware
 *			 instance
 * @list: Used by the iommu-core to keep a list of registered iommus
 * @ops: iommu-ops for talking to this iommu
 * @fwnode: firmware handle, set via iommu_device_set_fwnode()
 * @dev: struct device for sysfs handling
 */
struct iommu_device {
	struct list_head list;
	const struct iommu_ops *ops;
	struct fwnode_handle *fwnode;
	struct device *dev;
};

/**
 * struct iommu_fault_event - Generic fault event
 *
 * Can represent recoverable faults such as a page requests or
 * unrecoverable faults such as DMA or IRQ remapping faults.
 *
 * @fault: fault descriptor
 * @list: pending fault event list, used for tracking responses
 */
struct iommu_fault_event {
	struct iommu_fault fault;
	struct list_head list;
};

/**
 * struct iommu_fault_param - per-device IOMMU fault data
 * @handler: Callback function to handle IOMMU faults at device level
 * @data: handler private data
 * @faults: holds the pending faults which needs response
 * @lock: protect pending faults list
 */
struct iommu_fault_param {
	iommu_dev_fault_handler_t handler;
	void *data;
	struct list_head faults;
	struct mutex lock;
};

/**
 * struct dev_iommu - Collection of per-device IOMMU data
 *
 * @lock: mutex guarding the members below (NOTE(review): exact locking
 *	  scope not visible in this header — confirm against iommu core)
 * @fault_param: IOMMU detected device fault reporting data
 * @fwspec: IOMMU fwspec data
 * @iommu_dev: IOMMU device this device is linked to
 * @priv: IOMMU Driver private data
 *
 * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
 *	struct iommu_group	*iommu_group;
 */
struct dev_iommu {
	struct mutex lock;
	struct iommu_fault_param	*fault_param;
	struct iommu_fwspec		*fwspec;
	struct iommu_device		*iommu_dev;
	void				*priv;
};

int  iommu_device_register(struct iommu_device *iommu);
void iommu_device_unregister(struct iommu_device *iommu);
int  iommu_device_sysfs_add(struct iommu_device *iommu,
			    struct device *parent,
			    const struct attribute_group **groups,
			    const char *fmt, ...) __printf(4, 5);
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int  iommu_device_link(struct iommu_device   *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);

static inline void __iommu_device_set_ops(struct iommu_device *iommu,
					  const struct iommu_ops *ops)
{
	iommu->ops = ops;
}

/* Sets @ops on @iommu and records the providing module as the owner. */
#define iommu_device_set_ops(iommu, ops)				\
do {									\
	struct iommu_ops *__ops = (struct iommu_ops *)(ops);		\
	__ops->owner = THIS_MODULE;					\
	__iommu_device_set_ops(iommu, __ops);				\
} while (0)

static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
					   struct fwnode_handle *fwnode)
{
	iommu->fwnode = fwnode;
}

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return (struct iommu_device *)dev_get_drvdata(dev);
}

/* Reset a gather to "empty": start > end so any page extends the range. */
static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
	*gather = (struct iommu_iotlb_gather) {
		.start	= ULONG_MAX,
	};
}

#define IOMMU_GROUP_NOTIFY_ADD_DEVICE		1 /* Device added */
#define IOMMU_GROUP_NOTIFY_DEL_DEVICE		2 /* Pre Device removed */
#define IOMMU_GROUP_NOTIFY_BIND_DRIVER		3 /* Pre Driver bind */
#define IOMMU_GROUP_NOTIFY_BOUND_DRIVER		4 /* Post Driver bind */
#define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER	5 /* Pre Driver unbind */
#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER	6 /* Post Driver unbind */

extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
extern int bus_iommu_probe(struct bus_type *bus);
extern bool iommu_present(struct bus_type *bus);
extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
extern struct iommu_group *iommu_group_get_by_id(int id);
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
			       struct
			       device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
				struct device *dev);
extern int iommu_cache_invalidate(struct iommu_domain *domain,
				  struct device *dev,
				  struct iommu_cache_invalidate_info *inv_info);
extern int iommu_sva_bind_gpasid(struct iommu_domain *domain,
		struct device *dev, struct iommu_gpasid_bind_data *data);
extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
				   struct device *dev, ioasid_t pasid);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot);
extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			  size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
			       unsigned long iova, size_t size,
			       struct iommu_iotlb_gather *iotlb_gather);
extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			   struct scatterlist *sg, unsigned int nents, int prot);
extern size_t iommu_map_sg_atomic(struct iommu_domain *domain,
				  unsigned long iova, struct scatterlist *sg,
				  unsigned int nents, int prot);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
			iommu_fault_handler_t handler, void *token);

/**
 * iommu_map_sgtable - Map the given buffer to the IOMMU domain
 * @domain: The IOMMU domain to perform the mapping
 * @iova: The start address to map the buffer
 * @sgt: The sg_table object describing the buffer
 * @prot: IOMMU protection bits
 *
 * Creates a mapping at @iova for the buffer described by a scatterlist
 * stored in the given sg_table object in the provided IOMMU domain.
 */
static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
			unsigned long iova, struct sg_table *sgt, int prot)
{
	return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
}

extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern void generic_iommu_put_resv_regions(struct device *dev,
					   struct list_head *list);
extern void iommu_set_default_passthrough(bool cmd_line);
extern void iommu_set_default_translated(bool cmd_line);
extern bool iommu_default_passthrough(void);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
			enum iommu_resv_type type);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
					struct list_head *head);

extern int iommu_attach_group(struct iommu_domain *domain,
			      struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
			       struct iommu_group *group);
extern struct iommu_group *iommu_group_alloc(void);
extern void *iommu_group_get_iommudata(struct iommu_group *group);
extern void iommu_group_set_iommudata(struct iommu_group *group,
				      void *iommu_data,
				      void (*release)(void *iommu_data));
extern int iommu_group_set_name(struct iommu_group *group, const char *name);
extern int iommu_group_add_device(struct iommu_group *group,
				  struct device *dev);
extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
				    int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);
extern int iommu_group_register_notifier(struct iommu_group *group,
					 struct notifier_block *nb);
extern int iommu_group_unregister_notifier(struct iommu_group *group,
					   struct notifier_block *nb);
extern int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data);

extern int iommu_unregister_device_fault_handler(struct device *dev);

extern int iommu_report_device_fault(struct device *dev,
				     struct iommu_fault_event *evt);
extern int iommu_page_response(struct device *dev,
			       struct iommu_page_response *msg);

extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);

extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
				 void *data);
extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
				 void *data);

/* Window handling function prototypes */
extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
				      phys_addr_t offset, u64 size,
				      int prot);
extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr);

extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
			      unsigned long iova, int flags);

static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
{
	if (domain->ops->flush_iotlb_all)
		domain->ops->flush_iotlb_all(domain);
}

/* Flush any gathered ranges, then reset @iotlb_gather for reuse. */
static inline void iommu_tlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *iotlb_gather)
{
	if (domain->ops->iotlb_sync)
		domain->ops->iotlb_sync(domain, iotlb_gather);

	iommu_iotlb_gather_init(iotlb_gather);
}

static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
	unsigned long start = iova, end = start + size;

	/*
	 * If the new page is disjoint from the current range or is mapped at
	 * a different granularity, then sync the TLB so that the gather
	 * structure can be rewritten.
	 */
	if (gather->pgsize != size ||
	    end < gather->start || start > gather->end) {
		if (gather->pgsize)
			iommu_tlb_sync(domain, gather);
		gather->pgsize = size;
	}

	/* Grow the gathered range to cover [start, end). */
	if (gather->end < end)
		gather->end = end;

	if (gather->start > start)
		gather->start = start;
}

/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
/* FSL-MC device grouping function */
struct iommu_group *fsl_mc_device_group(struct device *dev);

/**
 * struct iommu_fwspec - per-device IOMMU instance data
 * @ops: ops for this device's IOMMU
 * @iommu_fwnode: firmware handle for this device's IOMMU
 * @flags: IOMMU_FWSPEC_* flags (e.g. IOMMU_FWSPEC_PCI_RC_ATS)
 * @num_pasid_bits: number of PASID bits supported by this device
 * @num_ids: number of associated device IDs
 * @ids: IDs which this device may present to the IOMMU
 */
struct iommu_fwspec {
	const struct iommu_ops	*ops;
	struct fwnode_handle	*iommu_fwnode;
	u32			flags;
	u32			num_pasid_bits;
	unsigned int		num_ids;
	u32			ids[];
};

/* ATS is supported */
#define IOMMU_FWSPEC_PCI_RC_ATS			(1 << 0)

/**
 * struct iommu_sva - handle to a device-mm bond
 * @dev: device this handle is bound to
 */
struct iommu_sva {
	struct device			*dev;
};

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->fwspec;
	else
		return
NULL; 626} 627 628static inline void dev_iommu_fwspec_set(struct device *dev, 629 struct iommu_fwspec *fwspec) 630{ 631 dev->iommu->fwspec = fwspec; 632} 633 634static inline void *dev_iommu_priv_get(struct device *dev) 635{ 636 return dev->iommu->priv; 637} 638 639static inline void dev_iommu_priv_set(struct device *dev, void *priv) 640{ 641 dev->iommu->priv = priv; 642} 643 644int iommu_probe_device(struct device *dev); 645void iommu_release_device(struct device *dev); 646 647bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features f); 648int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f); 649int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f); 650bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features f); 651int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev); 652void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev); 653int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev); 654 655struct iommu_sva *iommu_sva_bind_device(struct device *dev, 656 struct mm_struct *mm, 657 void *drvdata); 658void iommu_sva_unbind_device(struct iommu_sva *handle); 659int iommu_sva_get_pasid(struct iommu_sva *handle); 660 661#else /* CONFIG_IOMMU_API */ 662 663struct iommu_ops {}; 664struct iommu_group {}; 665struct iommu_fwspec {}; 666struct iommu_device {}; 667struct iommu_fault_param {}; 668struct iommu_iotlb_gather {}; 669 670static inline bool iommu_present(struct bus_type *bus) 671{ 672 return false; 673} 674 675static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap) 676{ 677 return false; 678} 679 680static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus) 681{ 682 return NULL; 683} 684 685static inline struct iommu_group *iommu_group_get_by_id(int id) 686{ 687 return NULL; 688} 689 690static inline void iommu_domain_free(struct iommu_domain *domain) 691{ 692} 693 694static inline int 
iommu_attach_device(struct iommu_domain *domain, 695 struct device *dev) 696{ 697 return -ENODEV; 698} 699 700static inline void iommu_detach_device(struct iommu_domain *domain, 701 struct device *dev) 702{ 703} 704 705static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev) 706{ 707 return NULL; 708} 709 710static inline int iommu_map(struct iommu_domain *domain, unsigned long iova, 711 phys_addr_t paddr, size_t size, int prot) 712{ 713 return -ENODEV; 714} 715 716static inline int iommu_map_atomic(struct iommu_domain *domain, 717 unsigned long iova, phys_addr_t paddr, 718 size_t size, int prot) 719{ 720 return -ENODEV; 721} 722 723static inline size_t iommu_unmap(struct iommu_domain *domain, 724 unsigned long iova, size_t size) 725{ 726 return 0; 727} 728 729static inline size_t iommu_unmap_fast(struct iommu_domain *domain, 730 unsigned long iova, int gfp_order, 731 struct iommu_iotlb_gather *iotlb_gather) 732{ 733 return 0; 734} 735 736static inline size_t iommu_map_sg(struct iommu_domain *domain, 737 unsigned long iova, struct scatterlist *sg, 738 unsigned int nents, int prot) 739{ 740 return 0; 741} 742 743static inline size_t iommu_map_sg_atomic(struct iommu_domain *domain, 744 unsigned long iova, struct scatterlist *sg, 745 unsigned int nents, int prot) 746{ 747 return 0; 748} 749 750static inline void iommu_flush_tlb_all(struct iommu_domain *domain) 751{ 752} 753 754static inline void iommu_tlb_sync(struct iommu_domain *domain, 755 struct iommu_iotlb_gather *iotlb_gather) 756{ 757} 758 759static inline int iommu_domain_window_enable(struct iommu_domain *domain, 760 u32 wnd_nr, phys_addr_t paddr, 761 u64 size, int prot) 762{ 763 return -ENODEV; 764} 765 766static inline void iommu_domain_window_disable(struct iommu_domain *domain, 767 u32 wnd_nr) 768{ 769} 770 771static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) 772{ 773 return 0; 774} 775 776static inline void iommu_set_fault_handler(struct 
iommu_domain *domain, 777 iommu_fault_handler_t handler, void *token) 778{ 779} 780 781static inline void iommu_get_resv_regions(struct device *dev, 782 struct list_head *list) 783{ 784} 785 786static inline void iommu_put_resv_regions(struct device *dev, 787 struct list_head *list) 788{ 789} 790 791static inline int iommu_get_group_resv_regions(struct iommu_group *group, 792 struct list_head *head) 793{ 794 return -ENODEV; 795} 796 797static inline void iommu_set_default_passthrough(bool cmd_line) 798{ 799} 800 801static inline void iommu_set_default_translated(bool cmd_line) 802{ 803} 804 805static inline bool iommu_default_passthrough(void) 806{ 807 return true; 808} 809 810static inline int iommu_attach_group(struct iommu_domain *domain, 811 struct iommu_group *group) 812{ 813 return -ENODEV; 814} 815 816static inline void iommu_detach_group(struct iommu_domain *domain, 817 struct iommu_group *group) 818{ 819} 820 821static inline struct iommu_group *iommu_group_alloc(void) 822{ 823 return ERR_PTR(-ENODEV); 824} 825 826static inline void *iommu_group_get_iommudata(struct iommu_group *group) 827{ 828 return NULL; 829} 830 831static inline void iommu_group_set_iommudata(struct iommu_group *group, 832 void *iommu_data, 833 void (*release)(void *iommu_data)) 834{ 835} 836 837static inline int iommu_group_set_name(struct iommu_group *group, 838 const char *name) 839{ 840 return -ENODEV; 841} 842 843static inline int iommu_group_add_device(struct iommu_group *group, 844 struct device *dev) 845{ 846 return -ENODEV; 847} 848 849static inline void iommu_group_remove_device(struct device *dev) 850{ 851} 852 853static inline int iommu_group_for_each_dev(struct iommu_group *group, 854 void *data, 855 int (*fn)(struct device *, void *)) 856{ 857 return -ENODEV; 858} 859 860static inline struct iommu_group *iommu_group_get(struct device *dev) 861{ 862 return NULL; 863} 864 865static inline void iommu_group_put(struct iommu_group *group) 866{ 867} 868 869static inline int 
iommu_group_register_notifier(struct iommu_group *group, 870 struct notifier_block *nb) 871{ 872 return -ENODEV; 873} 874 875static inline int iommu_group_unregister_notifier(struct iommu_group *group, 876 struct notifier_block *nb) 877{ 878 return 0; 879} 880 881static inline 882int iommu_register_device_fault_handler(struct device *dev, 883 iommu_dev_fault_handler_t handler, 884 void *data) 885{ 886 return -ENODEV; 887} 888 889static inline int iommu_unregister_device_fault_handler(struct device *dev) 890{ 891 return 0; 892} 893 894static inline 895int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt) 896{ 897 return -ENODEV; 898} 899 900static inline int iommu_page_response(struct device *dev, 901 struct iommu_page_response *msg) 902{ 903 return -ENODEV; 904} 905 906static inline int iommu_group_id(struct iommu_group *group) 907{ 908 return -ENODEV; 909} 910 911static inline int iommu_domain_get_attr(struct iommu_domain *domain, 912 enum iommu_attr attr, void *data) 913{ 914 return -EINVAL; 915} 916 917static inline int iommu_domain_set_attr(struct iommu_domain *domain, 918 enum iommu_attr attr, void *data) 919{ 920 return -EINVAL; 921} 922 923static inline int iommu_device_register(struct iommu_device *iommu) 924{ 925 return -ENODEV; 926} 927 928static inline void iommu_device_set_ops(struct iommu_device *iommu, 929 const struct iommu_ops *ops) 930{ 931} 932 933static inline void iommu_device_set_fwnode(struct iommu_device *iommu, 934 struct fwnode_handle *fwnode) 935{ 936} 937 938static inline struct iommu_device *dev_to_iommu_device(struct device *dev) 939{ 940 return NULL; 941} 942 943static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather) 944{ 945} 946 947static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain, 948 struct iommu_iotlb_gather *gather, 949 unsigned long iova, size_t size) 950{ 951} 952 953static inline void iommu_device_unregister(struct iommu_device *iommu) 954{ 955} 956 
957static inline int iommu_device_sysfs_add(struct iommu_device *iommu, 958 struct device *parent, 959 const struct attribute_group **groups, 960 const char *fmt, ...) 961{ 962 return -ENODEV; 963} 964 965static inline void iommu_device_sysfs_remove(struct iommu_device *iommu) 966{ 967} 968 969static inline int iommu_device_link(struct device *dev, struct device *link) 970{ 971 return -EINVAL; 972} 973 974static inline void iommu_device_unlink(struct device *dev, struct device *link) 975{ 976} 977 978static inline int iommu_fwspec_init(struct device *dev, 979 struct fwnode_handle *iommu_fwnode, 980 const struct iommu_ops *ops) 981{ 982 return -ENODEV; 983} 984 985static inline void iommu_fwspec_free(struct device *dev) 986{ 987} 988 989static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids, 990 int num_ids) 991{ 992 return -ENODEV; 993} 994 995static inline 996const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode) 997{ 998 return NULL; 999} 1000 1001static inline bool 1002iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat) 1003{ 1004 return false; 1005} 1006 1007static inline bool 1008iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat) 1009{ 1010 return false; 1011} 1012 1013static inline int 1014iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat) 1015{ 1016 return -ENODEV; 1017} 1018 1019static inline int 1020iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat) 1021{ 1022 return -ENODEV; 1023} 1024 1025static inline int 1026iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev) 1027{ 1028 return -ENODEV; 1029} 1030 1031static inline void 1032iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev) 1033{ 1034} 1035 1036static inline int 1037iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev) 1038{ 1039 return -ENODEV; 1040} 1041 1042static inline struct iommu_sva * 
1043iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata) 1044{ 1045 return NULL; 1046} 1047 1048static inline void iommu_sva_unbind_device(struct iommu_sva *handle) 1049{ 1050} 1051 1052static inline int iommu_sva_get_pasid(struct iommu_sva *handle) 1053{ 1054 return IOMMU_PASID_INVALID; 1055} 1056 1057static inline int 1058iommu_cache_invalidate(struct iommu_domain *domain, 1059 struct device *dev, 1060 struct iommu_cache_invalidate_info *inv_info) 1061{ 1062 return -ENODEV; 1063} 1064static inline int iommu_sva_bind_gpasid(struct iommu_domain *domain, 1065 struct device *dev, struct iommu_gpasid_bind_data *data) 1066{ 1067 return -ENODEV; 1068} 1069 1070static inline int iommu_sva_unbind_gpasid(struct iommu_domain *domain, 1071 struct device *dev, int pasid) 1072{ 1073 return -ENODEV; 1074} 1075 1076static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev) 1077{ 1078 return NULL; 1079} 1080#endif /* CONFIG_IOMMU_API */ 1081 1082#ifdef CONFIG_IOMMU_DEBUGFS 1083extern struct dentry *iommu_debugfs_dir; 1084void iommu_debugfs_setup(void); 1085#else 1086static inline void iommu_debugfs_setup(void) {} 1087#endif 1088 1089#endif /* __LINUX_IOMMU_H */