/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 */

#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H

#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/ioasid.h>
#include <uapi/linux/iommu.h>

#define IOMMU_READ	(1 << 0)
#define IOMMU_WRITE	(1 << 1)
#define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC	(1 << 3)
#define IOMMU_MMIO	(1 << 4) /* e.g. things like MSI doorbells */
/*
 * Where the bus hardware includes a privilege level as part of its access type
 * markings, and certain devices are capable of issuing transactions marked as
 * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
 * given permission flags only apply to accesses at the higher privilege level,
 * and that unprivileged transactions should have as little access as possible.
 * This would usually imply the same permissions as kernel mappings on the CPU,
 * if the IOMMU page table format is equivalent.
 */
#define IOMMU_PRIV	(1 << 5)

struct iommu_ops;
struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct iommu_domain_ops;
struct notifier_block;
struct iommu_sva;
struct iommu_fault_event;
struct iommu_dma_cookie;

/* iommu fault flags */
#define IOMMU_FAULT_READ	0x0
#define IOMMU_FAULT_WRITE	0x1

typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
			struct device *, unsigned long, int, void *);
typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);

struct iommu_domain_geometry {
	dma_addr_t aperture_start; /* First address that can be mapped */
	dma_addr_t aperture_end;   /* Last address that can be mapped */
	bool force_aperture;       /* DMA only allowed in mappable range? */
};

/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING	(1U << 0)  /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API	(1U << 1)  /* Domain for use in DMA-API
					      implementation */
#define __IOMMU_DOMAIN_PT	(1U << 2)  /* Domain is identity mapped */
#define __IOMMU_DOMAIN_DMA_FQ	(1U << 3)  /* DMA-API uses flush queue */

/*
 * These are the possible domain types
 *
 * IOMMU_DOMAIN_BLOCKED	- All DMA is blocked, can be used to isolate
 *			  devices
 * IOMMU_DOMAIN_IDENTITY	- DMA addresses are system physical addresses
 * IOMMU_DOMAIN_UNMANAGED	- DMA mappings managed by IOMMU-API user, used
 *				  for VMs
 * IOMMU_DOMAIN_DMA	- Internally used for DMA-API implementations.
 *			  This flag allows IOMMU drivers to implement
 *			  certain optimizations for these domains
 * IOMMU_DOMAIN_DMA_FQ	- As above, but definitely using batched TLB
 *			  invalidation.
 */
#define IOMMU_DOMAIN_BLOCKED	(0U)
#define IOMMU_DOMAIN_IDENTITY	(__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED	(__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API)
#define IOMMU_DOMAIN_DMA_FQ	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API |	\
				 __IOMMU_DOMAIN_DMA_FQ)

struct iommu_domain {
	unsigned type;
	const struct iommu_domain_ops *ops;
	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	iommu_fault_handler_t handler;
	void *handler_token;
	struct iommu_domain_geometry geometry;
	struct iommu_dma_cookie *iova_cookie;
};

static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
{
	return domain->type & __IOMMU_DOMAIN_DMA_API;
}
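
/*
 * Usage sketch (illustrative only, not part of the upstream API): an
 * IOMMU-API user typically allocates a domain, attaches a device's group to
 * it and then manages mappings explicitly; iommu_domain_alloc() hands back an
 * IOMMU_DOMAIN_UNMANAGED domain. The helper name and error handling below
 * are hypothetical; the calls are the ones declared later in this header.
 *
 *	static int example_map_one_page(struct device *dev, dma_addr_t iova,
 *					phys_addr_t paddr)
 *	{
 *		struct iommu_domain *domain;
 *		struct iommu_group *group;
 *		int ret;
 *
 *		domain = iommu_domain_alloc(dev->bus);
 *		if (!domain)
 *			return -ENOMEM;
 *
 *		group = iommu_group_get(dev);
 *		if (!group) {
 *			iommu_domain_free(domain);
 *			return -ENODEV;
 *		}
 *
 *		ret = iommu_attach_group(domain, group);
 *		if (!ret)
 *			ret = iommu_map(domain, iova, paddr, PAGE_SIZE,
 *					IOMMU_READ | IOMMU_WRITE);
 *		iommu_group_put(group);
 *		return ret;
 *	}
 */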

enum iommu_cap {
	IOMMU_CAP_CACHE_COHERENCY,	/* IOMMU_CACHE is supported */
	IOMMU_CAP_INTR_REMAP,		/* IOMMU supports interrupt isolation */
	IOMMU_CAP_NOEXEC,		/* IOMMU_NOEXEC flag */
	IOMMU_CAP_PRE_BOOT_PROTECTION,	/* Firmware says it used the IOMMU for
					   DMA protection and we should too */
};

/* These are the possible reserved region types */
enum iommu_resv_type {
	/* Memory regions which must be mapped 1:1 at all times */
	IOMMU_RESV_DIRECT,
	/*
	 * Memory regions which are advertised to be 1:1 but are
	 * commonly considered relaxable in some conditions,
	 * for instance in the device assignment use case (USB, Graphics)
	 */
	IOMMU_RESV_DIRECT_RELAXABLE,
	/* Arbitrary "never map this or give it to a device" address ranges */
	IOMMU_RESV_RESERVED,
	/* Hardware MSI region (untranslated) */
	IOMMU_RESV_MSI,
	/* Software-managed MSI translation window */
	IOMMU_RESV_SW_MSI,
};

/**
 * struct iommu_resv_region - descriptor for a reserved memory region
 * @list: Linked list pointers
 * @start: System physical start address of the region
 * @length: Length of the region in bytes
 * @prot: IOMMU Protection flags (READ/WRITE/...)
 * @type: Type of the reserved region
 * @free: Callback to free associated memory allocations
 */
struct iommu_resv_region {
	struct list_head list;
	phys_addr_t start;
	size_t length;
	int prot;
	enum iommu_resv_type type;
	void (*free)(struct device *dev, struct iommu_resv_region *region);
};

struct iommu_iort_rmr_data {
	struct iommu_resv_region rr;

	/* Stream IDs associated with IORT RMR entry */
	const u32 *sids;
	u32 num_sids;
};

/**
 * enum iommu_dev_features - Per device IOMMU features
 * @IOMMU_DEV_FEAT_SVA: Shared Virtual Addresses
 * @IOMMU_DEV_FEAT_IOPF: I/O Page Faults such as PRI or Stall. Generally
 *			 enabling %IOMMU_DEV_FEAT_SVA requires
 *			 %IOMMU_DEV_FEAT_IOPF, but some devices manage I/O Page
 *			 Faults themselves instead of relying on the IOMMU. When
 *			 supported, this feature must be enabled before and
 *			 disabled after %IOMMU_DEV_FEAT_SVA.
 *
 * Device drivers enable a feature using iommu_dev_enable_feature().
 */
enum iommu_dev_features {
	IOMMU_DEV_FEAT_SVA,
	IOMMU_DEV_FEAT_IOPF,
};

#define IOMMU_PASID_INVALID	(-1U)
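
/*
 * Illustrative sketch (not upstream code): per the ordering documented above,
 * a driver relying on the IOMMU for I/O page faults enables
 * IOMMU_DEV_FEAT_IOPF before IOMMU_DEV_FEAT_SVA and releases the features in
 * reverse order; the variable names are hypothetical.
 *
 *	if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_IOPF))
 *		return -ENODEV;
 *	if (iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA)) {
 *		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_IOPF);
 *		return -ENODEV;
 *	}
 *
 *	handle = iommu_sva_bind_device(dev, current->mm, NULL);
 *	if (!IS_ERR(handle))
 *		pasid = iommu_sva_get_pasid(handle);
 *	...
 *	iommu_sva_unbind_device(handle);
 *	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_SVA);
 *	iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_IOPF);
 */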

#ifdef CONFIG_IOMMU_API

/**
 * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
 *
 * @start: IOVA representing the start of the range to be flushed
 * @end: IOVA representing the end of the range to be flushed (inclusive)
 * @pgsize: The interval at which to perform the flush
 * @freelist: Removed pages to free after sync
 * @queued: Indicates that the flush will be queued
 *
 * This structure is intended to be updated by multiple calls to the
 * ->unmap() function in struct iommu_domain_ops before eventually being
 * passed into ->iotlb_sync(). Drivers can add pages to @freelist to be freed
 * after ->iotlb_sync() or ->iotlb_flush_all() have cleared all cached
 * references to them. @queued is set to indicate when ->iotlb_flush_all()
 * will be called later instead of ->iotlb_sync(), so drivers may optimise
 * accordingly.
 */
struct iommu_iotlb_gather {
	unsigned long		start;
	unsigned long		end;
	size_t			pgsize;
	struct list_head	freelist;
	bool			queued;
};
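
/*
 * Illustrative sketch (not upstream code): a caller batching TLB maintenance
 * initialises a gather structure, performs one or more unmaps that accumulate
 * into it, and issues a single sync at the end. Variable names are
 * hypothetical; the helpers are declared further down in this header.
 *
 *	struct iommu_iotlb_gather gather;
 *	size_t unmapped;
 *
 *	iommu_iotlb_gather_init(&gather);
 *	unmapped = iommu_unmap_fast(domain, iova, size, &gather);
 *	iommu_iotlb_sync(domain, &gather);
 */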

/**
 * struct iommu_ops - iommu ops and capabilities
 * @capable: check capability
 * @domain_alloc: allocate iommu domain
 * @probe_device: Add device to iommu driver handling
 * @release_device: Remove device from iommu driver handling
 * @probe_finalize: Do final setup work after the device is added to an IOMMU
 *		    group and attached to the group's domain
 * @device_group: find iommu group for a particular device
 * @get_resv_regions: Request list of reserved regions for a device
 * @of_xlate: add OF master IDs to iommu grouping
 * @is_attach_deferred: Check if domain attach should be deferred from iommu
 *			driver init to device driver init (default no)
 * @dev_enable/disable_feat: per device entries to enable/disable
 *			     iommu specific features.
 * @sva_bind: Bind process address space to device
 * @sva_unbind: Unbind process address space from device
 * @sva_get_pasid: Get PASID associated to a SVA handle
 * @page_response: handle page request response
 * @def_domain_type: device default domain type, return value:
 *		- IOMMU_DOMAIN_IDENTITY: must use an identity domain
 *		- IOMMU_DOMAIN_DMA: must use a dma domain
 *		- 0: use the default setting
 * @default_domain_ops: the default ops for domains
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 * @owner: Driver module providing these ops
 */
struct iommu_ops {
	bool (*capable)(enum iommu_cap);

	/* Domain allocation and freeing by the iommu driver */
	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);

	struct iommu_device *(*probe_device)(struct device *dev);
	void (*release_device)(struct device *dev);
	void (*probe_finalize)(struct device *dev);
	struct iommu_group *(*device_group)(struct device *dev);

	/* Request/Free a list of reserved regions for a device */
	void (*get_resv_regions)(struct device *dev, struct list_head *list);

	int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
	bool (*is_attach_deferred)(struct device *dev);

	/* Per device IOMMU features */
	int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
	int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);

	struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm,
				      void *drvdata);
	void (*sva_unbind)(struct iommu_sva *handle);
	u32 (*sva_get_pasid)(struct iommu_sva *handle);

	int (*page_response)(struct device *dev,
			     struct iommu_fault_event *evt,
			     struct iommu_page_response *msg);

	int (*def_domain_type)(struct device *dev);

	const struct iommu_domain_ops *default_domain_ops;
	unsigned long pgsize_bitmap;
	struct module *owner;
};
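
/*
 * Illustrative sketch (not upstream code): an IOMMU driver provides a static
 * iommu_ops instance and registers its hardware instance with the core,
 * typically from its probe routine. All "my_*" names and the page size mask
 * below are hypothetical.
 *
 *	static const struct iommu_ops my_iommu_ops = {
 *		.capable		= my_capable,
 *		.domain_alloc		= my_domain_alloc,
 *		.probe_device		= my_probe_device,
 *		.release_device		= my_release_device,
 *		.device_group		= generic_device_group,
 *		.pgsize_bitmap		= SZ_4K | SZ_2M | SZ_1G,
 *		.owner			= THIS_MODULE,
 *		.default_domain_ops	= &my_domain_ops,
 *	};
 *
 *	ret = iommu_device_sysfs_add(&my_iommu->iommu, dev, NULL, "%s", name);
 *	if (!ret)
 *		ret = iommu_device_register(&my_iommu->iommu, &my_iommu_ops,
 *					    dev);
 */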

/**
 * struct iommu_domain_ops - domain specific operations
 * @attach_dev: attach an iommu domain to a device
 * @detach_dev: detach an iommu domain from a device
 * @map: map a physically contiguous memory region to an iommu domain
 * @map_pages: map a physically contiguous set of pages of the same size to
 *             an iommu domain.
 * @unmap: unmap a physically contiguous memory region from an iommu domain
 * @unmap_pages: unmap a number of pages of the same size from an iommu domain
 * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
 * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
 * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
 *              queue
 * @iova_to_phys: translate iova to physical address
 * @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE,
 *                           including no-snoop TLPs on PCIe or other platform
 *                           specific mechanisms.
 * @enable_nesting: Enable nesting
 * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
 * @free: Release the domain after use.
 */
struct iommu_domain_ops {
	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
	void (*detach_dev)(struct iommu_domain *domain, struct device *dev);

	int (*map)(struct iommu_domain *domain, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
	int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped);
	size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
			size_t size, struct iommu_iotlb_gather *iotlb_gather);
	size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *iotlb_gather);

	void (*flush_iotlb_all)(struct iommu_domain *domain);
	void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
			       size_t size);
	void (*iotlb_sync)(struct iommu_domain *domain,
			   struct iommu_iotlb_gather *iotlb_gather);

	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
				    dma_addr_t iova);

	bool (*enforce_cache_coherency)(struct iommu_domain *domain);
	int (*enable_nesting)(struct iommu_domain *domain);
	int (*set_pgtable_quirks)(struct iommu_domain *domain,
				  unsigned long quirks);

	void (*free)(struct iommu_domain *domain);
};

/**
 * struct iommu_device - IOMMU core representation of one IOMMU hardware
 *			 instance
 * @list: Used by the iommu-core to keep a list of registered iommus
 * @ops: iommu-ops for talking to this iommu
 * @fwnode: firmware handle for this IOMMU instance
 * @dev: struct device for sysfs handling
 */
struct iommu_device {
	struct list_head list;
	const struct iommu_ops *ops;
	struct fwnode_handle *fwnode;
	struct device *dev;
};

/**
 * struct iommu_fault_event - Generic fault event
 *
 * Can represent recoverable faults such as page requests or
 * unrecoverable faults such as DMA or IRQ remapping faults.
 *
 * @fault: fault descriptor
 * @list: pending fault event list, used for tracking responses
 */
struct iommu_fault_event {
	struct iommu_fault fault;
	struct list_head list;
};

/**
 * struct iommu_fault_param - per-device IOMMU fault data
 * @handler: Callback function to handle IOMMU faults at device level
 * @data: handler private data
 * @faults: holds the pending faults which need a response
 * @lock: protect pending faults list
 */
struct iommu_fault_param {
	iommu_dev_fault_handler_t handler;
	void *data;
	struct list_head faults;
	struct mutex lock;
};
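
/*
 * Illustrative sketch (not upstream code): a driver that handles recoverable
 * faults itself registers a device fault handler and completes page-request
 * faults with iommu_page_response(); names and the response fields are
 * simplified here.
 *
 *	static int my_fault_handler(struct iommu_fault *fault, void *data)
 *	{
 *		struct device *dev = data;
 *		struct iommu_page_response resp = {
 *			.version	= IOMMU_PAGE_RESP_VERSION_1,
 *			.pasid		= fault->prm.pasid,
 *			.grpid		= fault->prm.grpid,
 *			.code		= IOMMU_PAGE_RESP_SUCCESS,
 *		};
 *
 *		if (fault->type != IOMMU_FAULT_PAGE_REQ)
 *			return -EOPNOTSUPP;
 *		...
 *		return iommu_page_response(dev, &resp);
 *	}
 *
 *	ret = iommu_register_device_fault_handler(dev, my_fault_handler, dev);
 */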

/**
 * struct dev_iommu - Collection of per-device IOMMU data
 *
 * @fault_param: IOMMU detected device fault reporting data
 * @iopf_param:	 I/O Page Fault queue and data
 * @fwspec:	 IOMMU fwspec data
 * @iommu_dev:	 IOMMU device this device is linked to
 * @priv:	 IOMMU Driver private data
 *
 * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
 *	struct iommu_group	*iommu_group;
 */
struct dev_iommu {
	struct mutex lock;
	struct iommu_fault_param	*fault_param;
	struct iopf_device_param	*iopf_param;
	struct iommu_fwspec		*fwspec;
	struct iommu_device		*iommu_dev;
	void				*priv;
};

int iommu_device_register(struct iommu_device *iommu,
			  const struct iommu_ops *ops,
			  struct device *hwdev);
void iommu_device_unregister(struct iommu_device *iommu);
int  iommu_device_sysfs_add(struct iommu_device *iommu,
			    struct device *parent,
			    const struct attribute_group **groups,
			    const char *fmt, ...) __printf(4, 5);
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int  iommu_device_link(struct iommu_device *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain);

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return (struct iommu_device *)dev_get_drvdata(dev);
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
	*gather = (struct iommu_iotlb_gather) {
		.start	= ULONG_MAX,
		.freelist = LIST_HEAD_INIT(gather->freelist),
	};
}

static inline const struct iommu_ops *dev_iommu_ops(struct device *dev)
{
	/*
	 * Assume that valid ops must be installed if iommu_probe_device()
	 * has succeeded. The device ops are essentially for internal use
	 * within the IOMMU subsystem itself, so we should be able to trust
	 * ourselves not to misuse the helper.
	 */
	return dev->iommu->iommu_dev->ops;
}

extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
extern int bus_iommu_probe(struct bus_type *bus);
extern bool iommu_present(struct bus_type *bus);
extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
extern struct iommu_group *iommu_group_get_by_id(int id);
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
			       struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
				struct device *dev);
extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
				   struct device *dev, ioasid_t pasid);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot);
extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			  size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
			       unsigned long iova, size_t size,
			       struct iommu_iotlb_gather *iotlb_gather);
extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			    struct scatterlist *sg, unsigned int nents,
			    int prot);
extern ssize_t iommu_map_sg_atomic(struct iommu_domain *domain,
				   unsigned long iova, struct scatterlist *sg,
				   unsigned int nents, int prot);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
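
/*
 * Illustrative sketch (not upstream code): mapping a whole scatter-gather
 * list in one call. iommu_map_sg() returns the number of bytes mapped or a
 * negative errno; "sgt" is a hypothetical, previously allocated sg_table.
 *
 *	ssize_t mapped;
 *
 *	mapped = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents,
 *			      IOMMU_READ | IOMMU_WRITE);
 *	if (mapped < 0)
 *		return mapped;
 */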

extern void iommu_set_fault_handler(struct iommu_domain *domain,
			iommu_fault_handler_t handler, void *token);

extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_set_default_passthrough(bool cmd_line);
extern void iommu_set_default_translated(bool cmd_line);
extern bool iommu_default_passthrough(void);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
			enum iommu_resv_type type);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
					struct list_head *head);

extern int iommu_attach_group(struct iommu_domain *domain,
			      struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
			       struct iommu_group *group);
extern struct iommu_group *iommu_group_alloc(void);
extern void *iommu_group_get_iommudata(struct iommu_group *group);
extern void iommu_group_set_iommudata(struct iommu_group *group,
				      void *iommu_data,
				      void (*release)(void *iommu_data));
extern int iommu_group_set_name(struct iommu_group *group, const char *name);
extern int iommu_group_add_device(struct iommu_group *group,
				  struct device *dev);
extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
				    int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);
extern int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data);

extern int iommu_unregister_device_fault_handler(struct device *dev);

extern int iommu_report_device_fault(struct device *dev,
				     struct iommu_fault_event *evt);
extern int iommu_page_response(struct device *dev,
			       struct iommu_page_response *msg);

extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);

int iommu_enable_nesting(struct iommu_domain *domain);
int iommu_set_pgtable_quirks(struct iommu_domain *domain,
		unsigned long quirks);

void iommu_set_dma_strict(void);

extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
			      unsigned long iova, int flags);

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	if (domain->ops->flush_iotlb_all)
		domain->ops->flush_iotlb_all(domain);
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *iotlb_gather)
{
	if (domain->ops->iotlb_sync)
		domain->ops->iotlb_sync(domain, iotlb_gather);

	iommu_iotlb_gather_init(iotlb_gather);
}

/**
 * iommu_iotlb_gather_is_disjoint - Checks whether a new range is disjoint
 *
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to check whether a new range and the gathered range
 * are disjoint. For many IOMMUs, flushing the IOMMU in this case is better
 * than merging the two, which might lead to unnecessary invalidations.
 */
static inline
bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather,
				    unsigned long iova, size_t size)
{
	unsigned long start = iova, end = start + size - 1;

	return gather->end != 0 &&
		(end + 1 < gather->start || start > gather->end + 1);
}


/**
 * iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build arbitrarily-sized invalidation commands
 * where only the address range matters, and simply minimising intermediate
 * syncs is preferred.
 */
static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather,
						unsigned long iova, size_t size)
{
	unsigned long end = iova + size - 1;

	if (gather->start > iova)
		gather->start = iova;
	if (gather->end < end)
		gather->end = end;
}

/**
 * iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation
 * @domain: IOMMU domain to be invalidated
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build invalidation commands based on individual
 * pages, or with page size/table level hints which cannot be gathered if they
 * differ.
 */
static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
	/*
	 * If the new page is disjoint from the current range or is mapped at
	 * a different granularity, then sync the TLB so that the gather
	 * structure can be rewritten.
	 */
	if ((gather->pgsize && gather->pgsize != size) ||
	    iommu_iotlb_gather_is_disjoint(gather, iova, size))
		iommu_iotlb_sync(domain, gather);

	gather->pgsize = size;
	iommu_iotlb_gather_add_range(gather, iova, size);
}

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
	return gather && gather->queued;
}

/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
/* FSL-MC device grouping function */
struct iommu_group *fsl_mc_device_group(struct device *dev);

/**
 * struct iommu_fwspec - per-device IOMMU instance data
 * @ops: ops for this device's IOMMU
 * @iommu_fwnode: firmware handle for this device's IOMMU
 * @flags: IOMMU_FWSPEC_* flags
 * @num_ids: number of associated device IDs
 * @ids: IDs which this device may present to the IOMMU
 */
struct iommu_fwspec {
	const struct iommu_ops	*ops;
	struct fwnode_handle	*iommu_fwnode;
	u32			flags;
	unsigned int		num_ids;
	u32			ids[];
};

/* ATS is supported */
#define IOMMU_FWSPEC_PCI_RC_ATS	(1 << 0)

/**
 * struct iommu_sva - handle to a device-mm bond
 */
struct iommu_sva {
	struct device		*dev;
};

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);
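
/*
 * Illustrative sketch (not upstream code): a firmware-probed IOMMU driver's
 * ->of_xlate() callback typically records the master ID from the "iommus"
 * specifier in the device's fwspec. The function name is hypothetical.
 *
 *	static int my_iommu_of_xlate(struct device *dev,
 *				     struct of_phandle_args *args)
 *	{
 *		u32 id = args->args[0];
 *
 *		return iommu_fwspec_add_ids(dev, &id, 1);
 *	}
 */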

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->fwspec;
	else
		return NULL;
}

static inline void dev_iommu_fwspec_set(struct device *dev,
					struct iommu_fwspec *fwspec)
{
	dev->iommu->fwspec = fwspec;
}

static inline void *dev_iommu_priv_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->priv;
	else
		return NULL;
}

static inline void dev_iommu_priv_set(struct device *dev, void *priv)
{
	dev->iommu->priv = priv;
}

int iommu_probe_device(struct device *dev);
void iommu_release_device(struct device *dev);

int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);

struct iommu_sva *iommu_sva_bind_device(struct device *dev,
					struct mm_struct *mm,
					void *drvdata);
void iommu_sva_unbind_device(struct iommu_sva *handle);
u32 iommu_sva_get_pasid(struct iommu_sva *handle);

int iommu_device_use_default_domain(struct device *dev);
void iommu_device_unuse_default_domain(struct device *dev);

int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner);
void iommu_group_release_dma_owner(struct iommu_group *group);
bool iommu_group_dma_owner_claimed(struct iommu_group *group);

#else /* CONFIG_IOMMU_API */

struct iommu_ops {};
struct iommu_group {};
struct iommu_fwspec {};
struct iommu_device {};
struct iommu_fault_param {};
struct iommu_iotlb_gather {};

static inline bool iommu_present(struct bus_type *bus)
{
	return false;
}

static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	return false;
}

static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
	return false;
}

static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return NULL;
}

static inline struct iommu_group *iommu_group_get_by_id(int id)
{
	return NULL;
}

static inline void iommu_domain_free(struct iommu_domain *domain)
{
}

static inline int iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
}

static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	return NULL;
}

static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot)
{
	return -ENODEV;
}

static inline int iommu_map_atomic(struct iommu_domain *domain,
				   unsigned long iova, phys_addr_t paddr,
				   size_t size, int prot)
{
	return -ENODEV;
}

static inline size_t iommu_unmap(struct iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	return 0;
}

static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
				      unsigned long iova, size_t size,
				      struct iommu_iotlb_gather *iotlb_gather)
{
	return 0;
}

static inline ssize_t iommu_map_sg(struct iommu_domain *domain,
				   unsigned long iova, struct scatterlist *sg,
				   unsigned int nents, int prot)
{
	return -ENODEV;
}

static inline ssize_t iommu_map_sg_atomic(struct iommu_domain *domain,
					  unsigned long iova, struct scatterlist *sg,
					  unsigned int nents, int prot)
{
	return -ENODEV;
}

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *iotlb_gather)
{
}

static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	return 0;
}

static inline void iommu_set_fault_handler(struct iommu_domain *domain,
				iommu_fault_handler_t handler, void *token)
{
}

static inline void iommu_get_resv_regions(struct device *dev,
					  struct list_head *list)
{
}

static inline void iommu_put_resv_regions(struct device *dev,
					  struct list_head *list)
{
}

static inline int iommu_get_group_resv_regions(struct iommu_group *group,
					       struct list_head *head)
{
	return -ENODEV;
}

static inline void iommu_set_default_passthrough(bool cmd_line)
{
}

static inline void iommu_set_default_translated(bool cmd_line)
{
}

static inline bool iommu_default_passthrough(void)
{
	return true;
}

static inline int iommu_attach_group(struct iommu_domain *domain,
				     struct iommu_group *group)
{
	return -ENODEV;
}

static inline void iommu_detach_group(struct iommu_domain *domain,
				      struct iommu_group *group)
{
}

static inline struct iommu_group *iommu_group_alloc(void)
{
	return ERR_PTR(-ENODEV);
}

static inline void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return NULL;
}

static inline void iommu_group_set_iommudata(struct iommu_group *group,
					     void *iommu_data,
					     void (*release)(void *iommu_data))
{
}

static inline int iommu_group_set_name(struct iommu_group *group,
				       const char *name)
{
	return -ENODEV;
}

static inline int iommu_group_add_device(struct iommu_group *group,
					 struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_group_remove_device(struct device *dev)
{
}

static inline int iommu_group_for_each_dev(struct iommu_group *group,
					   void *data,
					   int (*fn)(struct device *, void *))
{
	return -ENODEV;
}

static inline struct iommu_group *iommu_group_get(struct device *dev)
{
	return NULL;
}

static inline void iommu_group_put(struct iommu_group *group)
{
}

static inline
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	return -ENODEV;
}

static inline int iommu_unregister_device_fault_handler(struct device *dev)
{
	return 0;
}

static inline
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	return -ENODEV;
}

static inline int iommu_page_response(struct device *dev,
				      struct iommu_page_response *msg)
{
	return -ENODEV;
}

static inline int iommu_group_id(struct iommu_group *group)
{
	return -ENODEV;
}

static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain,
		unsigned long quirks)
{
	return 0;
}

static inline int iommu_device_register(struct iommu_device *iommu,
					const struct iommu_ops *ops,
					struct device *hwdev)
{
	return -ENODEV;
}

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return NULL;
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
}

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
	return false;
}

static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}

static inline int iommu_device_sysfs_add(struct iommu_device *iommu,
					 struct device *parent,
					 const struct attribute_group **groups,
					 const char *fmt, ...)
{
	return -ENODEV;
}

static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
}

static inline int iommu_device_link(struct iommu_device *iommu,
				    struct device *link)
{
	return -EINVAL;
}

static inline void iommu_device_unlink(struct iommu_device *iommu,
				       struct device *link)
{
}

static inline int iommu_fwspec_init(struct device *dev,
				    struct fwnode_handle *iommu_fwnode,
				    const struct iommu_ops *ops)
{
	return -ENODEV;
}

static inline void iommu_fwspec_free(struct device *dev)
{
}

static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
				       int num_ids)
{
	return -ENODEV;
}

static inline
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
	return NULL;
}

static inline int
iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline int
iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
{
	return NULL;
}

static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
{
}

static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	return IOMMU_PASID_INVALID;
}

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	return NULL;
}

static inline int iommu_device_use_default_domain(struct device *dev)
{
	return 0;
}

static inline void iommu_device_unuse_default_domain(struct device *dev)
{
}

static inline int
iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
{
	return -ENODEV;
}

static inline void iommu_group_release_dma_owner(struct iommu_group *group)
{
}

static inline bool iommu_group_dma_owner_claimed(struct iommu_group *group)
{
	return false;
}
#endif /* CONFIG_IOMMU_API */

/**
 * iommu_map_sgtable - Map the given buffer to the IOMMU domain
 * @domain:	The IOMMU domain to perform the mapping
 * @iova:	The start address to map the buffer
 * @sgt:	The sg_table object describing the buffer
 * @prot:	IOMMU protection bits
 *
 * Creates a mapping at @iova for the buffer described by a scatterlist
 * stored in the given sg_table object in the provided IOMMU domain.
 */
static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
			unsigned long iova, struct sg_table *sgt, int prot)
{
	return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot);
}

#ifdef CONFIG_IOMMU_DEBUGFS
extern struct dentry *iommu_debugfs_dir;
void iommu_debugfs_setup(void);
#else
static inline void iommu_debugfs_setup(void) {}
#endif

#endif /* __LINUX_IOMMU_H */