/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 */

#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H

#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/ioasid.h>
#include <uapi/linux/iommu.h>

#define IOMMU_READ	(1 << 0)
#define IOMMU_WRITE	(1 << 1)
#define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC	(1 << 3)
#define IOMMU_MMIO	(1 << 4) /* e.g. things like MSI doorbells */
/*
 * Where the bus hardware includes a privilege level as part of its access type
 * markings, and certain devices are capable of issuing transactions marked as
 * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
 * given permission flags only apply to accesses at the higher privilege level,
 * and that unprivileged transactions should have as little access as possible.
 * This would usually imply the same permissions as kernel mappings on the CPU,
 * if the IOMMU page table format is equivalent.
 */
#define IOMMU_PRIV	(1 << 5)

struct iommu_ops;
struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct iommu_domain_ops;
struct notifier_block;
struct iommu_sva;
struct iommu_fault_event;
struct iommu_dma_cookie;

/* iommu fault flags */
#define IOMMU_FAULT_READ	0x0
#define IOMMU_FAULT_WRITE	0x1

typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
			struct device *, unsigned long, int, void *);
typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);

struct iommu_domain_geometry {
	dma_addr_t aperture_start; /* First address that can be mapped    */
	dma_addr_t aperture_end;   /* Last address that can be mapped     */
	bool force_aperture;       /* DMA only allowed in mappable range? */
};

/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING	(1U << 0)  /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API	(1U << 1)  /* Domain for use in DMA-API
					      implementation              */
#define __IOMMU_DOMAIN_PT	(1U << 2)  /* Domain is identity mapped   */
#define __IOMMU_DOMAIN_DMA_FQ	(1U << 3)  /* DMA-API uses flush queue    */

#define __IOMMU_DOMAIN_SVA	(1U << 4)  /* Shared process address space */

/*
 * These are the possible domain types:
 *
 * IOMMU_DOMAIN_BLOCKED	   - All DMA is blocked, can be used to isolate
 *			     devices
 * IOMMU_DOMAIN_IDENTITY   - DMA addresses are system physical addresses
 * IOMMU_DOMAIN_UNMANAGED  - DMA mappings managed by IOMMU-API user, used
 *			     for VMs
 * IOMMU_DOMAIN_DMA	   - Internally used for DMA-API implementations.
 *			     This flag allows IOMMU drivers to implement
 *			     certain optimizations for these domains
 * IOMMU_DOMAIN_DMA_FQ	   - As above, but definitely using batched TLB
 *			     invalidation.
 * IOMMU_DOMAIN_SVA	   - DMA addresses are shared process addresses
 *			     represented by mm_struct's.
 */
#define IOMMU_DOMAIN_BLOCKED	(0U)
#define IOMMU_DOMAIN_IDENTITY	(__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED	(__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API)
#define IOMMU_DOMAIN_DMA_FQ	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API |	\
				 __IOMMU_DOMAIN_DMA_FQ)
#define IOMMU_DOMAIN_SVA	(__IOMMU_DOMAIN_SVA)
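
/*
 * Illustrative sketch (not part of this header): a typical IOMMU-API user,
 * such as a device-assignment driver, allocates an unmanaged domain, attaches
 * a device to it and tears it down again. Names and error handling are
 * assumptions, kept minimal for brevity.
 *
 *	struct iommu_domain *domain;
 *
 *	domain = iommu_domain_alloc(dev->bus);
 *	if (!domain)
 *		return -ENOMEM;
 *	if (iommu_attach_device(domain, dev)) {
 *		iommu_domain_free(domain);
 *		return -ENODEV;
 *	}
 *	... map and unmap DMA buffers with iommu_map()/iommu_unmap() ...
 *	iommu_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 */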

struct iommu_domain {
	unsigned type;
	const struct iommu_domain_ops *ops;
	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	struct iommu_domain_geometry geometry;
	struct iommu_dma_cookie *iova_cookie;
	enum iommu_page_response_code (*iopf_handler)(struct iommu_fault *fault,
						      void *data);
	void *fault_data;
	union {
		struct {
			iommu_fault_handler_t handler;
			void *handler_token;
		};
		struct {	/* IOMMU_DOMAIN_SVA */
			struct mm_struct *mm;
			int users;
		};
	};
};

static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
{
	return domain->type & __IOMMU_DOMAIN_DMA_API;
}

enum iommu_cap {
	IOMMU_CAP_CACHE_COHERENCY,	/* IOMMU_CACHE is supported */
	IOMMU_CAP_NOEXEC,		/* IOMMU_NOEXEC flag */
	IOMMU_CAP_PRE_BOOT_PROTECTION,	/* Firmware says it used the IOMMU for
					   DMA protection and we should too */
	/*
	 * Per-device flag indicating if enforce_cache_coherency() will work on
	 * this device.
	 */
	IOMMU_CAP_ENFORCE_CACHE_COHERENCY,
};

/* These are the possible reserved region types */
enum iommu_resv_type {
	/* Memory regions which must be mapped 1:1 at all times */
	IOMMU_RESV_DIRECT,
	/*
	 * Memory regions which are advertised to be 1:1 but are
	 * commonly considered relaxable in some conditions,
	 * for instance in device assignment use case (USB, Graphics)
	 */
	IOMMU_RESV_DIRECT_RELAXABLE,
	/* Arbitrary "never map this or give it to a device" address ranges */
	IOMMU_RESV_RESERVED,
	/* Hardware MSI region (untranslated) */
	IOMMU_RESV_MSI,
	/* Software-managed MSI translation window */
	IOMMU_RESV_SW_MSI,
};

/**
 * struct iommu_resv_region - descriptor for a reserved memory region
 * @list: Linked list pointers
 * @start: System physical start address of the region
 * @length: Length of the region in bytes
 * @prot: IOMMU Protection flags (READ/WRITE/...)
 * @type: Type of the reserved region
 * @free: Callback to free associated memory allocations
 */
struct iommu_resv_region {
	struct list_head list;
	phys_addr_t start;
	size_t length;
	int prot;
	enum iommu_resv_type type;
	void (*free)(struct device *dev, struct iommu_resv_region *region);
};

struct iommu_iort_rmr_data {
	struct iommu_resv_region rr;

	/* Stream IDs associated with IORT RMR entry */
	const u32 *sids;
	u32 num_sids;
};
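
/*
 * Illustrative sketch (assumption, not from this header): walking a device's
 * reserved regions so their IOVA ranges can be excluded from an allocator.
 * iommu_get_resv_regions()/iommu_put_resv_regions() are declared further
 * below; reserve_identity_range() is a hypothetical helper.
 *
 *	LIST_HEAD(resv_regions);
 *	struct iommu_resv_region *region;
 *
 *	iommu_get_resv_regions(dev, &resv_regions);
 *	list_for_each_entry(region, &resv_regions, list) {
 *		if (region->type == IOMMU_RESV_DIRECT)
 *			reserve_identity_range(region->start, region->length);
 *	}
 *	iommu_put_resv_regions(dev, &resv_regions);
 */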

/**
 * enum iommu_dev_features - Per device IOMMU features
 * @IOMMU_DEV_FEAT_SVA: Shared Virtual Addresses
 * @IOMMU_DEV_FEAT_IOPF: I/O Page Faults such as PRI or Stall. Generally
 *			 enabling %IOMMU_DEV_FEAT_SVA requires
 *			 %IOMMU_DEV_FEAT_IOPF, but some devices manage I/O Page
 *			 Faults themselves instead of relying on the IOMMU. When
 *			 supported, this feature must be enabled before and
 *			 disabled after %IOMMU_DEV_FEAT_SVA.
 *
 * Device drivers enable a feature using iommu_dev_enable_feature().
 */
enum iommu_dev_features {
	IOMMU_DEV_FEAT_SVA,
	IOMMU_DEV_FEAT_IOPF,
};

#define IOMMU_PASID_INVALID	(-1U)

#ifdef CONFIG_IOMMU_API

/**
 * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
 *
 * @start: IOVA representing the start of the range to be flushed
 * @end: IOVA representing the end of the range to be flushed (inclusive)
 * @pgsize: The interval at which to perform the flush
 * @freelist: Removed pages to free after sync
 * @queued: Indicates that the flush will be queued
 *
 * This structure is intended to be updated by multiple calls to the
 * ->unmap() function in struct iommu_ops before eventually being passed
 * into ->iotlb_sync(). Drivers can add pages to @freelist to be freed after
 * ->iotlb_sync() or ->iotlb_flush_all() have cleared all cached references to
 * them. @queued is set to indicate when ->iotlb_flush_all() will be called
 * later instead of ->iotlb_sync(), so drivers may optimise accordingly.
 */
struct iommu_iotlb_gather {
	unsigned long		start;
	unsigned long		end;
	size_t			pgsize;
	struct list_head	freelist;
	bool			queued;
};
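
/*
 * Illustrative sketch (assumption, not from this header): the caller-side
 * pattern for a batched unmap. iommu_unmap_fast() and iommu_iotlb_sync() are
 * declared further below; the IOVA range is hypothetical.
 *
 *	struct iommu_iotlb_gather gather;
 *
 *	iommu_iotlb_gather_init(&gather);
 *	iommu_unmap_fast(domain, iova, size, &gather);
 *	iommu_iotlb_sync(domain, &gather);
 */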

/**
 * struct iommu_ops - iommu ops and capabilities
 * @capable: check capability
 * @domain_alloc: allocate iommu domain
 * @probe_device: Add device to iommu driver handling
 * @release_device: Remove device from iommu driver handling
 * @probe_finalize: Do final setup work after the device is added to an IOMMU
 *                  group and attached to the group's domain
 * @set_platform_dma_ops: Return control back to the platform DMA ops. This op
 *                        is to support old IOMMU drivers, new drivers should
 *                        use default domains, and the common IOMMU DMA ops.
 * @device_group: find iommu group for a particular device
 * @get_resv_regions: Request list of reserved regions for a device
 * @of_xlate: add OF master IDs to iommu grouping
 * @is_attach_deferred: Check if domain attach should be deferred from iommu
 *                      driver init to device driver init (default no)
 * @dev_enable/disable_feat: per device entries to enable/disable
 *                           iommu specific features.
 * @page_response: handle page request response
 * @def_domain_type: device default domain type, return value:
 *		- IOMMU_DOMAIN_IDENTITY: must use an identity domain
 *		- IOMMU_DOMAIN_DMA: must use a dma domain
 *		- 0: use the default setting
 * @default_domain_ops: the default ops for domains
 * @remove_dev_pasid: Remove any translation configurations of a specific
 *                    pasid, so that any DMA transactions with this pasid
 *                    will be blocked by the hardware.
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 * @owner: Driver module providing these ops
 */
struct iommu_ops {
	bool (*capable)(struct device *dev, enum iommu_cap);

	/* Domain allocation and freeing by the iommu driver */
	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);

	struct iommu_device *(*probe_device)(struct device *dev);
	void (*release_device)(struct device *dev);
	void (*probe_finalize)(struct device *dev);
	void (*set_platform_dma_ops)(struct device *dev);
	struct iommu_group *(*device_group)(struct device *dev);

	/* Request/Free a list of reserved regions for a device */
	void (*get_resv_regions)(struct device *dev, struct list_head *list);

	int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
	bool (*is_attach_deferred)(struct device *dev);

	/* Per device IOMMU features */
	int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
	int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);

	int (*page_response)(struct device *dev,
			     struct iommu_fault_event *evt,
			     struct iommu_page_response *msg);

	int (*def_domain_type)(struct device *dev);
	void (*remove_dev_pasid)(struct device *dev, ioasid_t pasid);

	const struct iommu_domain_ops *default_domain_ops;
	unsigned long pgsize_bitmap;
	struct module *owner;
};

/**
 * struct iommu_domain_ops - domain specific operations
 * @attach_dev: attach an iommu domain to a device
 *  Return:
 * * 0		- success
 * * EINVAL	- can indicate that device and domain are incompatible due to
 *		  some previous configuration of the domain, in which case the
 *		  driver shouldn't log an error, since it is legitimate for a
 *		  caller to test reuse of existing domains. Otherwise, it may
 *		  still represent some other fundamental problem
 * * ENOMEM	- out of memory
 * * ENOSPC	- non-ENOMEM type of resource allocation failures
 * * EBUSY	- device is attached to a domain and cannot be changed
 * * ENODEV	- device specific errors, not able to be attached
 * * <others>	- treated as ENODEV by the caller. Use is discouraged
 * @set_dev_pasid: attach an iommu domain to a PASID of a device
 * @map: map a physically contiguous memory region to an iommu domain
 * @map_pages: map a physically contiguous set of pages of the same size to
 *             an iommu domain.
 * @unmap: unmap a physically contiguous memory region from an iommu domain
 * @unmap_pages: unmap a number of pages of the same size from an iommu domain
 * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
 * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
 * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
 *              queue
 * @iova_to_phys: translate iova to physical address
 * @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE,
 *                           including no-snoop TLPs on PCIe or other platform
 *                           specific mechanisms.
 * @enable_nesting: Enable nesting
 * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
 * @free: Release the domain after use.
 */
struct iommu_domain_ops {
	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev,
			     ioasid_t pasid);

	int (*map)(struct iommu_domain *domain, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
	int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped);
	size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
			size_t size, struct iommu_iotlb_gather *iotlb_gather);
	size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *iotlb_gather);

	void (*flush_iotlb_all)(struct iommu_domain *domain);
	void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
			       size_t size);
	void (*iotlb_sync)(struct iommu_domain *domain,
			   struct iommu_iotlb_gather *iotlb_gather);

	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
				    dma_addr_t iova);

	bool (*enforce_cache_coherency)(struct iommu_domain *domain);
	int (*enable_nesting)(struct iommu_domain *domain);
	int (*set_pgtable_quirks)(struct iommu_domain *domain,
				  unsigned long quirks);

	void (*free)(struct iommu_domain *domain);
};
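
/*
 * Illustrative sketch (assumption, not from this header): how a caller can
 * use the documented -EINVAL semantics of @attach_dev to probe whether an
 * existing domain can be reused for another device before falling back to a
 * fresh allocation.
 *
 *	ret = iommu_attach_device(existing_domain, dev);
 *	if (ret == -EINVAL) {
 *		// Incompatible, not an error: allocate a new domain instead.
 *		new_domain = iommu_domain_alloc(dev->bus);
 *		...
 *	}
 */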

/**
 * struct iommu_device - IOMMU core representation of one IOMMU hardware
 *			 instance
 * @list: Used by the iommu-core to keep a list of registered iommus
 * @ops: iommu-ops for talking to this iommu
 * @fwnode: firmware handle for this iommu
 * @dev: struct device for sysfs handling
 * @max_pasids: number of supported PASIDs
 */
struct iommu_device {
	struct list_head list;
	const struct iommu_ops *ops;
	struct fwnode_handle *fwnode;
	struct device *dev;
	u32 max_pasids;
};

/**
 * struct iommu_fault_event - Generic fault event
 *
 * Can represent recoverable faults such as page requests or
 * unrecoverable faults such as DMA or IRQ remapping faults.
 *
 * @fault: fault descriptor
 * @list: pending fault event list, used for tracking responses
 */
struct iommu_fault_event {
	struct iommu_fault fault;
	struct list_head list;
};

/**
 * struct iommu_fault_param - per-device IOMMU fault data
 * @handler: Callback function to handle IOMMU faults at device level
 * @data: handler private data
 * @faults: holds the pending faults which need a response
 * @lock: protect pending faults list
 */
struct iommu_fault_param {
	iommu_dev_fault_handler_t handler;
	void *data;
	struct list_head faults;
	struct mutex lock;
};

/**
 * struct dev_iommu - Collection of per-device IOMMU data
 *
 * @fault_param: IOMMU detected device fault reporting data
 * @iopf_param:	 I/O Page Fault queue and data
 * @fwspec:	 IOMMU fwspec data
 * @iommu_dev:	 IOMMU device this device is linked to
 * @priv:	 IOMMU Driver private data
 * @max_pasids:  number of PASIDs this device can consume
 * @attach_deferred: the dma domain attachment is deferred
 *
 * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
 *	struct iommu_group	*iommu_group;
 */
struct dev_iommu {
	struct mutex lock;
	struct iommu_fault_param	*fault_param;
	struct iopf_device_param	*iopf_param;
	struct iommu_fwspec		*fwspec;
	struct iommu_device		*iommu_dev;
	void				*priv;
	u32				max_pasids;
	u32				attach_deferred:1;
};
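
/*
 * Illustrative sketch (assumption, not from this header): registering a
 * device-level fault handler. my_fault_handler and my_data are hypothetical;
 * iommu_register_device_fault_handler() is declared further below.
 *
 *	static int my_fault_handler(struct iommu_fault *fault, void *data)
 *	{
 *		// Queue the fault for processing; respond later with
 *		// iommu_page_response() for recoverable page requests.
 *		return 0;
 *	}
 *
 *	ret = iommu_register_device_fault_handler(dev, my_fault_handler,
 *						  my_data);
 */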

int iommu_device_register(struct iommu_device *iommu,
			  const struct iommu_ops *ops,
			  struct device *hwdev);
void iommu_device_unregister(struct iommu_device *iommu);
int  iommu_device_sysfs_add(struct iommu_device *iommu,
			    struct device *parent,
			    const struct attribute_group **groups,
			    const char *fmt, ...) __printf(4, 5);
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int  iommu_device_link(struct iommu_device *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain);

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return (struct iommu_device *)dev_get_drvdata(dev);
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
	*gather = (struct iommu_iotlb_gather) {
		.start	= ULONG_MAX,
		.freelist = LIST_HEAD_INIT(gather->freelist),
	};
}

static inline const struct iommu_ops *dev_iommu_ops(struct device *dev)
{
	/*
	 * Assume that valid ops must be installed if iommu_probe_device()
	 * has succeeded. The device ops are essentially for internal use
	 * within the IOMMU subsystem itself, so we should be able to trust
	 * ourselves not to misuse the helper.
	 */
	return dev->iommu->iommu_dev->ops;
}
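
/*
 * Illustrative sketch (assumption, not from this header): how an IOMMU driver
 * typically registers its hardware instance with the core, using the
 * registration helpers declared above. my_iommu and my_iommu_ops are
 * hypothetical driver objects.
 *
 *	ret = iommu_device_sysfs_add(&my_iommu->iommu, dev, NULL, "my-iommu");
 *	if (ret)
 *		return ret;
 *	ret = iommu_device_register(&my_iommu->iommu, &my_iommu_ops, dev);
 *	if (ret)
 *		iommu_device_sysfs_remove(&my_iommu->iommu);
 */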

extern int bus_iommu_probe(struct bus_type *bus);
extern bool iommu_present(struct bus_type *bus);
extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
extern bool iommu_group_has_isolated_msi(struct iommu_group *group);
extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
extern struct iommu_group *iommu_group_get_by_id(int id);
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
			       struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
				struct device *dev);
extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
				   struct device *dev, ioasid_t pasid);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			  size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
			       unsigned long iova, size_t size,
			       struct iommu_iotlb_gather *iotlb_gather);
extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			    struct scatterlist *sg, unsigned int nents,
			    int prot, gfp_t gfp);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
			iommu_fault_handler_t handler, void *token);

extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_set_default_passthrough(bool cmd_line);
extern void iommu_set_default_translated(bool cmd_line);
extern bool iommu_default_passthrough(void);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
			enum iommu_resv_type type, gfp_t gfp);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
					struct list_head *head);

extern int iommu_attach_group(struct iommu_domain *domain,
			      struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
			       struct iommu_group *group);
extern struct iommu_group *iommu_group_alloc(void);
extern void *iommu_group_get_iommudata(struct iommu_group *group);
extern void iommu_group_set_iommudata(struct iommu_group *group,
				      void *iommu_data,
				      void (*release)(void *iommu_data));
extern int iommu_group_set_name(struct iommu_group *group, const char *name);
extern int iommu_group_add_device(struct iommu_group *group,
				  struct device *dev);
extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
				    int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);
extern int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data);

extern int iommu_unregister_device_fault_handler(struct device *dev);

extern int iommu_report_device_fault(struct device *dev,
				     struct iommu_fault_event *evt);
extern int iommu_page_response(struct device *dev,
			       struct iommu_page_response *msg);

extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);

int iommu_enable_nesting(struct iommu_domain *domain);
int iommu_set_pgtable_quirks(struct iommu_domain *domain,
		unsigned long quirks);

void iommu_set_dma_strict(void);

extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
			      unsigned long iova, int flags);
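
/*
 * Illustrative sketch (assumption, not from this header): mapping a single
 * physically contiguous buffer into a domain and unmapping it again. iova and
 * paddr are placeholder values.
 *
 *	ret = iommu_map(domain, iova, paddr, SZ_4K,
 *			IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	...
 *	iommu_unmap(domain, iova, SZ_4K);
 */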

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	if (domain->ops->flush_iotlb_all)
		domain->ops->flush_iotlb_all(domain);
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *iotlb_gather)
{
	if (domain->ops->iotlb_sync)
		domain->ops->iotlb_sync(domain, iotlb_gather);

	iommu_iotlb_gather_init(iotlb_gather);
}

/**
 * iommu_iotlb_gather_is_disjoint - Checks whether a new range is disjoint
 *
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to check whether a new range and the gathered range
 * are disjoint. For many IOMMUs, flushing the IOMMU in this case is better
 * than merging the two, which might lead to unnecessary invalidations.
 */
static inline
bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather,
				    unsigned long iova, size_t size)
{
	unsigned long start = iova, end = start + size - 1;

	return gather->end != 0 &&
		(end + 1 < gather->start || start > gather->end + 1);
}


/**
 * iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build arbitrarily-sized invalidation commands
 * where only the address range matters, and simply minimising intermediate
 * syncs is preferred.
 */
static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather,
						unsigned long iova, size_t size)
{
	unsigned long end = iova + size - 1;

	if (gather->start > iova)
		gather->start = iova;
	if (gather->end < end)
		gather->end = end;
}

/**
 * iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation
 * @domain: IOMMU domain to be invalidated
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build invalidation commands based on individual
 * pages, or with page size/table level hints which cannot be gathered if they
 * differ.
 */
static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
	/*
	 * If the new page is disjoint from the current range or is mapped at
	 * a different granularity, then sync the TLB so that the gather
	 * structure can be rewritten.
	 */
	if ((gather->pgsize && gather->pgsize != size) ||
	    iommu_iotlb_gather_is_disjoint(gather, iova, size))
		iommu_iotlb_sync(domain, gather);

	gather->pgsize = size;
	iommu_iotlb_gather_add_range(gather, iova, size);
}

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
	return gather && gather->queued;
}

/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
/* FSL-MC device grouping function */
struct iommu_group *fsl_mc_device_group(struct device *dev);
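
/*
 * Illustrative sketch (assumption, not from this header): how an IOMMU
 * driver's ->unmap() implementation typically feeds the gather helpers
 * above, here with a hypothetical page-table walker my_pgtable_unmap().
 *
 *	static size_t my_unmap(struct iommu_domain *domain, unsigned long iova,
 *			       size_t size, struct iommu_iotlb_gather *gather)
 *	{
 *		size_t unmapped = my_pgtable_unmap(domain, iova, size);
 *
 *		// Record the range; this may sync early if the new page is
 *		// disjoint or has a different granule than what was gathered.
 *		iommu_iotlb_gather_add_page(domain, gather, iova, unmapped);
 *		return unmapped;
 *	}
 */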

/**
 * struct iommu_fwspec - per-device IOMMU instance data
 * @ops: ops for this device's IOMMU
 * @iommu_fwnode: firmware handle for this device's IOMMU
 * @flags: IOMMU_FWSPEC_* flags
 * @num_ids: number of associated device IDs
 * @ids: IDs which this device may present to the IOMMU
 *
 * Note that the IDs (and any other information, really) stored in this
 * structure should be considered private to the IOMMU device driver and are
 * not to be used directly by IOMMU consumers.
 */
struct iommu_fwspec {
	const struct iommu_ops	*ops;
	struct fwnode_handle	*iommu_fwnode;
	u32			flags;
	unsigned int		num_ids;
	u32			ids[];
};

/* ATS is supported */
#define IOMMU_FWSPEC_PCI_RC_ATS			(1 << 0)

/**
 * struct iommu_sva - handle to a device-mm bond
 */
struct iommu_sva {
	struct device			*dev;
	struct iommu_domain		*domain;
};

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->fwspec;
	else
		return NULL;
}

static inline void dev_iommu_fwspec_set(struct device *dev,
					struct iommu_fwspec *fwspec)
{
	dev->iommu->fwspec = fwspec;
}

static inline void *dev_iommu_priv_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->priv;
	else
		return NULL;
}

static inline void dev_iommu_priv_set(struct device *dev, void *priv)
{
	dev->iommu->priv = priv;
}

int iommu_probe_device(struct device *dev);
void iommu_release_device(struct device *dev);

int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);

int iommu_device_use_default_domain(struct device *dev);
void iommu_device_unuse_default_domain(struct device *dev);

int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner);
void iommu_group_release_dma_owner(struct iommu_group *group);
bool iommu_group_dma_owner_claimed(struct iommu_group *group);

int iommu_device_claim_dma_owner(struct device *dev, void *owner);
void iommu_device_release_dma_owner(struct device *dev);

struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
					    struct mm_struct *mm);
int iommu_attach_device_pasid(struct iommu_domain *domain,
			      struct device *dev, ioasid_t pasid);
void iommu_detach_device_pasid(struct iommu_domain *domain,
			       struct device *dev, ioasid_t pasid);
struct iommu_domain *
iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
			       unsigned int type);
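
/*
 * Illustrative sketch (assumption, not from this header): attaching an SVA
 * domain to a specific PASID, using the functions declared just above, so
 * that DMA tagged with that PASID shares the process page tables. The pasid
 * value is a placeholder.
 *
 *	struct iommu_domain *sva_domain;
 *
 *	sva_domain = iommu_sva_domain_alloc(dev, current->mm);
 *	if (!sva_domain)
 *		return -ENOMEM;
 *	ret = iommu_attach_device_pasid(sva_domain, dev, pasid);
 *	...
 *	iommu_detach_device_pasid(sva_domain, dev, pasid);
 */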

#else /* CONFIG_IOMMU_API */

struct iommu_ops {};
struct iommu_group {};
struct iommu_fwspec {};
struct iommu_device {};
struct iommu_fault_param {};
struct iommu_iotlb_gather {};

static inline bool iommu_present(struct bus_type *bus)
{
	return false;
}

static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	return false;
}

static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return NULL;
}

static inline struct iommu_group *iommu_group_get_by_id(int id)
{
	return NULL;
}

static inline void iommu_domain_free(struct iommu_domain *domain)
{
}

static inline int iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
}

static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	return NULL;
}

static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	return -ENODEV;
}

static inline size_t iommu_unmap(struct iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	return 0;
}

static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
				      unsigned long iova, size_t size,
				      struct iommu_iotlb_gather *iotlb_gather)
{
	return 0;
}

static inline ssize_t iommu_map_sg(struct iommu_domain *domain,
				   unsigned long iova, struct scatterlist *sg,
				   unsigned int nents, int prot, gfp_t gfp)
{
	return -ENODEV;
}

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *iotlb_gather)
{
}

static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	return 0;
}

static inline void iommu_set_fault_handler(struct iommu_domain *domain,
				iommu_fault_handler_t handler, void *token)
{
}

static inline void iommu_get_resv_regions(struct device *dev,
					  struct list_head *list)
{
}

static inline void iommu_put_resv_regions(struct device *dev,
					  struct list_head *list)
{
}

static inline int iommu_get_group_resv_regions(struct iommu_group *group,
					       struct list_head *head)
{
	return -ENODEV;
}

static inline void iommu_set_default_passthrough(bool cmd_line)
{
}

static inline void iommu_set_default_translated(bool cmd_line)
{
}

static inline bool iommu_default_passthrough(void)
{
	return true;
}

static inline int iommu_attach_group(struct iommu_domain *domain,
				     struct iommu_group *group)
{
	return -ENODEV;
}

static inline void iommu_detach_group(struct iommu_domain *domain,
				      struct iommu_group *group)
{
}

static inline struct iommu_group *iommu_group_alloc(void)
{
	return ERR_PTR(-ENODEV);
}

static inline void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return NULL;
}

static inline void iommu_group_set_iommudata(struct iommu_group *group,
					     void *iommu_data,
					     void (*release)(void *iommu_data))
{
}

static inline int iommu_group_set_name(struct iommu_group *group,
				       const char *name)
{
	return -ENODEV;
}

static inline int iommu_group_add_device(struct iommu_group *group,
					 struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_group_remove_device(struct device *dev)
{
}

static inline int iommu_group_for_each_dev(struct iommu_group *group,
					   void *data,
					   int (*fn)(struct device *, void *))
{
	return -ENODEV;
}

static inline struct iommu_group *iommu_group_get(struct device *dev)
{
	return NULL;
}

static inline void iommu_group_put(struct iommu_group *group)
{
}

static inline
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	return -ENODEV;
}

static inline int iommu_unregister_device_fault_handler(struct device *dev)
{
	return 0;
}

static inline
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	return -ENODEV;
}

static inline int iommu_page_response(struct device *dev,
				      struct iommu_page_response *msg)
{
	return -ENODEV;
}

static inline int iommu_group_id(struct iommu_group *group)
{
	return -ENODEV;
}

static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain,
					   unsigned long quirks)
{
	return 0;
}

static inline int iommu_device_register(struct iommu_device *iommu,
					const struct iommu_ops *ops,
					struct device *hwdev)
{
	return -ENODEV;
}

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return NULL;
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
}

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
	return false;
}

static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}

static inline int iommu_device_sysfs_add(struct iommu_device *iommu,
					 struct device *parent,
					 const struct attribute_group **groups,
					 const char *fmt, ...)
{
	return -ENODEV;
}

static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
}

static inline int iommu_device_link(struct iommu_device *iommu,
				    struct device *link)
{
	return -EINVAL;
}

static inline void iommu_device_unlink(struct iommu_device *iommu,
				       struct device *link)
{
}

static inline int iommu_fwspec_init(struct device *dev,
				    struct fwnode_handle *iommu_fwnode,
				    const struct iommu_ops *ops)
{
	return -ENODEV;
}

static inline void iommu_fwspec_free(struct device *dev)
{
}

static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
				       int num_ids)
{
	return -ENODEV;
}

static inline
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
	return NULL;
}

static inline int
iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline int
iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	return NULL;
}

static inline int iommu_device_use_default_domain(struct device *dev)
{
	return 0;
}

static inline void iommu_device_unuse_default_domain(struct device *dev)
{
}

static inline int
iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
{
	return -ENODEV;
}

static inline void iommu_group_release_dma_owner(struct iommu_group *group)
{
}

static inline bool iommu_group_dma_owner_claimed(struct iommu_group *group)
{
	return false;
}

static inline void iommu_device_release_dma_owner(struct device *dev)
{
}

static inline int iommu_device_claim_dma_owner(struct device *dev, void *owner)
{
	return -ENODEV;
}

static inline struct iommu_domain *
iommu_sva_domain_alloc(struct device *dev, struct mm_struct *mm)
{
	return NULL;
}

static inline int iommu_attach_device_pasid(struct iommu_domain *domain,
					    struct device *dev, ioasid_t pasid)
{
	return -ENODEV;
}

static inline void iommu_detach_device_pasid(struct iommu_domain *domain,
					     struct device *dev, ioasid_t pasid)
{
}

static inline struct iommu_domain *
iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
			       unsigned int type)
{
	return NULL;
}
#endif /* CONFIG_IOMMU_API */

/**
 * iommu_map_sgtable - Map the given buffer to the IOMMU domain
 * @domain: The IOMMU domain to perform the mapping
 * @iova: The start address to map the buffer
 * @sgt: The sg_table object describing the buffer
 * @prot: IOMMU protection bits
 *
 * Creates a mapping at @iova for the buffer described by a scatterlist
 * stored in the given sg_table object in the provided IOMMU domain.
 */
static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
			unsigned long iova, struct sg_table *sgt, int prot)
{
	return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot,
			    GFP_KERNEL);
}
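
/*
 * Illustrative sketch (assumption, not from this header): mapping a buffer
 * described by an sg_table with the helper defined just above. buf_size is a
 * placeholder for the total buffer length in bytes.
 *
 *	size_t mapped;
 *
 *	mapped = iommu_map_sgtable(domain, iova, sgt,
 *				   IOMMU_READ | IOMMU_WRITE);
 *	if (mapped != buf_size)
 *		return -ENXIO;	// error or partial mapping
 */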

#ifdef CONFIG_IOMMU_DEBUGFS
extern	struct dentry *iommu_debugfs_dir;
void iommu_debugfs_setup(void);
#else
static inline void iommu_debugfs_setup(void) {}
#endif

#ifdef CONFIG_IOMMU_DMA
#include <linux/msi.h>

/* Setup call for arch DMA mapping code */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit);

int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);

int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg);

#else /* CONFIG_IOMMU_DMA */

struct msi_desc;
struct msi_msg;

static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
{
}

static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	return -ENODEV;
}

static inline int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	return 0;
}

static inline void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
}

#endif	/* CONFIG_IOMMU_DMA */

/*
 * Newer generations of Tegra SoCs require devices' stream IDs to be directly
 * programmed into some registers. These are always paired with a Tegra SMMU
 * or ARM SMMU, for which the contents of the struct iommu_fwspec are known.
 * Use this helper to formalize access to these internals.
 */
#define TEGRA_STREAM_ID_BYPASS 0x7f

static inline bool tegra_dev_iommu_get_stream_id(struct device *dev, u32 *stream_id)
{
#ifdef CONFIG_IOMMU_API
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec && fwspec->num_ids == 1) {
		*stream_id = fwspec->ids[0] & 0xffff;
		return true;
	}
#endif

	return false;
}

#ifdef CONFIG_IOMMU_SVA
struct iommu_sva *iommu_sva_bind_device(struct device *dev,
					struct mm_struct *mm);
void iommu_sva_unbind_device(struct iommu_sva *handle);
u32 iommu_sva_get_pasid(struct iommu_sva *handle);
#else
static inline struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
	return NULL;
}

static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
{
}

static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	return IOMMU_PASID_INVALID;
}
#endif	/* CONFIG_IOMMU_SVA */

#endif /* __LINUX_IOMMU_H */