/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 */

#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H

#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/iova_bitmap.h>

#define IOMMU_READ	(1 << 0)
#define IOMMU_WRITE	(1 << 1)
#define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC	(1 << 3)
#define IOMMU_MMIO	(1 << 4) /* e.g. things like MSI doorbells */
/*
 * Where the bus hardware includes a privilege level as part of its access type
 * markings, and certain devices are capable of issuing transactions marked as
 * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
 * given permission flags only apply to accesses at the higher privilege level,
 * and that unprivileged transactions should have as little access as possible.
 * This would usually imply the same permissions as kernel mappings on the CPU,
 * if the IOMMU page table format is equivalent.
 */
#define IOMMU_PRIV	(1 << 5)

struct iommu_ops;
struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct iommu_domain_ops;
struct iommu_dirty_ops;
struct notifier_block;
struct iommu_sva;
struct iommu_dma_cookie;
struct iommu_fault_param;

#define IOMMU_FAULT_PERM_READ	(1 << 0) /* read */
#define IOMMU_FAULT_PERM_WRITE	(1 << 1) /* write */
#define IOMMU_FAULT_PERM_EXEC	(1 << 2) /* exec */
#define IOMMU_FAULT_PERM_PRIV	(1 << 3) /* privileged */

/* Generic fault types, can be expanded for IRQ remapping faults */
enum iommu_fault_type {
	IOMMU_FAULT_PAGE_REQ = 1,	/* page request fault */
};

/**
 * struct iommu_fault_page_request - Page Request data
 * @flags: encodes whether the corresponding fields are valid and whether this
 *         is the last page in group (IOMMU_FAULT_PAGE_REQUEST_* values).
 *         When IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID is set, the page response
 *         must have the same PASID value as the page request. When it is clear,
 *         the page response should not have a PASID.
 * @pasid: Process Address Space ID
 * @grpid: Page Request Group Index
 * @perm: requested page permissions (IOMMU_FAULT_PERM_* values)
 * @addr: page address
 * @private_data: device-specific private information
 */
struct iommu_fault_page_request {
#define IOMMU_FAULT_PAGE_REQUEST_PASID_VALID	(1 << 0)
#define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE	(1 << 1)
#define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID	(1 << 2)
	u32	flags;
	u32	pasid;
	u32	grpid;
	u32	perm;
	u64	addr;
	u64	private_data[2];
};

/**
 * struct iommu_fault - Generic fault data
 * @type: fault type from &enum iommu_fault_type
 * @prm: Page Request message, when @type is %IOMMU_FAULT_PAGE_REQ
 */
struct iommu_fault {
	u32 type;
	struct iommu_fault_page_request prm;
};
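
/*
 * Illustrative sketch (not part of the header itself): an IOMMU driver that
 * has decoded a page request from its hardware would describe it with the
 * structures above, wrap it in a struct iopf_fault (defined below) and pass
 * it to iommu_report_device_fault(). The "hw_*" values are placeholders.
 *
 *	struct iommu_fault fault = {
 *		.type = IOMMU_FAULT_PAGE_REQ,
 *		.prm = {
 *			.flags = IOMMU_FAULT_PAGE_REQUEST_PASID_VALID |
 *				 IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE,
 *			.pasid = hw_pasid,
 *			.grpid = hw_grpid,
 *			.perm  = IOMMU_FAULT_PERM_READ,
 *			.addr  = hw_addr,
 *		},
 *	};
 */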

/**
 * enum iommu_page_response_code - Return status of fault handlers
 * @IOMMU_PAGE_RESP_SUCCESS: Fault has been handled and the page tables
 *	populated, retry the access. This is "Success" in PCI PRI.
 * @IOMMU_PAGE_RESP_FAILURE: General error. Drop all subsequent faults from
 *	this device if possible. This is "Response Failure" in PCI PRI.
 * @IOMMU_PAGE_RESP_INVALID: Could not handle this fault, don't retry the
 *	access. This is "Invalid Request" in PCI PRI.
 */
enum iommu_page_response_code {
	IOMMU_PAGE_RESP_SUCCESS = 0,
	IOMMU_PAGE_RESP_INVALID,
	IOMMU_PAGE_RESP_FAILURE,
};

/**
 * struct iommu_page_response - Generic page response information
 * @pasid: Process Address Space ID
 * @grpid: Page Request Group Index
 * @code: response code from &enum iommu_page_response_code
 */
struct iommu_page_response {
	u32	pasid;
	u32	grpid;
	u32	code;
};

struct iopf_fault {
	struct iommu_fault fault;
	/* node for pending lists */
	struct list_head list;
};

struct iopf_group {
	struct iopf_fault last_fault;
	struct list_head faults;
	/* list node for iommu_fault_param::faults */
	struct list_head pending_node;
	struct work_struct work;
	struct iommu_domain *domain;
	/* The device's fault data parameter. */
	struct iommu_fault_param *fault_param;
};

/**
 * struct iopf_queue - IO Page Fault queue
 * @wq: the fault workqueue
 * @devices: devices attached to this queue
 * @lock: protects the device list
 */
struct iopf_queue {
	struct workqueue_struct *wq;
	struct list_head devices;
	struct mutex lock;
};

/* iommu fault flags */
#define IOMMU_FAULT_READ	0x0
#define IOMMU_FAULT_WRITE	0x1

typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
			struct device *, unsigned long, int, void *);

struct iommu_domain_geometry {
	dma_addr_t aperture_start; /* First address that can be mapped    */
	dma_addr_t aperture_end;   /* Last address that can be mapped     */
	bool force_aperture;       /* DMA only allowed in mappable range? */
};

/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING	(1U << 0)  /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API	(1U << 1)  /* Domain for use in DMA-API
					      implementation              */
#define __IOMMU_DOMAIN_PT	(1U << 2)  /* Domain is identity mapped   */
#define __IOMMU_DOMAIN_DMA_FQ	(1U << 3)  /* DMA-API uses flush queue    */

#define __IOMMU_DOMAIN_SVA	(1U << 4)  /* Shared process address space */
#define __IOMMU_DOMAIN_PLATFORM	(1U << 5)

#define __IOMMU_DOMAIN_NESTED	(1U << 6)  /* User-managed address space nested
					      on a stage-2 translation        */

#define IOMMU_DOMAIN_ALLOC_FLAGS ~__IOMMU_DOMAIN_DMA_FQ
/*
 * These are the possible domain types
 *
 *	IOMMU_DOMAIN_BLOCKED	- All DMA is blocked, can be used to isolate
 *				  devices
 *	IOMMU_DOMAIN_IDENTITY	- DMA addresses are system physical addresses
 *	IOMMU_DOMAIN_UNMANAGED	- DMA mappings managed by IOMMU-API user, used
 *				  for VMs
 *	IOMMU_DOMAIN_DMA	- Internally used for DMA-API implementations.
 *				  This flag allows IOMMU drivers to implement
 *				  certain optimizations for these domains
 *	IOMMU_DOMAIN_DMA_FQ	- As above, but definitely using batched TLB
 *				  invalidation.
 *	IOMMU_DOMAIN_SVA	- DMA addresses are shared process addresses
 *				  represented by mm_struct's.
 *	IOMMU_DOMAIN_PLATFORM	- Legacy domain for drivers that do their own
 *				  dma_api stuff. Do not use in new drivers.
 */
#define IOMMU_DOMAIN_BLOCKED	(0U)
#define IOMMU_DOMAIN_IDENTITY	(__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED	(__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API)
#define IOMMU_DOMAIN_DMA_FQ	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API |	\
				 __IOMMU_DOMAIN_DMA_FQ)
#define IOMMU_DOMAIN_SVA	(__IOMMU_DOMAIN_SVA)
#define IOMMU_DOMAIN_PLATFORM	(__IOMMU_DOMAIN_PLATFORM)
#define IOMMU_DOMAIN_NESTED	(__IOMMU_DOMAIN_NESTED)

struct iommu_domain {
	unsigned type;
	const struct iommu_domain_ops *ops;
	const struct iommu_dirty_ops *dirty_ops;
	const struct iommu_ops *owner; /* Whose domain_alloc we came from */
	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	struct iommu_domain_geometry geometry;
	struct iommu_dma_cookie *iova_cookie;
	int (*iopf_handler)(struct iopf_group *group);
	void *fault_data;
	union {
		struct {
			iommu_fault_handler_t handler;
			void *handler_token;
		};
		struct {	/* IOMMU_DOMAIN_SVA */
			struct mm_struct *mm;
			int users;
			/*
			 * Next iommu_domain in mm->iommu_mm->sva_domains list
			 * protected by iommu_sva_lock.
			 */
			struct list_head next;
		};
	};
};

static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
{
	return domain->type & __IOMMU_DOMAIN_DMA_API;
}

enum iommu_cap {
	IOMMU_CAP_CACHE_COHERENCY,	/* IOMMU_CACHE is supported */
	IOMMU_CAP_NOEXEC,		/* IOMMU_NOEXEC flag */
	IOMMU_CAP_PRE_BOOT_PROTECTION,	/* Firmware says it used the IOMMU for
					   DMA protection and we should too */
	/*
	 * Per-device flag indicating if enforce_cache_coherency() will work on
	 * this device.
	 */
	IOMMU_CAP_ENFORCE_CACHE_COHERENCY,
	/*
	 * IOMMU driver does not issue TLB maintenance during .unmap, so can
	 * usefully support the non-strict DMA flush queue.
	 */
	IOMMU_CAP_DEFERRED_FLUSH,
	IOMMU_CAP_DIRTY_TRACKING,	/* IOMMU supports dirty tracking */
};

/* These are the possible reserved region types */
enum iommu_resv_type {
	/* Memory regions which must be mapped 1:1 at all times */
	IOMMU_RESV_DIRECT,
	/*
	 * Memory regions which are advertised to be 1:1 but are
	 * commonly considered relaxable in some conditions,
	 * for instance in device assignment use case (USB, Graphics)
	 */
	IOMMU_RESV_DIRECT_RELAXABLE,
	/* Arbitrary "never map this or give it to a device" address ranges */
	IOMMU_RESV_RESERVED,
	/* Hardware MSI region (untranslated) */
	IOMMU_RESV_MSI,
	/* Software-managed MSI translation window */
	IOMMU_RESV_SW_MSI,
};

/**
 * struct iommu_resv_region - descriptor for a reserved memory region
 * @list: Linked list pointers
 * @start: System physical start address of the region
 * @length: Length of the region in bytes
 * @prot: IOMMU Protection flags (READ/WRITE/...)
 * @type: Type of the reserved region
 * @free: Callback to free associated memory allocations
 */
struct iommu_resv_region {
	struct list_head	list;
	phys_addr_t		start;
	size_t			length;
	int			prot;
	enum iommu_resv_type	type;
	void (*free)(struct device *dev, struct iommu_resv_region *region);
};
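
/*
 * Illustrative sketch (not taken from this header): a driver's
 * ->get_resv_regions() callback typically allocates regions with
 * iommu_alloc_resv_region() (declared below) and appends them to the list
 * passed in by the core. The MSI doorbell base and size used here are
 * made-up values.
 *
 *	static void my_iommu_get_resv_regions(struct device *dev,
 *					      struct list_head *head)
 *	{
 *		struct iommu_resv_region *region;
 *
 *		region = iommu_alloc_resv_region(0x08000000, 0x100000,
 *						 IOMMU_WRITE | IOMMU_MMIO,
 *						 IOMMU_RESV_SW_MSI, GFP_KERNEL);
 *		if (region)
 *			list_add_tail(&region->list, head);
 *	}
 */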

struct iommu_iort_rmr_data {
	struct iommu_resv_region rr;

	/* Stream IDs associated with IORT RMR entry */
	const u32 *sids;
	u32 num_sids;
};

/**
 * enum iommu_dev_features - Per device IOMMU features
 * @IOMMU_DEV_FEAT_SVA: Shared Virtual Addresses
 * @IOMMU_DEV_FEAT_IOPF: I/O Page Faults such as PRI or Stall. Generally
 *			 enabling %IOMMU_DEV_FEAT_SVA requires
 *			 %IOMMU_DEV_FEAT_IOPF, but some devices manage I/O Page
 *			 Faults themselves instead of relying on the IOMMU. When
 *			 supported, this feature must be enabled before and
 *			 disabled after %IOMMU_DEV_FEAT_SVA.
 *
 * Device drivers enable a feature using iommu_dev_enable_feature().
 */
enum iommu_dev_features {
	IOMMU_DEV_FEAT_SVA,
	IOMMU_DEV_FEAT_IOPF,
};

#define IOMMU_NO_PASID			(0U) /* Reserved for DMA w/o PASID */
#define IOMMU_FIRST_GLOBAL_PASID	(1U) /* starting range for allocation */
#define IOMMU_PASID_INVALID		(-1U)
typedef unsigned int ioasid_t;

#ifdef CONFIG_IOMMU_API

/**
 * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
 *
 * @start: IOVA representing the start of the range to be flushed
 * @end: IOVA representing the end of the range to be flushed (inclusive)
 * @pgsize: The interval at which to perform the flush
 * @freelist: Removed pages to free after sync
 * @queued: Indicates that the flush will be queued
 *
 * This structure is intended to be updated by multiple calls to the
 * ->unmap() function in struct iommu_ops before eventually being passed
 * into ->iotlb_sync(). Drivers can add pages to @freelist to be freed after
 * ->iotlb_sync() or ->iotlb_flush_all() have cleared all cached references to
 * them. @queued is set to indicate when ->iotlb_flush_all() will be called
 * later instead of ->iotlb_sync(), so drivers may optimise accordingly.
 */
struct iommu_iotlb_gather {
	unsigned long		start;
	unsigned long		end;
	size_t			pgsize;
	struct list_head	freelist;
	bool			queued;
};

/**
 * struct iommu_dirty_bitmap - Dirty IOVA bitmap state
 * @bitmap: IOVA bitmap
 * @gather: Range information for a pending IOTLB flush
 */
struct iommu_dirty_bitmap {
	struct iova_bitmap *bitmap;
	struct iommu_iotlb_gather *gather;
};

/* Read but do not clear any dirty bits */
#define IOMMU_DIRTY_NO_CLEAR (1 << 0)

/**
 * struct iommu_dirty_ops - domain specific dirty tracking operations
 * @set_dirty_tracking: Enable or Disable dirty tracking on the iommu domain
 * @read_and_clear_dirty: Walk IOMMU page tables for dirtied PTEs marshalled
 *                        into a bitmap, with a bit represented as a page.
 *                        Reads the dirty PTE bits and clears them from the IO
 *                        pagetables.
 */
struct iommu_dirty_ops {
	int (*set_dirty_tracking)(struct iommu_domain *domain, bool enabled);
	int (*read_and_clear_dirty)(struct iommu_domain *domain,
				    unsigned long iova, size_t size,
				    unsigned long flags,
				    struct iommu_dirty_bitmap *dirty);
};
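
/*
 * Illustrative sketch (not from this header): a user of dirty tracking, such
 * as the iommufd code, would wire a struct iommu_dirty_bitmap to an
 * iova_bitmap and ask the domain's dirty_ops to harvest dirty PTE bits for a
 * range. "bitmap", "iova" and "size" are placeholders here.
 *
 *	struct iommu_iotlb_gather gather;
 *	struct iommu_dirty_bitmap dirty;
 *	int ret;
 *
 *	iommu_dirty_bitmap_init(&dirty, bitmap, &gather);
 *	ret = domain->dirty_ops->read_and_clear_dirty(domain, iova, size,
 *						      0, &dirty);
 *	iommu_iotlb_sync(domain, &gather);
 */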

/**
 * struct iommu_user_data - iommu driver specific user space data info
 * @type: The data type of the user buffer
 * @uptr: Pointer to the user buffer for copy_from_user()
 * @len: The length of the user buffer in bytes
 *
 * The user space data is defined by a uAPI in include/uapi/linux/iommufd.h.
 * @type, @uptr and @len should be just copied from an iommufd core uAPI struct.
 */
struct iommu_user_data {
	unsigned int type;
	void __user *uptr;
	size_t len;
};

/**
 * struct iommu_user_data_array - iommu driver specific user space data array
 * @type: The data type of all the entries in the user buffer array
 * @uptr: Pointer to the user buffer array
 * @entry_len: The fixed-width length of an entry in the array, in bytes
 * @entry_num: The number of total entries in the array
 *
 * The user buffer includes an array of requests with format defined in
 * include/uapi/linux/iommufd.h
 */
struct iommu_user_data_array {
	unsigned int type;
	void __user *uptr;
	size_t entry_len;
	u32 entry_num;
};

/**
 * __iommu_copy_struct_from_user - Copy iommu driver specific user space data
 * @dst_data: Pointer to an iommu driver specific user data that is defined in
 *            include/uapi/linux/iommufd.h
 * @src_data: Pointer to a struct iommu_user_data for user space data info
 * @data_type: The data type of the @dst_data. Must match with @src_data.type
 * @data_len: Length of current user data structure, i.e. sizeof(struct _dst)
 * @min_len: Initial length of user data structure for backward compatibility.
 *           This should be offsetofend using the last member in the user data
 *           struct that was initially added to include/uapi/linux/iommufd.h
 */
static inline int __iommu_copy_struct_from_user(
	void *dst_data, const struct iommu_user_data *src_data,
	unsigned int data_type, size_t data_len, size_t min_len)
{
	if (src_data->type != data_type)
		return -EINVAL;
	if (WARN_ON(!dst_data || !src_data))
		return -EINVAL;
	if (src_data->len < min_len || data_len < src_data->len)
		return -EINVAL;
	return copy_struct_from_user(dst_data, data_len, src_data->uptr,
				     src_data->len);
}

/**
 * iommu_copy_struct_from_user - Copy iommu driver specific user space data
 * @kdst: Pointer to an iommu driver specific user data that is defined in
 *        include/uapi/linux/iommufd.h
 * @user_data: Pointer to a struct iommu_user_data for user space data info
 * @data_type: The data type of the @kdst. Must match with @user_data->type
 * @min_last: The last member of the data structure @kdst points to in the
 *            initial version.
 * Return 0 for success, otherwise -error.
 */
#define iommu_copy_struct_from_user(kdst, user_data, data_type, min_last) \
	__iommu_copy_struct_from_user(kdst, user_data, data_type,         \
				      sizeof(*kdst),                       \
				      offsetofend(typeof(*kdst), min_last))

/**
 * __iommu_copy_struct_from_user_array - Copy iommu driver specific user space
 *                                       data from an iommu_user_data_array
 * @dst_data: Pointer to an iommu driver specific user data that is defined in
 *            include/uapi/linux/iommufd.h
 * @src_array: Pointer to a struct iommu_user_data_array for a user space array
 * @data_type: The data type of the @dst_data. Must match with @src_array.type
 * @index: Index to the location in the array to copy user data from
 * @data_len: Length of current user data structure, i.e. sizeof(struct _dst)
 * @min_len: Initial length of user data structure for backward compatibility.
 *           This should be offsetofend using the last member in the user data
 *           struct that was initially added to include/uapi/linux/iommufd.h
 */
static inline int __iommu_copy_struct_from_user_array(
	void *dst_data, const struct iommu_user_data_array *src_array,
	unsigned int data_type, unsigned int index, size_t data_len,
	size_t min_len)
{
	struct iommu_user_data src_data;

	if (WARN_ON(!src_array || index >= src_array->entry_num))
		return -EINVAL;
	if (!src_array->entry_num)
		return -EINVAL;
	src_data.uptr = src_array->uptr + src_array->entry_len * index;
	src_data.len = src_array->entry_len;
	src_data.type = src_array->type;

	return __iommu_copy_struct_from_user(dst_data, &src_data, data_type,
					     data_len, min_len);
}

/**
 * iommu_copy_struct_from_user_array - Copy iommu driver specific user space
 *                                     data from an iommu_user_data_array
 * @kdst: Pointer to an iommu driver specific user data that is defined in
 *        include/uapi/linux/iommufd.h
 * @user_array: Pointer to a struct iommu_user_data_array for a user space
 *              array
 * @data_type: The data type of the @kdst. Must match with @user_array->type
 * @index: Index to the location in the array to copy user data from
 * @min_last: The last member of the data structure @kdst points to in the
 *            initial version.
 * Return 0 for success, otherwise -error.
 */
#define iommu_copy_struct_from_user_array(kdst, user_array, data_type, index, \
					  min_last)                            \
	__iommu_copy_struct_from_user_array(                                   \
		kdst, user_array, data_type, index, sizeof(*(kdst)),           \
		offsetofend(typeof(*(kdst)), min_last))
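
/*
 * Illustrative sketch (the structure and type names are hypothetical, not a
 * real uAPI): a driver's ->domain_alloc_user() implementation might pull its
 * vendor-specific structure out of the iommufd-provided struct
 * iommu_user_data like this, where "flags" was the final member of the first
 * published version of struct my_hwpt_data:
 *
 *	struct my_hwpt_data data;
 *	int ret;
 *
 *	ret = iommu_copy_struct_from_user(&data, user_data,
 *					  MY_HWPT_DATA_TYPE, flags);
 *	if (ret)
 *		return ERR_PTR(ret);
 */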

/**
 * struct iommu_ops - iommu ops and capabilities
 * @capable: check capability
 * @hw_info: report iommu hardware information. The data buffer returned by this
 *           op is allocated in the iommu driver and freed by the caller after
 *           use. The information type is one of enum iommu_hw_info_type defined
 *           in include/uapi/linux/iommufd.h.
 * @domain_alloc: allocate and return an iommu domain on success, otherwise
 *                NULL is returned. The domain is not fully initialized until
 *                the caller iommu_domain_alloc() returns.
 * @domain_alloc_user: Allocate an iommu domain corresponding to the input
 *                     parameters as defined in include/uapi/linux/iommufd.h.
 *                     Unlike @domain_alloc, it is called only by IOMMUFD and
 *                     must fully initialize the new domain before return.
 *                     Upon success, if the @user_data is valid and the @parent
 *                     points to a kernel-managed domain, the new domain must be
 *                     IOMMU_DOMAIN_NESTED type; otherwise, the @parent must be
 *                     NULL while the @user_data can be optionally provided, and
 *                     the new domain must support __IOMMU_DOMAIN_PAGING.
 *                     Upon failure, ERR_PTR must be returned.
 * @domain_alloc_paging: Allocate an iommu_domain that can be used for
 *                       UNMANAGED, DMA, and DMA_FQ domain types.
 * @domain_alloc_sva: Allocate an iommu_domain for Shared Virtual Addressing.
 * @probe_device: Add device to iommu driver handling
 * @release_device: Remove device from iommu driver handling
 * @probe_finalize: Do final setup work after the device is added to an IOMMU
 *                  group and attached to the group's domain
 * @device_group: find iommu group for a particular device
 * @get_resv_regions: Request list of reserved regions for a device
 * @of_xlate: add OF master IDs to iommu grouping
 * @is_attach_deferred: Check if domain attach should be deferred from iommu
 *                      driver init to device driver init (default no)
 * @dev_enable/disable_feat: per device entries to enable/disable
 *                           iommu specific features.
 * @page_response: handle page request response
 * @def_domain_type: device default domain type, return value:
 *		- IOMMU_DOMAIN_IDENTITY: must use an identity domain
 *		- IOMMU_DOMAIN_DMA: must use a dma domain
 *		- 0: use the default setting
 * @default_domain_ops: the default ops for domains
 * @remove_dev_pasid: Remove any translation configurations of a specific
 *                    pasid, so that any DMA transactions with this pasid
 *                    will be blocked by the hardware.
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 * @owner: Driver module providing these ops
 * @identity_domain: An always available, always attachable identity
 *                   translation.
 * @blocked_domain: An always available, always attachable blocking
 *                  translation.
 * @default_domain: If not NULL this will always be set as the default domain.
 *                  This should be an IDENTITY/BLOCKED/PLATFORM domain.
 *                  Do not use in new drivers.
 */
struct iommu_ops {
	bool (*capable)(struct device *dev, enum iommu_cap);
	void *(*hw_info)(struct device *dev, u32 *length, u32 *type);

	/* Domain allocation and freeing by the iommu driver */
	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
	struct iommu_domain *(*domain_alloc_user)(
		struct device *dev, u32 flags, struct iommu_domain *parent,
		const struct iommu_user_data *user_data);
	struct iommu_domain *(*domain_alloc_paging)(struct device *dev);
	struct iommu_domain *(*domain_alloc_sva)(struct device *dev,
						 struct mm_struct *mm);

	struct iommu_device *(*probe_device)(struct device *dev);
	void (*release_device)(struct device *dev);
	void (*probe_finalize)(struct device *dev);
	struct iommu_group *(*device_group)(struct device *dev);

	/* Request/Free a list of reserved regions for a device */
	void (*get_resv_regions)(struct device *dev, struct list_head *list);

	int (*of_xlate)(struct device *dev, const struct of_phandle_args *args);
	bool (*is_attach_deferred)(struct device *dev);

	/* Per device IOMMU features */
	int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
	int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);

	void (*page_response)(struct device *dev, struct iopf_fault *evt,
			      struct iommu_page_response *msg);

	int (*def_domain_type)(struct device *dev);
	void (*remove_dev_pasid)(struct device *dev, ioasid_t pasid,
				 struct iommu_domain *domain);

	const struct iommu_domain_ops *default_domain_ops;
	unsigned long pgsize_bitmap;
	struct module *owner;
	struct iommu_domain *identity_domain;
	struct iommu_domain *blocked_domain;
	struct iommu_domain *release_domain;
	struct iommu_domain *default_domain;
};
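
/*
 * Illustrative sketch (driver names are hypothetical): an IOMMU driver
 * typically provides one static iommu_ops instance and registers each
 * hardware instance against it with iommu_device_register(), declared later
 * in this header:
 *
 *	static const struct iommu_ops my_iommu_ops = {
 *		.capable		= my_iommu_capable,
 *		.probe_device		= my_iommu_probe_device,
 *		.release_device		= my_iommu_release_device,
 *		.device_group		= generic_device_group,
 *		.pgsize_bitmap		= SZ_4K | SZ_2M | SZ_1G,
 *		.owner			= THIS_MODULE,
 *		.default_domain_ops	= &my_iommu_domain_ops,
 *	};
 *
 *	ret = iommu_device_register(&my_iommu->iommu, &my_iommu_ops, dev);
 */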

/**
 * struct iommu_domain_ops - domain specific operations
 * @attach_dev: attach an iommu domain to a device
 *  Return:
 * * 0		- success
 * * EINVAL	- can indicate that device and domain are incompatible due to
 *		  some previous configuration of the domain, in which case the
 *		  driver shouldn't log an error, since it is legitimate for a
 *		  caller to test reuse of existing domains. Otherwise, it may
 *		  still represent some other fundamental problem
 * * ENOMEM	- out of memory
 * * ENOSPC	- non-ENOMEM type of resource allocation failures
 * * EBUSY	- device is attached to a domain and cannot be changed
 * * ENODEV	- device specific errors, not able to be attached
 * * <others>	- treated as ENODEV by the caller. Use is discouraged
 * @set_dev_pasid: set an iommu domain to a pasid of a device
 * @map_pages: map a physically contiguous set of pages of the same size to
 *             an iommu domain.
 * @unmap_pages: unmap a number of pages of the same size from an iommu domain
 * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
 * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
 * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
 *              queue
 * @cache_invalidate_user: Flush hardware cache for user space IO page table.
 *                         The @domain must be IOMMU_DOMAIN_NESTED. The @array
 *                         passes in the cache invalidation requests, in the
 *                         form of a driver data structure. The driver must
 *                         update array->entry_num to report the number of
 *                         handled invalidation requests. The driver data
 *                         structure must be defined in
 *                         include/uapi/linux/iommufd.h
 * @iova_to_phys: translate iova to physical address
 * @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE,
 *                           including no-snoop TLPs on PCIe or other platform
 *                           specific mechanisms.
 * @enable_nesting: Enable nesting
 * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
 * @free: Release the domain after use.
 */
struct iommu_domain_ops {
	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev,
			     ioasid_t pasid);

	int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped);
	size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *iotlb_gather);

	void (*flush_iotlb_all)(struct iommu_domain *domain);
	int (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
			      size_t size);
	void (*iotlb_sync)(struct iommu_domain *domain,
			   struct iommu_iotlb_gather *iotlb_gather);
	int (*cache_invalidate_user)(struct iommu_domain *domain,
				     struct iommu_user_data_array *array);

	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
				    dma_addr_t iova);

	bool (*enforce_cache_coherency)(struct iommu_domain *domain);
	int (*enable_nesting)(struct iommu_domain *domain);
	int (*set_pgtable_quirks)(struct iommu_domain *domain,
				  unsigned long quirks);

	void (*free)(struct iommu_domain *domain);
};

/**
 * struct iommu_device - IOMMU core representation of one IOMMU hardware
 *			 instance
 * @list: Used by the iommu-core to keep a list of registered iommus
 * @ops: iommu-ops for talking to this iommu
 * @dev: struct device for sysfs handling
 * @singleton_group: Used internally for drivers that have only one group
 * @max_pasids: number of supported PASIDs
 */
struct iommu_device {
	struct list_head list;
	const struct iommu_ops *ops;
	struct fwnode_handle *fwnode;
	struct device *dev;
	struct iommu_group *singleton_group;
	u32 max_pasids;
};

/**
 * struct iommu_fault_param - per-device IOMMU fault data
 * @lock: protect pending faults list
 * @users: user counter to manage the lifetime of the data
 * @rcu: rcu head for kfree_rcu()
 * @dev: the device that owns this param
 * @queue: IOPF queue
 * @queue_list: index into queue->devices
 * @partial: faults that are part of a Page Request Group for which the last
 *           request hasn't been submitted yet.
 * @faults: holds the pending faults which need response
 */
struct iommu_fault_param {
	struct mutex lock;
	refcount_t users;
	struct rcu_head rcu;

	struct device *dev;
	struct iopf_queue *queue;
	struct list_head queue_list;

	struct list_head partial;
	struct list_head faults;
};

/**
 * struct dev_iommu - Collection of per-device IOMMU data
 *
 * @fault_param: IOMMU detected device fault reporting data
 * @fwspec:	 IOMMU fwspec data
 * @iommu_dev:	 IOMMU device this device is linked to
 * @priv:	 IOMMU Driver private data
 * @max_pasids:  number of PASIDs this device can consume
 * @attach_deferred: the dma domain attachment is deferred
 * @pci_32bit_workaround: Limit DMA allocations to 32-bit IOVAs
 * @require_direct: device requires IOMMU_RESV_DIRECT regions
 * @shadow_on_flush: IOTLB flushes are used to sync shadow tables
 *
 * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
 *	struct iommu_group	*iommu_group;
 */
struct dev_iommu {
	struct mutex lock;
	struct iommu_fault_param __rcu *fault_param;
	struct iommu_fwspec *fwspec;
	struct iommu_device *iommu_dev;
	void *priv;
	u32 max_pasids;
	u32 attach_deferred:1;
	u32 pci_32bit_workaround:1;
	u32 require_direct:1;
	u32 shadow_on_flush:1;
};

int iommu_device_register(struct iommu_device *iommu,
			  const struct iommu_ops *ops,
			  struct device *hwdev);
void iommu_device_unregister(struct iommu_device *iommu);
int iommu_device_sysfs_add(struct iommu_device *iommu,
			   struct device *parent,
			   const struct attribute_group **groups,
			   const char *fmt, ...) __printf(4, 5);
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int iommu_device_link(struct iommu_device *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain);

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return (struct iommu_device *)dev_get_drvdata(dev);
}

/**
 * iommu_get_iommu_dev - Get iommu_device for a device
 * @dev: an end-point device
 *
 * Note that this function must be called from within the iommu_ops callbacks
 * to retrieve the iommu_device for a device; the core code guarantees it will
 * not invoke an op without an attached iommu.
 */
static inline struct iommu_device *__iommu_get_iommu_dev(struct device *dev)
{
	return dev->iommu->iommu_dev;
}

#define iommu_get_iommu_dev(dev, type, member) \
	 container_of(__iommu_get_iommu_dev(dev), type, member)

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
	*gather = (struct iommu_iotlb_gather) {
		.start	= ULONG_MAX,
		.freelist = LIST_HEAD_INIT(gather->freelist),
	};
}

extern int bus_iommu_probe(const struct bus_type *bus);
extern bool iommu_present(const struct bus_type *bus);
extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
extern bool iommu_group_has_isolated_msi(struct iommu_group *group);
extern struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus);
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
			       struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
				struct device *dev);
extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
				   struct device *dev, ioasid_t pasid);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			  size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
			       unsigned long iova, size_t size,
			       struct iommu_iotlb_gather *iotlb_gather);
extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			    struct scatterlist *sg, unsigned int nents,
			    int prot, gfp_t gfp);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
			iommu_fault_handler_t handler, void *token);

extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_set_default_passthrough(bool cmd_line);
extern void iommu_set_default_translated(bool cmd_line);
extern bool iommu_default_passthrough(void);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
			enum iommu_resv_type type, gfp_t gfp);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
					struct list_head *head);
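
/*
 * Illustrative sketch (error handling trimmed, iova/paddr are placeholders):
 * a kernel user of the IOMMU API creates an unmanaged domain, attaches the
 * device and then manages mappings explicitly with the functions above:
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
 *
 *	if (!domain)
 *		return -ENOMEM;
 *	if (iommu_attach_device(domain, dev))
 *		goto err_free;
 *
 *	iommu_map(domain, iova, paddr, SZ_2M, IOMMU_READ | IOMMU_WRITE,
 *		  GFP_KERNEL);
 *	...
 *	iommu_unmap(domain, iova, SZ_2M);
 *	iommu_detach_device(domain, dev);
 * err_free:
 *	iommu_domain_free(domain);
 */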

extern int iommu_attach_group(struct iommu_domain *domain,
			      struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
			       struct iommu_group *group);
extern struct iommu_group *iommu_group_alloc(void);
extern void *iommu_group_get_iommudata(struct iommu_group *group);
extern void iommu_group_set_iommudata(struct iommu_group *group,
				      void *iommu_data,
				      void (*release)(void *iommu_data));
extern int iommu_group_set_name(struct iommu_group *group, const char *name);
extern int iommu_group_add_device(struct iommu_group *group,
				  struct device *dev);
extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
				    int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);

extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);

int iommu_enable_nesting(struct iommu_domain *domain);
int iommu_set_pgtable_quirks(struct iommu_domain *domain,
			     unsigned long quirks);

void iommu_set_dma_strict(void);

extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
			      unsigned long iova, int flags);

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	if (domain->ops->flush_iotlb_all)
		domain->ops->flush_iotlb_all(domain);
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *iotlb_gather)
{
	if (domain->ops->iotlb_sync)
		domain->ops->iotlb_sync(domain, iotlb_gather);

	iommu_iotlb_gather_init(iotlb_gather);
}

/**
 * iommu_iotlb_gather_is_disjoint - Checks whether a new range is disjoint
 *
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to check whether a new range and the gathered range
 * are disjoint. For many IOMMUs, flushing the IOMMU in this case is better
 * than merging the two, which might lead to unnecessary invalidations.
 */
static inline
bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather,
				    unsigned long iova, size_t size)
{
	unsigned long start = iova, end = start + size - 1;

	return gather->end != 0 &&
		(end + 1 < gather->start || start > gather->end + 1);
}


/**
 * iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build arbitrarily-sized invalidation commands
 * where only the address range matters, and simply minimising intermediate
 * syncs is preferred.
 */
static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather,
						unsigned long iova, size_t size)
{
	unsigned long end = iova + size - 1;

	if (gather->start > iova)
		gather->start = iova;
	if (gather->end < end)
		gather->end = end;
}

/**
 * iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation
 * @domain: IOMMU domain to be invalidated
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build invalidation commands based on individual
 * pages, or with page size/table level hints which cannot be gathered if they
 * differ.
 */
static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
	/*
	 * If the new page is disjoint from the current range or is mapped at
	 * a different granularity, then sync the TLB so that the gather
	 * structure can be rewritten.
	 */
	if ((gather->pgsize && gather->pgsize != size) ||
	    iommu_iotlb_gather_is_disjoint(gather, iova, size))
		iommu_iotlb_sync(domain, gather);

	gather->pgsize = size;
	iommu_iotlb_gather_add_range(gather, iova, size);
}
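
/*
 * Illustrative sketch (not from this header): callers that batch TLB
 * invalidation use a gather on the unmap path and sync once at the end, for
 * example:
 *
 *	struct iommu_iotlb_gather gather;
 *	size_t unmapped = 0;
 *
 *	iommu_iotlb_gather_init(&gather);
 *	while (unmapped < size)
 *		unmapped += iommu_unmap_fast(domain, iova + unmapped,
 *					     PAGE_SIZE, &gather);
 *	iommu_iotlb_sync(domain, &gather);
 */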

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
	return gather && gather->queued;
}

static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
					   struct iova_bitmap *bitmap,
					   struct iommu_iotlb_gather *gather)
{
	if (gather)
		iommu_iotlb_gather_init(gather);

	dirty->bitmap = bitmap;
	dirty->gather = gather;
}

static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
					     unsigned long iova,
					     unsigned long length)
{
	if (dirty->bitmap)
		iova_bitmap_set(dirty->bitmap, iova, length);

	if (dirty->gather)
		iommu_iotlb_gather_add_range(dirty->gather, iova, length);
}

/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
/* FSL-MC device grouping function */
struct iommu_group *fsl_mc_device_group(struct device *dev);
extern struct iommu_group *generic_single_device_group(struct device *dev);

/**
 * struct iommu_fwspec - per-device IOMMU instance data
 * @ops: ops for this device's IOMMU
 * @iommu_fwnode: firmware handle for this device's IOMMU
 * @flags: IOMMU_FWSPEC_* flags
 * @num_ids: number of associated device IDs
 * @ids: IDs which this device may present to the IOMMU
 *
 * Note that the IDs (and any other information, really) stored in this
 * structure should be considered private to the IOMMU device driver and are
 * not to be used directly by IOMMU consumers.
 */
struct iommu_fwspec {
	const struct iommu_ops	*ops;
	struct fwnode_handle	*iommu_fwnode;
	u32			flags;
	unsigned int		num_ids;
	u32			ids[];
};
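
/*
 * Illustrative sketch (not from this header): firmware code fills an
 * iommu_fwspec via iommu_fwspec_init()/iommu_fwspec_add_ids() (declared
 * below), and a driver's ->of_xlate() callback usually just records the
 * master ID from the firmware arguments:
 *
 *	static int my_iommu_of_xlate(struct device *dev,
 *				     const struct of_phandle_args *args)
 *	{
 *		return iommu_fwspec_add_ids(dev, args->args, 1);
 *	}
 */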

/* ATS is supported */
#define IOMMU_FWSPEC_PCI_RC_ATS			(1 << 0)

/**
 * struct iommu_sva - handle to a device-mm bond
 */
struct iommu_sva {
	struct device		*dev;
	struct iommu_domain	*domain;
	struct list_head	handle_item;
	refcount_t		users;
};

struct iommu_mm_data {
	u32			pasid;
	struct list_head	sva_domains;
	struct list_head	sva_handles;
};

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, const u32 *ids, int num_ids);
const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode);

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->fwspec;
	else
		return NULL;
}

static inline void dev_iommu_fwspec_set(struct device *dev,
					struct iommu_fwspec *fwspec)
{
	dev->iommu->fwspec = fwspec;
}

static inline void *dev_iommu_priv_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->priv;
	else
		return NULL;
}

void dev_iommu_priv_set(struct device *dev, void *priv);

extern struct mutex iommu_probe_device_lock;
int iommu_probe_device(struct device *dev);

int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);

int iommu_device_use_default_domain(struct device *dev);
void iommu_device_unuse_default_domain(struct device *dev);

int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner);
void iommu_group_release_dma_owner(struct iommu_group *group);
bool iommu_group_dma_owner_claimed(struct iommu_group *group);

int iommu_device_claim_dma_owner(struct device *dev, void *owner);
void iommu_device_release_dma_owner(struct device *dev);

int iommu_attach_device_pasid(struct iommu_domain *domain,
			      struct device *dev, ioasid_t pasid);
void iommu_detach_device_pasid(struct iommu_domain *domain,
			       struct device *dev, ioasid_t pasid);
struct iommu_domain *
iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
			       unsigned int type);
ioasid_t iommu_alloc_global_pasid(struct device *dev);
void iommu_free_global_pasid(ioasid_t pasid);
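
/*
 * Illustrative sketch (not from this header): a driver that wants to route
 * DMA tagged with a PASID through its own unmanaged domain could combine the
 * global PASID allocator with the PASID attach interface declared above:
 *
 *	ioasid_t pasid = iommu_alloc_global_pasid(dev);
 *
 *	if (pasid == IOMMU_PASID_INVALID)
 *		return -ENOSPC;
 *	if (iommu_attach_device_pasid(domain, dev, pasid)) {
 *		iommu_free_global_pasid(pasid);
 *		return -ENODEV;
 *	}
 *	...
 *	iommu_detach_device_pasid(domain, dev, pasid);
 *	iommu_free_global_pasid(pasid);
 */
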
#else /* CONFIG_IOMMU_API */

struct iommu_ops {};
struct iommu_group {};
struct iommu_fwspec {};
struct iommu_device {};
struct iommu_fault_param {};
struct iommu_iotlb_gather {};
struct iommu_dirty_bitmap {};
struct iommu_dirty_ops {};

static inline bool iommu_present(const struct bus_type *bus)
{
	return false;
}

static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	return false;
}

static inline struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)
{
	return NULL;
}

static inline void iommu_domain_free(struct iommu_domain *domain)
{
}

static inline int iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
}

static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	return NULL;
}

static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	return -ENODEV;
}

static inline size_t iommu_unmap(struct iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	return 0;
}

static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
				      unsigned long iova, int gfp_order,
				      struct iommu_iotlb_gather *iotlb_gather)
{
	return 0;
}

static inline ssize_t iommu_map_sg(struct iommu_domain *domain,
				   unsigned long iova, struct scatterlist *sg,
				   unsigned int nents, int prot, gfp_t gfp)
{
	return -ENODEV;
}

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *iotlb_gather)
{
}

static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	return 0;
}

static inline void iommu_set_fault_handler(struct iommu_domain *domain,
				iommu_fault_handler_t handler, void *token)
{
}

static inline void iommu_get_resv_regions(struct device *dev,
					  struct list_head *list)
{
}

static inline void iommu_put_resv_regions(struct device *dev,
					  struct list_head *list)
{
}

static inline int iommu_get_group_resv_regions(struct iommu_group *group,
					       struct list_head *head)
{
	return -ENODEV;
}

static inline void iommu_set_default_passthrough(bool cmd_line)
{
}

static inline void iommu_set_default_translated(bool cmd_line)
{
}

static inline bool iommu_default_passthrough(void)
{
	return true;
}

static inline int iommu_attach_group(struct iommu_domain *domain,
				     struct iommu_group *group)
{
	return -ENODEV;
}

static inline void iommu_detach_group(struct iommu_domain *domain,
				      struct iommu_group *group)
{
}

static inline struct iommu_group *iommu_group_alloc(void)
{
	return ERR_PTR(-ENODEV);
}

static inline void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return NULL;
}

static inline void iommu_group_set_iommudata(struct iommu_group *group,
					     void *iommu_data,
					     void (*release)(void *iommu_data))
{
}

static inline int iommu_group_set_name(struct iommu_group *group,
				       const char *name)
{
	return -ENODEV;
}

static inline int iommu_group_add_device(struct iommu_group *group,
					 struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_group_remove_device(struct device *dev)
{
}

static inline int iommu_group_for_each_dev(struct iommu_group *group,
					   void *data,
					   int (*fn)(struct device *, void *))
{
	return -ENODEV;
}

static inline struct iommu_group *iommu_group_get(struct device *dev)
{
	return NULL;
}

static inline void iommu_group_put(struct iommu_group *group)
{
}

static inline int iommu_group_id(struct iommu_group *group)
{
	return -ENODEV;
}

static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain,
					   unsigned long quirks)
{
	return 0;
}

static inline int iommu_device_register(struct iommu_device *iommu,
					const struct iommu_ops *ops,
					struct device *hwdev)
{
	return -ENODEV;
}

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return NULL;
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
}

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
	return false;
}

static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
					   struct iova_bitmap *bitmap,
					   struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
					     unsigned long iova,
					     unsigned long length)
{
}

static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}

static inline int iommu_device_sysfs_add(struct iommu_device *iommu,
					 struct device *parent,
					 const struct attribute_group **groups,
					 const char *fmt, ...)
{
	return -ENODEV;
}

static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
}

static inline int iommu_device_link(struct device *dev, struct device *link)
{
	return -EINVAL;
}

static inline void iommu_device_unlink(struct device *dev, struct device *link)
{
}

static inline int iommu_fwspec_init(struct device *dev,
				    struct fwnode_handle *iommu_fwnode,
				    const struct iommu_ops *ops)
{
	return -ENODEV;
}

static inline void iommu_fwspec_free(struct device *dev)
{
}

static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
				       int num_ids)
{
	return -ENODEV;
}

static inline
const struct iommu_ops *iommu_ops_from_fwnode(const struct fwnode_handle *fwnode)
{
	return NULL;
}

static inline int
iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline int
iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	return NULL;
}

static inline int iommu_device_use_default_domain(struct device *dev)
{
	return 0;
}

static inline void iommu_device_unuse_default_domain(struct device *dev)
{
}

static inline int
iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
{
	return -ENODEV;
}

static inline void iommu_group_release_dma_owner(struct iommu_group *group)
{
}

static inline bool iommu_group_dma_owner_claimed(struct iommu_group *group)
{
	return false;
}

static inline void iommu_device_release_dma_owner(struct device *dev)
{
}

static inline int iommu_device_claim_dma_owner(struct device *dev, void *owner)
{
	return -ENODEV;
}

static inline int iommu_attach_device_pasid(struct iommu_domain *domain,
					    struct device *dev, ioasid_t pasid)
{
	return -ENODEV;
}

static inline void iommu_detach_device_pasid(struct iommu_domain *domain,
					     struct device *dev, ioasid_t pasid)
{
}

static inline struct iommu_domain *
iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
			       unsigned int type)
{
	return NULL;
}

static inline ioasid_t iommu_alloc_global_pasid(struct device *dev)
{
	return IOMMU_PASID_INVALID;
}

static inline void iommu_free_global_pasid(ioasid_t pasid) {}
#endif /* CONFIG_IOMMU_API */

#if IS_ENABLED(CONFIG_LOCKDEP) && IS_ENABLED(CONFIG_IOMMU_API)
void iommu_group_mutex_assert(struct device *dev);
#else
static inline void iommu_group_mutex_assert(struct device *dev)
{
}
#endif

/**
 * iommu_map_sgtable - Map the given buffer to the IOMMU domain
 * @domain:	The IOMMU domain to perform the mapping
 * @iova:	The start address to map the buffer
 * @sgt:	The sg_table object describing the buffer
 * @prot:	IOMMU protection bits
 *
 * Creates a mapping at @iova for the buffer described by a scatterlist
 * stored in the given sg_table object in the provided IOMMU domain.
 */
static inline ssize_t iommu_map_sgtable(struct iommu_domain *domain,
			unsigned long iova, struct sg_table *sgt, int prot)
{
	return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot,
			    GFP_KERNEL);
}

#ifdef CONFIG_IOMMU_DEBUGFS
extern	struct dentry *iommu_debugfs_dir;
void iommu_debugfs_setup(void);
#else
static inline void iommu_debugfs_setup(void) {}
#endif

#ifdef CONFIG_IOMMU_DMA
#include <linux/msi.h>

int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);

int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg);

#else /* CONFIG_IOMMU_DMA */

struct msi_desc;
struct msi_msg;

static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	return -ENODEV;
}

static inline int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	return 0;
}

static inline void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
}

#endif	/* CONFIG_IOMMU_DMA */

/*
 * Newer generations of Tegra SoCs require devices' stream IDs to be directly programmed into
 * some registers. These are always paired with a Tegra SMMU or ARM SMMU, for which the contents
 * of the struct iommu_fwspec are known. Use this helper to formalize access to these internals.
 */
#define TEGRA_STREAM_ID_BYPASS 0x7f

static inline bool tegra_dev_iommu_get_stream_id(struct device *dev, u32 *stream_id)
{
#ifdef CONFIG_IOMMU_API
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec && fwspec->num_ids == 1) {
		*stream_id = fwspec->ids[0] & 0xffff;
		return true;
	}
#endif

	return false;
}

#ifdef CONFIG_IOMMU_MM_DATA
static inline void mm_pasid_init(struct mm_struct *mm)
{
	/*
	 * During dup_mm(), a new mm will be memcpy'd from an old one and that makes
	 * the new mm and the old one point to the same iommu_mm instance. When either
	 * one of the two mms gets released, the iommu_mm instance is freed, leaving
	 * the other mm running into a use-after-free/double-free problem. To avoid
	 * the problem, zeroing the iommu_mm pointer of a new mm is needed here.
	 */
	mm->iommu_mm = NULL;
}

static inline bool mm_valid_pasid(struct mm_struct *mm)
{
	return READ_ONCE(mm->iommu_mm);
}

static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
{
	struct iommu_mm_data *iommu_mm = READ_ONCE(mm->iommu_mm);

	if (!iommu_mm)
		return IOMMU_PASID_INVALID;
	return iommu_mm->pasid;
}

void mm_pasid_drop(struct mm_struct *mm);
struct iommu_sva *iommu_sva_bind_device(struct device *dev,
					struct mm_struct *mm);
void iommu_sva_unbind_device(struct iommu_sva *handle);
u32 iommu_sva_get_pasid(struct iommu_sva *handle);
struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
					    struct mm_struct *mm);
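
/*
 * Illustrative sketch (my_hw_set_pasid()/my_dev are hypothetical): a device
 * driver using Shared Virtual Addressing binds the current process's mm,
 * programs the returned PASID into its hardware, and drops the bond when it
 * is done:
 *
 *	struct iommu_sva *handle = iommu_sva_bind_device(dev, current->mm);
 *
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	my_hw_set_pasid(my_dev, iommu_sva_get_pasid(handle));
 *	...
 *	iommu_sva_unbind_device(handle);
 */
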
#else
static inline struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
	return ERR_PTR(-ENODEV);
}

static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
{
}

static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	return IOMMU_PASID_INVALID;
}
static inline void mm_pasid_init(struct mm_struct *mm) {}
static inline bool mm_valid_pasid(struct mm_struct *mm) { return false; }

static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
{
	return IOMMU_PASID_INVALID;
}

static inline void mm_pasid_drop(struct mm_struct *mm) {}

static inline struct iommu_domain *
iommu_sva_domain_alloc(struct device *dev, struct mm_struct *mm)
{
	return NULL;
}
#endif /* CONFIG_IOMMU_MM_DATA */

#ifdef CONFIG_IOMMU_IOPF
int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev);
void iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev);
int iopf_queue_flush_dev(struct device *dev);
struct iopf_queue *iopf_queue_alloc(const char *name);
void iopf_queue_free(struct iopf_queue *queue);
int iopf_queue_discard_partial(struct iopf_queue *queue);
void iopf_free_group(struct iopf_group *group);
void iommu_report_device_fault(struct device *dev, struct iopf_fault *evt);
void iopf_group_response(struct iopf_group *group,
			 enum iommu_page_response_code status);
#else
static inline int
iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)
{
	return -ENODEV;
}

static inline void
iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
{
}

static inline int iopf_queue_flush_dev(struct device *dev)
{
	return -ENODEV;
}

static inline struct iopf_queue *iopf_queue_alloc(const char *name)
{
	return NULL;
}

static inline void iopf_queue_free(struct iopf_queue *queue)
{
}

static inline int iopf_queue_discard_partial(struct iopf_queue *queue)
{
	return -ENODEV;
}

static inline void iopf_free_group(struct iopf_group *group)
{
}

static inline void
iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
{
}

static inline void iopf_group_response(struct iopf_group *group,
				       enum iommu_page_response_code status)
{
}
#endif /* CONFIG_IOMMU_IOPF */
#endif /* __LINUX_IOMMU_H */