/* include/linux/iommu.h (Linux v6.7) */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 */

#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H

#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/iova_bitmap.h>
#include <uapi/linux/iommu.h>

#define IOMMU_READ	(1 << 0)
#define IOMMU_WRITE	(1 << 1)
#define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC	(1 << 3)
#define IOMMU_MMIO	(1 << 4) /* e.g. things like MSI doorbells */
/*
 * Where the bus hardware includes a privilege level as part of its access type
 * markings, and certain devices are capable of issuing transactions marked as
 * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
 * given permission flags only apply to accesses at the higher privilege level,
 * and that unprivileged transactions should have as little access as possible.
 * This would usually imply the same permissions as kernel mappings on the CPU,
 * if the IOMMU page table format is equivalent.
 */
#define IOMMU_PRIV	(1 << 5)

struct iommu_ops;
struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct iommu_domain_ops;
struct iommu_dirty_ops;
struct notifier_block;
struct iommu_sva;
struct iommu_fault_event;
struct iommu_dma_cookie;

/* iommu fault flags */
#define IOMMU_FAULT_READ	0x0
#define IOMMU_FAULT_WRITE	0x1

typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
			struct device *, unsigned long, int, void *);
typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);

struct iommu_domain_geometry {
	dma_addr_t aperture_start; /* First address that can be mapped    */
	dma_addr_t aperture_end;   /* Last address that can be mapped     */
	bool force_aperture;       /* DMA only allowed in mappable range? */
};

/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING	(1U << 0)  /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API	(1U << 1)  /* Domain for use in DMA-API
					      implementation              */
#define __IOMMU_DOMAIN_PT	(1U << 2)  /* Domain is identity mapped   */
#define __IOMMU_DOMAIN_DMA_FQ	(1U << 3)  /* DMA-API uses flush queue    */

#define __IOMMU_DOMAIN_SVA	(1U << 4)  /* Shared process address space */
#define __IOMMU_DOMAIN_PLATFORM	(1U << 5)

#define __IOMMU_DOMAIN_NESTED	(1U << 6)  /* User-managed address space nested
					      on a stage-2 translation        */

#define IOMMU_DOMAIN_ALLOC_FLAGS ~__IOMMU_DOMAIN_DMA_FQ
/*
 * These are the possible domain types:
 *
 *	IOMMU_DOMAIN_BLOCKED	- All DMA is blocked; can be used to isolate
 *				  devices.
 *	IOMMU_DOMAIN_IDENTITY	- DMA addresses are system physical addresses.
 *	IOMMU_DOMAIN_UNMANAGED	- DMA mappings are managed by the IOMMU-API
 *				  user; used for VMs.
 *	IOMMU_DOMAIN_DMA	- Used internally by DMA-API implementations.
 *				  This flag allows IOMMU drivers to implement
 *				  certain optimizations for these domains.
 *	IOMMU_DOMAIN_DMA_FQ	- As above, but definitely using batched TLB
 *				  invalidation.
 *	IOMMU_DOMAIN_SVA	- DMA addresses are shared process addresses
 *				  represented by mm_struct's.
 *	IOMMU_DOMAIN_PLATFORM	- Legacy domain for drivers that do their own
 *				  DMA-API handling. Do not use in new drivers.
 */
#define IOMMU_DOMAIN_BLOCKED	(0U)
#define IOMMU_DOMAIN_IDENTITY	(__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED	(__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API)
#define IOMMU_DOMAIN_DMA_FQ	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API |	\
				 __IOMMU_DOMAIN_DMA_FQ)
#define IOMMU_DOMAIN_SVA	(__IOMMU_DOMAIN_SVA)
#define IOMMU_DOMAIN_PLATFORM	(__IOMMU_DOMAIN_PLATFORM)
#define IOMMU_DOMAIN_NESTED	(__IOMMU_DOMAIN_NESTED)

struct iommu_domain {
	unsigned type;
	const struct iommu_domain_ops *ops;
	const struct iommu_dirty_ops *dirty_ops;

	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	struct iommu_domain_geometry geometry;
	struct iommu_dma_cookie *iova_cookie;
	enum iommu_page_response_code (*iopf_handler)(struct iommu_fault *fault,
						      void *data);
	void *fault_data;
	union {
		struct {
			iommu_fault_handler_t handler;
			void *handler_token;
		};
		struct {	/* IOMMU_DOMAIN_SVA */
			struct mm_struct *mm;
			int users;
		};
	};
};

static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
{
	return domain->type & __IOMMU_DOMAIN_DMA_API;
}

enum iommu_cap {
	IOMMU_CAP_CACHE_COHERENCY,	/* IOMMU_CACHE is supported */
	IOMMU_CAP_NOEXEC,		/* IOMMU_NOEXEC flag */
	IOMMU_CAP_PRE_BOOT_PROTECTION,	/* Firmware says it used the IOMMU for
					   DMA protection and we should too */
	/*
	 * Per-device flag indicating if enforce_cache_coherency() will work on
	 * this device.
	 */
	IOMMU_CAP_ENFORCE_CACHE_COHERENCY,
	/*
	 * IOMMU driver does not issue TLB maintenance during .unmap, so can
	 * usefully support the non-strict DMA flush queue.
	 */
	IOMMU_CAP_DEFERRED_FLUSH,
	IOMMU_CAP_DIRTY_TRACKING,	/* IOMMU supports dirty tracking */
};

/* These are the possible reserved region types */
enum iommu_resv_type {
	/* Memory regions which must be mapped 1:1 at all times */
	IOMMU_RESV_DIRECT,
	/*
	 * Memory regions which are advertised to be 1:1 but are
	 * commonly considered relaxable in some conditions,
	 * for instance in device-assignment use cases (USB, graphics)
	 */
	IOMMU_RESV_DIRECT_RELAXABLE,
	/* Arbitrary "never map this or give it to a device" address ranges */
	IOMMU_RESV_RESERVED,
	/* Hardware MSI region (untranslated) */
	IOMMU_RESV_MSI,
	/* Software-managed MSI translation window */
	IOMMU_RESV_SW_MSI,
};

/**
 * struct iommu_resv_region - descriptor for a reserved memory region
 * @list: Linked list pointers
 * @start: System physical start address of the region
 * @length: Length of the region in bytes
 * @prot: IOMMU protection flags (READ/WRITE/...)
 * @type: Type of the reserved region
 * @free: Callback to free associated memory allocations
 */
struct iommu_resv_region {
	struct list_head list;
	phys_addr_t start;
	size_t length;
	int prot;
	enum iommu_resv_type type;
	void (*free)(struct device *dev, struct iommu_resv_region *region);
};

struct iommu_iort_rmr_data {
	struct iommu_resv_region rr;

	/* Stream IDs associated with IORT RMR entry */
	const u32 *sids;
	u32 num_sids;
};
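/*
 * Illustrative sketch (editor's example, not part of this header): a driver's
 * ->get_resv_regions() callback might advertise a software-managed MSI window
 * with iommu_alloc_resv_region(), declared later in this header. The base
 * address, size and "example_" name are hypothetical.
 *
 *	static void example_get_resv_regions(struct device *dev,
 *					     struct list_head *head)
 *	{
 *		struct iommu_resv_region *region;
 *
 *		region = iommu_alloc_resv_region(0x08000000, SZ_1M,
 *						 IOMMU_WRITE | IOMMU_MMIO,
 *						 IOMMU_RESV_SW_MSI, GFP_KERNEL);
 *		if (region)
 *			list_add_tail(&region->list, head);
 *	}
 */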
/**
 * enum iommu_dev_features - Per device IOMMU features
 * @IOMMU_DEV_FEAT_SVA: Shared Virtual Addresses
 * @IOMMU_DEV_FEAT_IOPF: I/O Page Faults such as PRI or Stall. Generally
 *			 enabling %IOMMU_DEV_FEAT_SVA requires
 *			 %IOMMU_DEV_FEAT_IOPF, but some devices manage I/O Page
 *			 Faults themselves instead of relying on the IOMMU. When
 *			 supported, this feature must be enabled before and
 *			 disabled after %IOMMU_DEV_FEAT_SVA.
 *
 * Device drivers enable a feature using iommu_dev_enable_feature().
 */
enum iommu_dev_features {
	IOMMU_DEV_FEAT_SVA,
	IOMMU_DEV_FEAT_IOPF,
};

#define IOMMU_NO_PASID	(0U) /* Reserved for DMA w/o PASID */
#define IOMMU_FIRST_GLOBAL_PASID	(1U) /* Starting range for allocation */
#define IOMMU_PASID_INVALID	(-1U)
typedef unsigned int ioasid_t;

#ifdef CONFIG_IOMMU_API

/**
 * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
 *
 * @start: IOVA representing the start of the range to be flushed
 * @end: IOVA representing the end of the range to be flushed (inclusive)
 * @pgsize: The interval at which to perform the flush
 * @freelist: Removed pages to free after sync
 * @queued: Indicates that the flush will be queued
 *
 * This structure is intended to be updated by multiple calls to the
 * ->unmap() function in struct iommu_ops before eventually being passed
 * into ->iotlb_sync(). Drivers can add pages to @freelist to be freed after
 * ->iotlb_sync() or ->iotlb_flush_all() have cleared all cached references to
 * them. @queued is set to indicate when ->iotlb_flush_all() will be called
 * later instead of ->iotlb_sync(), so drivers may optimise accordingly.
 */
struct iommu_iotlb_gather {
	unsigned long		start;
	unsigned long		end;
	size_t			pgsize;
	struct list_head	freelist;
	bool			queued;
};

/**
 * struct iommu_dirty_bitmap - Dirty IOVA bitmap state
 * @bitmap: IOVA bitmap
 * @gather: Range information for a pending IOTLB flush
 */
struct iommu_dirty_bitmap {
	struct iova_bitmap *bitmap;
	struct iommu_iotlb_gather *gather;
};

/* Read but do not clear any dirty bits */
#define IOMMU_DIRTY_NO_CLEAR (1 << 0)

/**
 * struct iommu_dirty_ops - domain specific dirty tracking operations
 * @set_dirty_tracking: Enable or Disable dirty tracking on the iommu domain
 * @read_and_clear_dirty: Walk IOMMU page tables for dirtied PTEs marshalled
 *                        into a bitmap, with a bit represented as a page.
 *                        Reads the dirty PTE bits and clears them from the IO
 *                        pagetables.
 */
struct iommu_dirty_ops {
	int (*set_dirty_tracking)(struct iommu_domain *domain, bool enabled);
	int (*read_and_clear_dirty)(struct iommu_domain *domain,
				    unsigned long iova, size_t size,
				    unsigned long flags,
				    struct iommu_dirty_bitmap *dirty);
};
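/*
 * Illustrative sketch (editor's example): a driver's ->read_and_clear_dirty()
 * typically walks its page tables, test-and-clears the hardware dirty bit on
 * each live PTE unless IOMMU_DIRTY_NO_CLEAR is set, and records dirtied ranges
 * with iommu_dirty_bitmap_record(), defined later in this header. The
 * example_pte_*() helpers are hypothetical.
 *
 *	static int example_read_and_clear_dirty(struct iommu_domain *domain,
 *						unsigned long iova, size_t size,
 *						unsigned long flags,
 *						struct iommu_dirty_bitmap *dirty)
 *	{
 *		unsigned long end = iova + size;
 *
 *		for (; iova < end; iova += SZ_4K) {
 *			bool was_dirty;
 *
 *			if (flags & IOMMU_DIRTY_NO_CLEAR)
 *				was_dirty = example_pte_is_dirty(domain, iova);
 *			else
 *				was_dirty = example_pte_test_and_clear_dirty(domain, iova);
 *			if (was_dirty)
 *				iommu_dirty_bitmap_record(dirty, iova, SZ_4K);
 *		}
 *		return 0;
 *	}
 */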
/**
 * struct iommu_user_data - iommu driver specific user space data info
 * @type: The data type of the user buffer
 * @uptr: Pointer to the user buffer for copy_from_user()
 * @len: The length of the user buffer in bytes
 *
 * The user space data is defined by an uAPI in include/uapi/linux/iommufd.h.
 * @type, @uptr and @len should be just copied from an iommufd core uAPI struct.
 */
struct iommu_user_data {
	unsigned int type;
	void __user *uptr;
	size_t len;
};

/**
 * __iommu_copy_struct_from_user - Copy iommu driver specific user space data
 * @dst_data: Pointer to an iommu driver specific user data that is defined in
 *            include/uapi/linux/iommufd.h
 * @src_data: Pointer to a struct iommu_user_data for user space data info
 * @data_type: The data type of the @dst_data. Must match with @src_data.type
 * @data_len: Length of current user data structure, i.e. sizeof(struct _dst)
 * @min_len: Initial length of user data structure for backward compatibility.
 *           This should be offsetofend using the last member in the user data
 *           struct that was initially added to include/uapi/linux/iommufd.h
 */
static inline int __iommu_copy_struct_from_user(
	void *dst_data, const struct iommu_user_data *src_data,
	unsigned int data_type, size_t data_len, size_t min_len)
{
	if (WARN_ON(!dst_data || !src_data))
		return -EINVAL;
	if (src_data->type != data_type)
		return -EINVAL;
	if (src_data->len < min_len || data_len < src_data->len)
		return -EINVAL;
	return copy_struct_from_user(dst_data, data_len, src_data->uptr,
				     src_data->len);
}

/**
 * iommu_copy_struct_from_user - Copy iommu driver specific user space data
 * @kdst: Pointer to an iommu driver specific user data that is defined in
 *        include/uapi/linux/iommufd.h
 * @user_data: Pointer to a struct iommu_user_data for user space data info
 * @data_type: The data type of the @kdst. Must match with @user_data->type
 * @min_last: The last member of the data structure @kdst points to, in its
 *            initial version.
 * Return 0 for success, otherwise -error.
 */
#define iommu_copy_struct_from_user(kdst, user_data, data_type, min_last) \
	__iommu_copy_struct_from_user(kdst, user_data, data_type,         \
				      sizeof(*kdst),                      \
				      offsetofend(typeof(*kdst), min_last))
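/*
 * Illustrative sketch (editor's example): given a hypothetical uAPI struct
 * whose first released version ended at @old_last, a driver copies the user
 * data as below. Userspace built against the old layout keeps working because
 * only offsetofend(..., old_last) bytes are required; EXAMPLE_DATA_TYPE stands
 * in for a real type value from include/uapi/linux/iommufd.h.
 *
 *	struct example_hwpt_data {
 *		__u64 old_last;
 *		__u64 added_later;
 *	};
 *
 *	static int example_parse(const struct iommu_user_data *user_data)
 *	{
 *		struct example_hwpt_data data = {};
 *
 *		return iommu_copy_struct_from_user(&data, user_data,
 *						   EXAMPLE_DATA_TYPE, old_last);
 *	}
 */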
/**
 * struct iommu_ops - iommu ops and capabilities
 * @capable: check capability
 * @hw_info: report iommu hardware information. The data buffer returned by this
 *           op is allocated in the iommu driver and freed by the caller after
 *           use. The information type is one of enum iommu_hw_info_type defined
 *           in include/uapi/linux/iommufd.h.
 * @domain_alloc: allocate and return an iommu domain on success, otherwise
 *                return NULL. The domain is not fully initialized until
 *                the caller iommu_domain_alloc() returns.
 * @domain_alloc_user: Allocate an iommu domain corresponding to the input
 *                     parameters as defined in include/uapi/linux/iommufd.h.
 *                     Unlike @domain_alloc, it is called only by IOMMUFD and
 *                     must fully initialize the new domain before return.
 *                     Upon success, if the @user_data is valid and the @parent
 *                     points to a kernel-managed domain, the new domain must be
 *                     IOMMU_DOMAIN_NESTED type; otherwise, the @parent must be
 *                     NULL, the @user_data may optionally be provided, and the
 *                     new domain must support __IOMMU_DOMAIN_PAGING.
 *                     Upon failure, ERR_PTR must be returned.
 * @domain_alloc_paging: Allocate an iommu_domain that can be used for
 *                       UNMANAGED, DMA, and DMA_FQ domain types.
 * @probe_device: Add device to iommu driver handling
 * @release_device: Remove device from iommu driver handling
 * @probe_finalize: Do final setup work after the device is added to an IOMMU
 *                  group and attached to the group's domain
 * @device_group: find iommu group for a particular device
 * @get_resv_regions: Request list of reserved regions for a device
 * @of_xlate: add OF master IDs to iommu grouping
 * @is_attach_deferred: Check if domain attach should be deferred from iommu
 *                      driver init to device driver init (default no)
 * @dev_enable/disable_feat: per device entries to enable/disable
 *                           iommu specific features.
 * @page_response: handle page request response
 * @def_domain_type: device default domain type, return value:
 *		- IOMMU_DOMAIN_IDENTITY: must use an identity domain
 *		- IOMMU_DOMAIN_DMA: must use a dma domain
 *		- 0: use the default setting
 * @default_domain_ops: the default ops for domains
 * @remove_dev_pasid: Remove any translation configurations of a specific
 *                    pasid, so that any DMA transactions with this pasid
 *                    will be blocked by the hardware.
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 * @owner: Driver module providing these ops
 * @identity_domain: An always available, always attachable identity
 *                   translation.
 * @blocked_domain: An always available, always attachable blocking
 *                  translation.
 * @default_domain: If not NULL this will always be set as the default domain.
 *                  This should be an IDENTITY/BLOCKED/PLATFORM domain.
 *                  Do not use in new drivers.
 */
struct iommu_ops {
	bool (*capable)(struct device *dev, enum iommu_cap);
	void *(*hw_info)(struct device *dev, u32 *length, u32 *type);

	/* Domain allocation and freeing by the iommu driver */
	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
	struct iommu_domain *(*domain_alloc_user)(
		struct device *dev, u32 flags, struct iommu_domain *parent,
		const struct iommu_user_data *user_data);
	struct iommu_domain *(*domain_alloc_paging)(struct device *dev);

	struct iommu_device *(*probe_device)(struct device *dev);
	void (*release_device)(struct device *dev);
	void (*probe_finalize)(struct device *dev);
	struct iommu_group *(*device_group)(struct device *dev);

	/* Request/Free a list of reserved regions for a device */
	void (*get_resv_regions)(struct device *dev, struct list_head *list);

	int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
	bool (*is_attach_deferred)(struct device *dev);

	/* Per device IOMMU features */
	int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
	int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);

	int (*page_response)(struct device *dev,
			     struct iommu_fault_event *evt,
			     struct iommu_page_response *msg);

	int (*def_domain_type)(struct device *dev);
	void (*remove_dev_pasid)(struct device *dev, ioasid_t pasid);

	const struct iommu_domain_ops *default_domain_ops;
	unsigned long pgsize_bitmap;
	struct module *owner;
	struct iommu_domain *identity_domain;
	struct iommu_domain *blocked_domain;
	struct iommu_domain *default_domain;
};
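/*
 * Illustrative sketch (editor's example): a minimal driver usually publishes a
 * static ops table along these lines, pointing each allocated domain's ->ops
 * at its domain ops table (shown in the next example block). Every "example_"
 * symbol is hypothetical and stands in for the driver's own implementation.
 *
 *	static const struct iommu_ops example_iommu_ops = {
 *		.capable		= example_capable,
 *		.domain_alloc_paging	= example_domain_alloc_paging,
 *		.probe_device		= example_probe_device,
 *		.release_device		= example_release_device,
 *		.device_group		= generic_device_group,
 *		.pgsize_bitmap		= SZ_4K | SZ_2M | SZ_1G,
 *		.owner			= THIS_MODULE,
 *	};
 */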
/**
 * struct iommu_domain_ops - domain specific operations
 * @attach_dev: attach an iommu domain to a device
 *  Return:
 * * 0		- success
 * * EINVAL	- can indicate that device and domain are incompatible due to
 *		  some previous configuration of the domain, in which case the
 *		  driver shouldn't log an error, since it is legitimate for a
 *		  caller to test reuse of existing domains. Otherwise, it may
 *		  still represent some other fundamental problem
 * * ENOMEM	- out of memory
 * * ENOSPC	- non-ENOMEM type of resource allocation failures
 * * EBUSY	- device is attached to a domain and cannot be changed
 * * ENODEV	- device specific errors, not able to be attached
 * * <others>	- treated as ENODEV by the caller. Use is discouraged
 * @set_dev_pasid: set an iommu domain to a pasid of device
 * @map_pages: map a physically contiguous set of pages of the same size to
 *             an iommu domain.
 * @unmap_pages: unmap a number of pages of the same size from an iommu domain
 * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
 * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
 * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
 *              queue
 * @iova_to_phys: translate iova to physical address
 * @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE,
 *                           including no-snoop TLPs on PCIe or other platform
 *                           specific mechanisms.
 * @enable_nesting: Enable nesting
 * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
 * @free: Release the domain after use.
 */
struct iommu_domain_ops {
	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev,
			     ioasid_t pasid);

	int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped);
	size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *iotlb_gather);

	void (*flush_iotlb_all)(struct iommu_domain *domain);
	int (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
			      size_t size);
	void (*iotlb_sync)(struct iommu_domain *domain,
			   struct iommu_iotlb_gather *iotlb_gather);

	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
				    dma_addr_t iova);

	bool (*enforce_cache_coherency)(struct iommu_domain *domain);
	int (*enable_nesting)(struct iommu_domain *domain);
	int (*set_pgtable_quirks)(struct iommu_domain *domain,
				  unsigned long quirks);

	void (*free)(struct iommu_domain *domain);
};

/**
 * struct iommu_device - IOMMU core representation of one IOMMU hardware
 *			 instance
 * @list: Used by the iommu-core to keep a list of registered iommus
 * @ops: iommu-ops for talking to this iommu
 * @fwnode: firmware handle for this IOMMU instance
 * @dev: struct device for sysfs handling
 * @singleton_group: Used internally for drivers that have only one group
 * @max_pasids: number of supported PASIDs
 */
struct iommu_device {
	struct list_head list;
	const struct iommu_ops *ops;
	struct fwnode_handle *fwnode;
	struct device *dev;
	struct iommu_group *singleton_group;
	u32 max_pasids;
};
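/*
 * Illustrative sketch (editor's example): an ->unmap_pages() implementation
 * usually tears down PTEs and records each page for a deferred IOTLB flush via
 * iommu_iotlb_gather_add_page(), defined later in this header, rather than
 * flushing synchronously. example_clear_pte() is hypothetical.
 *
 *	static size_t example_unmap_pages(struct iommu_domain *domain,
 *					  unsigned long iova, size_t pgsize,
 *					  size_t pgcount,
 *					  struct iommu_iotlb_gather *gather)
 *	{
 *		size_t i;
 *
 *		for (i = 0; i != pgcount; i++) {
 *			if (!example_clear_pte(domain, iova + i * pgsize, pgsize))
 *				break;
 *			iommu_iotlb_gather_add_page(domain, gather,
 *						    iova + i * pgsize, pgsize);
 *		}
 *		return i * pgsize;
 *	}
 */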
/**
 * struct iommu_fault_event - Generic fault event
 *
 * Can represent recoverable faults such as page requests or
 * unrecoverable faults such as DMA or IRQ remapping faults.
 *
 * @fault: fault descriptor
 * @list: pending fault event list, used for tracking responses
 */
struct iommu_fault_event {
	struct iommu_fault fault;
	struct list_head list;
};

/**
 * struct iommu_fault_param - per-device IOMMU fault data
 * @handler: Callback function to handle IOMMU faults at device level
 * @data: handler private data
 * @faults: holds the pending faults which need a response
 * @lock: protect pending faults list
 */
struct iommu_fault_param {
	iommu_dev_fault_handler_t handler;
	void *data;
	struct list_head faults;
	struct mutex lock;
};

/**
 * struct dev_iommu - Collection of per-device IOMMU data
 *
 * @fault_param: IOMMU detected device fault reporting data
 * @iopf_param:	 I/O Page Fault queue and data
 * @fwspec:	 IOMMU fwspec data
 * @iommu_dev:	 IOMMU device this device is linked to
 * @priv:	 IOMMU Driver private data
 * @max_pasids:  number of PASIDs this device can consume
 * @attach_deferred: the dma domain attachment is deferred
 * @pci_32bit_workaround: Limit DMA allocations to 32-bit IOVAs
 * @require_direct: device requires IOMMU_RESV_DIRECT regions
 * @shadow_on_flush: IOTLB flushes are used to sync shadow tables
 *
 * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
 *	struct iommu_group	*iommu_group;
 */
struct dev_iommu {
	struct mutex lock;
	struct iommu_fault_param	*fault_param;
	struct iopf_device_param	*iopf_param;
	struct iommu_fwspec		*fwspec;
	struct iommu_device		*iommu_dev;
	void				*priv;
	u32				max_pasids;
	u32				attach_deferred:1;
	u32				pci_32bit_workaround:1;
	u32				require_direct:1;
	u32				shadow_on_flush:1;
};
int iommu_device_register(struct iommu_device *iommu,
			  const struct iommu_ops *ops,
			  struct device *hwdev);
void iommu_device_unregister(struct iommu_device *iommu);
int  iommu_device_sysfs_add(struct iommu_device *iommu,
			    struct device *parent,
			    const struct attribute_group **groups,
			    const char *fmt, ...) __printf(4, 5);
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int  iommu_device_link(struct iommu_device *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain);

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return (struct iommu_device *)dev_get_drvdata(dev);
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
	*gather = (struct iommu_iotlb_gather) {
		.start	= ULONG_MAX,
		.freelist = LIST_HEAD_INIT(gather->freelist),
	};
}

extern int bus_iommu_probe(const struct bus_type *bus);
extern bool iommu_present(const struct bus_type *bus);
extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
extern bool iommu_group_has_isolated_msi(struct iommu_group *group);
extern struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus);
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
			       struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
				struct device *dev);
extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
				   struct device *dev, ioasid_t pasid);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			  size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
			       unsigned long iova, size_t size,
			       struct iommu_iotlb_gather *iotlb_gather);
extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			    struct scatterlist *sg, unsigned int nents,
			    int prot, gfp_t gfp);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
			iommu_fault_handler_t handler, void *token);

extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_set_default_passthrough(bool cmd_line);
extern void iommu_set_default_translated(bool cmd_line);
extern bool iommu_default_passthrough(void);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
			enum iommu_resv_type type, gfp_t gfp);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
					struct list_head *head);
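/*
 * Illustrative sketch (editor's example): the classic IOMMU-API flow for an
 * unmanaged domain, with error handling trimmed for brevity.
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
 *
 *	if (!domain || iommu_attach_device(domain, dev))
 *		return -ENODEV;
 *	iommu_map(domain, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE,
 *		  GFP_KERNEL);
 *	...
 *	iommu_unmap(domain, iova, SZ_4K);
 *	iommu_detach_device(domain, dev);
 *	iommu_domain_free(domain);
 */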
extern int iommu_attach_group(struct iommu_domain *domain,
			      struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
			       struct iommu_group *group);
extern struct iommu_group *iommu_group_alloc(void);
extern void *iommu_group_get_iommudata(struct iommu_group *group);
extern void iommu_group_set_iommudata(struct iommu_group *group,
				      void *iommu_data,
				      void (*release)(void *iommu_data));
extern int iommu_group_set_name(struct iommu_group *group, const char *name);
extern int iommu_group_add_device(struct iommu_group *group,
				  struct device *dev);
extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
				    int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);
extern int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data);

extern int iommu_unregister_device_fault_handler(struct device *dev);

extern int iommu_report_device_fault(struct device *dev,
				     struct iommu_fault_event *evt);
extern int iommu_page_response(struct device *dev,
			       struct iommu_page_response *msg);

extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);

int iommu_enable_nesting(struct iommu_domain *domain);
int iommu_set_pgtable_quirks(struct iommu_domain *domain,
			     unsigned long quirks);

void iommu_set_dma_strict(void);

extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
			      unsigned long iova, int flags);

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	if (domain->ops->flush_iotlb_all)
		domain->ops->flush_iotlb_all(domain);
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *iotlb_gather)
{
	if (domain->ops->iotlb_sync)
		domain->ops->iotlb_sync(domain, iotlb_gather);

	iommu_iotlb_gather_init(iotlb_gather);
}

/**
 * iommu_iotlb_gather_is_disjoint - Checks whether a new range is disjoint
 *
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to check whether a new range and the gathered range
 * are disjoint. For many IOMMUs, flushing the IOMMU in this case is better
 * than merging the two, which might lead to unnecessary invalidations.
 */
static inline
bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather,
				    unsigned long iova, size_t size)
{
	unsigned long start = iova, end = start + size - 1;

	return gather->end != 0 &&
		(end + 1 < gather->start || start > gather->end + 1);
}

/**
 * iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build arbitrarily-sized invalidation commands
 * where only the address range matters, and simply minimising intermediate
 * syncs is preferred.
 */
static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather,
						unsigned long iova, size_t size)
{
	unsigned long end = iova + size - 1;

	if (gather->start > iova)
		gather->start = iova;
	if (gather->end < end)
		gather->end = end;
}

/**
 * iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation
 * @domain: IOMMU domain to be invalidated
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build invalidation commands based on individual
 * pages, or with page size/table level hints which cannot be gathered if they
 * differ.
 */
static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
	/*
	 * If the new page is disjoint from the current range or is mapped at
	 * a different granularity, then sync the TLB so that the gather
	 * structure can be rewritten.
	 */
	if ((gather->pgsize && gather->pgsize != size) ||
	    iommu_iotlb_gather_is_disjoint(gather, iova, size))
		iommu_iotlb_sync(domain, gather);

	gather->pgsize = size;
	iommu_iotlb_gather_add_range(gather, iova, size);
}

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
	return gather && gather->queued;
}

static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
					   struct iova_bitmap *bitmap,
					   struct iommu_iotlb_gather *gather)
{
	if (gather)
		iommu_iotlb_gather_init(gather);

	dirty->bitmap = bitmap;
	dirty->gather = gather;
}

static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
					     unsigned long iova,
					     unsigned long length)
{
	if (dirty->bitmap)
		iova_bitmap_set(dirty->bitmap, iova, length);

	if (dirty->gather)
		iommu_iotlb_gather_add_range(dirty->gather, iova, length);
}
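/*
 * Illustrative sketch (editor's example): batching invalidations with the
 * gather helpers above. iommu_unmap_fast() accumulates the unmapped ranges
 * into @gather, and a single iommu_iotlb_sync() flushes them all.
 *
 *	struct iommu_iotlb_gather gather;
 *
 *	iommu_iotlb_gather_init(&gather);
 *	iommu_unmap_fast(domain, iova1, SZ_4K, &gather);
 *	iommu_unmap_fast(domain, iova2, SZ_4K, &gather);
 *	iommu_iotlb_sync(domain, &gather);
 */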
/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
/* FSL-MC device grouping function */
struct iommu_group *fsl_mc_device_group(struct device *dev);
extern struct iommu_group *generic_single_device_group(struct device *dev);

/**
 * struct iommu_fwspec - per-device IOMMU instance data
 * @ops: ops for this device's IOMMU
 * @iommu_fwnode: firmware handle for this device's IOMMU
 * @flags: IOMMU_FWSPEC_* flags
 * @num_ids: number of associated device IDs
 * @ids: IDs which this device may present to the IOMMU
 *
 * Note that the IDs (and any other information, really) stored in this
 * structure should be considered private to the IOMMU device driver and are
 * not to be used directly by IOMMU consumers.
 */
struct iommu_fwspec {
	const struct iommu_ops	*ops;
	struct fwnode_handle	*iommu_fwnode;
	u32			flags;
	unsigned int		num_ids;
	u32			ids[];
};

/* ATS is supported */
#define IOMMU_FWSPEC_PCI_RC_ATS			(1 << 0)

/**
 * struct iommu_sva - handle to a device-mm bond
 * @dev: the device bound to the mm
 * @domain: the SVA domain backing the bond
 */
struct iommu_sva {
	struct device			*dev;
	struct iommu_domain		*domain;
};

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->fwspec;
	else
		return NULL;
}

static inline void dev_iommu_fwspec_set(struct device *dev,
					struct iommu_fwspec *fwspec)
{
	dev->iommu->fwspec = fwspec;
}

static inline void *dev_iommu_priv_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->priv;
	else
		return NULL;
}

static inline void dev_iommu_priv_set(struct device *dev, void *priv)
{
	dev->iommu->priv = priv;
}

extern struct mutex iommu_probe_device_lock;
int iommu_probe_device(struct device *dev);

int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);

int iommu_device_use_default_domain(struct device *dev);
void iommu_device_unuse_default_domain(struct device *dev);

int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner);
void iommu_group_release_dma_owner(struct iommu_group *group);
bool iommu_group_dma_owner_claimed(struct iommu_group *group);

int iommu_device_claim_dma_owner(struct device *dev, void *owner);
void iommu_device_release_dma_owner(struct device *dev);

struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
					    struct mm_struct *mm);
int iommu_attach_device_pasid(struct iommu_domain *domain,
			      struct device *dev, ioasid_t pasid);
void iommu_detach_device_pasid(struct iommu_domain *domain,
			       struct device *dev, ioasid_t pasid);
struct iommu_domain *
iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
			       unsigned int type);
ioasid_t iommu_alloc_global_pasid(struct device *dev);
void iommu_free_global_pasid(ioasid_t pasid);
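/*
 * Illustrative sketch (editor's example): drivers commonly stash their
 * per-device state with dev_iommu_priv_set() from ->probe_device() and fetch
 * it back with dev_iommu_priv_get() in later callbacks; the "example_" names
 * are hypothetical.
 *
 *	static struct iommu_device *example_probe_device(struct device *dev)
 *	{
 *		struct example_dev_data *data;
 *
 *		data = kzalloc(sizeof(*data), GFP_KERNEL);
 *		if (!data)
 *			return ERR_PTR(-ENOMEM);
 *		dev_iommu_priv_set(dev, data);
 *		return &example_hw->iommu;
 *	}
 *
 * where example_hw is the driver instance whose embedded struct iommu_device
 * was registered with iommu_device_register().
 */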

#else /* CONFIG_IOMMU_API */

struct iommu_ops {};
struct iommu_group {};
struct iommu_fwspec {};
struct iommu_device {};
struct iommu_fault_param {};
struct iommu_iotlb_gather {};
struct iommu_dirty_bitmap {};
struct iommu_dirty_ops {};

static inline bool iommu_present(const struct bus_type *bus)
{
	return false;
}

static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	return false;
}

static inline struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)
{
	return NULL;
}

static inline void iommu_domain_free(struct iommu_domain *domain)
{
}

static inline int iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
}

static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	return NULL;
}

static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	return -ENODEV;
}

static inline size_t iommu_unmap(struct iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	return 0;
}

static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
				      unsigned long iova, size_t size,
				      struct iommu_iotlb_gather *iotlb_gather)
{
	return 0;
}

static inline ssize_t iommu_map_sg(struct iommu_domain *domain,
				   unsigned long iova, struct scatterlist *sg,
				   unsigned int nents, int prot, gfp_t gfp)
{
	return -ENODEV;
}

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *iotlb_gather)
{
}

static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	return 0;
}

static inline void iommu_set_fault_handler(struct iommu_domain *domain,
				iommu_fault_handler_t handler, void *token)
{
}

static inline void iommu_get_resv_regions(struct device *dev,
					  struct list_head *list)
{
}

static inline void iommu_put_resv_regions(struct device *dev,
					  struct list_head *list)
{
}

static inline int iommu_get_group_resv_regions(struct iommu_group *group,
					       struct list_head *head)
{
	return -ENODEV;
}

static inline void iommu_set_default_passthrough(bool cmd_line)
{
}

static inline void iommu_set_default_translated(bool cmd_line)
{
}

static inline bool iommu_default_passthrough(void)
{
	return true;
}

static inline int iommu_attach_group(struct iommu_domain *domain,
				     struct iommu_group *group)
{
	return -ENODEV;
}

static inline void iommu_detach_group(struct iommu_domain *domain,
				      struct iommu_group *group)
{
}

static inline struct iommu_group *iommu_group_alloc(void)
{
	return ERR_PTR(-ENODEV);
}

static inline void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return NULL;
}

static inline void iommu_group_set_iommudata(struct iommu_group *group,
					     void *iommu_data,
					     void (*release)(void *iommu_data))
{
}

static inline int iommu_group_set_name(struct iommu_group *group,
				       const char *name)
{
	return -ENODEV;
}

static inline int iommu_group_add_device(struct iommu_group *group,
					 struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_group_remove_device(struct device *dev)
{
}

static inline int iommu_group_for_each_dev(struct iommu_group *group,
					   void *data,
					   int (*fn)(struct device *, void *))
{
	return -ENODEV;
}

static inline struct iommu_group *iommu_group_get(struct device *dev)
{
	return NULL;
}

static inline void iommu_group_put(struct iommu_group *group)
{
}

static inline
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	return -ENODEV;
}

static inline int iommu_unregister_device_fault_handler(struct device *dev)
{
	return 0;
}

static inline
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	return -ENODEV;
}
static inline int iommu_page_response(struct device *dev,
				      struct iommu_page_response *msg)
{
	return -ENODEV;
}

static inline int iommu_group_id(struct iommu_group *group)
{
	return -ENODEV;
}

static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain,
					   unsigned long quirks)
{
	return 0;
}

static inline int iommu_device_register(struct iommu_device *iommu,
					const struct iommu_ops *ops,
					struct device *hwdev)
{
	return -ENODEV;
}

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return NULL;
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
}

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
	return false;
}

static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
					   struct iova_bitmap *bitmap,
					   struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
					     unsigned long iova,
					     unsigned long length)
{
}

static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}

static inline int iommu_device_sysfs_add(struct iommu_device *iommu,
					 struct device *parent,
					 const struct attribute_group **groups,
					 const char *fmt, ...)
{
	return -ENODEV;
}

static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
}

static inline int iommu_device_link(struct iommu_device *iommu,
				    struct device *link)
{
	return -EINVAL;
}

static inline void iommu_device_unlink(struct iommu_device *iommu,
				       struct device *link)
{
}

static inline int iommu_fwspec_init(struct device *dev,
				    struct fwnode_handle *iommu_fwnode,
				    const struct iommu_ops *ops)
{
	return -ENODEV;
}

static inline void iommu_fwspec_free(struct device *dev)
{
}

static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
				       int num_ids)
{
	return -ENODEV;
}

static inline
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
	return NULL;
}

static inline int
iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline int
iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	return NULL;
}

static inline int iommu_device_use_default_domain(struct device *dev)
{
	return 0;
}

static inline void iommu_device_unuse_default_domain(struct device *dev)
{
}

static inline int
iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
{
	return -ENODEV;
}

static inline void iommu_group_release_dma_owner(struct iommu_group *group)
{
}

static inline bool iommu_group_dma_owner_claimed(struct iommu_group *group)
{
	return false;
}

static inline void iommu_device_release_dma_owner(struct device *dev)
{
}

static inline int iommu_device_claim_dma_owner(struct device *dev, void *owner)
{
	return -ENODEV;
}

static inline struct iommu_domain *
iommu_sva_domain_alloc(struct device *dev, struct mm_struct *mm)
{
	return NULL;
}

static inline int iommu_attach_device_pasid(struct iommu_domain *domain,
					    struct device *dev, ioasid_t pasid)
{
	return -ENODEV;
}

static inline void iommu_detach_device_pasid(struct iommu_domain *domain,
					     struct device *dev, ioasid_t pasid)
{
}

static inline struct iommu_domain *
iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
			       unsigned int type)
{
	return NULL;
}

static inline ioasid_t iommu_alloc_global_pasid(struct device *dev)
{
	return IOMMU_PASID_INVALID;
}

static inline void iommu_free_global_pasid(ioasid_t pasid) {}
#endif /* CONFIG_IOMMU_API */

/**
 * iommu_map_sgtable - Map the given buffer to the IOMMU domain
 * @domain:	The IOMMU domain to perform the mapping
 * @iova:	The start address to map the buffer
 * @sgt:	The sg_table object describing the buffer
 * @prot:	IOMMU protection bits
 *
 * Creates a mapping at @iova for the buffer described by a scatterlist
 * stored in the given sg_table object in the provided IOMMU domain.
 */
static inline ssize_t iommu_map_sgtable(struct iommu_domain *domain,
			unsigned long iova, struct sg_table *sgt, int prot)
{
	return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot,
			    GFP_KERNEL);
}
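/*
 * Illustrative sketch (editor's example): mapping a previously built sg_table
 * and propagating failure; @total_len is the caller's buffer length.
 *
 *	ssize_t mapped = iommu_map_sgtable(domain, iova, sgt,
 *					   IOMMU_READ | IOMMU_WRITE);
 *
 *	if (mapped < 0)
 *		return mapped;
 *	if (mapped < (ssize_t)total_len)
 *		return -EINVAL;
 */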
#ifdef CONFIG_IOMMU_DEBUGFS
extern	struct dentry *iommu_debugfs_dir;
void iommu_debugfs_setup(void);
#else
static inline void iommu_debugfs_setup(void) {}
#endif

#ifdef CONFIG_IOMMU_DMA
#include <linux/msi.h>

/* Setup call for arch DMA mapping code */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit);

int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);

int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg);

#else /* CONFIG_IOMMU_DMA */

struct msi_desc;
struct msi_msg;

static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
{
}

static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	return -ENODEV;
}

static inline int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	return 0;
}

static inline void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
}

#endif /* CONFIG_IOMMU_DMA */

/*
 * Newer generations of Tegra SoCs require devices' stream IDs to be directly
 * programmed into some registers. These are always paired with a Tegra SMMU or
 * ARM SMMU, for which the contents of the struct iommu_fwspec are known. Use
 * this helper to formalize access to these internals.
 */
#define TEGRA_STREAM_ID_BYPASS 0x7f

static inline bool tegra_dev_iommu_get_stream_id(struct device *dev, u32 *stream_id)
{
#ifdef CONFIG_IOMMU_API
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec && fwspec->num_ids == 1) {
		*stream_id = fwspec->ids[0] & 0xffff;
		return true;
	}
#endif

	return false;
}

#ifdef CONFIG_IOMMU_SVA
static inline void mm_pasid_init(struct mm_struct *mm)
{
	mm->pasid = IOMMU_PASID_INVALID;
}
static inline bool mm_valid_pasid(struct mm_struct *mm)
{
	return mm->pasid != IOMMU_PASID_INVALID;
}
void mm_pasid_drop(struct mm_struct *mm);
struct iommu_sva *iommu_sva_bind_device(struct device *dev,
					struct mm_struct *mm);
void iommu_sva_unbind_device(struct iommu_sva *handle);
u32 iommu_sva_get_pasid(struct iommu_sva *handle);
#else
static inline struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
	return NULL;
}

static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
{
}

static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	return IOMMU_PASID_INVALID;
}
static inline void mm_pasid_init(struct mm_struct *mm) {}
static inline bool mm_valid_pasid(struct mm_struct *mm) { return false; }
static inline void mm_pasid_drop(struct mm_struct *mm) {}
#endif /* CONFIG_IOMMU_SVA */

#endif /* __LINUX_IOMMU_H */
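/*
 * Illustrative sketch (editor's example): a device driver using SVA binds the
 * current process's mm and programs the returned PASID into its hardware;
 * example_hw_set_pasid() is hypothetical.
 *
 *	struct iommu_sva *handle = iommu_sva_bind_device(dev, current->mm);
 *
 *	if (IS_ERR_OR_NULL(handle))
 *		return -ENODEV;
 *	example_hw_set_pasid(dev, iommu_sva_get_pasid(handle));
 *	...
 *	iommu_sva_unbind_device(handle);
 */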