/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 */

#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H

#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
#include <uapi/linux/iommu.h>

#define IOMMU_READ	(1 << 0)
#define IOMMU_WRITE	(1 << 1)
#define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC	(1 << 3)
#define IOMMU_MMIO	(1 << 4) /* e.g. things like MSI doorbells */
/*
 * Where the bus hardware includes a privilege level as part of its access type
 * markings, and certain devices are capable of issuing transactions marked as
 * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
 * given permission flags only apply to accesses at the higher privilege level,
 * and that unprivileged transactions should have as little access as possible.
 * This would usually imply the same permissions as kernel mappings on the CPU,
 * if the IOMMU page table format is equivalent.
 */
#define IOMMU_PRIV	(1 << 5)
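
/*
 * Illustrative sketch (not part of the API): composing the protection flags
 * above for a hypothetical mapping via iommu_map(), declared further down in
 * this header. The iova/paddr/size values are made up; real callers must
 * respect the domain's aperture and pgsize_bitmap.
 *
 *	int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE;
 *	int ret = iommu_map(domain, 0x100000, page_to_phys(page), SZ_4K,
 *			    prot, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 */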

struct iommu_ops;
struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct iommu_domain_ops;
struct notifier_block;
struct iommu_sva;
struct iommu_fault_event;
struct iommu_dma_cookie;

/* iommu fault flags */
#define IOMMU_FAULT_READ	0x0
#define IOMMU_FAULT_WRITE	0x1

typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
			struct device *, unsigned long, int, void *);
typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);

struct iommu_domain_geometry {
	dma_addr_t aperture_start; /* First address that can be mapped    */
	dma_addr_t aperture_end;   /* Last address that can be mapped     */
	bool force_aperture;       /* DMA only allowed in mappable range? */
};
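
/*
 * Illustrative sketch: a caller validating a candidate IOVA range against a
 * domain's reported geometry before mapping. 'iova' and 'size' are assumed
 * inputs; when force_aperture is set, ranges outside the aperture must be
 * rejected.
 *
 *	static bool iova_in_aperture(struct iommu_domain *d,
 *				     dma_addr_t iova, size_t size)
 *	{
 *		return iova >= d->geometry.aperture_start &&
 *		       iova + size - 1 <= d->geometry.aperture_end;
 *	}
 */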

/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING	(1U << 0)  /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API	(1U << 1)  /* Domain for use in DMA-API
					      implementation              */
#define __IOMMU_DOMAIN_PT	(1U << 2)  /* Domain is identity mapped   */
#define __IOMMU_DOMAIN_DMA_FQ	(1U << 3)  /* DMA-API uses flush queue    */

#define __IOMMU_DOMAIN_SVA	(1U << 4)  /* Shared process address space */

#define IOMMU_DOMAIN_ALLOC_FLAGS	~__IOMMU_DOMAIN_DMA_FQ
/*
 * These are the possible domain types
 *
 * IOMMU_DOMAIN_BLOCKED	- All DMA is blocked, can be used to isolate
 *			  devices
 * IOMMU_DOMAIN_IDENTITY	- DMA addresses are system physical addresses
 * IOMMU_DOMAIN_UNMANAGED	- DMA mappings managed by IOMMU-API user, used
 *			  for VMs
 * IOMMU_DOMAIN_DMA	- Internally used for DMA-API implementations.
 *			  This flag allows IOMMU drivers to implement
 *			  certain optimizations for these domains
 * IOMMU_DOMAIN_DMA_FQ	- As above, but definitely using batched TLB
 *			  invalidation.
 * IOMMU_DOMAIN_SVA	- DMA addresses are shared process addresses
 *			  represented by mm_struct's.
 */
#define IOMMU_DOMAIN_BLOCKED	(0U)
#define IOMMU_DOMAIN_IDENTITY	(__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED	(__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API)
#define IOMMU_DOMAIN_DMA_FQ	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API |	\
				 __IOMMU_DOMAIN_DMA_FQ)
#define IOMMU_DOMAIN_SVA	(__IOMMU_DOMAIN_SVA)
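
/*
 * Illustrative sketch: the domain type values above are bitmasks of the
 * feature flags, so generic code can test capabilities rather than enumerate
 * types. For example, a hypothetical check for "supports iommu_map/unmap":
 *
 *	if (domain->type & __IOMMU_DOMAIN_PAGING)
 *		... the domain accepts iommu_map()/iommu_unmap() ...
 *
 * (iommu_is_dma_domain() below does the same for __IOMMU_DOMAIN_DMA_API.)
 */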

struct iommu_domain {
	unsigned type;
	const struct iommu_domain_ops *ops;
	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	struct iommu_domain_geometry geometry;
	struct iommu_dma_cookie *iova_cookie;
	enum iommu_page_response_code (*iopf_handler)(struct iommu_fault *fault,
						      void *data);
	void *fault_data;
	union {
		struct {
			iommu_fault_handler_t handler;
			void *handler_token;
		};
		struct {	/* IOMMU_DOMAIN_SVA */
			struct mm_struct *mm;
			int users;
		};
	};
};

static inline bool iommu_is_dma_domain(struct iommu_domain *domain)
{
	return domain->type & __IOMMU_DOMAIN_DMA_API;
}

enum iommu_cap {
	IOMMU_CAP_CACHE_COHERENCY,	/* IOMMU_CACHE is supported */
	IOMMU_CAP_NOEXEC,		/* IOMMU_NOEXEC flag */
	IOMMU_CAP_PRE_BOOT_PROTECTION,	/* Firmware says it used the IOMMU for
					   DMA protection and we should too */
	/*
	 * Per-device flag indicating if enforce_cache_coherency() will work on
	 * this device.
	 */
	IOMMU_CAP_ENFORCE_CACHE_COHERENCY,
	/*
	 * IOMMU driver does not issue TLB maintenance during .unmap, so can
	 * usefully support the non-strict DMA flush queue.
	 */
	IOMMU_CAP_DEFERRED_FLUSH,
};

/* These are the possible reserved region types */
enum iommu_resv_type {
	/* Memory regions which must be mapped 1:1 at all times */
	IOMMU_RESV_DIRECT,
	/*
	 * Memory regions which are advertised to be 1:1 but are
	 * commonly considered relaxable in some conditions,
	 * for instance in device assignment use case (USB, Graphics)
	 */
	IOMMU_RESV_DIRECT_RELAXABLE,
	/* Arbitrary "never map this or give it to a device" address ranges */
	IOMMU_RESV_RESERVED,
	/* Hardware MSI region (untranslated) */
	IOMMU_RESV_MSI,
	/* Software-managed MSI translation window */
	IOMMU_RESV_SW_MSI,
};

/**
 * struct iommu_resv_region - descriptor for a reserved memory region
 * @list: Linked list pointers
 * @start: System physical start address of the region
 * @length: Length of the region in bytes
 * @prot: IOMMU Protection flags (READ/WRITE/...)
 * @type: Type of the reserved region
 * @free: Callback to free associated memory allocations
 */
struct iommu_resv_region {
	struct list_head list;
	phys_addr_t start;
	size_t length;
	int prot;
	enum iommu_resv_type type;
	void (*free)(struct device *dev, struct iommu_resv_region *region);
};
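
/*
 * Illustrative sketch: how a consumer might walk the reserved regions of a
 * device. The helpers used here, iommu_get_resv_regions() and
 * iommu_put_resv_regions(), are declared further down in this header.
 *
 *	LIST_HEAD(resv_regions);
 *	struct iommu_resv_region *region;
 *
 *	iommu_get_resv_regions(dev, &resv_regions);
 *	list_for_each_entry(region, &resv_regions, list) {
 *		if (region->type == IOMMU_RESV_DIRECT)
 *			... must be identity-mapped before attach ...
 *	}
 *	iommu_put_resv_regions(dev, &resv_regions);
 */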

struct iommu_iort_rmr_data {
	struct iommu_resv_region rr;

	/* Stream IDs associated with IORT RMR entry */
	const u32 *sids;
	u32 num_sids;
};

/**
 * enum iommu_dev_features - Per device IOMMU features
 * @IOMMU_DEV_FEAT_SVA: Shared Virtual Addresses
 * @IOMMU_DEV_FEAT_IOPF: I/O Page Faults such as PRI or Stall. Generally
 *			 enabling %IOMMU_DEV_FEAT_SVA requires
 *			 %IOMMU_DEV_FEAT_IOPF, but some devices manage I/O Page
 *			 Faults themselves instead of relying on the IOMMU. When
 *			 supported, this feature must be enabled before and
 *			 disabled after %IOMMU_DEV_FEAT_SVA.
 *
 * Device drivers enable a feature using iommu_dev_enable_feature().
 */
enum iommu_dev_features {
	IOMMU_DEV_FEAT_SVA,
	IOMMU_DEV_FEAT_IOPF,
};

#define IOMMU_PASID_INVALID	(-1U)
typedef unsigned int ioasid_t;

#ifdef CONFIG_IOMMU_API

/**
 * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
 *
 * @start: IOVA representing the start of the range to be flushed
 * @end: IOVA representing the end of the range to be flushed (inclusive)
 * @pgsize: The interval at which to perform the flush
 * @freelist: Removed pages to free after sync
 * @queued: Indicates that the flush will be queued
 *
 * This structure is intended to be updated by multiple calls to the
 * ->unmap() function in struct iommu_ops before eventually being passed
 * into ->iotlb_sync(). Drivers can add pages to @freelist to be freed after
 * ->iotlb_sync() or ->iotlb_flush_all() have cleared all cached references to
 * them. @queued is set to indicate when ->iotlb_flush_all() will be called
 * later instead of ->iotlb_sync(), so drivers may optimise accordingly.
 */
struct iommu_iotlb_gather {
	unsigned long		start;
	unsigned long		end;
	size_t			pgsize;
	struct list_head	freelist;
	bool			queued;
};

/**
 * struct iommu_ops - iommu ops and capabilities
 * @capable: check capability
 * @domain_alloc: allocate iommu domain
 * @probe_device: Add device to iommu driver handling
 * @release_device: Remove device from iommu driver handling
 * @probe_finalize: Do final setup work after the device is added to an IOMMU
 *		    group and attached to the group's domain
 * @set_platform_dma_ops: Return control back to the platform DMA ops. This op
 *			  is to support old IOMMU drivers, new drivers should
 *			  use default domains, and the common IOMMU DMA ops.
 * @device_group: find iommu group for a particular device
 * @get_resv_regions: Request list of reserved regions for a device
 * @of_xlate: add OF master IDs to iommu grouping
 * @is_attach_deferred: Check if domain attach should be deferred from iommu
 *			driver init to device driver init (default no)
 * @dev_enable/disable_feat: per device entries to enable/disable
 *			     iommu specific features.
 * @page_response: handle page request response
 * @def_domain_type: device default domain type, return value:
 *		- IOMMU_DOMAIN_IDENTITY: must use an identity domain
 *		- IOMMU_DOMAIN_DMA: must use a dma domain
 *		- 0: use the default setting
 * @default_domain_ops: the default ops for domains
 * @remove_dev_pasid: Remove any translation configurations of a specific
 *		      pasid, so that any DMA transactions with this pasid
 *		      will be blocked by the hardware.
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 * @owner: Driver module providing these ops
 */
struct iommu_ops {
	bool (*capable)(struct device *dev, enum iommu_cap);

	/* Domain allocation and freeing by the iommu driver */
	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);

	struct iommu_device *(*probe_device)(struct device *dev);
	void (*release_device)(struct device *dev);
	void (*probe_finalize)(struct device *dev);
	void (*set_platform_dma_ops)(struct device *dev);
	struct iommu_group *(*device_group)(struct device *dev);

	/* Request/Free a list of reserved regions for a device */
	void (*get_resv_regions)(struct device *dev, struct list_head *list);

	int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
	bool (*is_attach_deferred)(struct device *dev);

	/* Per device IOMMU features */
	int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
	int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);

	int (*page_response)(struct device *dev,
			     struct iommu_fault_event *evt,
			     struct iommu_page_response *msg);

	int (*def_domain_type)(struct device *dev);
	void (*remove_dev_pasid)(struct device *dev, ioasid_t pasid);

	const struct iommu_domain_ops *default_domain_ops;
	unsigned long pgsize_bitmap;
	struct module *owner;
};

/**
 * struct iommu_domain_ops - domain specific operations
 * @attach_dev: attach an iommu domain to a device
 *  Return:
 * * 0		- success
 * * EINVAL	- can indicate that device and domain are incompatible due to
 *		  some previous configuration of the domain, in which case the
 *		  driver shouldn't log an error, since it is legitimate for a
 *		  caller to test reuse of existing domains. Otherwise, it may
 *		  still represent some other fundamental problem
 * * ENOMEM	- out of memory
 * * ENOSPC	- non-ENOMEM type of resource allocation failures
 * * EBUSY	- device is attached to a domain and cannot be changed
 * * ENODEV	- device specific errors, not able to be attached
 * * <others>	- treated as ENODEV by the caller. Use is discouraged
 * @set_dev_pasid: set an iommu domain for a pasid of a device
 * @map: map a physically contiguous memory region to an iommu domain
 * @map_pages: map a physically contiguous set of pages of the same size to
 *	       an iommu domain.
 * @unmap: unmap a physically contiguous memory region from an iommu domain
 * @unmap_pages: unmap a number of pages of the same size from an iommu domain
 * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
 * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
 * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
 *		queue
 * @iova_to_phys: translate iova to physical address
 * @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE,
 *			     including no-snoop TLPs on PCIe or other platform
 *			     specific mechanisms.
 * @enable_nesting: Enable nesting
 * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
 * @free: Release the domain after use.
 */
struct iommu_domain_ops {
	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev,
			     ioasid_t pasid);

	int (*map)(struct iommu_domain *domain, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
	int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
			 int prot, gfp_t gfp, size_t *mapped);
	size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
			size_t size, struct iommu_iotlb_gather *iotlb_gather);
	size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
			      size_t pgsize, size_t pgcount,
			      struct iommu_iotlb_gather *iotlb_gather);

	void (*flush_iotlb_all)(struct iommu_domain *domain);
	void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
			       size_t size);
	void (*iotlb_sync)(struct iommu_domain *domain,
			   struct iommu_iotlb_gather *iotlb_gather);

	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
				    dma_addr_t iova);

	bool (*enforce_cache_coherency)(struct iommu_domain *domain);
	int (*enable_nesting)(struct iommu_domain *domain);
	int (*set_pgtable_quirks)(struct iommu_domain *domain,
				  unsigned long quirks);

	void (*free)(struct iommu_domain *domain);
};
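
/*
 * Illustrative sketch: the shape of a hypothetical driver's domain ops. The
 * my_*() functions are placeholders, not a real driver; the page-count based
 * map_pages/unmap_pages callbacks are preferred over single-region map/unmap.
 *
 *	static const struct iommu_domain_ops my_domain_ops = {
 *		.attach_dev	 = my_attach_dev,
 *		.map_pages	 = my_map_pages,
 *		.unmap_pages	 = my_unmap_pages,
 *		.flush_iotlb_all = my_flush_iotlb_all,
 *		.iotlb_sync	 = my_iotlb_sync,
 *		.iova_to_phys	 = my_iova_to_phys,
 *		.free		 = my_domain_free,
 *	};
 */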

/**
 * struct iommu_device - IOMMU core representation of one IOMMU hardware
 *			 instance
 * @list: Used by the iommu-core to keep a list of registered iommus
 * @ops: iommu-ops for talking to this iommu
 * @fwnode: firmware handle for this IOMMU instance
 * @dev: struct device for sysfs handling
 * @max_pasids: number of supported PASIDs
 */
struct iommu_device {
	struct list_head list;
	const struct iommu_ops *ops;
	struct fwnode_handle *fwnode;
	struct device *dev;
	u32 max_pasids;
};

/**
 * struct iommu_fault_event - Generic fault event
 *
 * Can represent recoverable faults such as a page request or
 * unrecoverable faults such as DMA or IRQ remapping faults.
 *
 * @fault: fault descriptor
 * @list: pending fault event list, used for tracking responses
 */
struct iommu_fault_event {
	struct iommu_fault fault;
	struct list_head list;
};
/**
 * struct iommu_fault_param - per-device IOMMU fault data
 * @handler: Callback function to handle IOMMU faults at device level
 * @data: handler private data
 * @faults: holds the pending faults which need a response
 * @lock: protect pending faults list
 */
struct iommu_fault_param {
	iommu_dev_fault_handler_t handler;
	void *data;
	struct list_head faults;
	struct mutex lock;
};

/**
 * struct dev_iommu - Collection of per-device IOMMU data
 *
 * @lock: mutex protecting the contents of this structure
 * @fault_param: IOMMU detected device fault reporting data
 * @iopf_param: I/O Page Fault queue and data
 * @fwspec: IOMMU fwspec data
 * @iommu_dev: IOMMU device this device is linked to
 * @priv: IOMMU Driver private data
 * @max_pasids: number of PASIDs this device can consume
 * @attach_deferred: the dma domain attachment is deferred
 *
 * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
 *	struct iommu_group	*iommu_group;
 */
struct dev_iommu {
	struct mutex lock;
	struct iommu_fault_param	*fault_param;
	struct iopf_device_param	*iopf_param;
	struct iommu_fwspec		*fwspec;
	struct iommu_device		*iommu_dev;
	void				*priv;
	u32				max_pasids;
	u32				attach_deferred:1;
};

int iommu_device_register(struct iommu_device *iommu,
			  const struct iommu_ops *ops,
			  struct device *hwdev);
void iommu_device_unregister(struct iommu_device *iommu);
int iommu_device_sysfs_add(struct iommu_device *iommu,
			   struct device *parent,
			   const struct attribute_group **groups,
			   const char *fmt, ...) __printf(4, 5);
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int iommu_device_link(struct iommu_device *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain);

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return (struct iommu_device *)dev_get_drvdata(dev);
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
	*gather = (struct iommu_iotlb_gather) {
		.start	= ULONG_MAX,
		.freelist = LIST_HEAD_INIT(gather->freelist),
	};
}

static inline const struct iommu_ops *dev_iommu_ops(struct device *dev)
{
	/*
	 * Assume that valid ops must be installed if iommu_probe_device()
	 * has succeeded. The device ops are essentially for internal use
	 * within the IOMMU subsystem itself, so we should be able to trust
	 * ourselves not to misuse the helper.
	 */
	return dev->iommu->iommu_dev->ops;
}

extern int bus_iommu_probe(const struct bus_type *bus);
extern bool iommu_present(const struct bus_type *bus);
extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
extern bool iommu_group_has_isolated_msi(struct iommu_group *group);
extern struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus);
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
			       struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
				struct device *dev);
extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
				   struct device *dev, ioasid_t pasid);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			  size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
			       unsigned long iova, size_t size,
			       struct iommu_iotlb_gather *iotlb_gather);
extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			    struct scatterlist *sg, unsigned int nents,
			    int prot, gfp_t gfp);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
				    iommu_fault_handler_t handler, void *token);
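
/*
 * Illustrative sketch: installing a (hypothetical) report-only fault handler
 * on an unmanaged domain. The handler signature must match
 * iommu_fault_handler_t; returning -ENOSYS keeps the default reporting.
 *
 *	static int my_fault_handler(struct iommu_domain *domain,
 *				    struct device *dev, unsigned long iova,
 *				    int flags, void *token)
 *	{
 *		dev_err(dev, "fault at %lx (%s)\n", iova,
 *			(flags & IOMMU_FAULT_WRITE) ? "write" : "read");
 *		return -ENOSYS;
 *	}
 *
 *	iommu_set_fault_handler(domain, my_fault_handler, NULL);
 */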

extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_set_default_passthrough(bool cmd_line);
extern void iommu_set_default_translated(bool cmd_line);
extern bool iommu_default_passthrough(void);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
			enum iommu_resv_type type, gfp_t gfp);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
					struct list_head *head);

extern int iommu_attach_group(struct iommu_domain *domain,
			      struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
			       struct iommu_group *group);
extern struct iommu_group *iommu_group_alloc(void);
extern void *iommu_group_get_iommudata(struct iommu_group *group);
extern void iommu_group_set_iommudata(struct iommu_group *group,
				      void *iommu_data,
				      void (*release)(void *iommu_data));
extern int iommu_group_set_name(struct iommu_group *group, const char *name);
extern int iommu_group_add_device(struct iommu_group *group,
				  struct device *dev);
extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
				    int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);
extern int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data);

extern int iommu_unregister_device_fault_handler(struct device *dev);

extern int iommu_report_device_fault(struct device *dev,
				     struct iommu_fault_event *evt);
extern int iommu_page_response(struct device *dev,
			       struct iommu_page_response *msg);

extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);

int iommu_enable_nesting(struct iommu_domain *domain);
int iommu_set_pgtable_quirks(struct iommu_domain *domain,
			     unsigned long quirks);

void iommu_set_dma_strict(void);

extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
			      unsigned long iova, int flags);

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	if (domain->ops->flush_iotlb_all)
		domain->ops->flush_iotlb_all(domain);
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *iotlb_gather)
{
	if (domain->ops->iotlb_sync)
		domain->ops->iotlb_sync(domain, iotlb_gather);

	iommu_iotlb_gather_init(iotlb_gather);
}

/**
 * iommu_iotlb_gather_is_disjoint - Checks whether a new range is disjoint
 *
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to check whether a new range and the gathered range
 * are disjoint. For many IOMMUs, flushing the IOMMU in this case is better
 * than merging the two, which might lead to unnecessary invalidations.
 */
static inline
bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather,
				    unsigned long iova, size_t size)
{
	unsigned long start = iova, end = start + size - 1;

	return gather->end != 0 &&
		(end + 1 < gather->start || start > gather->end + 1);
}

/**
 * iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build arbitrarily-sized invalidation commands
 * where only the address range matters, and simply minimising intermediate
 * syncs is preferred.
 */
static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather,
						unsigned long iova, size_t size)
{
	unsigned long end = iova + size - 1;

	if (gather->start > iova)
		gather->start = iova;
	if (gather->end < end)
		gather->end = end;
}

/**
 * iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation
 * @domain: IOMMU domain to be invalidated
 * @gather: TLB gather data
 * @iova: start of page to invalidate
 * @size: size of page to invalidate
 *
 * Helper for IOMMU drivers to build invalidation commands based on individual
 * pages, or with page size/table level hints which cannot be gathered if they
 * differ.
 */
static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
	/*
	 * If the new page is disjoint from the current range or is mapped at
	 * a different granularity, then sync the TLB so that the gather
	 * structure can be rewritten.
	 */
	if ((gather->pgsize && gather->pgsize != size) ||
	    iommu_iotlb_gather_is_disjoint(gather, iova, size))
		iommu_iotlb_sync(domain, gather);

	gather->pgsize = size;
	iommu_iotlb_gather_add_range(gather, iova, size);
}

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
	return gather && gather->queued;
}
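
/*
 * Illustrative sketch: the expected caller-side pattern for a batched unmap,
 * assuming an already-mapped range at 'iova'. The gather structure
 * accumulates invalidation state across iommu_unmap_fast() calls until
 * iommu_iotlb_sync() flushes it.
 *
 *	struct iommu_iotlb_gather gather;
 *
 *	iommu_iotlb_gather_init(&gather);
 *	iommu_unmap_fast(domain, iova, size, &gather);
 *	iommu_iotlb_sync(domain, &gather);
 */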

/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
/* FSL-MC device grouping function */
struct iommu_group *fsl_mc_device_group(struct device *dev);

/**
 * struct iommu_fwspec - per-device IOMMU instance data
 * @ops: ops for this device's IOMMU
 * @iommu_fwnode: firmware handle for this device's IOMMU
 * @flags: IOMMU_FWSPEC_* flags
 * @num_ids: number of associated device IDs
 * @ids: IDs which this device may present to the IOMMU
 *
 * Note that the IDs (and any other information, really) stored in this
 * structure should be considered private to the IOMMU device driver and
 * are not to be used directly by IOMMU consumers.
 */
struct iommu_fwspec {
	const struct iommu_ops	*ops;
	struct fwnode_handle	*iommu_fwnode;
	u32			flags;
	unsigned int		num_ids;
	u32			ids[];
};

/* ATS is supported */
#define IOMMU_FWSPEC_PCI_RC_ATS			(1 << 0)

/**
 * struct iommu_sva - handle to a device-mm bond
 * @dev: device bound to the mm
 * @domain: SVA domain backing the bond
 */
struct iommu_sva {
	struct device		*dev;
	struct iommu_domain	*domain;
};

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->fwspec;
	else
		return NULL;
}

static inline void dev_iommu_fwspec_set(struct device *dev,
					struct iommu_fwspec *fwspec)
{
	dev->iommu->fwspec = fwspec;
}

static inline void *dev_iommu_priv_get(struct device *dev)
{
	if (dev->iommu)
		return dev->iommu->priv;
	else
		return NULL;
}

static inline void dev_iommu_priv_set(struct device *dev, void *priv)
{
	dev->iommu->priv = priv;
}
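
/*
 * Illustrative sketch: an IOMMU driver stashing per-device state from a
 * hypothetical probe_device() implementation and retrieving it later. The
 * 'struct my_dev_data' type is a placeholder.
 *
 *	In probe_device():	dev_iommu_priv_set(dev, my_data);
 *	In attach_dev() etc.:	struct my_dev_data *d = dev_iommu_priv_get(dev);
 */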

int iommu_probe_device(struct device *dev);

int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);

int iommu_device_use_default_domain(struct device *dev);
void iommu_device_unuse_default_domain(struct device *dev);

int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner);
void iommu_group_release_dma_owner(struct iommu_group *group);
bool iommu_group_dma_owner_claimed(struct iommu_group *group);

int iommu_device_claim_dma_owner(struct device *dev, void *owner);
void iommu_device_release_dma_owner(struct device *dev);

struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
					    struct mm_struct *mm);
int iommu_attach_device_pasid(struct iommu_domain *domain,
			      struct device *dev, ioasid_t pasid);
void iommu_detach_device_pasid(struct iommu_domain *domain,
			       struct device *dev, ioasid_t pasid);
struct iommu_domain *
iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
			       unsigned int type);
#else /* CONFIG_IOMMU_API */

struct iommu_ops {};
struct iommu_group {};
struct iommu_fwspec {};
struct iommu_device {};
struct iommu_fault_param {};
struct iommu_iotlb_gather {};

static inline bool iommu_present(const struct bus_type *bus)
{
	return false;
}

static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap)
{
	return false;
}

static inline struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)
{
	return NULL;
}

static inline void iommu_domain_free(struct iommu_domain *domain)
{
}

static inline int iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
}

static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	return NULL;
}

static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	return -ENODEV;
}

static inline size_t iommu_unmap(struct iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	return 0;
}

static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
				      unsigned long iova, size_t size,
				      struct iommu_iotlb_gather *iotlb_gather)
{
	return 0;
}

static inline ssize_t iommu_map_sg(struct iommu_domain *domain,
				   unsigned long iova, struct scatterlist *sg,
				   unsigned int nents, int prot, gfp_t gfp)
{
	return -ENODEV;
}

static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
{
}

static inline void iommu_iotlb_sync(struct iommu_domain *domain,
				    struct iommu_iotlb_gather *iotlb_gather)
{
}

static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	return 0;
}

static inline void iommu_set_fault_handler(struct iommu_domain *domain,
					   iommu_fault_handler_t handler, void *token)
{
}

static inline void iommu_get_resv_regions(struct device *dev,
					  struct list_head *list)
{
}

static inline void iommu_put_resv_regions(struct device *dev,
					  struct list_head *list)
{
}

static inline int iommu_get_group_resv_regions(struct iommu_group *group,
					       struct list_head *head)
{
	return -ENODEV;
}

static inline void iommu_set_default_passthrough(bool cmd_line)
{
}

static inline void iommu_set_default_translated(bool cmd_line)
{
}

static inline bool iommu_default_passthrough(void)
{
	return true;
}

static inline int iommu_attach_group(struct iommu_domain *domain,
				     struct iommu_group *group)
{
	return -ENODEV;
}

static inline void iommu_detach_group(struct iommu_domain *domain,
				      struct iommu_group *group)
{
}

static inline struct iommu_group *iommu_group_alloc(void)
{
	return ERR_PTR(-ENODEV);
}

static inline void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return NULL;
}

static inline void iommu_group_set_iommudata(struct iommu_group *group,
					     void *iommu_data,
					     void (*release)(void *iommu_data))
{
}

static inline int iommu_group_set_name(struct iommu_group *group,
				       const char *name)
{
	return -ENODEV;
}

static inline int iommu_group_add_device(struct iommu_group *group,
					 struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_group_remove_device(struct device *dev)
{
}

static inline int iommu_group_for_each_dev(struct iommu_group *group,
					   void *data,
					   int (*fn)(struct device *, void *))
{
	return -ENODEV;
}

static inline struct iommu_group *iommu_group_get(struct device *dev)
{
	return NULL;
}

static inline void iommu_group_put(struct iommu_group *group)
{
}

static inline
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	return -ENODEV;
}

static inline int iommu_unregister_device_fault_handler(struct device *dev)
{
	return 0;
}

static inline
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	return -ENODEV;
}

static inline int iommu_page_response(struct device *dev,
				      struct iommu_page_response *msg)
{
	return -ENODEV;
}

static inline int iommu_group_id(struct iommu_group *group)
{
	return -ENODEV;
}

static inline int iommu_set_pgtable_quirks(struct iommu_domain *domain,
					   unsigned long quirks)
{
	return 0;
}

static inline int iommu_device_register(struct iommu_device *iommu,
					const struct iommu_ops *ops,
					struct device *hwdev)
{
	return -ENODEV;
}

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return NULL;
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
}

static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
{
	return false;
}

static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}

static inline int iommu_device_sysfs_add(struct iommu_device *iommu,
					 struct device *parent,
					 const struct attribute_group **groups,
					 const char *fmt, ...)
{
	return -ENODEV;
}

static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
}

static inline int iommu_device_link(struct iommu_device *iommu,
				    struct device *link)
{
	return -EINVAL;
}

static inline void iommu_device_unlink(struct iommu_device *iommu,
				       struct device *link)
{
}

static inline int iommu_fwspec_init(struct device *dev,
				    struct fwnode_handle *iommu_fwnode,
				    const struct iommu_ops *ops)
{
	return -ENODEV;
}

static inline void iommu_fwspec_free(struct device *dev)
{
}

static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
				       int num_ids)
{
	return -ENODEV;
}

static inline
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
	return NULL;
}

static inline int
iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline int
iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	return NULL;
}

static inline int iommu_device_use_default_domain(struct device *dev)
{
	return 0;
}

static inline void iommu_device_unuse_default_domain(struct device *dev)
{
}

static inline int
iommu_group_claim_dma_owner(struct iommu_group *group, void *owner)
{
	return -ENODEV;
}

static inline void iommu_group_release_dma_owner(struct iommu_group *group)
{
}

static inline bool iommu_group_dma_owner_claimed(struct iommu_group *group)
{
	return false;
}

static inline void iommu_device_release_dma_owner(struct device *dev)
{
}

static inline int iommu_device_claim_dma_owner(struct device *dev, void *owner)
{
	return -ENODEV;
}

static inline struct iommu_domain *
iommu_sva_domain_alloc(struct device *dev, struct mm_struct *mm)
{
	return NULL;
}

static inline int iommu_attach_device_pasid(struct iommu_domain *domain,
					    struct device *dev, ioasid_t pasid)
{
	return -ENODEV;
}

static inline void iommu_detach_device_pasid(struct iommu_domain *domain,
					     struct device *dev, ioasid_t pasid)
{
}

static inline struct iommu_domain *
iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
			       unsigned int type)
{
	return NULL;
}
#endif /* CONFIG_IOMMU_API */

/**
 * iommu_map_sgtable - Map the given buffer to the IOMMU domain
 * @domain: The IOMMU domain to perform the mapping
 * @iova: The start address to map the buffer
 * @sgt: The sg_table object describing the buffer
 * @prot: IOMMU protection bits
 *
 * Creates a mapping at @iova for the buffer described by a scatterlist
 * stored in the given sg_table object in the provided IOMMU domain.
 *
 * Return: number of bytes mapped on success, or a negative errno otherwise,
 * matching iommu_map_sg().
 */
static inline ssize_t iommu_map_sgtable(struct iommu_domain *domain,
			unsigned long iova, struct sg_table *sgt, int prot)
{
	return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot,
			    GFP_KERNEL);
}
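
/*
 * Illustrative sketch: mapping a previously built sg_table at a caller-chosen
 * IOVA. On success the returned byte count covers the whole table, so the
 * mapping can be torn down with a single iommu_unmap() of that size.
 *
 *	ssize_t mapped = iommu_map_sgtable(domain, iova, sgt,
 *					   IOMMU_READ | IOMMU_WRITE);
 *	if (mapped < 0)
 *		return mapped;
 *	...
 *	iommu_unmap(domain, iova, mapped);
 */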

#ifdef CONFIG_IOMMU_DEBUGFS
extern struct dentry *iommu_debugfs_dir;
void iommu_debugfs_setup(void);
#else
static inline void iommu_debugfs_setup(void) {}
#endif

#ifdef CONFIG_IOMMU_DMA
#include <linux/msi.h>

/* Setup call for arch DMA mapping code */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit);

int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);

int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr);
void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg);

#else /* CONFIG_IOMMU_DMA */

struct msi_desc;
struct msi_msg;

static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
{
}

static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	return -ENODEV;
}

static inline int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	return 0;
}

static inline void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
}

#endif /* CONFIG_IOMMU_DMA */

/*
 * Newer generations of Tegra SoCs require devices' stream IDs to be
 * directly programmed into some registers. These are always paired with a
 * Tegra SMMU or ARM SMMU, for which the contents of the struct iommu_fwspec
 * are known. Use this helper to formalize access to these internals.
 */
#define TEGRA_STREAM_ID_BYPASS 0x7f

static inline bool tegra_dev_iommu_get_stream_id(struct device *dev, u32 *stream_id)
{
#ifdef CONFIG_IOMMU_API
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec && fwspec->num_ids == 1) {
		*stream_id = fwspec->ids[0] & 0xffff;
		return true;
	}
#endif

	return false;
}

#ifdef CONFIG_IOMMU_SVA
static inline void mm_pasid_init(struct mm_struct *mm)
{
	mm->pasid = IOMMU_PASID_INVALID;
}
static inline bool mm_valid_pasid(struct mm_struct *mm)
{
	return mm->pasid != IOMMU_PASID_INVALID;
}
void mm_pasid_drop(struct mm_struct *mm);
struct iommu_sva *iommu_sva_bind_device(struct device *dev,
					struct mm_struct *mm);
void iommu_sva_unbind_device(struct iommu_sva *handle);
u32 iommu_sva_get_pasid(struct iommu_sva *handle);
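
/*
 * Illustrative sketch: a device driver binding the current process address
 * space to a (hypothetical) SVA-capable device, assuming IOMMU_DEV_FEAT_SVA
 * has already been enabled via iommu_dev_enable_feature().
 *
 *	struct iommu_sva *handle;
 *	u32 pasid;
 *
 *	handle = iommu_sva_bind_device(dev, current->mm);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	pasid = iommu_sva_get_pasid(handle);
 *	... program 'pasid' into the device ...
 *	iommu_sva_unbind_device(handle);
 */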
#else
static inline struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
{
	return NULL;
}

static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
{
}

static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
	return IOMMU_PASID_INVALID;
}
static inline void mm_pasid_init(struct mm_struct *mm) {}
static inline bool mm_valid_pasid(struct mm_struct *mm) { return false; }
static inline void mm_pasid_drop(struct mm_struct *mm) {}
#endif /* CONFIG_IOMMU_SVA */

#endif /* __LINUX_IOMMU_H */