Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 */

#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H

#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/ioasid.h>
#include <uapi/linux/iommu.h>

#define IOMMU_READ	(1 << 0)
#define IOMMU_WRITE	(1 << 1)
#define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
#define IOMMU_NOEXEC	(1 << 3)
#define IOMMU_MMIO	(1 << 4) /* e.g. things like MSI doorbells */
/*
 * Where the bus hardware includes a privilege level as part of its access type
 * markings, and certain devices are capable of issuing transactions marked as
 * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
 * given permission flags only apply to accesses at the higher privilege level,
 * and that unprivileged transactions should have as little access as possible.
 * This would usually imply the same permissions as kernel mappings on the CPU,
 * if the IOMMU page table format is equivalent.
 */
#define IOMMU_PRIV	(1 << 5)
/*
 * Non-coherent masters can use this page protection flag to set cacheable
 * memory attributes for only a transparent outer level of cache, also known as
 * the last-level or system cache.
 */
#define IOMMU_SYS_CACHE_ONLY	(1 << 6)
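
/*
 * Example (illustrative sketch, not part of this API): the flags above are
 * OR-ed together into the 'prot' argument of iommu_map(), declared later in
 * this header. A cache-coherent, read/write mapping would be built roughly
 * as follows; 'domain', 'iova' and 'paddr' are assumed to be set up by the
 * caller.
 *
 *	int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE;
 *
 *	ret = iommu_map(domain, iova, paddr, PAGE_SIZE, prot);
 *	if (ret)
 *		return ret;
 */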

struct iommu_ops;
struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct notifier_block;
struct iommu_sva;
struct iommu_fault_event;

/* iommu fault flags */
#define IOMMU_FAULT_READ	0x0
#define IOMMU_FAULT_WRITE	0x1

typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
			struct device *, unsigned long, int, void *);
typedef int (*iommu_mm_exit_handler_t)(struct device *dev, struct iommu_sva *,
				       void *);
typedef int (*iommu_dev_fault_handler_t)(struct iommu_fault *, void *);

struct iommu_domain_geometry {
	dma_addr_t aperture_start; /* First address that can be mapped    */
	dma_addr_t aperture_end;   /* Last address that can be mapped     */
	bool force_aperture;       /* DMA only allowed in mappable range? */
};

/* Domain feature flags */
#define __IOMMU_DOMAIN_PAGING	(1U << 0)  /* Support for iommu_map/unmap */
#define __IOMMU_DOMAIN_DMA_API	(1U << 1)  /* Domain for use in DMA-API
					      implementation              */
#define __IOMMU_DOMAIN_PT	(1U << 2)  /* Domain is identity mapped   */
/*
 * These are the possible domain types:
 *
 * IOMMU_DOMAIN_BLOCKED	- All DMA is blocked, can be used to isolate
 *			  devices
 * IOMMU_DOMAIN_IDENTITY	- DMA addresses are system physical addresses
 * IOMMU_DOMAIN_UNMANAGED	- DMA mappings managed by IOMMU-API user, used
 *				  for VMs
 * IOMMU_DOMAIN_DMA	- Internally used for DMA-API implementations.
 *			  This flag allows IOMMU drivers to implement
 *			  certain optimizations for these domains
 */
#define IOMMU_DOMAIN_BLOCKED	(0U)
#define IOMMU_DOMAIN_IDENTITY	(__IOMMU_DOMAIN_PT)
#define IOMMU_DOMAIN_UNMANAGED	(__IOMMU_DOMAIN_PAGING)
#define IOMMU_DOMAIN_DMA	(__IOMMU_DOMAIN_PAGING |	\
				 __IOMMU_DOMAIN_DMA_API)
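
/*
 * Example (hedged sketch): an IOMMU-API user such as a device-assignment
 * framework typically allocates an unmanaged domain and attaches a device to
 * it before creating mappings. iommu_domain_alloc() and friends are declared
 * later in this header; 'dev' is assumed to be a device behind an IOMMU.
 *
 *	struct iommu_domain *domain;
 *
 *	domain = iommu_domain_alloc(dev->bus);
 *	if (!domain)
 *		return -ENOMEM;
 *
 *	ret = iommu_attach_device(domain, dev);
 *	if (ret) {
 *		iommu_domain_free(domain);
 *		return ret;
 *	}
 */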

struct iommu_domain {
	unsigned type;
	const struct iommu_ops *ops;
	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	iommu_fault_handler_t handler;
	void *handler_token;
	struct iommu_domain_geometry geometry;
	void *iova_cookie;
};

enum iommu_cap {
	IOMMU_CAP_CACHE_COHERENCY,	/* IOMMU can enforce cache coherent DMA
					   transactions */
	IOMMU_CAP_INTR_REMAP,		/* IOMMU supports interrupt isolation */
	IOMMU_CAP_NOEXEC,		/* IOMMU_NOEXEC flag */
};
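
/*
 * Example (illustrative sketch): capabilities are queried per bus via
 * iommu_capable(), declared later in this header. A caller that wants
 * coherency-enforcing mappings might check:
 *
 *	if (iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY))
 *		prot |= IOMMU_CACHE;
 */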

/*
 * The following constraints are specific to FSL_PAMUV1:
 *  -aperture must be a power of 2, and naturally aligned
 *  -number of windows must be a power of 2, and the address space size
 *   of each window is determined by aperture size / # of windows
 *  -the actual size of the mapped region of a window must be a power
 *   of 2 starting with 4KB, and the physical address must be naturally
 *   aligned.
 * DOMAIN_ATTR_FSL_PAMUV1 corresponds to the above mentioned constraints.
 * The caller can invoke iommu_domain_get_attr to check if the underlying
 * iommu implementation supports these constraints.
 */

enum iommu_attr {
	DOMAIN_ATTR_GEOMETRY,
	DOMAIN_ATTR_PAGING,
	DOMAIN_ATTR_WINDOWS,
	DOMAIN_ATTR_FSL_PAMU_STASH,
	DOMAIN_ATTR_FSL_PAMU_ENABLE,
	DOMAIN_ATTR_FSL_PAMUV1,
	DOMAIN_ATTR_NESTING,	/* two stages of translation */
	DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
	DOMAIN_ATTR_MAX,
};
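
/*
 * Example (hedged sketch): attributes are read and written with
 * iommu_domain_get_attr()/iommu_domain_set_attr(), declared later in this
 * header. Querying the addressable IOVA range of a domain might look like:
 *
 *	struct iommu_domain_geometry geo;
 *
 *	ret = iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo);
 *	if (!ret && geo.force_aperture)
 *		pr_info("IOVA range: %pad..%pad\n",
 *			&geo.aperture_start, &geo.aperture_end);
 */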

/* These are the possible reserved region types */
enum iommu_resv_type {
	/* Memory regions which must be mapped 1:1 at all times */
	IOMMU_RESV_DIRECT,
	/*
	 * Memory regions which are advertised to be 1:1 but are
	 * commonly considered relaxable in some conditions, for
	 * instance in device-assignment use cases (USB, graphics)
	 */
	IOMMU_RESV_DIRECT_RELAXABLE,
	/* Arbitrary "never map this or give it to a device" address ranges */
	IOMMU_RESV_RESERVED,
	/* Hardware MSI region (untranslated) */
	IOMMU_RESV_MSI,
	/* Software-managed MSI translation window */
	IOMMU_RESV_SW_MSI,
};

/**
 * struct iommu_resv_region - descriptor for a reserved memory region
 * @list: Linked list pointers
 * @start: System physical start address of the region
 * @length: Length of the region in bytes
 * @prot: IOMMU Protection flags (READ/WRITE/...)
 * @type: Type of the reserved region
 */
struct iommu_resv_region {
	struct list_head list;
	phys_addr_t start;
	size_t length;
	int prot;
	enum iommu_resv_type type;
};
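
/*
 * Example (illustrative sketch): callers retrieve the reserved regions of a
 * device with iommu_get_resv_regions(), declared later in this header, and
 * must hand the list back with iommu_put_resv_regions() when done:
 *
 *	struct iommu_resv_region *region;
 *	LIST_HEAD(resv_regions);
 *
 *	iommu_get_resv_regions(dev, &resv_regions);
 *	list_for_each_entry(region, &resv_regions, list)
 *		pr_info("resv: %pap + %zx\n", &region->start, region->length);
 *	iommu_put_resv_regions(dev, &resv_regions);
 */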

/* Per device IOMMU features */
enum iommu_dev_features {
	IOMMU_DEV_FEAT_AUX,	/* Aux-domain feature */
	IOMMU_DEV_FEAT_SVA,	/* Shared Virtual Addresses */
};

#define IOMMU_PASID_INVALID	(-1U)

/**
 * struct iommu_sva_ops - device driver callbacks for an SVA context
 *
 * @mm_exit: called when the mm is about to be torn down by exit_mmap. After
 *           @mm_exit returns, the device must not issue any more transactions
 *           with the PASID given as argument.
 *
 *           The @mm_exit handler is allowed to sleep. Be careful about the
 *           locks taken in @mm_exit, because they might lead to deadlocks if
 *           they are also held when dropping references to the mm. Consider
 *           the following call chain:
 *           mutex_lock(A); mmput(mm) -> exit_mm() -> @mm_exit() -> mutex_lock(A)
 *           Using mmput_async() prevents this scenario.
 */
struct iommu_sva_ops {
	iommu_mm_exit_handler_t mm_exit;
};
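
/*
 * Example (hedged sketch): a driver supporting Shared Virtual Addressing
 * would bind a process address space to the device and install an @mm_exit
 * callback, using iommu_sva_bind_device()/iommu_sva_set_ops() declared later
 * in this header. 'my_mm_exit' is a hypothetical driver callback, not part
 * of this API.
 *
 *	static const struct iommu_sva_ops my_sva_ops = {
 *		.mm_exit = my_mm_exit,
 *	};
 *
 *	handle = iommu_sva_bind_device(dev, current->mm, drvdata);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	ret = iommu_sva_set_ops(handle, &my_sva_ops);
 *	pasid = iommu_sva_get_pasid(handle);
 */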

#ifdef CONFIG_IOMMU_API

/**
 * struct iommu_iotlb_gather - Range information for a pending IOTLB flush
 *
 * @start: IOVA representing the start of the range to be flushed
 * @end: IOVA representing the end of the range to be flushed (exclusive)
 * @pgsize: The interval at which to perform the flush
 *
 * This structure is intended to be updated by multiple calls to the
 * ->unmap() function in struct iommu_ops before eventually being passed
 * into ->iotlb_sync().
 */
struct iommu_iotlb_gather {
	unsigned long		start;
	unsigned long		end;
	size_t			pgsize;
};
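
/*
 * Example (illustrative sketch): a caller batching TLB invalidations across
 * several unmaps initializes a gather structure, feeds it through
 * iommu_unmap_fast(), and flushes once at the end with iommu_tlb_sync();
 * both are declared later in this header. 'have_ranges_to_unmap' is a
 * hypothetical stand-in for caller logic.
 *
 *	struct iommu_iotlb_gather gather;
 *
 *	iommu_iotlb_gather_init(&gather);
 *	while (have_ranges_to_unmap())
 *		iommu_unmap_fast(domain, iova, size, &gather);
 *	iommu_tlb_sync(domain, &gather);
 */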

/**
 * struct iommu_ops - iommu ops and capabilities
 * @capable: check capability
 * @domain_alloc: allocate iommu domain
 * @domain_free: free iommu domain
 * @attach_dev: attach device to an iommu domain
 * @detach_dev: detach device from an iommu domain
 * @map: map a physically contiguous memory region to an iommu domain
 * @unmap: unmap a physically contiguous memory region from an iommu domain
 * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
 * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
 * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
 *              queue
 * @iova_to_phys: translate iova to physical address
 * @add_device: add device to iommu grouping
 * @remove_device: remove device from iommu grouping
 * @device_group: find iommu group for a particular device
 * @domain_get_attr: Query domain attributes
 * @domain_set_attr: Change domain attributes
 * @get_resv_regions: Request list of reserved regions for a device
 * @put_resv_regions: Free list of reserved regions for a device
 * @apply_resv_region: Temporary helper call-back for iova reserved ranges
 * @domain_window_enable: Configure and enable a particular window for a domain
 * @domain_window_disable: Disable a particular window for a domain
 * @of_xlate: add OF master IDs to iommu grouping
 * @is_attach_deferred: Check if domain attach should be deferred from iommu
 *                      driver init to device driver init (default no)
 * @dev_has/enable/disable_feat: per device entries to check/enable/disable
 *                               iommu specific features.
 * @dev_feat_enabled: check enabled feature
 * @aux_attach/detach_dev: aux-domain specific attach/detach entries.
 * @aux_get_pasid: get the pasid given an aux-domain
 * @sva_bind: Bind process address space to device
 * @sva_unbind: Unbind process address space from device
 * @sva_get_pasid: Get PASID associated with an SVA handle
 * @page_response: handle page request response
 * @cache_invalidate: invalidate translation caches
 * @sva_bind_gpasid: bind guest pasid and mm
 * @sva_unbind_gpasid: unbind guest pasid and mm
 * @pgsize_bitmap: bitmap of all possible supported page sizes
 * @owner: Driver module providing these ops
 */
struct iommu_ops {
	bool (*capable)(enum iommu_cap);

	/* Domain allocation and freeing by the iommu driver */
	struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
	void (*domain_free)(struct iommu_domain *);

	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
	void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*map)(struct iommu_domain *domain, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
	size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
			size_t size, struct iommu_iotlb_gather *iotlb_gather);
	void (*flush_iotlb_all)(struct iommu_domain *domain);
	void (*iotlb_sync_map)(struct iommu_domain *domain);
	void (*iotlb_sync)(struct iommu_domain *domain,
			   struct iommu_iotlb_gather *iotlb_gather);
	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
	int (*add_device)(struct device *dev);
	void (*remove_device)(struct device *dev);
	struct iommu_group *(*device_group)(struct device *dev);
	int (*domain_get_attr)(struct iommu_domain *domain,
			       enum iommu_attr attr, void *data);
	int (*domain_set_attr)(struct iommu_domain *domain,
			       enum iommu_attr attr, void *data);

	/* Request/Free a list of reserved regions for a device */
	void (*get_resv_regions)(struct device *dev, struct list_head *list);
	void (*put_resv_regions)(struct device *dev, struct list_head *list);
	void (*apply_resv_region)(struct device *dev,
				  struct iommu_domain *domain,
				  struct iommu_resv_region *region);

	/* Window handling functions */
	int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
				    phys_addr_t paddr, u64 size, int prot);
	void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr);

	int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
	bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev);

	/* Per device IOMMU features */
	bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f);
	bool (*dev_feat_enabled)(struct device *dev, enum iommu_dev_features f);
	int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
	int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);

	/* Aux-domain specific attach/detach entries */
	int (*aux_attach_dev)(struct iommu_domain *domain, struct device *dev);
	void (*aux_detach_dev)(struct iommu_domain *domain, struct device *dev);
	int (*aux_get_pasid)(struct iommu_domain *domain, struct device *dev);

	struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm,
				      void *drvdata);
	void (*sva_unbind)(struct iommu_sva *handle);
	int (*sva_get_pasid)(struct iommu_sva *handle);

	int (*page_response)(struct device *dev,
			     struct iommu_fault_event *evt,
			     struct iommu_page_response *msg);
	int (*cache_invalidate)(struct iommu_domain *domain, struct device *dev,
				struct iommu_cache_invalidate_info *inv_info);
	int (*sva_bind_gpasid)(struct iommu_domain *domain,
			struct device *dev, struct iommu_gpasid_bind_data *data);

	int (*sva_unbind_gpasid)(struct device *dev, int pasid);

	unsigned long pgsize_bitmap;
	struct module *owner;
};
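
/*
 * Example (hedged sketch): an IOMMU driver only needs to populate the ops it
 * supports. A minimal paging-capable driver might look like the following;
 * all my_* symbols are hypothetical driver functions, not part of this API,
 * and the page-size bitmap is just an illustrative choice.
 *
 *	static const struct iommu_ops my_iommu_ops = {
 *		.capable	= my_capable,
 *		.domain_alloc	= my_domain_alloc,
 *		.domain_free	= my_domain_free,
 *		.attach_dev	= my_attach_dev,
 *		.map		= my_map,
 *		.unmap		= my_unmap,
 *		.iova_to_phys	= my_iova_to_phys,
 *		.add_device	= my_add_device,
 *		.remove_device	= my_remove_device,
 *		.device_group	= generic_device_group,
 *		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
 *	};
 */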

/**
 * struct iommu_device - IOMMU core representation of one IOMMU hardware
 *			 instance
 * @list: Used by the iommu-core to keep a list of registered iommus
 * @ops: iommu-ops for talking to this iommu
 * @fwnode: firmware handle for this iommu, used to match it against firmware
 *          descriptions of its master devices
 * @dev: struct device for sysfs handling
 */
struct iommu_device {
	struct list_head list;
	const struct iommu_ops *ops;
	struct fwnode_handle *fwnode;
	struct device *dev;
};

/**
 * struct iommu_fault_event - Generic fault event
 *
 * Can represent recoverable faults such as page requests or
 * unrecoverable faults such as DMA or IRQ remapping faults.
 *
 * @fault: fault descriptor
 * @list: pending fault event list, used for tracking responses
 */
struct iommu_fault_event {
	struct iommu_fault fault;
	struct list_head list;
};

/**
 * struct iommu_fault_param - per-device IOMMU fault data
 * @handler: Callback function to handle IOMMU faults at device level
 * @data: handler private data
 * @faults: holds the pending faults which need a response
 * @lock: protect pending faults list
 */
struct iommu_fault_param {
	iommu_dev_fault_handler_t handler;
	void *data;
	struct list_head faults;
	struct mutex lock;
};

/**
 * struct iommu_param - collection of per-device IOMMU data
 *
 * @fault_param: IOMMU detected device fault reporting data
 *
 * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
 *	struct iommu_group	*iommu_group;
 *	struct iommu_fwspec	*iommu_fwspec;
 */
struct iommu_param {
	struct mutex lock;
	struct iommu_fault_param *fault_param;
};

int iommu_device_register(struct iommu_device *iommu);
void iommu_device_unregister(struct iommu_device *iommu);
int iommu_device_sysfs_add(struct iommu_device *iommu,
			   struct device *parent,
			   const struct attribute_group **groups,
			   const char *fmt, ...) __printf(4, 5);
void iommu_device_sysfs_remove(struct iommu_device *iommu);
int iommu_device_link(struct iommu_device *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);

static inline void __iommu_device_set_ops(struct iommu_device *iommu,
					  const struct iommu_ops *ops)
{
	iommu->ops = ops;
}

#define iommu_device_set_ops(iommu, ops)				\
do {									\
	struct iommu_ops *__ops = (struct iommu_ops *)(ops);		\
	__ops->owner = THIS_MODULE;					\
	__iommu_device_set_ops(iommu, __ops);				\
} while (0)

static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
					   struct fwnode_handle *fwnode)
{
	iommu->fwnode = fwnode;
}
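
/*
 * Example (hedged sketch): at probe time an IOMMU driver registers its
 * hardware instance with the core roughly as follows; 'smmu', 'id' and
 * 'my_iommu_ops' are hypothetical driver objects, not part of this API.
 *
 *	iommu_device_set_ops(&smmu->iommu, &my_iommu_ops);
 *	iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
 *
 *	ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL, "my-iommu.%d", id);
 *	if (ret)
 *		return ret;
 *	ret = iommu_device_register(&smmu->iommu);
 */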

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return (struct iommu_device *)dev_get_drvdata(dev);
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
	*gather = (struct iommu_iotlb_gather) {
		.start	= ULONG_MAX,
	};
}

#define IOMMU_GROUP_NOTIFY_ADD_DEVICE		1 /* Device added */
#define IOMMU_GROUP_NOTIFY_DEL_DEVICE		2 /* Pre Device removed */
#define IOMMU_GROUP_NOTIFY_BIND_DRIVER		3 /* Pre Driver bind */
#define IOMMU_GROUP_NOTIFY_BOUND_DRIVER		4 /* Post Driver bind */
#define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER	5 /* Pre Driver unbind */
#define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER	6 /* Post Driver unbind */

extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops);
extern bool iommu_present(struct bus_type *bus);
extern bool iommu_capable(struct bus_type *bus, enum iommu_cap cap);
extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus);
extern struct iommu_group *iommu_group_get_by_id(int id);
extern void iommu_domain_free(struct iommu_domain *domain);
extern int iommu_attach_device(struct iommu_domain *domain,
			       struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
				struct device *dev);
extern int iommu_cache_invalidate(struct iommu_domain *domain,
				  struct device *dev,
				  struct iommu_cache_invalidate_info *inv_info);
extern int iommu_sva_bind_gpasid(struct iommu_domain *domain,
		struct device *dev, struct iommu_gpasid_bind_data *data);
extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
				   struct device *dev, ioasid_t pasid);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot);
extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
			  size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
			       unsigned long iova, size_t size,
			       struct iommu_iotlb_gather *iotlb_gather);
extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			   struct scatterlist *sg, unsigned int nents, int prot);
extern size_t iommu_map_sg_atomic(struct iommu_domain *domain,
				  unsigned long iova, struct scatterlist *sg,
				  unsigned int nents, int prot);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
			iommu_fault_handler_t handler, void *token);

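/*
 * Example (illustrative sketch): mapping one physically contiguous region
 * into a domain and tearing it down again. 'domain', 'iova' and 'paddr' are
 * assumed to be set up by the caller; sizes must be multiples of a page size
 * advertised in domain->pgsize_bitmap.
 *
 *	ret = iommu_map(domain, iova, paddr, SZ_1M,
 *			IOMMU_READ | IOMMU_WRITE);
 *	if (ret)
 *		return ret;
 *	...
 *	unmapped = iommu_unmap(domain, iova, SZ_1M);
 *	WARN_ON(unmapped != SZ_1M);
 */
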
extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern void generic_iommu_put_resv_regions(struct device *dev,
					   struct list_head *list);
extern int iommu_request_dm_for_dev(struct device *dev);
extern int iommu_request_dma_domain_for_dev(struct device *dev);
extern void iommu_set_default_passthrough(bool cmd_line);
extern void iommu_set_default_translated(bool cmd_line);
extern bool iommu_default_passthrough(void);
extern struct iommu_resv_region *
iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
			enum iommu_resv_type type);
extern int iommu_get_group_resv_regions(struct iommu_group *group,
					struct list_head *head);

extern int iommu_attach_group(struct iommu_domain *domain,
			      struct iommu_group *group);
extern void iommu_detach_group(struct iommu_domain *domain,
			       struct iommu_group *group);
extern struct iommu_group *iommu_group_alloc(void);
extern void *iommu_group_get_iommudata(struct iommu_group *group);
extern void iommu_group_set_iommudata(struct iommu_group *group,
				      void *iommu_data,
				      void (*release)(void *iommu_data));
extern int iommu_group_set_name(struct iommu_group *group, const char *name);
extern int iommu_group_add_device(struct iommu_group *group,
				  struct device *dev);
extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
				    int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);
extern int iommu_group_register_notifier(struct iommu_group *group,
					 struct notifier_block *nb);
extern int iommu_group_unregister_notifier(struct iommu_group *group,
					   struct notifier_block *nb);
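
/*
 * Example (hedged sketch): a subsystem that wants to track device and driver
 * changes within a group can register a notifier; 'my_notifier_fn' is a
 * hypothetical callback that receives one of the IOMMU_GROUP_NOTIFY_*
 * actions defined above.
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_notifier_fn,
 *	};
 *
 *	ret = iommu_group_register_notifier(group, &my_nb);
 *	...
 *	iommu_group_unregister_notifier(group, &my_nb);
 */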

extern int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data);

extern int iommu_unregister_device_fault_handler(struct device *dev);

extern int iommu_report_device_fault(struct device *dev,
				     struct iommu_fault_event *evt);
extern int iommu_page_response(struct device *dev,
			       struct iommu_page_response *msg);
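
/*
 * Example (hedged sketch): a driver that can handle recoverable page faults
 * registers a handler and, once a request is serviced, completes it via
 * iommu_page_response(). 'my_fault_handler' is a hypothetical
 * iommu_dev_fault_handler_t callback; the response fields come from
 * uapi/linux/iommu.h.
 *
 *	static int my_fault_handler(struct iommu_fault *fault, void *data)
 *	{
 *		struct device *dev = data;
 *		struct iommu_page_response resp = {
 *			.version	= IOMMU_PAGE_RESP_VERSION_1,
 *			.pasid		= fault->prm.pasid,
 *			.grpid		= fault->prm.grpid,
 *			.code		= IOMMU_PAGE_RESP_SUCCESS,
 *		};
 *
 *		...service the fault...
 *		return iommu_page_response(dev, &resp);
 *	}
 *
 *	ret = iommu_register_device_fault_handler(dev, my_fault_handler, dev);
 */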

extern int iommu_group_id(struct iommu_group *group);
extern struct iommu_group *iommu_group_get_for_dev(struct device *dev);
extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);

extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
				 void *data);
extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
				 void *data);

/* Window handling function prototypes */
extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
				      phys_addr_t offset, u64 size,
				      int prot);
extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr);

extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
			      unsigned long iova, int flags);

static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
{
	if (domain->ops->flush_iotlb_all)
		domain->ops->flush_iotlb_all(domain);
}

static inline void iommu_tlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *iotlb_gather)
{
	if (domain->ops->iotlb_sync)
		domain->ops->iotlb_sync(domain, iotlb_gather);

	iommu_iotlb_gather_init(iotlb_gather);
}

static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
	unsigned long start = iova, end = start + size;

	/*
	 * If the new page is disjoint from the current range or is mapped at
	 * a different granularity, then sync the TLB so that the gather
	 * structure can be rewritten.
	 */
	if (gather->pgsize != size ||
	    end < gather->start || start > gather->end) {
		if (gather->pgsize)
			iommu_tlb_sync(domain, gather);
		gather->pgsize = size;
	}

	if (gather->end < end)
		gather->end = end;

	if (gather->start > start)
		gather->start = start;
}
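
/*
 * Example (hedged sketch): an IOMMU driver's ->unmap() implementation calls
 * iommu_iotlb_gather_add_page() for each page it tears down, so that the
 * core can issue one ranged invalidation from ->iotlb_sync() later.
 * 'my_unmap' is a hypothetical driver callback matching iommu_ops->unmap.
 *
 *	static size_t my_unmap(struct iommu_domain *domain, unsigned long iova,
 *			       size_t size, struct iommu_iotlb_gather *gather)
 *	{
 *		...clear the page table entries for [iova, iova + size)...
 *		iommu_iotlb_gather_add_page(domain, gather, iova, size);
 *		return size;
 *	}
 */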

/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
/* FSL-MC device grouping function */
struct iommu_group *fsl_mc_device_group(struct device *dev);

/**
 * struct iommu_fwspec - per-device IOMMU instance data
 * @ops: ops for this device's IOMMU
 * @iommu_fwnode: firmware handle for this device's IOMMU
 * @iommu_priv: IOMMU driver private data for this device
 * @flags: IOMMU_FWSPEC_* flags
 * @num_pasid_bits: number of PASID bits supported by this device
 * @num_ids: number of associated device IDs
 * @ids: IDs which this device may present to the IOMMU
 */
struct iommu_fwspec {
	const struct iommu_ops	*ops;
	struct fwnode_handle	*iommu_fwnode;
	void			*iommu_priv;
	u32			flags;
	u32			num_pasid_bits;
	unsigned int		num_ids;
	u32			ids[1];
};

/* ATS is supported */
#define IOMMU_FWSPEC_PCI_RC_ATS			(1 << 0)

/**
 * struct iommu_sva - handle to a device-mm bond
 */
struct iommu_sva {
	struct device			*dev;
	const struct iommu_sva_ops	*ops;
};

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops);
void iommu_fwspec_free(struct device *dev);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);
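
/*
 * Example (hedged sketch): an IOMMU driver's ->of_xlate() callback typically
 * records the master IDs from the firmware description into the device's
 * fwspec; the fwspec itself is created beforehand via iommu_fwspec_init().
 * 'my_of_xlate' is a hypothetical driver callback matching
 * iommu_ops->of_xlate.
 *
 *	static int my_of_xlate(struct device *dev,
 *			       struct of_phandle_args *args)
 *	{
 *		u32 id = args->args[0];
 *
 *		return iommu_fwspec_add_ids(dev, &id, 1);
 *	}
 */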

static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
	return dev->iommu_fwspec;
}

static inline void dev_iommu_fwspec_set(struct device *dev,
					struct iommu_fwspec *fwspec)
{
	dev->iommu_fwspec = fwspec;
}

int iommu_probe_device(struct device *dev);
void iommu_release_device(struct device *dev);

bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);
bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features f);
int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev);
void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev);
int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev);

struct iommu_sva *iommu_sva_bind_device(struct device *dev,
					struct mm_struct *mm,
					void *drvdata);
void iommu_sva_unbind_device(struct iommu_sva *handle);
int iommu_sva_set_ops(struct iommu_sva *handle,
		      const struct iommu_sva_ops *ops);
int iommu_sva_get_pasid(struct iommu_sva *handle);

#else /* CONFIG_IOMMU_API */

struct iommu_ops {};
struct iommu_group {};
struct iommu_fwspec {};
struct iommu_device {};
struct iommu_fault_param {};
struct iommu_iotlb_gather {};

static inline bool iommu_present(struct bus_type *bus)
{
	return false;
}

static inline bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
	return false;
}

static inline struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return NULL;
}

static inline struct iommu_group *iommu_group_get_by_id(int id)
{
	return NULL;
}

static inline void iommu_domain_free(struct iommu_domain *domain)
{
}

static inline int iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
}

static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	return NULL;
}

static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot)
{
	return -ENODEV;
}

static inline int iommu_map_atomic(struct iommu_domain *domain,
				   unsigned long iova, phys_addr_t paddr,
				   size_t size, int prot)
{
	return -ENODEV;
}

static inline size_t iommu_unmap(struct iommu_domain *domain,
				 unsigned long iova, size_t size)
{
	return 0;
}

static inline size_t iommu_unmap_fast(struct iommu_domain *domain,
				      unsigned long iova, size_t size,
				      struct iommu_iotlb_gather *iotlb_gather)
{
	return 0;
}

static inline size_t iommu_map_sg(struct iommu_domain *domain,
				  unsigned long iova, struct scatterlist *sg,
				  unsigned int nents, int prot)
{
	return 0;
}

static inline size_t iommu_map_sg_atomic(struct iommu_domain *domain,
					 unsigned long iova, struct scatterlist *sg,
					 unsigned int nents, int prot)
{
	return 0;
}

static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
{
}

static inline void iommu_tlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *iotlb_gather)
{
}

static inline int iommu_domain_window_enable(struct iommu_domain *domain,
					     u32 wnd_nr, phys_addr_t paddr,
					     u64 size, int prot)
{
	return -ENODEV;
}

static inline void iommu_domain_window_disable(struct iommu_domain *domain,
					       u32 wnd_nr)
{
}

static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	return 0;
}

static inline void iommu_set_fault_handler(struct iommu_domain *domain,
				iommu_fault_handler_t handler, void *token)
{
}

static inline void iommu_get_resv_regions(struct device *dev,
					  struct list_head *list)
{
}

static inline void iommu_put_resv_regions(struct device *dev,
					  struct list_head *list)
{
}

static inline int iommu_get_group_resv_regions(struct iommu_group *group,
					       struct list_head *head)
{
	return -ENODEV;
}

static inline int iommu_request_dm_for_dev(struct device *dev)
{
	return -ENODEV;
}

static inline int iommu_request_dma_domain_for_dev(struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_set_default_passthrough(bool cmd_line)
{
}

static inline void iommu_set_default_translated(bool cmd_line)
{
}

static inline bool iommu_default_passthrough(void)
{
	return true;
}

static inline int iommu_attach_group(struct iommu_domain *domain,
				     struct iommu_group *group)
{
	return -ENODEV;
}

static inline void iommu_detach_group(struct iommu_domain *domain,
				      struct iommu_group *group)
{
}

static inline struct iommu_group *iommu_group_alloc(void)
{
	return ERR_PTR(-ENODEV);
}

static inline void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return NULL;
}

static inline void iommu_group_set_iommudata(struct iommu_group *group,
					     void *iommu_data,
					     void (*release)(void *iommu_data))
{
}

static inline int iommu_group_set_name(struct iommu_group *group,
				       const char *name)
{
	return -ENODEV;
}

static inline int iommu_group_add_device(struct iommu_group *group,
					 struct device *dev)
{
	return -ENODEV;
}

static inline void iommu_group_remove_device(struct device *dev)
{
}

static inline int iommu_group_for_each_dev(struct iommu_group *group,
					   void *data,
					   int (*fn)(struct device *, void *))
{
	return -ENODEV;
}

static inline struct iommu_group *iommu_group_get(struct device *dev)
{
	return NULL;
}

static inline void iommu_group_put(struct iommu_group *group)
{
}

static inline int iommu_group_register_notifier(struct iommu_group *group,
						struct notifier_block *nb)
{
	return -ENODEV;
}

static inline int iommu_group_unregister_notifier(struct iommu_group *group,
						  struct notifier_block *nb)
{
	return 0;
}

static inline
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	return -ENODEV;
}

static inline int iommu_unregister_device_fault_handler(struct device *dev)
{
	return 0;
}

static inline
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	return -ENODEV;
}

static inline int iommu_page_response(struct device *dev,
				      struct iommu_page_response *msg)
{
	return -ENODEV;
}

static inline int iommu_group_id(struct iommu_group *group)
{
	return -ENODEV;
}

static inline int iommu_domain_get_attr(struct iommu_domain *domain,
					enum iommu_attr attr, void *data)
{
	return -EINVAL;
}

static inline int iommu_domain_set_attr(struct iommu_domain *domain,
					enum iommu_attr attr, void *data)
{
	return -EINVAL;
}

static inline int iommu_device_register(struct iommu_device *iommu)
{
	return -ENODEV;
}

static inline void iommu_device_set_ops(struct iommu_device *iommu,
					const struct iommu_ops *ops)
{
}

static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
					   struct fwnode_handle *fwnode)
{
}

static inline struct iommu_device *dev_to_iommu_device(struct device *dev)
{
	return NULL;
}

static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
}

static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
					       struct iommu_iotlb_gather *gather,
					       unsigned long iova, size_t size)
{
}

static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}

static inline int iommu_device_sysfs_add(struct iommu_device *iommu,
					 struct device *parent,
					 const struct attribute_group **groups,
					 const char *fmt, ...)
{
	return -ENODEV;
}

static inline void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
}

static inline int iommu_device_link(struct iommu_device *iommu,
				    struct device *link)
{
	return -EINVAL;
}

static inline void iommu_device_unlink(struct iommu_device *iommu,
				       struct device *link)
{
}

static inline int iommu_fwspec_init(struct device *dev,
				    struct fwnode_handle *iommu_fwnode,
				    const struct iommu_ops *ops)
{
	return -ENODEV;
}

static inline void iommu_fwspec_free(struct device *dev)
{
}

static inline int iommu_fwspec_add_ids(struct device *dev, u32 *ids,
				       int num_ids)
{
	return -ENODEV;
}

static inline
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
	return NULL;
}

static inline bool
iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
{
	return false;
}

static inline bool
iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
{
	return false;
}

static inline int
iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline int
iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	return -ENODEV;
}

static inline int
iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
{
	return -ENODEV;
}

static inline void
iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
{
}

static inline int
iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
{
	return -ENODEV;
}

static inline struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
{
	return NULL;
}

static inline void iommu_sva_unbind_device(struct iommu_sva *handle)
{
}

static inline int iommu_sva_set_ops(struct iommu_sva *handle,
				    const struct iommu_sva_ops *ops)
{
	return -EINVAL;
}

static inline int iommu_sva_get_pasid(struct iommu_sva *handle)
{
	return IOMMU_PASID_INVALID;
}

static inline int
iommu_cache_invalidate(struct iommu_domain *domain,
		       struct device *dev,
		       struct iommu_cache_invalidate_info *inv_info)
{
	return -ENODEV;
}

static inline int iommu_sva_bind_gpasid(struct iommu_domain *domain,
				struct device *dev,
				struct iommu_gpasid_bind_data *data)
{
	return -ENODEV;
}

static inline int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
					  struct device *dev, ioasid_t pasid)
{
	return -ENODEV;
}

#endif /* CONFIG_IOMMU_API */

#ifdef CONFIG_IOMMU_DEBUGFS
extern struct dentry *iommu_debugfs_dir;
void iommu_debugfs_setup(void);
#else
static inline void iommu_debugfs_setup(void) {}
#endif

#endif /* __LINUX_IOMMU_H */