Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
arch/powerpc/include/asm/iommu.h at v6.11-rc7 (329 lines, 10 kB)
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 * Rewrite, cleanup:
 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
 */

#ifndef _ASM_IOMMU_H
#define _ASM_IOMMU_H
#ifdef __KERNEL__

#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/bitops.h>
#include <asm/machdep.h>
#include <asm/types.h>
#include <asm/pci-bridge.h>
#include <asm/asm-const.h>

#define IOMMU_PAGE_SHIFT_4K	12
#define IOMMU_PAGE_SIZE_4K	(ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K)
#define IOMMU_PAGE_MASK_4K	(~((1 << IOMMU_PAGE_SHIFT_4K) - 1))
#define IOMMU_PAGE_ALIGN_4K(addr) ALIGN(addr, IOMMU_PAGE_SIZE_4K)

#define IOMMU_PAGE_SIZE(tblptr) (ASM_CONST(1) << (tblptr)->it_page_shift)
#define IOMMU_PAGE_MASK(tblptr) (~((1 << (tblptr)->it_page_shift) - 1))
#define IOMMU_PAGE_ALIGN(addr, tblptr) ALIGN(addr, IOMMU_PAGE_SIZE(tblptr))

#define DIRECT64_PROPNAME	"linux,direct64-ddr-window-info"
#define DMA64_PROPNAME		"linux,dma64-ddr-window-info"

#define MIN_DDW_VPMEM_DMA_WINDOW	SZ_2G

/* Boot time flags */
extern int iommu_is_off;
extern int iommu_force_on;

struct iommu_table_ops {
	/*
	 * When called with direction==DMA_NONE, it is equal to clear().
	 * uaddr is a linear map address.
	 */
	int (*set)(struct iommu_table *tbl,
			long index, long npages,
			unsigned long uaddr,
			enum dma_data_direction direction,
			unsigned long attrs);
#ifdef CONFIG_IOMMU_API
	/*
	 * Exchanges existing TCE with new TCE plus direction bits;
	 * returns old TCE and DMA direction mask.
	 * @tce is a physical address.
	 */
	int (*xchg_no_kill)(struct iommu_table *tbl,
			long index,
			unsigned long *hpa,
			enum dma_data_direction *direction);

	void (*tce_kill)(struct iommu_table *tbl,
			unsigned long index,
			unsigned long pages);

	__be64 *(*useraddrptr)(struct iommu_table *tbl, long index, bool alloc);
#endif
	void (*clear)(struct iommu_table *tbl,
			long index, long npages);
	/* get() returns a physical address */
	unsigned long (*get)(struct iommu_table *tbl, long index);
	void (*flush)(struct iommu_table *tbl);
	void (*free)(struct iommu_table *tbl);
};

/* These are used by VIO */
extern struct iommu_table_ops iommu_table_lpar_multi_ops;
extern struct iommu_table_ops iommu_table_pseries_ops;
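The 4K and per-table page macros above derive size, mask, and alignment purely from a page shift. Below is a minimal stand-alone sketch of the same arithmetic, assuming user space: the kernel-only ASM_CONST() and ALIGN() helpers are replaced with plain C equivalents, and the demo_* names are invented for illustration.

#include <stdio.h>

struct demo_table { unsigned long it_page_shift; };

/* Same arithmetic as IOMMU_PAGE_SIZE/MASK/ALIGN, minus kernel helpers. */
#define DEMO_PAGE_SIZE(tblptr)	(1UL << (tblptr)->it_page_shift)
#define DEMO_PAGE_MASK(tblptr)	(~((1UL << (tblptr)->it_page_shift) - 1))
#define DEMO_PAGE_ALIGN(addr, tblptr) \
	(((addr) + DEMO_PAGE_SIZE(tblptr) - 1) & DEMO_PAGE_MASK(tblptr))

int main(void)
{
	struct demo_table tbl = { .it_page_shift = 16 }; /* 64K IOMMU pages */

	printf("size  = %lu\n", DEMO_PAGE_SIZE(&tbl));              /* 65536 */
	printf("align = %#lx\n", DEMO_PAGE_ALIGN(0x12345UL, &tbl)); /* 0x20000 */
	return 0;
}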
/*
 * IOMAP_MAX_ORDER defines the largest contiguous block
 * of dma space we can get.  IOMAP_MAX_ORDER = 13
 * allows up to 2**12 pages (4096 * 4096) = 16 MB.
 */
#define IOMAP_MAX_ORDER		13

#define IOMMU_POOL_HASHBITS	2
#define IOMMU_NR_POOLS		(1 << IOMMU_POOL_HASHBITS)

struct iommu_pool {
	unsigned long start;
	unsigned long end;
	unsigned long hint;
	spinlock_t lock;
} ____cacheline_aligned_in_smp;

struct iommu_table {
	unsigned long  it_busno;	/* Bus number this table belongs to */
	unsigned long  it_size;		/* Size of iommu table in entries */
	unsigned long  it_indirect_levels;
	unsigned long  it_level_size;
	unsigned long  it_allocated_size;
	unsigned long  it_offset;	/* Offset into global table */
	unsigned long  it_base;		/* mapped address of tce table */
	unsigned long  it_index;	/* which iommu table this is */
	unsigned long  it_type;		/* type: PCI or Virtual Bus */
	unsigned long  it_blocksize;	/* Entries in each block (cacheline) */
	unsigned long  poolsize;
	unsigned long  nr_pools;
	struct iommu_pool large_pool;
	struct iommu_pool pools[IOMMU_NR_POOLS];
	unsigned long *it_map;		/* A simple allocation bitmap for now */
	unsigned long  it_page_shift;	/* table iommu page size */
	struct list_head it_group_list;	/* List of iommu_table_group_link */
	__be64 *it_userspace;		/* userspace view of the table */
	struct iommu_table_ops *it_ops;
	struct kref    it_kref;
	int it_nid;
	unsigned long it_reserved_start;	/* Start of not-DMA-able (MMIO) area */
	unsigned long it_reserved_end;
};

#define IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry) \
		((tbl)->it_ops->useraddrptr((tbl), (entry), false))
#define IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry) \
		((tbl)->it_ops->useraddrptr((tbl), (entry), true))

/* Pure 2^n version of get_order */
static inline __attribute_const__
int get_iommu_order(unsigned long size, struct iommu_table *tbl)
{
	return __ilog2((size - 1) >> tbl->it_page_shift) + 1;
}

struct scatterlist;

#ifdef CONFIG_PPC64

static inline void set_iommu_table_base(struct device *dev,
					struct iommu_table *base)
{
	dev->archdata.iommu_table_base = base;
}

static inline void *get_iommu_table_base(struct device *dev)
{
	return dev->archdata.iommu_table_base;
}

extern int dma_iommu_dma_supported(struct device *dev, u64 mask);

extern struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl);
extern int iommu_tce_table_put(struct iommu_table *tbl);

/* Initializes an iommu_table based on values set in the passed-in
 * structure
 */
extern struct iommu_table *iommu_init_table(struct iommu_table *tbl,
		int nid, unsigned long res_start, unsigned long res_end);
bool iommu_table_in_use(struct iommu_table *tbl);
extern void iommu_table_reserve_pages(struct iommu_table *tbl,
		unsigned long res_start, unsigned long res_end);
extern void iommu_table_clear(struct iommu_table *tbl);
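get_iommu_order() above is the table-aware analogue of get_order(): it rounds a byte count up to a power-of-two number of IOMMU pages and returns that power. A hedged stand-alone sketch, modelling __ilog2() with a GCC builtin (the kernel gets it from its log2/bitops headers) and taking the page shift directly rather than a table pointer:

#include <stdio.h>

static inline int demo_ilog2(unsigned long x)
{
	return 63 - __builtin_clzl(x);	/* assumes 64-bit unsigned long */
}

static inline int demo_get_iommu_order(unsigned long size, unsigned long page_shift)
{
	return demo_ilog2((size - 1) >> page_shift) + 1;
}

int main(void)
{
	/* With 4K IOMMU pages, a 12K request spans 3 pages, so the
	 * allocator needs an order-2 (4-page) power-of-two block. */
	printf("order = %d\n", demo_get_iommu_order(0x3000, 12)); /* 2 */
	return 0;
}

One caveat: __builtin_clzl(0) is undefined, so this sketch requires a size larger than one IOMMU page; the kernel's fls()-based __ilog2() happens to yield order 0 there.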
#define IOMMU_TABLE_GROUP_MAX_TABLES	2

struct iommu_table_group;

struct iommu_table_group_ops {
	unsigned long (*get_table_size)(
			__u32 page_shift,
			__u64 window_size,
			__u32 levels);
	long (*create_table)(struct iommu_table_group *table_group,
			int num,
			__u32 page_shift,
			__u64 window_size,
			__u32 levels,
			struct iommu_table **ptbl);
	long (*set_window)(struct iommu_table_group *table_group,
			int num,
			struct iommu_table *tblnew);
	long (*unset_window)(struct iommu_table_group *table_group,
			int num);
	/* Switch ownership from platform code to external user (e.g. VFIO) */
	long (*take_ownership)(struct iommu_table_group *table_group,
			struct device *dev);
	/* Switch ownership from external user (e.g. VFIO) back to core */
	void (*release_ownership)(struct iommu_table_group *table_group,
			struct device *dev);
};

struct iommu_table_group_link {
	struct list_head next;
	struct rcu_head rcu;
	struct iommu_table_group *table_group;
};

struct iommu_table_group {
	/* IOMMU properties */
	__u32 tce32_start;
	__u32 tce32_size;
	__u64 pgsizes;		/* Bitmap of supported page sizes */
	__u32 max_dynamic_windows_supported;
	__u32 max_levels;

	struct iommu_group *group;
	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
	struct iommu_table_group_ops *ops;
};

#ifdef CONFIG_IOMMU_API

extern void iommu_register_group(struct iommu_table_group *table_group,
				 int pci_domain_number, unsigned long pe_num);
extern int iommu_add_device(struct iommu_table_group *table_group,
		struct device *dev);
extern long iommu_tce_xchg(struct mm_struct *mm, struct iommu_table *tbl,
		unsigned long entry, unsigned long *hpa,
		enum dma_data_direction *direction);
extern long iommu_tce_xchg_no_kill(struct mm_struct *mm,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long *hpa,
		enum dma_data_direction *direction);
extern void iommu_tce_kill(struct iommu_table *tbl,
		unsigned long entry, unsigned long pages);
int dev_has_iommu_table(struct device *dev, void *data);

#else
static inline void iommu_register_group(struct iommu_table_group *table_group,
					int pci_domain_number,
					unsigned long pe_num)
{
}

static inline int iommu_add_device(struct iommu_table_group *table_group,
		struct device *dev)
{
	return 0;
}

static inline int dev_has_iommu_table(struct device *dev, void *data)
{
	return 0;
}
#endif /* !CONFIG_IOMMU_API */

u64 dma_iommu_get_required_mask(struct device *dev);
#else

static inline void *get_iommu_table_base(struct device *dev)
{
	return NULL;
}

static inline int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
	return 0;
}

#endif /* CONFIG_PPC64 */

extern int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
			    struct scatterlist *sglist, int nelems,
			    unsigned long mask,
			    enum dma_data_direction direction,
			    unsigned long attrs);
extern void ppc_iommu_unmap_sg(struct iommu_table *tbl,
			       struct scatterlist *sglist,
			       int nelems,
			       enum dma_data_direction direction,
			       unsigned long attrs);

extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
				  size_t size, dma_addr_t *dma_handle,
				  unsigned long mask, gfp_t flag, int node);
extern void iommu_free_coherent(struct iommu_table *tbl, size_t size,
				void *vaddr, dma_addr_t dma_handle);
extern dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
				 struct page *page, unsigned long offset,
				 size_t size, unsigned long mask,
				 enum dma_data_direction direction,
				 unsigned long attrs);
extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
			     size_t size, enum dma_data_direction direction,
			     unsigned long attrs);

void __init iommu_init_early_pSeries(void);
extern void iommu_init_early_dart(struct pci_controller_ops *controller_ops);
extern void iommu_init_early_pasemi(void);
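Taken together, the table-group ops encode the ownership handshake with an external user such as VFIO: take_ownership() detaches the platform's default DMA windows, create_table() and set_window() install the user's own window, and release_ownership() hands control back. A hedged sketch of that sequence, under stated assumptions: demo_attach_group() is an invented name, the window geometry (4K pages, 1GB, one level) is only an example, and locking plus NULL checks on the ops are omitted.

static long demo_attach_group(struct iommu_table_group *table_group,
			      struct device *dev)
{
	struct iommu_table *tbl = NULL;
	long ret;

	/* Detach the platform's default DMA windows first. */
	ret = table_group->ops->take_ownership(table_group, dev);
	if (ret)
		return ret;

	/* Create and program window 0: 4K IOMMU pages, a 1GB window,
	 * single-level TCE table (example values only). */
	ret = table_group->ops->create_table(table_group, 0, 12, SZ_1G, 1, &tbl);
	if (ret)
		goto release;

	ret = table_group->ops->set_window(table_group, 0, tbl);
	if (ret)
		goto put_table;

	return 0;

put_table:
	iommu_tce_table_put(tbl);
release:
	table_group->ops->release_ownership(table_group, dev);
	return ret;
}

The teardown path would mirror this order: unset_window() first, then release_ownership().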
#if defined(CONFIG_PPC64) && defined(CONFIG_PM)
static inline void iommu_restore(void)
{
	if (ppc_md.iommu_restore)
		ppc_md.iommu_restore();
}
#endif

/* The API to support IOMMU operations for VFIO */
extern int iommu_tce_check_ioba(unsigned long page_shift,
		unsigned long offset, unsigned long size,
		unsigned long ioba, unsigned long npages);
extern int iommu_tce_check_gpa(unsigned long page_shift,
		unsigned long gpa);

#define iommu_tce_clear_param_check(tbl, ioba, tce_value, npages) \
		(iommu_tce_check_ioba((tbl)->it_page_shift,       \
				(tbl)->it_offset, (tbl)->it_size, \
				(ioba), (npages)) || (tce_value))
#define iommu_tce_put_param_check(tbl, ioba, gpa)                 \
		(iommu_tce_check_ioba((tbl)->it_page_shift,       \
				(tbl)->it_offset, (tbl)->it_size, \
				(ioba), 1) ||                     \
		iommu_tce_check_gpa((tbl)->it_page_shift, (gpa)))

extern void iommu_flush_tce(struct iommu_table *tbl);

extern enum dma_data_direction iommu_tce_direction(unsigned long tce);
extern unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir);

#ifdef CONFIG_PPC_CELL_NATIVE
extern bool iommu_fixed_is_weak;
#else
#define iommu_fixed_is_weak false
#endif

extern const struct dma_map_ops dma_iommu_ops;

#endif /* __KERNEL__ */
#endif /* _ASM_IOMMU_H */
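As a closing illustration, the VFIO-facing API above is meant to be used validate-then-update: the param-check macros reject out-of-window I/O addresses or misaligned guest pages before any TCE is touched, and iommu_tce_kill() flushes hardware TCE caches after iommu_tce_xchg_no_kill() updates an entry. A hedged sketch of mapping a single page: demo_map_one() is an invented name, and the guest-physical to host-physical translation (normally done against pinned guest memory) is elided.

static long demo_map_one(struct mm_struct *mm, struct iommu_table *tbl,
			 unsigned long ioba, unsigned long gpa)
{
	unsigned long entry = ioba >> tbl->it_page_shift;
	unsigned long hpa = gpa;	/* real code would translate gpa first */
	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
	long ret;

	/* Reject an ioba outside the window or a misaligned gpa. */
	if (iommu_tce_put_param_check(tbl, ioba, gpa))
		return -EINVAL;

	ret = iommu_tce_xchg_no_kill(mm, tbl, entry, &hpa, &dir);
	if (!ret)
		iommu_tce_kill(tbl, entry, 1);	/* flush this one entry */

	return ret;
}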