Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v4.9-rc6 208 lines 6.8 kB view raw
#ifndef __IO_PGTABLE_H
#define __IO_PGTABLE_H
#include <linux/bitops.h>

/*
 * Public API for use by IOMMU drivers
 */
enum io_pgtable_fmt {
	ARM_32_LPAE_S1,
	ARM_32_LPAE_S2,
	ARM_64_LPAE_S1,
	ARM_64_LPAE_S2,
	ARM_V7S,
	IO_PGTABLE_NUM_FMTS,
};

/**
 * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management.
 *
 * @tlb_flush_all: Synchronously invalidate the entire TLB context.
 * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
 * @tlb_sync:      Ensure any queued TLB invalidation has taken effect, and
 *                 any corresponding page table updates are visible to the
 *                 IOMMU.
 *
 * Note that these can all be called in atomic context and must therefore
 * not block.
 */
struct iommu_gather_ops {
	void (*tlb_flush_all)(void *cookie);
	void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
			      bool leaf, void *cookie);
	void (*tlb_sync)(void *cookie);
};

/**
 * struct io_pgtable_cfg - Configuration data for a set of page tables.
 *
 * @quirks:        A bitmap of hardware quirks that require some special
 *                 action by the low-level page table allocator.
 * @pgsize_bitmap: A bitmap of page sizes supported by this set of page
 *                 tables.
 * @ias:           Input address (iova) size, in bits.
 * @oas:           Output address (paddr) size, in bits.
 * @tlb:           TLB management callbacks for this set of tables.
 * @iommu_dev:     The device representing the DMA configuration for the
 *                 page table walker.
 */
struct io_pgtable_cfg {
	/*
	 * IO_PGTABLE_QUIRK_ARM_NS: (ARM formats) Set NS and NSTABLE bits in
	 *	stage 1 PTEs, for hardware which insists on validating them
	 *	even in non-secure state where they should normally be ignored.
	 *
	 * IO_PGTABLE_QUIRK_NO_PERMS: Ignore the IOMMU_READ, IOMMU_WRITE and
	 *	IOMMU_NOEXEC flags and map everything with full access, for
	 *	hardware which does not implement the permissions of a given
	 *	format, and/or requires some format-specific default value.
	 *
	 * IO_PGTABLE_QUIRK_TLBI_ON_MAP: If the format forbids caching invalid
	 *	(unmapped) entries but the hardware might do so anyway, perform
	 *	TLB maintenance when mapping as well as when unmapping.
	 *
	 * IO_PGTABLE_QUIRK_ARM_MTK_4GB: (ARM v7s format) Set bit 9 in all
	 *	PTEs, for Mediatek IOMMUs which treat it as a 33rd address bit
	 *	when the SoC is in "4GB mode" and they can only access the high
	 *	remap of DRAM (0x1_00000000 to 0x1_ffffffff).
	 */
	#define IO_PGTABLE_QUIRK_ARM_NS		BIT(0)
	#define IO_PGTABLE_QUIRK_NO_PERMS	BIT(1)
	#define IO_PGTABLE_QUIRK_TLBI_ON_MAP	BIT(2)
	#define IO_PGTABLE_QUIRK_ARM_MTK_4GB	BIT(3)
	unsigned long			quirks;
	unsigned long			pgsize_bitmap;
	unsigned int			ias;
	unsigned int			oas;
	const struct iommu_gather_ops	*tlb;
	struct device			*iommu_dev;

	/* Low-level data specific to the table format */
	union {
		struct {
			u64	ttbr[2];
			u64	tcr;
			u64	mair[2];
		} arm_lpae_s1_cfg;

		struct {
			u64	vttbr;
			u64	vtcr;
		} arm_lpae_s2_cfg;

		struct {
			u32	ttbr[2];
			u32	tcr;
			u32	nmrr;
			u32	prrr;
		} arm_v7s_cfg;
	};
};

/**
 * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
 *
 * @map:          Map a physically contiguous memory region.
 * @unmap:        Unmap a physically contiguous memory region.
 * @iova_to_phys: Translate iova to physical address.
 *
 * These functions map directly onto the iommu_ops member functions with
 * the same names.
 */
struct io_pgtable_ops {
	int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot);
	int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
		     size_t size);
	phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
				    unsigned long iova);
};

/**
 * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an
 *                          IOMMU.
 *
 * @fmt:    The page table format.
 * @cfg:    The page table configuration. This will be modified to represent
 *          the configuration actually provided by the allocator (e.g. the
 *          pgsize_bitmap may be restricted).
 * @cookie: An opaque token provided by the IOMMU driver and passed back to
 *          the callback routines in cfg->tlb.
 */
struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
					    struct io_pgtable_cfg *cfg,
					    void *cookie);

/**
 * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller
 *                         *must* ensure that the page table is no longer
 *                         live, but the TLB can be dirty.
 *
 * @ops: The ops returned from alloc_io_pgtable_ops.
 */
void free_io_pgtable_ops(struct io_pgtable_ops *ops);


/*
 * Internal structures for page table allocator implementations.
 */

/**
 * struct io_pgtable - Internal structure describing a set of page tables.
 *
 * @fmt:    The page table format.
 * @cookie: An opaque token provided by the IOMMU driver and passed back to
 *          any callback routines.
 * @tlb_sync_pending: Private flag for optimising out redundant syncs.
 * @cfg:    A copy of the page table configuration.
 * @ops:    The page table operations in use for this set of page tables.
 */
struct io_pgtable {
	enum io_pgtable_fmt	fmt;
	void			*cookie;
	bool			tlb_sync_pending;
	struct io_pgtable_cfg	cfg;
	struct io_pgtable_ops	ops;
};

#define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops)

/* Flush the whole TLB context and note that a sync is now pending. */
static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop)
{
	iop->cfg.tlb->tlb_flush_all(iop->cookie);
	iop->tlb_sync_pending = true;
}

/* Queue invalidation of [iova, iova + size) and note a pending sync. */
static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop,
		unsigned long iova, size_t size, size_t granule, bool leaf)
{
	iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie);
	iop->tlb_sync_pending = true;
}

/* Sync only when an invalidation is outstanding, then clear the flag. */
static inline void io_pgtable_tlb_sync(struct io_pgtable *iop)
{
	if (iop->tlb_sync_pending) {
		iop->cfg.tlb->tlb_sync(iop->cookie);
		iop->tlb_sync_pending = false;
	}
}

/**
 * struct io_pgtable_init_fns - Alloc/free a set of page tables for a
 *                              particular format.
 *
 * @alloc: Allocate a set of page tables described by cfg.
 * @free:  Free the page tables associated with iop.
 */
struct io_pgtable_init_fns {
	struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie);
	void (*free)(struct io_pgtable *iop);
};

extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;

#endif /* __IO_PGTABLE_H */