/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SWIOTLB_H
#define __LINUX_SWIOTLB_H

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/limits.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct device;
struct page;
struct scatterlist;

#define SWIOTLB_VERBOSE	(1 << 0) /* verbose initialization */
#define SWIOTLB_FORCE	(1 << 1) /* force bounce buffering */
#define SWIOTLB_ANY	(1 << 2) /* allow any memory for the buffer */

/*
 * Maximum allowable number of contiguous slabs to map; must be a power of 2.
 * The complexity of {map,unmap}_single is linearly dependent on this value.
 */
#define IO_TLB_SEGSIZE	128

/*
 * log of the size of each IO TLB slab. The number of slabs is command line
 * controllable.
 */
#define IO_TLB_SHIFT	11
#define IO_TLB_SIZE	(1 << IO_TLB_SHIFT)

/* default to 64MB */
#define IO_TLB_DEFAULT_SIZE	(64UL << 20)

unsigned long swiotlb_size_or_default(void);
void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
	int (*remap)(void *tlb, unsigned long nslabs));
int swiotlb_init_late(size_t size, gfp_t gfp_mask,
	int (*remap)(void *tlb, unsigned long nslabs));
extern void __init swiotlb_update_mem_attributes(void);

phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
		size_t mapping_size, size_t alloc_size,
		unsigned int alloc_aligned_mask, enum dma_data_direction dir,
		unsigned long attrs);

extern void swiotlb_tbl_unmap_single(struct device *hwdev,
				     phys_addr_t tlb_addr,
				     size_t mapping_size,
				     enum dma_data_direction dir,
				     unsigned long attrs);

void swiotlb_sync_single_for_device(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir);
void swiotlb_sync_single_for_cpu(struct device *dev, phys_addr_t tlb_addr,
		size_t size, enum dma_data_direction dir);
dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
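
/*
 * With the defaults above, each slot is IO_TLB_SIZE = 2 KiB, the 64 MiB
 * default pool therefore holds 32768 slots, and IO_TLB_SEGSIZE caps one
 * contiguous mapping at 128 * 2 KiB = 256 KiB.
 *
 * Illustrative sketch only, not a contract of this header: drivers never
 * call these hooks directly. The dma-direct code bounces a streaming
 * mapping through swiotlb roughly as below when a buffer is not
 * addressable by the device (error handling and cache maintenance
 * elided; the real call sites live in kernel/dma/direct.c):
 *
 *	dma_addr_t dma_addr = swiotlb_map(dev, phys, size, dir, attrs);
 *
 *	if (dma_addr == DMA_MAPPING_ERROR)
 *		return DMA_MAPPING_ERROR;
 *	// ... device performs DMA ...
 *	swiotlb_tbl_unmap_single(dev, dma_to_phys(dev, dma_addr), size,
 *				 dir, attrs);
 */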

#ifdef CONFIG_SWIOTLB

/**
 * struct io_tlb_pool - IO TLB memory pool descriptor
 * @start:	The start address of the swiotlb memory pool. Used to do a quick
 *		range check to see if the memory was in fact allocated by this
 *		API.
 * @end:	The end address of the swiotlb memory pool. Used to do a quick
 *		range check to see if the memory was in fact allocated by this
 *		API.
 * @vaddr:	The virtual address of the swiotlb memory pool. The pool may be
 *		remapped when memory encryption is in use, in which case @vaddr
 *		holds the address used for bounce buffer operations.
 * @nslabs:	The number of IO TLB slots between @start and @end. For the
 *		default swiotlb, this can be adjusted with a boot parameter,
 *		see setup_io_tlb_npages().
 * @late_alloc:	%true if allocated using the page allocator.
 * @nareas:	Number of areas in the pool.
 * @area_nslabs: Number of slots in each area.
 * @areas:	Array of memory area descriptors.
 * @slots:	Array of slot descriptors.
 * @node:	Member of the IO TLB memory pool list.
 * @rcu:	RCU head for swiotlb_dyn_free().
 * @transient:	%true if transient memory pool.
 */
struct io_tlb_pool {
	phys_addr_t start;
	phys_addr_t end;
	void *vaddr;
	unsigned long nslabs;
	bool late_alloc;
	unsigned int nareas;
	unsigned int area_nslabs;
	struct io_tlb_area *areas;
	struct io_tlb_slot *slots;
#ifdef CONFIG_SWIOTLB_DYNAMIC
	struct list_head node;
	struct rcu_head rcu;
	bool transient;
#endif
};

/**
 * struct io_tlb_mem - Software IO TLB allocator
 * @defpool:	Default (initial) IO TLB memory pool descriptor.
 * @nslabs:	Total number of IO TLB slabs in all pools.
 * @debugfs:	The dentry to debugfs.
 * @force_bounce: %true if swiotlb bouncing is forced.
 * @for_alloc:	%true if the pool is used for memory allocation.
 * @can_grow:	%true if more pools can be allocated dynamically.
 * @phys_limit:	Maximum allowed physical address.
 * @lock:	Lock to synchronize changes to the @pools list.
 * @pools:	List of IO TLB memory pool descriptors (if dynamic).
 * @dyn_alloc:	Dynamic IO TLB pool allocation work.
 * @total_used:	The total number of slots in the pool that are currently used
 *		across all areas. Used only for calculating used_hiwater in
 *		debugfs.
 * @used_hiwater: The high water mark for total_used. Used only for reporting
 *		in debugfs.
 */
struct io_tlb_mem {
	struct io_tlb_pool defpool;
	unsigned long nslabs;
	struct dentry *debugfs;
	bool force_bounce;
	bool for_alloc;
#ifdef CONFIG_SWIOTLB_DYNAMIC
	bool can_grow;
	u64 phys_limit;
	spinlock_t lock;
	struct list_head pools;
	struct work_struct dyn_alloc;
#endif
#ifdef CONFIG_DEBUG_FS
	atomic_long_t total_used;
	atomic_long_t used_hiwater;
#endif
};

#ifdef CONFIG_SWIOTLB_DYNAMIC

struct io_tlb_pool *swiotlb_find_pool(struct device *dev, phys_addr_t paddr);

#else

static inline struct io_tlb_pool *swiotlb_find_pool(struct device *dev,
						    phys_addr_t paddr)
{
	return &dev->dma_io_tlb_mem->defpool;
}

#endif
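
/*
 * Illustrative sketch: dma-direct uses the helpers below on the sync and
 * unmap paths to decide whether a physical address must be bounced back
 * out of the IO TLB, roughly:
 *
 *	if (is_swiotlb_buffer(dev, paddr))
 *		swiotlb_sync_single_for_cpu(dev, paddr, size, dir);
 */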

/**
 * is_swiotlb_buffer() - check if a physical address belongs to a swiotlb
 * @dev:	Device which has mapped the buffer.
 * @paddr:	Physical address within the DMA buffer.
 *
 * Check if @paddr points into a bounce buffer.
 *
 * Return:
 * * %true if @paddr points into a bounce buffer
 * * %false otherwise
 */
static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

	if (!mem)
		return false;

#ifdef CONFIG_SWIOTLB_DYNAMIC
	/*
	 * All SWIOTLB buffer addresses must have been returned by
	 * swiotlb_tbl_map_single() and passed to a device driver.
	 * If a SWIOTLB address is checked on another CPU, then it was
	 * presumably loaded by the device driver from an unspecified private
	 * data structure. Make sure that this load is ordered before reading
	 * dev->dma_uses_io_tlb here and mem->pools in swiotlb_find_pool().
	 *
	 * This barrier pairs with smp_mb() in swiotlb_find_slots().
	 */
	smp_rmb();
	return READ_ONCE(dev->dma_uses_io_tlb) &&
		swiotlb_find_pool(dev, paddr);
#else
	return paddr >= mem->defpool.start && paddr < mem->defpool.end;
#endif
}

static inline bool is_swiotlb_force_bounce(struct device *dev)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

	return mem && mem->force_bounce;
}

void swiotlb_init(bool addressing_limited, unsigned int flags);
void __init swiotlb_exit(void);
void swiotlb_dev_init(struct device *dev);
size_t swiotlb_max_mapping_size(struct device *dev);
bool is_swiotlb_allocated(void);
bool is_swiotlb_active(struct device *dev);
void __init swiotlb_adjust_size(unsigned long size);
phys_addr_t default_swiotlb_base(void);
phys_addr_t default_swiotlb_limit(void);
#else
static inline void swiotlb_init(bool addressing_limited, unsigned int flags)
{
}

static inline void swiotlb_dev_init(struct device *dev)
{
}

static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
{
	return false;
}

static inline bool is_swiotlb_force_bounce(struct device *dev)
{
	return false;
}

static inline void swiotlb_exit(void)
{
}

static inline size_t swiotlb_max_mapping_size(struct device *dev)
{
	return SIZE_MAX;
}

static inline bool is_swiotlb_allocated(void)
{
	return false;
}

static inline bool is_swiotlb_active(struct device *dev)
{
	return false;
}

static inline void swiotlb_adjust_size(unsigned long size)
{
}

static inline phys_addr_t default_swiotlb_base(void)
{
	return 0;
}

static inline phys_addr_t default_swiotlb_limit(void)
{
	return 0;
}
#endif /* CONFIG_SWIOTLB */

extern void swiotlb_print_info(void);

#ifdef CONFIG_DMA_RESTRICTED_POOL
struct page *swiotlb_alloc(struct device *dev, size_t size);
bool swiotlb_free(struct device *dev, struct page *page, size_t size);

static inline bool is_swiotlb_for_alloc(struct device *dev)
{
	return dev->dma_io_tlb_mem->for_alloc;
}
#else
static inline struct page *swiotlb_alloc(struct device *dev, size_t size)
{
	return NULL;
}

static inline bool swiotlb_free(struct device *dev, struct page *page,
				size_t size)
{
	return false;
}

static inline bool is_swiotlb_for_alloc(struct device *dev)
{
	return false;
}
#endif /* CONFIG_DMA_RESTRICTED_POOL */

#endif /* __LINUX_SWIOTLB_H */
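
/*
 * Illustrative sketch for the CONFIG_DMA_RESTRICTED_POOL interface above:
 * dma-direct can satisfy coherent allocations from a device's restricted
 * swiotlb pool, and free them back to it, roughly:
 *
 *	if (is_swiotlb_for_alloc(dev))
 *		page = swiotlb_alloc(dev, size);
 *	...
 *	if (is_swiotlb_for_alloc(dev) && swiotlb_free(dev, page, size))
 *		return;
 */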