include/linux/swiotlb.h at v4.13
#ifndef __LINUX_SWIOTLB_H
#define __LINUX_SWIOTLB_H

#include <linux/dma-direction.h>
#include <linux/init.h>
#include <linux/types.h>

struct device;
struct page;
struct scatterlist;

enum swiotlb_force {
	SWIOTLB_NORMAL,		/* Default - depending on HW DMA mask etc. */
	SWIOTLB_FORCE,		/* swiotlb=force */
	SWIOTLB_NO_FORCE,	/* swiotlb=noforce */
};

extern enum swiotlb_force swiotlb_force;

/*
 * Maximum allowable number of contiguous slabs to map,
 * must be a power of 2. What is the appropriate value ?
 * The complexity of {map,unmap}_single is linearly dependent on this value.
 */
#define IO_TLB_SEGSIZE	128

/*
 * log of the size of each IO TLB slab. The number of slabs is command line
 * controllable.
 */
#define IO_TLB_SHIFT 11

extern void swiotlb_init(int verbose);
int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
extern unsigned long swiotlb_nr_tbl(void);
unsigned long swiotlb_size_or_default(void);
extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);

/*
 * Enumeration for sync targets
 */
enum dma_sync_target {
	SYNC_FOR_CPU = 0,
	SYNC_FOR_DEVICE = 1,
};

/* define the last possible byte of physical address space as a mapping error */
#define SWIOTLB_MAP_ERROR (~(phys_addr_t)0x0)

extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
					  dma_addr_t tbl_dma_addr,
					  phys_addr_t phys, size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs);

extern void swiotlb_tbl_unmap_single(struct device *hwdev,
				     phys_addr_t tlb_addr,
				     size_t size, enum dma_data_direction dir,
				     unsigned long attrs);

extern void swiotlb_tbl_sync_single(struct device *hwdev,
				    phys_addr_t tlb_addr,
				    size_t size, enum dma_data_direction dir,
				    enum dma_sync_target target);

/* Accessory functions. */
extern void
*swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			dma_addr_t *dma_handle, gfp_t flags);

extern void
swiotlb_free_coherent(struct device *hwdev, size_t size,
		      void *vaddr, dma_addr_t dma_handle);

extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs);
extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs);

extern int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     enum dma_data_direction dir,
		     unsigned long attrs);

extern void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
		       int nelems, enum dma_data_direction dir,
		       unsigned long attrs);

extern void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir);

extern void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			int nelems, enum dma_data_direction dir);

extern void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir);

extern void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			   int nelems, enum dma_data_direction dir);

extern int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);

extern int
swiotlb_dma_supported(struct device *hwdev, u64 mask);

#ifdef CONFIG_SWIOTLB
extern void __init swiotlb_free(void);
unsigned int swiotlb_max_segment(void);
#else
static inline void swiotlb_free(void) { }
static inline unsigned int swiotlb_max_segment(void) { return 0; }
#endif

extern void swiotlb_print_info(void);
extern int is_swiotlb_buffer(phys_addr_t paddr);
extern void swiotlb_set_max_segment(unsigned int);

#endif /* __LINUX_SWIOTLB_H */
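The sketch below is not part of the header; it is a minimal illustration of how the page-mapping entry points declared above fit together. In v4.13 these functions are typically called from an architecture's dma_map_ops implementation rather than directly by drivers. The function name example_bounce_one_page and its parameters are hypothetical, and the DMA_TO_DEVICE direction is chosen only for illustration.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/swiotlb.h>

/*
 * Hypothetical helper (not from the header above): bounce a single page
 * for a DMA_TO_DEVICE transfer using the swiotlb entry points.
 */
static int example_bounce_one_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size)
{
	dma_addr_t dma;

	/* May transparently use a bounce buffer if the page is outside the device's DMA mask. */
	dma = swiotlb_map_page(dev, page, offset, size, DMA_TO_DEVICE, 0);
	if (swiotlb_dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... program the hardware with "dma" and wait for the transfer to complete ... */

	/* Release the mapping; for DMA_FROM_DEVICE this copies the bounce buffer back. */
	swiotlb_unmap_page(dev, dma, size, DMA_TO_DEVICE, 0);
	return 0;
}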