Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
at v2.6.25 154 lines 4.8 kB view raw
#ifndef _ASM_SPARC64_DMA_MAPPING_H
#define _ASM_SPARC64_DMA_MAPPING_H

#include <linux/scatterlist.h>
#include <linux/mm.h>

/* Sentinel bus address returned by the mapping routines on failure;
 * tested by dma_mapping_error() below. */
#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

/*
 * Platform DMA operation table.  Every generic dma_* helper in this
 * header is a thin inline wrapper that dispatches through this table.
 *
 * Note there are no sync_*_for_device hooks: on this platform no
 * flushing is needed to sync CPU writes to the device, so the
 * corresponding helpers below are empty (see their comments).
 */
struct dma_ops {
	void *(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag);
	void (*free_coherent)(struct device *dev, size_t size,
			      void *cpu_addr, dma_addr_t dma_handle);
	dma_addr_t (*map_single)(struct device *dev, void *cpu_addr,
				 size_t size,
				 enum dma_data_direction direction);
	void (*unmap_single)(struct device *dev, dma_addr_t dma_addr,
			     size_t size,
			     enum dma_data_direction direction);
	int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents,
		      enum dma_data_direction direction);
	void (*unmap_sg)(struct device *dev, struct scatterlist *sg,
			 int nhwentries,
			 enum dma_data_direction direction);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction direction);
	void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg,
				int nelems,
				enum dma_data_direction direction);
};
/* Operation table for the active platform; set up elsewhere. */
extern const struct dma_ops *dma_ops;

extern int dma_supported(struct device *dev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 dma_mask);

/* Allocate a coherent buffer of @size bytes for @dev; the bus address
 * is returned through @dma_handle, the CPU pointer as the result. */
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
}

/* Free a buffer obtained from dma_alloc_coherent(); both the CPU
 * pointer and the bus address must be passed back. */
static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}

/* Map a kernel virtual buffer for streaming DMA; returns its bus
 * address (DMA_ERROR_CODE on failure, per dma_mapping_error()). */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					size_t size,
					enum dma_data_direction direction)
{
	return dma_ops->map_single(dev, cpu_addr, size, direction);
}

/* Tear down a mapping made by dma_map_single(). */
static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	dma_ops->unmap_single(dev, dma_addr, size, direction);
}

/* Map @size bytes at @offset within @page by reusing the map_single
 * hook on the page's kernel virtual address (page_address()). */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	return dma_ops->map_single(dev, page_address(page) + offset,
				   size, direction);
}

/* Tear down a mapping made by dma_map_page(); same hook as
 * dma_unmap_single() since the mapping was made via map_single. */
static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	dma_ops->unmap_single(dev, dma_address, size, direction);
}

/* Map a scatter/gather list of @nents entries; returns the number of
 * hardware entries actually used (to be passed to dma_unmap_sg()). */
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction direction)
{
	return dma_ops->map_sg(dev, sg, nents, direction);
}

/* Tear down a scatter/gather mapping made by dma_map_sg(). */
static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction)
{
	dma_ops->unmap_sg(dev, sg, nents, direction);
}

/* Make device writes to a streaming mapping visible to the CPU. */
static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction direction)
{
	dma_ops->sync_single_for_cpu(dev, dma_handle, size, direction);
}

/* Intentionally empty: see the comment below. */
static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t dma_handle,
					      size_t size,
					      enum dma_data_direction direction)
{
	/* No flushing needed to sync cpu writes to the device. */
}

/* Partial-buffer variant: sync @size bytes at @offset past the start
 * of the mapping, implemented on top of dma_sync_single_for_cpu(). */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t dma_handle,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction direction)
{
	dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
}

/* Intentionally empty: see the comment below. */
static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t dma_handle,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction direction)
{
	/* No flushing needed to sync cpu writes to the device. */
}


/* Make device writes to a scatter/gather mapping visible to the CPU. */
static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sg, int nelems,
				       enum dma_data_direction direction)
{
	dma_ops->sync_sg_for_cpu(dev, sg, nelems, direction);
}

/* Intentionally empty: see the comment below. */
static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sg, int nelems,
					  enum dma_data_direction direction)
{
	/* No flushing needed to sync cpu writes to the device. */
}

/* Nonzero when a map routine returned the DMA_ERROR_CODE sentinel. */
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	return (dma_addr == DMA_ERROR_CODE);
}

static inline int dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
}

/* Noncoherent allocations are served by the coherent allocator, and
 * all memory is treated as consistent on this platform. */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)

#endif /* _ASM_SPARC64_DMA_MAPPING_H */