Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

cris: convert to dma_map_ops

Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Mikael Starvik <starvik@axis.com>
Cc: Jesper Nilsson <jesper.nilsson@axis.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Christoph Hellwig and committed by Linus Torvalds.
e20dd889 4605f04b

+52 -166
+1
arch/cris/Kconfig
··· 54 54 select GENERIC_ATOMIC64 55 55 select HAVE_UID16 56 56 select VIRT_TO_BUS 57 + select HAVE_DMA_ATTRS 57 58 select ARCH_WANT_IPC_PARSE_VERSION 58 59 select GENERIC_IRQ_SHOW 59 60 select GENERIC_IOMAP
+43 -13
arch/cris/arch-v32/drivers/pci/dma.c
··· 16 16 #include <linux/gfp.h> 17 17 #include <asm/io.h> 18 18 19 - void *dma_alloc_coherent(struct device *dev, size_t size, 20 - dma_addr_t *dma_handle, gfp_t gfp) 19 + static void *v32_dma_alloc(struct device *dev, size_t size, 20 + dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs) 21 21 { 22 22 void *ret; 23 - int order = get_order(size); 23 + 24 24 /* ignore region specifiers */ 25 25 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); 26 - 27 - if (dma_alloc_from_coherent(dev, size, dma_handle, &ret)) 28 - return ret; 29 26 30 27 if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff)) 31 28 gfp |= GFP_DMA; 32 29 33 - ret = (void *)__get_free_pages(gfp, order); 30 + ret = (void *)__get_free_pages(gfp, get_order(size)); 34 31 35 32 if (ret != NULL) { 36 33 memset(ret, 0, size); ··· 36 39 return ret; 37 40 } 38 41 39 - void dma_free_coherent(struct device *dev, size_t size, 40 - void *vaddr, dma_addr_t dma_handle) 42 + static void v32_dma_free(struct device *dev, size_t size, void *vaddr, 43 + dma_addr_t dma_handle, struct dma_attrs *attrs) 41 44 { 42 - int order = get_order(size); 43 - 44 - if (!dma_release_from_coherent(dev, order, vaddr)) 45 - free_pages((unsigned long)vaddr, order); 45 + free_pages((unsigned long)vaddr, get_order(size)); 46 46 } 47 47 48 + static inline dma_addr_t v32_dma_map_page(struct device *dev, 49 + struct page *page, unsigned long offset, size_t size, 50 + enum dma_data_direction direction, 51 + struct dma_attrs *attrs) 52 + { 53 + return page_to_phys(page) + offset; 54 + } 55 + 56 + static inline int v32_dma_map_sg(struct device *dev, struct scatterlist *sg, 57 + int nents, enum dma_data_direction direction, 58 + struct dma_attrs *attrs) 59 + { 60 + printk("Map sg\n"); 61 + return nents; 62 + } 63 + 64 + static inline int v32_dma_supported(struct device *dev, u64 mask) 65 + { 66 + /* 67 + * we fall back to GFP_DMA when the mask isn't all 1s, 68 + * so we can't guarantee allocations that must be 69 + * within a tighter range than 
GFP_DMA.. 70 + */ 71 + if (mask < 0x00ffffff) 72 + return 0; 73 + return 1; 74 + } 75 + 76 + struct dma_map_ops v32_dma_ops = { 77 + .alloc = v32_dma_alloc, 78 + .free = v32_dma_free, 79 + .map_page = v32_dma_map_page, 80 + .map_sg = v32_dma_map_sg, 81 + .dma_supported = v32_dma_supported, 82 + }; 83 + EXPORT_SYMBOL(v32_dma_ops);
+8 -153
arch/cris/include/asm/dma-mapping.h
··· 1 - /* DMA mapping. Nothing tricky here, just virt_to_phys */ 2 - 3 1 #ifndef _ASM_CRIS_DMA_MAPPING_H 4 2 #define _ASM_CRIS_DMA_MAPPING_H 5 3 6 - #include <linux/mm.h> 7 - #include <linux/kernel.h> 8 - #include <linux/scatterlist.h> 9 - 10 - #include <asm/cache.h> 11 - #include <asm/io.h> 12 - 13 - #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) 14 - #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 15 - 16 4 #ifdef CONFIG_PCI 17 - #include <asm-generic/dma-coherent.h> 5 + extern struct dma_map_ops v32_dma_ops; 18 6 19 - void *dma_alloc_coherent(struct device *dev, size_t size, 20 - dma_addr_t *dma_handle, gfp_t flag); 21 - 22 - void dma_free_coherent(struct device *dev, size_t size, 23 - void *vaddr, dma_addr_t dma_handle); 24 - #else 25 - static inline void * 26 - dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, 27 - gfp_t flag) 7 + static inline struct dma_map_ops *get_dma_ops(struct device *dev) 28 8 { 29 - BUG(); 30 - return NULL; 9 + return &v32_dma_ops; 31 10 } 32 - 33 - static inline void 34 - dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, 35 - dma_addr_t dma_handle) 11 + #else 12 + static inline struct dma_map_ops *get_dma_ops(struct device *dev) 36 13 { 37 - BUG(); 14 + BUG(); 15 + return NULL; 38 16 } 39 17 #endif 40 - static inline dma_addr_t 41 - dma_map_single(struct device *dev, void *ptr, size_t size, 42 - enum dma_data_direction direction) 43 - { 44 - BUG_ON(direction == DMA_NONE); 45 - return virt_to_phys(ptr); 46 - } 47 18 48 - static inline void 49 - dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, 50 - enum dma_data_direction direction) 51 - { 52 - BUG_ON(direction == DMA_NONE); 53 - } 54 - 55 - static inline int 56 - dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, 57 - enum dma_data_direction direction) 58 - { 59 - printk("Map sg\n"); 60 - return nents; 61 - } 62 - 63 - static inline dma_addr_t 64 - 
dma_map_page(struct device *dev, struct page *page, unsigned long offset, 65 - size_t size, enum dma_data_direction direction) 66 - { 67 - BUG_ON(direction == DMA_NONE); 68 - return page_to_phys(page) + offset; 69 - } 70 - 71 - static inline void 72 - dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, 73 - enum dma_data_direction direction) 74 - { 75 - BUG_ON(direction == DMA_NONE); 76 - } 77 - 78 - 79 - static inline void 80 - dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, 81 - enum dma_data_direction direction) 82 - { 83 - BUG_ON(direction == DMA_NONE); 84 - } 85 - 86 - static inline void 87 - dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, 88 - enum dma_data_direction direction) 89 - { 90 - } 91 - 92 - static inline void 93 - dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, 94 - enum dma_data_direction direction) 95 - { 96 - } 97 - 98 - static inline void 99 - dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, 100 - unsigned long offset, size_t size, 101 - enum dma_data_direction direction) 102 - { 103 - } 104 - 105 - static inline void 106 - dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, 107 - unsigned long offset, size_t size, 108 - enum dma_data_direction direction) 109 - { 110 - } 111 - 112 - static inline void 113 - dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, 114 - enum dma_data_direction direction) 115 - { 116 - } 117 - 118 - static inline void 119 - dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, 120 - enum dma_data_direction direction) 121 - { 122 - } 123 - 124 - static inline int 125 - dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 126 - { 127 - return 0; 128 - } 129 - 130 - static inline int 131 - dma_supported(struct device *dev, u64 mask) 132 - { 133 - /* 134 - * we fall back to GFP_DMA when the mask isn't all 1s, 
135 - * so we can't guarantee allocations that must be 136 - * within a tighter range than GFP_DMA.. 137 - */ 138 - if(mask < 0x00ffffff) 139 - return 0; 140 - 141 - return 1; 142 - } 143 - 144 - static inline int 145 - dma_set_mask(struct device *dev, u64 mask) 146 - { 147 - if(!dev->dma_mask || !dma_supported(dev, mask)) 148 - return -EIO; 149 - 150 - *dev->dma_mask = mask; 151 - 152 - return 0; 153 - } 19 + #include <asm-generic/dma-mapping-common.h> 154 20 155 21 static inline void 156 22 dma_cache_sync(struct device *dev, void *vaddr, size_t size, 157 23 enum dma_data_direction direction) 158 24 { 159 25 } 160 - 161 - /* drivers/base/dma-mapping.c */ 162 - extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, 163 - void *cpu_addr, dma_addr_t dma_addr, size_t size); 164 - extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, 165 - void *cpu_addr, dma_addr_t dma_addr, 166 - size_t size); 167 - 168 - #define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) 169 - #define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) 170 - 171 26 172 27 #endif