Merge branch 'core/generic-dma-coherent' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip into for-linus

authored and committed by Jesse Barnes · 756f7bc6 979b1791

+217 -339
+1
arch/arm/Kconfig
··· 17 select HAVE_KRETPROBES if (HAVE_KPROBES) 18 select HAVE_FTRACE if (!XIP_KERNEL) 19 select HAVE_DYNAMIC_FTRACE if (HAVE_FTRACE) 20 help 21 The ARM series is a line of low-power-consumption RISC chip designs 22 licensed by ARM Ltd and targeted at embedded applications and
··· 17 select HAVE_KRETPROBES if (HAVE_KPROBES) 18 select HAVE_FTRACE if (!XIP_KERNEL) 19 select HAVE_DYNAMIC_FTRACE if (HAVE_FTRACE) 20 + select HAVE_GENERIC_DMA_COHERENT 21 help 22 The ARM series is a line of low-power-consumption RISC chip designs 23 licensed by ARM Ltd and targeted at embedded applications and
+8
arch/arm/mm/consistent.c
··· 274 void * 275 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp) 276 { 277 if (arch_is_coherent()) { 278 void *virt; 279 ··· 366 u32 off; 367 368 WARN_ON(irqs_disabled()); 369 370 if (arch_is_coherent()) { 371 kfree(cpu_addr);
··· 274 void * 275 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp) 276 { 277 + void *memory; 278 + 279 + if (dma_alloc_from_coherent(dev, size, handle, &memory)) 280 + return memory; 281 + 282 if (arch_is_coherent()) { 283 void *virt; 284 ··· 361 u32 off; 362 363 WARN_ON(irqs_disabled()); 364 + 365 + if (dma_release_from_coherent(dev, get_order(size), cpu_addr)) 366 + return; 367 368 if (arch_is_coherent()) { 369 kfree(cpu_addr);
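Every per-arch hunk in this merge follows the same shape: try the device's per-device coherent pool first, and only fall back to the old allocation path if the device has no pool. A minimal standalone sketch of that pattern, assuming a hypothetical architecture whose fallback is plain page allocation; the my_* names and the virt_to_phys() fallback are illustrative, not taken from this merge:

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <asm/io.h>

void *my_dma_alloc_coherent(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;
	int order = get_order(size);

	/* Per-device pool first: returns nonzero if the device has a
	 * declared pool, in which case *ret is the final answer (it may
	 * be NULL if the pool is exhausted and DMA_MEMORY_EXCLUSIVE). */
	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	/* No pool, or the feature is compiled out: generic fallback. */
	ret = (void *)__get_free_pages(gfp, order);
	if (ret) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}
	return ret;
}

void my_dma_free_coherent(struct device *dev, size_t size,
			  void *vaddr, dma_addr_t dma_handle)
{
	int order = get_order(size);

	/* Returns 1 if vaddr came from the per-device pool. */
	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	free_pages((unsigned long)vaddr, order);
}

On architectures that do not select HAVE_GENERIC_DMA_COHERENT, the two helpers are macros that evaluate to 0 (see the new header below), so the same call sites compile straight to the fallback path.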
+1
arch/cris/arch-v32/drivers/Kconfig
··· 641 bool 642 depends on ETRAX_CARDBUS 643 default y 644 645 config ETRAX_IOP_FW_LOAD 646 tristate "IO-processor hotplug firmware loading support"
··· 641 bool 642 depends on ETRAX_CARDBUS 643 default y 644 + select HAVE_GENERIC_DMA_COHERENT 645 646 config ETRAX_IOP_FW_LOAD 647 tristate "IO-processor hotplug firmware loading support"
+3 -103
arch/cris/arch-v32/drivers/pci/dma.c
··· 15 #include <linux/pci.h> 16 #include <asm/io.h> 17 18 - struct dma_coherent_mem { 19 - void *virt_base; 20 - u32 device_base; 21 - int size; 22 - int flags; 23 - unsigned long *bitmap; 24 - }; 25 - 26 void *dma_alloc_coherent(struct device *dev, size_t size, 27 dma_addr_t *dma_handle, gfp_t gfp) 28 { 29 void *ret; 30 - struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; 31 int order = get_order(size); 32 /* ignore region specifiers */ 33 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); 34 35 - if (mem) { 36 - int page = bitmap_find_free_region(mem->bitmap, mem->size, 37 - order); 38 - if (page >= 0) { 39 - *dma_handle = mem->device_base + (page << PAGE_SHIFT); 40 - ret = mem->virt_base + (page << PAGE_SHIFT); 41 - memset(ret, 0, size); 42 - return ret; 43 - } 44 - if (mem->flags & DMA_MEMORY_EXCLUSIVE) 45 - return NULL; 46 - } 47 48 if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff)) 49 gfp |= GFP_DMA; ··· 41 void dma_free_coherent(struct device *dev, size_t size, 42 void *vaddr, dma_addr_t dma_handle) 43 { 44 - struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; 45 int order = get_order(size); 46 47 - if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) { 48 - int page = (vaddr - mem->virt_base) >> PAGE_SHIFT; 49 - 50 - bitmap_release_region(mem->bitmap, page, order); 51 - } else 52 free_pages((unsigned long)vaddr, order); 53 } 54 55 - int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, 56 - dma_addr_t device_addr, size_t size, int flags) 57 - { 58 - void __iomem *mem_base; 59 - int pages = size >> PAGE_SHIFT; 60 - int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long); 61 - 62 - if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0) 63 - goto out; 64 - if (!size) 65 - goto out; 66 - if (dev->dma_mem) 67 - goto out; 68 - 69 - /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */ 70 - 71 - mem_base = ioremap(bus_addr, size); 72 - if (!mem_base) 73 - goto out; 74 - 75 - dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL); 76 - if (!dev->dma_mem) 77 - goto iounmap_out; 78 - dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL); 79 - if (!dev->dma_mem->bitmap) 80 - goto free1_out; 81 - 82 - dev->dma_mem->virt_base = mem_base; 83 - dev->dma_mem->device_base = device_addr; 84 - dev->dma_mem->size = pages; 85 - dev->dma_mem->flags = flags; 86 - 87 - if (flags & DMA_MEMORY_MAP) 88 - return DMA_MEMORY_MAP; 89 - 90 - return DMA_MEMORY_IO; 91 - 92 - free1_out: 93 - kfree(dev->dma_mem); 94 - iounmap_out: 95 - iounmap(mem_base); 96 - out: 97 - return 0; 98 - } 99 - EXPORT_SYMBOL(dma_declare_coherent_memory); 100 - 101 - void dma_release_declared_memory(struct device *dev) 102 - { 103 - struct dma_coherent_mem *mem = dev->dma_mem; 104 - 105 - if(!mem) 106 - return; 107 - dev->dma_mem = NULL; 108 - iounmap(mem->virt_base); 109 - kfree(mem->bitmap); 110 - kfree(mem); 111 - } 112 - EXPORT_SYMBOL(dma_release_declared_memory); 113 - 114 - void *dma_mark_declared_memory_occupied(struct device *dev, 115 - dma_addr_t device_addr, size_t size) 116 - { 117 - struct dma_coherent_mem *mem = dev->dma_mem; 118 - int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT; 119 - int pos, err; 120 - 121 - if (!mem) 122 - return ERR_PTR(-EINVAL); 123 - 124 - pos = (device_addr - mem->device_base) >> PAGE_SHIFT; 125 - err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages)); 126 - if (err != 0) 127 - return ERR_PTR(err); 128 - return mem->virt_base + (pos << PAGE_SHIFT); 129 - } 130 - EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
··· 15 #include <linux/pci.h> 16 #include <asm/io.h> 17 18 void *dma_alloc_coherent(struct device *dev, size_t size, 19 dma_addr_t *dma_handle, gfp_t gfp) 20 { 21 void *ret; 22 int order = get_order(size); 23 /* ignore region specifiers */ 24 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM); 25 26 + if (dma_alloc_from_coherent(dev, size, dma_handle, &ret)) 27 + return ret; 28 29 if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff)) 30 gfp |= GFP_DMA; ··· 60 void dma_free_coherent(struct device *dev, size_t size, 61 void *vaddr, dma_addr_t dma_handle) 62 { 63 int order = get_order(size); 64 65 + if (!dma_release_from_coherent(dev, order, vaddr)) 66 free_pages((unsigned long)vaddr, order); 67 } 68
+1
arch/sh/Kconfig
··· 11 select HAVE_CLK 12 select HAVE_IDE 13 select HAVE_OPROFILE 14 help 15 The SuperH is a RISC processor targeted for use in embedded systems 16 and consumer electronics; it was also used in the Sega Dreamcast
··· 11 select HAVE_CLK 12 select HAVE_IDE 13 select HAVE_OPROFILE 14 + select HAVE_GENERIC_DMA_COHERENT 15 help 16 The SuperH is a RISC processor targeted for use in embedded systems 17 and consumer electronics; it was also used in the Sega Dreamcast
+3 -95
arch/sh/mm/consistent.c
··· 28 dma_addr_t *dma_handle, gfp_t gfp) 29 { 30 void *ret, *ret_nocache; 31 - struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; 32 int order = get_order(size); 33 34 - if (mem) { 35 - int page = bitmap_find_free_region(mem->bitmap, mem->size, 36 - order); 37 - if (page >= 0) { 38 - *dma_handle = mem->device_base + (page << PAGE_SHIFT); 39 - ret = mem->virt_base + (page << PAGE_SHIFT); 40 - memset(ret, 0, size); 41 - return ret; 42 - } 43 - if (mem->flags & DMA_MEMORY_EXCLUSIVE) 44 - return NULL; 45 - } 46 47 ret = (void *)__get_free_pages(gfp, order); 48 if (!ret) ··· 61 struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; 62 int order = get_order(size); 63 64 - if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) { 65 - int page = (vaddr - mem->virt_base) >> PAGE_SHIFT; 66 - 67 - bitmap_release_region(mem->bitmap, page, order); 68 - } else { 69 WARN_ON(irqs_disabled()); /* for portability */ 70 BUG_ON(mem && mem->flags & DMA_MEMORY_EXCLUSIVE); 71 free_pages((unsigned long)phys_to_virt(dma_handle), order); ··· 69 } 70 } 71 EXPORT_SYMBOL(dma_free_coherent); 72 - 73 - int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, 74 - dma_addr_t device_addr, size_t size, int flags) 75 - { 76 - void __iomem *mem_base = NULL; 77 - int pages = size >> PAGE_SHIFT; 78 - int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long); 79 - 80 - if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0) 81 - goto out; 82 - if (!size) 83 - goto out; 84 - if (dev->dma_mem) 85 - goto out; 86 - 87 - /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */ 88 - 89 - mem_base = ioremap_nocache(bus_addr, size); 90 - if (!mem_base) 91 - goto out; 92 - 93 - dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL); 94 - if (!dev->dma_mem) 95 - goto out; 96 - dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL); 97 - if (!dev->dma_mem->bitmap) 98 - goto free1_out; 99 - 100 - dev->dma_mem->virt_base = mem_base; 101 - dev->dma_mem->device_base = device_addr; 102 - dev->dma_mem->size = pages; 103 - dev->dma_mem->flags = flags; 104 - 105 - if (flags & DMA_MEMORY_MAP) 106 - return DMA_MEMORY_MAP; 107 - 108 - return DMA_MEMORY_IO; 109 - 110 - free1_out: 111 - kfree(dev->dma_mem); 112 - out: 113 - if (mem_base) 114 - iounmap(mem_base); 115 - return 0; 116 - } 117 - EXPORT_SYMBOL(dma_declare_coherent_memory); 118 - 119 - void dma_release_declared_memory(struct device *dev) 120 - { 121 - struct dma_coherent_mem *mem = dev->dma_mem; 122 - 123 - if (!mem) 124 - return; 125 - dev->dma_mem = NULL; 126 - iounmap(mem->virt_base); 127 - kfree(mem->bitmap); 128 - kfree(mem); 129 - } 130 - EXPORT_SYMBOL(dma_release_declared_memory); 131 - 132 - void *dma_mark_declared_memory_occupied(struct device *dev, 133 - dma_addr_t device_addr, size_t size) 134 - { 135 - struct dma_coherent_mem *mem = dev->dma_mem; 136 - int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT; 137 - int pos, err; 138 - 139 - if (!mem) 140 - return ERR_PTR(-EINVAL); 141 - 142 - pos = (device_addr - mem->device_base) >> PAGE_SHIFT; 143 - err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages)); 144 - if (err != 0) 145 - return ERR_PTR(err); 146 - return mem->virt_base + (pos << PAGE_SHIFT); 147 - } 148 - EXPORT_SYMBOL(dma_mark_declared_memory_occupied); 149 150 void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 151 enum dma_data_direction direction)
··· 28 dma_addr_t *dma_handle, gfp_t gfp) 29 { 30 void *ret, *ret_nocache; 31 int order = get_order(size); 32 33 + if (dma_alloc_from_coherent(dev, size, dma_handle, &ret)) 34 + return ret; 35 36 ret = (void *)__get_free_pages(gfp, order); 37 if (!ret) ··· 72 struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; 73 int order = get_order(size); 74 75 + if (!dma_release_from_coherent(dev, order, vaddr)) { 76 WARN_ON(irqs_disabled()); /* for portability */ 77 BUG_ON(mem && mem->flags & DMA_MEMORY_EXCLUSIVE); 78 free_pages((unsigned long)phys_to_virt(dma_handle), order); ··· 84 } 85 } 86 EXPORT_SYMBOL(dma_free_coherent); 87 88 void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 89 enum dma_data_direction direction)
+1
arch/x86/Kconfig
··· 30 select HAVE_FTRACE 31 select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64) 32 select HAVE_ARCH_KGDB if !X86_VOYAGER 33 select HAVE_EFFICIENT_UNALIGNED_ACCESS 34 35 config ARCH_DEFCONFIG
··· 30 select HAVE_FTRACE 31 select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64) 32 select HAVE_ARCH_KGDB if !X86_VOYAGER 33 + select HAVE_GENERIC_DMA_COHERENT if X86_32 34 select HAVE_EFFICIENT_UNALIGNED_ACCESS 35 36 config ARCH_DEFCONFIG
+2 -120
arch/x86/kernel/pci-dma.c
··· 192 } 193 early_param("iommu", iommu_setup); 194 195 - #ifdef CONFIG_X86_32 196 - int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, 197 - dma_addr_t device_addr, size_t size, int flags) 198 - { 199 - void __iomem *mem_base = NULL; 200 - int pages = size >> PAGE_SHIFT; 201 - int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long); 202 - 203 - if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0) 204 - goto out; 205 - if (!size) 206 - goto out; 207 - if (dev->dma_mem) 208 - goto out; 209 - 210 - /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */ 211 - 212 - mem_base = ioremap(bus_addr, size); 213 - if (!mem_base) 214 - goto out; 215 - 216 - dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL); 217 - if (!dev->dma_mem) 218 - goto out; 219 - dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL); 220 - if (!dev->dma_mem->bitmap) 221 - goto free1_out; 222 - 223 - dev->dma_mem->virt_base = mem_base; 224 - dev->dma_mem->device_base = device_addr; 225 - dev->dma_mem->size = pages; 226 - dev->dma_mem->flags = flags; 227 - 228 - if (flags & DMA_MEMORY_MAP) 229 - return DMA_MEMORY_MAP; 230 - 231 - return DMA_MEMORY_IO; 232 - 233 - free1_out: 234 - kfree(dev->dma_mem); 235 - out: 236 - if (mem_base) 237 - iounmap(mem_base); 238 - return 0; 239 - } 240 - EXPORT_SYMBOL(dma_declare_coherent_memory); 241 - 242 - void dma_release_declared_memory(struct device *dev) 243 - { 244 - struct dma_coherent_mem *mem = dev->dma_mem; 245 - 246 - if (!mem) 247 - return; 248 - dev->dma_mem = NULL; 249 - iounmap(mem->virt_base); 250 - kfree(mem->bitmap); 251 - kfree(mem); 252 - } 253 - EXPORT_SYMBOL(dma_release_declared_memory); 254 - 255 - void *dma_mark_declared_memory_occupied(struct device *dev, 256 - dma_addr_t device_addr, size_t size) 257 - { 258 - struct dma_coherent_mem *mem = dev->dma_mem; 259 - int pos, err; 260 - int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1); 261 - 262 - pages >>= PAGE_SHIFT; 263 - 264 - if (!mem) 265 - return ERR_PTR(-EINVAL); 266 - 267 - pos = (device_addr - mem->device_base) >> PAGE_SHIFT; 268 - err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages)); 269 - if (err != 0) 270 - return ERR_PTR(err); 271 - return mem->virt_base + (pos << PAGE_SHIFT); 272 - } 273 - EXPORT_SYMBOL(dma_mark_declared_memory_occupied); 274 - 275 - static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size, 276 - dma_addr_t *dma_handle, void **ret) 277 - { 278 - struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; 279 - int order = get_order(size); 280 - 281 - if (mem) { 282 - int page = bitmap_find_free_region(mem->bitmap, mem->size, 283 - order); 284 - if (page >= 0) { 285 - *dma_handle = mem->device_base + (page << PAGE_SHIFT); 286 - *ret = mem->virt_base + (page << PAGE_SHIFT); 287 - memset(*ret, 0, size); 288 - } 289 - if (mem->flags & DMA_MEMORY_EXCLUSIVE) 290 - *ret = NULL; 291 - } 292 - return (mem != NULL); 293 - } 294 - 295 - static int dma_release_coherent(struct device *dev, int order, void *vaddr) 296 - { 297 - struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; 298 - 299 - if (mem && vaddr >= mem->virt_base && vaddr < 300 - (mem->virt_base + (mem->size << PAGE_SHIFT))) { 301 - int page = (vaddr - mem->virt_base) >> PAGE_SHIFT; 302 - 303 - bitmap_release_region(mem->bitmap, page, order); 304 - return 1; 305 - } 306 - return 0; 307 - } 308 - #else 309 - #define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0) 310 - #define dma_release_coherent(dev, order, vaddr) (0) 311 - #endif /* CONFIG_X86_32 */ 312 - 313 int dma_supported(struct device *dev, u64 mask) 314 { 315 struct dma_mapping_ops *ops = get_dma_ops(dev); ··· 261 /* ignore region specifiers */ 262 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); 263 264 - if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory)) 265 return memory; 266 267 if (!dev) { ··· 366 367 int order = get_order(size); 368 WARN_ON(irqs_disabled()); /* for portability */ 369 - if (dma_release_coherent(dev, order, vaddr)) 370 return; 371 if (ops->unmap_single) 372 ops->unmap_single(dev, bus, size, 0);
··· 192 } 193 early_param("iommu", iommu_setup); 194 195 int dma_supported(struct device *dev, u64 mask) 196 { 197 struct dma_mapping_ops *ops = get_dma_ops(dev); ··· 379 /* ignore region specifiers */ 380 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); 381 382 + if (dma_alloc_from_coherent(dev, size, dma_handle, &memory)) 383 return memory; 384 385 if (!dev) { ··· 484 485 int order = get_order(size); 486 WARN_ON(irqs_disabled()); /* for portability */ 487 + if (dma_release_from_coherent(dev, order, vaddr)) 488 return; 489 if (ops->unmap_single) 490 ops->unmap_single(dev, bus, size, 0);
+2
include/asm-arm/dma-mapping.h
··· 7 8 #include <linux/scatterlist.h> 9 10 /* 11 * DMA-consistent mapping functions. These allocate/free a region of 12 * uncached, unwrite-buffered mapped memory space for use with DMA
··· 7 8 #include <linux/scatterlist.h> 9 10 + #include <asm-generic/dma-coherent.h> 11 + 12 /* 13 * DMA-consistent mapping functions. These allocate/free a region of 14 * uncached, unwrite-buffered mapped memory space for use with DMA
+2
include/asm-cris/dma-mapping.h
··· 14 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 15 16 #ifdef CONFIG_PCI 17 void *dma_alloc_coherent(struct device *dev, size_t size, 18 dma_addr_t *dma_handle, gfp_t flag); 19
··· 14 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 15 16 #ifdef CONFIG_PCI 17 + #include <asm-generic/dma-coherent.h> 18 + 19 void *dma_alloc_coherent(struct device *dev, size_t size, 20 dma_addr_t *dma_handle, gfp_t flag); 21
+32
include/asm-generic/dma-coherent.h
···
··· 1 + #ifndef DMA_COHERENT_H 2 + #define DMA_COHERENT_H 3 + 4 + #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT 5 + /* 6 + * These two functions are only for dma allocator. 7 + * Don't use them in device drivers. 8 + */ 9 + int dma_alloc_from_coherent(struct device *dev, ssize_t size, 10 + dma_addr_t *dma_handle, void **ret); 11 + int dma_release_from_coherent(struct device *dev, int order, void *vaddr); 12 + 13 + /* 14 + * Standard interface 15 + */ 16 + #define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY 17 + extern int 18 + dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, 19 + dma_addr_t device_addr, size_t size, int flags); 20 + 21 + extern void 22 + dma_release_declared_memory(struct device *dev); 23 + 24 + extern void * 25 + dma_mark_declared_memory_occupied(struct device *dev, 26 + dma_addr_t device_addr, size_t size); 27 + #else 28 + #define dma_alloc_from_coherent(dev, size, handle, ret) (0) 29 + #define dma_release_from_coherent(dev, order, vaddr) (0) 30 + #endif 31 + 32 + #endif
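On the consumer side of this "standard interface", a platform driver with device-local memory declares it once at probe time and releases it on teardown. A hedged sketch, not part of this merge: the my_* names and the SRAM addresses are hypothetical; only the dma_declare_coherent_memory() / dma_release_declared_memory() calls and the DMA_MEMORY_* flags come from the interface above.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Hypothetical on-chip SRAM window, addresses made up for illustration. */
#define MY_SRAM_BUS_ADDR	0x48000000UL	/* where the CPU sees it */
#define MY_SRAM_DEV_ADDR	0x00000000UL	/* where the device sees it */
#define MY_SRAM_SIZE		0x10000		/* 64 KiB */

static int my_probe(struct device *dev)
{
	/* Carve the SRAM into a per-device coherent pool; EXCLUSIVE
	 * means allocations must not fall back to system memory. */
	int rc = dma_declare_coherent_memory(dev, MY_SRAM_BUS_ADDR,
					     MY_SRAM_DEV_ADDR, MY_SRAM_SIZE,
					     DMA_MEMORY_MAP |
					     DMA_MEMORY_EXCLUSIVE);
	if (!(rc & DMA_MEMORY_MAP))
		return -ENOMEM;

	/* From here on, dma_alloc_coherent(dev, ...) is satisfied from
	 * the SRAM via dma_alloc_from_coherent(). */
	return 0;
}

static void my_remove(struct device *dev)
{
	dma_release_declared_memory(dev);
}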
+1
include/asm-sh/dma-mapping.h
··· 5 #include <linux/scatterlist.h> 6 #include <asm/cacheflush.h> 7 #include <asm/io.h> 8 9 extern struct bus_type pci_bus_type; 10
··· 5 #include <linux/scatterlist.h> 6 #include <asm/cacheflush.h> 7 #include <asm/io.h> 8 + #include <asm-generic/dma-coherent.h> 9 10 extern struct bus_type pci_bus_type; 11
+1 -21
include/asm-x86/dma-mapping.h
··· 249 250 #define dma_is_consistent(d, h) (1) 251 252 - #ifdef CONFIG_X86_32 253 - # define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY 254 - struct dma_coherent_mem { 255 - void *virt_base; 256 - u32 device_base; 257 - int size; 258 - int flags; 259 - unsigned long *bitmap; 260 - }; 261 - 262 - extern int 263 - dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, 264 - dma_addr_t device_addr, size_t size, int flags); 265 - 266 - extern void 267 - dma_release_declared_memory(struct device *dev); 268 - 269 - extern void * 270 - dma_mark_declared_memory_occupied(struct device *dev, 271 - dma_addr_t device_addr, size_t size); 272 - #endif /* CONFIG_X86_32 */ 273 #endif
··· 249 250 #define dma_is_consistent(d, h) (1) 251 252 + #include <asm-generic/dma-coherent.h> 253 #endif
+4
init/Kconfig
··· 802 803 endmenu # General setup 804 805 config SLABINFO 806 bool 807 depends on PROC_FS
··· 802 803 endmenu # General setup 804 805 + config HAVE_GENERIC_DMA_COHERENT 806 + bool 807 + default n 808 + 809 config SLABINFO 810 bool 811 depends on PROC_FS
+1
kernel/Makefile
··· 84 obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o 85 obj-$(CONFIG_MARKERS) += marker.o 86 obj-$(CONFIG_LATENCYTOP) += latencytop.o 87 obj-$(CONFIG_FTRACE) += trace/ 88 obj-$(CONFIG_TRACING) += trace/ 89 obj-$(CONFIG_SMP) += sched_cpupri.o
··· 84 obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o 85 obj-$(CONFIG_MARKERS) += marker.o 86 obj-$(CONFIG_LATENCYTOP) += latencytop.o 87 + obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o 88 obj-$(CONFIG_FTRACE) += trace/ 89 obj-$(CONFIG_TRACING) += trace/ 90 obj-$(CONFIG_SMP) += sched_cpupri.o
+154
kernel/dma-coherent.c
···
··· 1 + /* 2 + * Coherent per-device memory handling. 3 + * Borrowed from i386 4 + */ 5 + #include <linux/kernel.h> 6 + #include <linux/dma-mapping.h> 7 + 8 + struct dma_coherent_mem { 9 + void *virt_base; 10 + u32 device_base; 11 + int size; 12 + int flags; 13 + unsigned long *bitmap; 14 + }; 15 + 16 + int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, 17 + dma_addr_t device_addr, size_t size, int flags) 18 + { 19 + void __iomem *mem_base = NULL; 20 + int pages = size >> PAGE_SHIFT; 21 + int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long); 22 + 23 + if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0) 24 + goto out; 25 + if (!size) 26 + goto out; 27 + if (dev->dma_mem) 28 + goto out; 29 + 30 + /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */ 31 + 32 + mem_base = ioremap(bus_addr, size); 33 + if (!mem_base) 34 + goto out; 35 + 36 + dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL); 37 + if (!dev->dma_mem) 38 + goto out; 39 + dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL); 40 + if (!dev->dma_mem->bitmap) 41 + goto free1_out; 42 + 43 + dev->dma_mem->virt_base = mem_base; 44 + dev->dma_mem->device_base = device_addr; 45 + dev->dma_mem->size = pages; 46 + dev->dma_mem->flags = flags; 47 + 48 + if (flags & DMA_MEMORY_MAP) 49 + return DMA_MEMORY_MAP; 50 + 51 + return DMA_MEMORY_IO; 52 + 53 + free1_out: 54 + kfree(dev->dma_mem); 55 + out: 56 + if (mem_base) 57 + iounmap(mem_base); 58 + return 0; 59 + } 60 + EXPORT_SYMBOL(dma_declare_coherent_memory); 61 + 62 + void dma_release_declared_memory(struct device *dev) 63 + { 64 + struct dma_coherent_mem *mem = dev->dma_mem; 65 + 66 + if (!mem) 67 + return; 68 + dev->dma_mem = NULL; 69 + iounmap(mem->virt_base); 70 + kfree(mem->bitmap); 71 + kfree(mem); 72 + } 73 + EXPORT_SYMBOL(dma_release_declared_memory); 74 + 75 + void *dma_mark_declared_memory_occupied(struct device *dev, 76 + dma_addr_t device_addr, size_t size) 77 + { 78 + struct dma_coherent_mem *mem = dev->dma_mem; 79 + int pos, err; 80 + int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1); 81 + 82 + pages >>= PAGE_SHIFT; 83 + 84 + if (!mem) 85 + return ERR_PTR(-EINVAL); 86 + 87 + pos = (device_addr - mem->device_base) >> PAGE_SHIFT; 88 + err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages)); 89 + if (err != 0) 90 + return ERR_PTR(err); 91 + return mem->virt_base + (pos << PAGE_SHIFT); 92 + } 93 + EXPORT_SYMBOL(dma_mark_declared_memory_occupied); 94 + 95 + /** 96 + * Try to allocate memory from the per-device coherent area. 97 + * 98 + * @dev: device from which we allocate memory 99 + * @size: size of requested memory area 100 + * @dma_handle: This will be filled with the correct dma handle 101 + * @ret: This pointer will be filled with the virtual address 102 + * to allocated area. 103 + * 104 + * This function should be only called from per-arch %dma_alloc_coherent() 105 + * to support allocation from per-device coherent memory pools. 106 + * 107 + * Returns 0 if dma_alloc_coherent should continue with allocating from 108 + * generic memory areas, or !0 if dma_alloc_coherent should return %ret. 109 + */ 110 + int dma_alloc_from_coherent(struct device *dev, ssize_t size, 111 + dma_addr_t *dma_handle, void **ret) 112 + { 113 + struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; 114 + int order = get_order(size); 115 + 116 + if (mem) { 117 + int page = bitmap_find_free_region(mem->bitmap, mem->size, 118 + order); 119 + if (page >= 0) { 120 + *dma_handle = mem->device_base + (page << PAGE_SHIFT); 121 + *ret = mem->virt_base + (page << PAGE_SHIFT); 122 + memset(*ret, 0, size); 123 + } else if (mem->flags & DMA_MEMORY_EXCLUSIVE) 124 + *ret = NULL; 125 + } 126 + return (mem != NULL); 127 + } 128 + 129 + /** 130 + * Try to free the memory allocated from per-device coherent memory pool. 131 + * @dev: device from which the memory was allocated 132 + * @order: the order of pages allocated 133 + * @vaddr: virtual address of allocated pages 134 + * 135 + * This checks whether the memory was allocated from the per-device 136 + * coherent memory pool and if so, releases that memory. 137 + * 138 + * Returns 1 if we correctly released the memory, or 0 if 139 + * %dma_release_coherent() should proceed with releasing memory from 140 + * generic pools. 141 + */ 142 + int dma_release_from_coherent(struct device *dev, int order, void *vaddr) 143 + { 144 + struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL; 145 + 146 + if (mem && vaddr >= mem->virt_base && vaddr < 147 + (mem->virt_base + (mem->size << PAGE_SHIFT))) { 148 + int page = (vaddr - mem->virt_base) >> PAGE_SHIFT; 149 + 150 + bitmap_release_region(mem->bitmap, page, order); 151 + return 1; 152 + } 153 + return 0; 154 + }
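dma_mark_declared_memory_occupied() covers the case where part of the declared window is already in use at a fixed device address, for example a buffer that boot firmware left behind. A hedged sketch continuing the hypothetical SRAM example above (my_reserve_fw_ring and the firmware ring itself are made up; PAGE_SIZE and IS_ERR come from <asm/page.h> and <linux/err.h>):

#include <linux/err.h>

/* Reserve the first page of the pool at device address 0x0, so the
 * allocator will never hand it out, and get its kernel mapping back. */
static void *my_reserve_fw_ring(struct device *dev)
{
	void *virt = dma_mark_declared_memory_occupied(dev,
						       MY_SRAM_DEV_ADDR,
						       PAGE_SIZE);
	if (IS_ERR(virt))
		return NULL;

	/* virt maps the reserved pages inside the declared window. */
	return virt;
}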