Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drivers: dma-coherent: add initialization from device tree

Initialization procedure of dma coherent pool has been split into two
parts, so memory pool can now be initialized without assigning to
particular struct device. Then initialized region can be assigned to more
than one struct device. To protect from concurrent allocations from
different devices, a spinlock has been added to the memory pool
structure. The last part of this patch adds support for handling
'shared-dma-pool' reserved-memory device tree nodes.

[akpm@linux-foundation.org: use more appropriate printk facility levels]
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Grant Likely <grant.likely@linaro.org>
Cc: Laura Abbott <lauraa@codeaurora.org>
Cc: Josh Cartwright <joshc@codeaurora.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Kyungmin Park <kyungmin.park@samsung.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Marek Szyprowski and committed by Linus Torvalds.
7bfa5ab6 71458cfc

+129 -22
+129 -22
drivers/base/dma-coherent.c
··· 14 14 int size; 15 15 int flags; 16 16 unsigned long *bitmap; 17 + spinlock_t spinlock; 17 18 }; 18 19 19 - int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, 20 - dma_addr_t device_addr, size_t size, int flags) 20 + static int dma_init_coherent_memory(phys_addr_t phys_addr, dma_addr_t device_addr, 21 + size_t size, int flags, 22 + struct dma_coherent_mem **mem) 21 23 { 24 + struct dma_coherent_mem *dma_mem = NULL; 22 25 void __iomem *mem_base = NULL; 23 26 int pages = size >> PAGE_SHIFT; 24 27 int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long); ··· 30 27 goto out; 31 28 if (!size) 32 29 goto out; 33 - if (dev->dma_mem) 34 - goto out; 35 - 36 - /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */ 37 30 38 31 mem_base = ioremap(phys_addr, size); 39 32 if (!mem_base) 40 33 goto out; 41 34 42 - dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL); 43 - if (!dev->dma_mem) 35 + dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL); 36 + if (!dma_mem) 44 37 goto out; 45 - dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL); 46 - if (!dev->dma_mem->bitmap) 47 - goto free1_out; 38 + dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL); 39 + if (!dma_mem->bitmap) 40 + goto out; 48 41 49 - dev->dma_mem->virt_base = mem_base; 50 - dev->dma_mem->device_base = device_addr; 51 - dev->dma_mem->pfn_base = PFN_DOWN(phys_addr); 52 - dev->dma_mem->size = pages; 53 - dev->dma_mem->flags = flags; 42 + dma_mem->virt_base = mem_base; 43 + dma_mem->device_base = device_addr; 44 + dma_mem->pfn_base = PFN_DOWN(phys_addr); 45 + dma_mem->size = pages; 46 + dma_mem->flags = flags; 47 + spin_lock_init(&dma_mem->spinlock); 48 + 49 + *mem = dma_mem; 54 50 55 51 if (flags & DMA_MEMORY_MAP) 56 52 return DMA_MEMORY_MAP; 57 53 58 54 return DMA_MEMORY_IO; 59 55 60 - free1_out: 61 - kfree(dev->dma_mem); 62 - out: 56 + out: 57 + kfree(dma_mem); 63 58 if (mem_base) 64 59 iounmap(mem_base); 60 + return 0; 61 + } 62 + 63 + static void 
dma_release_coherent_memory(struct dma_coherent_mem *mem) 64 + { 65 + if (!mem) 66 + return; 67 + iounmap(mem->virt_base); 68 + kfree(mem->bitmap); 69 + kfree(mem); 70 + } 71 + 72 + static int dma_assign_coherent_memory(struct device *dev, 73 + struct dma_coherent_mem *mem) 74 + { 75 + if (dev->dma_mem) 76 + return -EBUSY; 77 + 78 + dev->dma_mem = mem; 79 + /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */ 80 + 81 + return 0; 82 + } 83 + 84 + int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, 85 + dma_addr_t device_addr, size_t size, int flags) 86 + { 87 + struct dma_coherent_mem *mem; 88 + int ret; 89 + 90 + ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags, 91 + &mem); 92 + if (ret == 0) 93 + return 0; 94 + 95 + if (dma_assign_coherent_memory(dev, mem) == 0) 96 + return ret; 97 + 98 + dma_release_coherent_memory(mem); 65 99 return 0; 66 100 } 67 101 EXPORT_SYMBOL(dma_declare_coherent_memory); ··· 109 69 110 70 if (!mem) 111 71 return; 72 + dma_release_coherent_memory(mem); 112 73 dev->dma_mem = NULL; 113 - iounmap(mem->virt_base); 114 - kfree(mem->bitmap); 115 - kfree(mem); 116 74 } 117 75 EXPORT_SYMBOL(dma_release_declared_memory); 118 76 ··· 118 80 dma_addr_t device_addr, size_t size) 119 81 { 120 82 struct dma_coherent_mem *mem = dev->dma_mem; 83 + unsigned long flags; 121 84 int pos, err; 122 85 123 86 size += device_addr & ~PAGE_MASK; ··· 126 87 if (!mem) 127 88 return ERR_PTR(-EINVAL); 128 89 90 + spin_lock_irqsave(&mem->spinlock, flags); 129 91 pos = (device_addr - mem->device_base) >> PAGE_SHIFT; 130 92 err = bitmap_allocate_region(mem->bitmap, pos, get_order(size)); 93 + spin_unlock_irqrestore(&mem->spinlock, flags); 94 + 131 95 if (err != 0) 132 96 return ERR_PTR(err); 133 97 return mem->virt_base + (pos << PAGE_SHIFT); ··· 157 115 { 158 116 struct dma_coherent_mem *mem; 159 117 int order = get_order(size); 118 + unsigned long flags; 160 119 int pageno; 161 120 162 121 if (!dev) ··· 167 124 
return 0; 168 125 169 126 *ret = NULL; 127 + spin_lock_irqsave(&mem->spinlock, flags); 170 128 171 129 if (unlikely(size > (mem->size << PAGE_SHIFT))) 172 130 goto err; ··· 182 138 *dma_handle = mem->device_base + (pageno << PAGE_SHIFT); 183 139 *ret = mem->virt_base + (pageno << PAGE_SHIFT); 184 140 memset(*ret, 0, size); 141 + spin_unlock_irqrestore(&mem->spinlock, flags); 185 142 186 143 return 1; 187 144 188 145 err: 146 + spin_unlock_irqrestore(&mem->spinlock, flags); 189 147 /* 190 148 * In the case where the allocation can not be satisfied from the 191 149 * per-device area, try to fall back to generic memory if the ··· 217 171 if (mem && vaddr >= mem->virt_base && vaddr < 218 172 (mem->virt_base + (mem->size << PAGE_SHIFT))) { 219 173 int page = (vaddr - mem->virt_base) >> PAGE_SHIFT; 174 + unsigned long flags; 220 175 176 + spin_lock_irqsave(&mem->spinlock, flags); 221 177 bitmap_release_region(mem->bitmap, page, order); 178 + spin_unlock_irqrestore(&mem->spinlock, flags); 222 179 return 1; 223 180 } 224 181 return 0; ··· 267 218 return 0; 268 219 } 269 220 EXPORT_SYMBOL(dma_mmap_from_coherent); 221 + 222 + /* 223 + * Support for reserved memory regions defined in device tree 224 + */ 225 + #ifdef CONFIG_OF_RESERVED_MEM 226 + #include <linux/of.h> 227 + #include <linux/of_fdt.h> 228 + #include <linux/of_reserved_mem.h> 229 + 230 + static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev) 231 + { 232 + struct dma_coherent_mem *mem = rmem->priv; 233 + 234 + if (!mem && 235 + dma_init_coherent_memory(rmem->base, rmem->base, rmem->size, 236 + DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE, 237 + &mem) != DMA_MEMORY_MAP) { 238 + pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n", 239 + &rmem->base, (unsigned long)rmem->size / SZ_1M); 240 + return -ENODEV; 241 + } 242 + rmem->priv = mem; 243 + dma_assign_coherent_memory(dev, mem); 244 + return 0; 245 + } 246 + 247 + static void rmem_dma_device_release(struct reserved_mem 
*rmem, 248 + struct device *dev) 249 + { 250 + dev->dma_mem = NULL; 251 + } 252 + 253 + static const struct reserved_mem_ops rmem_dma_ops = { 254 + .device_init = rmem_dma_device_init, 255 + .device_release = rmem_dma_device_release, 256 + }; 257 + 258 + static int __init rmem_dma_setup(struct reserved_mem *rmem) 259 + { 260 + unsigned long node = rmem->fdt_node; 261 + 262 + if (of_get_flat_dt_prop(node, "reusable", NULL)) 263 + return -EINVAL; 264 + 265 + #ifdef CONFIG_ARM 266 + if (!of_get_flat_dt_prop(node, "no-map", NULL)) { 267 + pr_err("Reserved memory: regions without no-map are not yet supported\n"); 268 + return -EINVAL; 269 + } 270 + #endif 271 + 272 + rmem->ops = &rmem_dma_ops; 273 + pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n", 274 + &rmem->base, (unsigned long)rmem->size / SZ_1M); 275 + return 0; 276 + } 277 + RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup); 278 + #endif