Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v4.11-rc8 (281 lines, 8.2 kB)
/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your optional) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
# define DEBUG
#endif
#endif

#include <asm/page.h>
#include <asm/dma-contiguous.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/dma-contiguous.h>
#include <linux/cma.h>

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif

struct cma *dma_contiguous_default_area;

/*
 * Default global CMA area size can be defined in kernel's .config.
 * This is useful mainly for distro maintainers to create a kernel
 * that works correctly for most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * in the system.
 *
 * Users, who want to set the size of global CMA area for their system
 * should use cma= kernel parameter.
 */
static const phys_addr_t size_bytes = (phys_addr_t)CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline = -1;
static phys_addr_t base_cmdline;
static phys_addr_t limit_cmdline;

static int __init early_cma(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	size_cmdline = memparse(p, &p);
	if (*p != '@')
		return 0;
	base_cmdline = memparse(p + 1, &p);
	if (*p != '-') {
		limit_cmdline = base_cmdline + size_cmdline;
		return 0;
	}
	limit_cmdline = memparse(p + 1, &p);

	return 0;
}
early_param("cma", early_cma);

#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
	struct memblock_region *reg;
	unsigned long total_pages = 0;

	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		total_pages += memblock_region_memory_end_pfn(reg) -
			       memblock_region_memory_base_pfn(reg);

	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}

#else

static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
{
	return 0;
}

#endif

/**
 * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
	phys_addr_t selected_size = 0;
	phys_addr_t selected_base = 0;
	phys_addr_t selected_limit = limit;
	bool fixed = false;

	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

	if (size_cmdline != -1) {
		selected_size = size_cmdline;
		selected_base = base_cmdline;
		selected_limit = min_not_zero(limit_cmdline, limit);
		if (base_cmdline + size_cmdline == limit_cmdline)
			fixed = true;
	} else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
		selected_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
		selected_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
		selected_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
		selected_size = max(size_bytes, cma_early_percent_memory());
#endif
	}

	if (selected_size && !dma_contiguous_default_area) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);

		dma_contiguous_reserve_area(selected_size, selected_base,
					    selected_limit,
					    &dma_contiguous_default_area,
					    fixed);
	}
}

/**
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes),
 * @base: Base address of the reserved area optional, use 0 for any
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @res_cma: Pointer to store the created cma region.
 * @fixed: hint about where to place the reserved area
 *
 * This function reserves memory from early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows to create custom reserved areas for specific
 * devices.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit, struct cma **res_cma,
				       bool fixed)
{
	int ret;

	ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed, res_cma);
	if (ret)
		return ret;

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(cma_get_base(*res_cma),
				   cma_get_size(*res_cma));

	return 0;
}

/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev: Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @gfp_mask: GFP flags to use for this allocation.
 *
 * This function allocates memory buffer for specified device. It uses
 * device specific contiguous memory area if available or the default
 * global one. Requires architecture specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
				       unsigned int align, gfp_t gfp_mask)
{
	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	return cma_alloc(dev_get_cma_area(dev), count, align, gfp_mask);
}

/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev: Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when provided pages do not belong to contiguous area and
 * true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	return cma_release(dev_get_cma_area(dev), pages, count);
}

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

#undef pr_fmt
#define pr_fmt(fmt) fmt

static int rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	dev_set_cma_area(dev, rmem->priv);
	return 0;
}

static void rmem_cma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	dev_set_cma_area(dev, NULL);
}

static const struct reserved_mem_ops rmem_cma_ops = {
	.device_init	= rmem_cma_device_init,
	.device_release = rmem_cma_device_release,
};

static int __init rmem_cma_setup(struct reserved_mem *rmem)
{
	phys_addr_t align = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
	phys_addr_t mask = align - 1;
	unsigned long node = rmem->fdt_node;
	struct cma *cma;
	int err;

	if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
	    of_get_flat_dt_prop(node, "no-map", NULL))
		return -EINVAL;

	if ((rmem->base & mask) || (rmem->size & mask)) {
		pr_err("Reserved memory: incorrect alignment of CMA region\n");
		return -EINVAL;
	}

	err = cma_init_reserved_mem(rmem->base, rmem->size, 0, &cma);
	if (err) {
		pr_err("Reserved memory: unable to setup CMA region\n");
		return err;
	}
	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(rmem->base, rmem->size);

	if (of_get_flat_dt_prop(node, "linux,cma-default", NULL))
		dma_contiguous_set_default(cma);

	rmem->ops = &rmem_cma_ops;
	rmem->priv = cma;

	pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);

	return 0;
}
RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
#endif
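
A minimal usage sketch follows for context. It is not part of the file above: every identifier prefixed with example_ is hypothetical, and in practice most drivers reach CMA indirectly through dma_alloc_coherent() rather than calling dma_alloc_from_contiguous() directly. The sketch assumes arch/boot code that reserves a dedicated area in the same early-boot window as dma_contiguous_reserve(), and a consumer that later allocates pages from it. (The cma= command-line handling above accepts the same shapes the sketch avoids hard-coding, e.g. cma=64M or cma=64M@0x40000000-0x44000000.)

#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sizes.h>

/* Hypothetical: a dedicated CMA area for one device, set up by arch code. */
static struct cma *example_cma_area;

/*
 * Called from arch-specific early init, after memblock is available but
 * before other subsystems start allocating memory.
 */
void __init example_reserve_cma(void)
{
	/* 16 MiB, no fixed base, no upper limit; placement is left to CMA. */
	if (dma_contiguous_reserve_area(SZ_16M, 0, 0, &example_cma_area, false))
		pr_warn("example: CMA reservation failed\n");
}

/* Hypothetical driver-side helpers built on the allocator in this file. */
static struct page *example_alloc_pages(struct device *dev, size_t size)
{
	/* Attach the dedicated area so dev_get_cma_area() finds it. */
	if (example_cma_area)
		dev_set_cma_area(dev, example_cma_area);

	return dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
					 get_order(size), GFP_KERNEL);
}

static void example_free_pages(struct device *dev, struct page *pages,
			       size_t size)
{
	if (!dma_release_from_contiguous(dev, pages, size >> PAGE_SHIFT))
		pr_warn("example: pages did not come from the CMA area\n");
}

Note that dma_alloc_from_contiguous() clamps the requested alignment to CONFIG_CMA_ALIGNMENT, so passing get_order(size) for large buffers is safe even when it exceeds that limit.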
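
The CONFIG_OF_RESERVED_MEM block at the end of the file only registers rmem_cma_ops for "shared-dma-pool" nodes (which, per rmem_cma_setup(), must carry "reusable" and must not be "no-map"). The per-device attachment happens when a driver calls of_reserved_mem_device_init() on a device whose node references such a region through a memory-region phandle. A hypothetical probe/remove pair might look like the sketch below; the example_ names are illustrative, not from this file.

#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	int ret;

	/*
	 * Resolves this device's "memory-region" phandle; for a
	 * "shared-dma-pool" region this ends up in rmem_cma_device_init()
	 * above, which points the device's CMA area at the reserved pool.
	 */
	ret = of_reserved_mem_device_init(&pdev->dev);
	if (ret)
		dev_info(&pdev->dev, "no dedicated CMA pool, using default area\n");

	/* ... normal probe work; DMA allocations now prefer the pool ... */
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	/* Detaches the pool again via rmem_cma_device_release(). */
	of_reserved_mem_device_release(&pdev->dev);
	return 0;
}

If the region also carries the linux,cma-default property, rmem_cma_setup() makes it the global default area, so even devices that never call of_reserved_mem_device_init() allocate from it.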