Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v3.12 (359 lines, 9.5 kB)
/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
# define DEBUG
#endif
#endif

#include <asm/page.h>
#include <asm/dma-contiguous.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-isolation.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm_types.h>
#include <linux/dma-contiguous.h>

struct cma {
	unsigned long	base_pfn;
	unsigned long	count;
	unsigned long	*bitmap;
};

struct cma *dma_contiguous_default_area;

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif

/*
 * Default global CMA area size can be defined in the kernel's .config.
 * This is useful mainly for distro maintainers to create a kernel
 * that works correctly for most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * in the system.
 *
 * Users who want to set the size of the global CMA area for their
 * system should use the cma= kernel parameter.
 */
static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline = -1;

static int __init early_cma(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	size_cmdline = memparse(p, &p);
	return 0;
}
early_param("cma", early_cma);

#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
	struct memblock_region *reg;
	unsigned long total_pages = 0;

	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		total_pages += memblock_region_memory_end_pfn(reg) -
			       memblock_region_memory_base_pfn(reg);

	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}

#else

static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
{
	return 0;
}

#endif

/**
 * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
	phys_addr_t selected_size = 0;

	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

	if (size_cmdline != -1) {
		selected_size = size_cmdline;
	} else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
		selected_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
		selected_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
		selected_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
		selected_size = max(size_bytes, cma_early_percent_memory());
#endif
	}

	if (selected_size && !dma_contiguous_default_area) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);

		dma_contiguous_reserve_area(selected_size, 0, limit,
					    &dma_contiguous_default_area);
	}
}

static DEFINE_MUTEX(cma_mutex);

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;
		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			if (page_zone(pfn_to_page(pfn)) != zone)
				return -EINVAL;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	return 0;
}

static struct cma cma_areas[MAX_CMA_AREAS];
static unsigned cma_area_count;

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);
		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes).
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows creating custom reserved areas for specific
 * devices.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit, struct cma **res_cma)
{
	struct cma *cma = &cma_areas[cma_area_count];
	phys_addr_t alignment;
	int ret = 0;

	pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
		 (unsigned long)size, (unsigned long)base,
		 (unsigned long)limit);

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	/* Sanitise input arguments */
	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	/* Reserve memory */
	if (base) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		/*
		 * Use __memblock_alloc_base() since
		 * memblock_alloc_base() panic()s.
		 */
		phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
		if (!addr) {
			ret = -ENOMEM;
			goto err;
		} else {
			base = addr;
		}
	}

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like the slab allocator) are available.
	 */
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	*res_cma = cma;
	cma_area_count++;

	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
		(unsigned long)base);

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(base, size);
	return 0;
err:
	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}

/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev: Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates a memory buffer for the specified device. It uses
 * the device-specific contiguous memory area if available, or the default
 * global one. Requires the architecture-specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int align)
{
	unsigned long mask, pfn, pageno, start = 0;
	struct cma *cma = dev_get_cma_area(dev);
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = (1 << align) - 1;

	mutex_lock(&cma_mutex);

	for (;;) {
		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
						    start, count, mask);
		if (pageno >= cma->count)
			break;

		pfn = cma->base_pfn + pageno;
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		if (ret == 0) {
			bitmap_set(cma->bitmap, pageno, count);
			page = pfn_to_page(pfn);
			break;
		} else if (ret != -EBUSY) {
			break;
		}
		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a slightly different memory target */
		start = pageno + mask + 1;
	}

	mutex_unlock(&cma_mutex);
	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}

/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev: Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when the provided pages do not belong to the contiguous
 * area, and true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	struct cma *cma = dev_get_cma_area(dev);
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	mutex_lock(&cma_mutex);
	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
	free_contig_range(pfn, count);
	mutex_unlock(&cma_mutex);

	return true;
}
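A few usage notes and worked examples follow; they are editorial sketches, not part of the v3.12 file above.

The cma= parameter parsed by early_cma() means the global area can be sized from the boot command line, overriding whatever CONFIG_CMA_SIZE_* policy was compiled in, since size_cmdline takes priority in dma_contiguous_reserve(). memparse() accepts the usual K/M/G suffixes, so a hypothetical bootloader entry (image path and other flags are placeholders) could look like:

    linux /boot/vmlinuz root=/dev/sda1 cma=128M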
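dma_contiguous_reserve() is meant to be called once from architecture setup code, after memblock is ready but before the buddy allocator takes over the memory. A minimal sketch of such a call site, where example_arch_reserve() and the 4 GiB limit are illustrative assumptions:

    #include <linux/dma-contiguous.h>
    #include <linux/sizes.h>

    void __init example_arch_reserve(void)  /* hypothetical arch hook */
    {
            /*
             * Keep the global CMA area below 4 GiB so that devices
             * with 32-bit DMA masks can reach it; passing 0 instead
             * would mean "no limit, place it anywhere".
             */
            phys_addr_t dma_limit = (phys_addr_t)SZ_1G * 4;

            dma_contiguous_reserve(dma_limit);
    }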
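The alignment chosen in dma_contiguous_reserve_area() is worth a worked example. Assuming 4 KiB pages, MAX_ORDER == 11 and pageblock_order == 10 (typical x86 defaults):

    alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order)
              = 4096 << 10
              = 4 MiB

base and size are rounded up to, and limit is rounded down to, a 4 MiB boundary, so every reserved area covers whole pageblocks and whole maximum-order buddy blocks.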
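cma_activate_area() runs much later than the reservation, via core_initcall(), because it needs kzalloc() and therefore a working slab allocator. The bookkeeping is cheap; for a 64 MiB area with 4 KiB pages, 64-bit longs and pageblock_order == 10 (illustrative numbers):

    count       = 64 MiB / 4 KiB           = 16384 pages
    bitmap_size = BITS_TO_LONGS(16384) * 8 = 2048 bytes
    pageblocks  = 16384 >> 10              = 16 init_cma_reserved_pageblock() calls

Each pageblock is handed back to the page allocator as MIGRATE_CMA, and a single bit per page then tracks CMA allocations.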
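On the consumer side, dma_alloc_from_contiguous() counts in pages and takes its alignment as a page order, capped at CONFIG_CMA_ALIGNMENT. A minimal round-trip sketch for a 1 MiB buffer; my_dev and the helper names are illustrative assumptions:

    #include <linux/dma-contiguous.h>
    #include <linux/mm.h>   /* get_order(), PAGE_SHIFT */

    static struct page *example_grab_1m(struct device *my_dev)
    {
            /* 256 pages with 4 KiB pages; align to the buffer size (order 8) */
            return dma_alloc_from_contiguous(my_dev, SZ_1M >> PAGE_SHIFT,
                                             get_order(SZ_1M));
    }

    static void example_drop_1m(struct device *my_dev, struct page *page)
    {
            if (!dma_release_from_contiguous(my_dev, page, SZ_1M >> PAGE_SHIFT))
                    pr_warn("buffer did not come from a CMA area\n");
    }

Both calls serialise on cma_mutex, so they are safe from process context but must not be used from atomic context.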