Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v3.16-rc5 (409 lines, 11 kB)
/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your optional) any later version of the license.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif

#include <asm/page.h>
#include <asm/dma-contiguous.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/page-isolation.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm_types.h>
#include <linux/dma-contiguous.h>

struct cma {
	unsigned long	base_pfn;
	unsigned long	count;
	unsigned long	*bitmap;
	struct mutex	lock;
};

struct cma *dma_contiguous_default_area;

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif

/*
 * Default global CMA area size can be defined in kernel's .config.
 * This is useful mainly for distro maintainers to create a kernel
 * that works correctly for most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * in the system.
 *
 * Users, who want to set the size of global CMA area for their system
 * should use cma= kernel parameter.
 */
static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline = -1;
static phys_addr_t base_cmdline;
static phys_addr_t limit_cmdline;

static int __init early_cma(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	size_cmdline = memparse(p, &p);
	if (*p != '@')
		return 0;
	base_cmdline = memparse(p + 1, &p);
	if (*p != '-') {
		limit_cmdline = base_cmdline + size_cmdline;
		return 0;
	}
	limit_cmdline = memparse(p + 1, &p);

	return 0;
}
early_param("cma", early_cma);

#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
	struct memblock_region *reg;
	unsigned long total_pages = 0;

	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		total_pages += memblock_region_memory_end_pfn(reg) -
			       memblock_region_memory_base_pfn(reg);

	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}

#else

static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
{
	return 0;
}

#endif

/**
 * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
	phys_addr_t selected_size = 0;
	phys_addr_t selected_base = 0;
	phys_addr_t selected_limit = limit;
	bool fixed = false;

	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

	if (size_cmdline != -1) {
		selected_size = size_cmdline;
		selected_base = base_cmdline;
		selected_limit = min_not_zero(limit_cmdline, limit);
		if (base_cmdline + size_cmdline == limit_cmdline)
			fixed = true;
	} else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
		selected_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
		selected_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
		selected_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
		selected_size = max(size_bytes, cma_early_percent_memory());
#endif
	}

	if (selected_size && !dma_contiguous_default_area) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);

		dma_contiguous_reserve_area(selected_size, selected_base,
					    selected_limit,
					    &dma_contiguous_default_area,
					    fixed);
	}
}

static DEFINE_MUTEX(cma_mutex);

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);

	if (!cma->bitmap)
		return -ENOMEM;

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;
		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range requires the pfn range
			 * specified to be in the same zone. Make this
			 * simple by forcing the entire CMA resv range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);
	return 0;

err:
	kfree(cma->bitmap);
	return -EINVAL;
}

static struct cma cma_areas[MAX_CMA_AREAS];
static unsigned cma_area_count;

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);
		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes),
 * @base: Base address of the reserved area optional, use 0 for any
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @res_cma: Pointer to store the created cma region.
 * @fixed: hint about where to place the reserved area
 *
 * This function reserves memory from early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows to create custom reserved areas for specific
 * devices.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit, struct cma **res_cma,
				       bool fixed)
{
	struct cma *cma = &cma_areas[cma_area_count];
	phys_addr_t alignment;
	int ret = 0;

	pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
		 (unsigned long)size, (unsigned long)base,
		 (unsigned long)limit);

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	/* Sanitise input arguments */
	alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	/* Reserve memory */
	if (base && fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = memblock_alloc_range(size, alignment, base,
							limit);
		if (!addr) {
			ret = -ENOMEM;
			goto err;
		} else {
			base = addr;
		}
	}

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	*res_cma = cma;
	cma_area_count++;

	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
		(unsigned long)base);

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(base, size);
	return 0;
err:
	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}

static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
{
	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
	mutex_unlock(&cma->lock);
}

/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev:   Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates memory buffer for specified device. It uses
 * device specific contiguous memory area if available or the default
 * global one. Requires architecture specific dev_get_cma_area() helper
 * function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
				       unsigned int align)
{
	unsigned long mask, pfn, pageno, start = 0;
	struct cma *cma = dev_get_cma_area(dev);
	struct page *page = NULL;
	int ret;

	if (!cma || !cma->count)
		return NULL;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = (1 << align) - 1;


	for (;;) {
		mutex_lock(&cma->lock);
		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
						    start, count, mask);
		if (pageno >= cma->count) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, pageno, count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + pageno;
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		} else if (ret != -EBUSY) {
			clear_cma_bitmap(cma, pfn, count);
			break;
		}
		clear_cma_bitmap(cma, pfn, count);
		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = pageno + mask + 1;
	}

	pr_debug("%s(): returned %p\n", __func__, page);
	return page;
}

/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev:   Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when provided pages do not belong to contiguous area and
 * true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	struct cma *cma = dev_get_cma_area(dev);
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	free_contig_range(pfn, count);
	clear_cma_bitmap(cma, pfn, count);

	return true;
}
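
For reference, below is a minimal caller-side sketch of the two public entry points in this file, dma_alloc_from_contiguous() and dma_release_from_contiguous(). It is not part of the kernel tree: the names my_driver_get_buffer, my_dev and buf_pages are hypothetical, and real drivers normally reach this allocator indirectly through the DMA mapping API (e.g. dma_alloc_coherent()) rather than calling it directly.

/*
 * Hypothetical usage sketch (not part of this file): allocate a 1 MiB
 * physically contiguous buffer from the device's CMA area (or the global
 * default area) and release it again.
 */
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/err.h>
#include <linux/sizes.h>

static int my_driver_get_buffer(struct device *my_dev)
{
	int count = SZ_1M >> PAGE_SHIFT;	/* number of pages in 1 MiB */
	unsigned int align = get_order(SZ_1M);	/* alignment, in PAGE_SIZE order */
	struct page *buf_pages;

	/* Uses the per-device CMA area if one was reserved, else the global one. */
	buf_pages = dma_alloc_from_contiguous(my_dev, count, align);
	if (!buf_pages)
		return -ENOMEM;

	/* ... map the buffer / use page_to_phys(buf_pages) here ... */

	/* Returns false if the pages did not come from a CMA area. */
	if (!dma_release_from_contiguous(my_dev, buf_pages, count))
		dev_warn(my_dev, "buffer was not allocated from CMA\n");

	return 0;
}

The default global area that backs such allocations can be sized at build time (CONFIG_CMA_SIZE_MBYTES or CONFIG_CMA_SIZE_PERCENTAGE), or on the kernel command line in the form parsed by early_cma() above, i.e. cma=<size>[@<base>[-<limit>]].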