[ARM] 3209/1: Configurable DMA-consistent memory region

Patch from Kevin Hilman

This patch increases the available DMA-consistent memory allocated by dma_alloc_coherent(). The default remains 2MB (defined in asm/memory.h), and each platform can override it in asm/arch-foo/memory.h.
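
For example, a platform that needs a larger region would add something like the following to its machine-specific memory.h. This is a hypothetical arch-foo override, not part of the patch; SZ_8M comes from asm/sizes.h and satisfies the multiple-of-2MB constraint the patch enforces:

	/* include/asm-arm/arch-foo/memory.h -- hypothetical platform override */
	/* Reserve 8MB for DMA-consistent allocations instead of the 2MB default */
	#define CONSISTENT_DMA_SIZE	SZ_8M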

Signed-off-by: Kevin Hilman <kevin@hilman.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

Authored by Kevin Hilman and committed by Russell King (37134cd5, a3e49436)

 arch/arm/mm/consistent.c | +41 -12
 include/asm-arm/memory.h |  +9
 2 files changed, 50 insertions(+), 12 deletions(-)

arch/arm/mm/consistent.c

 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
+#include <asm/sizes.h>
 
-#define CONSISTENT_BASE	(0xffc00000)
+/* Sanity check size */
+#if (CONSISTENT_DMA_SIZE % SZ_2M)
+#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
+#endif
+
 #define CONSISTENT_END	(0xffe00000)
+#define CONSISTENT_BASE	(CONSISTENT_END - CONSISTENT_DMA_SIZE)
+
 #define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
+#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
+#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
+
 
 /*
- * This is the page table (2MB) covering uncached, DMA consistent allocations
+ * These are the page tables (2MB each) covering uncached, DMA consistent allocations
  */
-static pte_t *consistent_pte;
+static pte_t *consistent_pte[NUM_CONSISTENT_PTES];
 static DEFINE_SPINLOCK(consistent_lock);
···
 	unsigned long order;
 	u64 mask = ISA_DMA_THRESHOLD, limit;
 
-	if (!consistent_pte) {
+	if (!consistent_pte[0]) {
 		printk(KERN_ERR "%s: not initialised\n", __func__);
 		dump_stack();
 		return NULL;
···
 	c = vm_region_alloc(&consistent_head, size,
 			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
 	if (c) {
-		pte_t *pte = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
+		pte_t *pte;
 		struct page *end = page + (1 << order);
+		int idx = CONSISTENT_PTE_INDEX(c->vm_start);
+		u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
 
+		pte = consistent_pte[idx] + off;
 		c->vm_pages = page;
 
 		/*
···
 			set_pte(pte, mk_pte(page, prot));
 			page++;
 			pte++;
+			off++;
+			if (off >= PTRS_PER_PTE) {
+				off = 0;
+				pte = consistent_pte[++idx];
+			}
 		} while (size -= PAGE_SIZE);
 
 		/*
···
 	struct vm_region *c;
 	unsigned long flags, addr;
 	pte_t *ptep;
+	int idx;
+	u32 off;
 
 	WARN_ON(irqs_disabled());
···
 		size = c->vm_end - c->vm_start;
 	}
 
-	ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
+	idx = CONSISTENT_PTE_INDEX(c->vm_start);
+	off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
+	ptep = consistent_pte[idx] + off;
 	addr = c->vm_start;
 	do {
 		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
···
 		ptep++;
 		addr += PAGE_SIZE;
+		off++;
+		if (off >= PTRS_PER_PTE) {
+			off = 0;
+			ptep = consistent_pte[++idx];
+		}
 
 		if (!pte_none(pte) && pte_present(pte)) {
 			pfn = pte_pfn(pte);
···
 	pgd_t *pgd;
 	pmd_t *pmd;
 	pte_t *pte;
-	int ret = 0;
+	int ret = 0, i = 0;
+	u32 base = CONSISTENT_BASE;
 
 	do {
-		pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
-		pmd = pmd_alloc(&init_mm, pgd, CONSISTENT_BASE);
+		pgd = pgd_offset(&init_mm, base);
+		pmd = pmd_alloc(&init_mm, pgd, base);
 		if (!pmd) {
 			printk(KERN_ERR "%s: no pmd tables\n", __func__);
 			ret = -ENOMEM;
···
 		}
 		WARN_ON(!pmd_none(*pmd));
 
-		pte = pte_alloc_kernel(pmd, CONSISTENT_BASE);
+		pte = pte_alloc_kernel(pmd, base);
 		if (!pte) {
 			printk(KERN_ERR "%s: no pte tables\n", __func__);
 			ret = -ENOMEM;
 			break;
 		}
 
-		consistent_pte = pte;
-	} while (0);
+		consistent_pte[i++] = pte;
+		base += (1 << PGDIR_SHIFT);
+	} while (base < CONSISTENT_END);
 
 	return ret;
 }
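
With the default 2MB region, NUM_CONSISTENT_PTES is 1, so behaviour matches the old single consistent_pte pointer; larger regions simply walk an array of page tables. A rough worked example of the new idx/off bookkeeping (not part of the patch), assuming ARM's two-level tables where PGDIR_SHIFT is 21, PTRS_PER_PTE is 512 and PAGE_SHIFT is 12, and a hypothetical 8MB region:

	/*
	 * Hypothetical CONSISTENT_DMA_SIZE = SZ_8M:
	 *   CONSISTENT_BASE     = 0xffe00000 - 0x800000     = 0xff600000
	 *   NUM_CONSISTENT_PTES = SZ_8M >> PGDIR_SHIFT      = 4 page tables
	 *
	 * For an allocation whose c->vm_start is 0xff8b2000:
	 *   idx = CONSISTENT_PTE_INDEX(0xff8b2000) = 0x2b2000 >> 21          = 1
	 *   off = CONSISTENT_OFFSET(0xff8b2000) & (PTRS_PER_PTE - 1)
	 *       = (0x2b2000 >> 12) & 511                                     = 178
	 *
	 * i.e. the mapping starts at entry 178 of the second consistent page
	 * table; when off wraps past 511 the loop advances to consistent_pte[idx + 1].
	 */
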

include/asm-arm/memory.h

 #include <linux/config.h>
 #include <linux/compiler.h>
 #include <asm/arch/memory.h>
+#include <asm/sizes.h>
 
 #ifndef TASK_SIZE
 /*
···
  */
 #ifndef PAGE_OFFSET
 #define PAGE_OFFSET		UL(0xc0000000)
+#endif
+
+/*
+ * Size of DMA-consistent memory region. Must be multiple of 2M,
+ * between 2MB and 14MB inclusive.
+ */
+#ifndef CONSISTENT_DMA_SIZE
+#define CONSISTENT_DMA_SIZE SZ_2M
 #endif
 
 /*