linux/arch/arm/mm/consistent.c at v2.6.21
/*
 *  linux/arch/arm/mm/consistent.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include <asm/memory.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

/* Sanity check size */
#if (CONSISTENT_DMA_SIZE % SZ_2M)
#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
#endif

#define CONSISTENT_END          (0xffe00000)
#define CONSISTENT_BASE         (CONSISTENT_END - CONSISTENT_DMA_SIZE)

#define CONSISTENT_OFFSET(x)    (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
#define NUM_CONSISTENT_PTES     (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)


/*
 * These are the page tables (2MB each) covering uncached, DMA consistent allocations
 */
static pte_t *consistent_pte[NUM_CONSISTENT_PTES];
static DEFINE_SPINLOCK(consistent_lock);

/*
 * VM region handling support.
 *
 * This should become something generic, handling VM region allocations for
 * vmalloc and similar (ioremap, module space, etc).
 *
 * I envisage vmalloc()'s supporting vm_struct becoming:
 *
 *  struct vm_struct {
 *    struct vm_region  region;
 *    unsigned long     flags;
 *    struct page       **pages;
 *    unsigned int      nr_pages;
 *    unsigned long     phys_addr;
 *  };
 *
 * get_vm_area() would then call vm_region_alloc with an appropriate
 * struct vm_region head (eg):
 *
 *  struct vm_region vmalloc_head = {
 *      .vm_list        = LIST_HEAD_INIT(vmalloc_head.vm_list),
 *      .vm_start       = VMALLOC_START,
 *      .vm_end         = VMALLOC_END,
 *  };
 *
 * However, vmalloc_head.vm_start is variable (typically, it is dependent on
 * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
 * would have to initialise this each time prior to calling vm_region_alloc().
 */
struct vm_region {
        struct list_head        vm_list;
        unsigned long           vm_start;
        unsigned long           vm_end;
        struct page             *vm_pages;
        int                     vm_active;
};

static struct vm_region consistent_head = {
        .vm_list        = LIST_HEAD_INIT(consistent_head.vm_list),
        .vm_start       = CONSISTENT_BASE,
        .vm_end         = CONSISTENT_END,
};

static struct vm_region *
vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
{
        unsigned long addr = head->vm_start, end = head->vm_end - size;
        unsigned long flags;
        struct vm_region *c, *new;

        new = kmalloc(sizeof(struct vm_region), gfp);
        if (!new)
                goto out;

        spin_lock_irqsave(&consistent_lock, flags);

        list_for_each_entry(c, &head->vm_list, vm_list) {
                if ((addr + size) < addr)
                        goto nospc;
                if ((addr + size) <= c->vm_start)
                        goto found;
                addr = c->vm_end;
                if (addr > end)
                        goto nospc;
        }

 found:
        /*
         * Insert this entry _before_ the one we found.
         */
        list_add_tail(&new->vm_list, &c->vm_list);
        new->vm_start = addr;
        new->vm_end = addr + size;
        new->vm_active = 1;

        spin_unlock_irqrestore(&consistent_lock, flags);
        return new;

 nospc:
        spin_unlock_irqrestore(&consistent_lock, flags);
        kfree(new);
 out:
        return NULL;
}

static struct vm_region *vm_region_find(struct vm_region *head, unsigned long addr)
{
        struct vm_region *c;

        list_for_each_entry(c, &head->vm_list, vm_list) {
                if (c->vm_active && c->vm_start == addr)
                        goto out;
        }
        c = NULL;
 out:
        return c;
}
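
/*
 * An illustrative walk through vm_region_alloc(), assuming a 2MiB
 * CONSISTENT_DMA_SIZE so that CONSISTENT_BASE is 0xffc00000: with existing
 * regions [0xffc00000, 0xffc02000) and [0xffc04000, 0xffc08000), an 8KiB
 * request starts scanning at 0xffc00000, steps over the first region to
 * 0xffc02000, sees that 0xffc02000 + 0x2000 fits before the second region,
 * and links a new [0xffc02000, 0xffc04000) entry ahead of it.  It is a
 * plain first-fit search over an address-ordered list, serialised by
 * consistent_lock.
 */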

#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
            pgprot_t prot)
{
        struct page *page;
        struct vm_region *c;
        unsigned long order;
        u64 mask = ISA_DMA_THRESHOLD, limit;

        if (!consistent_pte[0]) {
                printk(KERN_ERR "%s: not initialised\n", __func__);
                dump_stack();
                return NULL;
        }

        if (dev) {
                mask = dev->coherent_dma_mask;

                /*
                 * Sanity check the DMA mask - it must be non-zero, and
                 * must be able to be satisfied by a DMA allocation.
                 */
                if (mask == 0) {
                        dev_warn(dev, "coherent DMA mask is unset\n");
                        goto no_page;
                }

                if ((~mask) & ISA_DMA_THRESHOLD) {
                        dev_warn(dev, "coherent DMA mask %#llx is smaller "
                                 "than system GFP_DMA mask %#llx\n",
                                 mask, (unsigned long long)ISA_DMA_THRESHOLD);
                        goto no_page;
                }
        }

        /*
         * Sanity check the allocation size.
         */
        size = PAGE_ALIGN(size);
        limit = (mask + 1) & ~mask;
        if ((limit && size >= limit) ||
            size >= (CONSISTENT_END - CONSISTENT_BASE)) {
                printk(KERN_WARNING "coherent allocation too big "
                       "(requested %#x mask %#llx)\n", size, mask);
                goto no_page;
        }

        order = get_order(size);

        if (mask != 0xffffffff)
                gfp |= GFP_DMA;

        page = alloc_pages(gfp, order);
        if (!page)
                goto no_page;

        /*
         * Invalidate any data that might be lurking in the
         * kernel direct-mapped region for device DMA.
         */
        {
                void *ptr = page_address(page);
                memset(ptr, 0, size);
                dmac_flush_range(ptr, ptr + size);
                outer_flush_range(__pa(ptr), __pa(ptr) + size);
        }

        /*
         * Allocate a virtual address in the consistent mapping region.
         */
        c = vm_region_alloc(&consistent_head, size,
                            gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
        if (c) {
                pte_t *pte;
                struct page *end = page + (1 << order);
                int idx = CONSISTENT_PTE_INDEX(c->vm_start);
                u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);

                pte = consistent_pte[idx] + off;
                c->vm_pages = page;

                split_page(page, order);

                /*
                 * Set the "dma handle"
                 */
                *handle = page_to_dma(dev, page);

                do {
                        BUG_ON(!pte_none(*pte));

                        /*
                         * x86 does not mark the pages reserved...
                         */
                        SetPageReserved(page);
                        set_pte_ext(pte, mk_pte(page, prot), 0);
                        page++;
                        pte++;
                        off++;
                        if (off >= PTRS_PER_PTE) {
                                off = 0;
                                pte = consistent_pte[++idx];
                        }
                } while (size -= PAGE_SIZE);

                /*
                 * Free the otherwise unused pages.
                 */
                while (page < end) {
                        __free_page(page);
                        page++;
                }

                return (void *)c->vm_start;
        }

        if (page)
                __free_pages(page, order);
 no_page:
        *handle = ~0;
        return NULL;
}
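
/*
 * For example (assuming 4KiB pages), a request for 8KiB + 1 byte is
 * page-aligned to 12KiB, which get_order() rounds up to an order-2 (16KiB)
 * block: the first three pages are remapped with the caller-supplied
 * protection into the consistent region, the returned handle refers to the
 * first page of the block, and the fourth page is handed straight back by
 * the "free the otherwise unused pages" loop in __dma_alloc() above.
 */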

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
        if (arch_is_coherent()) {
                void *virt;

                virt = kmalloc(size, gfp);
                if (!virt)
                        return NULL;
                *handle = virt_to_dma(dev, virt);

                return virt;
        }

        return __dma_alloc(dev, size, handle, gfp,
                           pgprot_noncached(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_coherent);

/*
 * Allocate a writecombining region, in much the same way as
 * dma_alloc_coherent above.
 */
void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
        return __dma_alloc(dev, size, handle, gfp,
                           pgprot_writecombine(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_writecombine);

static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
                    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
        unsigned long flags, user_size, kern_size;
        struct vm_region *c;
        int ret = -ENXIO;

        user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

        spin_lock_irqsave(&consistent_lock, flags);
        c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
        spin_unlock_irqrestore(&consistent_lock, flags);

        if (c) {
                unsigned long off = vma->vm_pgoff;

                kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;

                if (off < kern_size &&
                    user_size <= (kern_size - off)) {
                        vma->vm_flags |= VM_RESERVED;
                        ret = remap_pfn_range(vma, vma->vm_start,
                                              page_to_pfn(c->vm_pages) + off,
                                              user_size << PAGE_SHIFT,
                                              vma->vm_page_prot);
                }
        }

        return ret;
}

int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
                      void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_coherent);

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
                          void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_writecombine);
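
/*
 * A rough sketch of how a driver might use the writecombine variants;
 * foo_mmap, foo_buf, foo_dma, FOO_BUF_SIZE and pdev are placeholder names,
 * not part of this file:
 *
 *  foo_buf = dma_alloc_writecombine(&pdev->dev, FOO_BUF_SIZE,
 *                                   &foo_dma, GFP_KERNEL);
 *
 *  static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *  {
 *      return dma_mmap_writecombine(&pdev->dev, vma, foo_buf,
 *                                   foo_dma, FOO_BUF_SIZE);
 *  }
 *
 * dma_mmap() validates vma->vm_pgoff and the requested length against the
 * size of the underlying vm_region and returns -ENXIO if they do not fit.
 */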

/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
        struct vm_region *c;
        unsigned long flags, addr;
        pte_t *ptep;
        int idx;
        u32 off;

        WARN_ON(irqs_disabled());

        if (arch_is_coherent()) {
                kfree(cpu_addr);
                return;
        }

        size = PAGE_ALIGN(size);

        spin_lock_irqsave(&consistent_lock, flags);
        c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
        if (!c)
                goto no_area;

        c->vm_active = 0;
        spin_unlock_irqrestore(&consistent_lock, flags);

        if ((c->vm_end - c->vm_start) != size) {
                printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
                       __func__, c->vm_end - c->vm_start, size);
                dump_stack();
                size = c->vm_end - c->vm_start;
        }

        idx = CONSISTENT_PTE_INDEX(c->vm_start);
        off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
        ptep = consistent_pte[idx] + off;
        addr = c->vm_start;
        do {
                pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
                unsigned long pfn;

                ptep++;
                addr += PAGE_SIZE;
                off++;
                if (off >= PTRS_PER_PTE) {
                        off = 0;
                        ptep = consistent_pte[++idx];
                }

                if (!pte_none(pte) && pte_present(pte)) {
                        pfn = pte_pfn(pte);

                        if (pfn_valid(pfn)) {
                                struct page *page = pfn_to_page(pfn);

                                /*
                                 * x86 does not mark the pages reserved...
                                 */
                                ClearPageReserved(page);

                                __free_page(page);
                                continue;
                        }
                }

                printk(KERN_CRIT "%s: bad page in kernel page table\n",
                       __func__);
        } while (size -= PAGE_SIZE);

        flush_tlb_kernel_range(c->vm_start, c->vm_end);

        spin_lock_irqsave(&consistent_lock, flags);
        list_del(&c->vm_list);
        spin_unlock_irqrestore(&consistent_lock, flags);

        kfree(c);
        return;

 no_area:
        spin_unlock_irqrestore(&consistent_lock, flags);
        printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
               __func__, cpu_addr);
        dump_stack();
}
EXPORT_SYMBOL(dma_free_coherent);
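
/*
 * The matching allocation/free lifecycle, again with placeholder names:
 *
 *  cpu = dma_alloc_coherent(&pdev->dev, SZ_4K, &dma, GFP_KERNEL);
 *  ...
 *  dma_free_coherent(&pdev->dev, SZ_4K, cpu, dma);
 *
 * The size passed to dma_free_coherent() should match the original
 * allocation (a mismatch is reported and the region size is used instead),
 * and, as noted above, it must not be called with IRQs disabled.
 */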

/*
 * Initialise the consistent memory allocation.
 */
static int __init consistent_init(void)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        int ret = 0, i = 0;
        u32 base = CONSISTENT_BASE;

        do {
                pgd = pgd_offset(&init_mm, base);
                pmd = pmd_alloc(&init_mm, pgd, base);
                if (!pmd) {
                        printk(KERN_ERR "%s: no pmd tables\n", __func__);
                        ret = -ENOMEM;
                        break;
                }
                WARN_ON(!pmd_none(*pmd));

                pte = pte_alloc_kernel(pmd, base);
                if (!pte) {
                        printk(KERN_ERR "%s: no pte tables\n", __func__);
                        ret = -ENOMEM;
                        break;
                }

                consistent_pte[i++] = pte;
                base += (1 << PGDIR_SHIFT);
        } while (base < CONSISTENT_END);

        return ret;
}

core_initcall(consistent_init);

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
void consistent_sync(const void *start, size_t size, int direction)
{
        const void *end = start + size;

        BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(end - 1));

        switch (direction) {
        case DMA_FROM_DEVICE:           /* invalidate only */
                dmac_inv_range(start, end);
                outer_inv_range(__pa(start), __pa(end));
                break;
        case DMA_TO_DEVICE:             /* writeback only */
                dmac_clean_range(start, end);
                outer_clean_range(__pa(start), __pa(end));
                break;
        case DMA_BIDIRECTIONAL:         /* writeback and invalidate */
                dmac_flush_range(start, end);
                outer_flush_range(__pa(start), __pa(end));
                break;
        default:
                BUG();
        }
}
EXPORT_SYMBOL(consistent_sync);
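
/*
 * For the driver-visible equivalent of consistent_sync(), use the streaming
 * DMA API from dma-mapping.h; a sketch with placeholder names:
 *
 *  dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *  ... device writes into the buffer ...
 *  dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *  ... CPU examines the data ...
 *  dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *  ... device writes again ...
 *  dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
 */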