Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
swiotlb.c at v3.8-rc3 (952 lines, 27 kB)
/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 * 08/12/11 beckyb	Add highmem support
 */

#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/gfp.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/scatterlist.h>

#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/iommu-helper.h>

#define OFFSET(val,align) ((unsigned long)	\
	( (val) & ( (align) - 1)))

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)

int swiotlb_force;

/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */
static phys_addr_t io_tlb_start, io_tlb_end;

/*
 * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
 * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
 */
static unsigned long io_tlb_nslabs;

/*
 * When the IOMMU overflows we return a fallback buffer. This sets the size.
 */
static unsigned long io_tlb_overflow = 32*1024;

static phys_addr_t io_tlb_overflow_buffer;

/*
 * This is a free list describing the number of free entries available from
 * each index
 */
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;

/*
 * We need to save away the original address corresponding to a mapped entry
 * for the sync operations.
 */
static phys_addr_t *io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls
 */
static DEFINE_SPINLOCK(io_tlb_lock);

static int late_alloc;

static int __init
setup_io_tlb_npages(char *str)
{
	if (isdigit(*str)) {
		io_tlb_nslabs = simple_strtoul(str, &str, 0);
		/* avoid tail segment of size < IO_TLB_SEGSIZE */
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (!strcmp(str, "force"))
		swiotlb_force = 1;

	return 1;
}
__setup("swiotlb=", setup_io_tlb_npages);
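/*
 * For example, booting with "swiotlb=16384" reserves 16384 slabs for the
 * bounce pool (rounded up to a multiple of IO_TLB_SEGSIZE), and
 * "swiotlb=16384,force" additionally forces bouncing even for devices
 * whose DMA mask could reach the memory directly.  The value 16384 is
 * only an illustration.
 */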
/* make io_tlb_overflow tunable too? */

unsigned long swiotlb_nr_tbl(void)
{
	return io_tlb_nslabs;
}
EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);

/* Note that this doesn't work with highmem page */
static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
				      volatile void *address)
{
	return phys_to_dma(hwdev, virt_to_phys(address));
}

void swiotlb_print_info(void)
{
	unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
	unsigned char *vstart, *vend;

	vstart = phys_to_virt(io_tlb_start);
	vend = phys_to_virt(io_tlb_end);

	printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n",
	       (unsigned long long)io_tlb_start,
	       (unsigned long long)io_tlb_end,
	       bytes >> 20, vstart, vend - 1);
}

void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
{
	void *v_overflow_buffer;
	unsigned long i, bytes;

	bytes = nslabs << IO_TLB_SHIFT;

	io_tlb_nslabs = nslabs;
	io_tlb_start = __pa(tlb);
	io_tlb_end = io_tlb_start + bytes;

	/*
	 * Get the overflow emergency buffer
	 */
	v_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow));
	if (!v_overflow_buffer)
		panic("Cannot allocate SWIOTLB overflow buffer!\n");

	io_tlb_overflow_buffer = __pa(v_overflow_buffer);

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
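	/*
	 * Each io_tlb_list entry holds the number of contiguous free slabs
	 * starting at that index, clamped at the next IO_TLB_SEGSIZE
	 * boundary so that a single mapping never crosses a segment.
	 */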
	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;
	io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));

	if (verbose)
		swiotlb_print_info();
}

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
static void __init
swiotlb_init_with_default_size(size_t default_size, int verbose)
{
	unsigned char *vstart;
	unsigned long bytes;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	/*
	 * Get IO TLB memory from the low pages
	 */
	vstart = alloc_bootmem_low_pages(PAGE_ALIGN(bytes));
	if (!vstart)
		panic("Cannot allocate SWIOTLB buffer");

	swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose);
}

void __init
swiotlb_init(int verbose)
{
	swiotlb_init_with_default_size(64 * (1<<20), verbose);	/* default to 64MB */
}

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
	unsigned long bytes, req_nslabs = io_tlb_nslabs;
	unsigned char *vstart = NULL;
	unsigned int order;
	int rc = 0;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	/*
	 * Get IO TLB memory from the low pages
	 */
	order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
	io_tlb_nslabs = SLABS_PER_PAGE << order;
	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

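	/*
	 * Retry with progressively smaller power-of-two allocations; give
	 * up rather than try anything of IO_TLB_MIN_SLABS or fewer slabs.
	 */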
	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
						  order);
		if (vstart)
			break;
		order--;
	}

	if (!vstart) {
		io_tlb_nslabs = req_nslabs;
		return -ENOMEM;
	}
	if (order != get_order(bytes)) {
		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
		io_tlb_nslabs = SLABS_PER_PAGE << order;
	}
	rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
	if (rc)
		free_pages((unsigned long)vstart, order);
	return rc;
}

int
swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
{
	unsigned long i, bytes;
	unsigned char *v_overflow_buffer;

	bytes = nslabs << IO_TLB_SHIFT;

	io_tlb_nslabs = nslabs;
	io_tlb_start = virt_to_phys(tlb);
	io_tlb_end = io_tlb_start + bytes;

	memset(tlb, 0, bytes);

	/*
	 * Get the overflow emergency buffer
	 */
	v_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
						     get_order(io_tlb_overflow));
	if (!v_overflow_buffer)
		goto cleanup2;

	io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
						       get_order(io_tlb_nslabs * sizeof(int)));
	if (!io_tlb_list)
		goto cleanup3;

	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;

	io_tlb_orig_addr = (phys_addr_t *)
		__get_free_pages(GFP_KERNEL,
				 get_order(io_tlb_nslabs *
					   sizeof(phys_addr_t)));
	if (!io_tlb_orig_addr)
		goto cleanup4;

	memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));

	swiotlb_print_info();

	late_alloc = 1;

	return 0;

cleanup4:
	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
							 sizeof(int)));
	io_tlb_list = NULL;
cleanup3:
	free_pages((unsigned long)v_overflow_buffer,
		   get_order(io_tlb_overflow));
	io_tlb_overflow_buffer = 0;
cleanup2:
	io_tlb_end = 0;
	io_tlb_start = 0;
	io_tlb_nslabs = 0;
	return -ENOMEM;
}

void __init swiotlb_free(void)
{
	if (!io_tlb_orig_addr)
		return;

	if (late_alloc) {
		free_pages((unsigned long)phys_to_virt(io_tlb_overflow_buffer),
			   get_order(io_tlb_overflow));
		free_pages((unsigned long)io_tlb_orig_addr,
			   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
		free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
								 sizeof(int)));
		free_pages((unsigned long)phys_to_virt(io_tlb_start),
			   get_order(io_tlb_nslabs << IO_TLB_SHIFT));
	} else {
		free_bootmem_late(io_tlb_overflow_buffer,
				  PAGE_ALIGN(io_tlb_overflow));
		free_bootmem_late(__pa(io_tlb_orig_addr),
				  PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
		free_bootmem_late(__pa(io_tlb_list),
				  PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
		free_bootmem_late(io_tlb_start,
				  PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
	}
	io_tlb_nslabs = 0;
}

static int is_swiotlb_buffer(phys_addr_t paddr)
{
	return paddr >= io_tlb_start && paddr < io_tlb_end;
}

/*
 * Bounce: copy the swiotlb buffer back to the original dma location
 */
static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
			   size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(orig_addr);
	unsigned char *vaddr = phys_to_virt(tlb_addr);

	if (PageHighMem(pfn_to_page(pfn))) {
		/* The buffer does not have a mapping.  Map it in and copy */
		unsigned int offset = orig_addr & ~PAGE_MASK;
		char *buffer;
		unsigned int sz = 0;
		unsigned long flags;

		while (size) {
			sz = min_t(size_t, PAGE_SIZE - offset, size);

			local_irq_save(flags);
			buffer = kmap_atomic(pfn_to_page(pfn));
			if (dir == DMA_TO_DEVICE)
				memcpy(vaddr, buffer + offset, sz);
			else
				memcpy(buffer + offset, vaddr, sz);
			kunmap_atomic(buffer);
			local_irq_restore(flags);

			size -= sz;
			pfn++;
			vaddr += sz;
			offset = 0;
		}
	} else if (dir == DMA_TO_DEVICE) {
		memcpy(vaddr, phys_to_virt(orig_addr), size);
	} else {
		memcpy(phys_to_virt(orig_addr), vaddr, size);
	}
}

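/*
 * Carve a bounce buffer out of the IO TLB pool: round the request up to
 * whole slabs, search io_tlb_list under io_tlb_lock for a contiguous run
 * that does not cross the device's segment boundary, remember the original
 * physical address of each slab in io_tlb_orig_addr, and copy the data in
 * when the mapping is DMA_TO_DEVICE or DMA_BIDIRECTIONAL.
 */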
phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
				   dma_addr_t tbl_dma_addr,
				   phys_addr_t orig_addr, size_t size,
				   enum dma_data_direction dir)
{
	unsigned long flags;
	phys_addr_t tlb_addr;
	unsigned int nslots, stride, index, wrap;
	int i;
	unsigned long mask;
	unsigned long offset_slots;
	unsigned long max_slots;

	mask = dma_get_seg_boundary(hwdev);

	tbl_dma_addr &= mask;

	offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

	/*
	 * Carefully handle integer overflow which can occur when mask == ~0UL.
	 */
	max_slots = mask + 1
		    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
		    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);

	/*
	 * For mappings greater than a page, we limit the stride (and
	 * hence alignment) to a page size.
	 */
	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	if (size > PAGE_SIZE)
		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
	else
		stride = 1;

	BUG_ON(!nslots);

	/*
	 * Find a suitable number of IO TLB entries that will fit this
	 * request and allocate a buffer from that IO TLB pool.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	index = ALIGN(io_tlb_index, stride);
	if (index >= io_tlb_nslabs)
		index = 0;
	wrap = index;

	do {
		while (iommu_is_span_boundary(index, nslots, offset_slots,
					      max_slots)) {
			index += stride;
			if (index >= io_tlb_nslabs)
				index = 0;
			if (index == wrap)
				goto not_found;
		}

		/*
		 * If we find a slot that indicates we have 'nslots' number of
		 * contiguous buffers, we allocate the buffers from that slot
		 * and mark the entries as '0' indicating unavailable.
		 */
		if (io_tlb_list[index] >= nslots) {
			int count = 0;

			for (i = index; i < (int) (index + nslots); i++)
				io_tlb_list[i] = 0;
			for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
				io_tlb_list[i] = ++count;
			tlb_addr = io_tlb_start + (index << IO_TLB_SHIFT);

			/*
			 * Update the indices to avoid searching in the next
			 * round.
			 */
			io_tlb_index = ((index + nslots) < io_tlb_nslabs
					? (index + nslots) : 0);

			goto found;
		}
		index += stride;
		if (index >= io_tlb_nslabs)
			index = 0;
	} while (index != wrap);

not_found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);
	return SWIOTLB_MAP_ERROR;
found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	for (i = 0; i < nslots; i++)
		io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);

	return tlb_addr;
}
EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);

/*
 * Allocates a bounce buffer and returns its physical address.
 */
phys_addr_t map_single(struct device *hwdev, phys_addr_t phys, size_t size,
		       enum dma_data_direction dir)
{
	dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start);

	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir);
}

/*
 * tlb_addr is the physical address of the bounce buffer to unmap.
 */
void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
			      size_t size, enum dma_data_direction dir)
{
	unsigned long flags;
	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t orig_addr = io_tlb_orig_addr[index];

	/*
	 * First, sync the memory before unmapping the entry
	 */
	if (orig_addr && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);

	/*
	 * Return the buffer to the free list by setting the corresponding
	 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the pool being returned.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	{
		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
			 io_tlb_list[index + nslots] : 0);
		/*
		 * Step 1: return the slots to the free list, merging the
		 * slots with succeeding slots
		 */
		for (i = index + nslots - 1; i >= index; i--)
			io_tlb_list[i] = ++count;
		/*
		 * Step 2: merge the returned slots with the preceding slots,
		 * if available (non zero)
		 */
		for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
			io_tlb_list[i] = ++count;
	}
	spin_unlock_irqrestore(&io_tlb_lock, flags);
}
EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single);

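/*
 * Re-copy a mapped buffer between its original location and its bounce
 * slot without releasing the slot; 'target' selects whether the copy is
 * for the CPU (data the device wrote is copied out of the bounce slot) or
 * for the device (data the CPU wrote is copied into the bounce slot).
 */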
void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
			     size_t size, enum dma_data_direction dir,
			     enum dma_sync_target target)
{
	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t orig_addr = io_tlb_orig_addr[index];

	orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);

	switch (target) {
	case SYNC_FOR_CPU:
		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(orig_addr, tlb_addr,
				       size, DMA_FROM_DEVICE);
		else
			BUG_ON(dir != DMA_TO_DEVICE);
		break;
	case SYNC_FOR_DEVICE:
		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(orig_addr, tlb_addr,
				       size, DMA_TO_DEVICE);
		else
			BUG_ON(dir != DMA_FROM_DEVICE);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single);

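/*
 * Coherent allocations try the page allocator first; if the pages land
 * above the device's coherent DMA mask, they are freed again and a bounce
 * slot is used instead via map_single().
 */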
void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
		       dma_addr_t *dma_handle, gfp_t flags)
{
	dma_addr_t dev_addr;
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	ret = (void *)__get_free_pages(flags, order);
	if (ret) {
		dev_addr = swiotlb_virt_to_bus(hwdev, ret);
		if (dev_addr + size - 1 > dma_mask) {
			/*
			 * The allocated memory isn't reachable by the device.
			 */
			free_pages((unsigned long) ret, order);
			ret = NULL;
		}
	}
	if (!ret) {
		/*
		 * We are either out of memory or the device can't DMA to
		 * GFP_DMA memory; fall back on map_single(), which
		 * will grab memory from the lowest available address range.
		 */
		phys_addr_t paddr = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
		if (paddr == SWIOTLB_MAP_ERROR)
			return NULL;

		ret = phys_to_virt(paddr);
		dev_addr = phys_to_dma(hwdev, paddr);

		/* Confirm address can be DMA'd by device */
		if (dev_addr + size - 1 > dma_mask) {
			printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
			       (unsigned long long)dma_mask,
			       (unsigned long long)dev_addr);

			/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
			swiotlb_tbl_unmap_single(hwdev, paddr,
						 size, DMA_TO_DEVICE);
			return NULL;
		}
	}

	*dma_handle = dev_addr;
	memset(ret, 0, size);

	return ret;
}
EXPORT_SYMBOL(swiotlb_alloc_coherent);

void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
		      dma_addr_t dev_addr)
{
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

	WARN_ON(irqs_disabled());
	if (!is_swiotlb_buffer(paddr))
		free_pages((unsigned long)vaddr, get_order(size));
	else
		/* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */
		swiotlb_tbl_unmap_single(hwdev, paddr, size, DMA_TO_DEVICE);
}
EXPORT_SYMBOL(swiotlb_free_coherent);

static void
swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
	     int do_panic)
{
	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly
	 * unless they check for dma_mapping_error() (most don't).
	 * When the mapping is small enough, return a static buffer to limit
	 * the damage, or panic when the transfer is too big.
	 */
	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
	       "device %s\n", size, dev ? dev_name(dev) : "?");

	if (size <= io_tlb_overflow || !do_panic)
		return;

	if (dir == DMA_BIDIRECTIONAL)
		panic("DMA: Random memory could be DMA accessed\n");
	if (dir == DMA_FROM_DEVICE)
		panic("DMA: Random memory could be DMA written\n");
	if (dir == DMA_TO_DEVICE)
		panic("DMA: Random memory could be DMA read\n");
}

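/*
 * Note that on mapping failure swiotlb_map_page() returns the DMA address
 * of io_tlb_overflow_buffer; swiotlb_dma_mapping_error() recognizes exactly
 * that address, which is why callers are expected to check it.
 */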
/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_page or swiotlb_dma_sync_single is performed.
 */
dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = phys_to_dma(dev, phys);

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size) && !swiotlb_force)
		return dev_addr;

	/* Oh well, have to allocate and map a bounce buffer. */
	map = map_single(dev, phys, size, dir);
	if (map == SWIOTLB_MAP_ERROR) {
		swiotlb_full(dev, size, dir, 1);
		return phys_to_dma(dev, io_tlb_overflow_buffer);
	}

	dev_addr = phys_to_dma(dev, map);

	/* Ensure that the address returned is DMA'ble */
	if (!dma_capable(dev, dev_addr, size)) {
		swiotlb_tbl_unmap_single(dev, map, size, dir);
		return phys_to_dma(dev, io_tlb_overflow_buffer);
	}

	return dev_addr;
}
EXPORT_SYMBOL_GPL(swiotlb_map_page);

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			 size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (is_swiotlb_buffer(paddr)) {
		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	/*
	 * phys_to_virt doesn't work with highmem pages, but we could
	 * call dma_mark_clean() with a highmem page here. However, we
	 * are fine since dma_mark_clean() is null on POWERPC. We can
	 * make dma_mark_clean() take a physical address if necessary.
	 */
	dma_mark_clean(phys_to_virt(paddr), size);
}

void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			struct dma_attrs *attrs)
{
	unmap_single(hwdev, dev_addr, size, dir);
}
EXPORT_SYMBOL_GPL(swiotlb_unmap_page);

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to teardown the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * swiotlb_dma_sync_for_device, and then the device again owns the buffer
 */
static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
		    size_t size, enum dma_data_direction dir,
		    enum dma_sync_target target)
{
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (is_swiotlb_buffer(paddr)) {
		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(phys_to_virt(paddr), size);
}

void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);

void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_device);

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for swiotlb_map_page are the
 * same here.
 */
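/*
 * If any element cannot be bounced, everything mapped so far is unwound,
 * sgl[0].dma_length is set to 0 and 0 is returned, so callers must check
 * the return value rather than assume all nelems entries were mapped.
 */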
int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);

		if (swiotlb_force ||
		    !dma_capable(hwdev, dev_addr, sg->length)) {
			phys_addr_t map = map_single(hwdev, sg_phys(sg),
						     sg->length, dir);
			if (map == SWIOTLB_MAP_ERROR) {
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				swiotlb_full(hwdev, sg->length, dir, 0);
				swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
						       attrs);
				sgl[0].dma_length = 0;
				return 0;
			}
			sg->dma_address = phys_to_dma(hwdev, map);
		} else
			sg->dma_address = dev_addr;
		sg->dma_length = sg->length;
	}
	return nelems;
}
EXPORT_SYMBOL(swiotlb_map_sg_attrs);

int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
	       enum dma_data_direction dir)
{
	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_map_sg);

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
		       int nelems, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);

}
EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);

void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		 enum dma_data_direction dir)
{
	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL(swiotlb_unmap_sg);

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
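/*
 * The loop below walks sg->dma_address and sg->dma_length, so it must be
 * given the same scatterlist and element count that swiotlb_map_sg_attrs()
 * filled in.
 */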
static void
swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		int nelems, enum dma_data_direction dir,
		enum dma_sync_target target)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		swiotlb_sync_single(hwdev, sg->dma_address,
				    sg->dma_length, dir, target);
}

void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			int nelems, enum dma_data_direction dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);

void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			   int nelems, enum dma_data_direction dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);

int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return (dma_addr == phys_to_dma(hwdev, io_tlb_overflow_buffer));
}
EXPORT_SYMBOL(swiotlb_dma_mapping_error);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL(swiotlb_dma_supported);