Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v4.15-rc6, 1086 lines, 30 kB
/*
 * Dynamic DMA mapping support.
 *
 * This implementation is a fallback for platforms that do not support
 * I/O TLBs (aka DMA address translation hardware).
 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
 * Copyright (C) 2000, 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 03/05/07 davidm	Switch from PCI-DMA to generic device DMA API.
 * 00/12/13 davidm	Rename to swiotlb.c and add mark_clean() to avoid
 *			unnecessary i-cache flushing.
 * 04/07/.. ak		Better overflow handling. Assorted fixes.
 * 05/09/10 linville	Add support for syncing ranges, support syncing for
 *			DMA_BIDIRECTIONAL mappings, miscellaneous cleanup.
 * 08/12/11 beckyb	Add highmem support
 */

#include <linux/cache.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/swiotlb.h>
#include <linux/pfn.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/mem_encrypt.h>

#include <asm/io.h>
#include <asm/dma.h>

#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/iommu-helper.h>

#define CREATE_TRACE_POINTS
#include <trace/events/swiotlb.h>

#define OFFSET(val, align) ((unsigned long)	\
	((val) & ((align) - 1)))

#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))

/*
 * Minimum IO TLB size to bother booting with.  Systems with mainly
 * 64bit capable cards will only lightly use the swiotlb.  If we can't
 * allocate a contiguous 1MB, we're probably in trouble anyway.
 */
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)

enum swiotlb_force swiotlb_force;

/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by
 * this API.
 */
static phys_addr_t io_tlb_start, io_tlb_end;

/*
 * The number of IO TLB blocks (in groups of 64) between io_tlb_start and
 * io_tlb_end.  This is command line adjustable via setup_io_tlb_npages.
 */
static unsigned long io_tlb_nslabs;

/*
 * When the IOMMU overflows we return a fallback buffer. This sets the size.
 */
static unsigned long io_tlb_overflow = 32*1024;

static phys_addr_t io_tlb_overflow_buffer;

/*
 * This is a free list describing the number of free entries available from
 * each index
 */
static unsigned int *io_tlb_list;
static unsigned int io_tlb_index;

/*
 * Max segment that we can provide which (if pages are contiguous) will
 * not be bounced (unless SWIOTLB_FORCE is set).
 */
unsigned int max_segment;

/*
 * We need to save away the original address corresponding to a mapped entry
 * for the sync operations.
 */
#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
static phys_addr_t *io_tlb_orig_addr;

/*
 * Protect the above data structures in the map and unmap calls
 */
static DEFINE_SPINLOCK(io_tlb_lock);

static int late_alloc;

static int __init
setup_io_tlb_npages(char *str)
{
	if (isdigit(*str)) {
		io_tlb_nslabs = simple_strtoul(str, &str, 0);
		/* avoid tail segment of size < IO_TLB_SEGSIZE */
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}
	if (*str == ',')
		++str;
	if (!strcmp(str, "force")) {
		swiotlb_force = SWIOTLB_FORCE;
	} else if (!strcmp(str, "noforce")) {
		swiotlb_force = SWIOTLB_NO_FORCE;
		io_tlb_nslabs = 1;
	}

	return 0;
}
early_param("swiotlb", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */

unsigned long swiotlb_nr_tbl(void)
{
	return io_tlb_nslabs;
}
EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);

unsigned int swiotlb_max_segment(void)
{
	return max_segment;
}
EXPORT_SYMBOL_GPL(swiotlb_max_segment);

void swiotlb_set_max_segment(unsigned int val)
{
	if (swiotlb_force == SWIOTLB_FORCE)
		max_segment = 1;
	else
		max_segment = rounddown(val, PAGE_SIZE);
}

/* default to 64MB */
#define IO_TLB_DEFAULT_SIZE (64UL<<20)
unsigned long swiotlb_size_or_default(void)
{
	unsigned long size;

	size = io_tlb_nslabs << IO_TLB_SHIFT;

	return size ? size : (IO_TLB_DEFAULT_SIZE);
}

void __weak swiotlb_set_mem_attributes(void *vaddr, unsigned long size) { }

/* For swiotlb, clear memory encryption mask from dma addresses */
static dma_addr_t swiotlb_phys_to_dma(struct device *hwdev,
				      phys_addr_t address)
{
	return __sme_clr(phys_to_dma(hwdev, address));
}

/* Note that this doesn't work with highmem page */
static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
				      volatile void *address)
{
	return phys_to_dma(hwdev, virt_to_phys(address));
}

static bool no_iotlb_memory;

void swiotlb_print_info(void)
{
	unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
	unsigned char *vstart, *vend;

	if (no_iotlb_memory) {
		pr_warn("software IO TLB: No low mem\n");
		return;
	}

	vstart = phys_to_virt(io_tlb_start);
	vend = phys_to_virt(io_tlb_end);

	printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n",
	       (unsigned long long)io_tlb_start,
	       (unsigned long long)io_tlb_end,
	       bytes >> 20, vstart, vend - 1);
}

/*
 * Early SWIOTLB allocation may be too early to allow an architecture to
 * perform the desired operations.  This function allows the architecture to
 * call SWIOTLB when the operations are possible.  It needs to be called
 * before the SWIOTLB memory is used.
 */
void __init swiotlb_update_mem_attributes(void)
{
	void *vaddr;
	unsigned long bytes;

	if (no_iotlb_memory || late_alloc)
		return;

	vaddr = phys_to_virt(io_tlb_start);
	bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
	swiotlb_set_mem_attributes(vaddr, bytes);
	memset(vaddr, 0, bytes);

	vaddr = phys_to_virt(io_tlb_overflow_buffer);
	bytes = PAGE_ALIGN(io_tlb_overflow);
	swiotlb_set_mem_attributes(vaddr, bytes);
	memset(vaddr, 0, bytes);
}

int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
{
	void *v_overflow_buffer;
	unsigned long i, bytes;

	bytes = nslabs << IO_TLB_SHIFT;

	io_tlb_nslabs = nslabs;
	io_tlb_start = __pa(tlb);
	io_tlb_end = io_tlb_start + bytes;

	/*
	 * Get the overflow emergency buffer
	 */
	v_overflow_buffer = memblock_virt_alloc_low_nopanic(
						PAGE_ALIGN(io_tlb_overflow),
						PAGE_SIZE);
	if (!v_overflow_buffer)
		return -ENOMEM;

	io_tlb_overflow_buffer = __pa(v_overflow_buffer);

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = memblock_virt_alloc(
				PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
				PAGE_SIZE);
	io_tlb_orig_addr = memblock_virt_alloc(
				PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
				PAGE_SIZE);
	for (i = 0; i < io_tlb_nslabs; i++) {
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
	}
	io_tlb_index = 0;

	if (verbose)
		swiotlb_print_info();

	swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);
	return 0;
}

/*
 * Statically reserve bounce buffer space and initialize bounce buffer data
 * structures for the software IO TLB used to implement the DMA API.
 */
void __init
swiotlb_init(int verbose)
{
	size_t default_size = IO_TLB_DEFAULT_SIZE;
	unsigned char *vstart;
	unsigned long bytes;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	/* Get IO TLB memory from the low pages */
	vstart = memblock_virt_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
	if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
		return;

	if (io_tlb_start)
		memblock_free_early(io_tlb_start,
				    PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
	pr_warn("Cannot allocate SWIOTLB buffer");
	no_iotlb_memory = true;
}

/*
 * Systems with larger DMA zones (those that don't support ISA) can
 * initialize the swiotlb later using the slab allocator if needed.
 * This should be just like above, but with some error catching.
 */
int
swiotlb_late_init_with_default_size(size_t default_size)
{
	unsigned long bytes, req_nslabs = io_tlb_nslabs;
	unsigned char *vstart = NULL;
	unsigned int order;
	int rc = 0;

	if (!io_tlb_nslabs) {
		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
	}

	/*
	 * Get IO TLB memory from the low pages
	 */
	order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
	io_tlb_nslabs = SLABS_PER_PAGE << order;
	bytes = io_tlb_nslabs << IO_TLB_SHIFT;

	while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
		vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
						  order);
		if (vstart)
			break;
		order--;
	}

	if (!vstart) {
		io_tlb_nslabs = req_nslabs;
		return -ENOMEM;
	}
	if (order != get_order(bytes)) {
		printk(KERN_WARNING "Warning: only able to allocate %ld MB "
		       "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
		io_tlb_nslabs = SLABS_PER_PAGE << order;
	}
	rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs);
	if (rc)
		free_pages((unsigned long)vstart, order);

	return rc;
}

int
swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
{
	unsigned long i, bytes;
	unsigned char *v_overflow_buffer;

	bytes = nslabs << IO_TLB_SHIFT;

	io_tlb_nslabs = nslabs;
	io_tlb_start = virt_to_phys(tlb);
	io_tlb_end = io_tlb_start + bytes;

	swiotlb_set_mem_attributes(tlb, bytes);
	memset(tlb, 0, bytes);

	/*
	 * Get the overflow emergency buffer
	 */
	v_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
						     get_order(io_tlb_overflow));
	if (!v_overflow_buffer)
		goto cleanup2;

	swiotlb_set_mem_attributes(v_overflow_buffer, io_tlb_overflow);
	memset(v_overflow_buffer, 0, io_tlb_overflow);
	io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);

	/*
	 * Allocate and initialize the free list array.  This array is used
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
	io_tlb_list = (unsigned int *)__get_free_pages(GFP_KERNEL,
				      get_order(io_tlb_nslabs * sizeof(int)));
	if (!io_tlb_list)
		goto cleanup3;

	io_tlb_orig_addr = (phys_addr_t *)
		__get_free_pages(GFP_KERNEL,
				 get_order(io_tlb_nslabs *
					   sizeof(phys_addr_t)));
	if (!io_tlb_orig_addr)
		goto cleanup4;

	for (i = 0; i < io_tlb_nslabs; i++) {
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
	}
	io_tlb_index = 0;

	swiotlb_print_info();

	late_alloc = 1;

	swiotlb_set_max_segment(io_tlb_nslabs << IO_TLB_SHIFT);

	return 0;

cleanup4:
	free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
							 sizeof(int)));
	io_tlb_list = NULL;
cleanup3:
	free_pages((unsigned long)v_overflow_buffer,
		   get_order(io_tlb_overflow));
	io_tlb_overflow_buffer = 0;
cleanup2:
	io_tlb_end = 0;
	io_tlb_start = 0;
	io_tlb_nslabs = 0;
	max_segment = 0;
	return -ENOMEM;
}

void __init swiotlb_free(void)
{
	if (!io_tlb_orig_addr)
		return;

	if (late_alloc) {
		free_pages((unsigned long)phys_to_virt(io_tlb_overflow_buffer),
			   get_order(io_tlb_overflow));
		free_pages((unsigned long)io_tlb_orig_addr,
			   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
		free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
								 sizeof(int)));
		free_pages((unsigned long)phys_to_virt(io_tlb_start),
			   get_order(io_tlb_nslabs << IO_TLB_SHIFT));
	} else {
		memblock_free_late(io_tlb_overflow_buffer,
				   PAGE_ALIGN(io_tlb_overflow));
		memblock_free_late(__pa(io_tlb_orig_addr),
				   PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
		memblock_free_late(__pa(io_tlb_list),
				   PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
		memblock_free_late(io_tlb_start,
				   PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
	}
	io_tlb_nslabs = 0;
	max_segment = 0;
}

int is_swiotlb_buffer(phys_addr_t paddr)
{
	return paddr >= io_tlb_start && paddr < io_tlb_end;
}

/*
 * Bounce: copy the swiotlb buffer back to the original dma location
 */
static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
			   size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(orig_addr);
	unsigned char *vaddr = phys_to_virt(tlb_addr);

	if (PageHighMem(pfn_to_page(pfn))) {
		/* The buffer does not have a mapping.  Map it in and copy */
		unsigned int offset = orig_addr & ~PAGE_MASK;
		char *buffer;
		unsigned int sz = 0;
		unsigned long flags;

		while (size) {
			sz = min_t(size_t, PAGE_SIZE - offset, size);

			local_irq_save(flags);
			buffer = kmap_atomic(pfn_to_page(pfn));
			if (dir == DMA_TO_DEVICE)
				memcpy(vaddr, buffer + offset, sz);
			else
				memcpy(buffer + offset, vaddr, sz);
			kunmap_atomic(buffer);
			local_irq_restore(flags);

			size -= sz;
			pfn++;
			vaddr += sz;
			offset = 0;
		}
	} else if (dir == DMA_TO_DEVICE) {
		memcpy(vaddr, phys_to_virt(orig_addr), size);
	} else {
		memcpy(phys_to_virt(orig_addr), vaddr, size);
	}
}

phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
				   dma_addr_t tbl_dma_addr,
				   phys_addr_t orig_addr, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	unsigned long flags;
	phys_addr_t tlb_addr;
	unsigned int nslots, stride, index, wrap;
	int i;
	unsigned long mask;
	unsigned long offset_slots;
	unsigned long max_slots;

	if (no_iotlb_memory)
		panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");

	if (mem_encrypt_active())
		pr_warn_once("%s is active and system is using DMA bounce buffers\n",
			     sme_active() ? "SME" : "SEV");

	mask = dma_get_seg_boundary(hwdev);

	tbl_dma_addr &= mask;

	offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

	/*
	 * Carefully handle integer overflow which can occur when mask == ~0UL.
	 */
	max_slots = mask + 1
		    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
		    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);

	/*
	 * For mappings greater than or equal to a page, we limit the stride
	 * (and hence alignment) to a page size.
	 */
	nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	if (size >= PAGE_SIZE)
		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
	else
		stride = 1;

	BUG_ON(!nslots);

	/*
	 * Find suitable number of IO TLB entries size that will fit this
	 * request and allocate a buffer from that IO TLB pool.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	index = ALIGN(io_tlb_index, stride);
	if (index >= io_tlb_nslabs)
		index = 0;
	wrap = index;

	do {
		while (iommu_is_span_boundary(index, nslots, offset_slots,
					      max_slots)) {
			index += stride;
			if (index >= io_tlb_nslabs)
				index = 0;
			if (index == wrap)
				goto not_found;
		}

		/*
		 * If we find a slot that indicates we have 'nslots' number of
		 * contiguous buffers, we allocate the buffers from that slot
		 * and mark the entries as '0' indicating unavailable.
		 */
		if (io_tlb_list[index] >= nslots) {
			int count = 0;

			for (i = index; i < (int) (index + nslots); i++)
				io_tlb_list[i] = 0;
			for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
				io_tlb_list[i] = ++count;
			tlb_addr = io_tlb_start + (index << IO_TLB_SHIFT);

			/*
			 * Update the indices to avoid searching in the next
			 * round.
			 */
			io_tlb_index = ((index + nslots) < io_tlb_nslabs
					? (index + nslots) : 0);

			goto found;
		}
		index += stride;
		if (index >= io_tlb_nslabs)
			index = 0;
	} while (index != wrap);

not_found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);
	if (printk_ratelimit())
		dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size);
	return SWIOTLB_MAP_ERROR;
found:
	spin_unlock_irqrestore(&io_tlb_lock, flags);

	/*
	 * Save away the mapping from the original address to the DMA address.
	 * This is needed when we sync the memory.  Then we sync the buffer if
	 * needed.
	 */
	for (i = 0; i < nslots; i++)
		io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);

	return tlb_addr;
}
EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);

/*
 * Allocates bounce buffer and returns its kernel virtual address.
 */

static phys_addr_t
map_single(struct device *hwdev, phys_addr_t phys, size_t size,
	   enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start_dma_addr;

	if (swiotlb_force == SWIOTLB_NO_FORCE) {
		dev_warn_ratelimited(hwdev, "Cannot do DMA to address %pa\n",
				     &phys);
		return SWIOTLB_MAP_ERROR;
	}

	start_dma_addr = swiotlb_phys_to_dma(hwdev, io_tlb_start);
	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
				      dir, attrs);
}

/*
 * dma_addr is the kernel virtual address of the bounce buffer to unmap.
 */
void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
			      size_t size, enum dma_data_direction dir,
			      unsigned long attrs)
{
	unsigned long flags;
	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t orig_addr = io_tlb_orig_addr[index];

	/*
	 * First, sync the memory before unmapping the entry
	 */
	if (orig_addr != INVALID_PHYS_ADDR &&
	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);

	/*
	 * Return the buffer to the free list by setting the corresponding
	 * entries to indicate the number of contiguous entries available.
	 * While returning the entries to the free list, we merge the entries
	 * with slots below and above the pool being returned.
	 */
	spin_lock_irqsave(&io_tlb_lock, flags);
	{
		count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
			 io_tlb_list[index + nslots] : 0);
		/*
		 * Step 1: return the slots to the free list, merging the
		 * slots with succeeding slots
		 */
		for (i = index + nslots - 1; i >= index; i--) {
			io_tlb_list[i] = ++count;
			io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
		}
		/*
		 * Step 2: merge the returned slots with the preceding slots,
		 * if available (non zero)
		 */
		for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
			io_tlb_list[i] = ++count;
	}
	spin_unlock_irqrestore(&io_tlb_lock, flags);
}
EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single);

void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
			     size_t size, enum dma_data_direction dir,
			     enum dma_sync_target target)
{
	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
	phys_addr_t orig_addr = io_tlb_orig_addr[index];

	if (orig_addr == INVALID_PHYS_ADDR)
		return;
	orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);

	switch (target) {
	case SYNC_FOR_CPU:
		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(orig_addr, tlb_addr,
				       size, DMA_FROM_DEVICE);
		else
			BUG_ON(dir != DMA_TO_DEVICE);
		break;
	case SYNC_FOR_DEVICE:
		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
			swiotlb_bounce(orig_addr, tlb_addr,
				       size, DMA_TO_DEVICE);
		else
			BUG_ON(dir != DMA_FROM_DEVICE);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single);

void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
		       dma_addr_t *dma_handle, gfp_t flags)
{
	dma_addr_t dev_addr;
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	ret = (void *)__get_free_pages(flags, order);
	if (ret) {
		dev_addr = swiotlb_virt_to_bus(hwdev, ret);
		if (dev_addr + size - 1 > dma_mask) {
			/*
			 * The allocated memory isn't reachable by the device.
			 */
			free_pages((unsigned long) ret, order);
			ret = NULL;
		}
	}
	if (!ret) {
		/*
		 * We are either out of memory or the device can't DMA to
		 * GFP_DMA memory; fall back on map_single(), which
		 * will grab memory from the lowest available address range.
		 */
		phys_addr_t paddr = map_single(hwdev, 0, size,
					       DMA_FROM_DEVICE, 0);
		if (paddr == SWIOTLB_MAP_ERROR)
			goto err_warn;

		ret = phys_to_virt(paddr);
		dev_addr = swiotlb_phys_to_dma(hwdev, paddr);

		/* Confirm address can be DMA'd by device */
		if (dev_addr + size - 1 > dma_mask) {
			printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
			       (unsigned long long)dma_mask,
			       (unsigned long long)dev_addr);

			/*
			 * DMA_TO_DEVICE to avoid memcpy in unmap_single.
			 * The DMA_ATTR_SKIP_CPU_SYNC is optional.
			 */
			swiotlb_tbl_unmap_single(hwdev, paddr,
						 size, DMA_TO_DEVICE,
						 DMA_ATTR_SKIP_CPU_SYNC);
			goto err_warn;
		}
	}

	*dma_handle = dev_addr;
	memset(ret, 0, size);

	return ret;

err_warn:
	pr_warn("swiotlb: coherent allocation failed for device %s size=%zu\n",
		dev_name(hwdev), size);
	dump_stack();

	return NULL;
}
EXPORT_SYMBOL(swiotlb_alloc_coherent);

void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
		      dma_addr_t dev_addr)
{
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

	WARN_ON(irqs_disabled());
	if (!is_swiotlb_buffer(paddr))
		free_pages((unsigned long)vaddr, get_order(size));
	else
		/*
		 * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single.
		 * DMA_ATTR_SKIP_CPU_SYNC is optional.
		 */
		swiotlb_tbl_unmap_single(hwdev, paddr, size, DMA_TO_DEVICE,
					 DMA_ATTR_SKIP_CPU_SYNC);
}
EXPORT_SYMBOL(swiotlb_free_coherent);

static void
swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
	     int do_panic)
{
	if (swiotlb_force == SWIOTLB_NO_FORCE)
		return;

	/*
	 * Ran out of IOMMU space for this operation. This is very bad.
	 * Unfortunately the drivers cannot handle this operation properly
	 * unless they check for dma_mapping_error (most don't).
	 * When the mapping is small enough return a static buffer to limit
	 * the damage, or panic when the transfer is too big.
	 */
	dev_err_ratelimited(dev, "DMA: Out of SW-IOMMU space for %zu bytes\n",
			    size);

	if (size <= io_tlb_overflow || !do_panic)
		return;

	if (dir == DMA_BIDIRECTIONAL)
		panic("DMA: Random memory could be DMA accessed\n");
	if (dir == DMA_FROM_DEVICE)
		panic("DMA: Random memory could be DMA written\n");
	if (dir == DMA_TO_DEVICE)
		panic("DMA: Random memory could be DMA read\n");
}

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_page or swiotlb_dma_sync_single is performed.
 */
dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
			    unsigned long offset, size_t size,
			    enum dma_data_direction dir,
			    unsigned long attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = phys_to_dma(dev, phys);

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size) && swiotlb_force != SWIOTLB_FORCE)
		return dev_addr;

	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

	/* Oh well, have to allocate and map a bounce buffer. */
	map = map_single(dev, phys, size, dir, attrs);
	if (map == SWIOTLB_MAP_ERROR) {
		swiotlb_full(dev, size, dir, 1);
		return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer);
	}

	dev_addr = swiotlb_phys_to_dma(dev, map);

	/* Ensure that the address returned is DMA'ble */
	if (dma_capable(dev, dev_addr, size))
		return dev_addr;

	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);

	return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer);
}
EXPORT_SYMBOL_GPL(swiotlb_map_page);

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			 size_t size, enum dma_data_direction dir,
			 unsigned long attrs)
{
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (is_swiotlb_buffer(paddr)) {
		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	/*
	 * phys_to_virt doesn't work with highmem page but we could
	 * call dma_mark_clean() with highmem page here. However, we
	 * are fine since dma_mark_clean() is null on POWERPC. We can
	 * make dma_mark_clean() take a physical address if necessary.
	 */
	dma_mark_clean(phys_to_virt(paddr), size);
}

void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs)
{
	unmap_single(hwdev, dev_addr, size, dir, attrs);
}
EXPORT_SYMBOL_GPL(swiotlb_unmap_page);

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to teardown the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * swiotlb_dma_sync_for_device, and then the device again owns the buffer
 */
static void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
		    size_t size, enum dma_data_direction dir,
		    enum dma_sync_target target)
{
	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);

	BUG_ON(dir == DMA_NONE);

	if (is_swiotlb_buffer(paddr)) {
		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(phys_to_virt(paddr), size);
}

void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);

void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_single_for_device);

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for swiotlb_map_page are the
 * same here.
 */
int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);

		if (swiotlb_force == SWIOTLB_FORCE ||
		    !dma_capable(hwdev, dev_addr, sg->length)) {
			phys_addr_t map = map_single(hwdev, sg_phys(sg),
						     sg->length, dir, attrs);
			if (map == SWIOTLB_MAP_ERROR) {
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				swiotlb_full(hwdev, sg->length, dir, 0);
				attrs |= DMA_ATTR_SKIP_CPU_SYNC;
				swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
						       attrs);
				sg_dma_len(sgl) = 0;
				return 0;
			}
			sg->dma_address = swiotlb_phys_to_dma(hwdev, map);
		} else
			sg->dma_address = dev_addr;
		sg_dma_len(sg) = sg->length;
	}
	return nelems;
}
EXPORT_SYMBOL(swiotlb_map_sg_attrs);

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
		       int nelems, enum dma_data_direction dir,
		       unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir,
			     attrs);
}
EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		int nelems, enum dma_data_direction dir,
		enum dma_sync_target target)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		swiotlb_sync_single(hwdev, sg->dma_address,
				    sg_dma_len(sg), dir, target);
}

void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			int nelems, enum dma_data_direction dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);

void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			   int nelems, enum dma_data_direction dir)
{
	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);

int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return (dma_addr == swiotlb_phys_to_dma(hwdev, io_tlb_overflow_buffer));
}
EXPORT_SYMBOL(swiotlb_dma_mapping_error);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return swiotlb_phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL(swiotlb_dma_supported);
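
For context, a minimal sketch of how the code above is usually reached: on a platform whose dma_map_ops are backed by swiotlb, a driver's dma_map_single()/dma_map_page() call ends up in swiotlb_map_page(), which bounces the buffer into the io_tlb area when the physical address is not DMA-capable for the device, and dma_mapping_error() corresponds to swiotlb_dma_mapping_error() matching the overflow-buffer address. The driver function and buffer below are hypothetical, not part of this file.

/*
 * Hypothetical driver fragment: one streaming DMA_TO_DEVICE mapping.
 * On a swiotlb-backed platform this path lands in swiotlb_map_page()
 * above; if 'buf' lies outside the device's DMA mask it is copied into
 * a bounce slot and the slot's DMA address is returned instead.
 */
#include <linux/dma-mapping.h>
#include <linux/device.h>

static int example_send(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))	/* overflow buffer or failure */
		return -ENOMEM;

	/* ... hand 'handle' to the device and wait for completion ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}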