Merge branch 'stable/swiotlb-0.8.3' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb-2.6

* 'stable/swiotlb-0.8.3' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb-2.6:
  swiotlb: Make swiotlb bookkeeping functions visible in the header file.
  swiotlb: search and replace "int dir" with "enum dma_data_direction dir"
  swiotlb: Make internal bookkeeping functions have 'swiotlb_tbl' prefix.
  swiotlb: add the swiotlb initialization function with iotlb memory
  swiotlb: add swiotlb_tbl_map_single library function
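
The point of the series above is to let code outside lib/swiotlb.c drive the bounce-buffer pool directly. As a rough, hypothetical sketch (not taken from the patches themselves), a caller holding the bus address of the IO-TLB pool could bounce a single receive buffer through the newly exported swiotlb_tbl_* entry points like this; 'dev', 'pool_bus_addr' and 'buf_phys' are placeholder arguments:

#include <linux/swiotlb.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/*
 * Sketch only: bounce one DMA_FROM_DEVICE buffer through the IO-TLB pool.
 * pool_bus_addr is the pool's bus address as the device sees it; the core's
 * own map_single() passes swiotlb_virt_to_bus(hwdev, io_tlb_start) here.
 */
static int example_bounce_from_device(struct device *dev,
                                      dma_addr_t pool_bus_addr,
                                      phys_addr_t buf_phys, size_t len)
{
        char *bounce;

        /* Reserve slots in the pool and remember buf_phys as the original. */
        bounce = swiotlb_tbl_map_single(dev, pool_bus_addr, buf_phys,
                                        len, DMA_FROM_DEVICE);
        if (!bounce)
                return -ENOMEM;

        /* ... program the device to DMA into the bounce slots ... */

        /* Copy the received data back while the mapping is still live. */
        swiotlb_tbl_sync_single(dev, bounce, len, DMA_FROM_DEVICE,
                                SYNC_FOR_CPU);

        /* Release the slots; DMA_FROM_DEVICE copies back once more here. */
        swiotlb_tbl_unmap_single(dev, bounce, len, DMA_FROM_DEVICE);
        return 0;
}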

 include/linux/swiotlb.h |  27 (+25, -2)
 lib/swiotlb.c           | 137 (+81, -56)
 2 files changed, 106 insertions(+), 58 deletions(-)

diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ ... @@
 #define IO_TLB_SHIFT 11
 
 extern void swiotlb_init(int verbose);
+extern void swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
+
+/*
+ * Enumeration for sync targets
+ */
+enum dma_sync_target {
+	SYNC_FOR_CPU = 0,
+	SYNC_FOR_DEVICE = 1,
+};
+extern void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
+				    phys_addr_t phys, size_t size,
+				    enum dma_data_direction dir);
+
+extern void swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr,
+				     size_t size, enum dma_data_direction dir);
+
+extern void swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr,
+				    size_t size, enum dma_data_direction dir,
+				    enum dma_sync_target target);
+
+/* Accessory functions. */
+extern void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
+			   enum dma_data_direction dir);
 
 extern void
 *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
@@ ... @@
 
 extern int
 swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-	       int direction);
+	       enum dma_data_direction dir);
 
 extern void
 swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-		 int direction);
+		 enum dma_data_direction dir);
 
 extern int
 swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ ... @@
  */
 #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
 
-/*
- * Enumeration for sync targets
- */
-enum dma_sync_target {
-	SYNC_FOR_CPU = 0,
-	SYNC_FOR_DEVICE = 1,
-};
-
 int swiotlb_force;
 
 /*
- * Used to do a quick range check in unmap_single and
- * sync_single_*, to see if the memory was in fact allocated by this
+ * Used to do a quick range check in swiotlb_tbl_unmap_single and
+ * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
  * API.
  */
 static char *io_tlb_start, *io_tlb_end;
@@ ... @@
 		 (unsigned long long)pend);
 }
 
-/*
- * Statically reserve bounce buffer space and initialize bounce buffer data
- * structures for the software IO TLB used to implement the DMA API.
- */
-void __init
-swiotlb_init_with_default_size(size_t default_size, int verbose)
+void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 {
 	unsigned long i, bytes;
 
-	if (!io_tlb_nslabs) {
-		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
-		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
-	}
+	bytes = nslabs << IO_TLB_SHIFT;
 
-	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
-
-	/*
-	 * Get IO TLB memory from the low pages
-	 */
-	io_tlb_start = alloc_bootmem_low_pages(bytes);
-	if (!io_tlb_start)
-		panic("Cannot allocate SWIOTLB buffer");
+	io_tlb_nslabs = nslabs;
+	io_tlb_start = tlb;
 	io_tlb_end = io_tlb_start + bytes;
 
 	/*
@@ ... @@
 		panic("Cannot allocate SWIOTLB overflow buffer!\n");
 	if (verbose)
 		swiotlb_print_info();
+}
+
+/*
+ * Statically reserve bounce buffer space and initialize bounce buffer data
+ * structures for the software IO TLB used to implement the DMA API.
+ */
+void __init
+swiotlb_init_with_default_size(size_t default_size, int verbose)
+{
+	unsigned long bytes;
+
+	if (!io_tlb_nslabs) {
+		io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
+		io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+	}
+
+	bytes = io_tlb_nslabs << IO_TLB_SHIFT;
+
+	/*
+	 * Get IO TLB memory from the low pages
+	 */
+	io_tlb_start = alloc_bootmem_low_pages(bytes);
+	if (!io_tlb_start)
+		panic("Cannot allocate SWIOTLB buffer");
+
+	swiotlb_init_with_tbl(io_tlb_start, io_tlb_nslabs, verbose);
 }
 
 void __init
@@ ... @@
 /*
  * Bounce: copy the swiotlb buffer back to the original dma location
  */
-static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
-			   enum dma_data_direction dir)
+void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
+		    enum dma_data_direction dir)
 {
 	unsigned long pfn = PFN_DOWN(phys);
 
@@ ... @@
 		memcpy(phys_to_virt(phys), dma_addr, size);
 	}
 }
+EXPORT_SYMBOL_GPL(swiotlb_bounce);
 
-/*
- * Allocates bounce buffer and returns its kernel virtual address.
- */
-static void *
-map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
+void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
+			     phys_addr_t phys, size_t size,
+			     enum dma_data_direction dir)
 {
 	unsigned long flags;
 	char *dma_addr;
 	unsigned int nslots, stride, index, wrap;
 	int i;
-	unsigned long start_dma_addr;
 	unsigned long mask;
 	unsigned long offset_slots;
 	unsigned long max_slots;
 
 	mask = dma_get_seg_boundary(hwdev);
-	start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask;
 
-	offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+	tbl_dma_addr &= mask;
+
+	offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 
 	/*
 	 * Carefully handle integer overflow which can occur when mask == ~0UL.
@@ ... @@
 
 	return dma_addr;
 }
+EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
+
+/*
+ * Allocates bounce buffer and returns its kernel virtual address.
+ */
+
+static void *
+map_single(struct device *hwdev, phys_addr_t phys, size_t size,
+	   enum dma_data_direction dir)
+{
+	dma_addr_t start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start);
+
+	return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir);
+}
 
 /*
  * dma_addr is the kernel virtual address of the bounce buffer to unmap.
  */
-static void
-do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
+void
+swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, size_t size,
+			 enum dma_data_direction dir)
 {
 	unsigned long flags;
 	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
@@ ... @@
 	}
 	spin_unlock_irqrestore(&io_tlb_lock, flags);
 }
+EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single);
 
-static void
-sync_single(struct device *hwdev, char *dma_addr, size_t size,
-	    int dir, int target)
+void
+swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr, size_t size,
+			enum dma_data_direction dir,
+			enum dma_sync_target target)
 {
 	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
 	phys_addr_t phys = io_tlb_orig_addr[index];
@@ ... @@
 		BUG();
 	}
 }
+EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single);
 
 void *
 swiotlb_alloc_coherent(struct device *hwdev, size_t size,
@@ ... @@
 	}
 	if (!ret) {
 		/*
-		 * We are either out of memory or the device can't DMA
-		 * to GFP_DMA memory; fall back on map_single(), which
+		 * We are either out of memory or the device can't DMA to
+		 * GFP_DMA memory; fall back on map_single(), which
 		 * will grab memory from the lowest available address range.
 		 */
 		ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
@@ ... @@
 		 (unsigned long long)dev_addr);
 
 	/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-	do_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
+	swiotlb_tbl_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
 	return NULL;
 }
 	*dma_handle = dev_addr;
@@ ... @@
 	if (!is_swiotlb_buffer(paddr))
 		free_pages((unsigned long)vaddr, get_order(size));
 	else
-		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-		do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
+		/* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */
+		swiotlb_tbl_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
 }
 EXPORT_SYMBOL(swiotlb_free_coherent);
 
 static void
-swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
+swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
+	     int do_panic)
 {
 	/*
 	 * Ran out of IOMMU space for this operation. This is very bad.
@@ ... @@
  * whatever the device wrote there.
  */
 static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
-			 size_t size, int dir)
+			 size_t size, enum dma_data_direction dir)
 {
 	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 
 	if (is_swiotlb_buffer(paddr)) {
-		do_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
+		swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
 		return;
 	}
 
@@ ... @@
  */
 static void
 swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
-		    size_t size, int dir, int target)
+		    size_t size, enum dma_data_direction dir,
+		    enum dma_sync_target target)
 {
 	phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 
 	if (is_swiotlb_buffer(paddr)) {
-		sync_single(hwdev, phys_to_virt(paddr), size, dir, target);
+		swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir,
+					target);
 		return;
 	}
 
@@ ... @@
 
 int
 swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-	       int dir)
+	       enum dma_data_direction dir)
 {
 	return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
 }
@@ ... @@
 
 void
 swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-		 int dir)
+		 enum dma_data_direction dir)
 {
 	return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
 }
@@ ... @@
  */
 static void
 swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
-		int nelems, int dir, int target)
+		int nelems, enum dma_data_direction dir,
+		enum dma_sync_target target)
 {
 	struct scatterlist *sg;
 	int i;