Merge branch 'stable/swiotlb-0.8.3' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb-2.6

* 'stable/swiotlb-0.8.3' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb-2.6:
  swiotlb: Make swiotlb bookkeeping functions visible in the header file.
  swiotlb: search and replace "int dir" with "enum dma_data_direction dir"
  swiotlb: Make internal bookkeeping functions have 'swiotlb_tbl' prefix.
  swiotlb: add the swiotlb initialization function with iotlb memory
  swiotlb: add swiotlb_tbl_map_single library function

 include/linux/swiotlb.h |  27 +++++++++++++++++++++++++--
 lib/swiotlb.c           | 137 ++++++++++++++++++++++++++++----------------
 2 files changed, 106 insertions(+), 58 deletions(-)

include/linux/swiotlb.h
···
 #define IO_TLB_SHIFT 11
 
 extern void swiotlb_init(int verbose);
+extern void swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
+
+/*
+ * Enumeration for sync targets
+ */
+enum dma_sync_target {
+        SYNC_FOR_CPU = 0,
+        SYNC_FOR_DEVICE = 1,
+};
+extern void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
+                                    phys_addr_t phys, size_t size,
+                                    enum dma_data_direction dir);
+
+extern void swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr,
+                                     size_t size, enum dma_data_direction dir);
+
+extern void swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr,
+                                    size_t size, enum dma_data_direction dir,
+                                    enum dma_sync_target target);
+
+/* Accessory functions. */
+extern void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
+                           enum dma_data_direction dir);
 
 extern void
 *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
···
 
 extern int
 swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-               int direction);
+               enum dma_data_direction dir);
 
 extern void
 swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
-                int direction);
+                enum dma_data_direction dir);
 
 extern int
 swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
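Note: the effect of the header changes above is that the bounce-buffer bookkeeping becomes callable from outside lib/swiotlb.c. A minimal sketch of how a secondary swiotlb backend might wrap the newly exported entry points; everything named my_* is hypothetical and not part of this merge, and my_tlb_bus_addr stands in for whatever bus address the backend computed for its own IO TLB:

#include <linux/swiotlb.h>
#include <linux/dma-mapping.h>

/* Hypothetical wrapper: reserve bounce slots for phys; for DMA_TO_DEVICE
 * and DMA_BIDIRECTIONAL the source data is copied into the bounce buffer.
 * Returns NULL when the IO TLB is exhausted. */
static void *my_backend_map(struct device *hwdev, dma_addr_t my_tlb_bus_addr,
                            phys_addr_t phys, size_t size,
                            enum dma_data_direction dir)
{
        return swiotlb_tbl_map_single(hwdev, my_tlb_bus_addr, phys, size, dir);
}

/* Hypothetical wrapper: bounce the device's writes back before the CPU
 * reads a DMA_FROM_DEVICE buffer. */
static void my_backend_sync_for_cpu(struct device *hwdev, char *map,
                                    size_t size, enum dma_data_direction dir)
{
        swiotlb_tbl_sync_single(hwdev, map, size, dir, SYNC_FOR_CPU);
}

/* Hypothetical wrapper: copy bounce data back (DMA_FROM_DEVICE and
 * DMA_BIDIRECTIONAL) and free the slots. */
static void my_backend_unmap(struct device *hwdev, char *map, size_t size,
                             enum dma_data_direction dir)
{
        swiotlb_tbl_unmap_single(hwdev, map, size, dir);
}
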
lib/swiotlb.c
···
  */
 #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
 
-/*
- * Enumeration for sync targets
- */
-enum dma_sync_target {
-        SYNC_FOR_CPU = 0,
-        SYNC_FOR_DEVICE = 1,
-};
-
 int swiotlb_force;
 
 /*
- * Used to do a quick range check in unmap_single and
- * sync_single_*, to see if the memory was in fact allocated by this
+ * Used to do a quick range check in swiotlb_tbl_unmap_single and
+ * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
  * API.
  */
 static char *io_tlb_start, *io_tlb_end;
···
                        (unsigned long long)pend);
 }
 
-/*
- * Statically reserve bounce buffer space and initialize bounce buffer data
- * structures for the software IO TLB used to implement the DMA API.
- */
-void __init
-swiotlb_init_with_default_size(size_t default_size, int verbose)
+void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 {
         unsigned long i, bytes;
 
-        if (!io_tlb_nslabs) {
-                io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
-                io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
-        }
+        bytes = nslabs << IO_TLB_SHIFT;
 
-        bytes = io_tlb_nslabs << IO_TLB_SHIFT;
-
-        /*
-         * Get IO TLB memory from the low pages
-         */
-        io_tlb_start = alloc_bootmem_low_pages(bytes);
-        if (!io_tlb_start)
-                panic("Cannot allocate SWIOTLB buffer");
+        io_tlb_nslabs = nslabs;
+        io_tlb_start = tlb;
         io_tlb_end = io_tlb_start + bytes;
 
         /*
···
                 panic("Cannot allocate SWIOTLB overflow buffer!\n");
         if (verbose)
                 swiotlb_print_info();
+}
+
+/*
+ * Statically reserve bounce buffer space and initialize bounce buffer data
+ * structures for the software IO TLB used to implement the DMA API.
+ */
+void __init
+swiotlb_init_with_default_size(size_t default_size, int verbose)
+{
+        unsigned long bytes;
+
+        if (!io_tlb_nslabs) {
+                io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
+                io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
+        }
+
+        bytes = io_tlb_nslabs << IO_TLB_SHIFT;
+
+        /*
+         * Get IO TLB memory from the low pages
+         */
+        io_tlb_start = alloc_bootmem_low_pages(bytes);
+        if (!io_tlb_start)
+                panic("Cannot allocate SWIOTLB buffer");
+
+        swiotlb_init_with_tbl(io_tlb_start, io_tlb_nslabs, verbose);
 }
 
 void __init
···
 /*
  * Bounce: copy the swiotlb buffer back to the original dma location
  */
-static void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
-                           enum dma_data_direction dir)
+void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
+                    enum dma_data_direction dir)
 {
         unsigned long pfn = PFN_DOWN(phys);
···
                         memcpy(phys_to_virt(phys), dma_addr, size);
         }
 }
+EXPORT_SYMBOL_GPL(swiotlb_bounce);
 
-/*
- * Allocates bounce buffer and returns its kernel virtual address.
- */
-static void *
-map_single(struct device *hwdev, phys_addr_t phys, size_t size, int dir)
+void *swiotlb_tbl_map_single(struct device *hwdev, dma_addr_t tbl_dma_addr,
+                             phys_addr_t phys, size_t size,
+                             enum dma_data_direction dir)
 {
         unsigned long flags;
         char *dma_addr;
         unsigned int nslots, stride, index, wrap;
         int i;
-        unsigned long start_dma_addr;
         unsigned long mask;
         unsigned long offset_slots;
         unsigned long max_slots;
 
         mask = dma_get_seg_boundary(hwdev);
-        start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start) & mask;
 
-        offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
+        tbl_dma_addr &= mask;
+
+        offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 
         /*
          * Carefully handle integer overflow which can occur when mask == ~0UL.
···
 
         return dma_addr;
 }
+EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
+
+/*
+ * Allocates bounce buffer and returns its kernel virtual address.
+ */
+
+static void *
+map_single(struct device *hwdev, phys_addr_t phys, size_t size,
+           enum dma_data_direction dir)
+{
+        dma_addr_t start_dma_addr = swiotlb_virt_to_bus(hwdev, io_tlb_start);
+
+        return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size, dir);
+}
 
 /*
  * dma_addr is the kernel virtual address of the bounce buffer to unmap.
  */
-static void
-do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
+void
+swiotlb_tbl_unmap_single(struct device *hwdev, char *dma_addr, size_t size,
+                         enum dma_data_direction dir)
 {
         unsigned long flags;
         int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
···
         }
         spin_unlock_irqrestore(&io_tlb_lock, flags);
 }
+EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single);
 
-static void
-sync_single(struct device *hwdev, char *dma_addr, size_t size,
-            int dir, int target)
+void
+swiotlb_tbl_sync_single(struct device *hwdev, char *dma_addr, size_t size,
+                        enum dma_data_direction dir,
+                        enum dma_sync_target target)
 {
         int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
         phys_addr_t phys = io_tlb_orig_addr[index];
···
                 BUG();
         }
 }
+EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single);
 
 void *
 swiotlb_alloc_coherent(struct device *hwdev, size_t size,
···
         }
         if (!ret) {
                 /*
-                 * We are either out of memory or the device can't DMA
-                 * to GFP_DMA memory; fall back on map_single(), which
+                 * We are either out of memory or the device can't DMA to
+                 * GFP_DMA memory; fall back on map_single(), which
                  * will grab memory from the lowest available address range.
                  */
                 ret = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
···
                                (unsigned long long)dev_addr);
 
                 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-                do_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
+                swiotlb_tbl_unmap_single(hwdev, ret, size, DMA_TO_DEVICE);
                 return NULL;
         }
         *dma_handle = dev_addr;
···
         if (!is_swiotlb_buffer(paddr))
                 free_pages((unsigned long)vaddr, get_order(size));
         else
-                /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
-                do_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
+                /* DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single */
+                swiotlb_tbl_unmap_single(hwdev, vaddr, size, DMA_TO_DEVICE);
 }
 EXPORT_SYMBOL(swiotlb_free_coherent);
 
 static void
-swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
+swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
+             int do_panic)
 {
         /*
          * Ran out of IOMMU space for this operation. This is very bad.
···
  * whatever the device wrote there.
  */
 static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
-                         size_t size, int dir)
+                         size_t size, enum dma_data_direction dir)
 {
         phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
 
         BUG_ON(dir == DMA_NONE);
 
         if (is_swiotlb_buffer(paddr)) {
-                do_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
+                swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
                 return;
         }
···
  */
 static void
 swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
-                    size_t size, int dir, int target)
+                    size_t size, enum dma_data_direction dir,
+                    enum dma_sync_target target)
 {
         phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
 
         BUG_ON(dir == DMA_NONE);
 
         if (is_swiotlb_buffer(paddr)) {
-                sync_single(hwdev, phys_to_virt(paddr), size, dir, target);
+                swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir,
+                                        target);
                 return;
         }
···
 
 int
 swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-               int dir)
+               enum dma_data_direction dir)
 {
         return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
 }
···
 
 void
 swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-                int dir)
+                enum dma_data_direction dir)
 {
         return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
 }
···
  */
 static void
 swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
-                int nelems, int dir, int target)
+                int nelems, enum dma_data_direction dir,
+                enum dma_sync_target target)
 {
         struct scatterlist *sg;
         int i;
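
Note: with the allocation split out of swiotlb_init_with_default_size(), an early-boot caller can place the IO TLB itself and hand the memory to the generic bookkeeping code. A minimal sketch under that assumption; my_platform_swiotlb_init and the 64 MB size are illustrative only, not part of this merge:

#include <linux/kernel.h>
#include <linux/bootmem.h>
#include <linux/swiotlb.h>

/* Hypothetical early-boot init, modeled on swiotlb_init_with_default_size(). */
void __init my_platform_swiotlb_init(int verbose)
{
        unsigned long nslabs = ALIGN((64UL << 20) >> IO_TLB_SHIFT,
                                     IO_TLB_SEGSIZE);
        char *tlb;

        /* The caller, not lib/swiotlb.c, decides where the buffer lives. */
        tlb = alloc_bootmem_low_pages(nslabs << IO_TLB_SHIFT);
        if (!tlb)
                panic("my_platform: cannot allocate SWIOTLB buffer");

        /* Sets io_tlb_start/io_tlb_end, allocates the bookkeeping arrays,
         * and prints the usual banner when verbose is set. */
        swiotlb_init_with_tbl(tlb, nslabs, verbose);
}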