Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'dma-mapping-5.15' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping updates from Christoph Hellwig:

- fix debugfs initialization order (Anthony Iliopoulos)

- use memory_intersects() directly (Kefeng Wang)

- allow returning specific errors from ->map_sg (Logan Gunthorpe,
  Martin Oliveira); a caller-side sketch follows this list

- turn the dma_map_sg return value into an unsigned int (me)

- provide a common global coherent pool implementation (me)

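To illustrate the caller-visible effect, here is a minimal, hypothetical driver-side sketch of the sg_table interface, which is where the new specific error codes surface. This is example code written for this summary, not part of the series; my_dev_map_buffer() is an invented name.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical caller: dma_map_sgtable() returns 0 on success or a specific
 * negative errno (-EINVAL, -ENOMEM or -EIO) on failure. */
static int my_dev_map_buffer(struct device *dev, struct sg_table *sgt)
{
        int ret;

        ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
        if (ret) {
                /* -ENOMEM may succeed if retried later; -EINVAL and -EIO will not */
                dev_err(dev, "failed to map scatterlist: %d\n", ret);
                return ret;
        }

        /* sgt->nents now holds the number of mapped DMA segments (possibly
         * fewer than sgt->orig_nents); release with dma_unmap_sgtable(). */
        return 0;
}
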
* tag 'dma-mapping-5.15' of git://git.infradead.org/users/hch/dma-mapping: (31 commits)
hexagon: use the generic global coherent pool
dma-mapping: make the global coherent pool conditional
dma-mapping: add a dma_init_global_coherent helper
dma-mapping: simplify dma_init_coherent_memory
dma-mapping: allow using the global coherent pool for !ARM
ARM/nommu: use the generic dma-direct code for non-coherent devices
dma-direct: add support for dma_coherent_default_memory
dma-mapping: return an unsigned int from dma_map_sg{,_attrs}
dma-mapping: disallow .map_sg operations from returning zero on error
dma-mapping: return error code from dma_dummy_map_sg()
x86/amd_gart: don't set failed sg dma_address to DMA_MAPPING_ERROR
x86/amd_gart: return error code from gart_map_sg()
xen: swiotlb: return error code from xen_swiotlb_map_sg()
parisc: return error code from .map_sg() ops
sparc/iommu: don't set failed sg dma_address to DMA_MAPPING_ERROR
sparc/iommu: return error codes from .map_sg() ops
s390/pci: don't set failed sg dma_address to DMA_MAPPING_ERROR
s390/pci: return error code from s390_dma_map_sg()
powerpc/iommu: don't set failed sg dma_address to DMA_MAPPING_ERROR
powerpc/iommu: return error code from .map_sg() ops
...

+309 -442
+7 -3
arch/alpha/kernel/pci_iommu.c
··· 649 649 sg->dma_address 650 650 = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg), 651 651 sg->length, dac_allowed); 652 - return sg->dma_address != DMA_MAPPING_ERROR; 652 + if (sg->dma_address == DMA_MAPPING_ERROR) 653 + return -EIO; 654 + return 1; 653 655 } 654 656 655 657 start = sg; ··· 687 685 if (out < end) 688 686 out->dma_length = 0; 689 687 690 - if (out - start == 0) 688 + if (out - start == 0) { 691 689 printk(KERN_WARNING "pci_map_sg failed: no entries?\n"); 690 + return -ENOMEM; 691 + } 692 692 DBGA("pci_map_sg: %ld entries\n", out - start); 693 693 694 694 return out - start; ··· 703 699 entries. Unmap them now. */ 704 700 if (out > start) 705 701 pci_unmap_sg(pdev, start, out - start, dir); 706 - return 0; 702 + return -ENOMEM; 707 703 } 708 704 709 705 /* Unmap a set of streaming mode DMA translations. Again, cpu read
+3 -2
arch/arm/Kconfig
··· 18 18 select ARCH_HAS_SET_MEMORY 19 19 select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL 20 20 select ARCH_HAS_STRICT_MODULE_RWX if MMU 21 - select ARCH_HAS_SYNC_DMA_FOR_DEVICE if SWIOTLB 22 - select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB 21 + select ARCH_HAS_SYNC_DMA_FOR_DEVICE if SWIOTLB || !MMU 22 + select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB || !MMU 23 23 select ARCH_HAS_TEARDOWN_DMA_OPS if MMU 24 24 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST 25 25 select ARCH_HAVE_CUSTOM_GPIO_H ··· 44 44 select CPU_PM if SUSPEND || CPU_IDLE 45 45 select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS 46 46 select DMA_DECLARE_COHERENT 47 + select DMA_GLOBAL_POOL if !MMU 47 48 select DMA_OPS 48 49 select DMA_REMAP if MMU 49 50 select EDAC_SUPPORT
+6 -167
arch/arm/mm/dma-mapping-nommu.c
··· 5 5 * Copyright (C) 2000-2004 Russell King 6 6 */ 7 7 8 - #include <linux/export.h> 9 - #include <linux/mm.h> 10 - #include <linux/dma-direct.h> 11 8 #include <linux/dma-map-ops.h> 12 - #include <linux/scatterlist.h> 13 - 14 9 #include <asm/cachetype.h> 15 10 #include <asm/cacheflush.h> 16 11 #include <asm/outercache.h> ··· 13 18 14 19 #include "dma.h" 15 20 16 - /* 17 - * The generic direct mapping code is used if 18 - * - MMU/MPU is off 19 - * - cpu is v7m w/o cache support 20 - * - device is coherent 21 - * otherwise arm_nommu_dma_ops is used. 22 - * 23 - * arm_nommu_dma_ops rely on consistent DMA memory (please, refer to 24 - * [1] on how to declare such memory). 25 - * 26 - * [1] Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt 27 - */ 28 - 29 - static void *arm_nommu_dma_alloc(struct device *dev, size_t size, 30 - dma_addr_t *dma_handle, gfp_t gfp, 31 - unsigned long attrs) 32 - 33 - { 34 - void *ret = dma_alloc_from_global_coherent(dev, size, dma_handle); 35 - 36 - /* 37 - * dma_alloc_from_global_coherent() may fail because: 38 - * 39 - * - no consistent DMA region has been defined, so we can't 40 - * continue. 41 - * - there is no space left in consistent DMA region, so we 42 - * only can fallback to generic allocator if we are 43 - * advertised that consistency is not required. 44 - */ 45 - 46 - WARN_ON_ONCE(ret == NULL); 47 - return ret; 48 - } 49 - 50 - static void arm_nommu_dma_free(struct device *dev, size_t size, 51 - void *cpu_addr, dma_addr_t dma_addr, 52 - unsigned long attrs) 53 - { 54 - int ret = dma_release_from_global_coherent(get_order(size), cpu_addr); 55 - 56 - WARN_ON_ONCE(ret == 0); 57 - } 58 - 59 - static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma, 60 - void *cpu_addr, dma_addr_t dma_addr, size_t size, 61 - unsigned long attrs) 62 - { 63 - int ret; 64 - 65 - if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret)) 66 - return ret; 67 - if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) 68 - return ret; 69 - return -ENXIO; 70 - } 71 - 72 - 73 - static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size, 74 - enum dma_data_direction dir) 21 + void arch_sync_dma_for_device(phys_addr_t paddr, size_t size, 22 + enum dma_data_direction dir) 75 23 { 76 24 dmac_map_area(__va(paddr), size, dir); 77 25 ··· 24 86 outer_clean_range(paddr, paddr + size); 25 87 } 26 88 27 - static void __dma_page_dev_to_cpu(phys_addr_t paddr, size_t size, 28 - enum dma_data_direction dir) 89 + void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, 90 + enum dma_data_direction dir) 29 91 { 30 92 if (dir != DMA_TO_DEVICE) { 31 93 outer_inv_range(paddr, paddr + size); 32 94 dmac_unmap_area(__va(paddr), size, dir); 33 95 } 34 96 } 35 - 36 - static dma_addr_t arm_nommu_dma_map_page(struct device *dev, struct page *page, 37 - unsigned long offset, size_t size, 38 - enum dma_data_direction dir, 39 - unsigned long attrs) 40 - { 41 - dma_addr_t handle = page_to_phys(page) + offset; 42 - 43 - __dma_page_cpu_to_dev(handle, size, dir); 44 - 45 - return handle; 46 - } 47 - 48 - static void arm_nommu_dma_unmap_page(struct device *dev, dma_addr_t handle, 49 - size_t size, enum dma_data_direction dir, 50 - unsigned long attrs) 51 - { 52 - __dma_page_dev_to_cpu(handle, size, dir); 53 - } 54 - 55 - 56 - static int arm_nommu_dma_map_sg(struct device *dev, struct scatterlist *sgl, 57 - int nents, enum dma_data_direction dir, 58 - unsigned long attrs) 59 - { 60 - int i; 61 - struct scatterlist *sg; 62 - 63 - for_each_sg(sgl, sg, nents, 
i) { 64 - sg_dma_address(sg) = sg_phys(sg); 65 - sg_dma_len(sg) = sg->length; 66 - __dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir); 67 - } 68 - 69 - return nents; 70 - } 71 - 72 - static void arm_nommu_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, 73 - int nents, enum dma_data_direction dir, 74 - unsigned long attrs) 75 - { 76 - struct scatterlist *sg; 77 - int i; 78 - 79 - for_each_sg(sgl, sg, nents, i) 80 - __dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir); 81 - } 82 - 83 - static void arm_nommu_dma_sync_single_for_device(struct device *dev, 84 - dma_addr_t handle, size_t size, enum dma_data_direction dir) 85 - { 86 - __dma_page_cpu_to_dev(handle, size, dir); 87 - } 88 - 89 - static void arm_nommu_dma_sync_single_for_cpu(struct device *dev, 90 - dma_addr_t handle, size_t size, enum dma_data_direction dir) 91 - { 92 - __dma_page_cpu_to_dev(handle, size, dir); 93 - } 94 - 95 - static void arm_nommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl, 96 - int nents, enum dma_data_direction dir) 97 - { 98 - struct scatterlist *sg; 99 - int i; 100 - 101 - for_each_sg(sgl, sg, nents, i) 102 - __dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir); 103 - } 104 - 105 - static void arm_nommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl, 106 - int nents, enum dma_data_direction dir) 107 - { 108 - struct scatterlist *sg; 109 - int i; 110 - 111 - for_each_sg(sgl, sg, nents, i) 112 - __dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir); 113 - } 114 - 115 - const struct dma_map_ops arm_nommu_dma_ops = { 116 - .alloc = arm_nommu_dma_alloc, 117 - .free = arm_nommu_dma_free, 118 - .alloc_pages = dma_direct_alloc_pages, 119 - .free_pages = dma_direct_free_pages, 120 - .mmap = arm_nommu_dma_mmap, 121 - .map_page = arm_nommu_dma_map_page, 122 - .unmap_page = arm_nommu_dma_unmap_page, 123 - .map_sg = arm_nommu_dma_map_sg, 124 - .unmap_sg = arm_nommu_dma_unmap_sg, 125 - .sync_single_for_device = arm_nommu_dma_sync_single_for_device, 126 - .sync_single_for_cpu = arm_nommu_dma_sync_single_for_cpu, 127 - .sync_sg_for_device = arm_nommu_dma_sync_sg_for_device, 128 - .sync_sg_for_cpu = arm_nommu_dma_sync_sg_for_cpu, 129 - }; 130 - EXPORT_SYMBOL(arm_nommu_dma_ops); 131 97 132 98 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, 133 99 const struct iommu_ops *iommu, bool coherent) ··· 43 201 * enough to check if MPU is in use or not since in absense of 44 202 * MPU system memory map is used. 45 203 */ 46 - dev->archdata.dma_coherent = (cacheid) ? coherent : true; 204 + dev->dma_coherent = cacheid ? coherent : true; 47 205 } else { 48 206 /* 49 207 * Assume coherent DMA in case MMU/MPU has not been set up. 50 208 */ 51 - dev->archdata.dma_coherent = (get_cr() & CR_M) ? coherent : true; 209 + dev->dma_coherent = (get_cr() & CR_M) ? coherent : true; 52 210 } 53 - 54 - if (!dev->archdata.dma_coherent) 55 - set_dma_ops(dev, &arm_nommu_dma_ops); 56 211 }
+16 -10
arch/arm/mm/dma-mapping.c
··· 980 980 { 981 981 const struct dma_map_ops *ops = get_dma_ops(dev); 982 982 struct scatterlist *s; 983 - int i, j; 983 + int i, j, ret; 984 984 985 985 for_each_sg(sg, s, nents, i) { 986 986 #ifdef CONFIG_NEED_SG_DMA_LENGTH ··· 988 988 #endif 989 989 s->dma_address = ops->map_page(dev, sg_page(s), s->offset, 990 990 s->length, dir, attrs); 991 - if (dma_mapping_error(dev, s->dma_address)) 991 + if (dma_mapping_error(dev, s->dma_address)) { 992 + ret = -EIO; 992 993 goto bad_mapping; 994 + } 993 995 } 994 996 return nents; 995 997 996 998 bad_mapping: 997 999 for_each_sg(sg, s, i, j) 998 1000 ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs); 999 - return 0; 1001 + return ret; 1000 1002 } 1001 1003 1002 1004 /** ··· 1624 1622 bool is_coherent) 1625 1623 { 1626 1624 struct scatterlist *s = sg, *dma = sg, *start = sg; 1627 - int i, count = 0; 1625 + int i, count = 0, ret; 1628 1626 unsigned int offset = s->offset; 1629 1627 unsigned int size = s->offset + s->length; 1630 1628 unsigned int max = dma_get_max_seg_size(dev); ··· 1632 1630 for (i = 1; i < nents; i++) { 1633 1631 s = sg_next(s); 1634 1632 1635 - s->dma_address = DMA_MAPPING_ERROR; 1636 1633 s->dma_length = 0; 1637 1634 1638 1635 if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) { 1639 - if (__map_sg_chunk(dev, start, size, &dma->dma_address, 1640 - dir, attrs, is_coherent) < 0) 1636 + ret = __map_sg_chunk(dev, start, size, 1637 + &dma->dma_address, dir, attrs, 1638 + is_coherent); 1639 + if (ret < 0) 1641 1640 goto bad_mapping; 1642 1641 1643 1642 dma->dma_address += offset; ··· 1651 1648 } 1652 1649 size += s->length; 1653 1650 } 1654 - if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs, 1655 - is_coherent) < 0) 1651 + ret = __map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs, 1652 + is_coherent); 1653 + if (ret < 0) 1656 1654 goto bad_mapping; 1657 1655 1658 1656 dma->dma_address += offset; ··· 1664 1660 bad_mapping: 1665 1661 for_each_sg(sg, s, count, i) 1666 1662 __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s)); 1667 - return 0; 1663 + if (ret == -ENOMEM) 1664 + return ret; 1665 + return -EINVAL; 1668 1666 } 1669 1667 1670 1668 /**
+1
arch/hexagon/Kconfig
··· 7 7 select ARCH_32BIT_OFF_T 8 8 select ARCH_HAS_SYNC_DMA_FOR_DEVICE 9 9 select ARCH_NO_PREEMPT 10 + select DMA_GLOBAL_POOL 10 11 # Other pending projects/to-do items. 11 12 # select HAVE_REGS_AND_STACK_ACCESS_API 12 13 # select HAVE_HW_BREAKPOINT if PERF_EVENTS
+11 -46
arch/hexagon/kernel/dma.c
··· 7 7 8 8 #include <linux/dma-map-ops.h> 9 9 #include <linux/memblock.h> 10 - #include <linux/genalloc.h> 11 - #include <linux/module.h> 12 10 #include <asm/page.h> 13 - 14 - static struct gen_pool *coherent_pool; 15 - 16 - 17 - /* Allocates from a pool of uncached memory that was reserved at boot time */ 18 - 19 - void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_addr, 20 - gfp_t flag, unsigned long attrs) 21 - { 22 - void *ret; 23 - 24 - /* 25 - * Our max_low_pfn should have been backed off by 16MB in 26 - * mm/init.c to create DMA coherent space. Use that as the VA 27 - * for the pool. 28 - */ 29 - 30 - if (coherent_pool == NULL) { 31 - coherent_pool = gen_pool_create(PAGE_SHIFT, -1); 32 - 33 - if (coherent_pool == NULL) 34 - panic("Can't create %s() memory pool!", __func__); 35 - else 36 - gen_pool_add(coherent_pool, 37 - (unsigned long)pfn_to_virt(max_low_pfn), 38 - hexagon_coherent_pool_size, -1); 39 - } 40 - 41 - ret = (void *) gen_pool_alloc(coherent_pool, size); 42 - 43 - if (ret) { 44 - memset(ret, 0, size); 45 - *dma_addr = (dma_addr_t) virt_to_phys(ret); 46 - } else 47 - *dma_addr = ~0; 48 - 49 - return ret; 50 - } 51 - 52 - void arch_dma_free(struct device *dev, size_t size, void *vaddr, 53 - dma_addr_t dma_addr, unsigned long attrs) 54 - { 55 - gen_pool_free(coherent_pool, (unsigned long) vaddr, size); 56 - } 57 11 58 12 void arch_sync_dma_for_device(phys_addr_t paddr, size_t size, 59 13 enum dma_data_direction dir) ··· 31 77 BUG(); 32 78 } 33 79 } 80 + 81 + /* 82 + * Our max_low_pfn should have been backed off by 16MB in mm/init.c to create 83 + * DMA coherent space. Use that for the pool. 84 + */ 85 + static int __init hexagon_dma_init(void) 86 + { 87 + return dma_init_global_coherent(PFN_PHYS(max_low_pfn), 88 + hexagon_coherent_pool_size); 89 + } 90 + core_initcall(hexagon_dma_init);
+2 -2
arch/ia64/hp/common/sba_iommu.c
··· 1459 1459 sglist->dma_address = sba_map_page(dev, sg_page(sglist), 1460 1460 sglist->offset, sglist->length, dir, attrs); 1461 1461 if (dma_mapping_error(dev, sglist->dma_address)) 1462 - return 0; 1462 + return -EIO; 1463 1463 return 1; 1464 1464 } 1465 1465 ··· 1486 1486 coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents); 1487 1487 if (coalesced < 0) { 1488 1488 sba_unmap_sg_attrs(dev, sglist, nents, dir, attrs); 1489 - return 0; 1489 + return -ENOMEM; 1490 1490 } 1491 1491 1492 1492 /*
+1 -1
arch/mips/jazz/jazzdma.c
··· 552 552 dir); 553 553 sg->dma_address = vdma_alloc(sg_phys(sg), sg->length); 554 554 if (sg->dma_address == DMA_MAPPING_ERROR) 555 - return 0; 555 + return -EIO; 556 556 sg_dma_len(sg) = sg->length; 557 557 } 558 558
+2 -4
arch/powerpc/kernel/iommu.c
··· 473 473 BUG_ON(direction == DMA_NONE); 474 474 475 475 if ((nelems == 0) || !tbl) 476 - return 0; 476 + return -EINVAL; 477 477 478 478 outs = s = segstart = &sglist[0]; 479 479 outcount = 1; ··· 575 575 */ 576 576 if (outcount < incount) { 577 577 outs = sg_next(outs); 578 - outs->dma_address = DMA_MAPPING_ERROR; 579 578 outs->dma_length = 0; 580 579 } 581 580 ··· 592 593 npages = iommu_num_pages(s->dma_address, s->dma_length, 593 594 IOMMU_PAGE_SIZE(tbl)); 594 595 __iommu_free(tbl, vaddr, npages); 595 - s->dma_address = DMA_MAPPING_ERROR; 596 596 s->dma_length = 0; 597 597 } 598 598 if (s == outs) 599 599 break; 600 600 } 601 - return 0; 601 + return -EIO; 602 602 } 603 603 604 604
+1 -1
arch/powerpc/platforms/ps3/system-bus.c
··· 662 662 unsigned long attrs) 663 663 { 664 664 BUG(); 665 - return 0; 665 + return -EINVAL; 666 666 } 667 667 668 668 static void ps3_sb_unmap_sg(struct device *_dev, struct scatterlist *sg,
+3 -2
arch/powerpc/platforms/pseries/vio.c
··· 560 560 for_each_sg(sglist, sgl, nelems, count) 561 561 alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl)); 562 562 563 - if (vio_cmo_alloc(viodev, alloc_size)) 563 + ret = vio_cmo_alloc(viodev, alloc_size); 564 + if (ret) 564 565 goto out_fail; 565 566 ret = ppc_iommu_map_sg(dev, tbl, sglist, nelems, dma_get_mask(dev), 566 567 direction, attrs); ··· 578 577 vio_cmo_dealloc(viodev, alloc_size); 579 578 out_fail: 580 579 atomic_inc(&viodev->cmo.allocs_failed); 581 - return 0; 580 + return ret; 582 581 } 583 582 584 583 static void vio_dma_iommu_unmap_sg(struct device *dev,
+7 -6
arch/s390/pci/pci_dma.c
··· 487 487 unsigned int max = dma_get_max_seg_size(dev); 488 488 unsigned int size = s->offset + s->length; 489 489 unsigned int offset = s->offset; 490 - int count = 0, i; 490 + int count = 0, i, ret; 491 491 492 492 for (i = 1; i < nr_elements; i++) { 493 493 s = sg_next(s); 494 494 495 - s->dma_address = DMA_MAPPING_ERROR; 496 495 s->dma_length = 0; 497 496 498 497 if (s->offset || (size & ~PAGE_MASK) || 499 498 size + s->length > max) { 500 - if (__s390_dma_map_sg(dev, start, size, 501 - &dma->dma_address, dir)) 499 + ret = __s390_dma_map_sg(dev, start, size, 500 + &dma->dma_address, dir); 501 + if (ret) 502 502 goto unmap; 503 503 504 504 dma->dma_address += offset; ··· 511 511 } 512 512 size += s->length; 513 513 } 514 - if (__s390_dma_map_sg(dev, start, size, &dma->dma_address, dir)) 514 + ret = __s390_dma_map_sg(dev, start, size, &dma->dma_address, dir); 515 + if (ret) 515 516 goto unmap; 516 517 517 518 dma->dma_address += offset; ··· 524 523 s390_dma_unmap_pages(dev, sg_dma_address(s), sg_dma_len(s), 525 524 dir, attrs); 526 525 527 - return 0; 526 + return ret; 528 527 } 529 528 530 529 static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+2 -4
arch/sparc/kernel/iommu.c
··· 448 448 iommu = dev->archdata.iommu; 449 449 strbuf = dev->archdata.stc; 450 450 if (nelems == 0 || !iommu) 451 - return 0; 451 + return -EINVAL; 452 452 453 453 spin_lock_irqsave(&iommu->lock, flags); 454 454 ··· 546 546 547 547 if (outcount < incount) { 548 548 outs = sg_next(outs); 549 - outs->dma_address = DMA_MAPPING_ERROR; 550 549 outs->dma_length = 0; 551 550 } 552 551 ··· 571 572 iommu_tbl_range_free(&iommu->tbl, vaddr, npages, 572 573 IOMMU_ERROR_CODE); 573 574 574 - s->dma_address = DMA_MAPPING_ERROR; 575 575 s->dma_length = 0; 576 576 } 577 577 if (s == outs) ··· 578 580 } 579 581 spin_unlock_irqrestore(&iommu->lock, flags); 580 582 581 - return 0; 583 + return -EINVAL; 582 584 } 583 585 584 586 /* If contexts are being used, they are the same in all of the mappings
+2 -4
arch/sparc/kernel/pci_sun4v.c
··· 486 486 487 487 iommu = dev->archdata.iommu; 488 488 if (nelems == 0 || !iommu) 489 - return 0; 489 + return -EINVAL; 490 490 atu = iommu->atu; 491 491 492 492 prot = HV_PCI_MAP_ATTR_READ; ··· 594 594 595 595 if (outcount < incount) { 596 596 outs = sg_next(outs); 597 - outs->dma_address = DMA_MAPPING_ERROR; 598 597 outs->dma_length = 0; 599 598 } 600 599 ··· 610 611 iommu_tbl_range_free(tbl, vaddr, npages, 611 612 IOMMU_ERROR_CODE); 612 613 /* XXX demap? XXX */ 613 - s->dma_address = DMA_MAPPING_ERROR; 614 614 s->dma_length = 0; 615 615 } 616 616 if (s == outs) ··· 617 619 } 618 620 local_irq_restore(flags); 619 621 620 - return 0; 622 + return -EINVAL; 621 623 } 622 624 623 625 static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
+1 -1
arch/sparc/mm/iommu.c
··· 256 256 sg->dma_address =__sbus_iommu_map_page(dev, sg_page(sg), 257 257 sg->offset, sg->length, per_page_flush); 258 258 if (sg->dma_address == DMA_MAPPING_ERROR) 259 - return 0; 259 + return -EIO; 260 260 sg->dma_length = sg->length; 261 261 } 262 262
+9 -9
arch/x86/kernel/amd_gart_64.c
··· 331 331 int i; 332 332 333 333 if (iommu_start == -1) 334 - return -1; 334 + return -ENOMEM; 335 335 336 336 for_each_sg(start, s, nelems, i) { 337 337 unsigned long pages, addr; ··· 380 380 enum dma_data_direction dir, unsigned long attrs) 381 381 { 382 382 struct scatterlist *s, *ps, *start_sg, *sgmap; 383 - int need = 0, nextneed, i, out, start; 383 + int need = 0, nextneed, i, out, start, ret; 384 384 unsigned long pages = 0; 385 385 unsigned int seg_size; 386 386 unsigned int max_seg_size; 387 387 388 388 if (nents == 0) 389 - return 0; 389 + return -EINVAL; 390 390 391 391 out = 0; 392 392 start = 0; ··· 414 414 if (!iommu_merge || !nextneed || !need || s->offset || 415 415 (s->length + seg_size > max_seg_size) || 416 416 (ps->offset + ps->length) % PAGE_SIZE) { 417 - if (dma_map_cont(dev, start_sg, i - start, 418 - sgmap, pages, need) < 0) 417 + ret = dma_map_cont(dev, start_sg, i - start, 418 + sgmap, pages, need); 419 + if (ret < 0) 419 420 goto error; 420 421 out++; 421 422 ··· 433 432 pages += iommu_num_pages(s->offset, s->length, PAGE_SIZE); 434 433 ps = s; 435 434 } 436 - if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0) 435 + ret = dma_map_cont(dev, start_sg, i - start, sgmap, pages, need); 436 + if (ret < 0) 437 437 goto error; 438 438 out++; 439 439 flush_gart(); ··· 458 456 panic("dma_map_sg: overflow on %lu pages\n", pages); 459 457 460 458 iommu_full(dev, pages << PAGE_SHIFT, dir); 461 - for_each_sg(sg, s, nents, i) 462 - s->dma_address = DMA_MAPPING_ERROR; 463 - return 0; 459 + return ret; 464 460 } 465 461 466 462 /* allocate and map a coherent mapping */
+15 -7
drivers/iommu/dma-iommu.c
··· 973 973 974 974 out_unmap: 975 975 iommu_dma_unmap_sg_swiotlb(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC); 976 - return 0; 976 + return -EIO; 977 977 } 978 978 979 979 /* ··· 994 994 dma_addr_t iova; 995 995 size_t iova_len = 0; 996 996 unsigned long mask = dma_get_seg_boundary(dev); 997 + ssize_t ret; 997 998 int i; 998 999 999 - if (static_branch_unlikely(&iommu_deferred_attach_enabled) && 1000 - iommu_deferred_attach(dev, domain)) 1001 - return 0; 1000 + if (static_branch_unlikely(&iommu_deferred_attach_enabled)) { 1001 + ret = iommu_deferred_attach(dev, domain); 1002 + goto out; 1003 + } 1002 1004 1003 1005 if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) 1004 1006 iommu_dma_sync_sg_for_device(dev, sg, nents, dir); ··· 1048 1046 } 1049 1047 1050 1048 iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev); 1051 - if (!iova) 1049 + if (!iova) { 1050 + ret = -ENOMEM; 1052 1051 goto out_restore_sg; 1052 + } 1053 1053 1054 1054 /* 1055 1055 * We'll leave any physical concatenation to the IOMMU driver's 1056 1056 * implementation - it knows better than we do. 1057 1057 */ 1058 - if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len) 1058 + ret = iommu_map_sg_atomic(domain, iova, sg, nents, prot); 1059 + if (ret < iova_len) 1059 1060 goto out_free_iova; 1060 1061 1061 1062 return __finalise_sg(dev, sg, nents, iova); ··· 1067 1062 iommu_dma_free_iova(cookie, iova, iova_len, NULL); 1068 1063 out_restore_sg: 1069 1064 __invalidate_sg(sg, nents); 1070 - return 0; 1065 + out: 1066 + if (ret != -ENOMEM) 1067 + return -EINVAL; 1068 + return ret; 1071 1069 } 1072 1070 1073 1071 static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+7 -8
drivers/iommu/iommu.c
··· 2570 2570 } 2571 2571 EXPORT_SYMBOL_GPL(iommu_unmap_fast); 2572 2572 2573 - static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova, 2574 - struct scatterlist *sg, unsigned int nents, int prot, 2575 - gfp_t gfp) 2573 + static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova, 2574 + struct scatterlist *sg, unsigned int nents, int prot, 2575 + gfp_t gfp) 2576 2576 { 2577 2577 const struct iommu_ops *ops = domain->ops; 2578 2578 size_t len = 0, mapped = 0; ··· 2613 2613 /* undo mappings already done */ 2614 2614 iommu_unmap(domain, iova, mapped); 2615 2615 2616 - return 0; 2617 - 2616 + return ret; 2618 2617 } 2619 2618 2620 - size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, 2621 - struct scatterlist *sg, unsigned int nents, int prot) 2619 + ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, 2620 + struct scatterlist *sg, unsigned int nents, int prot) 2622 2621 { 2623 2622 might_sleep(); 2624 2623 return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL); 2625 2624 } 2626 2625 EXPORT_SYMBOL_GPL(iommu_map_sg); 2627 2626 2628 - size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova, 2627 + ssize_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova, 2629 2628 struct scatterlist *sg, unsigned int nents, int prot) 2630 2629 { 2631 2630 return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
+1 -1
drivers/parisc/ccio-dma.c
··· 918 918 BUG_ON(!dev); 919 919 ioc = GET_IOC(dev); 920 920 if (!ioc) 921 - return 0; 921 + return -EINVAL; 922 922 923 923 DBG_RUN_SG("%s() START %d entries\n", __func__, nents); 924 924
+1 -1
drivers/parisc/sba_iommu.c
··· 947 947 948 948 ioc = GET_IOC(dev); 949 949 if (!ioc) 950 - return 0; 950 + return -EINVAL; 951 951 952 952 /* Fast path single entry scatterlists. */ 953 953 if (nents == 1) {
+1 -1
drivers/xen/swiotlb-xen.c
··· 509 509 out_unmap: 510 510 xen_swiotlb_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC); 511 511 sg_dma_len(sgl) = 0; 512 - return 0; 512 + return -EIO; 513 513 } 514 514 515 515 static void
+13 -10
include/linux/dma-map-ops.h
··· 41 41 size_t size, enum dma_data_direction dir, 42 42 unsigned long attrs); 43 43 /* 44 - * map_sg returns 0 on error and a value > 0 on success. 45 - * It should never return a value < 0. 44 + * map_sg should return a negative error code on error. See 45 + * dma_map_sgtable() for a list of appropriate error codes 46 + * and their meanings. 46 47 */ 47 48 int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, 48 49 enum dma_data_direction dir, unsigned long attrs); ··· 171 170 int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr); 172 171 int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma, 173 172 void *cpu_addr, size_t size, int *ret); 174 - 175 - void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, 176 - dma_addr_t *dma_handle); 177 - int dma_release_from_global_coherent(int order, void *vaddr); 178 - int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr, 179 - size_t size, int *ret); 180 - 181 173 #else 182 174 static inline int dma_declare_coherent_memory(struct device *dev, 183 175 phys_addr_t phys_addr, dma_addr_t device_addr, size_t size) ··· 180 186 #define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0) 181 187 #define dma_release_from_dev_coherent(dev, order, vaddr) (0) 182 188 #define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0) 189 + #endif /* CONFIG_DMA_DECLARE_COHERENT */ 183 190 191 + #ifdef CONFIG_DMA_GLOBAL_POOL 192 + void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, 193 + dma_addr_t *dma_handle); 194 + int dma_release_from_global_coherent(int order, void *vaddr); 195 + int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr, 196 + size_t size, int *ret); 197 + int dma_init_global_coherent(phys_addr_t phys_addr, size_t size); 198 + #else 184 199 static inline void *dma_alloc_from_global_coherent(struct device *dev, 185 200 ssize_t size, dma_addr_t *dma_handle) 186 201 { ··· 204 201 { 205 202 return 0; 206 203 } 207 - #endif /* CONFIG_DMA_DECLARE_COHERENT */ 204 + #endif /* CONFIG_DMA_GLOBAL_POOL */ 208 205 209 206 /* 210 207 * This is the actual return value from the ->alloc_noncontiguous method.
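The comment change above spells out the new contract for ->map_sg implementations: return the number of mapped entries on success and a negative errno on failure, never zero. A minimal sketch of an op honoring that contract follows; example_map_one() and example_unmap_sg() are invented helpers used only for illustration.

static int example_map_sg(struct device *dev, struct scatterlist *sgl,
                          int nents, enum dma_data_direction dir,
                          unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                /* example_map_one(): made-up per-segment mapping helper */
                sg->dma_address = example_map_one(dev, sg_phys(sg),
                                                  sg->length, dir);
                if (sg->dma_address == DMA_MAPPING_ERROR) {
                        /* undo what was mapped so far, then say why it failed */
                        example_unmap_sg(dev, sgl, i, dir, attrs);
                        return -EIO;
                }
                sg_dma_len(sg) = sg->length;
        }
        return nents;
}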
+12 -32
include/linux/dma-mapping.h
··· 105 105 unsigned long attrs); 106 106 void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size, 107 107 enum dma_data_direction dir, unsigned long attrs); 108 - int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, 109 - enum dma_data_direction dir, unsigned long attrs); 108 + unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, 109 + int nents, enum dma_data_direction dir, unsigned long attrs); 110 110 void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, 111 111 int nents, enum dma_data_direction dir, 112 112 unsigned long attrs); 113 + int dma_map_sgtable(struct device *dev, struct sg_table *sgt, 114 + enum dma_data_direction dir, unsigned long attrs); 113 115 dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr, 114 116 size_t size, enum dma_data_direction dir, unsigned long attrs); 115 117 void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size, ··· 166 164 size_t size, enum dma_data_direction dir, unsigned long attrs) 167 165 { 168 166 } 169 - static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, 170 - int nents, enum dma_data_direction dir, unsigned long attrs) 167 + static inline unsigned int dma_map_sg_attrs(struct device *dev, 168 + struct scatterlist *sg, int nents, enum dma_data_direction dir, 169 + unsigned long attrs) 171 170 { 172 171 return 0; 173 172 } ··· 176 173 struct scatterlist *sg, int nents, enum dma_data_direction dir, 177 174 unsigned long attrs) 178 175 { 176 + } 177 + static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt, 178 + enum dma_data_direction dir, unsigned long attrs) 179 + { 180 + return -EOPNOTSUPP; 179 181 } 180 182 static inline dma_addr_t dma_map_resource(struct device *dev, 181 183 phys_addr_t phys_addr, size_t size, enum dma_data_direction dir, ··· 349 341 enum dma_data_direction dir) 350 342 { 351 343 return dma_sync_single_for_device(dev, addr + offset, size, dir); 352 - } 353 - 354 - /** 355 - * dma_map_sgtable - Map the given buffer for DMA 356 - * @dev: The device for which to perform the DMA operation 357 - * @sgt: The sg_table object describing the buffer 358 - * @dir: DMA direction 359 - * @attrs: Optional DMA attributes for the map operation 360 - * 361 - * Maps a buffer described by a scatterlist stored in the given sg_table 362 - * object for the @dir DMA operation by the @dev device. After success the 363 - * ownership for the buffer is transferred to the DMA domain. One has to 364 - * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the 365 - * ownership of the buffer back to the CPU domain before touching the 366 - * buffer by the CPU. 367 - * 368 - * Returns 0 on success or -EINVAL on error during mapping the buffer. 369 - */ 370 - static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt, 371 - enum dma_data_direction dir, unsigned long attrs) 372 - { 373 - int nents; 374 - 375 - nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs); 376 - if (nents <= 0) 377 - return -EINVAL; 378 - sgt->nents = nents; 379 - return 0; 380 344 } 381 345 382 346 /**
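With dma_map_sg_attrs() (and therefore dma_map_sg()) now returning an unsigned int, callers of the legacy interface never see a negative value; zero remains the only failure indication there, and the detailed error code is reported only through dma_map_sgtable(). A brief hypothetical caller, for illustration only:

static int example_map(struct device *dev, struct scatterlist *sgl, int nents)
{
        /* 0 on failure, otherwise the number of mapped segments */
        unsigned int mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);

        if (!mapped)
                return -ENOMEM; /* no finer-grained error via this interface */

        /* ... hand 'mapped' segments to the hardware ... */

        dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
        return 0;
}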
+11 -11
include/linux/iommu.h
··· 414 414 extern size_t iommu_unmap_fast(struct iommu_domain *domain, 415 415 unsigned long iova, size_t size, 416 416 struct iommu_iotlb_gather *iotlb_gather); 417 - extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, 418 - struct scatterlist *sg,unsigned int nents, int prot); 419 - extern size_t iommu_map_sg_atomic(struct iommu_domain *domain, 420 - unsigned long iova, struct scatterlist *sg, 421 - unsigned int nents, int prot); 417 + extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, 418 + struct scatterlist *sg, unsigned int nents, int prot); 419 + extern ssize_t iommu_map_sg_atomic(struct iommu_domain *domain, 420 + unsigned long iova, struct scatterlist *sg, 421 + unsigned int nents, int prot); 422 422 extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova); 423 423 extern void iommu_set_fault_handler(struct iommu_domain *domain, 424 424 iommu_fault_handler_t handler, void *token); ··· 679 679 return 0; 680 680 } 681 681 682 - static inline size_t iommu_map_sg(struct iommu_domain *domain, 683 - unsigned long iova, struct scatterlist *sg, 684 - unsigned int nents, int prot) 682 + static inline ssize_t iommu_map_sg(struct iommu_domain *domain, 683 + unsigned long iova, struct scatterlist *sg, 684 + unsigned int nents, int prot) 685 685 { 686 - return 0; 686 + return -ENODEV; 687 687 } 688 688 689 - static inline size_t iommu_map_sg_atomic(struct iommu_domain *domain, 689 + static inline ssize_t iommu_map_sg_atomic(struct iommu_domain *domain, 690 690 unsigned long iova, struct scatterlist *sg, 691 691 unsigned int nents, int prot) 692 692 { 693 - return 0; 693 + return -ENODEV; 694 694 } 695 695 696 696 static inline void iommu_flush_iotlb_all(struct iommu_domain *domain)
+4
kernel/dma/Kconfig
··· 93 93 select GENERIC_ALLOCATOR 94 94 bool 95 95 96 + config DMA_GLOBAL_POOL 97 + select DMA_DECLARE_COHERENT 98 + bool 99 + 96 100 config DMA_REMAP 97 101 bool 98 102 depends on MMU
+75 -84
kernel/dma/coherent.c
··· 20 20 bool use_dev_dma_pfn_offset; 21 21 }; 22 22 23 - static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init; 24 - 25 23 static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev) 26 24 { 27 25 if (dev && dev->dma_mem) ··· 35 37 return mem->device_base; 36 38 } 37 39 38 - static int dma_init_coherent_memory(phys_addr_t phys_addr, 39 - dma_addr_t device_addr, size_t size, 40 - struct dma_coherent_mem **mem) 40 + static struct dma_coherent_mem *dma_init_coherent_memory(phys_addr_t phys_addr, 41 + dma_addr_t device_addr, size_t size, bool use_dma_pfn_offset) 41 42 { 42 - struct dma_coherent_mem *dma_mem = NULL; 43 - void *mem_base = NULL; 43 + struct dma_coherent_mem *dma_mem; 44 44 int pages = size >> PAGE_SHIFT; 45 45 int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long); 46 - int ret; 46 + void *mem_base; 47 47 48 - if (!size) { 49 - ret = -EINVAL; 50 - goto out; 51 - } 48 + if (!size) 49 + return ERR_PTR(-EINVAL); 52 50 53 51 mem_base = memremap(phys_addr, size, MEMREMAP_WC); 54 - if (!mem_base) { 55 - ret = -EINVAL; 56 - goto out; 57 - } 52 + if (!mem_base) 53 + return ERR_PTR(-EINVAL); 54 + 58 55 dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL); 59 - if (!dma_mem) { 60 - ret = -ENOMEM; 61 - goto out; 62 - } 56 + if (!dma_mem) 57 + goto out_unmap_membase; 63 58 dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL); 64 - if (!dma_mem->bitmap) { 65 - ret = -ENOMEM; 66 - goto out; 67 - } 59 + if (!dma_mem->bitmap) 60 + goto out_free_dma_mem; 68 61 69 62 dma_mem->virt_base = mem_base; 70 63 dma_mem->device_base = device_addr; 71 64 dma_mem->pfn_base = PFN_DOWN(phys_addr); 72 65 dma_mem->size = pages; 66 + dma_mem->use_dev_dma_pfn_offset = use_dma_pfn_offset; 73 67 spin_lock_init(&dma_mem->spinlock); 74 68 75 - *mem = dma_mem; 76 - return 0; 69 + return dma_mem; 77 70 78 - out: 71 + out_free_dma_mem: 79 72 kfree(dma_mem); 80 - if (mem_base) 81 - memunmap(mem_base); 82 - return ret; 73 + out_unmap_membase: 74 + memunmap(mem_base); 75 + pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %zd MiB\n", 76 + &phys_addr, size / SZ_1M); 77 + return ERR_PTR(-ENOMEM); 83 78 } 84 79 85 80 static void dma_release_coherent_memory(struct dma_coherent_mem *mem) ··· 121 130 struct dma_coherent_mem *mem; 122 131 int ret; 123 132 124 - ret = dma_init_coherent_memory(phys_addr, device_addr, size, &mem); 125 - if (ret) 126 - return ret; 133 + mem = dma_init_coherent_memory(phys_addr, device_addr, size, false); 134 + if (IS_ERR(mem)) 135 + return PTR_ERR(mem); 127 136 128 137 ret = dma_assign_coherent_memory(dev, mem); 129 138 if (ret) ··· 189 198 return 1; 190 199 } 191 200 192 - void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, 193 - dma_addr_t *dma_handle) 194 - { 195 - if (!dma_coherent_default_memory) 196 - return NULL; 197 - 198 - return __dma_alloc_from_coherent(dev, dma_coherent_default_memory, size, 199 - dma_handle); 200 - } 201 - 202 201 static int __dma_release_from_coherent(struct dma_coherent_mem *mem, 203 202 int order, void *vaddr) 204 203 { ··· 222 241 struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); 223 242 224 243 return __dma_release_from_coherent(mem, order, vaddr); 225 - } 226 - 227 - int dma_release_from_global_coherent(int order, void *vaddr) 228 - { 229 - if (!dma_coherent_default_memory) 230 - return 0; 231 - 232 - return __dma_release_from_coherent(dma_coherent_default_memory, order, 233 - vaddr); 234 244 } 235 245 236 246 static int __dma_mmap_from_coherent(struct 
dma_coherent_mem *mem, ··· 269 297 return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret); 270 298 } 271 299 300 + #ifdef CONFIG_DMA_GLOBAL_POOL 301 + static struct dma_coherent_mem *dma_coherent_default_memory __ro_after_init; 302 + 303 + void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, 304 + dma_addr_t *dma_handle) 305 + { 306 + if (!dma_coherent_default_memory) 307 + return NULL; 308 + 309 + return __dma_alloc_from_coherent(dev, dma_coherent_default_memory, size, 310 + dma_handle); 311 + } 312 + 313 + int dma_release_from_global_coherent(int order, void *vaddr) 314 + { 315 + if (!dma_coherent_default_memory) 316 + return 0; 317 + 318 + return __dma_release_from_coherent(dma_coherent_default_memory, order, 319 + vaddr); 320 + } 321 + 272 322 int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr, 273 323 size_t size, int *ret) 274 324 { ··· 301 307 vaddr, size, ret); 302 308 } 303 309 310 + int dma_init_global_coherent(phys_addr_t phys_addr, size_t size) 311 + { 312 + struct dma_coherent_mem *mem; 313 + 314 + mem = dma_init_coherent_memory(phys_addr, phys_addr, size, true); 315 + if (IS_ERR(mem)) 316 + return PTR_ERR(mem); 317 + dma_coherent_default_memory = mem; 318 + pr_info("DMA: default coherent area is set\n"); 319 + return 0; 320 + } 321 + #endif /* CONFIG_DMA_GLOBAL_POOL */ 322 + 304 323 /* 305 324 * Support for reserved memory regions defined in device tree 306 325 */ ··· 322 315 #include <linux/of_fdt.h> 323 316 #include <linux/of_reserved_mem.h> 324 317 318 + #ifdef CONFIG_DMA_GLOBAL_POOL 325 319 static struct reserved_mem *dma_reserved_default_memory __initdata; 320 + #endif 326 321 327 322 static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev) 328 323 { 329 - struct dma_coherent_mem *mem = rmem->priv; 330 - int ret; 324 + if (!rmem->priv) { 325 + struct dma_coherent_mem *mem; 331 326 332 - if (!mem) { 333 - ret = dma_init_coherent_memory(rmem->base, rmem->base, 334 - rmem->size, &mem); 335 - if (ret) { 336 - pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n", 337 - &rmem->base, (unsigned long)rmem->size / SZ_1M); 338 - return ret; 339 - } 327 + mem = dma_init_coherent_memory(rmem->base, rmem->base, 328 + rmem->size, true); 329 + if (IS_ERR(mem)) 330 + return PTR_ERR(mem); 331 + rmem->priv = mem; 340 332 } 341 - mem->use_dev_dma_pfn_offset = true; 342 - rmem->priv = mem; 343 - dma_assign_coherent_memory(dev, mem); 333 + dma_assign_coherent_memory(dev, rmem->priv); 344 334 return 0; 345 335 } 346 336 ··· 365 361 pr_err("Reserved memory: regions without no-map are not yet supported\n"); 366 362 return -EINVAL; 367 363 } 364 + #endif 368 365 366 + #ifdef CONFIG_DMA_GLOBAL_POOL 369 367 if (of_get_flat_dt_prop(node, "linux,dma-default", NULL)) { 370 368 WARN(dma_reserved_default_memory, 371 369 "Reserved memory: region for default DMA coherent area is redefined\n"); ··· 381 375 return 0; 382 376 } 383 377 378 + #ifdef CONFIG_DMA_GLOBAL_POOL 384 379 static int __init dma_init_reserved_memory(void) 385 380 { 386 - const struct reserved_mem_ops *ops; 387 - int ret; 388 - 389 381 if (!dma_reserved_default_memory) 390 382 return -ENOMEM; 391 - 392 - ops = dma_reserved_default_memory->ops; 393 - 394 - /* 395 - * We rely on rmem_dma_device_init() does not propagate error of 396 - * dma_assign_coherent_memory() for "NULL" device. 
397 - */ 398 - ret = ops->device_init(dma_reserved_default_memory, NULL); 399 - 400 - if (!ret) { 401 - dma_coherent_default_memory = dma_reserved_default_memory->priv; 402 - pr_info("DMA: default coherent area is set\n"); 403 - } 404 - 405 - return ret; 383 + return dma_init_global_coherent(dma_reserved_default_memory->base, 384 + dma_reserved_default_memory->size); 406 385 } 407 - 408 386 core_initcall(dma_init_reserved_memory); 387 + #endif /* CONFIG_DMA_GLOBAL_POOL */ 409 388 410 389 RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup); 411 390 #endif
+6 -15
kernel/dma/debug.c
··· 792 792 } 793 793 DEFINE_SHOW_ATTRIBUTE(dump); 794 794 795 - static void dma_debug_fs_init(void) 795 + static int __init dma_debug_fs_init(void) 796 796 { 797 797 struct dentry *dentry = debugfs_create_dir("dma-api", NULL); 798 798 ··· 805 805 debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries); 806 806 debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops); 807 807 debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops); 808 + 809 + return 0; 808 810 } 811 + core_initcall_sync(dma_debug_fs_init); 809 812 810 813 static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry) 811 814 { ··· 892 889 INIT_LIST_HEAD(&dma_entry_hash[i].list); 893 890 spin_lock_init(&dma_entry_hash[i].lock); 894 891 } 895 - 896 - dma_debug_fs_init(); 897 892 898 893 nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES); 899 894 for (i = 0; i < nr_pages; ++i) ··· 1065 1064 } 1066 1065 } 1067 1066 1068 - static inline bool overlap(void *addr, unsigned long len, void *start, void *end) 1069 - { 1070 - unsigned long a1 = (unsigned long)addr; 1071 - unsigned long b1 = a1 + len; 1072 - unsigned long a2 = (unsigned long)start; 1073 - unsigned long b2 = (unsigned long)end; 1074 - 1075 - return !(b1 <= a2 || a1 >= b2); 1076 - } 1077 - 1078 1067 static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len) 1079 1068 { 1080 - if (overlap(addr, len, _stext, _etext) || 1081 - overlap(addr, len, __start_rodata, __end_rodata)) 1069 + if (memory_intersects(_stext, _etext, addr, len) || 1070 + memory_intersects(__start_rodata, __end_rodata, addr, len)) 1082 1071 err_printk(dev, NULL, "device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len); 1083 1072 } 1084 1073
+16 -1
kernel/dma/direct.c
··· 156 156 157 157 if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) && 158 158 !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && 159 + !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) && 159 160 !dev_is_dma_coherent(dev)) 160 161 return arch_dma_alloc(dev, size, dma_handle, gfp, attrs); 162 + 163 + if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) && 164 + !dev_is_dma_coherent(dev)) 165 + return dma_alloc_from_global_coherent(dev, size, dma_handle); 161 166 162 167 /* 163 168 * Remapping or decrypting memory may block. If either is required and ··· 260 255 261 256 if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) && 262 257 !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && 258 + !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) && 263 259 !dev_is_dma_coherent(dev)) { 264 260 arch_dma_free(dev, size, cpu_addr, dma_addr, attrs); 261 + return; 262 + } 263 + 264 + if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) && 265 + !dev_is_dma_coherent(dev)) { 266 + if (!dma_release_from_global_coherent(page_order, cpu_addr)) 267 + WARN_ON_ONCE(1); 265 268 return; 266 269 } 267 270 ··· 424 411 425 412 out_unmap: 426 413 dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC); 427 - return 0; 414 + return -EIO; 428 415 } 429 416 430 417 dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr, ··· 474 461 vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs); 475 462 476 463 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) 464 + return ret; 465 + if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret)) 477 466 return ret; 478 467 479 468 if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
+1 -1
kernel/dma/dummy.c
··· 22 22 int nelems, enum dma_data_direction dir, 23 23 unsigned long attrs) 24 24 { 25 - return 0; 25 + return -EINVAL; 26 26 } 27 27 28 28 static int dma_dummy_supported(struct device *hwdev, u64 mask)
+72 -8
kernel/dma/mapping.c
··· 177 177 } 178 178 EXPORT_SYMBOL(dma_unmap_page_attrs); 179 179 180 - /* 181 - * dma_maps_sg_attrs returns 0 on error and > 0 on success. 182 - * It should never return a value < 0. 183 - */ 184 - int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, 185 - enum dma_data_direction dir, unsigned long attrs) 180 + static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, 181 + int nents, enum dma_data_direction dir, unsigned long attrs) 186 182 { 187 183 const struct dma_map_ops *ops = get_dma_ops(dev); 188 184 int ents; ··· 193 197 ents = dma_direct_map_sg(dev, sg, nents, dir, attrs); 194 198 else 195 199 ents = ops->map_sg(dev, sg, nents, dir, attrs); 196 - BUG_ON(ents < 0); 197 - debug_dma_map_sg(dev, sg, nents, ents, dir); 200 + 201 + if (ents > 0) 202 + debug_dma_map_sg(dev, sg, nents, ents, dir); 203 + else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM && 204 + ents != -EIO)) 205 + return -EIO; 198 206 199 207 return ents; 200 208 } 209 + 210 + /** 211 + * dma_map_sg_attrs - Map the given buffer for DMA 212 + * @dev: The device for which to perform the DMA operation 213 + * @sg: The sg_table object describing the buffer 214 + * @dir: DMA direction 215 + * @attrs: Optional DMA attributes for the map operation 216 + * 217 + * Maps a buffer described by a scatterlist passed in the sg argument with 218 + * nents segments for the @dir DMA operation by the @dev device. 219 + * 220 + * Returns the number of mapped entries (which can be less than nents) 221 + * on success. Zero is returned for any error. 222 + * 223 + * dma_unmap_sg_attrs() should be used to unmap the buffer with the 224 + * original sg and original nents (not the value returned by this funciton). 225 + */ 226 + unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, 227 + int nents, enum dma_data_direction dir, unsigned long attrs) 228 + { 229 + int ret; 230 + 231 + ret = __dma_map_sg_attrs(dev, sg, nents, dir, attrs); 232 + if (ret < 0) 233 + return 0; 234 + return ret; 235 + } 201 236 EXPORT_SYMBOL(dma_map_sg_attrs); 237 + 238 + /** 239 + * dma_map_sgtable - Map the given buffer for DMA 240 + * @dev: The device for which to perform the DMA operation 241 + * @sgt: The sg_table object describing the buffer 242 + * @dir: DMA direction 243 + * @attrs: Optional DMA attributes for the map operation 244 + * 245 + * Maps a buffer described by a scatterlist stored in the given sg_table 246 + * object for the @dir DMA operation by the @dev device. After success, the 247 + * ownership for the buffer is transferred to the DMA domain. One has to 248 + * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the 249 + * ownership of the buffer back to the CPU domain before touching the 250 + * buffer by the CPU. 251 + * 252 + * Returns 0 on success or a negative error code on error. The following 253 + * error codes are supported with the given meaning: 254 + * 255 + * -EINVAL - An invalid argument, unaligned access or other error 256 + * in usage. Will not succeed if retried. 257 + * -ENOMEM - Insufficient resources (like memory or IOVA space) to 258 + * complete the mapping. Should succeed if retried later. 259 + * -EIO - Legacy error code with an unknown meaning. eg. this is 260 + * returned if a lower level call returned DMA_MAPPING_ERROR. 
261 + */ 262 + int dma_map_sgtable(struct device *dev, struct sg_table *sgt, 263 + enum dma_data_direction dir, unsigned long attrs) 264 + { 265 + int nents; 266 + 267 + nents = __dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs); 268 + if (nents < 0) 269 + return nents; 270 + sgt->nents = nents; 271 + return 0; 272 + } 273 + EXPORT_SYMBOL_GPL(dma_map_sgtable); 202 274 203 275 void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, 204 276 int nents, enum dma_data_direction dir,