Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'dma-mapping-6.1-2022-10-10' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping updates from Christoph Hellwig:

- fix a regression in the ARM dma-direct conversion (Christoph Hellwig)

- use memcpy_{from,to}_page (Fabio M. De Francesco)

- cleanup the swiotlb MAINTAINERS entry (Lukas Bulwahn)

- make SG table pool allocation less fragile (Masahiro Yamada)

- don't panic on swiotlb initialization failure (Robin Murphy)

* tag 'dma-mapping-6.1-2022-10-10' of git://git.infradead.org/users/hch/dma-mapping:
ARM/dma-mapping: remove the dma_coherent member of struct dev_archdata
ARM/dma-mapping: don't override ->dma_coherent when set from a bus notifier
lib/sg_pool: change module_init(sg_pool_init) to subsys_initcall
MAINTAINERS: merge SWIOTLB SUBSYSTEM into DMA MAPPING HELPERS
swiotlb: don't panic!
swiotlb: replace kmap_atomic() with memcpy_{from,to}_page()

+38 -46
+5 -12
MAINTAINERS
··· 6171 6171 F: include/linux/dma-direct.h 6172 6172 F: include/linux/dma-mapping.h 6173 6173 F: include/linux/dma-map-ops.h 6174 + F: include/linux/swiotlb.h 6174 6175 F: kernel/dma/ 6175 6176 6176 6177 DMA MAPPING BENCHMARK ··· 19750 19749 F: Documentation/admin-guide/svga.rst 19751 19750 F: arch/x86/boot/video* 19752 19751 19753 - SWIOTLB SUBSYSTEM 19754 - M: Christoph Hellwig <hch@infradead.org> 19755 - L: iommu@lists.linux.dev 19756 - S: Supported 19757 - W: http://git.infradead.org/users/hch/dma-mapping.git 19758 - T: git git://git.infradead.org/users/hch/dma-mapping.git 19759 - F: arch/*/kernel/pci-swiotlb.c 19760 - F: include/linux/swiotlb.h 19761 - F: kernel/dma/swiotlb.c 19762 - 19763 19752 SWITCHDEV 19764 19753 M: Jiri Pirko <jiri@resnulli.us> 19765 19754 M: Ivan Vecera <ivecera@redhat.com> ··· 22466 22475 L: xen-devel@lists.xenproject.org (moderated for non-subscribers) 22467 22476 L: iommu@lists.linux.dev 22468 22477 S: Supported 22469 - F: arch/x86/xen/*swiotlb* 22470 - F: drivers/xen/*swiotlb* 22478 + F: arch/*/include/asm/xen/swiotlb-xen.h 22479 + F: drivers/xen/swiotlb-xen.c 22480 + F: include/xen/arm/swiotlb-xen.h 22481 + F: include/xen/swiotlb-xen.h 22471 22482 22472 22483 XFS FILESYSTEM 22473 22484 C: irc://irc.oftc.net/xfs
-1
arch/arm/include/asm/device.h
··· 9 9 #ifdef CONFIG_ARM_DMA_USE_IOMMU 10 10 struct dma_iommu_mapping *mapping; 11 11 #endif 12 - unsigned int dma_coherent:1; 13 12 unsigned int dma_ops_setup:1; 14 13 }; 15 14
+8 -2
arch/arm/mm/dma-mapping.c
··· 1769 1769 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, 1770 1770 const struct iommu_ops *iommu, bool coherent) 1771 1771 { 1772 - dev->archdata.dma_coherent = coherent; 1773 - dev->dma_coherent = coherent; 1772 + /* 1773 + * Due to legacy code that sets the ->dma_coherent flag from a bus 1774 + * notifier we can't just assign coherent to the ->dma_coherent flag 1775 + * here, but instead have to make sure we only set but never clear it 1776 + * for now. 1777 + */ 1778 + if (coherent) 1779 + dev->dma_coherent = true; 1774 1780 1775 1781 /* 1776 1782 * Don't override the dma_ops if they have already been set. Ideally
+23 -17
kernel/dma/swiotlb.c
··· 346 346 memblock_free(tlb, PAGE_ALIGN(bytes)); 347 347 348 348 nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE); 349 - if (nslabs < IO_TLB_MIN_SLABS) 350 - panic("%s: Failed to remap %zu bytes\n", 351 - __func__, bytes); 352 - goto retry; 349 + if (nslabs >= IO_TLB_MIN_SLABS) 350 + goto retry; 351 + 352 + pr_warn("%s: Failed to remap %zu bytes\n", __func__, bytes); 353 + return; 353 354 } 354 355 355 356 alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs)); 356 357 mem->slots = memblock_alloc(alloc_size, PAGE_SIZE); 357 - if (!mem->slots) 358 - panic("%s: Failed to allocate %zu bytes align=0x%lx\n", 359 - __func__, alloc_size, PAGE_SIZE); 358 + if (!mem->slots) { 359 + pr_warn("%s: Failed to allocate %zu bytes align=0x%lx\n", 360 + __func__, alloc_size, PAGE_SIZE); 361 + return; 362 + } 360 363 361 364 mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area), 362 365 default_nareas), SMP_CACHE_BYTES); 363 - if (!mem->areas) 364 - panic("%s: Failed to allocate mem->areas.\n", __func__); 366 + if (!mem->areas) { 367 + pr_warn("%s: Failed to allocate mem->areas.\n", __func__); 368 + return; 369 + } 365 370 366 371 swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false, 367 372 default_nareas); ··· 550 545 } 551 546 552 547 if (PageHighMem(pfn_to_page(pfn))) { 553 - /* The buffer does not have a mapping. Map it in and copy */ 554 548 unsigned int offset = orig_addr & ~PAGE_MASK; 555 - char *buffer; 549 + struct page *page; 556 550 unsigned int sz = 0; 557 551 unsigned long flags;
+2 -14
lib/sg_pool.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0-only 2 - #include <linux/module.h> 2 + #include <linux/init.h> 3 3 #include <linux/scatterlist.h> 4 4 #include <linux/mempool.h> 5 5 #include <linux/slab.h> ··· 177 177 return -ENOMEM; 178 178 } 179 179 180 - static __exit void sg_pool_exit(void) 181 - { 182 - int i; 183 - 184 - for (i = 0; i < SG_MEMPOOL_NR; i++) { 185 - struct sg_pool *sgp = sg_pools + i; 186 - mempool_destroy(sgp->pool); 187 - kmem_cache_destroy(sgp->slab); 188 - } 189 - } 190 - 191 - module_init(sg_pool_init); 192 - module_exit(sg_pool_exit); 180 + subsys_initcall(sg_pool_init);