Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dma-mapping: remove CONFIG_DMA_REMAP

CONFIG_DMA_REMAP is used to build a few helpers around the core
vmalloc code, and to use them in case there is a highmem page in
dma-direct, and to make dma coherent allocations be able to use
non-contiguous pages allocations for DMA allocations in the dma-iommu
layer.

Right now it needs to be explicitly selected by architectures, and
is only done so by architectures that require remapping to deal
with devices that are not DMA coherent. Make it unconditional for
builds with CONFIG_MMU as it is very little extra code, but makes
it much more likely that large DMA allocations succeed on x86.

This fixes hot plugging a NVMe thunderbolt SSD for me, which tries
to allocate a 1MB buffer that is otherwise hard to obtain due to
memory fragmentation on a heavily used laptop.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>

+16 -29
+1 -1
arch/arm/Kconfig
··· 47 47 select DMA_DECLARE_COHERENT 48 48 select DMA_GLOBAL_POOL if !MMU 49 49 select DMA_OPS 50 - select DMA_REMAP if MMU 50 + select DMA_NONCOHERENT_MMAP if MMU 51 51 select EDAC_SUPPORT 52 52 select EDAC_ATOMIC_SCRUB 53 53 select GENERIC_ALLOCATOR
+1 -1
arch/xtensa/Kconfig
··· 17 17 select BUILDTIME_TABLE_SORT 18 18 select CLONE_BACKWARDS 19 19 select COMMON_CLK 20 - select DMA_REMAP if MMU 20 + select DMA_NONCOHERENT_MMAP if MMU 21 21 select GENERIC_ATOMIC64 22 22 select GENERIC_IRQ_SHOW 23 23 select GENERIC_PCI_IOMAP
+5 -9
drivers/iommu/dma-iommu.c
··· 852 852 return NULL; 853 853 } 854 854 855 - #ifdef CONFIG_DMA_REMAP 856 855 static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, 857 856 size_t size, enum dma_data_direction dir, gfp_t gfp, 858 857 unsigned long attrs) ··· 881 882 sg_free_table(&sh->sgt); 882 883 kfree(sh); 883 884 } 884 - #endif /* CONFIG_DMA_REMAP */ 885 885 886 886 static void iommu_dma_sync_single_for_cpu(struct device *dev, 887 887 dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) ··· 1274 1276 dma_free_from_pool(dev, cpu_addr, alloc_size)) 1275 1277 return; 1276 1278 1277 - if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) { 1279 + if (is_vmalloc_addr(cpu_addr)) { 1278 1280 /* 1279 1281 * If it the address is remapped, then it's either non-coherent 1280 1282 * or highmem CMA, or an iommu_dma_alloc_remap() construction. ··· 1316 1318 if (!page) 1317 1319 return NULL; 1318 1320 1319 - if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) { 1321 + if (!coherent || PageHighMem(page)) { 1320 1322 pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs); 1321 1323 1322 1324 cpu_addr = dma_common_contiguous_remap(page, alloc_size, ··· 1348 1350 1349 1351 gfp |= __GFP_ZERO; 1350 1352 1351 - if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) && 1353 + if (gfpflags_allow_blocking(gfp) && 1352 1354 !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) { 1353 1355 return iommu_dma_alloc_remap(dev, size, handle, gfp, 1354 1356 dma_pgprot(dev, PAGE_KERNEL, attrs), attrs); ··· 1389 1391 if (off >= nr_pages || vma_pages(vma) > nr_pages - off) 1390 1392 return -ENXIO; 1391 1393 1392 - if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) { 1394 + if (is_vmalloc_addr(cpu_addr)) { 1393 1395 struct page **pages = dma_common_find_pages(cpu_addr); 1394 1396 1395 1397 if (pages) ··· 1411 1413 struct page *page; 1412 1414 int ret; 1413 1415 1414 - if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) { 1416 + if (is_vmalloc_addr(cpu_addr)) { 1415 1417 struct page **pages = dma_common_find_pages(cpu_addr); 1416 1418 1417 1419 if (pages) { ··· 1443 1445 .free = iommu_dma_free, 1444 1446 .alloc_pages = dma_common_alloc_pages, 1445 1447 .free_pages = dma_common_free_pages, 1446 - #ifdef CONFIG_DMA_REMAP 1447 1448 .alloc_noncontiguous = iommu_dma_alloc_noncontiguous, 1448 1449 .free_noncontiguous = iommu_dma_free_noncontiguous, 1449 - #endif 1450 1450 .mmap = iommu_dma_mmap, 1451 1451 .get_sgtable = iommu_dma_get_sgtable, 1452 1452 .map_page = iommu_dma_map_page,
+1 -6
kernel/dma/Kconfig
··· 110 110 select DMA_DECLARE_COHERENT 111 111 bool 112 112 113 - config DMA_REMAP 114 - bool 115 - depends on MMU 116 - select DMA_NONCOHERENT_MMAP 117 - 118 113 config DMA_DIRECT_REMAP 119 114 bool 120 - select DMA_REMAP 121 115 select DMA_COHERENT_POOL 116 + select DMA_NONCOHERENT_MMAP 122 117 123 118 config DMA_CMA 124 119 bool "DMA Contiguous Memory Allocator"
+1 -1
kernel/dma/Makefile
··· 8 8 obj-$(CONFIG_DMA_API_DEBUG) += debug.o 9 9 obj-$(CONFIG_SWIOTLB) += swiotlb.o 10 10 obj-$(CONFIG_DMA_COHERENT_POOL) += pool.o 11 - obj-$(CONFIG_DMA_REMAP) += remap.o 11 + obj-$(CONFIG_MMU) += remap.o 12 12 obj-$(CONFIG_DMA_MAP_BENCHMARK) += map_benchmark.o
+7 -11
kernel/dma/direct.c
··· 265 265 page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO); 266 266 if (!page) 267 267 return NULL; 268 + 269 + /* 270 + * dma_alloc_contiguous can return highmem pages depending on a 271 + * combination the cma= arguments and per-arch setup. These need to be 272 + * remapped to return a kernel virtual address. 273 + */ 268 274 if (PageHighMem(page)) { 269 - /* 270 - * Depending on the cma= arguments and per-arch setup, 271 - * dma_alloc_contiguous could return highmem pages. 272 - * Without remapping there is no way to return them here, so 273 - * log an error and fail. 274 - */ 275 - if (!IS_ENABLED(CONFIG_DMA_REMAP)) { 276 - dev_info(dev, "Rejecting highmem page from CMA.\n"); 277 - goto out_free_pages; 278 - } 279 275 remap = true; 280 276 set_uncached = false; 281 277 } ··· 345 349 dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size))) 346 350 return; 347 351 348 - if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) { 352 + if (is_vmalloc_addr(cpu_addr)) { 349 353 vunmap(cpu_addr); 350 354 } else { 351 355 if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))