Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dma-mapping: fix page attributes for dma_mmap_*

All the way back to introducing dma_common_mmap we've defaulted to marking
the pages as uncached.  But this is wrong for DMA coherent devices.
Later on DMA_ATTR_WRITE_COMBINE also got incorrect treatment, as that
flag is only treated specially on the alloc side for non-coherent devices.

Introduce a new dma_pgprot helper that deals with the check for coherent
devices so that only the remapping cases ever reach arch_dma_mmap_pgprot
and we thus ensure no aliasing of page attributes happens, which makes
the powerpc version of arch_dma_mmap_pgprot obsolete and simplifies the
remaining ones.

Note that this means arch_dma_mmap_pgprot is a bit misnamed now, but
we'll phase it out soon.

Fixes: 64ccc9c033c6 ("common: dma-mapping: add support for generic dma_mmap_* calls")
Reported-by: Shawn Anastasio <shawn@anastas.io>
Reported-by: Gavin Li <git@thegavinli.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Catalin Marinas <catalin.marinas@arm.com> # arm64

+34 -35
+1 -3
arch/arm/mm/dma-mapping.c
··· 2405 2405 pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot, 2406 2406 unsigned long attrs) 2407 2407 { 2408 - if (!dev_is_dma_coherent(dev)) 2409 - return __get_dma_pgprot(attrs, prot); 2410 - return prot; 2408 + return __get_dma_pgprot(attrs, prot); 2411 2409 } 2412 2410 2413 2411 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+1 -3
arch/arm64/mm/dma-mapping.c
··· 14 14 pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot, 15 15 unsigned long attrs) 16 16 { 17 - if (!dev_is_dma_coherent(dev) || (attrs & DMA_ATTR_WRITE_COMBINE)) 18 - return pgprot_writecombine(prot); 19 - return prot; 17 + return pgprot_writecombine(prot); 20 18 } 21 19 22 20 void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
-1
arch/powerpc/Kconfig
··· 121 121 select ARCH_32BIT_OFF_T if PPC32 122 122 select ARCH_HAS_DEBUG_VIRTUAL 123 123 select ARCH_HAS_DEVMEM_IS_ALLOWED 124 - select ARCH_HAS_DMA_MMAP_PGPROT 125 124 select ARCH_HAS_ELF_RANDOMIZE 126 125 select ARCH_HAS_FORTIFY_SOURCE 127 126 select ARCH_HAS_GCOV_PROFILE_ALL
+1 -2
arch/powerpc/kernel/Makefile
··· 49 49 signal.o sysfs.o cacheinfo.o time.o \ 50 50 prom.o traps.o setup-common.o \ 51 51 udbg.o misc.o io.o misc_$(BITS).o \ 52 - of_platform.o prom_parse.o \ 53 - dma-common.o 52 + of_platform.o prom_parse.o 54 53 obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \ 55 54 signal_64.o ptrace32.o \ 56 55 paca.o nvram_64.o firmware.o
-17
arch/powerpc/kernel/dma-common.c
··· 1 - // SPDX-License-Identifier: GPL-2.0-or-later 2 - /* 3 - * Contains common dma routines for all powerpc platforms. 4 - * 5 - * Copyright (C) 2019 Shawn Anastasio. 6 - */ 7 - 8 - #include <linux/mm.h> 9 - #include <linux/dma-noncoherent.h> 10 - 11 - pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot, 12 - unsigned long attrs) 13 - { 14 - if (!dev_is_dma_coherent(dev)) 15 - return pgprot_noncached(prot); 16 - return prot; 17 - }
+3 -3
drivers/iommu/dma-iommu.c
··· 574 574 struct iova_domain *iovad = &cookie->iovad; 575 575 bool coherent = dev_is_dma_coherent(dev); 576 576 int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs); 577 - pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs); 577 + pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs); 578 578 unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap; 579 579 struct page **pages; 580 580 struct sg_table sgt; ··· 975 975 return NULL; 976 976 977 977 if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) { 978 - pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs); 978 + pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs); 979 979 980 980 cpu_addr = dma_common_contiguous_remap(page, alloc_size, 981 981 VM_USERMAP, prot, __builtin_return_address(0)); ··· 1035 1035 unsigned long pfn, off = vma->vm_pgoff; 1036 1036 int ret; 1037 1037 1038 - vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs); 1038 + vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs); 1039 1039 1040 1040 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) 1041 1041 return ret;
+9 -4
include/linux/dma-noncoherent.h
··· 42 42 dma_addr_t dma_addr, unsigned long attrs); 43 43 long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr, 44 44 dma_addr_t dma_addr); 45 - 46 - #ifdef CONFIG_ARCH_HAS_DMA_MMAP_PGPROT 47 45 pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot, 48 46 unsigned long attrs); 47 + 48 + #ifdef CONFIG_MMU 49 + pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs); 49 50 #else 50 - # define arch_dma_mmap_pgprot(dev, prot, attrs) pgprot_noncached(prot) 51 - #endif 51 + static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, 52 + unsigned long attrs) 53 + { 54 + return prot; /* no protection bits supported without page tables */ 55 + } 56 + #endif /* CONFIG_MMU */ 52 57 53 58 #ifdef CONFIG_DMA_NONCOHERENT_CACHE_SYNC 54 59 void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+18 -1
kernel/dma/mapping.c
··· 150 150 } 151 151 EXPORT_SYMBOL(dma_get_sgtable_attrs); 152 152 153 + #ifdef CONFIG_MMU 154 + /* 155 + * Return the page attributes used for mapping dma_alloc_* memory, either in 156 + * kernel space if remapping is needed, or to userspace through dma_mmap_*. 157 + */ 158 + pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs) 159 + { 160 + if (dev_is_dma_coherent(dev) || 161 + (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) && 162 + (attrs & DMA_ATTR_NON_CONSISTENT))) 163 + return prot; 164 + if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_MMAP_PGPROT)) 165 + return arch_dma_mmap_pgprot(dev, prot, attrs); 166 + return pgprot_noncached(prot); 167 + } 168 + #endif /* CONFIG_MMU */ 169 + 153 170 /* 154 171 * Create userspace mapping for the DMA-coherent memory. 155 172 */ ··· 181 164 unsigned long pfn; 182 165 int ret = -ENXIO; 183 166 184 - vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs); 167 + vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs); 185 168 186 169 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) 187 170 return ret;
+1 -1
kernel/dma/remap.c
··· 218 218 219 219 /* create a coherent mapping */ 220 220 ret = dma_common_contiguous_remap(page, size, VM_USERMAP, 221 - arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs), 221 + dma_pgprot(dev, PAGE_KERNEL, attrs), 222 222 __builtin_return_address(0)); 223 223 if (!ret) { 224 224 __dma_direct_free_pages(dev, size, page);