Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dma-mapping: remove arch_dma_mmap_pgprot

arch_dma_mmap_pgprot is used for two things:

1) to override the "normal" uncached page attributes for mapping
memory coherent to devices that can't snoop the CPU caches
2) to provide the special DMA_ATTR_WRITE_COMBINE semantics on older
arm systems and some mips platforms

Replace one with the pgprot_dmacoherent macro that is already provided
by arm and much simpler to use, and lift the DMA_ATTR_WRITE_COMBINE
handling to common code with an explicit arch opt-in.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org> # m68k
Acked-by: Paul Burton <paul.burton@mips.com> # mips

+35 -34
+1 -1
arch/arm/Kconfig
··· 8 8 select ARCH_HAS_DEBUG_VIRTUAL if MMU 9 9 select ARCH_HAS_DEVMEM_IS_ALLOWED 10 10 select ARCH_HAS_DMA_COHERENT_TO_PFN if SWIOTLB 11 - select ARCH_HAS_DMA_MMAP_PGPROT if SWIOTLB 11 + select ARCH_HAS_DMA_WRITE_COMBINE if !ARM_DMA_MEM_BUFFERABLE 12 12 select ARCH_HAS_ELF_RANDOMIZE 13 13 select ARCH_HAS_FORTIFY_SOURCE 14 14 select ARCH_HAS_KEEPINITRD
-6
arch/arm/mm/dma-mapping.c
··· 2402 2402 return dma_to_pfn(dev, dma_addr); 2403 2403 } 2404 2404 2405 - pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot, 2406 - unsigned long attrs) 2407 - { 2408 - return __get_dma_pgprot(attrs, prot); 2409 - } 2410 - 2411 2405 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, 2412 2406 gfp_t gfp, unsigned long attrs) 2413 2407 {
-1
arch/arm64/Kconfig
··· 13 13 select ARCH_HAS_DEBUG_VIRTUAL 14 14 select ARCH_HAS_DEVMEM_IS_ALLOWED 15 15 select ARCH_HAS_DMA_COHERENT_TO_PFN 16 - select ARCH_HAS_DMA_MMAP_PGPROT 17 16 select ARCH_HAS_DMA_PREP_COHERENT 18 17 select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI 19 18 select ARCH_HAS_ELF_RANDOMIZE
+4
arch/arm64/include/asm/pgtable.h
··· 435 435 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN) 436 436 #define pgprot_device(prot) \ 437 437 __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN) 438 + #define pgprot_dmacoherent(prot) \ 439 + __pgprot_modify(prot, PTE_ATTRINDX_MASK, \ 440 + PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN) 441 + 438 442 #define __HAVE_PHYS_MEM_ACCESS_PROT 439 443 struct file; 440 444 extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
-6
arch/arm64/mm/dma-mapping.c
··· 11 11 12 12 #include <asm/cacheflush.h> 13 13 14 - pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot, 15 - unsigned long attrs) 16 - { 17 - return pgprot_writecombine(prot); 18 - } 19 - 20 14 void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr, 21 15 size_t size, enum dma_data_direction dir) 22 16 {
-1
arch/m68k/Kconfig
··· 4 4 default y 5 5 select ARCH_32BIT_OFF_T 6 6 select ARCH_HAS_BINFMT_FLAT 7 - select ARCH_HAS_DMA_MMAP_PGPROT if MMU && !COLDFIRE 8 7 select ARCH_HAS_DMA_PREP_COHERENT if HAS_DMA && MMU && !COLDFIRE 9 8 select ARCH_HAS_SYNC_DMA_FOR_DEVICE if HAS_DMA 10 9 select ARCH_MIGHT_HAVE_PC_PARPORT if ISA
+3
arch/m68k/include/asm/pgtable_mm.h
··· 169 169 ? (__pgprot((pgprot_val(prot) & _CACHEMASK040) | _PAGE_NOCACHE_S)) \ 170 170 : (prot))) 171 171 172 + pgprot_t pgprot_dmacoherent(pgprot_t prot); 173 + #define pgprot_dmacoherent(prot) pgprot_dmacoherent(prot) 174 + 172 175 #endif /* CONFIG_COLDFIRE */ 173 176 #include <asm-generic/pgtable.h> 174 177 #endif /* !__ASSEMBLY__ */
+1 -2
arch/m68k/kernel/dma.c
··· 23 23 cache_push(page_to_phys(page), size); 24 24 } 25 25 26 - pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot, 27 - unsigned long attrs) 26 + pgprot_t pgprot_dmacoherent(pgprot_t prot) 28 27 { 29 28 if (CPU_IS_040_OR_060) { 30 29 pgprot_val(prot) &= ~_PAGE_CACHE040;
+1 -1
arch/mips/Kconfig
··· 1119 1119 1120 1120 config DMA_NONCOHERENT 1121 1121 bool 1122 - select ARCH_HAS_DMA_MMAP_PGPROT 1122 + select ARCH_HAS_DMA_WRITE_COMBINE 1123 1123 select ARCH_HAS_SYNC_DMA_FOR_DEVICE 1124 1124 select ARCH_HAS_UNCACHED_SEGMENT 1125 1125 select NEED_DMA_MAP_STATE
-8
arch/mips/mm/dma-noncoherent.c
··· 65 65 return page_to_pfn(virt_to_page(cached_kernel_address(cpu_addr))); 66 66 } 67 67 68 - pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot, 69 - unsigned long attrs) 70 - { 71 - if (attrs & DMA_ATTR_WRITE_COMBINE) 72 - return pgprot_writecombine(prot); 73 - return pgprot_noncached(prot); 74 - } 75 - 76 68 static inline void dma_sync_virt(void *addr, size_t size, 77 69 enum dma_data_direction dir) 78 70 {
+11 -2
include/linux/dma-noncoherent.h
··· 3 3 #define _LINUX_DMA_NONCOHERENT_H 1 4 4 5 5 #include <linux/dma-mapping.h> 6 + #include <asm/pgtable.h> 6 7 7 8 #ifdef CONFIG_ARCH_HAS_DMA_COHERENCE_H 8 9 #include <asm/dma-coherence.h> ··· 43 42 dma_addr_t dma_addr, unsigned long attrs); 44 43 long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr, 45 44 dma_addr_t dma_addr); 46 - pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot, 47 - unsigned long attrs); 48 45 49 46 #ifdef CONFIG_MMU 47 + /* 48 + * Page protection so that devices that can't snoop CPU caches can use the 49 + * memory coherently. We default to pgprot_noncached which is usually used 50 + * for ioremap as a safe bet, but architectures can override this with less 51 + * strict semantics if possible. 52 + */ 53 + #ifndef pgprot_dmacoherent 54 + #define pgprot_dmacoherent(prot) pgprot_noncached(prot) 55 + #endif 56 + 50 57 pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs); 51 58 #else 52 59 static inline pgprot_t dma_pgprot(struct device *dev, pgprot_t prot,
+9 -3
kernel/dma/Kconfig
··· 20 20 config ARCH_HAS_DMA_SET_MASK 21 21 bool 22 22 23 + # 24 + # Select this option if the architecture needs special handling for 25 + # DMA_ATTR_WRITE_COMBINE. Normally the "uncached" mapping should be what 26 + # people think of when saying write combine, so very few platforms should 27 + # need to enable this. 28 + # 29 + config ARCH_HAS_DMA_WRITE_COMBINE 30 + bool 31 + 23 32 config DMA_DECLARE_COHERENT 24 33 bool 25 34 ··· 52 43 bool 53 44 54 45 config ARCH_HAS_DMA_COHERENT_TO_PFN 55 - bool 56 - 57 - config ARCH_HAS_DMA_MMAP_PGPROT 58 46 bool 59 47 60 48 config ARCH_HAS_FORCE_DMA_UNENCRYPTED
+5 -3
kernel/dma/mapping.c
··· 161 161 (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) && 162 162 (attrs & DMA_ATTR_NON_CONSISTENT))) 163 163 return prot; 164 - if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_MMAP_PGPROT)) 165 - return arch_dma_mmap_pgprot(dev, prot, attrs); 166 - return pgprot_noncached(prot); 164 + #ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE 165 + if (attrs & DMA_ATTR_WRITE_COMBINE) 166 + return pgprot_writecombine(prot); 167 + #endif 168 + return pgprot_dmacoherent(prot); 167 169 } 168 170 #endif /* CONFIG_MMU */ 169 171