Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dma-direct: make uncached_kernel_address more general

Rename the symbol to arch_dma_set_uncached, and pass a size to it as
well as allow an error return. That will allow reusing this hook for
in-place pagetable remapping.

As the in-place remap doesn't always require an explicit cache flush,
also detangle ARCH_HAS_DMA_PREP_COHERENT from ARCH_HAS_DMA_SET_UNCACHED.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>

+21 -17
+4 -4
arch/Kconfig
··· 248 248 bool 249 249 250 250 # 251 - # Select if arch has an uncached kernel segment and provides the 252 - # uncached_kernel_address symbol to use it 251 + # Select if the architecture provides the arch_dma_set_uncached symbol to 252 + # either provide an uncached segment alias for a DMA allocation, or 253 + # to remap the page tables in place. 253 254 # 254 - config ARCH_HAS_UNCACHED_SEGMENT 255 - select ARCH_HAS_DMA_PREP_COHERENT 255 + config ARCH_HAS_DMA_SET_UNCACHED 256 256 bool 257 257 258 258 # Select if arch init_task must go in the __init_task_data section
+1 -1
arch/microblaze/Kconfig
··· 8 8 select ARCH_HAS_GCOV_PROFILE_ALL 9 9 select ARCH_HAS_SYNC_DMA_FOR_CPU 10 10 select ARCH_HAS_SYNC_DMA_FOR_DEVICE 11 - select ARCH_HAS_UNCACHED_SEGMENT if !MMU 11 + select ARCH_HAS_DMA_SET_UNCACHED if !MMU 12 12 select ARCH_MIGHT_HAVE_PC_PARPORT 13 13 select ARCH_WANT_IPC_PARSE_VERSION 14 14 select BUILDTIME_TABLE_SORT
+1 -1
arch/microblaze/mm/consistent.c
··· 40 40 #define UNCACHED_SHADOW_MASK 0 41 41 #endif /* CONFIG_XILINX_UNCACHED_SHADOW */ 42 42 43 - void *uncached_kernel_address(void *ptr) 43 + void *arch_dma_set_uncached(void *ptr, size_t size) 44 44 { 45 45 unsigned long addr = (unsigned long)ptr; 46 46
+2 -1
arch/mips/Kconfig
··· 1187 1187 # significant advantages. 1188 1188 # 1189 1189 select ARCH_HAS_DMA_WRITE_COMBINE 1190 + select ARCH_HAS_DMA_PREP_COHERENT 1190 1191 select ARCH_HAS_SYNC_DMA_FOR_DEVICE 1191 - select ARCH_HAS_UNCACHED_SEGMENT 1192 + select ARCH_HAS_DMA_SET_UNCACHED 1192 1193 select DMA_NONCOHERENT_MMAP 1193 1194 select DMA_NONCOHERENT_CACHE_SYNC 1194 1195 select NEED_DMA_MAP_STATE
+1 -1
arch/mips/mm/dma-noncoherent.c
··· 49 49 dma_cache_wback_inv((unsigned long)page_address(page), size); 50 50 } 51 51 52 - void *uncached_kernel_address(void *addr) 52 + void *arch_dma_set_uncached(void *addr, size_t size) 53 53 { 54 54 return (void *)(__pa(addr) + UNCAC_BASE); 55 55 }
+2 -1
arch/nios2/Kconfig
··· 2 2 config NIOS2 3 3 def_bool y 4 4 select ARCH_32BIT_OFF_T 5 + select ARCH_HAS_DMA_PREP_COHERENT 5 6 select ARCH_HAS_SYNC_DMA_FOR_CPU 6 7 select ARCH_HAS_SYNC_DMA_FOR_DEVICE 7 - select ARCH_HAS_UNCACHED_SEGMENT 8 + select ARCH_HAS_DMA_SET_UNCACHED 8 9 select ARCH_NO_SWAP 9 10 select TIMER_OF 10 11 select GENERIC_ATOMIC64
+1 -1
arch/nios2/mm/dma-mapping.c
··· 67 67 flush_dcache_range(start, start + size); 68 68 } 69 69 70 - void *uncached_kernel_address(void *ptr) 70 + void *arch_dma_set_uncached(void *ptr, size_t size) 71 71 { 72 72 unsigned long addr = (unsigned long)ptr; 73 73
+1 -1
arch/xtensa/Kconfig
··· 6 6 select ARCH_HAS_DMA_PREP_COHERENT if MMU 7 7 select ARCH_HAS_SYNC_DMA_FOR_CPU if MMU 8 8 select ARCH_HAS_SYNC_DMA_FOR_DEVICE if MMU 9 - select ARCH_HAS_UNCACHED_SEGMENT if MMU 9 + select ARCH_HAS_DMA_SET_UNCACHED if MMU 10 10 select ARCH_USE_QUEUED_RWLOCKS 11 11 select ARCH_USE_QUEUED_SPINLOCKS 12 12 select ARCH_WANT_FRAME_POINTERS
+1 -1
arch/xtensa/kernel/pci-dma.c
··· 92 92 * coherent DMA memory operations when CONFIG_MMU is not enabled. 93 93 */ 94 94 #ifdef CONFIG_MMU 95 - void *uncached_kernel_address(void *p) 95 + void *arch_dma_set_uncached(void *p, size_t size) 96 96 { 97 97 return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR; 98 98 }
+1 -1
include/linux/dma-noncoherent.h
··· 108 108 } 109 109 #endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */ 110 110 111 - void *uncached_kernel_address(void *addr); 111 + void *arch_dma_set_uncached(void *addr, size_t size); 112 112 113 113 #endif /* _LINUX_DMA_NONCOHERENT_H */
+6 -4
kernel/dma/direct.c
··· 192 192 193 193 memset(ret, 0, size); 194 194 195 - if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) && 195 + if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) && 196 196 dma_alloc_need_uncached(dev, attrs)) { 197 197 arch_dma_prep_coherent(page, size); 198 - ret = uncached_kernel_address(ret); 198 + ret = arch_dma_set_uncached(ret, size); 199 + if (IS_ERR(ret)) 200 + goto out_free_pages; 199 201 } 200 202 done: 201 203 if (force_dma_unencrypted(dev)) ··· 238 236 void *dma_direct_alloc(struct device *dev, size_t size, 239 237 dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) 240 238 { 241 - if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) && 239 + if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) && 242 240 !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && 243 241 dma_alloc_need_uncached(dev, attrs)) 244 242 return arch_dma_alloc(dev, size, dma_handle, gfp, attrs); ··· 248 246 void dma_direct_free(struct device *dev, size_t size, 249 247 void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs) 250 248 { 251 - if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) && 249 + if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) && 252 250 !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && 253 251 dma_alloc_need_uncached(dev, attrs)) 254 252 arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);