Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xtensa: support DMA to high memory

- don't bugcheck if high memory page is passed to xtensa_map_page;
- turn empty dcache flush macros into functions so that they could be
passed as function parameters;
- use kmap_atomic to map high memory pages for cache invalidation/
flushing performed by xtensa_sync_single_for_{cpu,device}.

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>

+39 -17
+8 -3
arch/xtensa/include/asm/cacheflush.h
··· 55 55 extern void __flush_invalidate_dcache_page(unsigned long); 56 56 extern void __flush_invalidate_dcache_range(unsigned long, unsigned long); 57 57 #else 58 - # define __flush_dcache_range(p,s) do { } while(0) 59 - # define __flush_dcache_page(p) do { } while(0) 60 - # define __flush_invalidate_dcache_page(p) __invalidate_dcache_page(p) 58 + static inline void __flush_dcache_page(unsigned long va) 59 + { 60 + } 61 + static inline void __flush_dcache_range(unsigned long va, unsigned long sz) 62 + { 63 + } 64 + # define __flush_invalidate_dcache_all() __invalidate_dcache_all() 65 + # define __flush_invalidate_dcache_page(p) __invalidate_dcache_page(p) 61 66 # define __flush_invalidate_dcache_range(p,s) __invalidate_dcache_range(p,s) 62 67 #endif 63 68
+31 -14
arch/xtensa/kernel/pci-dma.c
··· 15 15 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com> 16 16 */ 17 17 18 - #include <linux/types.h> 19 - #include <linux/mm.h> 20 - #include <linux/string.h> 21 - #include <linux/pci.h> 22 18 #include <linux/gfp.h> 19 + #include <linux/highmem.h> 20 + #include <linux/mm.h> 23 21 #include <linux/module.h> 24 - #include <asm/io.h> 22 + #include <linux/pci.h> 23 + #include <linux/string.h> 24 + #include <linux/types.h> 25 25 #include <asm/cacheflush.h> 26 + #include <asm/io.h> 26 27 27 28 void dma_cache_sync(struct device *dev, void *vaddr, size_t size, 28 29 enum dma_data_direction dir) ··· 48 47 } 49 48 EXPORT_SYMBOL(dma_cache_sync); 50 49 50 + static void do_cache_op(dma_addr_t dma_handle, size_t size, 51 + void (*fn)(unsigned long, unsigned long)) 52 + { 53 + unsigned long off = dma_handle & (PAGE_SIZE - 1); 54 + unsigned long pfn = PFN_DOWN(dma_handle); 55 + struct page *page = pfn_to_page(pfn); 56 + 57 + if (!PageHighMem(page)) 58 + fn((unsigned long)bus_to_virt(dma_handle), size); 59 + else 60 + while (size > 0) { 61 + size_t sz = min_t(size_t, size, PAGE_SIZE - off); 62 + void *vaddr = kmap_atomic(page); 63 + 64 + fn((unsigned long)vaddr + off, sz); 65 + kunmap_atomic(vaddr); 66 + off = 0; 67 + ++page; 68 + size -= sz; 69 + } 70 + } 71 + 51 72 static void xtensa_sync_single_for_cpu(struct device *dev, 52 73 dma_addr_t dma_handle, size_t size, 53 74 enum dma_data_direction dir) 54 75 { 55 - void *vaddr; 56 - 57 76 switch (dir) { 58 77 case DMA_BIDIRECTIONAL: 59 78 case DMA_FROM_DEVICE: 60 - vaddr = bus_to_virt(dma_handle); 61 - __invalidate_dcache_range((unsigned long)vaddr, size); 79 + do_cache_op(dma_handle, size, __invalidate_dcache_range); 62 80 break; 63 81 64 82 case DMA_NONE: ··· 93 73 dma_addr_t dma_handle, size_t size, 94 74 enum dma_data_direction dir) 95 75 { 96 - void *vaddr; 97 - 98 76 switch (dir) { 99 77 case DMA_BIDIRECTIONAL: 100 78 case DMA_TO_DEVICE: 101 - vaddr = bus_to_virt(dma_handle); 102 - __flush_dcache_range((unsigned long)vaddr, size); 79 + if (XCHAL_DCACHE_IS_WRITEBACK) 80 + do_cache_op(dma_handle, size, __flush_dcache_range); 103 81 break; 104 82 105 83 case DMA_NONE: ··· 189 171 { 190 172 dma_addr_t dma_handle = page_to_phys(page) + offset; 191 173 192 - BUG_ON(PageHighMem(page)); 193 174 xtensa_sync_single_for_device(dev, dma_handle, size, dir); 194 175 return dma_handle; 195 176 }