Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xen/arm: use dma-noncoherent.h calls for xen-swiotlb cache maintenance

Copy the arm64 code that uses the dma-direct/swiotlb helpers for DMA
non-coherent devices.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>

+28 -75
-3
arch/arm/include/asm/device.h
··· 15 15 #ifdef CONFIG_ARM_DMA_USE_IOMMU 16 16 struct dma_iommu_mapping *mapping; 17 17 #endif 18 - #ifdef CONFIG_XEN 19 - const struct dma_map_ops *dev_dma_ops; 20 - #endif 21 18 unsigned int dma_coherent:1; 22 19 unsigned int dma_ops_setup:1; 23 20 };
+27 -45
arch/arm/include/asm/xen/page-coherent.h
··· 6 6 #include <asm/page.h> 7 7 #include <xen/arm/page-coherent.h> 8 8 9 - static inline const struct dma_map_ops *xen_get_dma_ops(struct device *dev) 10 - { 11 - if (dev && dev->archdata.dev_dma_ops) 12 - return dev->archdata.dev_dma_ops; 13 - return get_arch_dma_ops(NULL); 14 - } 15 - 16 9 static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, 17 10 dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs) 18 11 { 19 - return xen_get_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs); 12 + return dma_direct_alloc(hwdev, size, dma_handle, flags, attrs); 20 13 } 21 14 22 15 static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, 23 16 void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs) 24 17 { 25 - xen_get_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs); 18 + dma_direct_free(hwdev, size, cpu_addr, dma_handle, attrs); 19 + } 20 + 21 + static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, 22 + dma_addr_t handle, size_t size, enum dma_data_direction dir) 23 + { 24 + unsigned long pfn = PFN_DOWN(handle); 25 + 26 + if (pfn_valid(pfn)) 27 + dma_direct_sync_single_for_cpu(hwdev, handle, size, dir); 28 + else 29 + __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir); 30 + } 31 + 32 + static inline void xen_dma_sync_single_for_device(struct device *hwdev, 33 + dma_addr_t handle, size_t size, enum dma_data_direction dir) 34 + { 35 + unsigned long pfn = PFN_DOWN(handle); 36 + if (pfn_valid(pfn)) 37 + dma_direct_sync_single_for_device(hwdev, handle, size, dir); 38 + else 39 + __xen_dma_sync_single_for_device(hwdev, handle, size, dir); 26 40 } 27 41 28 42 static inline void xen_dma_map_page(struct device *hwdev, struct page *page, ··· 50 36 bool local = (page_pfn <= dev_pfn) && 51 37 (dev_pfn - page_pfn < compound_pages); 52 38 53 - /* 54 - * Dom0 is mapped 1:1, while the Linux page can span across 55 - * multiple Xen pages, it's not possible for it to contain a 56 - * mix of 
local and foreign Xen pages. So if the first xen_pfn 57 - * == mfn the page is local otherwise it's a foreign page 58 - * grant-mapped in dom0. If the page is local we can safely 59 - * call the native dma_ops function, otherwise we call the xen 60 - * specific function. 61 - */ 62 39 if (local) 63 - xen_get_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); 40 + dma_direct_map_page(hwdev, page, offset, size, dir, attrs); 64 41 else 65 42 __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs); 66 43 } ··· 68 63 * safely call the native dma_ops function, otherwise we call the xen 69 64 * specific function. 70 65 */ 71 - if (pfn_valid(pfn)) { 72 - if (xen_get_dma_ops(hwdev)->unmap_page) 73 - xen_get_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs); 74 - } else 66 + if (pfn_valid(pfn)) 67 + dma_direct_unmap_page(hwdev, handle, size, dir, attrs); 68 + else 75 69 __xen_dma_unmap_page(hwdev, handle, size, dir, attrs); 76 - } 77 - 78 - static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, 79 - dma_addr_t handle, size_t size, enum dma_data_direction dir) 80 - { 81 - unsigned long pfn = PFN_DOWN(handle); 82 - if (pfn_valid(pfn)) { 83 - if (xen_get_dma_ops(hwdev)->sync_single_for_cpu) 84 - xen_get_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir); 85 - } else 86 - __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir); 87 - } 88 - 89 - static inline void xen_dma_sync_single_for_device(struct device *hwdev, 90 - dma_addr_t handle, size_t size, enum dma_data_direction dir) 91 - { 92 - unsigned long pfn = PFN_DOWN(handle); 93 - if (pfn_valid(pfn)) { 94 - if (xen_get_dma_ops(hwdev)->sync_single_for_device) 95 - xen_get_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir); 96 - } else 97 - __xen_dma_sync_single_for_device(hwdev, handle, size, dir); 98 70 } 99 71 100 72 #endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
+1 -7
arch/arm/mm/dma-mapping.c
··· 1105 1105 * 32-bit DMA. 1106 1106 * Use the generic dma-direct / swiotlb ops code in that case, as that 1107 1107 * handles bounce buffering for us. 1108 - * 1109 - * Note: this checks CONFIG_ARM_LPAE instead of CONFIG_SWIOTLB as the 1110 - * latter is also selected by the Xen code, but that code for now relies 1111 - * on non-NULL dev_dma_ops. To be cleaned up later. 1112 1108 */ 1113 1109 if (IS_ENABLED(CONFIG_ARM_LPAE)) 1114 1110 return NULL; ··· 2314 2318 set_dma_ops(dev, dma_ops); 2315 2319 2316 2320 #ifdef CONFIG_XEN 2317 - if (xen_initial_domain()) { 2318 - dev->archdata.dev_dma_ops = dev->dma_ops; 2321 + if (xen_initial_domain()) 2319 2322 dev->dma_ops = xen_dma_ops; 2320 - } 2321 2323 #endif 2322 2324 dev->archdata.dma_ops_setup = true; 2323 2325 }
-20
drivers/xen/swiotlb-xen.c
··· 557 557 void *cpu_addr, dma_addr_t dma_addr, size_t size, 558 558 unsigned long attrs) 559 559 { 560 - #ifdef CONFIG_ARM 561 - if (xen_get_dma_ops(dev)->mmap) 562 - return xen_get_dma_ops(dev)->mmap(dev, vma, cpu_addr, 563 - dma_addr, size, attrs); 564 - #endif 565 560 return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs); 566 561 } 567 562 ··· 569 574 void *cpu_addr, dma_addr_t handle, size_t size, 570 575 unsigned long attrs) 571 576 { 572 - #ifdef CONFIG_ARM 573 - if (xen_get_dma_ops(dev)->get_sgtable) { 574 - #if 0 575 - /* 576 - * This check verifies that the page belongs to the current domain and 577 - * is not one mapped from another domain. 578 - * This check is for debug only, and should not go to production build 579 - */ 580 - unsigned long bfn = PHYS_PFN(dma_to_phys(dev, handle)); 581 - BUG_ON (!page_is_ram(bfn)); 582 - #endif 583 - return xen_get_dma_ops(dev)->get_sgtable(dev, sgt, cpu_addr, 584 - handle, size, attrs); 585 - } 586 - #endif 587 577 return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size, attrs); 588 578 } 589 579