Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

xen/arm: consolidate page-coherent.h

Shared the duplicate arm/arm64 code in include/xen/arm/page-coherent.h.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
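
With the duplicated helpers moved into the shared header, each per-arch header reduces to a single include. The net content of both the arch/arm and arch/arm64 copies after this patch (taken directly from the diffs below):

/* SPDX-License-Identifier: GPL-2.0 */
#include <xen/arm/page-coherent.h>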

3 files changed, 80 insertions(+), 150 deletions(-)
arch/arm/include/asm/xen/page-coherent.h (-75)
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
-#define _ASM_ARM_XEN_PAGE_COHERENT_H
-
-#include <linux/dma-mapping.h>
-#include <asm/page.h>
 #include <xen/arm/page-coherent.h>
-
-static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
-		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
-{
-	return dma_direct_alloc(hwdev, size, dma_handle, flags, attrs);
-}
-
-static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
-		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
-{
-	dma_direct_free(hwdev, size, cpu_addr, dma_handle, attrs);
-}
-
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	unsigned long pfn = PFN_DOWN(handle);
-
-	if (pfn_valid(pfn))
-		dma_direct_sync_single_for_cpu(hwdev, handle, size, dir);
-	else
-		__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
-}
-
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	unsigned long pfn = PFN_DOWN(handle);
-	if (pfn_valid(pfn))
-		dma_direct_sync_single_for_device(hwdev, handle, size, dir);
-	else
-		__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
-}
-
-static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
-		dma_addr_t dev_addr, unsigned long offset, size_t size,
-		enum dma_data_direction dir, unsigned long attrs)
-{
-	unsigned long page_pfn = page_to_xen_pfn(page);
-	unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
-	unsigned long compound_pages =
-		(1<<compound_order(page)) * XEN_PFN_PER_PAGE;
-	bool local = (page_pfn <= dev_pfn) &&
-		(dev_pfn - page_pfn < compound_pages);
-
-	if (local)
-		dma_direct_map_page(hwdev, page, offset, size, dir, attrs);
-	else
-		__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
-}
-
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-	unsigned long pfn = PFN_DOWN(handle);
-	/*
-	 * Dom0 is mapped 1:1, while the Linux page can be spanned accross
-	 * multiple Xen page, it's not possible to have a mix of local and
-	 * foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a
-	 * foreign mfn will always return false. If the page is local we can
-	 * safely call the native dma_ops function, otherwise we call the xen
-	 * specific function.
-	 */
-	if (pfn_valid(pfn))
-		dma_direct_unmap_page(hwdev, handle, size, dir, attrs);
-	else
-		__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
-}
-
-#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
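
The locality test removed above (and re-added verbatim to the shared header below) checks whether the device pfn falls inside the span of Xen pfns covered by the possibly compound Linux page. A standalone restatement of that predicate, with the kernel macros replaced by plain parameters for illustration (not code from the patch):

#include <stdbool.h>

/*
 * compound_xen_pfns stands in for (1 << compound_order(page)) *
 * XEN_PFN_PER_PAGE, i.e. the number of 4 KiB Xen pfns spanned by the
 * compound Linux page; e.g. 16 for an order-0 page with a 64 KiB
 * kernel page size.
 */
static bool dev_pfn_is_local(unsigned long page_pfn, unsigned long dev_pfn,
			     unsigned long compound_xen_pfns)
{
	return page_pfn <= dev_pfn && dev_pfn - page_pfn < compound_xen_pfns;
}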
arch/arm64/include/asm/xen/page-coherent.h (-75)
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_ARM64_XEN_PAGE_COHERENT_H
-#define _ASM_ARM64_XEN_PAGE_COHERENT_H
-
-#include <linux/dma-mapping.h>
-#include <asm/page.h>
 #include <xen/arm/page-coherent.h>
-
-static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
-		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
-{
-	return dma_direct_alloc(hwdev, size, dma_handle, flags, attrs);
-}
-
-static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
-		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
-{
-	dma_direct_free(hwdev, size, cpu_addr, dma_handle, attrs);
-}
-
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	unsigned long pfn = PFN_DOWN(handle);
-
-	if (pfn_valid(pfn))
-		dma_direct_sync_single_for_cpu(hwdev, handle, size, dir);
-	else
-		__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
-}
-
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	unsigned long pfn = PFN_DOWN(handle);
-	if (pfn_valid(pfn))
-		dma_direct_sync_single_for_device(hwdev, handle, size, dir);
-	else
-		__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
-}
-
-static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
-		dma_addr_t dev_addr, unsigned long offset, size_t size,
-		enum dma_data_direction dir, unsigned long attrs)
-{
-	unsigned long page_pfn = page_to_xen_pfn(page);
-	unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
-	unsigned long compound_pages =
-		(1<<compound_order(page)) * XEN_PFN_PER_PAGE;
-	bool local = (page_pfn <= dev_pfn) &&
-		(dev_pfn - page_pfn < compound_pages);
-
-	if (local)
-		dma_direct_map_page(hwdev, page, offset, size, dir, attrs);
-	else
-		__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
-}
-
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-	unsigned long pfn = PFN_DOWN(handle);
-	/*
-	 * Dom0 is mapped 1:1, while the Linux page can be spanned accross
-	 * multiple Xen page, it's not possible to have a mix of local and
-	 * foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a
-	 * foreign mfn will always return false. If the page is local we can
-	 * safely call the native dma_ops function, otherwise we call the xen
-	 * specific function.
-	 */
-	if (pfn_valid(pfn))
-		dma_direct_unmap_page(hwdev, handle, size, dir, attrs);
-	else
-		__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
-}
-
-#endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
include/xen/arm/page-coherent.h (+80)
···
 #ifndef _XEN_ARM_PAGE_COHERENT_H
 #define _XEN_ARM_PAGE_COHERENT_H
 
+#include <linux/dma-mapping.h>
+#include <asm/page.h>
+
 void __xen_dma_map_page(struct device *hwdev, struct page *page,
 		dma_addr_t dev_addr, unsigned long offset, size_t size,
 		enum dma_data_direction dir, unsigned long attrs);
···
 		dma_addr_t handle, size_t size, enum dma_data_direction dir);
 void __xen_dma_sync_single_for_device(struct device *hwdev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir);
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
+{
+	return dma_direct_alloc(hwdev, size, dma_handle, flags, attrs);
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
+{
+	dma_direct_free(hwdev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	unsigned long pfn = PFN_DOWN(handle);
+
+	if (pfn_valid(pfn))
+		dma_direct_sync_single_for_cpu(hwdev, handle, size, dir);
+	else
+		__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	unsigned long pfn = PFN_DOWN(handle);
+	if (pfn_valid(pfn))
+		dma_direct_sync_single_for_device(hwdev, handle, size, dir);
+	else
+		__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+		dma_addr_t dev_addr, unsigned long offset, size_t size,
+		enum dma_data_direction dir, unsigned long attrs)
+{
+	unsigned long page_pfn = page_to_xen_pfn(page);
+	unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
+	unsigned long compound_pages =
+		(1<<compound_order(page)) * XEN_PFN_PER_PAGE;
+	bool local = (page_pfn <= dev_pfn) &&
+		(dev_pfn - page_pfn < compound_pages);
+
+	/*
+	 * Dom0 is mapped 1:1, while the Linux page can span across
+	 * multiple Xen pages, it's not possible for it to contain a
+	 * mix of local and foreign Xen pages. So if the first xen_pfn
+	 * == mfn the page is local otherwise it's a foreign page
+	 * grant-mapped in dom0. If the page is local we can safely
+	 * call the native dma_ops function, otherwise we call the xen
+	 * specific function.
+	 */
+	if (local)
+		dma_direct_map_page(hwdev, page, offset, size, dir, attrs);
+	else
+		__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
+}
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+	unsigned long pfn = PFN_DOWN(handle);
+	/*
+	 * Dom0 is mapped 1:1, and while the Linux page can span across
+	 * multiple Xen pages, it's not possible to have a mix of local and
+	 * foreign Xen pages. Dom0 is mapped 1:1, so calling pfn_valid on a
+	 * foreign mfn will always return false. If the page is local we can
+	 * safely call the native dma_ops function, otherwise we call the xen
+	 * specific function.
+	 */
+	if (pfn_valid(pfn))
+		dma_direct_unmap_page(hwdev, handle, size, dir, attrs);
+	else
+		__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
+}
 
 #endif /* _XEN_ARM_PAGE_COHERENT_H */
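
The sync and unmap helpers key off pfn_valid() because dom0 is mapped 1:1: a local page's bus address corresponds to a valid host pfn, while a foreign grant-mapped mfn never does. A minimal caller sketch under that 1:1 assumption; example_map() is hypothetical and not part of this patch (the real in-tree consumer of these helpers is drivers/xen/swiotlb-xen.c):

#include <linux/dma-mapping.h>
#include <xen/arm/page-coherent.h>

/*
 * Hypothetical illustration only: with dom0 mapped 1:1 the bus address
 * of a local page equals its physical address, so xen_dma_map_page()
 * routes local pages to dma_direct_map_page() and foreign grant
 * mappings to the __xen_dma_map_page() slow path.
 */
static dma_addr_t example_map(struct device *dev, struct page *page,
		size_t size, enum dma_data_direction dir)
{
	dma_addr_t dev_addr = page_to_phys(page);	/* 1:1 in dom0 */

	xen_dma_map_page(dev, page, dev_addr, 0, size, dir, 0);
	return dev_addr;
}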