Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xen: introduce xen_dma_map/unmap_page and xen_dma_sync_single_for_cpu/device

Introduce xen_dma_map_page, xen_dma_unmap_page,
xen_dma_sync_single_for_cpu and xen_dma_sync_single_for_device.
They have empty implementations on x86 and ia64 but they call the
corresponding platform dma_ops function on arm and arm64.

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>

Changes in v9:
- xen_dma_map_page now returns void and avoids calling page_to_phys.

+81
+28
arch/arm/include/asm/xen/page-coherent.h
··· 19 19 __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs); 20 20 } 21 21 22 + static inline void xen_dma_map_page(struct device *hwdev, struct page *page, 23 + unsigned long offset, size_t size, enum dma_data_direction dir, 24 + struct dma_attrs *attrs) 25 + { 26 + __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); 27 + } 28 + 29 + static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, 30 + size_t size, enum dma_data_direction dir, 31 + struct dma_attrs *attrs) 32 + { 33 + if (__generic_dma_ops(hwdev)->unmap_page) 34 + __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs); 35 + } 36 + 37 + static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, 38 + dma_addr_t handle, size_t size, enum dma_data_direction dir) 39 + { 40 + if (__generic_dma_ops(hwdev)->sync_single_for_cpu) 41 + __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir); 42 + } 43 + 44 + static inline void xen_dma_sync_single_for_device(struct device *hwdev, 45 + dma_addr_t handle, size_t size, enum dma_data_direction dir) 46 + { 47 + if (__generic_dma_ops(hwdev)->sync_single_for_device) 48 + __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir); 49 + } 22 50 #endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
+25
arch/arm64/include/asm/xen/page-coherent.h
··· 19 19 __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs); 20 20 } 21 21 22 + static inline void xen_dma_map_page(struct device *hwdev, struct page *page, 23 + unsigned long offset, size_t size, enum dma_data_direction dir, 24 + struct dma_attrs *attrs) 25 + { 26 + __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); 27 + } 28 + 29 + static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, 30 + size_t size, enum dma_data_direction dir, 31 + struct dma_attrs *attrs) 32 + { 33 + __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs); 34 + } 35 + 36 + static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, 37 + dma_addr_t handle, size_t size, enum dma_data_direction dir) 38 + { 39 + __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir); 40 + } 41 + 42 + static inline void xen_dma_sync_single_for_device(struct device *hwdev, 43 + dma_addr_t handle, size_t size, enum dma_data_direction dir) 44 + { 45 + __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir); 46 + } 22 47 #endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
+14
arch/ia64/include/asm/xen/page-coherent.h
	free_pages((unsigned long) cpu_addr, get_order(size));
}

/*
 * Empty stubs: per the commit introducing these hooks, ia64 intentionally
 * has no-op implementations, while arm/arm64 forward to the platform
 * dma_ops. They exist so common Xen code can call them unconditionally.
 */
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs) { }

static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs) { }

static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir) { }

static inline void xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir) { }

#endif /* _ASM_IA64_XEN_PAGE_COHERENT_H */
+14
arch/x86/include/asm/xen/page-coherent.h
	free_pages((unsigned long) cpu_addr, get_order(size));
}

/*
 * Empty stubs: per the commit introducing these hooks, x86 intentionally
 * has no-op implementations, while arm/arm64 forward to the platform
 * dma_ops. They exist so common Xen code can call them unconditionally.
 */
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs) { }

static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs) { }

static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir) { }

static inline void xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir) { }

#endif /* _ASM_X86_XEN_PAGE_COHERENT_H */