Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm/arm64: xen: Move shared architecture headers to include/xen/arm

ARM and arm64 Xen ports share a number of headers, leading to
packaging issues when these headers need to be exported, as it
breaks the reasonable requirement that an architecture port
has self-contained headers.

Fix the issue by moving the 5 header files to include/xen/arm,
and keep local placeholders to include the relevant files.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>

authored by

Marc Zyngier and committed by
Stefano Stabellini
2fbadc30 b36585a0

+441 -436
+1 -87
arch/arm/include/asm/xen/hypercall.h
··· 1 - /****************************************************************************** 2 - * hypercall.h 3 - * 4 - * Linux-specific hypervisor handling. 5 - * 6 - * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012 7 - * 8 - * This program is free software; you can redistribute it and/or 9 - * modify it under the terms of the GNU General Public License version 2 10 - * as published by the Free Software Foundation; or, when distributed 11 - * separately from the Linux kernel or incorporated into other 12 - * software packages, subject to the following license: 13 - * 14 - * Permission is hereby granted, free of charge, to any person obtaining a copy 15 - * of this source file (the "Software"), to deal in the Software without 16 - * restriction, including without limitation the rights to use, copy, modify, 17 - * merge, publish, distribute, sublicense, and/or sell copies of the Software, 18 - * and to permit persons to whom the Software is furnished to do so, subject to 19 - * the following conditions: 20 - * 21 - * The above copyright notice and this permission notice shall be included in 22 - * all copies or substantial portions of the Software. 23 - * 24 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 25 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 26 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 27 - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 28 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 29 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 30 - * IN THE SOFTWARE. 
31 - */ 32 - 33 - #ifndef _ASM_ARM_XEN_HYPERCALL_H 34 - #define _ASM_ARM_XEN_HYPERCALL_H 35 - 36 - #include <linux/bug.h> 37 - 38 - #include <xen/interface/xen.h> 39 - #include <xen/interface/sched.h> 40 - #include <xen/interface/platform.h> 41 - 42 - long privcmd_call(unsigned call, unsigned long a1, 43 - unsigned long a2, unsigned long a3, 44 - unsigned long a4, unsigned long a5); 45 - int HYPERVISOR_xen_version(int cmd, void *arg); 46 - int HYPERVISOR_console_io(int cmd, int count, char *str); 47 - int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count); 48 - int HYPERVISOR_sched_op(int cmd, void *arg); 49 - int HYPERVISOR_event_channel_op(int cmd, void *arg); 50 - unsigned long HYPERVISOR_hvm_op(int op, void *arg); 51 - int HYPERVISOR_memory_op(unsigned int cmd, void *arg); 52 - int HYPERVISOR_physdev_op(int cmd, void *arg); 53 - int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args); 54 - int HYPERVISOR_tmem_op(void *arg); 55 - int HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type); 56 - int HYPERVISOR_platform_op_raw(void *arg); 57 - static inline int HYPERVISOR_platform_op(struct xen_platform_op *op) 58 - { 59 - op->interface_version = XENPF_INTERFACE_VERSION; 60 - return HYPERVISOR_platform_op_raw(op); 61 - } 62 - int HYPERVISOR_multicall(struct multicall_entry *calls, uint32_t nr); 63 - 64 - static inline int 65 - HYPERVISOR_suspend(unsigned long start_info_mfn) 66 - { 67 - struct sched_shutdown r = { .reason = SHUTDOWN_suspend }; 68 - 69 - /* start_info_mfn is unused on ARM */ 70 - return HYPERVISOR_sched_op(SCHEDOP_shutdown, &r); 71 - } 72 - 73 - static inline void 74 - MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va, 75 - unsigned int new_val, unsigned long flags) 76 - { 77 - BUG(); 78 - } 79 - 80 - static inline void 81 - MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req, 82 - int count, int *success_count, domid_t domid) 83 - { 84 - BUG(); 85 - } 86 - 87 - #endif /* 
_ASM_ARM_XEN_HYPERCALL_H */ 1 + #include <xen/arm/hypercall.h>
+1 -39
arch/arm/include/asm/xen/hypervisor.h
··· 1 - #ifndef _ASM_ARM_XEN_HYPERVISOR_H 2 - #define _ASM_ARM_XEN_HYPERVISOR_H 3 - 4 - #include <linux/init.h> 5 - 6 - extern struct shared_info *HYPERVISOR_shared_info; 7 - extern struct start_info *xen_start_info; 8 - 9 - /* Lazy mode for batching updates / context switch */ 10 - enum paravirt_lazy_mode { 11 - PARAVIRT_LAZY_NONE, 12 - PARAVIRT_LAZY_MMU, 13 - PARAVIRT_LAZY_CPU, 14 - }; 15 - 16 - static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void) 17 - { 18 - return PARAVIRT_LAZY_NONE; 19 - } 20 - 21 - extern struct dma_map_ops *xen_dma_ops; 22 - 23 - #ifdef CONFIG_XEN 24 - void __init xen_early_init(void); 25 - #else 26 - static inline void xen_early_init(void) { return; } 27 - #endif 28 - 29 - #ifdef CONFIG_HOTPLUG_CPU 30 - static inline void xen_arch_register_cpu(int num) 31 - { 32 - } 33 - 34 - static inline void xen_arch_unregister_cpu(int num) 35 - { 36 - } 37 - #endif 38 - 39 - #endif /* _ASM_ARM_XEN_HYPERVISOR_H */ 1 + #include <xen/arm/hypervisor.h>
+1 -85
arch/arm/include/asm/xen/interface.h
··· 1 - /****************************************************************************** 2 - * Guest OS interface to ARM Xen. 3 - * 4 - * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012 5 - */ 6 - 7 - #ifndef _ASM_ARM_XEN_INTERFACE_H 8 - #define _ASM_ARM_XEN_INTERFACE_H 9 - 10 - #include <linux/types.h> 11 - 12 - #define uint64_aligned_t uint64_t __attribute__((aligned(8))) 13 - 14 - #define __DEFINE_GUEST_HANDLE(name, type) \ 15 - typedef struct { union { type *p; uint64_aligned_t q; }; } \ 16 - __guest_handle_ ## name 17 - 18 - #define DEFINE_GUEST_HANDLE_STRUCT(name) \ 19 - __DEFINE_GUEST_HANDLE(name, struct name) 20 - #define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name) 21 - #define GUEST_HANDLE(name) __guest_handle_ ## name 22 - 23 - #define set_xen_guest_handle(hnd, val) \ 24 - do { \ 25 - if (sizeof(hnd) == 8) \ 26 - *(uint64_t *)&(hnd) = 0; \ 27 - (hnd).p = val; \ 28 - } while (0) 29 - 30 - #define __HYPERVISOR_platform_op_raw __HYPERVISOR_platform_op 31 - 32 - #ifndef __ASSEMBLY__ 33 - /* Explicitly size integers that represent pfns in the interface with 34 - * Xen so that we can have one ABI that works for 32 and 64 bit guests. 35 - * Note that this means that the xen_pfn_t type may be capable of 36 - * representing pfn's which the guest cannot represent in its own pfn 37 - * type. However since pfn space is controlled by the guest this is 38 - * fine since it simply wouldn't be able to create any sure pfns in 39 - * the first place. 40 - */ 41 - typedef uint64_t xen_pfn_t; 42 - #define PRI_xen_pfn "llx" 43 - typedef uint64_t xen_ulong_t; 44 - #define PRI_xen_ulong "llx" 45 - typedef int64_t xen_long_t; 46 - #define PRI_xen_long "llx" 47 - /* Guest handles for primitive C types. 
*/ 48 - __DEFINE_GUEST_HANDLE(uchar, unsigned char); 49 - __DEFINE_GUEST_HANDLE(uint, unsigned int); 50 - DEFINE_GUEST_HANDLE(char); 51 - DEFINE_GUEST_HANDLE(int); 52 - DEFINE_GUEST_HANDLE(void); 53 - DEFINE_GUEST_HANDLE(uint64_t); 54 - DEFINE_GUEST_HANDLE(uint32_t); 55 - DEFINE_GUEST_HANDLE(xen_pfn_t); 56 - DEFINE_GUEST_HANDLE(xen_ulong_t); 57 - 58 - /* Maximum number of virtual CPUs in multi-processor guests. */ 59 - #define MAX_VIRT_CPUS 1 60 - 61 - struct arch_vcpu_info { }; 62 - struct arch_shared_info { }; 63 - 64 - /* TODO: Move pvclock definitions some place arch independent */ 65 - struct pvclock_vcpu_time_info { 66 - u32 version; 67 - u32 pad0; 68 - u64 tsc_timestamp; 69 - u64 system_time; 70 - u32 tsc_to_system_mul; 71 - s8 tsc_shift; 72 - u8 flags; 73 - u8 pad[2]; 74 - } __attribute__((__packed__)); /* 32 bytes */ 75 - 76 - /* It is OK to have a 12 bytes struct with no padding because it is packed */ 77 - struct pvclock_wall_clock { 78 - u32 version; 79 - u32 sec; 80 - u32 nsec; 81 - u32 sec_hi; 82 - } __attribute__((__packed__)); 83 - #endif 84 - 85 - #endif /* _ASM_ARM_XEN_INTERFACE_H */ 1 + #include <xen/arm/interface.h>
+1 -98
arch/arm/include/asm/xen/page-coherent.h
··· 1 - #ifndef _ASM_ARM_XEN_PAGE_COHERENT_H 2 - #define _ASM_ARM_XEN_PAGE_COHERENT_H 3 - 4 - #include <asm/page.h> 5 - #include <linux/dma-mapping.h> 6 - 7 - void __xen_dma_map_page(struct device *hwdev, struct page *page, 8 - dma_addr_t dev_addr, unsigned long offset, size_t size, 9 - enum dma_data_direction dir, unsigned long attrs); 10 - void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, 11 - size_t size, enum dma_data_direction dir, 12 - unsigned long attrs); 13 - void __xen_dma_sync_single_for_cpu(struct device *hwdev, 14 - dma_addr_t handle, size_t size, enum dma_data_direction dir); 15 - 16 - void __xen_dma_sync_single_for_device(struct device *hwdev, 17 - dma_addr_t handle, size_t size, enum dma_data_direction dir); 18 - 19 - static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, 20 - dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs) 21 - { 22 - return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs); 23 - } 24 - 25 - static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, 26 - void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs) 27 - { 28 - __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs); 29 - } 30 - 31 - static inline void xen_dma_map_page(struct device *hwdev, struct page *page, 32 - dma_addr_t dev_addr, unsigned long offset, size_t size, 33 - enum dma_data_direction dir, unsigned long attrs) 34 - { 35 - unsigned long page_pfn = page_to_xen_pfn(page); 36 - unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr); 37 - unsigned long compound_pages = 38 - (1<<compound_order(page)) * XEN_PFN_PER_PAGE; 39 - bool local = (page_pfn <= dev_pfn) && 40 - (dev_pfn - page_pfn < compound_pages); 41 - 42 - /* 43 - * Dom0 is mapped 1:1, while the Linux page can span across 44 - * multiple Xen pages, it's not possible for it to contain a 45 - * mix of local and foreign Xen pages. 
So if the first xen_pfn 46 - * == mfn the page is local otherwise it's a foreign page 47 - * grant-mapped in dom0. If the page is local we can safely 48 - * call the native dma_ops function, otherwise we call the xen 49 - * specific function. 50 - */ 51 - if (local) 52 - __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); 53 - else 54 - __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs); 55 - } 56 - 57 - static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, 58 - size_t size, enum dma_data_direction dir, unsigned long attrs) 59 - { 60 - unsigned long pfn = PFN_DOWN(handle); 61 - /* 62 - * Dom0 is mapped 1:1, while the Linux page can be spanned accross 63 - * multiple Xen page, it's not possible to have a mix of local and 64 - * foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a 65 - * foreign mfn will always return false. If the page is local we can 66 - * safely call the native dma_ops function, otherwise we call the xen 67 - * specific function. 
68 - */ 69 - if (pfn_valid(pfn)) { 70 - if (__generic_dma_ops(hwdev)->unmap_page) 71 - __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs); 72 - } else 73 - __xen_dma_unmap_page(hwdev, handle, size, dir, attrs); 74 - } 75 - 76 - static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, 77 - dma_addr_t handle, size_t size, enum dma_data_direction dir) 78 - { 79 - unsigned long pfn = PFN_DOWN(handle); 80 - if (pfn_valid(pfn)) { 81 - if (__generic_dma_ops(hwdev)->sync_single_for_cpu) 82 - __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir); 83 - } else 84 - __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir); 85 - } 86 - 87 - static inline void xen_dma_sync_single_for_device(struct device *hwdev, 88 - dma_addr_t handle, size_t size, enum dma_data_direction dir) 89 - { 90 - unsigned long pfn = PFN_DOWN(handle); 91 - if (pfn_valid(pfn)) { 92 - if (__generic_dma_ops(hwdev)->sync_single_for_device) 93 - __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir); 94 - } else 95 - __xen_dma_sync_single_for_device(hwdev, handle, size, dir); 96 - } 97 - 98 - #endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */ 1 + #include <xen/arm/page-coherent.h>
+1 -122
arch/arm/include/asm/xen/page.h
··· 1 - #ifndef _ASM_ARM_XEN_PAGE_H 2 - #define _ASM_ARM_XEN_PAGE_H 3 - 4 - #include <asm/page.h> 5 - #include <asm/pgtable.h> 6 - 7 - #include <linux/pfn.h> 8 - #include <linux/types.h> 9 - #include <linux/dma-mapping.h> 10 - 11 - #include <xen/xen.h> 12 - #include <xen/interface/grant_table.h> 13 - 14 - #define phys_to_machine_mapping_valid(pfn) (1) 15 - 16 - /* Xen machine address */ 17 - typedef struct xmaddr { 18 - phys_addr_t maddr; 19 - } xmaddr_t; 20 - 21 - /* Xen pseudo-physical address */ 22 - typedef struct xpaddr { 23 - phys_addr_t paddr; 24 - } xpaddr_t; 25 - 26 - #define XMADDR(x) ((xmaddr_t) { .maddr = (x) }) 27 - #define XPADDR(x) ((xpaddr_t) { .paddr = (x) }) 28 - 29 - #define INVALID_P2M_ENTRY (~0UL) 30 - 31 - /* 32 - * The pseudo-physical frame (pfn) used in all the helpers is always based 33 - * on Xen page granularity (i.e 4KB). 34 - * 35 - * A Linux page may be split across multiple non-contiguous Xen page so we 36 - * have to keep track with frame based on 4KB page granularity. 37 - * 38 - * PV drivers should never make a direct usage of those helpers (particularly 39 - * pfn_to_gfn and gfn_to_pfn). 
40 - */ 41 - 42 - unsigned long __pfn_to_mfn(unsigned long pfn); 43 - extern struct rb_root phys_to_mach; 44 - 45 - /* Pseudo-physical <-> Guest conversion */ 46 - static inline unsigned long pfn_to_gfn(unsigned long pfn) 47 - { 48 - return pfn; 49 - } 50 - 51 - static inline unsigned long gfn_to_pfn(unsigned long gfn) 52 - { 53 - return gfn; 54 - } 55 - 56 - /* Pseudo-physical <-> BUS conversion */ 57 - static inline unsigned long pfn_to_bfn(unsigned long pfn) 58 - { 59 - unsigned long mfn; 60 - 61 - if (phys_to_mach.rb_node != NULL) { 62 - mfn = __pfn_to_mfn(pfn); 63 - if (mfn != INVALID_P2M_ENTRY) 64 - return mfn; 65 - } 66 - 67 - return pfn; 68 - } 69 - 70 - static inline unsigned long bfn_to_pfn(unsigned long bfn) 71 - { 72 - return bfn; 73 - } 74 - 75 - #define bfn_to_local_pfn(bfn) bfn_to_pfn(bfn) 76 - 77 - /* VIRT <-> GUEST conversion */ 78 - #define virt_to_gfn(v) (pfn_to_gfn(virt_to_phys(v) >> XEN_PAGE_SHIFT)) 79 - #define gfn_to_virt(m) (__va(gfn_to_pfn(m) << XEN_PAGE_SHIFT)) 80 - 81 - /* Only used in PV code. But ARM guests are always HVM. */ 82 - static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr) 83 - { 84 - BUG(); 85 - } 86 - 87 - /* TODO: this shouldn't be here but it is because the frontend drivers 88 - * are using it (its rolled in headers) even though we won't hit the code path. 89 - * So for right now just punt with this. 
90 - */ 91 - static inline pte_t *lookup_address(unsigned long address, unsigned int *level) 92 - { 93 - BUG(); 94 - return NULL; 95 - } 96 - 97 - extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, 98 - struct gnttab_map_grant_ref *kmap_ops, 99 - struct page **pages, unsigned int count); 100 - 101 - extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, 102 - struct gnttab_unmap_grant_ref *kunmap_ops, 103 - struct page **pages, unsigned int count); 104 - 105 - bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn); 106 - bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn, 107 - unsigned long nr_pages); 108 - 109 - static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) 110 - { 111 - return __set_phys_to_machine(pfn, mfn); 112 - } 113 - 114 - #define xen_remap(cookie, size) ioremap_cache((cookie), (size)) 115 - #define xen_unmap(cookie) iounmap((cookie)) 116 - 117 - bool xen_arch_need_swiotlb(struct device *dev, 118 - phys_addr_t phys, 119 - dma_addr_t dev_addr); 120 - unsigned long xen_get_swiotlb_free_pages(unsigned int order); 121 - 122 - #endif /* _ASM_ARM_XEN_PAGE_H */ 1 + #include <xen/arm/page.h>
+1 -1
arch/arm64/include/asm/xen/hypercall.h
··· 1 - #include <../../arm/include/asm/xen/hypercall.h> 1 + #include <xen/arm/hypercall.h>
+1 -1
arch/arm64/include/asm/xen/hypervisor.h
··· 1 - #include <../../arm/include/asm/xen/hypervisor.h> 1 + #include <xen/arm/hypervisor.h>
+1 -1
arch/arm64/include/asm/xen/interface.h
··· 1 - #include <../../arm/include/asm/xen/interface.h> 1 + #include <xen/arm/interface.h>
+1 -1
arch/arm64/include/asm/xen/page-coherent.h
··· 1 - #include <../../arm/include/asm/xen/page-coherent.h> 1 + #include <xen/arm/page-coherent.h>
+1 -1
arch/arm64/include/asm/xen/page.h
··· 1 - #include <../../arm/include/asm/xen/page.h> 1 + #include <xen/arm/page.h>
+87
include/xen/arm/hypercall.h
··· 1 + /****************************************************************************** 2 + * hypercall.h 3 + * 4 + * Linux-specific hypervisor handling. 5 + * 6 + * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012 7 + * 8 + * This program is free software; you can redistribute it and/or 9 + * modify it under the terms of the GNU General Public License version 2 10 + * as published by the Free Software Foundation; or, when distributed 11 + * separately from the Linux kernel or incorporated into other 12 + * software packages, subject to the following license: 13 + * 14 + * Permission is hereby granted, free of charge, to any person obtaining a copy 15 + * of this source file (the "Software"), to deal in the Software without 16 + * restriction, including without limitation the rights to use, copy, modify, 17 + * merge, publish, distribute, sublicense, and/or sell copies of the Software, 18 + * and to permit persons to whom the Software is furnished to do so, subject to 19 + * the following conditions: 20 + * 21 + * The above copyright notice and this permission notice shall be included in 22 + * all copies or substantial portions of the Software. 23 + * 24 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 25 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 26 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 27 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 28 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 29 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 30 + * IN THE SOFTWARE. 
31 + */ 32 + 33 + #ifndef _ASM_ARM_XEN_HYPERCALL_H 34 + #define _ASM_ARM_XEN_HYPERCALL_H 35 + 36 + #include <linux/bug.h> 37 + 38 + #include <xen/interface/xen.h> 39 + #include <xen/interface/sched.h> 40 + #include <xen/interface/platform.h> 41 + 42 + long privcmd_call(unsigned call, unsigned long a1, 43 + unsigned long a2, unsigned long a3, 44 + unsigned long a4, unsigned long a5); 45 + int HYPERVISOR_xen_version(int cmd, void *arg); 46 + int HYPERVISOR_console_io(int cmd, int count, char *str); 47 + int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count); 48 + int HYPERVISOR_sched_op(int cmd, void *arg); 49 + int HYPERVISOR_event_channel_op(int cmd, void *arg); 50 + unsigned long HYPERVISOR_hvm_op(int op, void *arg); 51 + int HYPERVISOR_memory_op(unsigned int cmd, void *arg); 52 + int HYPERVISOR_physdev_op(int cmd, void *arg); 53 + int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args); 54 + int HYPERVISOR_tmem_op(void *arg); 55 + int HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type); 56 + int HYPERVISOR_platform_op_raw(void *arg); 57 + static inline int HYPERVISOR_platform_op(struct xen_platform_op *op) 58 + { 59 + op->interface_version = XENPF_INTERFACE_VERSION; 60 + return HYPERVISOR_platform_op_raw(op); 61 + } 62 + int HYPERVISOR_multicall(struct multicall_entry *calls, uint32_t nr); 63 + 64 + static inline int 65 + HYPERVISOR_suspend(unsigned long start_info_mfn) 66 + { 67 + struct sched_shutdown r = { .reason = SHUTDOWN_suspend }; 68 + 69 + /* start_info_mfn is unused on ARM */ 70 + return HYPERVISOR_sched_op(SCHEDOP_shutdown, &r); 71 + } 72 + 73 + static inline void 74 + MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va, 75 + unsigned int new_val, unsigned long flags) 76 + { 77 + BUG(); 78 + } 79 + 80 + static inline void 81 + MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req, 82 + int count, int *success_count, domid_t domid) 83 + { 84 + BUG(); 85 + } 86 + 87 + #endif /* 
_ASM_ARM_XEN_HYPERCALL_H */
+39
include/xen/arm/hypervisor.h
··· 1 + #ifndef _ASM_ARM_XEN_HYPERVISOR_H 2 + #define _ASM_ARM_XEN_HYPERVISOR_H 3 + 4 + #include <linux/init.h> 5 + 6 + extern struct shared_info *HYPERVISOR_shared_info; 7 + extern struct start_info *xen_start_info; 8 + 9 + /* Lazy mode for batching updates / context switch */ 10 + enum paravirt_lazy_mode { 11 + PARAVIRT_LAZY_NONE, 12 + PARAVIRT_LAZY_MMU, 13 + PARAVIRT_LAZY_CPU, 14 + }; 15 + 16 + static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void) 17 + { 18 + return PARAVIRT_LAZY_NONE; 19 + } 20 + 21 + extern struct dma_map_ops *xen_dma_ops; 22 + 23 + #ifdef CONFIG_XEN 24 + void __init xen_early_init(void); 25 + #else 26 + static inline void xen_early_init(void) { return; } 27 + #endif 28 + 29 + #ifdef CONFIG_HOTPLUG_CPU 30 + static inline void xen_arch_register_cpu(int num) 31 + { 32 + } 33 + 34 + static inline void xen_arch_unregister_cpu(int num) 35 + { 36 + } 37 + #endif 38 + 39 + #endif /* _ASM_ARM_XEN_HYPERVISOR_H */
+85
include/xen/arm/interface.h
··· 1 + /****************************************************************************** 2 + * Guest OS interface to ARM Xen. 3 + * 4 + * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012 5 + */ 6 + 7 + #ifndef _ASM_ARM_XEN_INTERFACE_H 8 + #define _ASM_ARM_XEN_INTERFACE_H 9 + 10 + #include <linux/types.h> 11 + 12 + #define uint64_aligned_t uint64_t __attribute__((aligned(8))) 13 + 14 + #define __DEFINE_GUEST_HANDLE(name, type) \ 15 + typedef struct { union { type *p; uint64_aligned_t q; }; } \ 16 + __guest_handle_ ## name 17 + 18 + #define DEFINE_GUEST_HANDLE_STRUCT(name) \ 19 + __DEFINE_GUEST_HANDLE(name, struct name) 20 + #define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name) 21 + #define GUEST_HANDLE(name) __guest_handle_ ## name 22 + 23 + #define set_xen_guest_handle(hnd, val) \ 24 + do { \ 25 + if (sizeof(hnd) == 8) \ 26 + *(uint64_t *)&(hnd) = 0; \ 27 + (hnd).p = val; \ 28 + } while (0) 29 + 30 + #define __HYPERVISOR_platform_op_raw __HYPERVISOR_platform_op 31 + 32 + #ifndef __ASSEMBLY__ 33 + /* Explicitly size integers that represent pfns in the interface with 34 + * Xen so that we can have one ABI that works for 32 and 64 bit guests. 35 + * Note that this means that the xen_pfn_t type may be capable of 36 + * representing pfn's which the guest cannot represent in its own pfn 37 + * type. However since pfn space is controlled by the guest this is 38 + * fine since it simply wouldn't be able to create any sure pfns in 39 + * the first place. 40 + */ 41 + typedef uint64_t xen_pfn_t; 42 + #define PRI_xen_pfn "llx" 43 + typedef uint64_t xen_ulong_t; 44 + #define PRI_xen_ulong "llx" 45 + typedef int64_t xen_long_t; 46 + #define PRI_xen_long "llx" 47 + /* Guest handles for primitive C types. 
*/ 48 + __DEFINE_GUEST_HANDLE(uchar, unsigned char); 49 + __DEFINE_GUEST_HANDLE(uint, unsigned int); 50 + DEFINE_GUEST_HANDLE(char); 51 + DEFINE_GUEST_HANDLE(int); 52 + DEFINE_GUEST_HANDLE(void); 53 + DEFINE_GUEST_HANDLE(uint64_t); 54 + DEFINE_GUEST_HANDLE(uint32_t); 55 + DEFINE_GUEST_HANDLE(xen_pfn_t); 56 + DEFINE_GUEST_HANDLE(xen_ulong_t); 57 + 58 + /* Maximum number of virtual CPUs in multi-processor guests. */ 59 + #define MAX_VIRT_CPUS 1 60 + 61 + struct arch_vcpu_info { }; 62 + struct arch_shared_info { }; 63 + 64 + /* TODO: Move pvclock definitions some place arch independent */ 65 + struct pvclock_vcpu_time_info { 66 + u32 version; 67 + u32 pad0; 68 + u64 tsc_timestamp; 69 + u64 system_time; 70 + u32 tsc_to_system_mul; 71 + s8 tsc_shift; 72 + u8 flags; 73 + u8 pad[2]; 74 + } __attribute__((__packed__)); /* 32 bytes */ 75 + 76 + /* It is OK to have a 12 bytes struct with no padding because it is packed */ 77 + struct pvclock_wall_clock { 78 + u32 version; 79 + u32 sec; 80 + u32 nsec; 81 + u32 sec_hi; 82 + } __attribute__((__packed__)); 83 + #endif 84 + 85 + #endif /* _ASM_ARM_XEN_INTERFACE_H */
+98
include/xen/arm/page-coherent.h
··· 1 + #ifndef _ASM_ARM_XEN_PAGE_COHERENT_H 2 + #define _ASM_ARM_XEN_PAGE_COHERENT_H 3 + 4 + #include <asm/page.h> 5 + #include <linux/dma-mapping.h> 6 + 7 + void __xen_dma_map_page(struct device *hwdev, struct page *page, 8 + dma_addr_t dev_addr, unsigned long offset, size_t size, 9 + enum dma_data_direction dir, unsigned long attrs); 10 + void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, 11 + size_t size, enum dma_data_direction dir, 12 + unsigned long attrs); 13 + void __xen_dma_sync_single_for_cpu(struct device *hwdev, 14 + dma_addr_t handle, size_t size, enum dma_data_direction dir); 15 + 16 + void __xen_dma_sync_single_for_device(struct device *hwdev, 17 + dma_addr_t handle, size_t size, enum dma_data_direction dir); 18 + 19 + static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, 20 + dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs) 21 + { 22 + return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs); 23 + } 24 + 25 + static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, 26 + void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs) 27 + { 28 + __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs); 29 + } 30 + 31 + static inline void xen_dma_map_page(struct device *hwdev, struct page *page, 32 + dma_addr_t dev_addr, unsigned long offset, size_t size, 33 + enum dma_data_direction dir, unsigned long attrs) 34 + { 35 + unsigned long page_pfn = page_to_xen_pfn(page); 36 + unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr); 37 + unsigned long compound_pages = 38 + (1<<compound_order(page)) * XEN_PFN_PER_PAGE; 39 + bool local = (page_pfn <= dev_pfn) && 40 + (dev_pfn - page_pfn < compound_pages); 41 + 42 + /* 43 + * Dom0 is mapped 1:1, while the Linux page can span across 44 + * multiple Xen pages, it's not possible for it to contain a 45 + * mix of local and foreign Xen pages. 
So if the first xen_pfn 46 + * == mfn the page is local otherwise it's a foreign page 47 + * grant-mapped in dom0. If the page is local we can safely 48 + * call the native dma_ops function, otherwise we call the xen 49 + * specific function. 50 + */ 51 + if (local) 52 + __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); 53 + else 54 + __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs); 55 + } 56 + 57 + static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, 58 + size_t size, enum dma_data_direction dir, unsigned long attrs) 59 + { 60 + unsigned long pfn = PFN_DOWN(handle); 61 + /* 62 + * Dom0 is mapped 1:1, while the Linux page can be spanned accross 63 + * multiple Xen page, it's not possible to have a mix of local and 64 + * foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a 65 + * foreign mfn will always return false. If the page is local we can 66 + * safely call the native dma_ops function, otherwise we call the xen 67 + * specific function. 
68 + */ 69 + if (pfn_valid(pfn)) { 70 + if (__generic_dma_ops(hwdev)->unmap_page) 71 + __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs); 72 + } else 73 + __xen_dma_unmap_page(hwdev, handle, size, dir, attrs); 74 + } 75 + 76 + static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, 77 + dma_addr_t handle, size_t size, enum dma_data_direction dir) 78 + { 79 + unsigned long pfn = PFN_DOWN(handle); 80 + if (pfn_valid(pfn)) { 81 + if (__generic_dma_ops(hwdev)->sync_single_for_cpu) 82 + __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir); 83 + } else 84 + __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir); 85 + } 86 + 87 + static inline void xen_dma_sync_single_for_device(struct device *hwdev, 88 + dma_addr_t handle, size_t size, enum dma_data_direction dir) 89 + { 90 + unsigned long pfn = PFN_DOWN(handle); 91 + if (pfn_valid(pfn)) { 92 + if (__generic_dma_ops(hwdev)->sync_single_for_device) 93 + __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir); 94 + } else 95 + __xen_dma_sync_single_for_device(hwdev, handle, size, dir); 96 + } 97 + 98 + #endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
+122
include/xen/arm/page.h
··· 1 + #ifndef _ASM_ARM_XEN_PAGE_H 2 + #define _ASM_ARM_XEN_PAGE_H 3 + 4 + #include <asm/page.h> 5 + #include <asm/pgtable.h> 6 + 7 + #include <linux/pfn.h> 8 + #include <linux/types.h> 9 + #include <linux/dma-mapping.h> 10 + 11 + #include <xen/xen.h> 12 + #include <xen/interface/grant_table.h> 13 + 14 + #define phys_to_machine_mapping_valid(pfn) (1) 15 + 16 + /* Xen machine address */ 17 + typedef struct xmaddr { 18 + phys_addr_t maddr; 19 + } xmaddr_t; 20 + 21 + /* Xen pseudo-physical address */ 22 + typedef struct xpaddr { 23 + phys_addr_t paddr; 24 + } xpaddr_t; 25 + 26 + #define XMADDR(x) ((xmaddr_t) { .maddr = (x) }) 27 + #define XPADDR(x) ((xpaddr_t) { .paddr = (x) }) 28 + 29 + #define INVALID_P2M_ENTRY (~0UL) 30 + 31 + /* 32 + * The pseudo-physical frame (pfn) used in all the helpers is always based 33 + * on Xen page granularity (i.e 4KB). 34 + * 35 + * A Linux page may be split across multiple non-contiguous Xen page so we 36 + * have to keep track with frame based on 4KB page granularity. 37 + * 38 + * PV drivers should never make a direct usage of those helpers (particularly 39 + * pfn_to_gfn and gfn_to_pfn). 
40 + */ 41 + 42 + unsigned long __pfn_to_mfn(unsigned long pfn); 43 + extern struct rb_root phys_to_mach; 44 + 45 + /* Pseudo-physical <-> Guest conversion */ 46 + static inline unsigned long pfn_to_gfn(unsigned long pfn) 47 + { 48 + return pfn; 49 + } 50 + 51 + static inline unsigned long gfn_to_pfn(unsigned long gfn) 52 + { 53 + return gfn; 54 + } 55 + 56 + /* Pseudo-physical <-> BUS conversion */ 57 + static inline unsigned long pfn_to_bfn(unsigned long pfn) 58 + { 59 + unsigned long mfn; 60 + 61 + if (phys_to_mach.rb_node != NULL) { 62 + mfn = __pfn_to_mfn(pfn); 63 + if (mfn != INVALID_P2M_ENTRY) 64 + return mfn; 65 + } 66 + 67 + return pfn; 68 + } 69 + 70 + static inline unsigned long bfn_to_pfn(unsigned long bfn) 71 + { 72 + return bfn; 73 + } 74 + 75 + #define bfn_to_local_pfn(bfn) bfn_to_pfn(bfn) 76 + 77 + /* VIRT <-> GUEST conversion */ 78 + #define virt_to_gfn(v) (pfn_to_gfn(virt_to_phys(v) >> XEN_PAGE_SHIFT)) 79 + #define gfn_to_virt(m) (__va(gfn_to_pfn(m) << XEN_PAGE_SHIFT)) 80 + 81 + /* Only used in PV code. But ARM guests are always HVM. */ 82 + static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr) 83 + { 84 + BUG(); 85 + } 86 + 87 + /* TODO: this shouldn't be here but it is because the frontend drivers 88 + * are using it (its rolled in headers) even though we won't hit the code path. 89 + * So for right now just punt with this. 
90 + */ 91 + static inline pte_t *lookup_address(unsigned long address, unsigned int *level) 92 + { 93 + BUG(); 94 + return NULL; 95 + } 96 + 97 + extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, 98 + struct gnttab_map_grant_ref *kmap_ops, 99 + struct page **pages, unsigned int count); 100 + 101 + extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, 102 + struct gnttab_unmap_grant_ref *kunmap_ops, 103 + struct page **pages, unsigned int count); 104 + 105 + bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn); 106 + bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn, 107 + unsigned long nr_pages); 108 + 109 + static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) 110 + { 111 + return __set_phys_to_machine(pfn, mfn); 112 + } 113 + 114 + #define xen_remap(cookie, size) ioremap_cache((cookie), (size)) 115 + #define xen_unmap(cookie) iounmap((cookie)) 116 + 117 + bool xen_arch_need_swiotlb(struct device *dev, 118 + phys_addr_t phys, 119 + dma_addr_t dev_addr); 120 + unsigned long xen_get_swiotlb_free_pages(unsigned int order); 121 + 122 + #endif /* _ASM_ARM_XEN_PAGE_H */