Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-linus-4.10-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen updates from Juergen Gross:
"Xen features and fixes for 4.10

These are some fixes, a move of some arm related headers to share them
between arm and arm64 and a series introducing a helper to make code
more readable.

The most notable change is David stepping down as maintainer of the
Xen hypervisor interface. This results in me sending you the pull
requests for Xen related code from now on"

* tag 'for-linus-4.10-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip: (29 commits)
xen/balloon: Only mark a page as managed when it is released
xenbus: fix deadlock on writes to /proc/xen/xenbus
xen/scsifront: don't request a slot on the ring until request is ready
xen/x86: Increase xen_e820_map to E820_X_MAX possible entries
x86: Make E820_X_MAX unconditionally larger than E820MAX
xen/pci: Bubble up error and fix description.
xen: xenbus: set error code on failure
xen: set error code on failures
arm/xen: Use alloc_percpu rather than __alloc_percpu
arm/arm64: xen: Move shared architecture headers to include/xen/arm
xen/events: use xen_vcpu_id mapping for EVTCHNOP_status
xen/gntdev: Use VM_MIXEDMAP instead of VM_IO to avoid NUMA balancing
xen-scsifront: Add a missing call to kfree
MAINTAINERS: update XEN HYPERVISOR INTERFACE
xenfs: Use proc_create_mount_point() to create /proc/xen
xen-platform: use builtin_pci_driver
xen-netback: fix error handling output
xen: make use of xenbus_read_unsigned() in xenbus
xen: make use of xenbus_read_unsigned() in xen-pciback
xen: make use of xenbus_read_unsigned() in xen-fbfront
...

+669 -780
+0 -1
MAINTAINERS
··· 13354 13354 13355 13355 XEN HYPERVISOR INTERFACE 13356 13356 M: Boris Ostrovsky <boris.ostrovsky@oracle.com> 13357 - M: David Vrabel <david.vrabel@citrix.com> 13358 13357 M: Juergen Gross <jgross@suse.com> 13359 13358 L: xen-devel@lists.xenproject.org (moderated for non-subscribers) 13360 13359 T: git git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git
+1 -87
arch/arm/include/asm/xen/hypercall.h
··· 1 - /****************************************************************************** 2 - * hypercall.h 3 - * 4 - * Linux-specific hypervisor handling. 5 - * 6 - * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012 7 - * 8 - * This program is free software; you can redistribute it and/or 9 - * modify it under the terms of the GNU General Public License version 2 10 - * as published by the Free Software Foundation; or, when distributed 11 - * separately from the Linux kernel or incorporated into other 12 - * software packages, subject to the following license: 13 - * 14 - * Permission is hereby granted, free of charge, to any person obtaining a copy 15 - * of this source file (the "Software"), to deal in the Software without 16 - * restriction, including without limitation the rights to use, copy, modify, 17 - * merge, publish, distribute, sublicense, and/or sell copies of the Software, 18 - * and to permit persons to whom the Software is furnished to do so, subject to 19 - * the following conditions: 20 - * 21 - * The above copyright notice and this permission notice shall be included in 22 - * all copies or substantial portions of the Software. 23 - * 24 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 25 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 26 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 27 - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 28 - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 29 - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 30 - * IN THE SOFTWARE. 
31 - */ 32 - 33 - #ifndef _ASM_ARM_XEN_HYPERCALL_H 34 - #define _ASM_ARM_XEN_HYPERCALL_H 35 - 36 - #include <linux/bug.h> 37 - 38 - #include <xen/interface/xen.h> 39 - #include <xen/interface/sched.h> 40 - #include <xen/interface/platform.h> 41 - 42 - long privcmd_call(unsigned call, unsigned long a1, 43 - unsigned long a2, unsigned long a3, 44 - unsigned long a4, unsigned long a5); 45 - int HYPERVISOR_xen_version(int cmd, void *arg); 46 - int HYPERVISOR_console_io(int cmd, int count, char *str); 47 - int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count); 48 - int HYPERVISOR_sched_op(int cmd, void *arg); 49 - int HYPERVISOR_event_channel_op(int cmd, void *arg); 50 - unsigned long HYPERVISOR_hvm_op(int op, void *arg); 51 - int HYPERVISOR_memory_op(unsigned int cmd, void *arg); 52 - int HYPERVISOR_physdev_op(int cmd, void *arg); 53 - int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args); 54 - int HYPERVISOR_tmem_op(void *arg); 55 - int HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type); 56 - int HYPERVISOR_platform_op_raw(void *arg); 57 - static inline int HYPERVISOR_platform_op(struct xen_platform_op *op) 58 - { 59 - op->interface_version = XENPF_INTERFACE_VERSION; 60 - return HYPERVISOR_platform_op_raw(op); 61 - } 62 - int HYPERVISOR_multicall(struct multicall_entry *calls, uint32_t nr); 63 - 64 - static inline int 65 - HYPERVISOR_suspend(unsigned long start_info_mfn) 66 - { 67 - struct sched_shutdown r = { .reason = SHUTDOWN_suspend }; 68 - 69 - /* start_info_mfn is unused on ARM */ 70 - return HYPERVISOR_sched_op(SCHEDOP_shutdown, &r); 71 - } 72 - 73 - static inline void 74 - MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va, 75 - unsigned int new_val, unsigned long flags) 76 - { 77 - BUG(); 78 - } 79 - 80 - static inline void 81 - MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req, 82 - int count, int *success_count, domid_t domid) 83 - { 84 - BUG(); 85 - } 86 - 87 - #endif /* 
_ASM_ARM_XEN_HYPERCALL_H */ 1 + #include <xen/arm/hypercall.h>
+1 -39
arch/arm/include/asm/xen/hypervisor.h
··· 1 - #ifndef _ASM_ARM_XEN_HYPERVISOR_H 2 - #define _ASM_ARM_XEN_HYPERVISOR_H 3 - 4 - #include <linux/init.h> 5 - 6 - extern struct shared_info *HYPERVISOR_shared_info; 7 - extern struct start_info *xen_start_info; 8 - 9 - /* Lazy mode for batching updates / context switch */ 10 - enum paravirt_lazy_mode { 11 - PARAVIRT_LAZY_NONE, 12 - PARAVIRT_LAZY_MMU, 13 - PARAVIRT_LAZY_CPU, 14 - }; 15 - 16 - static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void) 17 - { 18 - return PARAVIRT_LAZY_NONE; 19 - } 20 - 21 - extern struct dma_map_ops *xen_dma_ops; 22 - 23 - #ifdef CONFIG_XEN 24 - void __init xen_early_init(void); 25 - #else 26 - static inline void xen_early_init(void) { return; } 27 - #endif 28 - 29 - #ifdef CONFIG_HOTPLUG_CPU 30 - static inline void xen_arch_register_cpu(int num) 31 - { 32 - } 33 - 34 - static inline void xen_arch_unregister_cpu(int num) 35 - { 36 - } 37 - #endif 38 - 39 - #endif /* _ASM_ARM_XEN_HYPERVISOR_H */ 1 + #include <xen/arm/hypervisor.h>
+1 -85
arch/arm/include/asm/xen/interface.h
··· 1 - /****************************************************************************** 2 - * Guest OS interface to ARM Xen. 3 - * 4 - * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012 5 - */ 6 - 7 - #ifndef _ASM_ARM_XEN_INTERFACE_H 8 - #define _ASM_ARM_XEN_INTERFACE_H 9 - 10 - #include <linux/types.h> 11 - 12 - #define uint64_aligned_t uint64_t __attribute__((aligned(8))) 13 - 14 - #define __DEFINE_GUEST_HANDLE(name, type) \ 15 - typedef struct { union { type *p; uint64_aligned_t q; }; } \ 16 - __guest_handle_ ## name 17 - 18 - #define DEFINE_GUEST_HANDLE_STRUCT(name) \ 19 - __DEFINE_GUEST_HANDLE(name, struct name) 20 - #define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name) 21 - #define GUEST_HANDLE(name) __guest_handle_ ## name 22 - 23 - #define set_xen_guest_handle(hnd, val) \ 24 - do { \ 25 - if (sizeof(hnd) == 8) \ 26 - *(uint64_t *)&(hnd) = 0; \ 27 - (hnd).p = val; \ 28 - } while (0) 29 - 30 - #define __HYPERVISOR_platform_op_raw __HYPERVISOR_platform_op 31 - 32 - #ifndef __ASSEMBLY__ 33 - /* Explicitly size integers that represent pfns in the interface with 34 - * Xen so that we can have one ABI that works for 32 and 64 bit guests. 35 - * Note that this means that the xen_pfn_t type may be capable of 36 - * representing pfn's which the guest cannot represent in its own pfn 37 - * type. However since pfn space is controlled by the guest this is 38 - * fine since it simply wouldn't be able to create any sure pfns in 39 - * the first place. 40 - */ 41 - typedef uint64_t xen_pfn_t; 42 - #define PRI_xen_pfn "llx" 43 - typedef uint64_t xen_ulong_t; 44 - #define PRI_xen_ulong "llx" 45 - typedef int64_t xen_long_t; 46 - #define PRI_xen_long "llx" 47 - /* Guest handles for primitive C types. 
*/ 48 - __DEFINE_GUEST_HANDLE(uchar, unsigned char); 49 - __DEFINE_GUEST_HANDLE(uint, unsigned int); 50 - DEFINE_GUEST_HANDLE(char); 51 - DEFINE_GUEST_HANDLE(int); 52 - DEFINE_GUEST_HANDLE(void); 53 - DEFINE_GUEST_HANDLE(uint64_t); 54 - DEFINE_GUEST_HANDLE(uint32_t); 55 - DEFINE_GUEST_HANDLE(xen_pfn_t); 56 - DEFINE_GUEST_HANDLE(xen_ulong_t); 57 - 58 - /* Maximum number of virtual CPUs in multi-processor guests. */ 59 - #define MAX_VIRT_CPUS 1 60 - 61 - struct arch_vcpu_info { }; 62 - struct arch_shared_info { }; 63 - 64 - /* TODO: Move pvclock definitions some place arch independent */ 65 - struct pvclock_vcpu_time_info { 66 - u32 version; 67 - u32 pad0; 68 - u64 tsc_timestamp; 69 - u64 system_time; 70 - u32 tsc_to_system_mul; 71 - s8 tsc_shift; 72 - u8 flags; 73 - u8 pad[2]; 74 - } __attribute__((__packed__)); /* 32 bytes */ 75 - 76 - /* It is OK to have a 12 bytes struct with no padding because it is packed */ 77 - struct pvclock_wall_clock { 78 - u32 version; 79 - u32 sec; 80 - u32 nsec; 81 - u32 sec_hi; 82 - } __attribute__((__packed__)); 83 - #endif 84 - 85 - #endif /* _ASM_ARM_XEN_INTERFACE_H */ 1 + #include <xen/arm/interface.h>
+1 -98
arch/arm/include/asm/xen/page-coherent.h
··· 1 - #ifndef _ASM_ARM_XEN_PAGE_COHERENT_H 2 - #define _ASM_ARM_XEN_PAGE_COHERENT_H 3 - 4 - #include <asm/page.h> 5 - #include <linux/dma-mapping.h> 6 - 7 - void __xen_dma_map_page(struct device *hwdev, struct page *page, 8 - dma_addr_t dev_addr, unsigned long offset, size_t size, 9 - enum dma_data_direction dir, unsigned long attrs); 10 - void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, 11 - size_t size, enum dma_data_direction dir, 12 - unsigned long attrs); 13 - void __xen_dma_sync_single_for_cpu(struct device *hwdev, 14 - dma_addr_t handle, size_t size, enum dma_data_direction dir); 15 - 16 - void __xen_dma_sync_single_for_device(struct device *hwdev, 17 - dma_addr_t handle, size_t size, enum dma_data_direction dir); 18 - 19 - static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, 20 - dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs) 21 - { 22 - return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs); 23 - } 24 - 25 - static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, 26 - void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs) 27 - { 28 - __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs); 29 - } 30 - 31 - static inline void xen_dma_map_page(struct device *hwdev, struct page *page, 32 - dma_addr_t dev_addr, unsigned long offset, size_t size, 33 - enum dma_data_direction dir, unsigned long attrs) 34 - { 35 - unsigned long page_pfn = page_to_xen_pfn(page); 36 - unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr); 37 - unsigned long compound_pages = 38 - (1<<compound_order(page)) * XEN_PFN_PER_PAGE; 39 - bool local = (page_pfn <= dev_pfn) && 40 - (dev_pfn - page_pfn < compound_pages); 41 - 42 - /* 43 - * Dom0 is mapped 1:1, while the Linux page can span across 44 - * multiple Xen pages, it's not possible for it to contain a 45 - * mix of local and foreign Xen pages. 
So if the first xen_pfn 46 - * == mfn the page is local otherwise it's a foreign page 47 - * grant-mapped in dom0. If the page is local we can safely 48 - * call the native dma_ops function, otherwise we call the xen 49 - * specific function. 50 - */ 51 - if (local) 52 - __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); 53 - else 54 - __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs); 55 - } 56 - 57 - static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, 58 - size_t size, enum dma_data_direction dir, unsigned long attrs) 59 - { 60 - unsigned long pfn = PFN_DOWN(handle); 61 - /* 62 - * Dom0 is mapped 1:1, while the Linux page can be spanned accross 63 - * multiple Xen page, it's not possible to have a mix of local and 64 - * foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a 65 - * foreign mfn will always return false. If the page is local we can 66 - * safely call the native dma_ops function, otherwise we call the xen 67 - * specific function. 
68 - */ 69 - if (pfn_valid(pfn)) { 70 - if (__generic_dma_ops(hwdev)->unmap_page) 71 - __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs); 72 - } else 73 - __xen_dma_unmap_page(hwdev, handle, size, dir, attrs); 74 - } 75 - 76 - static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, 77 - dma_addr_t handle, size_t size, enum dma_data_direction dir) 78 - { 79 - unsigned long pfn = PFN_DOWN(handle); 80 - if (pfn_valid(pfn)) { 81 - if (__generic_dma_ops(hwdev)->sync_single_for_cpu) 82 - __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir); 83 - } else 84 - __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir); 85 - } 86 - 87 - static inline void xen_dma_sync_single_for_device(struct device *hwdev, 88 - dma_addr_t handle, size_t size, enum dma_data_direction dir) 89 - { 90 - unsigned long pfn = PFN_DOWN(handle); 91 - if (pfn_valid(pfn)) { 92 - if (__generic_dma_ops(hwdev)->sync_single_for_device) 93 - __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir); 94 - } else 95 - __xen_dma_sync_single_for_device(hwdev, handle, size, dir); 96 - } 97 - 98 - #endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */ 1 + #include <xen/arm/page-coherent.h>
+1 -122
arch/arm/include/asm/xen/page.h
··· 1 - #ifndef _ASM_ARM_XEN_PAGE_H 2 - #define _ASM_ARM_XEN_PAGE_H 3 - 4 - #include <asm/page.h> 5 - #include <asm/pgtable.h> 6 - 7 - #include <linux/pfn.h> 8 - #include <linux/types.h> 9 - #include <linux/dma-mapping.h> 10 - 11 - #include <xen/xen.h> 12 - #include <xen/interface/grant_table.h> 13 - 14 - #define phys_to_machine_mapping_valid(pfn) (1) 15 - 16 - /* Xen machine address */ 17 - typedef struct xmaddr { 18 - phys_addr_t maddr; 19 - } xmaddr_t; 20 - 21 - /* Xen pseudo-physical address */ 22 - typedef struct xpaddr { 23 - phys_addr_t paddr; 24 - } xpaddr_t; 25 - 26 - #define XMADDR(x) ((xmaddr_t) { .maddr = (x) }) 27 - #define XPADDR(x) ((xpaddr_t) { .paddr = (x) }) 28 - 29 - #define INVALID_P2M_ENTRY (~0UL) 30 - 31 - /* 32 - * The pseudo-physical frame (pfn) used in all the helpers is always based 33 - * on Xen page granularity (i.e 4KB). 34 - * 35 - * A Linux page may be split across multiple non-contiguous Xen page so we 36 - * have to keep track with frame based on 4KB page granularity. 37 - * 38 - * PV drivers should never make a direct usage of those helpers (particularly 39 - * pfn_to_gfn and gfn_to_pfn). 
40 - */ 41 - 42 - unsigned long __pfn_to_mfn(unsigned long pfn); 43 - extern struct rb_root phys_to_mach; 44 - 45 - /* Pseudo-physical <-> Guest conversion */ 46 - static inline unsigned long pfn_to_gfn(unsigned long pfn) 47 - { 48 - return pfn; 49 - } 50 - 51 - static inline unsigned long gfn_to_pfn(unsigned long gfn) 52 - { 53 - return gfn; 54 - } 55 - 56 - /* Pseudo-physical <-> BUS conversion */ 57 - static inline unsigned long pfn_to_bfn(unsigned long pfn) 58 - { 59 - unsigned long mfn; 60 - 61 - if (phys_to_mach.rb_node != NULL) { 62 - mfn = __pfn_to_mfn(pfn); 63 - if (mfn != INVALID_P2M_ENTRY) 64 - return mfn; 65 - } 66 - 67 - return pfn; 68 - } 69 - 70 - static inline unsigned long bfn_to_pfn(unsigned long bfn) 71 - { 72 - return bfn; 73 - } 74 - 75 - #define bfn_to_local_pfn(bfn) bfn_to_pfn(bfn) 76 - 77 - /* VIRT <-> GUEST conversion */ 78 - #define virt_to_gfn(v) (pfn_to_gfn(virt_to_phys(v) >> XEN_PAGE_SHIFT)) 79 - #define gfn_to_virt(m) (__va(gfn_to_pfn(m) << XEN_PAGE_SHIFT)) 80 - 81 - /* Only used in PV code. But ARM guests are always HVM. */ 82 - static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr) 83 - { 84 - BUG(); 85 - } 86 - 87 - /* TODO: this shouldn't be here but it is because the frontend drivers 88 - * are using it (its rolled in headers) even though we won't hit the code path. 89 - * So for right now just punt with this. 
90 - */ 91 - static inline pte_t *lookup_address(unsigned long address, unsigned int *level) 92 - { 93 - BUG(); 94 - return NULL; 95 - } 96 - 97 - extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, 98 - struct gnttab_map_grant_ref *kmap_ops, 99 - struct page **pages, unsigned int count); 100 - 101 - extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, 102 - struct gnttab_unmap_grant_ref *kunmap_ops, 103 - struct page **pages, unsigned int count); 104 - 105 - bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn); 106 - bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn, 107 - unsigned long nr_pages); 108 - 109 - static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) 110 - { 111 - return __set_phys_to_machine(pfn, mfn); 112 - } 113 - 114 - #define xen_remap(cookie, size) ioremap_cache((cookie), (size)) 115 - #define xen_unmap(cookie) iounmap((cookie)) 116 - 117 - bool xen_arch_need_swiotlb(struct device *dev, 118 - phys_addr_t phys, 119 - dma_addr_t dev_addr); 120 - unsigned long xen_get_swiotlb_free_pages(unsigned int order); 121 - 122 - #endif /* _ASM_ARM_XEN_PAGE_H */ 1 + #include <xen/arm/page.h>
+1 -2
arch/arm/xen/enlighten.c
··· 372 372 * for secondary CPUs as they are brought up. 373 373 * For uniformity we use VCPUOP_register_vcpu_info even on cpu0. 374 374 */ 375 - xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info), 376 - sizeof(struct vcpu_info)); 375 + xen_vcpu_info = alloc_percpu(struct vcpu_info); 377 376 if (xen_vcpu_info == NULL) 378 377 return -ENOMEM; 379 378
+1 -1
arch/arm64/include/asm/xen/hypercall.h
··· 1 - #include <../../arm/include/asm/xen/hypercall.h> 1 + #include <xen/arm/hypercall.h>
+1 -1
arch/arm64/include/asm/xen/hypervisor.h
··· 1 - #include <../../arm/include/asm/xen/hypervisor.h> 1 + #include <xen/arm/hypervisor.h>
+1 -1
arch/arm64/include/asm/xen/interface.h
··· 1 - #include <../../arm/include/asm/xen/interface.h> 1 + #include <xen/arm/interface.h>
+1 -1
arch/arm64/include/asm/xen/page-coherent.h
··· 1 - #include <../../arm/include/asm/xen/page-coherent.h> 1 + #include <xen/arm/page-coherent.h>
+1 -1
arch/arm64/include/asm/xen/page.h
··· 1 - #include <../../arm/include/asm/xen/page.h> 1 + #include <xen/arm/page.h>
+8 -4
arch/x86/include/asm/e820.h
··· 1 1 #ifndef _ASM_X86_E820_H 2 2 #define _ASM_X86_E820_H 3 3 4 - #ifdef CONFIG_EFI 4 + /* 5 + * E820_X_MAX is the maximum size of the extended E820 table. The extended 6 + * table may contain up to 3 extra E820 entries per possible NUMA node, so we 7 + * make room for 3 * MAX_NUMNODES possible entries, beyond the standard 128. 8 + * Also note that E820_X_MAX *must* be defined before we include uapi/asm/e820.h. 9 + */ 5 10 #include <linux/numa.h> 6 11 #define E820_X_MAX (E820MAX + 3 * MAX_NUMNODES) 7 - #else /* ! CONFIG_EFI */ 8 - #define E820_X_MAX E820MAX 9 - #endif 12 + 10 13 #include <uapi/asm/e820.h> 14 + 11 15 #ifndef __ASSEMBLY__ 12 16 /* see comment in arch/x86/kernel/e820.c */ 13 17 extern struct e820map *e820;
+2 -2
arch/x86/pci/xen.c
··· 264 264 return 0; 265 265 266 266 error: 267 - dev_err(&dev->dev, 268 - "Xen PCI frontend has not registered MSI/MSI-X support!\n"); 267 + dev_err(&dev->dev, "Failed to create MSI%s! ret=%d!\n", 268 + type == PCI_CAP_ID_MSI ? "" : "-X", irq); 269 269 return irq; 270 270 } 271 271
+3 -3
arch/x86/xen/setup.c
··· 41 41 unsigned long xen_released_pages; 42 42 43 43 /* E820 map used during setting up memory. */ 44 - static struct e820entry xen_e820_map[E820MAX] __initdata; 44 + static struct e820entry xen_e820_map[E820_X_MAX] __initdata; 45 45 static u32 xen_e820_map_entries __initdata; 46 46 47 47 /* ··· 750 750 max_pfn = min(max_pfn, xen_start_info->nr_pages); 751 751 mem_end = PFN_PHYS(max_pfn); 752 752 753 - memmap.nr_entries = E820MAX; 753 + memmap.nr_entries = ARRAY_SIZE(xen_e820_map); 754 754 set_xen_guest_handle(memmap.buffer, xen_e820_map); 755 755 756 756 op = xen_initial_domain() ? ··· 923 923 int i; 924 924 int rc; 925 925 926 - memmap.nr_entries = E820MAX; 926 + memmap.nr_entries = ARRAY_SIZE(xen_e820_map); 927 927 set_xen_guest_handle(memmap.buffer, xen_e820_map); 928 928 929 929 rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
+14 -22
drivers/block/xen-blkback/xenbus.c
··· 533 533 struct xenbus_device *dev = be->dev; 534 534 struct xen_blkif *blkif = be->blkif; 535 535 int err; 536 - int state = 0, discard_enable; 536 + int state = 0; 537 537 struct block_device *bdev = be->blkif->vbd.bdev; 538 538 struct request_queue *q = bdev_get_queue(bdev); 539 539 540 - err = xenbus_scanf(XBT_NIL, dev->nodename, "discard-enable", "%d", 541 - &discard_enable); 542 - if (err == 1 && !discard_enable) 540 + if (!xenbus_read_unsigned(dev->nodename, "discard-enable", 1)) 543 541 return; 544 542 545 543 if (blk_queue_discard(q)) { ··· 1037 1039 xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol); 1038 1040 return -ENOSYS; 1039 1041 } 1040 - err = xenbus_scanf(XBT_NIL, dev->otherend, 1041 - "feature-persistent", "%u", &pers_grants); 1042 - if (err <= 0) 1043 - pers_grants = 0; 1044 - 1042 + pers_grants = xenbus_read_unsigned(dev->otherend, "feature-persistent", 1043 + 0); 1045 1044 be->blkif->vbd.feature_gnt_persistent = pers_grants; 1046 1045 be->blkif->vbd.overflow_max_grants = 0; 1047 1046 1048 1047 /* 1049 1048 * Read the number of hardware queues from frontend. 1050 1049 */ 1051 - err = xenbus_scanf(XBT_NIL, dev->otherend, "multi-queue-num-queues", 1052 - "%u", &requested_num_queues); 1053 - if (err < 0) { 1054 - requested_num_queues = 1; 1055 - } else { 1056 - if (requested_num_queues > xenblk_max_queues 1057 - || requested_num_queues == 0) { 1058 - /* Buggy or malicious guest. */ 1059 - xenbus_dev_fatal(dev, err, 1060 - "guest requested %u queues, exceeding the maximum of %u.", 1061 - requested_num_queues, xenblk_max_queues); 1062 - return -ENOSYS; 1063 - } 1050 + requested_num_queues = xenbus_read_unsigned(dev->otherend, 1051 + "multi-queue-num-queues", 1052 + 1); 1053 + if (requested_num_queues > xenblk_max_queues 1054 + || requested_num_queues == 0) { 1055 + /* Buggy or malicious guest. 
*/ 1056 + xenbus_dev_fatal(dev, err, 1057 + "guest requested %u queues, exceeding the maximum of %u.", 1058 + requested_num_queues, xenblk_max_queues); 1059 + return -ENOSYS; 1064 1060 } 1065 1061 be->blkif->nr_rings = requested_num_queues; 1066 1062 if (xen_blkif_alloc_rings(be->blkif))
+26 -55
drivers/block/xen-blkfront.c
··· 1758 1758 const char *message = NULL; 1759 1759 struct xenbus_transaction xbt; 1760 1760 int err; 1761 - unsigned int i, max_page_order = 0; 1762 - unsigned int ring_page_order = 0; 1761 + unsigned int i, max_page_order; 1762 + unsigned int ring_page_order; 1763 1763 1764 - err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, 1765 - "max-ring-page-order", "%u", &max_page_order); 1766 - if (err != 1) 1767 - info->nr_ring_pages = 1; 1768 - else { 1769 - ring_page_order = min(xen_blkif_max_ring_order, max_page_order); 1770 - info->nr_ring_pages = 1 << ring_page_order; 1771 - } 1764 + max_page_order = xenbus_read_unsigned(info->xbdev->otherend, 1765 + "max-ring-page-order", 0); 1766 + ring_page_order = min(xen_blkif_max_ring_order, max_page_order); 1767 + info->nr_ring_pages = 1 << ring_page_order; 1772 1768 1773 1769 for (i = 0; i < info->nr_rings; i++) { 1774 1770 struct blkfront_ring_info *rinfo = &info->rinfo[i]; ··· 1873 1877 1874 1878 static int negotiate_mq(struct blkfront_info *info) 1875 1879 { 1876 - unsigned int backend_max_queues = 0; 1877 - int err; 1880 + unsigned int backend_max_queues; 1878 1881 unsigned int i; 1879 1882 1880 1883 BUG_ON(info->nr_rings); 1881 1884 1882 1885 /* Check if backend supports multiple queues. */ 1883 - err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, 1884 - "multi-queue-max-queues", "%u", &backend_max_queues); 1885 - if (err < 0) 1886 - backend_max_queues = 1; 1887 - 1886 + backend_max_queues = xenbus_read_unsigned(info->xbdev->otherend, 1887 + "multi-queue-max-queues", 1); 1888 1888 info->nr_rings = min(backend_max_queues, xen_blkif_max_queues); 1889 1889 /* We need at least one ring. 
*/ 1890 1890 if (!info->nr_rings) ··· 2188 2196 int err; 2189 2197 unsigned int discard_granularity; 2190 2198 unsigned int discard_alignment; 2191 - unsigned int discard_secure; 2192 2199 2193 2200 info->feature_discard = 1; 2194 2201 err = xenbus_gather(XBT_NIL, info->xbdev->otherend, ··· 2198 2207 info->discard_granularity = discard_granularity; 2199 2208 info->discard_alignment = discard_alignment; 2200 2209 } 2201 - err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, 2202 - "discard-secure", "%u", &discard_secure); 2203 - if (err > 0) 2204 - info->feature_secdiscard = !!discard_secure; 2210 + info->feature_secdiscard = 2211 + !!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure", 2212 + 0); 2205 2213 } 2206 2214 2207 2215 static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo) ··· 2292 2302 */ 2293 2303 static void blkfront_gather_backend_features(struct blkfront_info *info) 2294 2304 { 2295 - int err; 2296 - int barrier, flush, discard, persistent; 2297 2305 unsigned int indirect_segments; 2298 2306 2299 2307 info->feature_flush = 0; 2300 2308 info->feature_fua = 0; 2301 - 2302 - err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, 2303 - "feature-barrier", "%d", &barrier); 2304 2309 2305 2310 /* 2306 2311 * If there's no "feature-barrier" defined, then it means ··· 2304 2319 * 2305 2320 * If there are barriers, then we use flush. 2306 2321 */ 2307 - if (err > 0 && barrier) { 2322 + if (xenbus_read_unsigned(info->xbdev->otherend, "feature-barrier", 0)) { 2308 2323 info->feature_flush = 1; 2309 2324 info->feature_fua = 1; 2310 2325 } ··· 2313 2328 * And if there is "feature-flush-cache" use that above 2314 2329 * barriers. 
2315 2330 */ 2316 - err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, 2317 - "feature-flush-cache", "%d", &flush); 2318 - 2319 - if (err > 0 && flush) { 2331 + if (xenbus_read_unsigned(info->xbdev->otherend, "feature-flush-cache", 2332 + 0)) { 2320 2333 info->feature_flush = 1; 2321 2334 info->feature_fua = 0; 2322 2335 } 2323 2336 2324 - err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, 2325 - "feature-discard", "%d", &discard); 2326 - 2327 - if (err > 0 && discard) 2337 + if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0)) 2328 2338 blkfront_setup_discard(info); 2329 2339 2330 - err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, 2331 - "feature-persistent", "%d", &persistent); 2332 - if (err <= 0) 2333 - info->feature_persistent = 0; 2334 - else 2335 - info->feature_persistent = persistent; 2340 + info->feature_persistent = 2341 + xenbus_read_unsigned(info->xbdev->otherend, 2342 + "feature-persistent", 0); 2336 2343 2337 - err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, 2338 - "feature-max-indirect-segments", "%u", 2339 - &indirect_segments); 2340 - if (err <= 0) 2341 - info->max_indirect_segments = 0; 2342 - else 2343 - info->max_indirect_segments = min(indirect_segments, 2344 - xen_blkif_max_segments); 2344 + indirect_segments = xenbus_read_unsigned(info->xbdev->otherend, 2345 + "feature-max-indirect-segments", 0); 2346 + info->max_indirect_segments = min(indirect_segments, 2347 + xen_blkif_max_segments); 2345 2348 } 2346 2349 2347 2350 /* ··· 2394 2421 * provide this. Assume physical sector size to be the same as 2395 2422 * sector_size in that case. 
2396 2423 */ 2397 - err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, 2398 - "physical-sector-size", "%u", &physical_sector_size); 2399 - if (err != 1) 2400 - physical_sector_size = sector_size; 2401 - 2424 + physical_sector_size = xenbus_read_unsigned(info->xbdev->otherend, 2425 + "physical-sector-size", 2426 + sector_size); 2402 2427 blkfront_gather_backend_features(info); 2403 2428 for (i = 0; i < info->nr_rings; i++) { 2404 2429 err = blkfront_setup_indirect(&info->rinfo[i]);
+2 -6
drivers/char/tpm/xen-tpmfront.c
··· 337 337 static void backend_changed(struct xenbus_device *dev, 338 338 enum xenbus_state backend_state) 339 339 { 340 - int val; 341 - 342 340 switch (backend_state) { 343 341 case XenbusStateInitialised: 344 342 case XenbusStateConnected: 345 343 if (dev->state == XenbusStateConnected) 346 344 break; 347 345 348 - if (xenbus_scanf(XBT_NIL, dev->otherend, 349 - "feature-protocol-v2", "%d", &val) < 0) 350 - val = 0; 351 - if (!val) { 346 + if (!xenbus_read_unsigned(dev->otherend, "feature-protocol-v2", 347 + 0)) { 352 348 xenbus_dev_fatal(dev, -EINVAL, 353 349 "vTPM protocol 2 required"); 354 350 return;
+5 -8
drivers/input/misc/xen-kbdfront.c
··· 108 108 static int xenkbd_probe(struct xenbus_device *dev, 109 109 const struct xenbus_device_id *id) 110 110 { 111 - int ret, i, abs; 111 + int ret, i; 112 + unsigned int abs; 112 113 struct xenkbd_info *info; 113 114 struct input_dev *kbd, *ptr; 114 115 ··· 128 127 if (!info->page) 129 128 goto error_nomem; 130 129 131 - if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-abs-pointer", "%d", &abs) < 0) 132 - abs = 0; 130 + abs = xenbus_read_unsigned(dev->otherend, "feature-abs-pointer", 0); 133 131 if (abs) { 134 132 ret = xenbus_write(XBT_NIL, dev->nodename, 135 133 "request-abs-pointer", "1"); ··· 322 322 323 323 case XenbusStateInitWait: 324 324 InitWait: 325 - ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend, 326 - "feature-abs-pointer", "%d", &val); 327 - if (ret < 0) 328 - val = 0; 329 - if (val) { 325 + if (xenbus_read_unsigned(info->xbdev->otherend, 326 + "feature-abs-pointer", 0)) { 330 327 ret = xenbus_write(XBT_NIL, info->xbdev->nodename, 331 328 "request-abs-pointer", "1"); 332 329 if (ret)
+15 -37
drivers/net/xen-netback/xenbus.c
··· 785 785 struct xenvif *vif = container_of(watch, struct xenvif, 786 786 mcast_ctrl_watch); 787 787 struct xenbus_device *dev = xenvif_to_xenbus_device(vif); 788 - int val; 789 788 790 - if (xenbus_scanf(XBT_NIL, dev->otherend, 791 - "request-multicast-control", "%d", &val) < 0) 792 - val = 0; 793 - vif->multicast_control = !!val; 789 + vif->multicast_control = !!xenbus_read_unsigned(dev->otherend, 790 + "request-multicast-control", 0); 794 791 } 795 792 796 793 static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev, ··· 931 934 /* Check whether the frontend requested multiple queues 932 935 * and read the number requested. 933 936 */ 934 - err = xenbus_scanf(XBT_NIL, dev->otherend, 935 - "multi-queue-num-queues", 936 - "%u", &requested_num_queues); 937 - if (err < 0) { 938 - requested_num_queues = 1; /* Fall back to single queue */ 939 - } else if (requested_num_queues > xenvif_max_queues) { 937 + requested_num_queues = xenbus_read_unsigned(dev->otherend, 938 + "multi-queue-num-queues", 1); 939 + if (requested_num_queues > xenvif_max_queues) { 940 940 /* buggy or malicious guest */ 941 - xenbus_dev_fatal(dev, err, 941 + xenbus_dev_fatal(dev, -EINVAL, 942 942 "guest requested %u queues, exceeding the maximum of %u.", 943 943 requested_num_queues, xenvif_max_queues); 944 944 return; ··· 1128 1134 struct xenvif *vif = be->vif; 1129 1135 struct xenbus_device *dev = be->dev; 1130 1136 unsigned int rx_copy; 1131 - int err, val; 1137 + int err; 1132 1138 1133 1139 err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u", 1134 1140 &rx_copy); ··· 1144 1150 if (!rx_copy) 1145 1151 return -EOPNOTSUPP; 1146 1152 1147 - if (xenbus_scanf(XBT_NIL, dev->otherend, 1148 - "feature-rx-notify", "%d", &val) < 0) 1149 - val = 0; 1150 - if (!val) { 1153 + if (!xenbus_read_unsigned(dev->otherend, "feature-rx-notify", 0)) { 1151 1154 /* - Reduce drain timeout to poll more frequently for 1152 1155 * Rx requests. 1153 1156 * - Disable Rx stall detection. 
··· 1153 1162 be->vif->stall_timeout = 0; 1154 1163 } 1155 1164 1156 - if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg", 1157 - "%d", &val) < 0) 1158 - val = 0; 1159 - vif->can_sg = !!val; 1165 + vif->can_sg = !!xenbus_read_unsigned(dev->otherend, "feature-sg", 0); 1160 1166 1161 1167 vif->gso_mask = 0; 1162 1168 1163 - if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4", 1164 - "%d", &val) < 0) 1165 - val = 0; 1166 - if (val) 1169 + if (xenbus_read_unsigned(dev->otherend, "feature-gso-tcpv4", 0)) 1167 1170 vif->gso_mask |= GSO_BIT(TCPV4); 1168 1171 1169 - if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv6", 1170 - "%d", &val) < 0) 1171 - val = 0; 1172 - if (val) 1172 + if (xenbus_read_unsigned(dev->otherend, "feature-gso-tcpv6", 0)) 1173 1173 vif->gso_mask |= GSO_BIT(TCPV6); 1174 1174 1175 - if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload", 1176 - "%d", &val) < 0) 1177 - val = 0; 1178 - vif->ip_csum = !val; 1175 + vif->ip_csum = !xenbus_read_unsigned(dev->otherend, 1176 + "feature-no-csum-offload", 0); 1179 1177 1180 - if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-ipv6-csum-offload", 1181 - "%d", &val) < 0) 1182 - val = 0; 1183 - vif->ipv6_csum = !!val; 1178 + vif->ipv6_csum = !!xenbus_read_unsigned(dev->otherend, 1179 + "feature-ipv6-csum-offload", 0); 1184 1180 1185 1181 return 0; 1186 1182 }
+18 -49
drivers/net/xen-netfront.c
··· 1169 1169 netdev_features_t features) 1170 1170 { 1171 1171 struct netfront_info *np = netdev_priv(dev); 1172 - int val; 1173 1172 1174 - if (features & NETIF_F_SG) { 1175 - if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg", 1176 - "%d", &val) < 0) 1177 - val = 0; 1173 + if (features & NETIF_F_SG && 1174 + !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0)) 1175 + features &= ~NETIF_F_SG; 1178 1176 1179 - if (!val) 1180 - features &= ~NETIF_F_SG; 1181 - } 1177 + if (features & NETIF_F_IPV6_CSUM && 1178 + !xenbus_read_unsigned(np->xbdev->otherend, 1179 + "feature-ipv6-csum-offload", 0)) 1180 + features &= ~NETIF_F_IPV6_CSUM; 1182 1181 1183 - if (features & NETIF_F_IPV6_CSUM) { 1184 - if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, 1185 - "feature-ipv6-csum-offload", "%d", &val) < 0) 1186 - val = 0; 1182 + if (features & NETIF_F_TSO && 1183 + !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0)) 1184 + features &= ~NETIF_F_TSO; 1187 1185 1188 - if (!val) 1189 - features &= ~NETIF_F_IPV6_CSUM; 1190 - } 1191 - 1192 - if (features & NETIF_F_TSO) { 1193 - if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, 1194 - "feature-gso-tcpv4", "%d", &val) < 0) 1195 - val = 0; 1196 - 1197 - if (!val) 1198 - features &= ~NETIF_F_TSO; 1199 - } 1200 - 1201 - if (features & NETIF_F_TSO6) { 1202 - if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, 1203 - "feature-gso-tcpv6", "%d", &val) < 0) 1204 - val = 0; 1205 - 1206 - if (!val) 1207 - features &= ~NETIF_F_TSO6; 1208 - } 1186 + if (features & NETIF_F_TSO6 && 1187 + !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0)) 1188 + features &= ~NETIF_F_TSO6; 1209 1189 1210 1190 return features; 1211 1191 } ··· 1803 1823 info->netdev->irq = 0; 1804 1824 1805 1825 /* Check if backend supports multiple queues */ 1806 - err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, 1807 - "multi-queue-max-queues", "%u", &max_queues); 1808 - if (err < 0) 1809 - max_queues = 1; 1826 + max_queues = 
xenbus_read_unsigned(info->xbdev->otherend, 1827 + "multi-queue-max-queues", 1); 1810 1828 num_queues = min(max_queues, xennet_max_queues); 1811 1829 1812 1830 /* Check feature-split-event-channels */ 1813 - err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, 1814 - "feature-split-event-channels", "%u", 1815 - &feature_split_evtchn); 1816 - if (err < 0) 1817 - feature_split_evtchn = 0; 1831 + feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend, 1832 + "feature-split-event-channels", 0); 1818 1833 1819 1834 /* Read mac addr. */ 1820 1835 err = xen_net_read_mac(dev, info->netdev->dev_addr); ··· 1943 1968 struct netfront_info *np = netdev_priv(dev); 1944 1969 unsigned int num_queues = 0; 1945 1970 int err; 1946 - unsigned int feature_rx_copy; 1947 1971 unsigned int j = 0; 1948 1972 struct netfront_queue *queue = NULL; 1949 1973 1950 - err = xenbus_scanf(XBT_NIL, np->xbdev->otherend, 1951 - "feature-rx-copy", "%u", &feature_rx_copy); 1952 - if (err != 1) 1953 - feature_rx_copy = 0; 1954 - 1955 - if (!feature_rx_copy) { 1974 + if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) { 1956 1975 dev_info(&dev->dev, 1957 1976 "backend does not support copying receive path\n"); 1958 1977 return -ENODEV;
+2 -4
drivers/pci/xen-pcifront.c
··· 1038 1038 err = -ENOMEM; 1039 1039 goto out; 1040 1040 } 1041 - err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str, "%d", 1042 - &state); 1043 - if (err != 1) 1044 - state = XenbusStateUnknown; 1041 + state = xenbus_read_unsigned(pdev->xdev->otherend, str, 1042 + XenbusStateUnknown); 1045 1043 1046 1044 if (state != XenbusStateClosing) 1047 1045 continue;
+84 -109
drivers/scsi/xen-scsifront.c
··· 79 79 struct vscsifrnt_shadow { 80 80 /* command between backend and frontend */ 81 81 unsigned char act; 82 + uint8_t nr_segments; 82 83 uint16_t rqid; 84 + uint16_t ref_rqid; 83 85 84 86 unsigned int nr_grants; /* number of grants in gref[] */ 85 87 struct scsiif_request_segment *sg; /* scatter/gather elements */ 88 + struct scsiif_request_segment seg[VSCSIIF_SG_TABLESIZE]; 86 89 87 90 /* Do reset or abort function. */ 88 91 wait_queue_head_t wq_reset; /* reset work queue */ ··· 175 172 scsifront_wake_up(info); 176 173 } 177 174 178 - static struct vscsiif_request *scsifront_pre_req(struct vscsifrnt_info *info) 175 + static int scsifront_do_request(struct vscsifrnt_info *info, 176 + struct vscsifrnt_shadow *shadow) 179 177 { 180 178 struct vscsiif_front_ring *ring = &(info->ring); 181 179 struct vscsiif_request *ring_req; 180 + struct scsi_cmnd *sc = shadow->sc; 182 181 uint32_t id; 182 + int i, notify; 183 + 184 + if (RING_FULL(&info->ring)) 185 + return -EBUSY; 183 186 184 187 id = scsifront_get_rqid(info); /* use id in response */ 185 188 if (id >= VSCSIIF_MAX_REQS) 186 - return NULL; 189 + return -EBUSY; 190 + 191 + info->shadow[id] = shadow; 192 + shadow->rqid = id; 187 193 188 194 ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt); 189 - 190 195 ring->req_prod_pvt++; 191 196 192 - ring_req->rqid = (uint16_t)id; 197 + ring_req->rqid = id; 198 + ring_req->act = shadow->act; 199 + ring_req->ref_rqid = shadow->ref_rqid; 200 + ring_req->nr_segments = shadow->nr_segments; 193 201 194 - return ring_req; 195 - } 202 + ring_req->id = sc->device->id; 203 + ring_req->lun = sc->device->lun; 204 + ring_req->channel = sc->device->channel; 205 + ring_req->cmd_len = sc->cmd_len; 196 206 197 - static void scsifront_do_request(struct vscsifrnt_info *info) 198 - { 199 - struct vscsiif_front_ring *ring = &(info->ring); 200 - int notify; 207 + BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE); 208 + 209 + memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len); 210 + 211 + 
ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction; 212 + ring_req->timeout_per_command = sc->request->timeout / HZ; 213 + 214 + for (i = 0; i < (shadow->nr_segments & ~VSCSIIF_SG_GRANT); i++) 215 + ring_req->seg[i] = shadow->seg[i]; 201 216 202 217 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify); 203 218 if (notify) 204 219 notify_remote_via_irq(info->irq); 220 + 221 + return 0; 205 222 } 206 223 207 - static void scsifront_gnttab_done(struct vscsifrnt_info *info, uint32_t id) 224 + static void scsifront_gnttab_done(struct vscsifrnt_info *info, 225 + struct vscsifrnt_shadow *shadow) 208 226 { 209 - struct vscsifrnt_shadow *s = info->shadow[id]; 210 227 int i; 211 228 212 - if (s->sc->sc_data_direction == DMA_NONE) 229 + if (shadow->sc->sc_data_direction == DMA_NONE) 213 230 return; 214 231 215 - for (i = 0; i < s->nr_grants; i++) { 216 - if (unlikely(gnttab_query_foreign_access(s->gref[i]) != 0)) { 232 + for (i = 0; i < shadow->nr_grants; i++) { 233 + if (unlikely(gnttab_query_foreign_access(shadow->gref[i]))) { 217 234 shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME 218 235 "grant still in use by backend\n"); 219 236 BUG(); 220 237 } 221 - gnttab_end_foreign_access(s->gref[i], 0, 0UL); 238 + gnttab_end_foreign_access(shadow->gref[i], 0, 0UL); 222 239 } 223 240 224 - kfree(s->sg); 241 + kfree(shadow->sg); 225 242 } 226 243 227 244 static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info, 228 245 struct vscsiif_response *ring_rsp) 229 246 { 247 + struct vscsifrnt_shadow *shadow; 230 248 struct scsi_cmnd *sc; 231 249 uint32_t id; 232 250 uint8_t sense_len; 233 251 234 252 id = ring_rsp->rqid; 235 - sc = info->shadow[id]->sc; 253 + shadow = info->shadow[id]; 254 + sc = shadow->sc; 236 255 237 256 BUG_ON(sc == NULL); 238 257 239 - scsifront_gnttab_done(info, id); 258 + scsifront_gnttab_done(info, shadow); 240 259 scsifront_put_rqid(info, id); 241 260 242 261 sc->result = ring_rsp->rslt; ··· 391 366 392 367 static int map_data_for_request(struct 
vscsifrnt_info *info, 393 368 struct scsi_cmnd *sc, 394 - struct vscsiif_request *ring_req, 395 369 struct vscsifrnt_shadow *shadow) 396 370 { 397 371 grant_ref_t gref_head; ··· 403 379 struct scatterlist *sg; 404 380 struct scsiif_request_segment *seg; 405 381 406 - ring_req->nr_segments = 0; 407 382 if (sc->sc_data_direction == DMA_NONE || !data_len) 408 383 return 0; 409 384 ··· 421 398 if (!shadow->sg) 422 399 return -ENOMEM; 423 400 } 424 - seg = shadow->sg ? : ring_req->seg; 401 + seg = shadow->sg ? : shadow->seg; 425 402 426 403 err = gnttab_alloc_grant_references(seg_grants + data_grants, 427 404 &gref_head); ··· 446 423 info->dev->otherend_id, 447 424 xen_page_to_gfn(page), 1); 448 425 shadow->gref[ref_cnt] = ref; 449 - ring_req->seg[ref_cnt].gref = ref; 450 - ring_req->seg[ref_cnt].offset = (uint16_t)off; 451 - ring_req->seg[ref_cnt].length = (uint16_t)bytes; 426 + shadow->seg[ref_cnt].gref = ref; 427 + shadow->seg[ref_cnt].offset = (uint16_t)off; 428 + shadow->seg[ref_cnt].length = (uint16_t)bytes; 452 429 453 430 page++; 454 431 len -= bytes; ··· 496 473 } 497 474 498 475 if (seg_grants) 499 - ring_req->nr_segments = VSCSIIF_SG_GRANT | seg_grants; 476 + shadow->nr_segments = VSCSIIF_SG_GRANT | seg_grants; 500 477 else 501 - ring_req->nr_segments = (uint8_t)ref_cnt; 478 + shadow->nr_segments = (uint8_t)ref_cnt; 502 479 shadow->nr_grants = ref_cnt; 503 480 504 481 return 0; 505 - } 506 - 507 - static struct vscsiif_request *scsifront_command2ring( 508 - struct vscsifrnt_info *info, struct scsi_cmnd *sc, 509 - struct vscsifrnt_shadow *shadow) 510 - { 511 - struct vscsiif_request *ring_req; 512 - 513 - memset(shadow, 0, sizeof(*shadow)); 514 - 515 - ring_req = scsifront_pre_req(info); 516 - if (!ring_req) 517 - return NULL; 518 - 519 - info->shadow[ring_req->rqid] = shadow; 520 - shadow->rqid = ring_req->rqid; 521 - 522 - ring_req->id = sc->device->id; 523 - ring_req->lun = sc->device->lun; 524 - ring_req->channel = sc->device->channel; 525 - 
ring_req->cmd_len = sc->cmd_len; 526 - 527 - BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE); 528 - 529 - memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len); 530 - 531 - ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction; 532 - ring_req->timeout_per_command = sc->request->timeout / HZ; 533 - 534 - return ring_req; 535 482 } 536 483 537 484 static int scsifront_enter(struct vscsifrnt_info *info) ··· 529 536 struct scsi_cmnd *sc) 530 537 { 531 538 struct vscsifrnt_info *info = shost_priv(shost); 532 - struct vscsiif_request *ring_req; 533 539 struct vscsifrnt_shadow *shadow = scsi_cmd_priv(sc); 534 540 unsigned long flags; 535 541 int err; 536 - uint16_t rqid; 542 + 543 + sc->result = 0; 544 + memset(shadow, 0, sizeof(*shadow)); 545 + 546 + shadow->sc = sc; 547 + shadow->act = VSCSIIF_ACT_SCSI_CDB; 537 548 538 549 spin_lock_irqsave(shost->host_lock, flags); 539 550 if (scsifront_enter(info)) { 540 551 spin_unlock_irqrestore(shost->host_lock, flags); 541 552 return SCSI_MLQUEUE_HOST_BUSY; 542 553 } 543 - if (RING_FULL(&info->ring)) 544 - goto busy; 545 554 546 - ring_req = scsifront_command2ring(info, sc, shadow); 547 - if (!ring_req) 548 - goto busy; 549 - 550 - sc->result = 0; 551 - 552 - rqid = ring_req->rqid; 553 - ring_req->act = VSCSIIF_ACT_SCSI_CDB; 554 - 555 - shadow->sc = sc; 556 - shadow->act = VSCSIIF_ACT_SCSI_CDB; 557 - 558 - err = map_data_for_request(info, sc, ring_req, shadow); 555 + err = map_data_for_request(info, sc, shadow); 559 556 if (err < 0) { 560 557 pr_debug("%s: err %d\n", __func__, err); 561 - scsifront_put_rqid(info, rqid); 562 558 scsifront_return(info); 563 559 spin_unlock_irqrestore(shost->host_lock, flags); 564 560 if (err == -ENOMEM) ··· 557 575 return 0; 558 576 } 559 577 560 - scsifront_do_request(info); 578 + if (scsifront_do_request(info, shadow)) { 579 + scsifront_gnttab_done(info, shadow); 580 + goto busy; 581 + } 582 + 561 583 scsifront_return(info); 562 584 spin_unlock_irqrestore(shost->host_lock, flags); 563 585 ··· 584 
598 struct Scsi_Host *host = sc->device->host; 585 599 struct vscsifrnt_info *info = shost_priv(host); 586 600 struct vscsifrnt_shadow *shadow, *s = scsi_cmd_priv(sc); 587 - struct vscsiif_request *ring_req; 588 601 int err = 0; 589 602 590 - shadow = kmalloc(sizeof(*shadow), GFP_NOIO); 603 + shadow = kzalloc(sizeof(*shadow), GFP_NOIO); 591 604 if (!shadow) 592 605 return FAILED; 606 + 607 + shadow->act = act; 608 + shadow->rslt_reset = RSLT_RESET_WAITING; 609 + shadow->sc = sc; 610 + shadow->ref_rqid = s->rqid; 611 + init_waitqueue_head(&shadow->wq_reset); 593 612 594 613 spin_lock_irq(host->host_lock); 595 614 596 615 for (;;) { 597 - if (!RING_FULL(&info->ring)) { 598 - ring_req = scsifront_command2ring(info, sc, shadow); 599 - if (ring_req) 600 - break; 601 - } 602 - if (err || info->pause) { 603 - spin_unlock_irq(host->host_lock); 604 - kfree(shadow); 605 - return FAILED; 606 - } 616 + if (scsifront_enter(info)) 617 + goto fail; 618 + 619 + if (!scsifront_do_request(info, shadow)) 620 + break; 621 + 622 + scsifront_return(info); 623 + if (err) 624 + goto fail; 607 625 info->wait_ring_available = 1; 608 626 spin_unlock_irq(host->host_lock); 609 627 err = wait_event_interruptible(info->wq_sync, 610 628 !info->wait_ring_available); 611 629 spin_lock_irq(host->host_lock); 612 630 } 613 - 614 - if (scsifront_enter(info)) { 615 - spin_unlock_irq(host->host_lock); 616 - return FAILED; 617 - } 618 - 619 - ring_req->act = act; 620 - ring_req->ref_rqid = s->rqid; 621 - 622 - shadow->act = act; 623 - shadow->rslt_reset = RSLT_RESET_WAITING; 624 - init_waitqueue_head(&shadow->wq_reset); 625 - 626 - ring_req->nr_segments = 0; 627 - 628 - scsifront_do_request(info); 629 631 630 632 spin_unlock_irq(host->host_lock); 631 633 err = wait_event_interruptible(shadow->wq_reset, shadow->wait_reset); ··· 633 659 scsifront_return(info); 634 660 spin_unlock_irq(host->host_lock); 635 661 return err; 662 + 663 + fail: 664 + spin_unlock_irq(host->host_lock); 665 + kfree(shadow); 666 + 
return FAILED; 636 667 } 637 668 638 669 static int scsifront_eh_abort_handler(struct scsi_cmnd *sc) ··· 1039 1060 struct vscsifrnt_info *info) 1040 1061 { 1041 1062 unsigned int sg_grant, nr_segs; 1042 - int ret; 1043 1063 struct Scsi_Host *host = info->host; 1044 1064 1045 - ret = xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg-grant", "%u", 1046 - &sg_grant); 1047 - if (ret != 1) 1048 - sg_grant = 0; 1065 + sg_grant = xenbus_read_unsigned(dev->otherend, "feature-sg-grant", 0); 1049 1066 nr_segs = min_t(unsigned int, sg_grant, SG_ALL); 1050 1067 nr_segs = max_t(unsigned int, nr_segs, VSCSIIF_SG_TABLESIZE); 1051 1068 nr_segs = min_t(unsigned int, nr_segs,
+4 -9
drivers/video/fbdev/xen-fbfront.c
··· 633 633 enum xenbus_state backend_state) 634 634 { 635 635 struct xenfb_info *info = dev_get_drvdata(&dev->dev); 636 - int val; 637 636 638 637 switch (backend_state) { 639 638 case XenbusStateInitialising: ··· 656 657 if (dev->state != XenbusStateConnected) 657 658 goto InitWait; /* no InitWait seen yet, fudge it */ 658 659 659 - if (xenbus_scanf(XBT_NIL, info->xbdev->otherend, 660 - "request-update", "%d", &val) < 0) 661 - val = 0; 662 - if (val) 660 + if (xenbus_read_unsigned(info->xbdev->otherend, 661 + "request-update", 0)) 663 662 info->update_wanted = 1; 664 663 665 - if (xenbus_scanf(XBT_NIL, dev->otherend, 666 - "feature-resize", "%d", &val) < 0) 667 - val = 0; 668 - info->feature_resize = val; 664 + info->feature_resize = xenbus_read_unsigned(dev->otherend, 665 + "feature-resize", 0); 669 666 break; 670 667 671 668 case XenbusStateClosed:
+2 -4
drivers/xen/balloon.c
··· 180 180 static void balloon_append(struct page *page) 181 181 { 182 182 __balloon_append(page); 183 - adjust_managed_page_count(page, -1); 184 183 } 185 184 186 185 /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */ ··· 199 200 balloon_stats.balloon_high--; 200 201 else 201 202 balloon_stats.balloon_low--; 202 - 203 - adjust_managed_page_count(page, 1); 204 203 205 204 return page; 206 205 } ··· 475 478 #endif 476 479 477 480 /* Relinquish the page back to the allocator. */ 478 - __free_reserved_page(page); 481 + free_reserved_page(page); 479 482 } 480 483 481 484 balloon_stats.current_pages += rc; ··· 506 509 state = BP_EAGAIN; 507 510 break; 508 511 } 512 + adjust_managed_page_count(page, -1); 509 513 scrub_page(page); 510 514 list_add(&page->lru, &pages); 511 515 }
+1 -1
drivers/xen/events/events_base.c
··· 947 947 continue; 948 948 if (status.status != EVTCHNSTAT_virq) 949 949 continue; 950 - if (status.u.virq == virq && status.vcpu == cpu) { 950 + if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) { 951 951 rc = port; 952 952 break; 953 953 }
+6 -3
drivers/xen/gntalloc.c
··· 127 127 struct gntalloc_gref *gref, *next; 128 128 129 129 readonly = !(op->flags & GNTALLOC_FLAG_WRITABLE); 130 - rc = -ENOMEM; 131 130 for (i = 0; i < op->count; i++) { 132 131 gref = kzalloc(sizeof(*gref), GFP_KERNEL); 133 - if (!gref) 132 + if (!gref) { 133 + rc = -ENOMEM; 134 134 goto undo; 135 + } 135 136 list_add_tail(&gref->next_gref, &queue_gref); 136 137 list_add_tail(&gref->next_file, &queue_file); 137 138 gref->users = 1; 138 139 gref->file_index = op->index + i * PAGE_SIZE; 139 140 gref->page = alloc_page(GFP_KERNEL|__GFP_ZERO); 140 - if (!gref->page) 141 + if (!gref->page) { 142 + rc = -ENOMEM; 141 143 goto undo; 144 + } 142 145 143 146 /* Grant foreign access to the page. */ 144 147 rc = gnttab_grant_foreign_access(op->domid,
+1 -1
drivers/xen/gntdev.c
··· 1007 1007 1008 1008 vma->vm_ops = &gntdev_vmops; 1009 1009 1010 - vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO; 1010 + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP; 1011 1011 1012 1012 if (use_ptemod) 1013 1013 vma->vm_flags |= VM_DONTCOPY;
+1 -5
drivers/xen/platform-pci.c
··· 125 125 .id_table = platform_pci_tbl, 126 126 }; 127 127 128 - static int __init platform_pci_init(void) 129 - { 130 - return pci_register_driver(&platform_driver); 131 - } 132 - device_initcall(platform_pci_init); 128 + builtin_pci_driver(platform_driver);
+3 -5
drivers/xen/xen-pciback/xenbus.c
··· 362 362 int err = 0; 363 363 int num_devs; 364 364 int domain, bus, slot, func; 365 - int substate; 365 + unsigned int substate; 366 366 int i, len; 367 367 char state_str[64]; 368 368 char dev_str[64]; ··· 395 395 "configuration"); 396 396 goto out; 397 397 } 398 - err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, state_str, 399 - "%d", &substate); 400 - if (err != 1) 401 - substate = XenbusStateUnknown; 398 + substate = xenbus_read_unsigned(pdev->xdev->nodename, state_str, 399 + XenbusStateUnknown); 402 400 403 401 switch (substate) { 404 402 case XenbusStateInitialising:
+2
drivers/xen/xenbus/xenbus_dev_frontend.c
··· 538 538 539 539 nonseekable_open(inode, filp); 540 540 541 + filp->f_mode &= ~FMODE_ATOMIC_POS; /* cdev-style semantics */ 542 + 541 543 u = kzalloc(sizeof(*u), GFP_KERNEL); 542 544 if (u == NULL) 543 545 return -ENOMEM;
+2 -2
drivers/xen/xenbus/xenbus_probe.c
··· 702 702 */ 703 703 static int __init xenstored_local_init(void) 704 704 { 705 - int err = 0; 705 + int err = -ENOMEM; 706 706 unsigned long page = 0; 707 707 struct evtchn_alloc_unbound alloc_unbound; 708 708 ··· 826 826 * Create xenfs mountpoint in /proc for compatibility with 827 827 * utilities that expect to find "xenbus" under "/proc/xen". 828 828 */ 829 - proc_mkdir("xen", NULL); 829 + proc_create_mount_point("xen"); 830 830 #endif 831 831 832 832 out_error:
+1 -7
drivers/xen/xenbus/xenbus_probe_backend.c
··· 224 224 225 225 int xenbus_dev_is_online(struct xenbus_device *dev) 226 226 { 227 - int rc, val; 228 - 229 - rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val); 230 - if (rc != 1) 231 - val = 0; /* no online node present */ 232 - 233 - return val; 227 + return !!xenbus_read_unsigned(dev->nodename, "online", 0); 234 228 } 235 229 EXPORT_SYMBOL_GPL(xenbus_dev_is_online); 236 230
+18 -4
drivers/xen/xenbus/xenbus_xs.c
··· 559 559 } 560 560 EXPORT_SYMBOL_GPL(xenbus_scanf); 561 561 562 + /* Read an (optional) unsigned value. */ 563 + unsigned int xenbus_read_unsigned(const char *dir, const char *node, 564 + unsigned int default_val) 565 + { 566 + unsigned int val; 567 + int ret; 568 + 569 + ret = xenbus_scanf(XBT_NIL, dir, node, "%u", &val); 570 + if (ret <= 0) 571 + val = default_val; 572 + 573 + return val; 574 + } 575 + EXPORT_SYMBOL_GPL(xenbus_read_unsigned); 576 + 562 577 /* Single printf and write: returns -errno or 0. */ 563 578 int xenbus_printf(struct xenbus_transaction t, 564 579 const char *dir, const char *node, const char *fmt, ...) ··· 687 672 } 688 673 static void xs_reset_watches(void) 689 674 { 690 - int err, supported = 0; 675 + int err; 691 676 692 677 if (!xen_hvm_domain() || xen_initial_domain()) 693 678 return; ··· 695 680 if (xen_strict_xenbus_quirk()) 696 681 return; 697 682 698 - err = xenbus_scanf(XBT_NIL, "control", 699 - "platform-feature-xs_reset_watches", "%d", &supported); 700 - if (err != 1 || !supported) 683 + if (!xenbus_read_unsigned("control", 684 + "platform-feature-xs_reset_watches", 0)) 701 685 return; 702 686 703 687 err = xs_error(xs_single(XBT_NIL, XS_RESET_WATCHES, "", NULL));
+1
fs/proc/generic.c
··· 479 479 } 480 480 return ent; 481 481 } 482 + EXPORT_SYMBOL(proc_create_mount_point); 482 483 483 484 struct proc_dir_entry *proc_create_data(const char *name, umode_t mode, 484 485 struct proc_dir_entry *parent,
-1
fs/proc/internal.h
··· 195 195 { 196 196 return S_ISDIR(pde->mode) && !pde->proc_iops; 197 197 } 198 - struct proc_dir_entry *proc_create_mount_point(const char *name); 199 198 200 199 /* 201 200 * inode.c
+2
include/linux/proc_fs.h
··· 21 21 struct proc_dir_entry *, void *); 22 22 extern struct proc_dir_entry *proc_mkdir_mode(const char *, umode_t, 23 23 struct proc_dir_entry *); 24 + struct proc_dir_entry *proc_create_mount_point(const char *name); 24 25 25 26 extern struct proc_dir_entry *proc_create_data(const char *, umode_t, 26 27 struct proc_dir_entry *, ··· 57 56 struct proc_dir_entry *parent,const char *dest) { return NULL;} 58 57 static inline struct proc_dir_entry *proc_mkdir(const char *name, 59 58 struct proc_dir_entry *parent) {return NULL;} 59 + static inline struct proc_dir_entry *proc_create_mount_point(const char *name) { return NULL; } 60 60 static inline struct proc_dir_entry *proc_mkdir_data(const char *name, 61 61 umode_t mode, struct proc_dir_entry *parent, void *data) { return NULL; } 62 62 static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
+87
include/xen/arm/hypercall.h
··· 1 + /****************************************************************************** 2 + * hypercall.h 3 + * 4 + * Linux-specific hypervisor handling. 5 + * 6 + * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012 7 + * 8 + * This program is free software; you can redistribute it and/or 9 + * modify it under the terms of the GNU General Public License version 2 10 + * as published by the Free Software Foundation; or, when distributed 11 + * separately from the Linux kernel or incorporated into other 12 + * software packages, subject to the following license: 13 + * 14 + * Permission is hereby granted, free of charge, to any person obtaining a copy 15 + * of this source file (the "Software"), to deal in the Software without 16 + * restriction, including without limitation the rights to use, copy, modify, 17 + * merge, publish, distribute, sublicense, and/or sell copies of the Software, 18 + * and to permit persons to whom the Software is furnished to do so, subject to 19 + * the following conditions: 20 + * 21 + * The above copyright notice and this permission notice shall be included in 22 + * all copies or substantial portions of the Software. 23 + * 24 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 25 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 26 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 27 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 28 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 29 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 30 + * IN THE SOFTWARE. 
31 + */ 32 + 33 + #ifndef _ASM_ARM_XEN_HYPERCALL_H 34 + #define _ASM_ARM_XEN_HYPERCALL_H 35 + 36 + #include <linux/bug.h> 37 + 38 + #include <xen/interface/xen.h> 39 + #include <xen/interface/sched.h> 40 + #include <xen/interface/platform.h> 41 + 42 + long privcmd_call(unsigned call, unsigned long a1, 43 + unsigned long a2, unsigned long a3, 44 + unsigned long a4, unsigned long a5); 45 + int HYPERVISOR_xen_version(int cmd, void *arg); 46 + int HYPERVISOR_console_io(int cmd, int count, char *str); 47 + int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count); 48 + int HYPERVISOR_sched_op(int cmd, void *arg); 49 + int HYPERVISOR_event_channel_op(int cmd, void *arg); 50 + unsigned long HYPERVISOR_hvm_op(int op, void *arg); 51 + int HYPERVISOR_memory_op(unsigned int cmd, void *arg); 52 + int HYPERVISOR_physdev_op(int cmd, void *arg); 53 + int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args); 54 + int HYPERVISOR_tmem_op(void *arg); 55 + int HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type); 56 + int HYPERVISOR_platform_op_raw(void *arg); 57 + static inline int HYPERVISOR_platform_op(struct xen_platform_op *op) 58 + { 59 + op->interface_version = XENPF_INTERFACE_VERSION; 60 + return HYPERVISOR_platform_op_raw(op); 61 + } 62 + int HYPERVISOR_multicall(struct multicall_entry *calls, uint32_t nr); 63 + 64 + static inline int 65 + HYPERVISOR_suspend(unsigned long start_info_mfn) 66 + { 67 + struct sched_shutdown r = { .reason = SHUTDOWN_suspend }; 68 + 69 + /* start_info_mfn is unused on ARM */ 70 + return HYPERVISOR_sched_op(SCHEDOP_shutdown, &r); 71 + } 72 + 73 + static inline void 74 + MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va, 75 + unsigned int new_val, unsigned long flags) 76 + { 77 + BUG(); 78 + } 79 + 80 + static inline void 81 + MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req, 82 + int count, int *success_count, domid_t domid) 83 + { 84 + BUG(); 85 + } 86 + 87 + #endif /* 
_ASM_ARM_XEN_HYPERCALL_H */
+39
include/xen/arm/hypervisor.h
··· 1 + #ifndef _ASM_ARM_XEN_HYPERVISOR_H 2 + #define _ASM_ARM_XEN_HYPERVISOR_H 3 + 4 + #include <linux/init.h> 5 + 6 + extern struct shared_info *HYPERVISOR_shared_info; 7 + extern struct start_info *xen_start_info; 8 + 9 + /* Lazy mode for batching updates / context switch */ 10 + enum paravirt_lazy_mode { 11 + PARAVIRT_LAZY_NONE, 12 + PARAVIRT_LAZY_MMU, 13 + PARAVIRT_LAZY_CPU, 14 + }; 15 + 16 + static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void) 17 + { 18 + return PARAVIRT_LAZY_NONE; 19 + } 20 + 21 + extern struct dma_map_ops *xen_dma_ops; 22 + 23 + #ifdef CONFIG_XEN 24 + void __init xen_early_init(void); 25 + #else 26 + static inline void xen_early_init(void) { return; } 27 + #endif 28 + 29 + #ifdef CONFIG_HOTPLUG_CPU 30 + static inline void xen_arch_register_cpu(int num) 31 + { 32 + } 33 + 34 + static inline void xen_arch_unregister_cpu(int num) 35 + { 36 + } 37 + #endif 38 + 39 + #endif /* _ASM_ARM_XEN_HYPERVISOR_H */
+85
include/xen/arm/interface.h
··· 1 + /****************************************************************************** 2 + * Guest OS interface to ARM Xen. 3 + * 4 + * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012 5 + */ 6 + 7 + #ifndef _ASM_ARM_XEN_INTERFACE_H 8 + #define _ASM_ARM_XEN_INTERFACE_H 9 + 10 + #include <linux/types.h> 11 + 12 + #define uint64_aligned_t uint64_t __attribute__((aligned(8))) 13 + 14 + #define __DEFINE_GUEST_HANDLE(name, type) \ 15 + typedef struct { union { type *p; uint64_aligned_t q; }; } \ 16 + __guest_handle_ ## name 17 + 18 + #define DEFINE_GUEST_HANDLE_STRUCT(name) \ 19 + __DEFINE_GUEST_HANDLE(name, struct name) 20 + #define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name) 21 + #define GUEST_HANDLE(name) __guest_handle_ ## name 22 + 23 + #define set_xen_guest_handle(hnd, val) \ 24 + do { \ 25 + if (sizeof(hnd) == 8) \ 26 + *(uint64_t *)&(hnd) = 0; \ 27 + (hnd).p = val; \ 28 + } while (0) 29 + 30 + #define __HYPERVISOR_platform_op_raw __HYPERVISOR_platform_op 31 + 32 + #ifndef __ASSEMBLY__ 33 + /* Explicitly size integers that represent pfns in the interface with 34 + * Xen so that we can have one ABI that works for 32 and 64 bit guests. 35 + * Note that this means that the xen_pfn_t type may be capable of 36 + * representing pfn's which the guest cannot represent in its own pfn 37 + * type. However since pfn space is controlled by the guest this is 38 + * fine since it simply wouldn't be able to create any sure pfns in 39 + * the first place. 40 + */ 41 + typedef uint64_t xen_pfn_t; 42 + #define PRI_xen_pfn "llx" 43 + typedef uint64_t xen_ulong_t; 44 + #define PRI_xen_ulong "llx" 45 + typedef int64_t xen_long_t; 46 + #define PRI_xen_long "llx" 47 + /* Guest handles for primitive C types. 
*/ 48 + __DEFINE_GUEST_HANDLE(uchar, unsigned char); 49 + __DEFINE_GUEST_HANDLE(uint, unsigned int); 50 + DEFINE_GUEST_HANDLE(char); 51 + DEFINE_GUEST_HANDLE(int); 52 + DEFINE_GUEST_HANDLE(void); 53 + DEFINE_GUEST_HANDLE(uint64_t); 54 + DEFINE_GUEST_HANDLE(uint32_t); 55 + DEFINE_GUEST_HANDLE(xen_pfn_t); 56 + DEFINE_GUEST_HANDLE(xen_ulong_t); 57 + 58 + /* Maximum number of virtual CPUs in multi-processor guests. */ 59 + #define MAX_VIRT_CPUS 1 60 + 61 + struct arch_vcpu_info { }; 62 + struct arch_shared_info { }; 63 + 64 + /* TODO: Move pvclock definitions some place arch independent */ 65 + struct pvclock_vcpu_time_info { 66 + u32 version; 67 + u32 pad0; 68 + u64 tsc_timestamp; 69 + u64 system_time; 70 + u32 tsc_to_system_mul; 71 + s8 tsc_shift; 72 + u8 flags; 73 + u8 pad[2]; 74 + } __attribute__((__packed__)); /* 32 bytes */ 75 + 76 + /* It is OK to have a 12 bytes struct with no padding because it is packed */ 77 + struct pvclock_wall_clock { 78 + u32 version; 79 + u32 sec; 80 + u32 nsec; 81 + u32 sec_hi; 82 + } __attribute__((__packed__)); 83 + #endif 84 + 85 + #endif /* _ASM_ARM_XEN_INTERFACE_H */
+98
include/xen/arm/page-coherent.h
··· 1 + #ifndef _ASM_ARM_XEN_PAGE_COHERENT_H 2 + #define _ASM_ARM_XEN_PAGE_COHERENT_H 3 + 4 + #include <asm/page.h> 5 + #include <linux/dma-mapping.h> 6 + 7 + void __xen_dma_map_page(struct device *hwdev, struct page *page, 8 + dma_addr_t dev_addr, unsigned long offset, size_t size, 9 + enum dma_data_direction dir, unsigned long attrs); 10 + void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, 11 + size_t size, enum dma_data_direction dir, 12 + unsigned long attrs); 13 + void __xen_dma_sync_single_for_cpu(struct device *hwdev, 14 + dma_addr_t handle, size_t size, enum dma_data_direction dir); 15 + 16 + void __xen_dma_sync_single_for_device(struct device *hwdev, 17 + dma_addr_t handle, size_t size, enum dma_data_direction dir); 18 + 19 + static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, 20 + dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs) 21 + { 22 + return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs); 23 + } 24 + 25 + static inline void xen_free_coherent_pages(struct device *hwdev, size_t size, 26 + void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs) 27 + { 28 + __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs); 29 + } 30 + 31 + static inline void xen_dma_map_page(struct device *hwdev, struct page *page, 32 + dma_addr_t dev_addr, unsigned long offset, size_t size, 33 + enum dma_data_direction dir, unsigned long attrs) 34 + { 35 + unsigned long page_pfn = page_to_xen_pfn(page); 36 + unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr); 37 + unsigned long compound_pages = 38 + (1<<compound_order(page)) * XEN_PFN_PER_PAGE; 39 + bool local = (page_pfn <= dev_pfn) && 40 + (dev_pfn - page_pfn < compound_pages); 41 + 42 + /* 43 + * Dom0 is mapped 1:1, while the Linux page can span across 44 + * multiple Xen pages, it's not possible for it to contain a 45 + * mix of local and foreign Xen pages. 
So if the first xen_pfn 46 + * == mfn the page is local, otherwise it's a foreign page 47 + * grant-mapped in dom0. If the page is local we can safely 48 + * call the native dma_ops function, otherwise we call the xen 49 + * specific function. 50 + */ 51 + if (local) 52 + __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); 53 + else 54 + __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs); 55 + } 56 + 57 + static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, 58 + size_t size, enum dma_data_direction dir, unsigned long attrs) 59 + { 60 + unsigned long pfn = PFN_DOWN(handle); 61 + /* 62 + * Dom0 is mapped 1:1, while the Linux page can be spanned across 63 + * multiple Xen pages, it's not possible to have a mix of local and 64 + * foreign Xen pages. Dom0 is mapped 1:1, so calling pfn_valid on a 65 + * foreign mfn will always return false. If the page is local we can 66 + * safely call the native dma_ops function, otherwise we call the xen 67 + * specific function. 
68 + */ 69 + if (pfn_valid(pfn)) { 70 + if (__generic_dma_ops(hwdev)->unmap_page) 71 + __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs); 72 + } else 73 + __xen_dma_unmap_page(hwdev, handle, size, dir, attrs); 74 + } 75 + 76 + static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, 77 + dma_addr_t handle, size_t size, enum dma_data_direction dir) 78 + { 79 + unsigned long pfn = PFN_DOWN(handle); 80 + if (pfn_valid(pfn)) { 81 + if (__generic_dma_ops(hwdev)->sync_single_for_cpu) 82 + __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir); 83 + } else 84 + __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir); 85 + } 86 + 87 + static inline void xen_dma_sync_single_for_device(struct device *hwdev, 88 + dma_addr_t handle, size_t size, enum dma_data_direction dir) 89 + { 90 + unsigned long pfn = PFN_DOWN(handle); 91 + if (pfn_valid(pfn)) { 92 + if (__generic_dma_ops(hwdev)->sync_single_for_device) 93 + __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir); 94 + } else 95 + __xen_dma_sync_single_for_device(hwdev, handle, size, dir); 96 + } 97 + 98 + #endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
+122
include/xen/arm/page.h
··· 1 + #ifndef _ASM_ARM_XEN_PAGE_H 2 + #define _ASM_ARM_XEN_PAGE_H 3 + 4 + #include <asm/page.h> 5 + #include <asm/pgtable.h> 6 + 7 + #include <linux/pfn.h> 8 + #include <linux/types.h> 9 + #include <linux/dma-mapping.h> 10 + 11 + #include <xen/xen.h> 12 + #include <xen/interface/grant_table.h> 13 + 14 + #define phys_to_machine_mapping_valid(pfn) (1) 15 + 16 + /* Xen machine address */ 17 + typedef struct xmaddr { 18 + phys_addr_t maddr; 19 + } xmaddr_t; 20 + 21 + /* Xen pseudo-physical address */ 22 + typedef struct xpaddr { 23 + phys_addr_t paddr; 24 + } xpaddr_t; 25 + 26 + #define XMADDR(x) ((xmaddr_t) { .maddr = (x) }) 27 + #define XPADDR(x) ((xpaddr_t) { .paddr = (x) }) 28 + 29 + #define INVALID_P2M_ENTRY (~0UL) 30 + 31 + /* 32 + * The pseudo-physical frame (pfn) used in all the helpers is always based 33 + * on Xen page granularity (i.e. 4KB). 34 + * 35 + * A Linux page may be split across multiple non-contiguous Xen pages so we 36 + * have to keep track of frames based on 4KB page granularity. 37 + * 38 + * PV drivers should never make direct use of those helpers (particularly 39 + * pfn_to_gfn and gfn_to_pfn). 
40 + */ 41 + 42 + unsigned long __pfn_to_mfn(unsigned long pfn); 43 + extern struct rb_root phys_to_mach; 44 + 45 + /* Pseudo-physical <-> Guest conversion */ 46 + static inline unsigned long pfn_to_gfn(unsigned long pfn) 47 + { 48 + return pfn; 49 + } 50 + 51 + static inline unsigned long gfn_to_pfn(unsigned long gfn) 52 + { 53 + return gfn; 54 + } 55 + 56 + /* Pseudo-physical <-> BUS conversion */ 57 + static inline unsigned long pfn_to_bfn(unsigned long pfn) 58 + { 59 + unsigned long mfn; 60 + 61 + if (phys_to_mach.rb_node != NULL) { 62 + mfn = __pfn_to_mfn(pfn); 63 + if (mfn != INVALID_P2M_ENTRY) 64 + return mfn; 65 + } 66 + 67 + return pfn; 68 + } 69 + 70 + static inline unsigned long bfn_to_pfn(unsigned long bfn) 71 + { 72 + return bfn; 73 + } 74 + 75 + #define bfn_to_local_pfn(bfn) bfn_to_pfn(bfn) 76 + 77 + /* VIRT <-> GUEST conversion */ 78 + #define virt_to_gfn(v) (pfn_to_gfn(virt_to_phys(v) >> XEN_PAGE_SHIFT)) 79 + #define gfn_to_virt(m) (__va(gfn_to_pfn(m) << XEN_PAGE_SHIFT)) 80 + 81 + /* Only used in PV code. But ARM guests are always HVM. */ 82 + static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr) 83 + { 84 + BUG(); 85 + } 86 + 87 + /* TODO: this shouldn't be here but it is because the frontend drivers 88 + * are using it (it's rolled in headers) even though we won't hit the code path. 89 + * So for right now just punt with this. 
90 + */ 91 + static inline pte_t *lookup_address(unsigned long address, unsigned int *level) 92 + { 93 + BUG(); 94 + return NULL; 95 + } 96 + 97 + extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, 98 + struct gnttab_map_grant_ref *kmap_ops, 99 + struct page **pages, unsigned int count); 100 + 101 + extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, 102 + struct gnttab_unmap_grant_ref *kunmap_ops, 103 + struct page **pages, unsigned int count); 104 + 105 + bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn); 106 + bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn, 107 + unsigned long nr_pages); 108 + 109 + static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) 110 + { 111 + return __set_phys_to_machine(pfn, mfn); 112 + } 113 + 114 + #define xen_remap(cookie, size) ioremap_cache((cookie), (size)) 115 + #define xen_unmap(cookie) iounmap((cookie)) 116 + 117 + bool xen_arch_need_swiotlb(struct device *dev, 118 + phys_addr_t phys, 119 + dma_addr_t dev_addr); 120 + unsigned long xen_get_swiotlb_free_pages(unsigned int order); 121 + 122 + #endif /* _ASM_ARM_XEN_PAGE_H */
+4
include/xen/xenbus.h
··· 151 151 int xenbus_scanf(struct xenbus_transaction t, 152 152 const char *dir, const char *node, const char *fmt, ...); 153 153 154 + /* Read an (optional) unsigned value. */ 155 + unsigned int xenbus_read_unsigned(const char *dir, const char *node, 156 + unsigned int default_val); 157 + 154 158 /* Single printf and write: returns -errno or 0. */ 155 159 __printf(4, 5) 156 160 int xenbus_printf(struct xenbus_transaction t,