Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'stable/for-linus-4.1-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip

Pull xen features and fixes from David Vrabel:

- use a single source list of hypercalls, generating other tables etc.
at build time.

- add a "Xen PV" APIC driver to support >255 VCPUs in PV guests.

- significant performance improvements to guest save/restore/migration.

- scsiback/front save/restore support.

- infrastructure for multi-page xenbus rings.

- misc fixes.

* tag 'stable/for-linus-4.1-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
xen/pci: Try harder to get PXM information for Xen
xenbus_client: Extend interface to support multi-page ring
xen-pciback: also support disabling of bus-mastering and memory-write-invalidate
xen: support suspend/resume in pvscsi frontend
xen: scsiback: add LUN of restored domain
xen-scsiback: define a pr_fmt macro with xen-pvscsi
xen/mce: fix up xen_late_init_mcelog() error handling
xen/privcmd: improve performance of MMAPBATCH_V2
xen: unify foreign GFN map/unmap for auto-xlated physmap guests
x86/xen/apic: WARN with details.
x86/xen: Provide a "Xen PV" APIC driver to support >255 VCPUs
xen/pciback: Don't print scary messages when unsupported by hypervisor.
xen: use generated hypercall symbols in arch/x86/xen/xen-head.S
xen: use generated hypervisor symbols in arch/x86/xen/trace.c
xen: synchronize include/xen/interface/xen.h with xen
xen: build infrastructure for generating hypercall depending symbols
xen: balloon: Use static attribute groups for sysfs entries
xen: pcpu: Use static attribute groups for sysfs entry

+1248 -696
+17 -89
arch/arm/xen/enlighten.c
··· 53 53 54 54 static __read_mostly int xen_events_irq = -1; 55 55 56 - /* map fgmfn of domid to lpfn in the current domain */ 57 - static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn, 58 - unsigned int domid) 59 - { 60 - int rc; 61 - struct xen_add_to_physmap_range xatp = { 62 - .domid = DOMID_SELF, 63 - .foreign_domid = domid, 64 - .size = 1, 65 - .space = XENMAPSPACE_gmfn_foreign, 66 - }; 67 - xen_ulong_t idx = fgmfn; 68 - xen_pfn_t gpfn = lpfn; 69 - int err = 0; 70 - 71 - set_xen_guest_handle(xatp.idxs, &idx); 72 - set_xen_guest_handle(xatp.gpfns, &gpfn); 73 - set_xen_guest_handle(xatp.errs, &err); 74 - 75 - rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp); 76 - if (rc || err) { 77 - pr_warn("Failed to map pfn to mfn rc:%d:%d pfn:%lx mfn:%lx\n", 78 - rc, err, lpfn, fgmfn); 79 - return 1; 80 - } 81 - return 0; 82 - } 83 - 84 - struct remap_data { 85 - xen_pfn_t fgmfn; /* foreign domain's gmfn */ 86 - pgprot_t prot; 87 - domid_t domid; 88 - struct vm_area_struct *vma; 89 - int index; 90 - struct page **pages; 91 - struct xen_remap_mfn_info *info; 92 - }; 93 - 94 - static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr, 95 - void *data) 96 - { 97 - struct remap_data *info = data; 98 - struct page *page = info->pages[info->index++]; 99 - unsigned long pfn = page_to_pfn(page); 100 - pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot)); 101 - 102 - if (map_foreign_page(pfn, info->fgmfn, info->domid)) 103 - return -EFAULT; 104 - set_pte_at(info->vma->vm_mm, addr, ptep, pte); 105 - 106 - return 0; 107 - } 108 - 109 - int xen_remap_domain_mfn_range(struct vm_area_struct *vma, 56 + int xen_remap_domain_mfn_array(struct vm_area_struct *vma, 110 57 unsigned long addr, 111 - xen_pfn_t mfn, int nr, 112 - pgprot_t prot, unsigned domid, 58 + xen_pfn_t *mfn, int nr, 59 + int *err_ptr, pgprot_t prot, 60 + unsigned domid, 113 61 struct page **pages) 114 62 { 115 - int err; 116 - struct remap_data data; 63 + return 
xen_xlate_remap_gfn_array(vma, addr, mfn, nr, err_ptr, 64 + prot, domid, pages); 65 + } 66 + EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array); 117 67 118 - /* TBD: Batching, current sole caller only does page at a time */ 119 - if (nr > 1) 120 - return -EINVAL; 121 - 122 - data.fgmfn = mfn; 123 - data.prot = prot; 124 - data.domid = domid; 125 - data.vma = vma; 126 - data.index = 0; 127 - data.pages = pages; 128 - err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT, 129 - remap_pte_fn, &data); 130 - return err; 68 + /* Not used by XENFEAT_auto_translated guests. */ 69 + int xen_remap_domain_mfn_range(struct vm_area_struct *vma, 70 + unsigned long addr, 71 + xen_pfn_t mfn, int nr, 72 + pgprot_t prot, unsigned domid, 73 + struct page **pages) 74 + { 75 + return -ENOSYS; 131 76 } 132 77 EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range); 133 78 134 79 int xen_unmap_domain_mfn_range(struct vm_area_struct *vma, 135 80 int nr, struct page **pages) 136 81 { 137 - int i; 138 - 139 - for (i = 0; i < nr; i++) { 140 - struct xen_remove_from_physmap xrp; 141 - unsigned long rc, pfn; 142 - 143 - pfn = page_to_pfn(pages[i]); 144 - 145 - xrp.domid = DOMID_SELF; 146 - xrp.gpfn = pfn; 147 - rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp); 148 - if (rc) { 149 - pr_warn("Failed to unmap pfn:%lx rc:%ld\n", 150 - pfn, rc); 151 - return rc; 152 - } 153 - } 154 - return 0; 82 + return xen_xlate_unmap_gfn_range(vma, nr, pages); 155 83 } 156 84 EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range); 157 85
+9
arch/x86/syscalls/Makefile
··· 19 19 quiet_cmd_systbl = SYSTBL $@ 20 20 cmd_systbl = $(CONFIG_SHELL) '$(systbl)' $< $@ 21 21 22 + quiet_cmd_hypercalls = HYPERCALLS $@ 23 + cmd_hypercalls = $(CONFIG_SHELL) '$<' $@ $(filter-out $<,$^) 24 + 22 25 syshdr_abi_unistd_32 := i386 23 26 $(uapi)/unistd_32.h: $(syscall32) $(syshdr) 24 27 $(call if_changed,syshdr) ··· 50 47 $(out)/syscalls_64.h: $(syscall64) $(systbl) 51 48 $(call if_changed,systbl) 52 49 50 + $(out)/xen-hypercalls.h: $(srctree)/scripts/xen-hypercalls.sh 51 + $(call if_changed,hypercalls) 52 + 53 + $(out)/xen-hypercalls.h: $(srctree)/include/xen/interface/xen*.h 54 + 53 55 uapisyshdr-y += unistd_32.h unistd_64.h unistd_x32.h 54 56 syshdr-y += syscalls_32.h 55 57 syshdr-$(CONFIG_X86_64) += unistd_32_ia32.h unistd_64_x32.h 56 58 syshdr-$(CONFIG_X86_64) += syscalls_64.h 59 + syshdr-$(CONFIG_XEN) += xen-hypercalls.h 57 60 58 61 targets += $(uapisyshdr-y) $(syshdr-y) 59 62
+180
arch/x86/xen/apic.c
··· 7 7 #include <xen/xen.h> 8 8 #include <xen/interface/physdev.h> 9 9 #include "xen-ops.h" 10 + #include "smp.h" 10 11 11 12 static unsigned int xen_io_apic_read(unsigned apic, unsigned reg) 12 13 { ··· 29 28 return 0xfd; 30 29 } 31 30 31 + static unsigned long xen_set_apic_id(unsigned int x) 32 + { 33 + WARN_ON(1); 34 + return x; 35 + } 36 + 37 + static unsigned int xen_get_apic_id(unsigned long x) 38 + { 39 + return ((x)>>24) & 0xFFu; 40 + } 41 + 42 + static u32 xen_apic_read(u32 reg) 43 + { 44 + struct xen_platform_op op = { 45 + .cmd = XENPF_get_cpuinfo, 46 + .interface_version = XENPF_INTERFACE_VERSION, 47 + .u.pcpu_info.xen_cpuid = 0, 48 + }; 49 + int ret = 0; 50 + 51 + /* Shouldn't need this as APIC is turned off for PV, and we only 52 + * get called on the bootup processor. But just in case. */ 53 + if (!xen_initial_domain() || smp_processor_id()) 54 + return 0; 55 + 56 + if (reg == APIC_LVR) 57 + return 0x10; 58 + #ifdef CONFIG_X86_32 59 + if (reg == APIC_LDR) 60 + return SET_APIC_LOGICAL_ID(1UL << smp_processor_id()); 61 + #endif 62 + if (reg != APIC_ID) 63 + return 0; 64 + 65 + ret = HYPERVISOR_dom0_op(&op); 66 + if (ret) 67 + return 0; 68 + 69 + return op.u.pcpu_info.apic_id << 24; 70 + } 71 + 72 + static void xen_apic_write(u32 reg, u32 val) 73 + { 74 + /* Warn to see if there's any stray references */ 75 + WARN(1,"register: %x, value: %x\n", reg, val); 76 + } 77 + 78 + static u64 xen_apic_icr_read(void) 79 + { 80 + return 0; 81 + } 82 + 83 + static void xen_apic_icr_write(u32 low, u32 id) 84 + { 85 + /* Warn to see if there's any stray references */ 86 + WARN_ON(1); 87 + } 88 + 89 + static u32 xen_safe_apic_wait_icr_idle(void) 90 + { 91 + return 0; 92 + } 93 + 94 + static int xen_apic_probe_pv(void) 95 + { 96 + if (xen_pv_domain()) 97 + return 1; 98 + 99 + return 0; 100 + } 101 + 102 + static int xen_madt_oem_check(char *oem_id, char *oem_table_id) 103 + { 104 + return xen_pv_domain(); 105 + } 106 + 107 + static int xen_id_always_valid(int apicid) 
108 + { 109 + return 1; 110 + } 111 + 112 + static int xen_id_always_registered(void) 113 + { 114 + return 1; 115 + } 116 + 117 + static int xen_phys_pkg_id(int initial_apic_id, int index_msb) 118 + { 119 + return initial_apic_id >> index_msb; 120 + } 121 + 122 + #ifdef CONFIG_X86_32 123 + static int xen_x86_32_early_logical_apicid(int cpu) 124 + { 125 + /* Match with APIC_LDR read. Otherwise setup_local_APIC complains. */ 126 + return 1 << cpu; 127 + } 128 + #endif 129 + 130 + static void xen_noop(void) 131 + { 132 + } 133 + 134 + static void xen_silent_inquire(int apicid) 135 + { 136 + } 137 + 138 + static struct apic xen_pv_apic = { 139 + .name = "Xen PV", 140 + .probe = xen_apic_probe_pv, 141 + .acpi_madt_oem_check = xen_madt_oem_check, 142 + .apic_id_valid = xen_id_always_valid, 143 + .apic_id_registered = xen_id_always_registered, 144 + 145 + /* .irq_delivery_mode - used in native_compose_msi_msg only */ 146 + /* .irq_dest_mode - used in native_compose_msi_msg only */ 147 + 148 + .target_cpus = default_target_cpus, 149 + .disable_esr = 0, 150 + /* .dest_logical - default_send_IPI_ use it but we use our own. */ 151 + .check_apicid_used = default_check_apicid_used, /* Used on 32-bit */ 152 + 153 + .vector_allocation_domain = flat_vector_allocation_domain, 154 + .init_apic_ldr = xen_noop, /* setup_local_APIC calls it */ 155 + 156 + .ioapic_phys_id_map = default_ioapic_phys_id_map, /* Used on 32-bit */ 157 + .setup_apic_routing = NULL, 158 + .cpu_present_to_apicid = default_cpu_present_to_apicid, 159 + .apicid_to_cpu_present = physid_set_mask_of_physid, /* Used on 32-bit */ 160 + .check_phys_apicid_present = default_check_phys_apicid_present, /* smp_sanity_check needs it */ 161 + .phys_pkg_id = xen_phys_pkg_id, /* detect_ht */ 162 + 163 + .get_apic_id = xen_get_apic_id, 164 + .set_apic_id = xen_set_apic_id, /* Can be NULL on 32-bit. */ 165 + .apic_id_mask = 0xFF << 24, /* Used by verify_local_APIC. Match with what xen_get_apic_id does. 
*/ 166 + 167 + .cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and, 168 + 169 + #ifdef CONFIG_SMP 170 + .send_IPI_mask = xen_send_IPI_mask, 171 + .send_IPI_mask_allbutself = xen_send_IPI_mask_allbutself, 172 + .send_IPI_allbutself = xen_send_IPI_allbutself, 173 + .send_IPI_all = xen_send_IPI_all, 174 + .send_IPI_self = xen_send_IPI_self, 175 + #endif 176 + /* .wait_for_init_deassert- used by AP bootup - smp_callin which we don't use */ 177 + .inquire_remote_apic = xen_silent_inquire, 178 + 179 + .read = xen_apic_read, 180 + .write = xen_apic_write, 181 + .eoi_write = xen_apic_write, 182 + 183 + .icr_read = xen_apic_icr_read, 184 + .icr_write = xen_apic_icr_write, 185 + .wait_icr_idle = xen_noop, 186 + .safe_wait_icr_idle = xen_safe_apic_wait_icr_idle, 187 + 188 + #ifdef CONFIG_X86_32 189 + /* generic_processor_info and setup_local_APIC. */ 190 + .x86_32_early_logical_apicid = xen_x86_32_early_logical_apicid, 191 + #endif 192 + }; 193 + 194 + static void __init xen_apic_check(void) 195 + { 196 + if (apic == &xen_pv_apic) 197 + return; 198 + 199 + pr_info("Switched APIC routing from %s to %s.\n", apic->name, 200 + xen_pv_apic.name); 201 + apic = &xen_pv_apic; 202 + } 32 203 void __init xen_init_apic(void) 33 204 { 34 205 x86_io_apic_ops.read = xen_io_apic_read; 206 + /* On PV guests the APIC CPUID bit is disabled so none of the 207 + * routines end up executing. */ 208 + if (!xen_initial_domain()) 209 + apic = &xen_pv_apic; 210 + 211 + x86_platform.apic_post_init = xen_apic_check; 35 212 } 213 + apic_driver(xen_pv_apic);
+1 -89
arch/x86/xen/enlighten.c
··· 928 928 { 929 929 } 930 930 931 - #ifdef CONFIG_X86_LOCAL_APIC 932 - static unsigned long xen_set_apic_id(unsigned int x) 933 - { 934 - WARN_ON(1); 935 - return x; 936 - } 937 - static unsigned int xen_get_apic_id(unsigned long x) 938 - { 939 - return ((x)>>24) & 0xFFu; 940 - } 941 - static u32 xen_apic_read(u32 reg) 942 - { 943 - struct xen_platform_op op = { 944 - .cmd = XENPF_get_cpuinfo, 945 - .interface_version = XENPF_INTERFACE_VERSION, 946 - .u.pcpu_info.xen_cpuid = 0, 947 - }; 948 - int ret = 0; 949 - 950 - /* Shouldn't need this as APIC is turned off for PV, and we only 951 - * get called on the bootup processor. But just in case. */ 952 - if (!xen_initial_domain() || smp_processor_id()) 953 - return 0; 954 - 955 - if (reg == APIC_LVR) 956 - return 0x10; 957 - 958 - if (reg != APIC_ID) 959 - return 0; 960 - 961 - ret = HYPERVISOR_dom0_op(&op); 962 - if (ret) 963 - return 0; 964 - 965 - return op.u.pcpu_info.apic_id << 24; 966 - } 967 - 968 - static void xen_apic_write(u32 reg, u32 val) 969 - { 970 - /* Warn to see if there's any stray references */ 971 - WARN_ON(1); 972 - } 973 - 974 - static u64 xen_apic_icr_read(void) 975 - { 976 - return 0; 977 - } 978 - 979 - static void xen_apic_icr_write(u32 low, u32 id) 980 - { 981 - /* Warn to see if there's any stray references */ 982 - WARN_ON(1); 983 - } 984 - 985 - static void xen_apic_wait_icr_idle(void) 986 - { 987 - return; 988 - } 989 - 990 - static u32 xen_safe_apic_wait_icr_idle(void) 991 - { 992 - return 0; 993 - } 994 - 995 - static void set_xen_basic_apic_ops(void) 996 - { 997 - apic->read = xen_apic_read; 998 - apic->write = xen_apic_write; 999 - apic->icr_read = xen_apic_icr_read; 1000 - apic->icr_write = xen_apic_icr_write; 1001 - apic->wait_icr_idle = xen_apic_wait_icr_idle; 1002 - apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle; 1003 - apic->set_apic_id = xen_set_apic_id; 1004 - apic->get_apic_id = xen_get_apic_id; 1005 - 1006 - #ifdef CONFIG_SMP 1007 - apic->send_IPI_allbutself = 
xen_send_IPI_allbutself; 1008 - apic->send_IPI_mask_allbutself = xen_send_IPI_mask_allbutself; 1009 - apic->send_IPI_mask = xen_send_IPI_mask; 1010 - apic->send_IPI_all = xen_send_IPI_all; 1011 - apic->send_IPI_self = xen_send_IPI_self; 1012 - #endif 1013 - } 1014 - 1015 - #endif 1016 - 1017 931 static void xen_clts(void) 1018 932 { 1019 933 struct multicall_space mcs; ··· 1533 1619 /* 1534 1620 * set up the basic apic ops. 1535 1621 */ 1536 - set_xen_basic_apic_ops(); 1622 + xen_init_apic(); 1537 1623 #endif 1538 1624 1539 1625 if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) { ··· 1645 1731 1646 1732 if (HYPERVISOR_dom0_op(&op) == 0) 1647 1733 boot_params.kbd_status = op.u.firmware_info.u.kbd_shift_flags; 1648 - 1649 - xen_init_apic(); 1650 1734 1651 1735 /* Make sure ACS will be enabled */ 1652 1736 pci_request_acs();
+83 -124
arch/x86/xen/mmu.c
··· 2436 2436 } 2437 2437 #endif 2438 2438 2439 - #ifdef CONFIG_XEN_PVH 2440 - /* 2441 - * Map foreign gfn (fgfn), to local pfn (lpfn). This for the user 2442 - * space creating new guest on pvh dom0 and needing to map domU pages. 2443 - */ 2444 - static int xlate_add_to_p2m(unsigned long lpfn, unsigned long fgfn, 2445 - unsigned int domid) 2446 - { 2447 - int rc, err = 0; 2448 - xen_pfn_t gpfn = lpfn; 2449 - xen_ulong_t idx = fgfn; 2450 - 2451 - struct xen_add_to_physmap_range xatp = { 2452 - .domid = DOMID_SELF, 2453 - .foreign_domid = domid, 2454 - .size = 1, 2455 - .space = XENMAPSPACE_gmfn_foreign, 2456 - }; 2457 - set_xen_guest_handle(xatp.idxs, &idx); 2458 - set_xen_guest_handle(xatp.gpfns, &gpfn); 2459 - set_xen_guest_handle(xatp.errs, &err); 2460 - 2461 - rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp); 2462 - if (rc < 0) 2463 - return rc; 2464 - return err; 2465 - } 2466 - 2467 - static int xlate_remove_from_p2m(unsigned long spfn, int count) 2468 - { 2469 - struct xen_remove_from_physmap xrp; 2470 - int i, rc; 2471 - 2472 - for (i = 0; i < count; i++) { 2473 - xrp.domid = DOMID_SELF; 2474 - xrp.gpfn = spfn+i; 2475 - rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp); 2476 - if (rc) 2477 - break; 2478 - } 2479 - return rc; 2480 - } 2481 - 2482 - struct xlate_remap_data { 2483 - unsigned long fgfn; /* foreign domain's gfn */ 2484 - pgprot_t prot; 2485 - domid_t domid; 2486 - int index; 2487 - struct page **pages; 2488 - }; 2489 - 2490 - static int xlate_map_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr, 2491 - void *data) 2492 - { 2493 - int rc; 2494 - struct xlate_remap_data *remap = data; 2495 - unsigned long pfn = page_to_pfn(remap->pages[remap->index++]); 2496 - pte_t pteval = pte_mkspecial(pfn_pte(pfn, remap->prot)); 2497 - 2498 - rc = xlate_add_to_p2m(pfn, remap->fgfn, remap->domid); 2499 - if (rc) 2500 - return rc; 2501 - native_set_pte(ptep, pteval); 2502 - 2503 - return 0; 2504 - } 2505 - 2506 - static int 
xlate_remap_gfn_range(struct vm_area_struct *vma, 2507 - unsigned long addr, unsigned long mfn, 2508 - int nr, pgprot_t prot, unsigned domid, 2509 - struct page **pages) 2510 - { 2511 - int err; 2512 - struct xlate_remap_data pvhdata; 2513 - 2514 - BUG_ON(!pages); 2515 - 2516 - pvhdata.fgfn = mfn; 2517 - pvhdata.prot = prot; 2518 - pvhdata.domid = domid; 2519 - pvhdata.index = 0; 2520 - pvhdata.pages = pages; 2521 - err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT, 2522 - xlate_map_pte_fn, &pvhdata); 2523 - flush_tlb_all(); 2524 - return err; 2525 - } 2526 - #endif 2527 - 2528 2439 #define REMAP_BATCH_SIZE 16 2529 2440 2530 2441 struct remap_data { 2531 - unsigned long mfn; 2442 + xen_pfn_t *mfn; 2443 + bool contiguous; 2532 2444 pgprot_t prot; 2533 2445 struct mmu_update *mmu_update; 2534 2446 }; ··· 2449 2537 unsigned long addr, void *data) 2450 2538 { 2451 2539 struct remap_data *rmd = data; 2452 - pte_t pte = pte_mkspecial(mfn_pte(rmd->mfn++, rmd->prot)); 2540 + pte_t pte = pte_mkspecial(mfn_pte(*rmd->mfn, rmd->prot)); 2541 + 2542 + /* If we have a contigious range, just update the mfn itself, 2543 + else update pointer to be "next mfn". 
*/ 2544 + if (rmd->contiguous) 2545 + (*rmd->mfn)++; 2546 + else 2547 + rmd->mfn++; 2453 2548 2454 2549 rmd->mmu_update->ptr = virt_to_machine(ptep).maddr; 2455 2550 rmd->mmu_update->val = pte_val_ma(pte); ··· 2465 2546 return 0; 2466 2547 } 2467 2548 2468 - int xen_remap_domain_mfn_range(struct vm_area_struct *vma, 2469 - unsigned long addr, 2470 - xen_pfn_t mfn, int nr, 2471 - pgprot_t prot, unsigned domid, 2472 - struct page **pages) 2473 - 2549 + static int do_remap_mfn(struct vm_area_struct *vma, 2550 + unsigned long addr, 2551 + xen_pfn_t *mfn, int nr, 2552 + int *err_ptr, pgprot_t prot, 2553 + unsigned domid, 2554 + struct page **pages) 2474 2555 { 2556 + int err = 0; 2475 2557 struct remap_data rmd; 2476 2558 struct mmu_update mmu_update[REMAP_BATCH_SIZE]; 2477 - int batch; 2478 2559 unsigned long range; 2479 - int err = 0; 2560 + int mapped = 0; 2480 2561 2481 2562 BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO))); 2482 2563 2483 2564 if (xen_feature(XENFEAT_auto_translated_physmap)) { 2484 2565 #ifdef CONFIG_XEN_PVH 2485 2566 /* We need to update the local page tables and the xen HAP */ 2486 - return xlate_remap_gfn_range(vma, addr, mfn, nr, prot, 2487 - domid, pages); 2567 + return xen_xlate_remap_gfn_array(vma, addr, mfn, nr, err_ptr, 2568 + prot, domid, pages); 2488 2569 #else 2489 2570 return -EINVAL; 2490 2571 #endif ··· 2492 2573 2493 2574 rmd.mfn = mfn; 2494 2575 rmd.prot = prot; 2576 + /* We use the err_ptr to indicate if there we are doing a contigious 2577 + * mapping or a discontigious mapping. 
*/ 2578 + rmd.contiguous = !err_ptr; 2495 2579 2496 2580 while (nr) { 2497 - batch = min(REMAP_BATCH_SIZE, nr); 2581 + int index = 0; 2582 + int done = 0; 2583 + int batch = min(REMAP_BATCH_SIZE, nr); 2584 + int batch_left = batch; 2498 2585 range = (unsigned long)batch << PAGE_SHIFT; 2499 2586 2500 2587 rmd.mmu_update = mmu_update; ··· 2509 2584 if (err) 2510 2585 goto out; 2511 2586 2512 - err = HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid); 2513 - if (err < 0) 2514 - goto out; 2587 + /* We record the error for each page that gives an error, but 2588 + * continue mapping until the whole set is done */ 2589 + do { 2590 + int i; 2591 + 2592 + err = HYPERVISOR_mmu_update(&mmu_update[index], 2593 + batch_left, &done, domid); 2594 + 2595 + /* 2596 + * @err_ptr may be the same buffer as @mfn, so 2597 + * only clear it after each chunk of @mfn is 2598 + * used. 2599 + */ 2600 + if (err_ptr) { 2601 + for (i = index; i < index + done; i++) 2602 + err_ptr[i] = 0; 2603 + } 2604 + if (err < 0) { 2605 + if (!err_ptr) 2606 + goto out; 2607 + err_ptr[i] = err; 2608 + done++; /* Skip failed frame. */ 2609 + } else 2610 + mapped += done; 2611 + batch_left -= done; 2612 + index += done; 2613 + } while (batch_left); 2515 2614 2516 2615 nr -= batch; 2517 2616 addr += range; 2617 + if (err_ptr) 2618 + err_ptr += batch; 2518 2619 } 2519 - 2520 - err = 0; 2521 2620 out: 2522 2621 2523 2622 xen_flush_tlb_all(); 2524 2623 2525 - return err; 2624 + return err < 0 ? 
err : mapped; 2625 + } 2626 + 2627 + int xen_remap_domain_mfn_range(struct vm_area_struct *vma, 2628 + unsigned long addr, 2629 + xen_pfn_t mfn, int nr, 2630 + pgprot_t prot, unsigned domid, 2631 + struct page **pages) 2632 + { 2633 + return do_remap_mfn(vma, addr, &mfn, nr, NULL, prot, domid, pages); 2526 2634 } 2527 2635 EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range); 2636 + 2637 + int xen_remap_domain_mfn_array(struct vm_area_struct *vma, 2638 + unsigned long addr, 2639 + xen_pfn_t *mfn, int nr, 2640 + int *err_ptr, pgprot_t prot, 2641 + unsigned domid, struct page **pages) 2642 + { 2643 + /* We BUG_ON because it's a programmer error to pass a NULL err_ptr, 2644 + * and the consequences later is quite hard to detect what the actual 2645 + * cause of "wrong memory was mapped in". 2646 + */ 2647 + BUG_ON(err_ptr == NULL); 2648 + return do_remap_mfn(vma, addr, mfn, nr, err_ptr, prot, domid, pages); 2649 + } 2650 + EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array); 2651 + 2528 2652 2529 2653 /* Returns: 0 success */ 2530 2654 int xen_unmap_domain_mfn_range(struct vm_area_struct *vma, ··· 2583 2609 return 0; 2584 2610 2585 2611 #ifdef CONFIG_XEN_PVH 2586 - while (numpgs--) { 2587 - /* 2588 - * The mmu has already cleaned up the process mmu 2589 - * resources at this point (lookup_address will return 2590 - * NULL). 2591 - */ 2592 - unsigned long pfn = page_to_pfn(pages[numpgs]); 2593 - 2594 - xlate_remove_from_p2m(pfn, 1); 2595 - } 2596 - /* 2597 - * We don't need to flush tlbs because as part of 2598 - * xlate_remove_from_p2m, the hypervisor will do tlb flushes 2599 - * after removing the p2m entries from the EPT/NPT 2600 - */ 2601 - return 0; 2612 + return xen_xlate_unmap_gfn_range(vma, numpgs, pages); 2602 2613 #else 2603 2614 return -EINVAL; 2604 2615 #endif
+4 -46
arch/x86/xen/trace.c
··· 1 1 #include <linux/ftrace.h> 2 2 #include <xen/interface/xen.h> 3 + #include <xen/interface/xen-mca.h> 3 4 4 - #define N(x) [__HYPERVISOR_##x] = "("#x")" 5 + #define HYPERCALL(x) [__HYPERVISOR_##x] = "("#x")", 5 6 static const char *xen_hypercall_names[] = { 6 - N(set_trap_table), 7 - N(mmu_update), 8 - N(set_gdt), 9 - N(stack_switch), 10 - N(set_callbacks), 11 - N(fpu_taskswitch), 12 - N(sched_op_compat), 13 - N(dom0_op), 14 - N(set_debugreg), 15 - N(get_debugreg), 16 - N(update_descriptor), 17 - N(memory_op), 18 - N(multicall), 19 - N(update_va_mapping), 20 - N(set_timer_op), 21 - N(event_channel_op_compat), 22 - N(xen_version), 23 - N(console_io), 24 - N(physdev_op_compat), 25 - N(grant_table_op), 26 - N(vm_assist), 27 - N(update_va_mapping_otherdomain), 28 - N(iret), 29 - N(vcpu_op), 30 - N(set_segment_base), 31 - N(mmuext_op), 32 - N(acm_op), 33 - N(nmi_op), 34 - N(sched_op), 35 - N(callback_op), 36 - N(xenoprof_op), 37 - N(event_channel_op), 38 - N(physdev_op), 39 - N(hvm_op), 40 - 41 - /* Architecture-specific hypercall definitions. */ 42 - N(arch_0), 43 - N(arch_1), 44 - N(arch_2), 45 - N(arch_3), 46 - N(arch_4), 47 - N(arch_5), 48 - N(arch_6), 49 - N(arch_7), 7 + #include <asm/xen-hypercalls.h> 50 8 }; 51 - #undef N 9 + #undef HYPERCALL 52 10 53 11 static const char *xen_hypercall_name(unsigned op) 54 12 {
+9 -52
arch/x86/xen/xen-head.S
··· 12 12 13 13 #include <xen/interface/elfnote.h> 14 14 #include <xen/interface/features.h> 15 + #include <xen/interface/xen.h> 16 + #include <xen/interface/xen-mca.h> 15 17 #include <asm/xen/interface.h> 16 18 17 19 #ifdef CONFIG_XEN_PVH ··· 87 85 .pushsection .text 88 86 .balign PAGE_SIZE 89 87 ENTRY(hypercall_page) 90 - #define NEXT_HYPERCALL(x) \ 91 - ENTRY(xen_hypercall_##x) \ 92 - .skip 32 88 + .skip PAGE_SIZE 93 89 94 - NEXT_HYPERCALL(set_trap_table) 95 - NEXT_HYPERCALL(mmu_update) 96 - NEXT_HYPERCALL(set_gdt) 97 - NEXT_HYPERCALL(stack_switch) 98 - NEXT_HYPERCALL(set_callbacks) 99 - NEXT_HYPERCALL(fpu_taskswitch) 100 - NEXT_HYPERCALL(sched_op_compat) 101 - NEXT_HYPERCALL(platform_op) 102 - NEXT_HYPERCALL(set_debugreg) 103 - NEXT_HYPERCALL(get_debugreg) 104 - NEXT_HYPERCALL(update_descriptor) 105 - NEXT_HYPERCALL(ni) 106 - NEXT_HYPERCALL(memory_op) 107 - NEXT_HYPERCALL(multicall) 108 - NEXT_HYPERCALL(update_va_mapping) 109 - NEXT_HYPERCALL(set_timer_op) 110 - NEXT_HYPERCALL(event_channel_op_compat) 111 - NEXT_HYPERCALL(xen_version) 112 - NEXT_HYPERCALL(console_io) 113 - NEXT_HYPERCALL(physdev_op_compat) 114 - NEXT_HYPERCALL(grant_table_op) 115 - NEXT_HYPERCALL(vm_assist) 116 - NEXT_HYPERCALL(update_va_mapping_otherdomain) 117 - NEXT_HYPERCALL(iret) 118 - NEXT_HYPERCALL(vcpu_op) 119 - NEXT_HYPERCALL(set_segment_base) 120 - NEXT_HYPERCALL(mmuext_op) 121 - NEXT_HYPERCALL(xsm_op) 122 - NEXT_HYPERCALL(nmi_op) 123 - NEXT_HYPERCALL(sched_op) 124 - NEXT_HYPERCALL(callback_op) 125 - NEXT_HYPERCALL(xenoprof_op) 126 - NEXT_HYPERCALL(event_channel_op) 127 - NEXT_HYPERCALL(physdev_op) 128 - NEXT_HYPERCALL(hvm_op) 129 - NEXT_HYPERCALL(sysctl) 130 - NEXT_HYPERCALL(domctl) 131 - NEXT_HYPERCALL(kexec_op) 132 - NEXT_HYPERCALL(tmem_op) /* 38 */ 133 - ENTRY(xen_hypercall_rsvr) 134 - .skip 320 135 - NEXT_HYPERCALL(mca) /* 48 */ 136 - NEXT_HYPERCALL(arch_1) 137 - NEXT_HYPERCALL(arch_2) 138 - NEXT_HYPERCALL(arch_3) 139 - NEXT_HYPERCALL(arch_4) 140 - NEXT_HYPERCALL(arch_5) 141 - 
NEXT_HYPERCALL(arch_6) 142 - .balign PAGE_SIZE 90 + #define HYPERCALL(n) \ 91 + .equ xen_hypercall_##n, hypercall_page + __HYPERVISOR_##n * 32; \ 92 + .type xen_hypercall_##n, @function; .size xen_hypercall_##n, 32 93 + #include <asm/xen-hypercalls.h> 94 + #undef HYPERCALL 95 + 143 96 .popsection 144 97 145 98 ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux")
+3 -2
drivers/block/xen-blkback/xenbus.c
··· 193 193 return ERR_PTR(-ENOMEM); 194 194 } 195 195 196 - static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page, 196 + static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t gref, 197 197 unsigned int evtchn) 198 198 { 199 199 int err; ··· 202 202 if (blkif->irq) 203 203 return 0; 204 204 205 - err = xenbus_map_ring_valloc(blkif->be->dev, shared_page, &blkif->blk_ring); 205 + err = xenbus_map_ring_valloc(blkif->be->dev, &gref, 1, 206 + &blkif->blk_ring); 206 207 if (err < 0) 207 208 return err; 208 209
+3 -2
drivers/block/xen-blkfront.c
··· 1245 1245 struct blkfront_info *info) 1246 1246 { 1247 1247 struct blkif_sring *sring; 1248 + grant_ref_t gref; 1248 1249 int err; 1249 1250 1250 1251 info->ring_ref = GRANT_INVALID_REF; ··· 1258 1257 SHARED_RING_INIT(sring); 1259 1258 FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); 1260 1259 1261 - err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring)); 1260 + err = xenbus_grant_ring(dev, info->ring.sring, 1, &gref); 1262 1261 if (err < 0) { 1263 1262 free_page((unsigned long)sring); 1264 1263 info->ring.sring = NULL; 1265 1264 goto fail; 1266 1265 } 1267 - info->ring_ref = err; 1266 + info->ring_ref = gref; 1268 1267 1269 1268 err = xenbus_alloc_evtchn(dev, &info->evtchn); 1270 1269 if (err)
+3 -2
drivers/char/tpm/xen-tpmfront.c
··· 193 193 struct xenbus_transaction xbt; 194 194 const char *message = NULL; 195 195 int rv; 196 + grant_ref_t gref; 196 197 197 198 priv->shr = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO); 198 199 if (!priv->shr) { ··· 201 200 return -ENOMEM; 202 201 } 203 202 204 - rv = xenbus_grant_ring(dev, virt_to_mfn(priv->shr)); 203 + rv = xenbus_grant_ring(dev, &priv->shr, 1, &gref); 205 204 if (rv < 0) 206 205 return rv; 207 206 208 - priv->ring_ref = rv; 207 + priv->ring_ref = gref; 209 208 210 209 rv = xenbus_alloc_evtchn(dev, &priv->evtchn); 211 210 if (rv)
+2 -2
drivers/net/xen-netback/netback.c
··· 1780 1780 int err = -ENOMEM; 1781 1781 1782 1782 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif), 1783 - tx_ring_ref, &addr); 1783 + &tx_ring_ref, 1, &addr); 1784 1784 if (err) 1785 1785 goto err; 1786 1786 ··· 1788 1788 BACK_RING_INIT(&queue->tx, txs, PAGE_SIZE); 1789 1789 1790 1790 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif), 1791 - rx_ring_ref, &addr); 1791 + &rx_ring_ref, 1, &addr); 1792 1792 if (err) 1793 1793 goto err; 1794 1794
+5 -4
drivers/net/xen-netfront.c
··· 1483 1483 { 1484 1484 struct xen_netif_tx_sring *txs; 1485 1485 struct xen_netif_rx_sring *rxs; 1486 + grant_ref_t gref; 1486 1487 int err; 1487 1488 1488 1489 queue->tx_ring_ref = GRANT_INVALID_REF; ··· 1500 1499 SHARED_RING_INIT(txs); 1501 1500 FRONT_RING_INIT(&queue->tx, txs, PAGE_SIZE); 1502 1501 1503 - err = xenbus_grant_ring(dev, virt_to_mfn(txs)); 1502 + err = xenbus_grant_ring(dev, txs, 1, &gref); 1504 1503 if (err < 0) 1505 1504 goto grant_tx_ring_fail; 1506 - queue->tx_ring_ref = err; 1505 + queue->tx_ring_ref = gref; 1507 1506 1508 1507 rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH); 1509 1508 if (!rxs) { ··· 1514 1513 SHARED_RING_INIT(rxs); 1515 1514 FRONT_RING_INIT(&queue->rx, rxs, PAGE_SIZE); 1516 1515 1517 - err = xenbus_grant_ring(dev, virt_to_mfn(rxs)); 1516 + err = xenbus_grant_ring(dev, rxs, 1, &gref); 1518 1517 if (err < 0) 1519 1518 goto grant_rx_ring_fail; 1520 - queue->rx_ring_ref = err; 1519 + queue->rx_ring_ref = gref; 1521 1520 1522 1521 if (feature_split_evtchn) 1523 1522 err = setup_netfront_split(queue);
+3 -2
drivers/pci/xen-pcifront.c
··· 777 777 { 778 778 int err = 0; 779 779 struct xenbus_transaction trans; 780 + grant_ref_t gref; 780 781 781 - err = xenbus_grant_ring(pdev->xdev, virt_to_mfn(pdev->sh_info)); 782 + err = xenbus_grant_ring(pdev->xdev, pdev->sh_info, 1, &gref); 782 783 if (err < 0) 783 784 goto out; 784 785 785 - pdev->gnt_ref = err; 786 + pdev->gnt_ref = gref; 786 787 787 788 err = xenbus_alloc_evtchn(pdev->xdev, &pdev->evtchn); 788 789 if (err)
+182 -37
drivers/scsi/xen-scsifront.c
··· 63 63 64 64 #define VSCSIFRONT_OP_ADD_LUN 1 65 65 #define VSCSIFRONT_OP_DEL_LUN 2 66 + #define VSCSIFRONT_OP_READD_LUN 3 66 67 67 68 /* Tuning point. */ 68 69 #define VSCSIIF_DEFAULT_CMD_PER_LUN 10 ··· 114 113 DECLARE_BITMAP(shadow_free_bitmap, VSCSIIF_MAX_REQS); 115 114 struct vscsifrnt_shadow *shadow[VSCSIIF_MAX_REQS]; 116 115 116 + /* Following items are protected by the host lock. */ 117 117 wait_queue_head_t wq_sync; 118 + wait_queue_head_t wq_pause; 118 119 unsigned int wait_ring_available:1; 120 + unsigned int waiting_pause:1; 121 + unsigned int pause:1; 122 + unsigned callers; 119 123 120 124 char dev_state_path[64]; 121 125 struct task_struct *curr; ··· 280 274 wake_up(&shadow->wq_reset); 281 275 } 282 276 283 - static int scsifront_cmd_done(struct vscsifrnt_info *info) 277 + static void scsifront_do_response(struct vscsifrnt_info *info, 278 + struct vscsiif_response *ring_rsp) 279 + { 280 + if (WARN(ring_rsp->rqid >= VSCSIIF_MAX_REQS || 281 + test_bit(ring_rsp->rqid, info->shadow_free_bitmap), 282 + "illegal rqid %u returned by backend!\n", ring_rsp->rqid)) 283 + return; 284 + 285 + if (info->shadow[ring_rsp->rqid]->act == VSCSIIF_ACT_SCSI_CDB) 286 + scsifront_cdb_cmd_done(info, ring_rsp); 287 + else 288 + scsifront_sync_cmd_done(info, ring_rsp); 289 + } 290 + 291 + static int scsifront_ring_drain(struct vscsifrnt_info *info) 284 292 { 285 293 struct vscsiif_response *ring_rsp; 286 294 RING_IDX i, rp; 287 295 int more_to_do = 0; 288 - unsigned long flags; 289 - 290 - spin_lock_irqsave(info->host->host_lock, flags); 291 296 292 297 rp = info->ring.sring->rsp_prod; 293 298 rmb(); /* ordering required respective to dom0 */ 294 299 for (i = info->ring.rsp_cons; i != rp; i++) { 295 - 296 300 ring_rsp = RING_GET_RESPONSE(&info->ring, i); 297 - 298 - if (WARN(ring_rsp->rqid >= VSCSIIF_MAX_REQS || 299 - test_bit(ring_rsp->rqid, info->shadow_free_bitmap), 300 - "illegal rqid %u returned by backend!\n", 301 - ring_rsp->rqid)) 302 - continue; 303 - 304 - if 
(info->shadow[ring_rsp->rqid]->act == VSCSIIF_ACT_SCSI_CDB) 305 - scsifront_cdb_cmd_done(info, ring_rsp); 306 - else 307 - scsifront_sync_cmd_done(info, ring_rsp); 301 + scsifront_do_response(info, ring_rsp); 308 302 } 309 303 310 304 info->ring.rsp_cons = i; ··· 313 307 RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do); 314 308 else 315 309 info->ring.sring->rsp_event = i + 1; 310 + 311 + return more_to_do; 312 + } 313 + 314 + static int scsifront_cmd_done(struct vscsifrnt_info *info) 315 + { 316 + int more_to_do; 317 + unsigned long flags; 318 + 319 + spin_lock_irqsave(info->host->host_lock, flags); 320 + 321 + more_to_do = scsifront_ring_drain(info); 316 322 317 323 info->wait_ring_available = 0; 318 324 ··· 344 326 cond_resched(); 345 327 346 328 return IRQ_HANDLED; 329 + } 330 + 331 + static void scsifront_finish_all(struct vscsifrnt_info *info) 332 + { 333 + unsigned i; 334 + struct vscsiif_response resp; 335 + 336 + scsifront_ring_drain(info); 337 + 338 + for (i = 0; i < VSCSIIF_MAX_REQS; i++) { 339 + if (test_bit(i, info->shadow_free_bitmap)) 340 + continue; 341 + resp.rqid = i; 342 + resp.sense_len = 0; 343 + resp.rslt = DID_RESET << 16; 344 + resp.residual_len = 0; 345 + scsifront_do_response(info, &resp); 346 + } 347 347 } 348 348 349 349 static int map_data_for_request(struct vscsifrnt_info *info, ··· 511 475 return ring_req; 512 476 } 513 477 478 + static int scsifront_enter(struct vscsifrnt_info *info) 479 + { 480 + if (info->pause) 481 + return 1; 482 + info->callers++; 483 + return 0; 484 + } 485 + 486 + static void scsifront_return(struct vscsifrnt_info *info) 487 + { 488 + info->callers--; 489 + if (info->callers) 490 + return; 491 + 492 + if (!info->waiting_pause) 493 + return; 494 + 495 + info->waiting_pause = 0; 496 + wake_up(&info->wq_pause); 497 + } 498 + 514 499 static int scsifront_queuecommand(struct Scsi_Host *shost, 515 500 struct scsi_cmnd *sc) 516 501 { ··· 543 486 uint16_t rqid; 544 487 545 488 
spin_lock_irqsave(shost->host_lock, flags); 489 + if (scsifront_enter(info)) { 490 + spin_unlock_irqrestore(shost->host_lock, flags); 491 + return SCSI_MLQUEUE_HOST_BUSY; 492 + } 546 493 if (RING_FULL(&info->ring)) 547 494 goto busy; 548 495 ··· 566 505 if (err < 0) { 567 506 pr_debug("%s: err %d\n", __func__, err); 568 507 scsifront_put_rqid(info, rqid); 508 + scsifront_return(info); 569 509 spin_unlock_irqrestore(shost->host_lock, flags); 570 510 if (err == -ENOMEM) 571 511 return SCSI_MLQUEUE_HOST_BUSY; ··· 576 514 } 577 515 578 516 scsifront_do_request(info); 517 + scsifront_return(info); 579 518 spin_unlock_irqrestore(shost->host_lock, flags); 580 519 581 520 return 0; 582 521 583 522 busy: 523 + scsifront_return(info); 584 524 spin_unlock_irqrestore(shost->host_lock, flags); 585 525 pr_debug("%s: busy\n", __func__); 586 526 return SCSI_MLQUEUE_HOST_BUSY; ··· 613 549 if (ring_req) 614 550 break; 615 551 } 616 - if (err) { 552 + if (err || info->pause) { 617 553 spin_unlock_irq(host->host_lock); 618 554 kfree(shadow); 619 555 return FAILED; ··· 623 559 err = wait_event_interruptible(info->wq_sync, 624 560 !info->wait_ring_available); 625 561 spin_lock_irq(host->host_lock); 562 + } 563 + 564 + if (scsifront_enter(info)) { 565 + spin_unlock_irq(host->host_lock); 566 + return FAILED; 626 567 } 627 568 628 569 ring_req->act = act; ··· 656 587 err = FAILED; 657 588 } 658 589 590 + scsifront_return(info); 659 591 spin_unlock_irq(host->host_lock); 660 592 return err; 661 593 } ··· 714 644 { 715 645 struct xenbus_device *dev = info->dev; 716 646 struct vscsiif_sring *sring; 647 + grant_ref_t gref; 717 648 int err = -ENOMEM; 718 649 719 650 /***** Frontend to Backend ring start *****/ ··· 727 656 SHARED_RING_INIT(sring); 728 657 FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE); 729 658 730 - err = xenbus_grant_ring(dev, virt_to_mfn(sring)); 659 + err = xenbus_grant_ring(dev, sring, 1, &gref); 731 660 if (err < 0) { 732 661 free_page((unsigned long)sring); 733 662 
xenbus_dev_fatal(dev, err, 734 663 "fail to grant shared ring (Front to Back)"); 735 664 return err; 736 665 } 737 - info->ring_ref = err; 666 + info->ring_ref = gref; 738 667 739 668 err = xenbus_alloc_evtchn(dev, &info->evtchn); 740 669 if (err) { ··· 767 696 (unsigned long)info->ring.sring); 768 697 769 698 return err; 699 + } 700 + 701 + static void scsifront_free_ring(struct vscsifrnt_info *info) 702 + { 703 + unbind_from_irqhandler(info->irq, info); 704 + gnttab_end_foreign_access(info->ring_ref, 0, 705 + (unsigned long)info->ring.sring); 770 706 } 771 707 772 708 static int scsifront_init_ring(struct vscsifrnt_info *info) ··· 822 744 fail: 823 745 xenbus_transaction_end(xbt, 1); 824 746 free_sring: 825 - unbind_from_irqhandler(info->irq, info); 826 - gnttab_end_foreign_access(info->ring_ref, 0, 827 - (unsigned long)info->ring.sring); 747 + scsifront_free_ring(info); 828 748 829 749 return err; 830 750 } ··· 855 779 } 856 780 857 781 init_waitqueue_head(&info->wq_sync); 782 + init_waitqueue_head(&info->wq_pause); 858 783 spin_lock_init(&info->shadow_lock); 859 784 860 785 snprintf(name, TASK_COMM_LEN, "vscsiif.%d", host->host_no); ··· 879 802 return 0; 880 803 881 804 free_sring: 882 - unbind_from_irqhandler(info->irq, info); 883 - gnttab_end_foreign_access(info->ring_ref, 0, 884 - (unsigned long)info->ring.sring); 805 + scsifront_free_ring(info); 885 806 scsi_host_put(host); 807 + return err; 808 + } 809 + 810 + static int scsifront_resume(struct xenbus_device *dev) 811 + { 812 + struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev); 813 + struct Scsi_Host *host = info->host; 814 + int err; 815 + 816 + spin_lock_irq(host->host_lock); 817 + 818 + /* Finish all still pending commands. */ 819 + scsifront_finish_all(info); 820 + 821 + spin_unlock_irq(host->host_lock); 822 + 823 + /* Reconnect to dom0. 
*/ 824 + scsifront_free_ring(info); 825 + err = scsifront_init_ring(info); 826 + if (err) { 827 + dev_err(&dev->dev, "fail to resume %d\n", err); 828 + scsi_host_put(host); 829 + return err; 830 + } 831 + 832 + xenbus_switch_state(dev, XenbusStateInitialised); 833 + 834 + return 0; 835 + } 836 + 837 + static int scsifront_suspend(struct xenbus_device *dev) 838 + { 839 + struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev); 840 + struct Scsi_Host *host = info->host; 841 + int err = 0; 842 + 843 + /* No new commands for the backend. */ 844 + spin_lock_irq(host->host_lock); 845 + info->pause = 1; 846 + while (info->callers && !err) { 847 + info->waiting_pause = 1; 848 + info->wait_ring_available = 0; 849 + spin_unlock_irq(host->host_lock); 850 + wake_up(&info->wq_sync); 851 + err = wait_event_interruptible(info->wq_pause, 852 + !info->waiting_pause); 853 + spin_lock_irq(host->host_lock); 854 + } 855 + spin_unlock_irq(host->host_lock); 886 856 return err; 887 857 } 888 858 ··· 947 823 } 948 824 mutex_unlock(&scsifront_mutex); 949 825 950 - gnttab_end_foreign_access(info->ring_ref, 0, 951 - (unsigned long)info->ring.sring); 952 - unbind_from_irqhandler(info->irq, info); 953 - 826 + scsifront_free_ring(info); 954 827 scsi_host_put(info->host); 955 828 956 829 return 0; ··· 1040 919 scsi_device_put(sdev); 1041 920 } 1042 921 break; 922 + case VSCSIFRONT_OP_READD_LUN: 923 + if (device_state == XenbusStateConnected) 924 + xenbus_printf(XBT_NIL, dev->nodename, 925 + info->dev_state_path, 926 + "%d", XenbusStateConnected); 927 + break; 1043 928 default: 1044 929 break; 1045 930 } ··· 1059 932 static void scsifront_read_backend_params(struct xenbus_device *dev, 1060 933 struct vscsifrnt_info *info) 1061 934 { 1062 - unsigned int sg_grant; 935 + unsigned int sg_grant, nr_segs; 1063 936 int ret; 1064 937 struct Scsi_Host *host = info->host; 1065 938 1066 939 ret = xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg-grant", "%u", 1067 940 &sg_grant); 1068 - if (ret == 1 && 
sg_grant) { 1069 - sg_grant = min_t(unsigned int, sg_grant, SG_ALL); 1070 - sg_grant = max_t(unsigned int, sg_grant, VSCSIIF_SG_TABLESIZE); 1071 - host->sg_tablesize = min_t(unsigned int, sg_grant, 941 + if (ret != 1) 942 + sg_grant = 0; 943 + nr_segs = min_t(unsigned int, sg_grant, SG_ALL); 944 + nr_segs = max_t(unsigned int, nr_segs, VSCSIIF_SG_TABLESIZE); 945 + nr_segs = min_t(unsigned int, nr_segs, 1072 946 VSCSIIF_SG_TABLESIZE * PAGE_SIZE / 1073 947 sizeof(struct scsiif_request_segment)); 1074 - host->max_sectors = (host->sg_tablesize - 1) * PAGE_SIZE / 512; 1075 - } 1076 - dev_info(&dev->dev, "using up to %d SG entries\n", host->sg_tablesize); 948 + 949 + if (!info->pause && sg_grant) 950 + dev_info(&dev->dev, "using up to %d SG entries\n", nr_segs); 951 + else if (info->pause && nr_segs < host->sg_tablesize) 952 + dev_warn(&dev->dev, 953 + "SG entries decreased from %d to %u - device may not work properly anymore\n", 954 + host->sg_tablesize, nr_segs); 955 + 956 + host->sg_tablesize = nr_segs; 957 + host->max_sectors = (nr_segs - 1) * PAGE_SIZE / 512; 1077 958 } 1078 959 1079 960 static void scsifront_backend_changed(struct xenbus_device *dev, ··· 1100 965 1101 966 case XenbusStateConnected: 1102 967 scsifront_read_backend_params(dev, info); 968 + 969 + if (info->pause) { 970 + scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_READD_LUN); 971 + xenbus_switch_state(dev, XenbusStateConnected); 972 + info->pause = 0; 973 + return; 974 + } 975 + 1103 976 if (xenbus_read_driver_state(dev->nodename) == 1104 977 XenbusStateInitialised) 1105 978 scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN); ··· 1145 1002 .ids = scsifront_ids, 1146 1003 .probe = scsifront_probe, 1147 1004 .remove = scsifront_remove, 1005 + .resume = scsifront_resume, 1006 + .suspend = scsifront_suspend, 1148 1007 .otherend_changed = scsifront_backend_changed, 1149 1008 }; 1150 1009
+6
drivers/xen/Kconfig
··· 270 270 def_bool y 271 271 depends on X86_64 && EFI 272 272 273 + config XEN_AUTO_XLATE 274 + def_bool y 275 + depends on ARM || ARM64 || XEN_PVHVM 276 + help 277 + Support for auto-translated physmap guests. 278 + 273 279 endmenu
+1
drivers/xen/Makefile
··· 37 37 obj-$(CONFIG_XEN_ACPI_PROCESSOR) += xen-acpi-processor.o 38 38 obj-$(CONFIG_XEN_EFI) += efi.o 39 39 obj-$(CONFIG_XEN_SCSI_BACKEND) += xen-scsiback.o 40 + obj-$(CONFIG_XEN_AUTO_XLATE) += xlate_mmu.o 40 41 xen-evtchn-y := evtchn.o 41 42 xen-gntdev-y := gntdev.o 42 43 xen-gntalloc-y := gntalloc.o
+19 -8
drivers/xen/mcelog.c
··· 393 393 394 394 static int __init xen_late_init_mcelog(void) 395 395 { 396 - /* Only DOM0 is responsible for MCE logging */ 397 - if (xen_initial_domain()) { 398 - /* register character device /dev/mcelog for xen mcelog */ 399 - if (misc_register(&xen_mce_chrdev_device)) 400 - return -ENODEV; 401 - return bind_virq_for_mce(); 402 - } 396 + int ret; 403 397 404 - return -ENODEV; 398 + /* Only DOM0 is responsible for MCE logging */ 399 + if (!xen_initial_domain()) 400 + return -ENODEV; 401 + 402 + /* register character device /dev/mcelog for xen mcelog */ 403 + ret = misc_register(&xen_mce_chrdev_device); 404 + if (ret) 405 + return ret; 406 + 407 + ret = bind_virq_for_mce(); 408 + if (ret) 409 + goto deregister; 410 + 411 + return 0; 412 + 413 + deregister: 414 + misc_deregister(&xen_mce_chrdev_device); 415 + return ret; 405 416 } 406 417 device_initcall(xen_late_init_mcelog);
+13 -2
drivers/xen/pci.c
··· 19 19 20 20 #include <linux/pci.h> 21 21 #include <linux/acpi.h> 22 + #include <linux/pci-acpi.h> 22 23 #include <xen/xen.h> 23 24 #include <xen/interface/physdev.h> 24 25 #include <xen/interface/xen.h> ··· 68 67 69 68 #ifdef CONFIG_ACPI 70 69 handle = ACPI_HANDLE(&pci_dev->dev); 71 - if (!handle && pci_dev->bus->bridge) 72 - handle = ACPI_HANDLE(pci_dev->bus->bridge); 73 70 #ifdef CONFIG_PCI_IOV 74 71 if (!handle && pci_dev->is_virtfn) 75 72 handle = ACPI_HANDLE(physfn->bus->bridge); 76 73 #endif 74 + if (!handle) { 75 + /* 76 + * This device was not listed in the ACPI name space at 77 + * all. Try to get acpi handle of parent pci bus. 78 + */ 79 + struct pci_bus *pbus; 80 + for (pbus = pci_dev->bus; pbus; pbus = pbus->parent) { 81 + handle = acpi_pci_get_bridge_handle(pbus); 82 + if (handle) 83 + break; 84 + } 85 + } 77 86 if (handle) { 78 87 acpi_status status; 79 88
+28 -16
drivers/xen/pcpu.c
··· 132 132 } 133 133 static DEVICE_ATTR(online, S_IRUGO | S_IWUSR, show_online, store_online); 134 134 135 + static struct attribute *pcpu_dev_attrs[] = { 136 + &dev_attr_online.attr, 137 + NULL 138 + }; 139 + 140 + static umode_t pcpu_dev_is_visible(struct kobject *kobj, 141 + struct attribute *attr, int idx) 142 + { 143 + struct device *dev = kobj_to_dev(kobj); 144 + /* 145 + * Xen never offline cpu0 due to several restrictions 146 + * and assumptions. This basically doesn't add a sys control 147 + * to user, one cannot attempt to offline BSP. 148 + */ 149 + return dev->id ? attr->mode : 0; 150 + } 151 + 152 + static const struct attribute_group pcpu_dev_group = { 153 + .attrs = pcpu_dev_attrs, 154 + .is_visible = pcpu_dev_is_visible, 155 + }; 156 + 157 + static const struct attribute_group *pcpu_dev_groups[] = { 158 + &pcpu_dev_group, 159 + NULL 160 + }; 161 + 135 162 static bool xen_pcpu_online(uint32_t flags) 136 163 { 137 164 return !!(flags & XEN_PCPU_FLAGS_ONLINE); ··· 208 181 return; 209 182 210 183 dev = &pcpu->dev; 211 - if (dev->id) 212 - device_remove_file(dev, &dev_attr_online); 213 - 214 184 /* pcpu remove would be implicitly done */ 215 185 device_unregister(dev); 216 186 } ··· 224 200 dev->bus = &xen_pcpu_subsys; 225 201 dev->id = pcpu->cpu_id; 226 202 dev->release = pcpu_release; 203 + dev->groups = pcpu_dev_groups; 227 204 228 205 err = device_register(dev); 229 206 if (err) { 230 207 pcpu_release(dev); 231 208 return err; 232 - } 233 - 234 - /* 235 - * Xen never offline cpu0 due to several restrictions 236 - * and assumptions. This basically doesn't add a sys control 237 - * to user, one cannot attempt to offline BSP. 238 - */ 239 - if (dev->id) { 240 - err = device_create_file(dev, &dev_attr_online); 241 - if (err) { 242 - device_unregister(dev); 243 - return err; 244 - } 245 209 } 246 210 247 211 return 0;
+82 -35
drivers/xen/privcmd.c
··· 159 159 return ret; 160 160 } 161 161 162 + /* 163 + * Similar to traverse_pages, but use each page as a "block" of 164 + * data to be processed as one unit. 165 + */ 166 + static int traverse_pages_block(unsigned nelem, size_t size, 167 + struct list_head *pos, 168 + int (*fn)(void *data, int nr, void *state), 169 + void *state) 170 + { 171 + void *pagedata; 172 + unsigned pageidx; 173 + int ret = 0; 174 + 175 + BUG_ON(size > PAGE_SIZE); 176 + 177 + pageidx = PAGE_SIZE; 178 + 179 + while (nelem) { 180 + int nr = (PAGE_SIZE/size); 181 + struct page *page; 182 + if (nr > nelem) 183 + nr = nelem; 184 + pos = pos->next; 185 + page = list_entry(pos, struct page, lru); 186 + pagedata = page_address(page); 187 + ret = (*fn)(pagedata, nr, state); 188 + if (ret) 189 + break; 190 + nelem -= nr; 191 + } 192 + 193 + return ret; 194 + } 195 + 162 196 struct mmap_mfn_state { 163 197 unsigned long va; 164 198 struct vm_area_struct *vma; ··· 308 274 /* auto translated dom0 note: if domU being created is PV, then mfn is 309 275 * mfn(addr on bus). If it's auto xlated, then mfn is pfn (input to HAP). 
310 276 */ 311 - static int mmap_batch_fn(void *data, void *state) 277 + static int mmap_batch_fn(void *data, int nr, void *state) 312 278 { 313 279 xen_pfn_t *mfnp = data; 314 280 struct mmap_batch_state *st = state; 315 281 struct vm_area_struct *vma = st->vma; 316 282 struct page **pages = vma->vm_private_data; 317 - struct page *cur_page = NULL; 283 + struct page **cur_pages = NULL; 318 284 int ret; 319 285 320 286 if (xen_feature(XENFEAT_auto_translated_physmap)) 321 - cur_page = pages[st->index++]; 287 + cur_pages = &pages[st->index]; 322 288 323 - ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1, 324 - st->vma->vm_page_prot, st->domain, 325 - &cur_page); 289 + BUG_ON(nr < 0); 290 + ret = xen_remap_domain_mfn_array(st->vma, st->va & PAGE_MASK, mfnp, nr, 291 + (int *)mfnp, st->vma->vm_page_prot, 292 + st->domain, cur_pages); 326 293 327 - /* Store error code for second pass. */ 328 - if (st->version == 1) { 329 - if (ret < 0) { 330 - /* 331 - * V1 encodes the error codes in the 32bit top nibble of the 332 - * mfn (with its known limitations vis-a-vis 64 bit callers). 333 - */ 334 - *mfnp |= (ret == -ENOENT) ? 335 - PRIVCMD_MMAPBATCH_PAGED_ERROR : 336 - PRIVCMD_MMAPBATCH_MFN_ERROR; 337 - } 338 - } else { /* st->version == 2 */ 339 - *((int *) mfnp) = ret; 340 - } 341 - 342 - /* And see if it affects the global_error. */ 343 - if (ret < 0) { 294 + /* Adjust the global_error? 
*/ 295 + if (ret != nr) { 344 296 if (ret == -ENOENT) 345 297 st->global_error = -ENOENT; 346 298 else { ··· 335 315 st->global_error = 1; 336 316 } 337 317 } 338 - st->va += PAGE_SIZE; 318 + st->va += PAGE_SIZE * nr; 319 + st->index += nr; 339 320 340 321 return 0; 341 322 } 342 323 343 - static int mmap_return_errors(void *data, void *state) 324 + static int mmap_return_error(int err, struct mmap_batch_state *st) 344 325 { 345 - struct mmap_batch_state *st = state; 326 + int ret; 346 327 347 328 if (st->version == 1) { 348 - xen_pfn_t mfnp = *((xen_pfn_t *) data); 349 - if (mfnp & PRIVCMD_MMAPBATCH_MFN_ERROR) 350 - return __put_user(mfnp, st->user_mfn++); 351 - else 329 + if (err) { 330 + xen_pfn_t mfn; 331 + 332 + ret = get_user(mfn, st->user_mfn); 333 + if (ret < 0) 334 + return ret; 335 + /* 336 + * V1 encodes the error codes in the 32bit top 337 + * nibble of the mfn (with its known 338 + * limitations vis-a-vis 64 bit callers). 339 + */ 340 + mfn |= (err == -ENOENT) ? 341 + PRIVCMD_MMAPBATCH_PAGED_ERROR : 342 + PRIVCMD_MMAPBATCH_MFN_ERROR; 343 + return __put_user(mfn, st->user_mfn++); 344 + } else 352 345 st->user_mfn++; 353 346 } else { /* st->version == 2 */ 354 - int err = *((int *) data); 355 347 if (err) 356 348 return __put_user(err, st->user_err++); 357 349 else 358 350 st->user_err++; 359 351 } 360 352 353 + return 0; 354 + } 355 + 356 + static int mmap_return_errors(void *data, int nr, void *state) 357 + { 358 + struct mmap_batch_state *st = state; 359 + int *errs = data; 360 + int i; 361 + int ret; 362 + 363 + for (i = 0; i < nr; i++) { 364 + ret = mmap_return_error(errs[i], st); 365 + if (ret < 0) 366 + return ret; 367 + } 361 368 return 0; 362 369 } 363 370 ··· 519 472 state.version = version; 520 473 521 474 /* mmap_batch_fn guarantees ret == 0 */ 522 - BUG_ON(traverse_pages(m.num, sizeof(xen_pfn_t), 523 - &pagelist, mmap_batch_fn, &state)); 475 + BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t), 476 + &pagelist, mmap_batch_fn, &state)); 524 
477 525 478 up_write(&mm->mmap_sem); 526 479 ··· 528 481 /* Write back errors in second pass. */ 529 482 state.user_mfn = (xen_pfn_t *)m.arr; 530 483 state.user_err = m.err; 531 - ret = traverse_pages(m.num, sizeof(xen_pfn_t), 532 - &pagelist, mmap_return_errors, &state); 484 + ret = traverse_pages_block(m.num, sizeof(xen_pfn_t), 485 + &pagelist, mmap_return_errors, &state); 533 486 } else 534 487 ret = 0; 535 488
+20 -25
drivers/xen/xen-balloon.c
··· 193 193 show_target, store_target); 194 194 195 195 196 - static struct device_attribute *balloon_attrs[] = { 197 - &dev_attr_target_kb, 198 - &dev_attr_target, 199 - &dev_attr_schedule_delay.attr, 200 - &dev_attr_max_schedule_delay.attr, 201 - &dev_attr_retry_count.attr, 202 - &dev_attr_max_retry_count.attr 196 + static struct attribute *balloon_attrs[] = { 197 + &dev_attr_target_kb.attr, 198 + &dev_attr_target.attr, 199 + &dev_attr_schedule_delay.attr.attr, 200 + &dev_attr_max_schedule_delay.attr.attr, 201 + &dev_attr_retry_count.attr.attr, 202 + &dev_attr_max_retry_count.attr.attr, 203 + NULL 204 + }; 205 + 206 + static const struct attribute_group balloon_group = { 207 + .attrs = balloon_attrs 203 208 }; 204 209 205 210 static struct attribute *balloon_info_attrs[] = { ··· 219 214 .attrs = balloon_info_attrs 220 215 }; 221 216 217 + static const struct attribute_group *balloon_groups[] = { 218 + &balloon_group, 219 + &balloon_info_group, 220 + NULL 221 + }; 222 + 222 223 static struct bus_type balloon_subsys = { 223 224 .name = BALLOON_CLASS_NAME, 224 225 .dev_name = BALLOON_CLASS_NAME, ··· 232 221 233 222 static int register_balloon(struct device *dev) 234 223 { 235 - int i, error; 224 + int error; 236 225 237 226 error = subsys_system_register(&balloon_subsys, NULL); 238 227 if (error) ··· 240 229 241 230 dev->id = 0; 242 231 dev->bus = &balloon_subsys; 232 + dev->groups = balloon_groups; 243 233 244 234 error = device_register(dev); 245 235 if (error) { ··· 248 236 return error; 249 237 } 250 238 251 - for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) { 252 - error = device_create_file(dev, balloon_attrs[i]); 253 - if (error) 254 - goto fail; 255 - } 256 - 257 - error = sysfs_create_group(&dev->kobj, &balloon_info_group); 258 - if (error) 259 - goto fail; 260 - 261 239 return 0; 262 - 263 - fail: 264 - while (--i >= 0) 265 - device_remove_file(dev, balloon_attrs[i]); 266 - device_unregister(dev); 267 - bus_unregister(&balloon_subsys); 268 - return error; 
269 240 } 270 241 271 242 MODULE_LICENSE("GPL");
+14 -1
drivers/xen/xen-pciback/conf_space_header.c
··· 88 88 printk(KERN_DEBUG DRV_NAME ": %s: set bus master\n", 89 89 pci_name(dev)); 90 90 pci_set_master(dev); 91 + } else if (dev->is_busmaster && !is_master_cmd(value)) { 92 + if (unlikely(verbose_request)) 93 + printk(KERN_DEBUG DRV_NAME ": %s: clear bus master\n", 94 + pci_name(dev)); 95 + pci_clear_master(dev); 91 96 } 92 97 93 - if (value & PCI_COMMAND_INVALIDATE) { 98 + if (!(cmd->val & PCI_COMMAND_INVALIDATE) && 99 + (value & PCI_COMMAND_INVALIDATE)) { 94 100 if (unlikely(verbose_request)) 95 101 printk(KERN_DEBUG 96 102 DRV_NAME ": %s: enable memory-write-invalidate\n", ··· 107 101 pci_name(dev), err); 108 102 value &= ~PCI_COMMAND_INVALIDATE; 109 103 } 104 + } else if ((cmd->val & PCI_COMMAND_INVALIDATE) && 105 + !(value & PCI_COMMAND_INVALIDATE)) { 106 + if (unlikely(verbose_request)) 107 + printk(KERN_DEBUG 108 + DRV_NAME ": %s: disable memory-write-invalidate\n", 109 + pci_name(dev)); 110 + pci_clear_mwi(dev); 110 111 } 111 112 112 113 cmd->val = value;
+2 -2
drivers/xen/xen-pciback/pci_stub.c
··· 118 118 int err = HYPERVISOR_physdev_op(PHYSDEVOP_release_msix, 119 119 &ppdev); 120 120 121 - if (err) 121 + if (err && err != -ENOSYS) 122 122 dev_warn(&dev->dev, "MSI-X release failed (%d)\n", 123 123 err); 124 124 } ··· 402 402 }; 403 403 404 404 err = HYPERVISOR_physdev_op(PHYSDEVOP_prepare_msix, &ppdev); 405 - if (err) 405 + if (err && err != -ENOSYS) 406 406 dev_err(&dev->dev, "MSI-X preparation failed (%d)\n", 407 407 err); 408 408 }
+1 -1
drivers/xen/xen-pciback/xenbus.c
··· 113 113 "Attaching to frontend resources - gnt_ref=%d evtchn=%d\n", 114 114 gnt_ref, remote_evtchn); 115 115 116 - err = xenbus_map_ring_valloc(pdev->xdev, gnt_ref, &vaddr); 116 + err = xenbus_map_ring_valloc(pdev->xdev, &gnt_ref, 1, &vaddr); 117 117 if (err < 0) { 118 118 xenbus_dev_fatal(pdev->xdev, err, 119 119 "Error mapping other domain page in ours.");
+51 -45
drivers/xen/xen-scsiback.c
··· 31 31 * IN THE SOFTWARE. 32 32 */ 33 33 34 + #define pr_fmt(fmt) "xen-pvscsi: " fmt 35 + 34 36 #include <stdarg.h> 35 37 36 38 #include <linux/module.h> ··· 70 68 71 69 #include <xen/interface/grant_table.h> 72 70 #include <xen/interface/io/vscsiif.h> 73 - 74 - #define DPRINTK(_f, _a...) \ 75 - pr_debug("(file=%s, line=%d) " _f, __FILE__ , __LINE__ , ## _a) 76 71 77 72 #define VSCSI_VERSION "v0.1" 78 73 #define VSCSI_NAMELEN 32 ··· 270 271 { 271 272 struct scsiback_tpg *tpg = pending_req->v2p->tpg; 272 273 273 - pr_err("xen-pvscsi[%s:%d] cmnd[0]=%02x -> st=%02x msg=%02x host=%02x drv=%02x\n", 274 + pr_err("[%s:%d] cmnd[0]=%02x -> st=%02x msg=%02x host=%02x drv=%02x\n", 274 275 tpg->tport->tport_name, pending_req->v2p->lun, 275 276 pending_req->cmnd[0], status_byte(errors), msg_byte(errors), 276 277 host_byte(errors), driver_byte(errors)); ··· 426 427 BUG_ON(err); 427 428 for (i = 0; i < cnt; i++) { 428 429 if (unlikely(map[i].status != GNTST_okay)) { 429 - pr_err("xen-pvscsi: invalid buffer -- could not remap it\n"); 430 + pr_err("invalid buffer -- could not remap it\n"); 430 431 map[i].handle = SCSIBACK_INVALID_HANDLE; 431 432 err = -ENOMEM; 432 433 } else { ··· 448 449 for (i = 0; i < cnt; i++) { 449 450 if (get_free_page(pg + mapcount)) { 450 451 put_free_pages(pg, mapcount); 451 - pr_err("xen-pvscsi: no grant page\n"); 452 + pr_err("no grant page\n"); 452 453 return -ENOMEM; 453 454 } 454 455 gnttab_set_map_op(&map[mapcount], vaddr_page(pg[mapcount]), ··· 491 492 return 0; 492 493 493 494 if (nr_segments > VSCSIIF_SG_TABLESIZE) { 494 - DPRINTK("xen-pvscsi: invalid parameter nr_seg = %d\n", 495 + pr_debug("invalid parameter nr_seg = %d\n", 495 496 ring_req->nr_segments); 496 497 return -EINVAL; 497 498 } ··· 515 516 nr_segments += n_segs; 516 517 } 517 518 if (nr_segments > SG_ALL) { 518 - DPRINTK("xen-pvscsi: invalid nr_seg = %d\n", 519 - nr_segments); 519 + pr_debug("invalid nr_seg = %d\n", nr_segments); 520 520 return -EINVAL; 521 521 } 522 522 } 523 523 
524 - /* free of (sgl) in fast_flush_area()*/ 524 + /* free of (sgl) in fast_flush_area() */ 525 525 pending_req->sgl = kmalloc_array(nr_segments, 526 526 sizeof(struct scatterlist), GFP_KERNEL); 527 527 if (!pending_req->sgl) ··· 677 679 v2p = scsiback_do_translation(info, &vir); 678 680 if (!v2p) { 679 681 pending_req->v2p = NULL; 680 - DPRINTK("xen-pvscsi: doesn't exist.\n"); 682 + pr_debug("the v2p of (chn:%d, tgt:%d, lun:%d) doesn't exist.\n", 683 + vir.chn, vir.tgt, vir.lun); 681 684 return -ENODEV; 682 685 } 683 686 pending_req->v2p = v2p; ··· 689 690 (pending_req->sc_data_direction != DMA_TO_DEVICE) && 690 691 (pending_req->sc_data_direction != DMA_FROM_DEVICE) && 691 692 (pending_req->sc_data_direction != DMA_NONE)) { 692 - DPRINTK("xen-pvscsi: invalid parameter data_dir = %d\n", 693 + pr_debug("invalid parameter data_dir = %d\n", 693 694 pending_req->sc_data_direction); 694 695 return -EINVAL; 695 696 } 696 697 697 698 pending_req->cmd_len = ring_req->cmd_len; 698 699 if (pending_req->cmd_len > VSCSIIF_MAX_COMMAND_SIZE) { 699 - DPRINTK("xen-pvscsi: invalid parameter cmd_len = %d\n", 700 + pr_debug("invalid parameter cmd_len = %d\n", 700 701 pending_req->cmd_len); 701 702 return -EINVAL; 702 703 } ··· 720 721 721 722 if (RING_REQUEST_PROD_OVERFLOW(ring, rp)) { 722 723 rc = ring->rsp_prod_pvt; 723 - pr_warn("xen-pvscsi: Dom%d provided bogus ring requests (%#x - %#x = %u). Halting ring processing\n", 724 + pr_warn("Dom%d provided bogus ring requests (%#x - %#x = %u). 
Halting ring processing\n", 724 725 info->domid, rp, rc, rp - rc); 725 726 info->ring_error = 1; 726 727 return 0; ··· 771 772 scsiback_device_action(pending_req, TMR_LUN_RESET, 0); 772 773 break; 773 774 default: 774 - pr_err_ratelimited("xen-pvscsi: invalid request\n"); 775 + pr_err_ratelimited("invalid request\n"); 775 776 scsiback_do_resp_with_sense(NULL, DRIVER_ERROR << 24, 776 777 0, pending_req); 777 778 kmem_cache_free(scsiback_cachep, pending_req); ··· 809 810 if (info->irq) 810 811 return -1; 811 812 812 - err = xenbus_map_ring_valloc(info->dev, ring_ref, &area); 813 + err = xenbus_map_ring_valloc(info->dev, &ring_ref, 1, &area); 813 814 if (err) 814 815 return err; 815 816 ··· 873 874 874 875 lunp = strrchr(phy, ':'); 875 876 if (!lunp) { 876 - pr_err("xen-pvscsi: illegal format of physical device %s\n", 877 - phy); 877 + pr_err("illegal format of physical device %s\n", phy); 878 878 return -EINVAL; 879 879 } 880 880 *lunp = 0; 881 881 lunp++; 882 882 if (kstrtouint(lunp, 10, &lun) || lun >= TRANSPORT_MAX_LUNS_PER_TPG) { 883 - pr_err("xen-pvscsi: lun number not valid: %s\n", lunp); 883 + pr_err("lun number not valid: %s\n", lunp); 884 884 return -EINVAL; 885 885 } 886 886 ··· 907 909 mutex_unlock(&scsiback_mutex); 908 910 909 911 if (!tpg) { 910 - pr_err("xen-pvscsi: %s:%d %s\n", phy, lun, error); 912 + pr_err("%s:%d %s\n", phy, lun, error); 911 913 return -ENODEV; 912 914 } 913 915 ··· 924 926 if ((entry->v.chn == v->chn) && 925 927 (entry->v.tgt == v->tgt) && 926 928 (entry->v.lun == v->lun)) { 927 - pr_warn("xen-pvscsi: Virtual ID is already used. Assignment was not performed.\n"); 929 + pr_warn("Virtual ID is already used. 
Assignment was not performed.\n"); 928 930 err = -EEXIST; 929 931 goto out; 930 932 } ··· 990 992 } 991 993 992 994 static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state, 993 - char *phy, struct ids_tuple *vir) 995 + char *phy, struct ids_tuple *vir, int try) 994 996 { 995 997 if (!scsiback_add_translation_entry(info, phy, vir)) { 996 998 if (xenbus_printf(XBT_NIL, info->dev->nodename, state, 997 999 "%d", XenbusStateInitialised)) { 998 - pr_err("xen-pvscsi: xenbus_printf error %s\n", state); 1000 + pr_err("xenbus_printf error %s\n", state); 999 1001 scsiback_del_translation_entry(info, vir); 1000 1002 } 1001 - } else { 1003 + } else if (!try) { 1002 1004 xenbus_printf(XBT_NIL, info->dev->nodename, state, 1003 1005 "%d", XenbusStateClosed); 1004 1006 } ··· 1010 1012 if (!scsiback_del_translation_entry(info, vir)) { 1011 1013 if (xenbus_printf(XBT_NIL, info->dev->nodename, state, 1012 1014 "%d", XenbusStateClosed)) 1013 - pr_err("xen-pvscsi: xenbus_printf error %s\n", state); 1015 + pr_err("xenbus_printf error %s\n", state); 1014 1016 } 1015 1017 } 1016 1018 ··· 1058 1060 1059 1061 switch (op) { 1060 1062 case VSCSIBACK_OP_ADD_OR_DEL_LUN: 1061 - if (device_state == XenbusStateInitialising) 1062 - scsiback_do_add_lun(info, state, phy, &vir); 1063 - if (device_state == XenbusStateClosing) 1063 + switch (device_state) { 1064 + case XenbusStateInitialising: 1065 + scsiback_do_add_lun(info, state, phy, &vir, 0); 1066 + break; 1067 + case XenbusStateConnected: 1068 + scsiback_do_add_lun(info, state, phy, &vir, 1); 1069 + break; 1070 + case XenbusStateClosing: 1064 1071 scsiback_do_del_lun(info, state, &vir); 1072 + break; 1073 + default: 1074 + break; 1075 + } 1065 1076 break; 1066 1077 1067 1078 case VSCSIBACK_OP_UPDATEDEV_STATE: ··· 1078 1071 /* modify vscsi-devs/dev-x/state */ 1079 1072 if (xenbus_printf(XBT_NIL, dev->nodename, state, 1080 1073 "%d", XenbusStateConnected)) { 1081 - pr_err("xen-pvscsi: xenbus_printf error %s\n", 1082 - str); 1074 
+ pr_err("xenbus_printf error %s\n", str); 1083 1075 scsiback_del_translation_entry(info, &vir); 1084 1076 xenbus_printf(XBT_NIL, dev->nodename, state, 1085 1077 "%d", XenbusStateClosed); 1086 1078 } 1087 1079 } 1088 1080 break; 1089 - /*When it is necessary, processing is added here.*/ 1081 + /* When it is necessary, processing is added here. */ 1090 1082 default: 1091 1083 break; 1092 1084 } ··· 1202 1196 struct vscsibk_info *info = kzalloc(sizeof(struct vscsibk_info), 1203 1197 GFP_KERNEL); 1204 1198 1205 - DPRINTK("%p %d\n", dev, dev->otherend_id); 1199 + pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id); 1206 1200 1207 1201 if (!info) { 1208 1202 xenbus_dev_fatal(dev, -ENOMEM, "allocating backend structure"); ··· 1233 1227 return 0; 1234 1228 1235 1229 fail: 1236 - pr_warn("xen-pvscsi: %s failed\n", __func__); 1230 + pr_warn("%s failed\n", __func__); 1237 1231 scsiback_remove(dev); 1238 1232 1239 1233 return err; ··· 1438 1432 } 1439 1433 snprintf(&tport->tport_name[0], VSCSI_NAMELEN, "%s", &name[off]); 1440 1434 1441 - pr_debug("xen-pvscsi: Allocated emulated Target %s Address: %s\n", 1435 + pr_debug("Allocated emulated Target %s Address: %s\n", 1442 1436 scsiback_dump_proto_id(tport), name); 1443 1437 1444 1438 return &tport->tport_wwn; ··· 1449 1443 struct scsiback_tport *tport = container_of(wwn, 1450 1444 struct scsiback_tport, tport_wwn); 1451 1445 1452 - pr_debug("xen-pvscsi: Deallocating emulated Target %s Address: %s\n", 1446 + pr_debug("Deallocating emulated Target %s Address: %s\n", 1453 1447 scsiback_dump_proto_id(tport), tport->tport_name); 1454 1448 1455 1449 kfree(tport); ··· 1476 1470 static int scsiback_check_stop_free(struct se_cmd *se_cmd) 1477 1471 { 1478 1472 /* 1479 - * Do not release struct se_cmd's containing a valid TMR 1480 - * pointer. These will be released directly in scsiback_device_action() 1473 + * Do not release struct se_cmd's containing a valid TMR pointer. 
1474 + * These will be released directly in scsiback_device_action() 1481 1475 * with transport_generic_free_cmd(). 1482 1476 */ 1483 1477 if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) ··· 1643 1637 return -ENOMEM; 1644 1638 } 1645 1639 /* 1646 - * Initialize the struct se_session pointer 1640 + * Initialize the struct se_session pointer 1647 1641 */ 1648 1642 tv_nexus->tvn_se_sess = transport_init_session(TARGET_PROT_NORMAL); 1649 1643 if (IS_ERR(tv_nexus->tvn_se_sess)) { ··· 1711 1705 return -EBUSY; 1712 1706 } 1713 1707 1714 - pr_debug("xen-pvscsi: Removing I_T Nexus to emulated %s Initiator Port: %s\n", 1708 + pr_debug("Removing I_T Nexus to emulated %s Initiator Port: %s\n", 1715 1709 scsiback_dump_proto_id(tpg->tport), 1716 1710 tv_nexus->tvn_se_sess->se_node_acl->initiatorname); 1717 1711 ··· 1757 1751 unsigned char i_port[VSCSI_NAMELEN], *ptr, *port_ptr; 1758 1752 int ret; 1759 1753 /* 1760 - * Shutdown the active I_T nexus if 'NULL' is passed.. 1754 + * Shutdown the active I_T nexus if 'NULL' is passed. 1761 1755 */ 1762 1756 if (!strncmp(page, "NULL", 4)) { 1763 1757 ret = scsiback_drop_nexus(tpg); ··· 1928 1922 */ 1929 1923 scsiback_drop_nexus(tpg); 1930 1924 /* 1931 - * Deregister the se_tpg from TCM.. 1925 + * Deregister the se_tpg from TCM. 
1932 1926 */ 1933 1927 core_tpg_deregister(se_tpg); 1934 1928 kfree(tpg); ··· 1998 1992 struct target_fabric_configfs *fabric; 1999 1993 int ret; 2000 1994 2001 - pr_debug("xen-pvscsi: fabric module %s on %s/%s on "UTS_RELEASE"\n", 1995 + pr_debug("fabric module %s on %s/%s on "UTS_RELEASE"\n", 2002 1996 VSCSI_VERSION, utsname()->sysname, utsname()->machine); 2003 1997 /* 2004 1998 * Register the top level struct config_item_type with TCM core ··· 2035 2029 * Setup our local pointer to *fabric 2036 2030 */ 2037 2031 scsiback_fabric_configfs = fabric; 2038 - pr_debug("xen-pvscsi: Set fabric -> scsiback_fabric_configfs\n"); 2032 + pr_debug("Set fabric -> scsiback_fabric_configfs\n"); 2039 2033 return 0; 2040 2034 }; 2041 2035 ··· 2046 2040 2047 2041 target_fabric_configfs_deregister(scsiback_fabric_configfs); 2048 2042 scsiback_fabric_configfs = NULL; 2049 - pr_debug("xen-pvscsi: Cleared scsiback_fabric_configfs\n"); 2043 + pr_debug("Cleared scsiback_fabric_configfs\n"); 2050 2044 }; 2051 2045 2052 2046 static const struct xenbus_device_id scsiback_ids[] = { ··· 2097 2091 xenbus_unregister_driver(&scsiback_driver); 2098 2092 out_cache_destroy: 2099 2093 kmem_cache_destroy(scsiback_cachep); 2100 - pr_err("xen-pvscsi: %s: error %d\n", __func__, ret); 2094 + pr_err("%s: error %d\n", __func__, ret); 2101 2095 return ret; 2102 2096 } 2103 2097
+287 -102
drivers/xen/xenbus/xenbus_client.c
··· 52 52 struct xenbus_map_node { 53 53 struct list_head next; 54 54 union { 55 - struct vm_struct *area; /* PV */ 56 - struct page *page; /* HVM */ 55 + struct { 56 + struct vm_struct *area; 57 + } pv; 58 + struct { 59 + struct page *pages[XENBUS_MAX_RING_PAGES]; 60 + void *addr; 61 + } hvm; 57 62 }; 58 - grant_handle_t handle; 63 + grant_handle_t handles[XENBUS_MAX_RING_PAGES]; 64 + unsigned int nr_handles; 59 65 }; 60 66 61 67 static DEFINE_SPINLOCK(xenbus_valloc_lock); 62 68 static LIST_HEAD(xenbus_valloc_pages); 63 69 64 70 struct xenbus_ring_ops { 65 - int (*map)(struct xenbus_device *dev, int gnt, void **vaddr); 71 + int (*map)(struct xenbus_device *dev, 72 + grant_ref_t *gnt_refs, unsigned int nr_grefs, 73 + void **vaddr); 66 74 int (*unmap)(struct xenbus_device *dev, void *vaddr); 67 75 }; 68 76 ··· 363 355 /** 364 356 * xenbus_grant_ring 365 357 * @dev: xenbus device 366 - * @ring_mfn: mfn of ring to grant 367 - 368 - * Grant access to the given @ring_mfn to the peer of the given device. Return 369 - * a grant reference on success, or -errno on error. On error, the device will 370 - * switch to XenbusStateClosing, and the error will be saved in the store. 358 + * @vaddr: starting virtual address of the ring 359 + * @nr_pages: number of pages to be granted 360 + * @grefs: grant reference array to be filled in 361 + * 362 + * Grant access to the given @vaddr to the peer of the given device. 363 + * Then fill in @grefs with grant references. Return 0 on success, or 364 + * -errno on error. On error, the device will switch to 365 + * XenbusStateClosing, and the error will be saved in the store. 
371 366 */ 372 - int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn) 367 + int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr, 368 + unsigned int nr_pages, grant_ref_t *grefs) 373 369 { 374 - int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0); 375 - if (err < 0) 376 - xenbus_dev_fatal(dev, err, "granting access to ring page"); 370 + int err; 371 + int i, j; 372 + 373 + for (i = 0; i < nr_pages; i++) { 374 + unsigned long addr = (unsigned long)vaddr + 375 + (PAGE_SIZE * i); 376 + err = gnttab_grant_foreign_access(dev->otherend_id, 377 + virt_to_mfn(addr), 0); 378 + if (err < 0) { 379 + xenbus_dev_fatal(dev, err, 380 + "granting access to ring page"); 381 + goto fail; 382 + } 383 + grefs[i] = err; 384 + } 385 + 386 + return 0; 387 + 388 + fail: 389 + for (j = 0; j < i; j++) 390 + gnttab_end_foreign_access_ref(grefs[j], 0); 377 391 return err; 378 392 } 379 393 EXPORT_SYMBOL_GPL(xenbus_grant_ring); ··· 449 419 /** 450 420 * xenbus_map_ring_valloc 451 421 * @dev: xenbus device 452 - * @gnt_ref: grant reference 422 + * @gnt_refs: grant reference array 423 + * @nr_grefs: number of grant references 453 424 * @vaddr: pointer to address to be filled out by mapping 454 425 * 455 - * Based on Rusty Russell's skeleton driver's map_page. 456 - * Map a page of memory into this domain from another domain's grant table. 457 - * xenbus_map_ring_valloc allocates a page of virtual address space, maps the 458 - * page to that address, and sets *vaddr to that address. 459 - * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h) 460 - * or -ENOMEM on error. If an error is returned, device will switch to 426 + * Map @nr_grefs pages of memory into this domain from another 427 + * domain's grant table. xenbus_map_ring_valloc allocates @nr_grefs 428 + * pages of virtual address space, maps the pages to that address, and 429 + * sets *vaddr to that address. 
Returns 0 on success, and GNTST_* 430 + * (see xen/include/interface/grant_table.h) or -ENOMEM / -EINVAL on 431 + * error. If an error is returned, device will switch to 461 432 * XenbusStateClosing and the error message will be saved in XenStore. 462 433 */ 463 - int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr) 434 + int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs, 435 + unsigned int nr_grefs, void **vaddr) 464 436 { 465 - return ring_ops->map(dev, gnt_ref, vaddr); 437 + return ring_ops->map(dev, gnt_refs, nr_grefs, vaddr); 466 438 } 467 439 EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc); 468 440 469 - static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev, 470 - int gnt_ref, void **vaddr) 441 + /* N.B. sizeof(phys_addr_t) doesn't always equal to sizeof(unsigned 442 + * long), e.g. 32-on-64. Caller is responsible for preparing the 443 + * right array to feed into this function */ 444 + static int __xenbus_map_ring(struct xenbus_device *dev, 445 + grant_ref_t *gnt_refs, 446 + unsigned int nr_grefs, 447 + grant_handle_t *handles, 448 + phys_addr_t *addrs, 449 + unsigned int flags, 450 + bool *leaked) 471 451 { 472 - struct gnttab_map_grant_ref op = { 473 - .flags = GNTMAP_host_map | GNTMAP_contains_pte, 474 - .ref = gnt_ref, 475 - .dom = dev->otherend_id, 476 - }; 452 + struct gnttab_map_grant_ref map[XENBUS_MAX_RING_PAGES]; 453 + struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_PAGES]; 454 + int i, j; 455 + int err = GNTST_okay; 456 + 457 + if (nr_grefs > XENBUS_MAX_RING_PAGES) 458 + return -EINVAL; 459 + 460 + for (i = 0; i < nr_grefs; i++) { 461 + memset(&map[i], 0, sizeof(map[i])); 462 + gnttab_set_map_op(&map[i], addrs[i], flags, gnt_refs[i], 463 + dev->otherend_id); 464 + handles[i] = INVALID_GRANT_HANDLE; 465 + } 466 + 467 + gnttab_batch_map(map, i); 468 + 469 + for (i = 0; i < nr_grefs; i++) { 470 + if (map[i].status != GNTST_okay) { 471 + err = map[i].status; 472 + xenbus_dev_fatal(dev, 
map[i].status, 473 + "mapping in shared page %d from domain %d", 474 + gnt_refs[i], dev->otherend_id); 475 + goto fail; 476 + } else 477 + handles[i] = map[i].handle; 478 + } 479 + 480 + return GNTST_okay; 481 + 482 + fail: 483 + for (i = j = 0; i < nr_grefs; i++) { 484 + if (handles[i] != INVALID_GRANT_HANDLE) { 485 + memset(&unmap[j], 0, sizeof(unmap[j])); 486 + gnttab_set_unmap_op(&unmap[j], (phys_addr_t)addrs[i], 487 + GNTMAP_host_map, handles[i]); 488 + j++; 489 + } 490 + } 491 + 492 + if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, j)) 493 + BUG(); 494 + 495 + *leaked = false; 496 + for (i = 0; i < j; i++) { 497 + if (unmap[i].status != GNTST_okay) { 498 + *leaked = true; 499 + break; 500 + } 501 + } 502 + 503 + return err; 504 + } 505 + 506 + static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev, 507 + grant_ref_t *gnt_refs, 508 + unsigned int nr_grefs, 509 + void **vaddr) 510 + { 477 511 struct xenbus_map_node *node; 478 512 struct vm_struct *area; 479 - pte_t *pte; 513 + pte_t *ptes[XENBUS_MAX_RING_PAGES]; 514 + phys_addr_t phys_addrs[XENBUS_MAX_RING_PAGES]; 515 + int err = GNTST_okay; 516 + int i; 517 + bool leaked; 480 518 481 519 *vaddr = NULL; 520 + 521 + if (nr_grefs > XENBUS_MAX_RING_PAGES) 522 + return -EINVAL; 482 523 483 524 node = kzalloc(sizeof(*node), GFP_KERNEL); 484 525 if (!node) 485 526 return -ENOMEM; 486 527 487 - area = alloc_vm_area(PAGE_SIZE, &pte); 528 + area = alloc_vm_area(PAGE_SIZE * nr_grefs, ptes); 488 529 if (!area) { 489 530 kfree(node); 490 531 return -ENOMEM; 491 532 } 492 533 493 - op.host_addr = arbitrary_virt_to_machine(pte).maddr; 534 + for (i = 0; i < nr_grefs; i++) 535 + phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr; 494 536 495 - gnttab_batch_map(&op, 1); 537 + err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles, 538 + phys_addrs, 539 + GNTMAP_host_map | GNTMAP_contains_pte, 540 + &leaked); 541 + if (err) 542 + goto failed; 496 543 497 - if (op.status != GNTST_okay) { 498 - 
free_vm_area(area); 499 - kfree(node); 500 - xenbus_dev_fatal(dev, op.status, 501 - "mapping in shared page %d from domain %d", 502 - gnt_ref, dev->otherend_id); 503 - return op.status; 504 - } 505 - 506 - node->handle = op.handle; 507 - node->area = area; 544 + node->nr_handles = nr_grefs; 545 + node->pv.area = area; 508 546 509 547 spin_lock(&xenbus_valloc_lock); 510 548 list_add(&node->next, &xenbus_valloc_pages); ··· 580 482 581 483 *vaddr = area->addr; 582 484 return 0; 485 + 486 + failed: 487 + if (!leaked) 488 + free_vm_area(area); 489 + else 490 + pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs); 491 + 492 + kfree(node); 493 + return err; 583 494 } 584 495 585 496 static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev, 586 - int gnt_ref, void **vaddr) 497 + grant_ref_t *gnt_ref, 498 + unsigned int nr_grefs, 499 + void **vaddr) 587 500 { 588 501 struct xenbus_map_node *node; 502 + int i; 589 503 int err; 590 504 void *addr; 505 + bool leaked = false; 506 + /* Why do we need two arrays? 
See comment of __xenbus_map_ring */ 507 + phys_addr_t phys_addrs[XENBUS_MAX_RING_PAGES]; 508 + unsigned long addrs[XENBUS_MAX_RING_PAGES]; 509 + 510 + if (nr_grefs > XENBUS_MAX_RING_PAGES) 511 + return -EINVAL; 591 512 592 513 *vaddr = NULL; 593 514 ··· 614 497 if (!node) 615 498 return -ENOMEM; 616 499 617 - err = alloc_xenballooned_pages(1, &node->page, false /* lowmem */); 500 + err = alloc_xenballooned_pages(nr_grefs, node->hvm.pages, 501 + false /* lowmem */); 618 502 if (err) 619 503 goto out_err; 620 504 621 - addr = pfn_to_kaddr(page_to_pfn(node->page)); 505 + for (i = 0; i < nr_grefs; i++) { 506 + unsigned long pfn = page_to_pfn(node->hvm.pages[i]); 507 + phys_addrs[i] = (unsigned long)pfn_to_kaddr(pfn); 508 + addrs[i] = (unsigned long)pfn_to_kaddr(pfn); 509 + } 622 510 623 - err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr); 511 + err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles, 512 + phys_addrs, GNTMAP_host_map, &leaked); 513 + node->nr_handles = nr_grefs; 514 + 624 515 if (err) 625 - goto out_err_free_ballooned_pages; 516 + goto out_free_ballooned_pages; 517 + 518 + addr = vmap(node->hvm.pages, nr_grefs, VM_MAP | VM_IOREMAP, 519 + PAGE_KERNEL); 520 + if (!addr) { 521 + err = -ENOMEM; 522 + goto out_xenbus_unmap_ring; 523 + } 524 + 525 + node->hvm.addr = addr; 626 526 627 527 spin_lock(&xenbus_valloc_lock); 628 528 list_add(&node->next, &xenbus_valloc_pages); ··· 648 514 *vaddr = addr; 649 515 return 0; 650 516 651 - out_err_free_ballooned_pages: 652 - free_xenballooned_pages(1, &node->page); 517 + out_xenbus_unmap_ring: 518 + if (!leaked) 519 + xenbus_unmap_ring(dev, node->handles, node->nr_handles, 520 + addrs); 521 + else 522 + pr_alert("leaking %p size %u page(s)", 523 + addr, nr_grefs); 524 + out_free_ballooned_pages: 525 + if (!leaked) 526 + free_xenballooned_pages(nr_grefs, node->hvm.pages); 653 527 out_err: 654 528 kfree(node); 655 529 return err; ··· 667 525 /** 668 526 * xenbus_map_ring 669 527 * @dev: xenbus device 670 - * 
@gnt_ref: grant reference 671 - * @handle: pointer to grant handle to be filled 672 - * @vaddr: address to be mapped to 528 + * @gnt_refs: grant reference array 529 + * @nr_grefs: number of grant reference 530 + * @handles: pointer to grant handle to be filled 531 + * @vaddrs: addresses to be mapped to 532 + * @leaked: fail to clean up a failed map, caller should not free vaddr 673 533 * 674 - * Map a page of memory into this domain from another domain's grant table. 534 + * Map pages of memory into this domain from another domain's grant table. 675 535 * xenbus_map_ring does not allocate the virtual address space (you must do 676 - * this yourself!). It only maps in the page to the specified address. 536 + * this yourself!). It only maps in the pages to the specified address. 677 537 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h) 678 - * or -ENOMEM on error. If an error is returned, device will switch to 679 - * XenbusStateClosing and the error message will be saved in XenStore. 538 + * or -ENOMEM / -EINVAL on error. If an error is returned, device will switch to 539 + * XenbusStateClosing and the first error message will be saved in XenStore. 540 + * Further more if we fail to map the ring, caller should check @leaked. 541 + * If @leaked is not zero it means xenbus_map_ring fails to clean up, caller 542 + * should not free the address space of @vaddr. 
680 543 */ 681 - int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref, 682 - grant_handle_t *handle, void *vaddr) 544 + int xenbus_map_ring(struct xenbus_device *dev, grant_ref_t *gnt_refs, 545 + unsigned int nr_grefs, grant_handle_t *handles, 546 + unsigned long *vaddrs, bool *leaked) 683 547 { 684 - struct gnttab_map_grant_ref op; 548 + phys_addr_t phys_addrs[XENBUS_MAX_RING_PAGES]; 549 + int i; 685 550 686 - gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map, gnt_ref, 687 - dev->otherend_id); 551 + if (nr_grefs > XENBUS_MAX_RING_PAGES) 552 + return -EINVAL; 688 553 689 - gnttab_batch_map(&op, 1); 554 + for (i = 0; i < nr_grefs; i++) 555 + phys_addrs[i] = (unsigned long)vaddrs[i]; 690 556 691 - if (op.status != GNTST_okay) { 692 - xenbus_dev_fatal(dev, op.status, 693 - "mapping in shared page %d from domain %d", 694 - gnt_ref, dev->otherend_id); 695 - } else 696 - *handle = op.handle; 697 - 698 - return op.status; 557 + return __xenbus_map_ring(dev, gnt_refs, nr_grefs, handles, 558 + phys_addrs, GNTMAP_host_map, leaked); 699 559 } 700 560 EXPORT_SYMBOL_GPL(xenbus_map_ring); 701 561 ··· 723 579 static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr) 724 580 { 725 581 struct xenbus_map_node *node; 726 - struct gnttab_unmap_grant_ref op = { 727 - .host_addr = (unsigned long)vaddr, 728 - }; 582 + struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_PAGES]; 729 583 unsigned int level; 584 + int i; 585 + bool leaked = false; 586 + int err; 730 587 731 588 spin_lock(&xenbus_valloc_lock); 732 589 list_for_each_entry(node, &xenbus_valloc_pages, next) { 733 - if (node->area->addr == vaddr) { 590 + if (node->pv.area->addr == vaddr) { 734 591 list_del(&node->next); 735 592 goto found; 736 593 } ··· 746 601 return GNTST_bad_virt_addr; 747 602 } 748 603 749 - op.handle = node->handle; 750 - op.host_addr = arbitrary_virt_to_machine( 751 - lookup_address((unsigned long)vaddr, &level)).maddr; 604 + for (i = 0; i < node->nr_handles; i++) { 605 
+ unsigned long addr; 752 606 753 - if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) 607 + memset(&unmap[i], 0, sizeof(unmap[i])); 608 + addr = (unsigned long)vaddr + (PAGE_SIZE * i); 609 + unmap[i].host_addr = arbitrary_virt_to_machine( 610 + lookup_address(addr, &level)).maddr; 611 + unmap[i].dev_bus_addr = 0; 612 + unmap[i].handle = node->handles[i]; 613 + } 614 + 615 + if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i)) 754 616 BUG(); 755 617 756 - if (op.status == GNTST_okay) 757 - free_vm_area(node->area); 618 + err = GNTST_okay; 619 + leaked = false; 620 + for (i = 0; i < node->nr_handles; i++) { 621 + if (unmap[i].status != GNTST_okay) { 622 + leaked = true; 623 + xenbus_dev_error(dev, unmap[i].status, 624 + "unmapping page at handle %d error %d", 625 + node->handles[i], unmap[i].status); 626 + err = unmap[i].status; 627 + break; 628 + } 629 + } 630 + 631 + if (!leaked) 632 + free_vm_area(node->pv.area); 758 633 else 759 - xenbus_dev_error(dev, op.status, 760 - "unmapping page at handle %d error %d", 761 - node->handle, op.status); 634 + pr_alert("leaking VM area %p size %u page(s)", 635 + node->pv.area, node->nr_handles); 762 636 763 637 kfree(node); 764 - return op.status; 638 + return err; 765 639 } 766 640 767 641 static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr) ··· 788 624 int rv; 789 625 struct xenbus_map_node *node; 790 626 void *addr; 627 + unsigned long addrs[XENBUS_MAX_RING_PAGES]; 628 + int i; 791 629 792 630 spin_lock(&xenbus_valloc_lock); 793 631 list_for_each_entry(node, &xenbus_valloc_pages, next) { 794 - addr = pfn_to_kaddr(page_to_pfn(node->page)); 632 + addr = node->hvm.addr; 795 633 if (addr == vaddr) { 796 634 list_del(&node->next); 797 635 goto found; ··· 809 643 return GNTST_bad_virt_addr; 810 644 } 811 645 812 - rv = xenbus_unmap_ring(dev, node->handle, addr); 646 + for (i = 0; i < node->nr_handles; i++) 647 + addrs[i] = (unsigned 
long)pfn_to_kaddr(page_to_pfn(node->hvm.pages[i])); 813 648 649 + rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles, 650 + addrs); 814 651 if (!rv) 815 - free_xenballooned_pages(1, &node->page); 652 + vunmap(vaddr); 816 653 else 817 - WARN(1, "Leaking %p\n", vaddr); 654 + WARN(1, "Leaking %p, size %u page(s)\n", vaddr, 655 + node->nr_handles); 818 656 819 657 kfree(node); 820 658 return rv; ··· 827 657 /** 828 658 * xenbus_unmap_ring 829 659 * @dev: xenbus device 830 - * @handle: grant handle 831 - * @vaddr: addr to unmap 660 + * @handles: grant handle array 661 + * @nr_handles: number of handles in the array 662 + * @vaddrs: addresses to unmap 832 663 * 833 - * Unmap a page of memory in this domain that was imported from another domain. 664 + * Unmap memory in this domain that was imported from another domain. 834 665 * Returns 0 on success and returns GNTST_* on error 835 666 * (see xen/include/interface/grant_table.h). 836 667 */ 837 668 int xenbus_unmap_ring(struct xenbus_device *dev, 838 - grant_handle_t handle, void *vaddr) 669 + grant_handle_t *handles, unsigned int nr_handles, 670 + unsigned long *vaddrs) 839 671 { 840 - struct gnttab_unmap_grant_ref op; 672 + struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_PAGES]; 673 + int i; 674 + int err; 841 675 842 - gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map, handle); 676 + if (nr_handles > XENBUS_MAX_RING_PAGES) 677 + return -EINVAL; 843 678 844 - if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1)) 679 + for (i = 0; i < nr_handles; i++) 680 + gnttab_set_unmap_op(&unmap[i], vaddrs[i], 681 + GNTMAP_host_map, handles[i]); 682 + 683 + if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i)) 845 684 BUG(); 846 685 847 - if (op.status != GNTST_okay) 848 - xenbus_dev_error(dev, op.status, 849 - "unmapping page at handle %d error %d", 850 - handle, op.status); 686 + err = GNTST_okay; 687 + for (i = 0; i < nr_handles; i++) { 688 + if (unmap[i].status != GNTST_okay) { 
689 + xenbus_dev_error(dev, unmap[i].status, 690 + "unmapping page at handle %d error %d", 691 + handles[i], unmap[i].status); 692 + err = unmap[i].status; 693 + break; 694 + } 695 + } 851 696 852 - return op.status; 697 + return err; 853 698 } 854 699 EXPORT_SYMBOL_GPL(xenbus_unmap_ring); 855 700
+143
drivers/xen/xlate_mmu.c
··· 1 + /* 2 + * MMU operations common to all auto-translated physmap guests. 3 + * 4 + * Copyright (C) 2015 Citrix Systems R&D Ltd. 5 + * 6 + * This program is free software; you can redistribute it and/or 7 + * modify it under the terms of the GNU General Public License version 2 8 + * as published by the Free Software Foundation; or, when distributed 9 + * separately from the Linux kernel or incorporated into other 10 + * software packages, subject to the following license: 11 + * 12 + * Permission is hereby granted, free of charge, to any person obtaining a copy 13 + * of this source file (the "Software"), to deal in the Software without 14 + * restriction, including without limitation the rights to use, copy, modify, 15 + * merge, publish, distribute, sublicense, and/or sell copies of the Software, 16 + * and to permit persons to whom the Software is furnished to do so, subject to 17 + * the following conditions: 18 + * 19 + * The above copyright notice and this permission notice shall be included in 20 + * all copies or substantial portions of the Software. 21 + * 22 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 23 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 24 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 25 + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 26 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 27 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 28 + * IN THE SOFTWARE. 
29 + */ 30 + #include <linux/kernel.h> 31 + #include <linux/mm.h> 32 + 33 + #include <asm/xen/hypercall.h> 34 + #include <asm/xen/hypervisor.h> 35 + 36 + #include <xen/xen.h> 37 + #include <xen/page.h> 38 + #include <xen/interface/xen.h> 39 + #include <xen/interface/memory.h> 40 + 41 + /* map fgmfn of domid to lpfn in the current domain */ 42 + static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn, 43 + unsigned int domid) 44 + { 45 + int rc; 46 + struct xen_add_to_physmap_range xatp = { 47 + .domid = DOMID_SELF, 48 + .foreign_domid = domid, 49 + .size = 1, 50 + .space = XENMAPSPACE_gmfn_foreign, 51 + }; 52 + xen_ulong_t idx = fgmfn; 53 + xen_pfn_t gpfn = lpfn; 54 + int err = 0; 55 + 56 + set_xen_guest_handle(xatp.idxs, &idx); 57 + set_xen_guest_handle(xatp.gpfns, &gpfn); 58 + set_xen_guest_handle(xatp.errs, &err); 59 + 60 + rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp); 61 + return rc < 0 ? rc : err; 62 + } 63 + 64 + struct remap_data { 65 + xen_pfn_t *fgmfn; /* foreign domain's gmfn */ 66 + pgprot_t prot; 67 + domid_t domid; 68 + struct vm_area_struct *vma; 69 + int index; 70 + struct page **pages; 71 + struct xen_remap_mfn_info *info; 72 + int *err_ptr; 73 + int mapped; 74 + }; 75 + 76 + static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr, 77 + void *data) 78 + { 79 + struct remap_data *info = data; 80 + struct page *page = info->pages[info->index++]; 81 + unsigned long pfn = page_to_pfn(page); 82 + pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot)); 83 + int rc; 84 + 85 + rc = map_foreign_page(pfn, *info->fgmfn, info->domid); 86 + *info->err_ptr++ = rc; 87 + if (!rc) { 88 + set_pte_at(info->vma->vm_mm, addr, ptep, pte); 89 + info->mapped++; 90 + } 91 + info->fgmfn++; 92 + 93 + return 0; 94 + } 95 + 96 + int xen_xlate_remap_gfn_array(struct vm_area_struct *vma, 97 + unsigned long addr, 98 + xen_pfn_t *mfn, int nr, 99 + int *err_ptr, pgprot_t prot, 100 + unsigned domid, 101 + struct page **pages) 102 + { 103 + 
int err; 104 + struct remap_data data; 105 + unsigned long range = nr << PAGE_SHIFT; 106 + 107 + /* Kept here for the purpose of making sure code doesn't break 108 + x86 PVOPS */ 109 + BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO))); 110 + 111 + data.fgmfn = mfn; 112 + data.prot = prot; 113 + data.domid = domid; 114 + data.vma = vma; 115 + data.pages = pages; 116 + data.index = 0; 117 + data.err_ptr = err_ptr; 118 + data.mapped = 0; 119 + 120 + err = apply_to_page_range(vma->vm_mm, addr, range, 121 + remap_pte_fn, &data); 122 + return err < 0 ? err : data.mapped; 123 + } 124 + EXPORT_SYMBOL_GPL(xen_xlate_remap_gfn_array); 125 + 126 + int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma, 127 + int nr, struct page **pages) 128 + { 129 + int i; 130 + 131 + for (i = 0; i < nr; i++) { 132 + struct xen_remove_from_physmap xrp; 133 + unsigned long pfn; 134 + 135 + pfn = page_to_pfn(pages[i]); 136 + 137 + xrp.domid = DOMID_SELF; 138 + xrp.gpfn = pfn; 139 + (void)HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp); 140 + } 141 + return 0; 142 + } 143 + EXPORT_SYMBOL_GPL(xen_xlate_unmap_gfn_range);
+5 -1
include/xen/interface/xen.h
··· 67 67 #define __HYPERVISOR_vcpu_op 24 68 68 #define __HYPERVISOR_set_segment_base 25 /* x86/64 only */ 69 69 #define __HYPERVISOR_mmuext_op 26 70 - #define __HYPERVISOR_acm_op 27 70 + #define __HYPERVISOR_xsm_op 27 71 71 #define __HYPERVISOR_nmi_op 28 72 72 #define __HYPERVISOR_sched_op 29 73 73 #define __HYPERVISOR_callback_op 30 ··· 75 75 #define __HYPERVISOR_event_channel_op 32 76 76 #define __HYPERVISOR_physdev_op 33 77 77 #define __HYPERVISOR_hvm_op 34 78 + #define __HYPERVISOR_sysctl 35 79 + #define __HYPERVISOR_domctl 36 80 + #define __HYPERVISOR_kexec_op 37 78 81 #define __HYPERVISOR_tmem_op 38 82 + #define __HYPERVISOR_xc_reserved_op 39 /* reserved for XenClient */ 79 83 80 84 /* Architecture-specific hypercall definitions. */ 81 85 #define __HYPERVISOR_arch_0 48
+46 -1
include/xen/xen-ops.h
··· 27 27 void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order); 28 28 29 29 struct vm_area_struct; 30 + 31 + /* 32 + * xen_remap_domain_mfn_array() - map an array of foreign frames 33 + * @vma: VMA to map the pages into 34 + * @addr: Address at which to map the pages 35 + * @gfn: Array of GFNs to map 36 + * @nr: Number entries in the GFN array 37 + * @err_ptr: Returns per-GFN error status. 38 + * @prot: page protection mask 39 + * @domid: Domain owning the pages 40 + * @pages: Array of pages if this domain has an auto-translated physmap 41 + * 42 + * @gfn and @err_ptr may point to the same buffer, the GFNs will be 43 + * overwritten by the error codes after they are mapped. 44 + * 45 + * Returns the number of successfully mapped frames, or a -ve error 46 + * code. 47 + */ 48 + int xen_remap_domain_mfn_array(struct vm_area_struct *vma, 49 + unsigned long addr, 50 + xen_pfn_t *gfn, int nr, 51 + int *err_ptr, pgprot_t prot, 52 + unsigned domid, 53 + struct page **pages); 54 + 55 + /* xen_remap_domain_mfn_range() - map a range of foreign frames 56 + * @vma: VMA to map the pages into 57 + * @addr: Address at which to map the pages 58 + * @gfn: First GFN to map. 59 + * @nr: Number frames to map 60 + * @prot: page protection mask 61 + * @domid: Domain owning the pages 62 + * @pages: Array of pages if this domain has an auto-translated physmap 63 + * 64 + * Returns the number of successfully mapped frames, or a -ve error 65 + * code. 
66 + */ 30 67 int xen_remap_domain_mfn_range(struct vm_area_struct *vma, 31 68 unsigned long addr, 32 - xen_pfn_t mfn, int nr, 69 + xen_pfn_t gfn, int nr, 33 70 pgprot_t prot, unsigned domid, 34 71 struct page **pages); 35 72 int xen_unmap_domain_mfn_range(struct vm_area_struct *vma, 36 73 int numpgs, struct page **pages); 74 + int xen_xlate_remap_gfn_array(struct vm_area_struct *vma, 75 + unsigned long addr, 76 + xen_pfn_t *gfn, int nr, 77 + int *err_ptr, pgprot_t prot, 78 + unsigned domid, 79 + struct page **pages); 80 + int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma, 81 + int nr, struct page **pages); 37 82 38 83 bool xen_running_on_version_or_later(unsigned int major, unsigned int minor); 39 84
+14 -6
include/xen/xenbus.h
··· 46 46 #include <xen/interface/io/xenbus.h> 47 47 #include <xen/interface/io/xs_wire.h> 48 48 49 + #define XENBUS_MAX_RING_PAGE_ORDER 4 50 + #define XENBUS_MAX_RING_PAGES (1U << XENBUS_MAX_RING_PAGE_ORDER) 51 + #define INVALID_GRANT_HANDLE (~0U) 52 + 49 53 /* Register callback to watch this node. */ 50 54 struct xenbus_watch 51 55 { ··· 203 199 const char *pathfmt, ...); 204 200 205 201 int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state new_state); 206 - int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn); 207 - int xenbus_map_ring_valloc(struct xenbus_device *dev, 208 - int gnt_ref, void **vaddr); 209 - int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref, 210 - grant_handle_t *handle, void *vaddr); 202 + int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr, 203 + unsigned int nr_pages, grant_ref_t *grefs); 204 + int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs, 205 + unsigned int nr_grefs, void **vaddr); 206 + int xenbus_map_ring(struct xenbus_device *dev, 207 + grant_ref_t *gnt_refs, unsigned int nr_grefs, 208 + grant_handle_t *handles, unsigned long *vaddrs, 209 + bool *leaked); 211 210 212 211 int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr); 213 212 int xenbus_unmap_ring(struct xenbus_device *dev, 214 - grant_handle_t handle, void *vaddr); 213 + grant_handle_t *handles, unsigned int nr_handles, 214 + unsigned long *vaddrs); 215 215 216 216 int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port); 217 217 int xenbus_free_evtchn(struct xenbus_device *dev, int port);
+12
scripts/xen-hypercalls.sh
··· 1 + #!/bin/sh 2 + out="$1" 3 + shift 4 + in="$@" 5 + 6 + for i in $in; do 7 + eval $CPP $LINUXINCLUDE -dD -imacros "$i" -x c /dev/null 8 + done | \ 9 + awk '$1 == "#define" && $2 ~ /__HYPERVISOR_[a-z][a-z_0-9]*/ { v[$3] = $2 } 10 + END { print "/* auto-generated by scripts/xen-hypercall.sh */" 11 + for (i in v) if (!(v[i] in v)) 12 + print "HYPERCALL("substr(v[i], 14)")"}' | sort -u >$out