Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'stable/for-linus-3.8-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen

Pull Xen updates from Konrad Rzeszutek Wilk:
- Add necessary infrastructure to make balloon driver work under ARM.
- Add /dev/xen/privcmd interfaces to work with ARM and PVH.
- Improve Xen PCIBack wild-card parsing.
- Add Xen ACPI PAD (Processor Aggregator) support - so we can offline/
online sockets depending on the power consumption.
- PVHVM + kexec = use an E820_RESV region for the shared region so we
don't overwrite said region during kexec reboot.
- Cleanups, compile fixes.

Fix up some trivial conflicts due to the balloon driver now working on
ARM, and there were changes next to the previous work-arounds that are
now gone.

* tag 'stable/for-linus-3.8-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
xen/PVonHVM: fix compile warning in init_hvm_pv_info
xen: arm: implement remap interfaces needed for privcmd mappings.
xen: correctly use xen_pfn_t in remap_domain_mfn_range.
xen: arm: enable balloon driver
xen: balloon: allow PVMMU interfaces to be compiled out
xen: privcmd: support autotranslated physmap guests.
xen: add pages parameter to xen_remap_domain_mfn_range
xen/acpi: Move the xen_running_on_version_or_later function.
xen/xenbus: Remove duplicate inclusion of asm/xen/hypervisor.h
xen/acpi: Fix compile error by missing decleration for xen_domain.
xen/acpi: revert pad config check in xen_check_mwait
xen/acpi: ACPI PAD driver
xen-pciback: reject out of range inputs
xen-pciback: simplify and tighten parsing of device IDs
xen PVonHVM: use E820_Reserved area for shared_info

+594 -119
+1
arch/arm/include/asm/xen/interface.h
··· 49 49 DEFINE_GUEST_HANDLE(uint64_t); 50 50 DEFINE_GUEST_HANDLE(uint32_t); 51 51 DEFINE_GUEST_HANDLE(xen_pfn_t); 52 + DEFINE_GUEST_HANDLE(xen_ulong_t); 52 53 53 54 /* Maximum number of virtual CPUs in multi-processor guests. */ 54 55 #define MAX_VIRT_CPUS 1
+102 -21
arch/arm/xen/enlighten.c
··· 8 8 #include <xen/features.h> 9 9 #include <xen/platform_pci.h> 10 10 #include <xen/xenbus.h> 11 + #include <xen/page.h> 12 + #include <xen/xen-ops.h> 11 13 #include <asm/xen/hypervisor.h> 12 14 #include <asm/xen/hypercall.h> 13 15 #include <linux/interrupt.h> ··· 18 16 #include <linux/of.h> 19 17 #include <linux/of_irq.h> 20 18 #include <linux/of_address.h> 19 + 20 + #include <linux/mm.h> 21 21 22 22 struct start_info _xen_start_info; 23 23 struct start_info *xen_start_info = &_xen_start_info; ··· 33 29 34 30 DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu); 35 31 32 + /* These are unused until we support booting "pre-ballooned" */ 33 + unsigned long xen_released_pages; 34 + struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata; 35 + 36 36 /* TODO: to be removed */ 37 37 __read_mostly int xen_have_vector_callback; 38 38 EXPORT_SYMBOL_GPL(xen_have_vector_callback); ··· 46 38 47 39 static __read_mostly int xen_events_irq = -1; 48 40 41 + /* map fgmfn of domid to lpfn in the current domain */ 42 + static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn, 43 + unsigned int domid) 44 + { 45 + int rc; 46 + struct xen_add_to_physmap_range xatp = { 47 + .domid = DOMID_SELF, 48 + .foreign_domid = domid, 49 + .size = 1, 50 + .space = XENMAPSPACE_gmfn_foreign, 51 + }; 52 + xen_ulong_t idx = fgmfn; 53 + xen_pfn_t gpfn = lpfn; 54 + 55 + set_xen_guest_handle(xatp.idxs, &idx); 56 + set_xen_guest_handle(xatp.gpfns, &gpfn); 57 + 58 + rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp); 59 + if (rc) { 60 + pr_warn("Failed to map pfn to mfn rc:%d pfn:%lx mfn:%lx\n", 61 + rc, lpfn, fgmfn); 62 + return 1; 63 + } 64 + return 0; 65 + } 66 + 67 + struct remap_data { 68 + xen_pfn_t fgmfn; /* foreign domain's gmfn */ 69 + pgprot_t prot; 70 + domid_t domid; 71 + struct vm_area_struct *vma; 72 + int index; 73 + struct page **pages; 74 + struct xen_remap_mfn_info *info; 75 + }; 76 + 77 + static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned 
long addr, 78 + void *data) 79 + { 80 + struct remap_data *info = data; 81 + struct page *page = info->pages[info->index++]; 82 + unsigned long pfn = page_to_pfn(page); 83 + pte_t pte = pfn_pte(pfn, info->prot); 84 + 85 + if (map_foreign_page(pfn, info->fgmfn, info->domid)) 86 + return -EFAULT; 87 + set_pte_at(info->vma->vm_mm, addr, ptep, pte); 88 + 89 + return 0; 90 + } 91 + 49 92 int xen_remap_domain_mfn_range(struct vm_area_struct *vma, 50 93 unsigned long addr, 51 - unsigned long mfn, int nr, 52 - pgprot_t prot, unsigned domid) 94 + xen_pfn_t mfn, int nr, 95 + pgprot_t prot, unsigned domid, 96 + struct page **pages) 53 97 { 54 - return -ENOSYS; 98 + int err; 99 + struct remap_data data; 100 + 101 + /* TBD: Batching, current sole caller only does page at a time */ 102 + if (nr > 1) 103 + return -EINVAL; 104 + 105 + data.fgmfn = mfn; 106 + data.prot = prot; 107 + data.domid = domid; 108 + data.vma = vma; 109 + data.index = 0; 110 + data.pages = pages; 111 + err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT, 112 + remap_pte_fn, &data); 113 + return err; 55 114 } 56 115 EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range); 116 + 117 + int xen_unmap_domain_mfn_range(struct vm_area_struct *vma, 118 + int nr, struct page **pages) 119 + { 120 + int i; 121 + 122 + for (i = 0; i < nr; i++) { 123 + struct xen_remove_from_physmap xrp; 124 + unsigned long rc, pfn; 125 + 126 + pfn = page_to_pfn(pages[i]); 127 + 128 + xrp.domid = DOMID_SELF; 129 + xrp.gpfn = pfn; 130 + rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp); 131 + if (rc) { 132 + pr_warn("Failed to unmap pfn:%lx rc:%ld\n", 133 + pfn, rc); 134 + return rc; 135 + } 136 + } 137 + return 0; 138 + } 139 + EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range); 57 140 58 141 /* 59 142 * see Documentation/devicetree/bindings/arm/xen.txt for the ··· 247 148 return 0; 248 149 } 249 150 postcore_initcall(xen_init_events); 250 - 251 - /* XXX: only until balloon is properly working */ 252 - int 
alloc_xenballooned_pages(int nr_pages, struct page **pages, bool highmem) 253 - { 254 - *pages = alloc_pages(highmem ? GFP_HIGHUSER : GFP_KERNEL, 255 - get_order(nr_pages)); 256 - if (*pages == NULL) 257 - return -ENOMEM; 258 - return 0; 259 - } 260 - EXPORT_SYMBOL_GPL(alloc_xenballooned_pages); 261 - 262 - void free_xenballooned_pages(int nr_pages, struct page **pages) 263 - { 264 - kfree(*pages); 265 - *pages = NULL; 266 - } 267 - EXPORT_SYMBOL_GPL(free_xenballooned_pages); 268 151 269 152 /* In the hypervisor.S file. */ 270 153 EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op);
+1
arch/x86/include/asm/xen/interface.h
··· 63 63 DEFINE_GUEST_HANDLE(uint64_t); 64 64 DEFINE_GUEST_HANDLE(uint32_t); 65 65 DEFINE_GUEST_HANDLE(xen_pfn_t); 66 + DEFINE_GUEST_HANDLE(xen_ulong_t); 66 67 #endif 67 68 68 69 #ifndef HYPERVISOR_VIRT_START
+1
arch/x86/xen/Kconfig
··· 6 6 bool "Xen guest support" 7 7 select PARAVIRT 8 8 select PARAVIRT_CLOCK 9 + select XEN_HAVE_PVMMU 9 10 depends on X86_64 || (X86_32 && X86_PAE && !X86_VISWS) 10 11 depends on X86_TSC 11 12 help
+80 -28
arch/x86/xen/enlighten.c
··· 223 223 version >> 16, version & 0xffff, extra.extraversion, 224 224 xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : ""); 225 225 } 226 + /* Check if running on Xen version (major, minor) or later */ 227 + bool 228 + xen_running_on_version_or_later(unsigned int major, unsigned int minor) 229 + { 230 + unsigned int version; 231 + 232 + if (!xen_domain()) 233 + return false; 234 + 235 + version = HYPERVISOR_xen_version(XENVER_version, NULL); 236 + if ((((version >> 16) == major) && ((version & 0xffff) >= minor)) || 237 + ((version >> 16) > major)) 238 + return true; 239 + return false; 240 + } 226 241 227 242 #define CPUID_THERM_POWER_LEAF 6 228 243 #define APERFMPERF_PRESENT 0 ··· 302 287 303 288 static bool __init xen_check_mwait(void) 304 289 { 305 - #if defined(CONFIG_ACPI) && !defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) && \ 306 - !defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE) 290 + #ifdef CONFIG_ACPI 307 291 struct xen_platform_op op = { 308 292 .cmd = XENPF_set_processor_pminfo, 309 293 .u.set_pminfo.id = -1, ··· 321 307 * from the hardware and hypercall. 
322 308 */ 323 309 if (!xen_initial_domain()) 310 + return false; 311 + 312 + /* 313 + * When running under platform earlier than Xen4.2, do not expose 314 + * mwait, to avoid the risk of loading native acpi pad driver 315 + */ 316 + if (!xen_running_on_version_or_later(4, 2)) 324 317 return false; 325 318 326 319 ax = 1; ··· 1516 1495 #endif 1517 1496 } 1518 1497 1519 - void __ref xen_hvm_init_shared_info(void) 1520 - { 1521 - int cpu; 1522 - struct xen_add_to_physmap xatp; 1523 - static struct shared_info *shared_info_page = 0; 1498 + #ifdef CONFIG_XEN_PVHVM 1499 + #define HVM_SHARED_INFO_ADDR 0xFE700000UL 1500 + static struct shared_info *xen_hvm_shared_info; 1501 + static unsigned long xen_hvm_sip_phys; 1502 + static int xen_major, xen_minor; 1524 1503 1525 - if (!shared_info_page) 1526 - shared_info_page = (struct shared_info *) 1527 - extend_brk(PAGE_SIZE, PAGE_SIZE); 1504 + static void xen_hvm_connect_shared_info(unsigned long pfn) 1505 + { 1506 + struct xen_add_to_physmap xatp; 1507 + 1528 1508 xatp.domid = DOMID_SELF; 1529 1509 xatp.idx = 0; 1530 1510 xatp.space = XENMAPSPACE_shared_info; 1531 - xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT; 1511 + xatp.gpfn = pfn; 1532 1512 if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) 1533 1513 BUG(); 1534 1514 1535 - HYPERVISOR_shared_info = (struct shared_info *)shared_info_page; 1515 + } 1516 + static void __init xen_hvm_set_shared_info(struct shared_info *sip) 1517 + { 1518 + int cpu; 1519 + 1520 + HYPERVISOR_shared_info = sip; 1536 1521 1537 1522 /* xen_vcpu is a pointer to the vcpu_info struct in the shared_info 1538 1523 * page, we use it in the event channel upcall and in some pvclock 1539 1524 * related functions. We don't need the vcpu_info placement 1540 1525 * optimizations because we don't use any pv_mmu or pv_irq op on 1541 - * HVM. 
1542 - * When xen_hvm_init_shared_info is run at boot time only vcpu 0 is 1543 - * online but xen_hvm_init_shared_info is run at resume time too and 1544 - * in that case multiple vcpus might be online. */ 1545 - for_each_online_cpu(cpu) { 1526 + * HVM. */ 1527 + for_each_online_cpu(cpu) 1546 1528 per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; 1547 - } 1548 1529 } 1549 1530 1550 - #ifdef CONFIG_XEN_PVHVM 1531 + /* Reconnect the shared_info pfn to a (new) mfn */ 1532 + void xen_hvm_resume_shared_info(void) 1533 + { 1534 + xen_hvm_connect_shared_info(xen_hvm_sip_phys >> PAGE_SHIFT); 1535 + } 1536 + 1537 + /* Xen tools prior to Xen 4 do not provide a E820_Reserved area for guest usage. 1538 + * On these old tools the shared info page will be placed in E820_Ram. 1539 + * Xen 4 provides a E820_Reserved area at 0xFC000000, and this code expects 1540 + * that nothing is mapped up to HVM_SHARED_INFO_ADDR. 1541 + * Xen 4.3+ provides an explicit 1MB area at HVM_SHARED_INFO_ADDR which is used 1542 + * here for the shared info page. 
*/ 1543 + static void __init xen_hvm_init_shared_info(void) 1544 + { 1545 + if (xen_major < 4) { 1546 + xen_hvm_shared_info = extend_brk(PAGE_SIZE, PAGE_SIZE); 1547 + xen_hvm_sip_phys = __pa(xen_hvm_shared_info); 1548 + } else { 1549 + xen_hvm_sip_phys = HVM_SHARED_INFO_ADDR; 1550 + set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_hvm_sip_phys); 1551 + xen_hvm_shared_info = 1552 + (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP); 1553 + } 1554 + xen_hvm_connect_shared_info(xen_hvm_sip_phys >> PAGE_SHIFT); 1555 + xen_hvm_set_shared_info(xen_hvm_shared_info); 1556 + } 1557 + 1551 1558 static void __init init_hvm_pv_info(void) 1552 1559 { 1553 - int major, minor; 1554 - uint32_t eax, ebx, ecx, edx, pages, msr, base; 1560 + uint32_t ecx, edx, pages, msr, base; 1555 1561 u64 pfn; 1556 1562 1557 1563 base = xen_cpuid_base(); 1558 - cpuid(base + 1, &eax, &ebx, &ecx, &edx); 1559 - 1560 - major = eax >> 16; 1561 - minor = eax & 0xffff; 1562 - printk(KERN_INFO "Xen version %d.%d.\n", major, minor); 1563 - 1564 1564 cpuid(base + 2, &pages, &msr, &ecx, &edx); 1565 1565 1566 1566 pfn = __pa(hypercall_page); ··· 1632 1590 1633 1591 static bool __init xen_hvm_platform(void) 1634 1592 { 1593 + uint32_t eax, ebx, ecx, edx, base; 1594 + 1635 1595 if (xen_pv_domain()) 1636 1596 return false; 1637 1597 1638 - if (!xen_cpuid_base()) 1598 + base = xen_cpuid_base(); 1599 + if (!base) 1639 1600 return false; 1601 + 1602 + cpuid(base + 1, &eax, &ebx, &ecx, &edx); 1603 + 1604 + xen_major = eax >> 16; 1605 + xen_minor = eax & 0xffff; 1606 + 1607 + printk(KERN_INFO "Xen version %d.%d.\n", xen_major, xen_minor); 1640 1608 1641 1609 return true; 1642 1610 }
+15 -2
arch/x86/xen/mmu.c
··· 2497 2497 2498 2498 int xen_remap_domain_mfn_range(struct vm_area_struct *vma, 2499 2499 unsigned long addr, 2500 - unsigned long mfn, int nr, 2501 - pgprot_t prot, unsigned domid) 2500 + xen_pfn_t mfn, int nr, 2501 + pgprot_t prot, unsigned domid, 2502 + struct page **pages) 2503 + 2502 2504 { 2503 2505 struct remap_data rmd; 2504 2506 struct mmu_update mmu_update[REMAP_BATCH_SIZE]; ··· 2544 2542 return err; 2545 2543 } 2546 2544 EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range); 2545 + 2546 + /* Returns: 0 success */ 2547 + int xen_unmap_domain_mfn_range(struct vm_area_struct *vma, 2548 + int numpgs, struct page **pages) 2549 + { 2550 + if (!pages || !xen_feature(XENFEAT_auto_translated_physmap)) 2551 + return 0; 2552 + 2553 + return -EINVAL; 2554 + } 2555 + EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
+1 -1
arch/x86/xen/suspend.c
··· 30 30 { 31 31 #ifdef CONFIG_XEN_PVHVM 32 32 int cpu; 33 - xen_hvm_init_shared_info(); 33 + xen_hvm_resume_shared_info(); 34 34 xen_callback_vector(); 35 35 xen_unplug_emulated_devices(); 36 36 if (xen_feature(XENFEAT_hvm_safe_pvclock)) {
+1 -1
arch/x86/xen/xen-ops.h
··· 40 40 void xen_vcpu_restore(void); 41 41 42 42 void xen_callback_vector(void); 43 - void xen_hvm_init_shared_info(void); 43 + void xen_hvm_resume_shared_info(void); 44 44 void xen_unplug_emulated_devices(void); 45 45 46 46 void __init xen_build_dynamic_phys_to_machine(void);
+3
drivers/xen/Kconfig
··· 206 206 Allow kernel fetching MCE error from Xen platform and 207 207 converting it into Linux mcelog format for mcelog tools 208 208 209 + config XEN_HAVE_PVMMU 210 + bool 211 + 209 212 endmenu
+4 -3
drivers/xen/Makefile
··· 1 1 ifneq ($(CONFIG_ARM),y) 2 - obj-y += manage.o balloon.o 2 + obj-y += manage.o 3 3 obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o 4 4 endif 5 5 obj-$(CONFIG_X86) += fallback.o 6 - obj-y += grant-table.o features.o events.o 6 + obj-y += grant-table.o features.o events.o balloon.o 7 7 obj-y += xenbus/ 8 8 9 9 nostackp := $(call cc-option, -fno-stack-protector) ··· 11 11 12 12 dom0-$(CONFIG_PCI) += pci.o 13 13 dom0-$(CONFIG_USB_SUPPORT) += dbgp.o 14 - dom0-$(CONFIG_ACPI) += acpi.o 14 + dom0-$(CONFIG_ACPI) += acpi.o $(xen-pad-y) 15 + xen-pad-$(CONFIG_X86) += xen-acpi-pad.o 15 16 dom0-$(CONFIG_X86) += pcpu.o 16 17 obj-$(CONFIG_XEN_DOM0) += $(dom0-y) 17 18 obj-$(CONFIG_BLOCK) += biomerge.o
+4 -1
drivers/xen/balloon.c
··· 359 359 360 360 set_phys_to_machine(pfn, frame_list[i]); 361 361 362 + #ifdef CONFIG_XEN_HAVE_PVMMU 362 363 /* Link back into the page tables if not highmem. */ 363 364 if (xen_pv_domain() && !PageHighMem(page)) { 364 365 int ret; ··· 369 368 0); 370 369 BUG_ON(ret); 371 370 } 371 + #endif 372 372 373 373 /* Relinquish the page back to the allocator. */ 374 374 ClearPageReserved(page); ··· 418 416 419 417 scrub_page(page); 420 418 419 + #ifdef CONFIG_XEN_HAVE_PVMMU 421 420 if (xen_pv_domain() && !PageHighMem(page)) { 422 421 ret = HYPERVISOR_update_va_mapping( 423 422 (unsigned long)__va(pfn << PAGE_SHIFT), 424 423 __pte_ma(0), 0); 425 424 BUG_ON(ret); 426 425 } 427 - 426 + #endif 428 427 } 429 428 430 429 /* Ensure that ballooned highmem pages don't have kmaps. */
+69 -3
drivers/xen/privcmd.c
··· 33 33 #include <xen/features.h> 34 34 #include <xen/page.h> 35 35 #include <xen/xen-ops.h> 36 + #include <xen/balloon.h> 36 37 37 38 #include "privcmd.h" 38 39 39 40 MODULE_LICENSE("GPL"); 41 + 42 + #define PRIV_VMA_LOCKED ((void *)1) 40 43 41 44 #ifndef HAVE_ARCH_PRIVCMD_MMAP 42 45 static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma); ··· 181 178 msg->va & PAGE_MASK, 182 179 msg->mfn, msg->npages, 183 180 vma->vm_page_prot, 184 - st->domain); 181 + st->domain, NULL); 185 182 if (rc < 0) 186 183 return rc; 187 184 ··· 201 198 202 199 if (!xen_initial_domain()) 203 200 return -EPERM; 201 + 202 + /* We only support privcmd_ioctl_mmap_batch for auto translated. */ 203 + if (xen_feature(XENFEAT_auto_translated_physmap)) 204 + return -ENOSYS; 204 205 205 206 if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd))) 206 207 return -EFAULT; ··· 253 246 domid_t domain; 254 247 unsigned long va; 255 248 struct vm_area_struct *vma; 249 + int index; 256 250 /* A tristate: 257 251 * 0 for no errors 258 252 * 1 if at least one error has happened (and no ··· 268 260 xen_pfn_t __user *user_mfn; 269 261 }; 270 262 263 + /* auto translated dom0 note: if domU being created is PV, then mfn is 264 + * mfn(addr on bus). If it's auto xlated, then mfn is pfn (input to HAP). 265 + */ 271 266 static int mmap_batch_fn(void *data, void *state) 272 267 { 273 268 xen_pfn_t *mfnp = data; 274 269 struct mmap_batch_state *st = state; 270 + struct vm_area_struct *vma = st->vma; 271 + struct page **pages = vma->vm_private_data; 272 + struct page *cur_page = NULL; 275 273 int ret; 276 274 275 + if (xen_feature(XENFEAT_auto_translated_physmap)) 276 + cur_page = pages[st->index++]; 277 + 277 278 ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1, 278 - st->vma->vm_page_prot, st->domain); 279 + st->vma->vm_page_prot, st->domain, 280 + &cur_page); 279 281 280 282 /* Store error code for second pass. 
*/ 281 283 *(st->err++) = ret; ··· 319 301 PRIVCMD_MMAPBATCH_PAGED_ERROR : 320 302 PRIVCMD_MMAPBATCH_MFN_ERROR; 321 303 return __put_user(*mfnp, st->user_mfn++); 304 + } 305 + 306 + /* Allocate pfns that are then mapped with gmfns from foreign domid. Update 307 + * the vma with the page info to use later. 308 + * Returns: 0 if success, otherwise -errno 309 + */ 310 + static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs) 311 + { 312 + int rc; 313 + struct page **pages; 314 + 315 + pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL); 316 + if (pages == NULL) 317 + return -ENOMEM; 318 + 319 + rc = alloc_xenballooned_pages(numpgs, pages, 0); 320 + if (rc != 0) { 321 + pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__, 322 + numpgs, rc); 323 + kfree(pages); 324 + return -ENOMEM; 325 + } 326 + BUG_ON(vma->vm_private_data != PRIV_VMA_LOCKED); 327 + vma->vm_private_data = pages; 328 + 329 + return 0; 322 330 } 323 331 324 332 static struct vm_operations_struct privcmd_vm_ops; ··· 414 370 ret = -EINVAL; 415 371 goto out; 416 372 } 373 + if (xen_feature(XENFEAT_auto_translated_physmap)) { 374 + ret = alloc_empty_pages(vma, m.num); 375 + if (ret < 0) { 376 + up_write(&mm->mmap_sem); 377 + goto out; 378 + } 379 + } 417 380 418 381 state.domain = m.dom; 419 382 state.vma = vma; 420 383 state.va = m.addr; 384 + state.index = 0; 421 385 state.global_error = 0; 422 386 state.err = err_array; 423 387 ··· 494 442 return ret; 495 443 } 496 444 445 + static void privcmd_close(struct vm_area_struct *vma) 446 + { 447 + struct page **pages = vma->vm_private_data; 448 + int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 449 + 450 + if (!xen_feature(XENFEAT_auto_translated_physmap || !numpgs || !pages)) 451 + return; 452 + 453 + xen_unmap_domain_mfn_range(vma, numpgs, pages); 454 + free_xenballooned_pages(numpgs, pages); 455 + kfree(pages); 456 + } 457 + 497 458 static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 498 459 { 499 460 
printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n", ··· 517 452 } 518 453 519 454 static struct vm_operations_struct privcmd_vm_ops = { 455 + .close = privcmd_close, 520 456 .fault = privcmd_fault 521 457 }; 522 458 ··· 535 469 536 470 static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma) 537 471 { 538 - return (xchg(&vma->vm_private_data, (void *)1) == NULL); 472 + return !cmpxchg(&vma->vm_private_data, NULL, PRIV_VMA_LOCKED); 539 473 } 540 474 541 475 const struct file_operations xen_privcmd_fops = {
+182
drivers/xen/xen-acpi-pad.c
··· 1 + /* 2 + * xen-acpi-pad.c - Xen pad interface 3 + * 4 + * Copyright (c) 2012, Intel Corporation. 5 + * Author: Liu, Jinsong <jinsong.liu@intel.com> 6 + * 7 + * This program is free software; you can redistribute it and/or modify it 8 + * under the terms and conditions of the GNU General Public License, 9 + * version 2, as published by the Free Software Foundation. 10 + * 11 + * This program is distributed in the hope it will be useful, but WITHOUT 12 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14 + * more details. 15 + */ 16 + 17 + #include <linux/kernel.h> 18 + #include <linux/types.h> 19 + #include <acpi/acpi_bus.h> 20 + #include <acpi/acpi_drivers.h> 21 + #include <asm/xen/hypercall.h> 22 + #include <xen/interface/version.h> 23 + #include <xen/xen-ops.h> 24 + 25 + #define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad" 26 + #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator" 27 + #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80 28 + static DEFINE_MUTEX(xen_cpu_lock); 29 + 30 + static int xen_acpi_pad_idle_cpus(unsigned int idle_nums) 31 + { 32 + struct xen_platform_op op; 33 + 34 + op.cmd = XENPF_core_parking; 35 + op.u.core_parking.type = XEN_CORE_PARKING_SET; 36 + op.u.core_parking.idle_nums = idle_nums; 37 + 38 + return HYPERVISOR_dom0_op(&op); 39 + } 40 + 41 + static int xen_acpi_pad_idle_cpus_num(void) 42 + { 43 + struct xen_platform_op op; 44 + 45 + op.cmd = XENPF_core_parking; 46 + op.u.core_parking.type = XEN_CORE_PARKING_GET; 47 + 48 + return HYPERVISOR_dom0_op(&op) 49 + ?: op.u.core_parking.idle_nums; 50 + } 51 + 52 + /* 53 + * Query firmware how many CPUs should be idle 54 + * return -1 on failure 55 + */ 56 + static int acpi_pad_pur(acpi_handle handle) 57 + { 58 + struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; 59 + union acpi_object *package; 60 + int num = -1; 61 + 62 + if (ACPI_FAILURE(acpi_evaluate_object(handle, 
"_PUR", NULL, &buffer))) 63 + return num; 64 + 65 + if (!buffer.length || !buffer.pointer) 66 + return num; 67 + 68 + package = buffer.pointer; 69 + 70 + if (package->type == ACPI_TYPE_PACKAGE && 71 + package->package.count == 2 && 72 + package->package.elements[0].integer.value == 1) /* rev 1 */ 73 + num = package->package.elements[1].integer.value; 74 + 75 + kfree(buffer.pointer); 76 + return num; 77 + } 78 + 79 + /* Notify firmware how many CPUs are idle */ 80 + static void acpi_pad_ost(acpi_handle handle, int stat, 81 + uint32_t idle_nums) 82 + { 83 + union acpi_object params[3] = { 84 + {.type = ACPI_TYPE_INTEGER,}, 85 + {.type = ACPI_TYPE_INTEGER,}, 86 + {.type = ACPI_TYPE_BUFFER,}, 87 + }; 88 + struct acpi_object_list arg_list = {3, params}; 89 + 90 + params[0].integer.value = ACPI_PROCESSOR_AGGREGATOR_NOTIFY; 91 + params[1].integer.value = stat; 92 + params[2].buffer.length = 4; 93 + params[2].buffer.pointer = (void *)&idle_nums; 94 + acpi_evaluate_object(handle, "_OST", &arg_list, NULL); 95 + } 96 + 97 + static void acpi_pad_handle_notify(acpi_handle handle) 98 + { 99 + int idle_nums; 100 + 101 + mutex_lock(&xen_cpu_lock); 102 + idle_nums = acpi_pad_pur(handle); 103 + if (idle_nums < 0) { 104 + mutex_unlock(&xen_cpu_lock); 105 + return; 106 + } 107 + 108 + idle_nums = xen_acpi_pad_idle_cpus(idle_nums) 109 + ?: xen_acpi_pad_idle_cpus_num(); 110 + if (idle_nums >= 0) 111 + acpi_pad_ost(handle, 0, idle_nums); 112 + mutex_unlock(&xen_cpu_lock); 113 + } 114 + 115 + static void acpi_pad_notify(acpi_handle handle, u32 event, 116 + void *data) 117 + { 118 + switch (event) { 119 + case ACPI_PROCESSOR_AGGREGATOR_NOTIFY: 120 + acpi_pad_handle_notify(handle); 121 + break; 122 + default: 123 + pr_warn("Unsupported event [0x%x]\n", event); 124 + break; 125 + } 126 + } 127 + 128 + static int acpi_pad_add(struct acpi_device *device) 129 + { 130 + acpi_status status; 131 + 132 + strcpy(acpi_device_name(device), ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME); 133 + 
strcpy(acpi_device_class(device), ACPI_PROCESSOR_AGGREGATOR_CLASS); 134 + 135 + status = acpi_install_notify_handler(device->handle, 136 + ACPI_DEVICE_NOTIFY, acpi_pad_notify, device); 137 + if (ACPI_FAILURE(status)) 138 + return -ENODEV; 139 + 140 + return 0; 141 + } 142 + 143 + static int acpi_pad_remove(struct acpi_device *device, 144 + int type) 145 + { 146 + mutex_lock(&xen_cpu_lock); 147 + xen_acpi_pad_idle_cpus(0); 148 + mutex_unlock(&xen_cpu_lock); 149 + 150 + acpi_remove_notify_handler(device->handle, 151 + ACPI_DEVICE_NOTIFY, acpi_pad_notify); 152 + return 0; 153 + } 154 + 155 + static const struct acpi_device_id pad_device_ids[] = { 156 + {"ACPI000C", 0}, 157 + {"", 0}, 158 + }; 159 + 160 + static struct acpi_driver acpi_pad_driver = { 161 + .name = "processor_aggregator", 162 + .class = ACPI_PROCESSOR_AGGREGATOR_CLASS, 163 + .ids = pad_device_ids, 164 + .ops = { 165 + .add = acpi_pad_add, 166 + .remove = acpi_pad_remove, 167 + }, 168 + }; 169 + 170 + static int __init xen_acpi_pad_init(void) 171 + { 172 + /* Only DOM0 is responsible for Xen acpi pad */ 173 + if (!xen_initial_domain()) 174 + return -ENODEV; 175 + 176 + /* Only Xen4.2 or later support Xen acpi pad */ 177 + if (!xen_running_on_version_or_later(4, 2)) 178 + return -ENODEV; 179 + 180 + return acpi_bus_register_driver(&acpi_pad_driver); 181 + } 182 + subsys_initcall(xen_acpi_pad_init);
+64 -54
drivers/xen/xen-pciback/pci_stub.c
··· 142 142 if (psdev->dev != NULL 143 143 && domain == pci_domain_nr(psdev->dev->bus) 144 144 && bus == psdev->dev->bus->number 145 - && PCI_DEVFN(slot, func) == psdev->dev->devfn) { 145 + && slot == PCI_SLOT(psdev->dev->devfn) 146 + && func == PCI_FUNC(psdev->dev->devfn)) { 146 147 pcistub_device_get(psdev); 147 148 goto out; 148 149 } ··· 192 191 if (psdev->dev != NULL 193 192 && domain == pci_domain_nr(psdev->dev->bus) 194 193 && bus == psdev->dev->bus->number 195 - && PCI_DEVFN(slot, func) == psdev->dev->devfn) { 194 + && slot == PCI_SLOT(psdev->dev->devfn) 195 + && func == PCI_FUNC(psdev->dev->devfn)) { 196 196 found_dev = pcistub_device_get_pci_dev(pdev, psdev); 197 197 break; 198 198 } ··· 899 897 static inline int str_to_slot(const char *buf, int *domain, int *bus, 900 898 int *slot, int *func) 901 899 { 902 - int err; 903 - char wc = '*'; 900 + int parsed = 0; 904 901 905 - err = sscanf(buf, " %x:%x:%x.%x", domain, bus, slot, func); 906 - switch (err) { 902 + switch (sscanf(buf, " %x:%x:%x.%x %n", domain, bus, slot, func, 903 + &parsed)) { 907 904 case 3: 908 905 *func = -1; 909 - err = sscanf(buf, " %x:%x:%x.%c", domain, bus, slot, &wc); 906 + sscanf(buf, " %x:%x:%x.* %n", domain, bus, slot, &parsed); 910 907 break; 911 908 case 2: 912 909 *slot = *func = -1; 913 - err = sscanf(buf, " %x:%x:*.%c", domain, bus, &wc); 914 - if (err >= 2) 915 - ++err; 910 + sscanf(buf, " %x:%x:*.* %n", domain, bus, &parsed); 916 911 break; 917 912 } 918 - if (err == 4 && wc == '*') 913 + if (parsed && !buf[parsed]) 919 914 return 0; 920 - else if (err < 0) 921 - return -EINVAL; 922 915 923 916 /* try again without domain */ 924 917 *domain = 0; 925 - wc = '*'; 926 - err = sscanf(buf, " %x:%x.%x", bus, slot, func); 927 - switch (err) { 918 + switch (sscanf(buf, " %x:%x.%x %n", bus, slot, func, &parsed)) { 928 919 case 2: 929 920 *func = -1; 930 - err = sscanf(buf, " %x:%x.%c", bus, slot, &wc); 921 + sscanf(buf, " %x:%x.* %n", bus, slot, &parsed); 931 922 break; 932 923 case 
1: 933 924 *slot = *func = -1; 934 - err = sscanf(buf, " %x:*.%c", bus, &wc) + 1; 925 + sscanf(buf, " %x:*.* %n", bus, &parsed); 935 926 break; 936 927 } 937 - if (err == 3 && wc == '*') 928 + if (parsed && !buf[parsed]) 938 929 return 0; 939 930 940 931 return -EINVAL; ··· 936 941 static inline int str_to_quirk(const char *buf, int *domain, int *bus, int 937 942 *slot, int *func, int *reg, int *size, int *mask) 938 943 { 939 - int err; 944 + int parsed = 0; 940 945 941 - err = 942 - sscanf(buf, " %04x:%02x:%02x.%d-%08x:%1x:%08x", domain, bus, slot, 943 - func, reg, size, mask); 944 - if (err == 7) 946 + sscanf(buf, " %x:%x:%x.%x-%x:%x:%x %n", domain, bus, slot, func, 947 + reg, size, mask, &parsed); 948 + if (parsed && !buf[parsed]) 945 949 return 0; 950 + 951 + /* try again without domain */ 952 + *domain = 0; 953 + sscanf(buf, " %x:%x.%x-%x:%x:%x %n", bus, slot, func, reg, size, 954 + mask, &parsed); 955 + if (parsed && !buf[parsed]) 956 + return 0; 957 + 946 958 return -EINVAL; 947 959 } 948 960 ··· 957 955 { 958 956 struct pcistub_device_id *pci_dev_id; 959 957 unsigned long flags; 960 - int rc = 0; 958 + int rc = 0, devfn = PCI_DEVFN(slot, func); 961 959 962 960 if (slot < 0) { 963 961 for (slot = 0; !rc && slot < 32; ++slot) ··· 971 969 return rc; 972 970 } 973 971 972 + if (( 973 + #if !defined(MODULE) /* pci_domains_supported is not being exported */ \ 974 + || !defined(CONFIG_PCI_DOMAINS) 975 + !pci_domains_supported ? 
domain : 976 + #endif 977 + domain < 0 || domain > 0xffff) 978 + || bus < 0 || bus > 0xff 979 + || PCI_SLOT(devfn) != slot 980 + || PCI_FUNC(devfn) != func) 981 + return -EINVAL; 982 + 974 983 pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL); 975 984 if (!pci_dev_id) 976 985 return -ENOMEM; 977 986 978 987 pci_dev_id->domain = domain; 979 988 pci_dev_id->bus = bus; 980 - pci_dev_id->devfn = PCI_DEVFN(slot, func); 989 + pci_dev_id->devfn = devfn; 981 990 982 991 pr_debug(DRV_NAME ": wants to seize %04x:%02x:%02x.%d\n", 983 992 domain, bus, slot, func); ··· 1029 1016 return err; 1030 1017 } 1031 1018 1032 - static int pcistub_reg_add(int domain, int bus, int slot, int func, int reg, 1033 - int size, int mask) 1019 + static int pcistub_reg_add(int domain, int bus, int slot, int func, 1020 + unsigned int reg, unsigned int size, 1021 + unsigned int mask) 1034 1022 { 1035 1023 int err = 0; 1036 1024 struct pcistub_device *psdev; 1037 1025 struct pci_dev *dev; 1038 1026 struct config_field *field; 1027 + 1028 + if (reg > 0xfff || (size < 4 && (mask >> (size * 8)))) 1029 + return -EINVAL; 1039 1030 1040 1031 psdev = pcistub_device_find(domain, bus, slot, func); 1041 1032 if (!psdev) { ··· 1271 1254 int err; 1272 1255 struct pcistub_device *psdev; 1273 1256 struct xen_pcibk_dev_data *dev_data; 1257 + 1274 1258 err = str_to_slot(buf, &domain, &bus, &slot, &func); 1275 1259 if (err) 1276 1260 goto out; 1277 - if (slot < 0 || func < 0) { 1278 - err = -EINVAL; 1279 - goto out; 1280 - } 1261 + 1281 1262 psdev = pcistub_device_find(domain, bus, slot, func); 1282 1263 if (!psdev) { 1283 1264 err = -ENODEV; ··· 1354 1339 1355 1340 if (pci_devs_to_hide && *pci_devs_to_hide) { 1356 1341 do { 1357 - char wc = '*'; 1358 - 1359 1342 parsed = 0; 1360 1343 1361 1344 err = sscanf(pci_devs_to_hide + pos, ··· 1362 1349 switch (err) { 1363 1350 case 3: 1364 1351 func = -1; 1365 - err = sscanf(pci_devs_to_hide + pos, 1366 - " (%x:%x:%x.%c) %n", 1367 - &domain, &bus, &slot, &wc, 1368 - 
&parsed); 1352 + sscanf(pci_devs_to_hide + pos, 1353 + " (%x:%x:%x.*) %n", 1354 + &domain, &bus, &slot, &parsed); 1369 1355 break; 1370 1356 case 2: 1371 1357 slot = func = -1; 1372 - err = sscanf(pci_devs_to_hide + pos, 1373 - " (%x:%x:*.%c) %n", 1374 - &domain, &bus, &wc, &parsed) + 1; 1358 + sscanf(pci_devs_to_hide + pos, 1359 + " (%x:%x:*.*) %n", 1360 + &domain, &bus, &parsed); 1375 1361 break; 1376 1362 } 1377 1363 1378 - if (err != 4 || wc != '*') { 1364 + if (!parsed) { 1379 1365 domain = 0; 1380 - wc = '*'; 1381 1366 err = sscanf(pci_devs_to_hide + pos, 1382 1367 " (%x:%x.%x) %n", 1383 1368 &bus, &slot, &func, &parsed); 1384 1369 switch (err) { 1385 1370 case 2: 1386 1371 func = -1; 1387 - err = sscanf(pci_devs_to_hide + pos, 1388 - " (%x:%x.%c) %n", 1389 - &bus, &slot, &wc, 1390 - &parsed); 1372 + sscanf(pci_devs_to_hide + pos, 1373 + " (%x:%x.*) %n", 1374 + &bus, &slot, &parsed); 1391 1375 break; 1392 1376 case 1: 1393 1377 slot = func = -1; 1394 - err = sscanf(pci_devs_to_hide + pos, 1395 - " (%x:*.%c) %n", 1396 - &bus, &wc, &parsed) + 1; 1378 + sscanf(pci_devs_to_hide + pos, 1379 + " (%x:*.*) %n", 1380 + &bus, &parsed); 1397 1381 break; 1398 1382 } 1399 - if (err != 3 || wc != '*') 1400 - goto parse_error; 1401 1383 } 1384 + 1385 + if (parsed <= 0) 1386 + goto parse_error; 1402 1387 1403 1388 err = pcistub_device_id_add(domain, bus, slot, func); 1404 1389 if (err) 1405 1390 goto out; 1406 1391 1407 - /* if parsed<=0, we've reached the end of the string */ 1408 1392 pos += parsed; 1409 - } while (parsed > 0 && pci_devs_to_hide[pos]); 1393 + } while (pci_devs_to_hide[pos]); 1410 1394 } 1411 1395 1412 1396 /* If we're the first PCI Device Driver to register, we're the
-1
drivers/xen/xenbus/xenbus_xs.c
··· 48 48 #include <xen/xenbus.h> 49 49 #include <xen/xen.h> 50 50 #include "xenbus_comms.h" 51 - #include <asm/xen/hypervisor.h> 52 51 53 52 struct xs_stored_msg { 54 53 struct list_head list;
+42 -2
include/xen/interface/memory.h
··· 153 153 }; 154 154 DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mapping_t); 155 155 156 + #define XENMAPSPACE_shared_info 0 /* shared info page */ 157 + #define XENMAPSPACE_grant_table 1 /* grant table page */ 158 + #define XENMAPSPACE_gmfn 2 /* GMFN */ 159 + #define XENMAPSPACE_gmfn_range 3 /* GMFN range, XENMEM_add_to_physmap only. */ 160 + #define XENMAPSPACE_gmfn_foreign 4 /* GMFN from another dom, 161 + * XENMEM_add_to_physmap_range only. 162 + */ 163 + 156 164 /* 157 165 * Sets the GPFN at which a particular page appears in the specified guest's 158 166 * pseudophysical address space. ··· 175 167 uint16_t size; 176 168 177 169 /* Source mapping space. */ 178 - #define XENMAPSPACE_shared_info 0 /* shared info page */ 179 - #define XENMAPSPACE_grant_table 1 /* grant table page */ 180 170 unsigned int space; 181 171 182 172 /* Index into source mapping space. */ ··· 187 181 188 182 /*** REMOVED ***/ 189 183 /*#define XENMEM_translate_gpfn_list 8*/ 184 + 185 + #define XENMEM_add_to_physmap_range 23 186 + struct xen_add_to_physmap_range { 187 + /* Which domain to change the mapping for. */ 188 + domid_t domid; 189 + uint16_t space; /* => enum phys_map_space */ 190 + 191 + /* Number of pages to go through */ 192 + uint16_t size; 193 + domid_t foreign_domid; /* IFF gmfn_foreign */ 194 + 195 + /* Indexes into space being mapped. */ 196 + GUEST_HANDLE(xen_ulong_t) idxs; 197 + 198 + /* GPFN in domid where the source mapping page should appear. */ 199 + GUEST_HANDLE(xen_pfn_t) gpfns; 200 + }; 201 + DEFINE_GUEST_HANDLE_STRUCT(xen_add_to_physmap_range); 190 202 191 203 /* 192 204 * Returns the pseudo-physical memory map as it was when the domain ··· 241 217 * during a driver critical region. 242 218 */ 243 219 extern spinlock_t xen_reservation_lock; 220 + 221 + /* 222 + * Unmaps the page appearing at a particular GPFN from the specified guest's 223 + * pseudophysical address space. 224 + * arg == addr of xen_remove_from_physmap_t. 
225 + */ 226 + #define XENMEM_remove_from_physmap 15 227 + struct xen_remove_from_physmap { 228 + /* Which domain to change the mapping for. */ 229 + domid_t domid; 230 + 231 + /* GPFN of the current mapping of the page. */ 232 + xen_pfn_t gpfn; 233 + }; 234 + DEFINE_GUEST_HANDLE_STRUCT(xen_remove_from_physmap); 235 + 244 236 #endif /* __XEN_PUBLIC_MEMORY_H__ */
+17
include/xen/interface/platform.h
··· 324 324 }; 325 325 DEFINE_GUEST_HANDLE_STRUCT(xenpf_cpu_ol); 326 326 327 + /* 328 + * CMD 58 and 59 are reserved for cpu hotadd and memory hotadd, 329 + * which are already occupied at Xen hypervisor side. 330 + */ 331 + #define XENPF_core_parking 60 332 + struct xenpf_core_parking { 333 + /* IN variables */ 334 + #define XEN_CORE_PARKING_SET 1 335 + #define XEN_CORE_PARKING_GET 2 336 + uint32_t type; 337 + /* IN variables: set cpu nums expected to be idled */ 338 + /* OUT variables: get cpu nums actually be idled */ 339 + uint32_t idle_nums; 340 + }; 341 + DEFINE_GUEST_HANDLE_STRUCT(xenpf_core_parking); 342 + 327 343 struct xen_platform_op { 328 344 uint32_t cmd; 329 345 uint32_t interface_version; /* XENPF_INTERFACE_VERSION */ ··· 357 341 struct xenpf_set_processor_pminfo set_pminfo; 358 342 struct xenpf_pcpuinfo pcpu_info; 359 343 struct xenpf_cpu_ol cpu_ol; 344 + struct xenpf_core_parking core_parking; 360 345 uint8_t pad[128]; 361 346 } u; 362 347 };
+7 -2
include/xen/xen-ops.h
··· 2 2 #define INCLUDE_XEN_OPS_H 3 3 4 4 #include <linux/percpu.h> 5 + #include <asm/xen/interface.h> 5 6 6 7 DECLARE_PER_CPU(struct vcpu_info *, xen_vcpu); 7 8 ··· 27 26 struct vm_area_struct; 28 27 int xen_remap_domain_mfn_range(struct vm_area_struct *vma, 29 28 unsigned long addr, 30 - unsigned long mfn, int nr, 31 - pgprot_t prot, unsigned domid); 29 + xen_pfn_t mfn, int nr, 30 + pgprot_t prot, unsigned domid, 31 + struct page **pages); 32 + int xen_unmap_domain_mfn_range(struct vm_area_struct *vma, 33 + int numpgs, struct page **pages); 32 34 35 + bool xen_running_on_version_or_later(unsigned int major, unsigned int minor); 33 36 #endif /* INCLUDE_XEN_OPS_H */