Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

x86/xen: Move pv specific parts of arch/x86/xen/mmu.c to mmu_pv.c

There are some PV-specific functions in arch/x86/xen/mmu.c which can be
moved to mmu_pv.c. This in turn makes it possible to build multicalls.c
dependent on CONFIG_XEN_PV.

Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: xen-devel@lists.xenproject.org
Cc: virtualization@lists.linux-foundation.org
Cc: akataria@vmware.com
Cc: rusty@rustcorp.com.au
Cc: hpa@zytor.com
Link: https://lkml.kernel.org/r/20180828074026.820-3-jgross@suse.com

Authored by Juergen Gross and committed by Thomas Gleixner.
f030aade 28c11b0f

+249 -294
-34
arch/arm/xen/enlighten.c
··· 62 62 uint32_t xen_start_flags; 63 63 EXPORT_SYMBOL(xen_start_flags); 64 64 65 - int xen_remap_domain_gfn_array(struct vm_area_struct *vma, 66 - unsigned long addr, 67 - xen_pfn_t *gfn, int nr, 68 - int *err_ptr, pgprot_t prot, 69 - unsigned domid, 70 - struct page **pages) 71 - { 72 - return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr, 73 - prot, domid, pages); 74 - } 75 - EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array); 76 - 77 - /* Not used by XENFEAT_auto_translated guests. */ 78 - int xen_remap_domain_gfn_range(struct vm_area_struct *vma, 79 - unsigned long addr, 80 - xen_pfn_t gfn, int nr, 81 - pgprot_t prot, unsigned domid, 82 - struct page **pages) 83 - { 84 - return -ENOSYS; 85 - } 86 - EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range); 87 - 88 65 int xen_unmap_domain_gfn_range(struct vm_area_struct *vma, 89 66 int nr, struct page **pages) 90 67 { 91 68 return xen_xlate_unmap_gfn_range(vma, nr, pages); 92 69 } 93 70 EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range); 94 - 95 - /* Not used by XENFEAT_auto_translated guests. */ 96 - int xen_remap_domain_mfn_array(struct vm_area_struct *vma, 97 - unsigned long addr, 98 - xen_pfn_t *mfn, int nr, 99 - int *err_ptr, pgprot_t prot, 100 - unsigned int domid, struct page **pages) 101 - { 102 - return -ENOSYS; 103 - } 104 - EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array); 105 71 106 72 static void xen_read_wallclock(struct timespec64 *ts) 107 73 {
+1 -1
arch/x86/xen/Makefile
··· 15 15 CFLAGS_mmu_pv.o := $(nostackp) 16 16 17 17 obj-y += enlighten.o 18 - obj-y += multicalls.o 19 18 obj-y += mmu.o 20 19 obj-y += time.o 21 20 obj-y += grant-table.o ··· 33 34 obj-$(CONFIG_XEN_PV) += enlighten_pv.o 34 35 obj-$(CONFIG_XEN_PV) += mmu_pv.o 35 36 obj-$(CONFIG_XEN_PV) += irq.o 37 + obj-$(CONFIG_XEN_PV) += multicalls.o 36 38 obj-$(CONFIG_XEN_PV) += xen-asm.o 37 39 obj-$(CONFIG_XEN_PV) += xen-asm_$(BITS).o 38 40
-186
arch/x86/xen/mmu.c
··· 6 6 #include "multicalls.h" 7 7 #include "mmu.h" 8 8 9 - /* 10 - * Protects atomic reservation decrease/increase against concurrent increases. 11 - * Also protects non-atomic updates of current_pages and balloon lists. 12 - */ 13 - DEFINE_SPINLOCK(xen_reservation_lock); 14 - 15 9 unsigned long arbitrary_virt_to_mfn(void *vaddr) 16 10 { 17 11 xmaddr_t maddr = arbitrary_virt_to_machine(vaddr); ··· 35 41 return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset); 36 42 } 37 43 EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine); 38 - 39 - static noinline void xen_flush_tlb_all(void) 40 - { 41 - struct mmuext_op *op; 42 - struct multicall_space mcs; 43 - 44 - preempt_disable(); 45 - 46 - mcs = xen_mc_entry(sizeof(*op)); 47 - 48 - op = mcs.args; 49 - op->cmd = MMUEXT_TLB_FLUSH_ALL; 50 - MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); 51 - 52 - xen_mc_issue(PARAVIRT_LAZY_MMU); 53 - 54 - preempt_enable(); 55 - } 56 - 57 - #define REMAP_BATCH_SIZE 16 58 - 59 - struct remap_data { 60 - xen_pfn_t *pfn; 61 - bool contiguous; 62 - bool no_translate; 63 - pgprot_t prot; 64 - struct mmu_update *mmu_update; 65 - }; 66 - 67 - static int remap_area_pfn_pte_fn(pte_t *ptep, pgtable_t token, 68 - unsigned long addr, void *data) 69 - { 70 - struct remap_data *rmd = data; 71 - pte_t pte = pte_mkspecial(mfn_pte(*rmd->pfn, rmd->prot)); 72 - 73 - /* 74 - * If we have a contiguous range, just update the pfn itself, 75 - * else update pointer to be "next pfn". 76 - */ 77 - if (rmd->contiguous) 78 - (*rmd->pfn)++; 79 - else 80 - rmd->pfn++; 81 - 82 - rmd->mmu_update->ptr = virt_to_machine(ptep).maddr; 83 - rmd->mmu_update->ptr |= rmd->no_translate ? 
84 - MMU_PT_UPDATE_NO_TRANSLATE : 85 - MMU_NORMAL_PT_UPDATE; 86 - rmd->mmu_update->val = pte_val_ma(pte); 87 - rmd->mmu_update++; 88 - 89 - return 0; 90 - } 91 - 92 - static int do_remap_pfn(struct vm_area_struct *vma, 93 - unsigned long addr, 94 - xen_pfn_t *pfn, int nr, 95 - int *err_ptr, pgprot_t prot, 96 - unsigned int domid, 97 - bool no_translate, 98 - struct page **pages) 99 - { 100 - int err = 0; 101 - struct remap_data rmd; 102 - struct mmu_update mmu_update[REMAP_BATCH_SIZE]; 103 - unsigned long range; 104 - int mapped = 0; 105 - 106 - BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO))); 107 - 108 - rmd.pfn = pfn; 109 - rmd.prot = prot; 110 - /* 111 - * We use the err_ptr to indicate if there we are doing a contiguous 112 - * mapping or a discontigious mapping. 113 - */ 114 - rmd.contiguous = !err_ptr; 115 - rmd.no_translate = no_translate; 116 - 117 - while (nr) { 118 - int index = 0; 119 - int done = 0; 120 - int batch = min(REMAP_BATCH_SIZE, nr); 121 - int batch_left = batch; 122 - range = (unsigned long)batch << PAGE_SHIFT; 123 - 124 - rmd.mmu_update = mmu_update; 125 - err = apply_to_page_range(vma->vm_mm, addr, range, 126 - remap_area_pfn_pte_fn, &rmd); 127 - if (err) 128 - goto out; 129 - 130 - /* We record the error for each page that gives an error, but 131 - * continue mapping until the whole set is done */ 132 - do { 133 - int i; 134 - 135 - err = HYPERVISOR_mmu_update(&mmu_update[index], 136 - batch_left, &done, domid); 137 - 138 - /* 139 - * @err_ptr may be the same buffer as @gfn, so 140 - * only clear it after each chunk of @gfn is 141 - * used. 142 - */ 143 - if (err_ptr) { 144 - for (i = index; i < index + done; i++) 145 - err_ptr[i] = 0; 146 - } 147 - if (err < 0) { 148 - if (!err_ptr) 149 - goto out; 150 - err_ptr[i] = err; 151 - done++; /* Skip failed frame. 
*/ 152 - } else 153 - mapped += done; 154 - batch_left -= done; 155 - index += done; 156 - } while (batch_left); 157 - 158 - nr -= batch; 159 - addr += range; 160 - if (err_ptr) 161 - err_ptr += batch; 162 - cond_resched(); 163 - } 164 - out: 165 - 166 - xen_flush_tlb_all(); 167 - 168 - return err < 0 ? err : mapped; 169 - } 170 - 171 - int xen_remap_domain_gfn_range(struct vm_area_struct *vma, 172 - unsigned long addr, 173 - xen_pfn_t gfn, int nr, 174 - pgprot_t prot, unsigned domid, 175 - struct page **pages) 176 - { 177 - if (xen_feature(XENFEAT_auto_translated_physmap)) 178 - return -EOPNOTSUPP; 179 - 180 - return do_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false, 181 - pages); 182 - } 183 - EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range); 184 - 185 - int xen_remap_domain_gfn_array(struct vm_area_struct *vma, 186 - unsigned long addr, 187 - xen_pfn_t *gfn, int nr, 188 - int *err_ptr, pgprot_t prot, 189 - unsigned domid, struct page **pages) 190 - { 191 - if (xen_feature(XENFEAT_auto_translated_physmap)) 192 - return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr, 193 - prot, domid, pages); 194 - 195 - /* We BUG_ON because it's a programmer error to pass a NULL err_ptr, 196 - * and the consequences later is quite hard to detect what the actual 197 - * cause of "wrong memory was mapped in". 
198 - */ 199 - BUG_ON(err_ptr == NULL); 200 - return do_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid, 201 - false, pages); 202 - } 203 - EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array); 204 - 205 - int xen_remap_domain_mfn_array(struct vm_area_struct *vma, 206 - unsigned long addr, 207 - xen_pfn_t *mfn, int nr, 208 - int *err_ptr, pgprot_t prot, 209 - unsigned int domid, struct page **pages) 210 - { 211 - if (xen_feature(XENFEAT_auto_translated_physmap)) 212 - return -EOPNOTSUPP; 213 - 214 - return do_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid, 215 - true, pages); 216 - } 217 - EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array); 218 44 219 45 /* Returns: 0 success */ 220 46 int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
+138
arch/x86/xen/mmu_pv.c
··· 99 99 #endif /* CONFIG_X86_64 */ 100 100 101 101 /* 102 + * Protects atomic reservation decrease/increase against concurrent increases. 103 + * Also protects non-atomic updates of current_pages and balloon lists. 104 + */ 105 + DEFINE_SPINLOCK(xen_reservation_lock); 106 + 107 + /* 102 108 * Note about cr3 (pagetable base) values: 103 109 * 104 110 * xen_cr3 contains the current logical cr3 value; it contains the ··· 2667 2661 spin_unlock_irqrestore(&xen_reservation_lock, flags); 2668 2662 } 2669 2663 EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region); 2664 + 2665 + static noinline void xen_flush_tlb_all(void) 2666 + { 2667 + struct mmuext_op *op; 2668 + struct multicall_space mcs; 2669 + 2670 + preempt_disable(); 2671 + 2672 + mcs = xen_mc_entry(sizeof(*op)); 2673 + 2674 + op = mcs.args; 2675 + op->cmd = MMUEXT_TLB_FLUSH_ALL; 2676 + MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); 2677 + 2678 + xen_mc_issue(PARAVIRT_LAZY_MMU); 2679 + 2680 + preempt_enable(); 2681 + } 2682 + 2683 + #define REMAP_BATCH_SIZE 16 2684 + 2685 + struct remap_data { 2686 + xen_pfn_t *pfn; 2687 + bool contiguous; 2688 + bool no_translate; 2689 + pgprot_t prot; 2690 + struct mmu_update *mmu_update; 2691 + }; 2692 + 2693 + static int remap_area_pfn_pte_fn(pte_t *ptep, pgtable_t token, 2694 + unsigned long addr, void *data) 2695 + { 2696 + struct remap_data *rmd = data; 2697 + pte_t pte = pte_mkspecial(mfn_pte(*rmd->pfn, rmd->prot)); 2698 + 2699 + /* 2700 + * If we have a contiguous range, just update the pfn itself, 2701 + * else update pointer to be "next pfn". 2702 + */ 2703 + if (rmd->contiguous) 2704 + (*rmd->pfn)++; 2705 + else 2706 + rmd->pfn++; 2707 + 2708 + rmd->mmu_update->ptr = virt_to_machine(ptep).maddr; 2709 + rmd->mmu_update->ptr |= rmd->no_translate ? 
2710 + MMU_PT_UPDATE_NO_TRANSLATE : 2711 + MMU_NORMAL_PT_UPDATE; 2712 + rmd->mmu_update->val = pte_val_ma(pte); 2713 + rmd->mmu_update++; 2714 + 2715 + return 0; 2716 + } 2717 + 2718 + int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr, 2719 + xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot, 2720 + unsigned int domid, bool no_translate, struct page **pages) 2721 + { 2722 + int err = 0; 2723 + struct remap_data rmd; 2724 + struct mmu_update mmu_update[REMAP_BATCH_SIZE]; 2725 + unsigned long range; 2726 + int mapped = 0; 2727 + 2728 + BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO))); 2729 + 2730 + rmd.pfn = pfn; 2731 + rmd.prot = prot; 2732 + /* 2733 + * We use the err_ptr to indicate if there we are doing a contiguous 2734 + * mapping or a discontigious mapping. 2735 + */ 2736 + rmd.contiguous = !err_ptr; 2737 + rmd.no_translate = no_translate; 2738 + 2739 + while (nr) { 2740 + int index = 0; 2741 + int done = 0; 2742 + int batch = min(REMAP_BATCH_SIZE, nr); 2743 + int batch_left = batch; 2744 + 2745 + range = (unsigned long)batch << PAGE_SHIFT; 2746 + 2747 + rmd.mmu_update = mmu_update; 2748 + err = apply_to_page_range(vma->vm_mm, addr, range, 2749 + remap_area_pfn_pte_fn, &rmd); 2750 + if (err) 2751 + goto out; 2752 + 2753 + /* 2754 + * We record the error for each page that gives an error, but 2755 + * continue mapping until the whole set is done 2756 + */ 2757 + do { 2758 + int i; 2759 + 2760 + err = HYPERVISOR_mmu_update(&mmu_update[index], 2761 + batch_left, &done, domid); 2762 + 2763 + /* 2764 + * @err_ptr may be the same buffer as @gfn, so 2765 + * only clear it after each chunk of @gfn is 2766 + * used. 2767 + */ 2768 + if (err_ptr) { 2769 + for (i = index; i < index + done; i++) 2770 + err_ptr[i] = 0; 2771 + } 2772 + if (err < 0) { 2773 + if (!err_ptr) 2774 + goto out; 2775 + err_ptr[i] = err; 2776 + done++; /* Skip failed frame. 
*/ 2777 + } else 2778 + mapped += done; 2779 + batch_left -= done; 2780 + index += done; 2781 + } while (batch_left); 2782 + 2783 + nr -= batch; 2784 + addr += range; 2785 + if (err_ptr) 2786 + err_ptr += batch; 2787 + cond_resched(); 2788 + } 2789 + out: 2790 + 2791 + xen_flush_tlb_all(); 2792 + 2793 + return err < 0 ? err : mapped; 2794 + } 2795 + EXPORT_SYMBOL_GPL(xen_remap_pfn); 2670 2796 2671 2797 #ifdef CONFIG_KEXEC_CORE 2672 2798 phys_addr_t paddr_vmcoreinfo_note(void)
-6
include/xen/interface/memory.h
··· 245 245 246 246 247 247 /* 248 - * Prevent the balloon driver from changing the memory reservation 249 - * during a driver critical region. 250 - */ 251 - extern spinlock_t xen_reservation_lock; 252 - 253 - /* 254 248 * Unmaps the page appearing at a particular GPFN from the specified guest's 255 249 * pseudophysical address space. 256 250 * arg == addr of xen_remove_from_physmap_t.
+110 -67
include/xen/xen-ops.h
··· 5 5 #include <linux/percpu.h> 6 6 #include <linux/notifier.h> 7 7 #include <linux/efi.h> 8 + #include <xen/features.h> 8 9 #include <asm/xen/interface.h> 9 10 #include <xen/interface/vcpu.h> 10 11 ··· 48 47 dma_addr_t *dma_handle); 49 48 50 49 void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order); 50 + 51 + int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr, 52 + xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot, 53 + unsigned int domid, bool no_translate, struct page **pages); 51 54 #else 52 55 static inline int xen_create_contiguous_region(phys_addr_t pstart, 53 56 unsigned int order, ··· 63 58 64 59 static inline void xen_destroy_contiguous_region(phys_addr_t pstart, 65 60 unsigned int order) { } 61 + 62 + static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr, 63 + xen_pfn_t *pfn, int nr, int *err_ptr, 64 + pgprot_t prot, unsigned int domid, 65 + bool no_translate, struct page **pages) 66 + { 67 + BUG(); 68 + return 0; 69 + } 66 70 #endif 67 71 68 72 struct vm_area_struct; 69 - 70 - /* 71 - * xen_remap_domain_gfn_array() - map an array of foreign frames by gfn 72 - * @vma: VMA to map the pages into 73 - * @addr: Address at which to map the pages 74 - * @gfn: Array of GFNs to map 75 - * @nr: Number entries in the GFN array 76 - * @err_ptr: Returns per-GFN error status. 77 - * @prot: page protection mask 78 - * @domid: Domain owning the pages 79 - * @pages: Array of pages if this domain has an auto-translated physmap 80 - * 81 - * @gfn and @err_ptr may point to the same buffer, the GFNs will be 82 - * overwritten by the error codes after they are mapped. 83 - * 84 - * Returns the number of successfully mapped frames, or a -ve error 85 - * code. 
86 - */ 87 - int xen_remap_domain_gfn_array(struct vm_area_struct *vma, 88 - unsigned long addr, 89 - xen_pfn_t *gfn, int nr, 90 - int *err_ptr, pgprot_t prot, 91 - unsigned domid, 92 - struct page **pages); 93 - 94 - /* 95 - * xen_remap_domain_mfn_array() - map an array of foreign frames by mfn 96 - * @vma: VMA to map the pages into 97 - * @addr: Address at which to map the pages 98 - * @mfn: Array of MFNs to map 99 - * @nr: Number entries in the MFN array 100 - * @err_ptr: Returns per-MFN error status. 101 - * @prot: page protection mask 102 - * @domid: Domain owning the pages 103 - * @pages: Array of pages if this domain has an auto-translated physmap 104 - * 105 - * @mfn and @err_ptr may point to the same buffer, the MFNs will be 106 - * overwritten by the error codes after they are mapped. 107 - * 108 - * Returns the number of successfully mapped frames, or a -ve error 109 - * code. 110 - */ 111 - int xen_remap_domain_mfn_array(struct vm_area_struct *vma, 112 - unsigned long addr, xen_pfn_t *mfn, int nr, 113 - int *err_ptr, pgprot_t prot, 114 - unsigned int domid, struct page **pages); 115 - 116 - /* xen_remap_domain_gfn_range() - map a range of foreign frames 117 - * @vma: VMA to map the pages into 118 - * @addr: Address at which to map the pages 119 - * @gfn: First GFN to map. 120 - * @nr: Number frames to map 121 - * @prot: page protection mask 122 - * @domid: Domain owning the pages 123 - * @pages: Array of pages if this domain has an auto-translated physmap 124 - * 125 - * Returns the number of successfully mapped frames, or a -ve error 126 - * code. 
127 - */ 128 - int xen_remap_domain_gfn_range(struct vm_area_struct *vma, 129 - unsigned long addr, 130 - xen_pfn_t gfn, int nr, 131 - pgprot_t prot, unsigned domid, 132 - struct page **pages); 133 - int xen_unmap_domain_gfn_range(struct vm_area_struct *vma, 134 - int numpgs, struct page **pages); 135 73 136 74 #ifdef CONFIG_XEN_AUTO_XLATE 137 75 int xen_xlate_remap_gfn_array(struct vm_area_struct *vma, 138 76 unsigned long addr, 139 77 xen_pfn_t *gfn, int nr, 140 78 int *err_ptr, pgprot_t prot, 141 - unsigned domid, 79 + unsigned int domid, 142 80 struct page **pages); 143 81 int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma, 144 82 int nr, struct page **pages); ··· 106 158 return -EOPNOTSUPP; 107 159 } 108 160 #endif 161 + 162 + /* 163 + * xen_remap_domain_gfn_array() - map an array of foreign frames by gfn 164 + * @vma: VMA to map the pages into 165 + * @addr: Address at which to map the pages 166 + * @gfn: Array of GFNs to map 167 + * @nr: Number entries in the GFN array 168 + * @err_ptr: Returns per-GFN error status. 169 + * @prot: page protection mask 170 + * @domid: Domain owning the pages 171 + * @pages: Array of pages if this domain has an auto-translated physmap 172 + * 173 + * @gfn and @err_ptr may point to the same buffer, the GFNs will be 174 + * overwritten by the error codes after they are mapped. 175 + * 176 + * Returns the number of successfully mapped frames, or a -ve error 177 + * code. 
178 + */ 179 + static inline int xen_remap_domain_gfn_array(struct vm_area_struct *vma, 180 + unsigned long addr, 181 + xen_pfn_t *gfn, int nr, 182 + int *err_ptr, pgprot_t prot, 183 + unsigned int domid, 184 + struct page **pages) 185 + { 186 + if (xen_feature(XENFEAT_auto_translated_physmap)) 187 + return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr, 188 + prot, domid, pages); 189 + 190 + /* We BUG_ON because it's a programmer error to pass a NULL err_ptr, 191 + * and the consequences later is quite hard to detect what the actual 192 + * cause of "wrong memory was mapped in". 193 + */ 194 + BUG_ON(err_ptr == NULL); 195 + return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid, 196 + false, pages); 197 + } 198 + 199 + /* 200 + * xen_remap_domain_mfn_array() - map an array of foreign frames by mfn 201 + * @vma: VMA to map the pages into 202 + * @addr: Address at which to map the pages 203 + * @mfn: Array of MFNs to map 204 + * @nr: Number entries in the MFN array 205 + * @err_ptr: Returns per-MFN error status. 206 + * @prot: page protection mask 207 + * @domid: Domain owning the pages 208 + * @pages: Array of pages if this domain has an auto-translated physmap 209 + * 210 + * @mfn and @err_ptr may point to the same buffer, the MFNs will be 211 + * overwritten by the error codes after they are mapped. 212 + * 213 + * Returns the number of successfully mapped frames, or a -ve error 214 + * code. 
215 + */ 216 + static inline int xen_remap_domain_mfn_array(struct vm_area_struct *vma, 217 + unsigned long addr, xen_pfn_t *mfn, 218 + int nr, int *err_ptr, 219 + pgprot_t prot, unsigned int domid, 220 + struct page **pages) 221 + { 222 + if (xen_feature(XENFEAT_auto_translated_physmap)) 223 + return -EOPNOTSUPP; 224 + 225 + return xen_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid, 226 + true, pages); 227 + } 228 + 229 + /* xen_remap_domain_gfn_range() - map a range of foreign frames 230 + * @vma: VMA to map the pages into 231 + * @addr: Address at which to map the pages 232 + * @gfn: First GFN to map. 233 + * @nr: Number frames to map 234 + * @prot: page protection mask 235 + * @domid: Domain owning the pages 236 + * @pages: Array of pages if this domain has an auto-translated physmap 237 + * 238 + * Returns the number of successfully mapped frames, or a -ve error 239 + * code. 240 + */ 241 + static inline int xen_remap_domain_gfn_range(struct vm_area_struct *vma, 242 + unsigned long addr, 243 + xen_pfn_t gfn, int nr, 244 + pgprot_t prot, unsigned int domid, 245 + struct page **pages) 246 + { 247 + if (xen_feature(XENFEAT_auto_translated_physmap)) 248 + return -EOPNOTSUPP; 249 + 250 + return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false, 251 + pages); 252 + } 253 + 254 + int xen_unmap_domain_gfn_range(struct vm_area_struct *vma, 255 + int numpgs, struct page **pages); 109 256 110 257 int xen_xlate_map_ballooned_pages(xen_pfn_t **pfns, void **vaddr, 111 258 unsigned long nr_grant_frames);