Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xen/privcmd: Further s/MFN/GFN/ clean-up

The privcmd code mixes the usage of GFN and MFN within the same
functions, which makes the code difficult to understand when you only
work with auto-translated guests.

The privcmd driver only deals with GFNs, so replace all mentions of
MFN with GFN.

The ioctl structure used to map foreign pages has been left unchanged
given that userspace is already using it. Nonetheless, add a comment to
explain the expected value within the "mfn" field.

Signed-off-by: Julien Grall <julien.grall@citrix.com>
Reviewed-by: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>

+65 -61
+9 -9
arch/arm/xen/enlighten.c
··· 49 49 50 50 static __initdata struct device_node *xen_node; 51 51 52 - int xen_remap_domain_mfn_array(struct vm_area_struct *vma, 52 + int xen_remap_domain_gfn_array(struct vm_area_struct *vma, 53 53 unsigned long addr, 54 - xen_pfn_t *mfn, int nr, 54 + xen_pfn_t *gfn, int nr, 55 55 int *err_ptr, pgprot_t prot, 56 56 unsigned domid, 57 57 struct page **pages) 58 58 { 59 - return xen_xlate_remap_gfn_array(vma, addr, mfn, nr, err_ptr, 59 + return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr, 60 60 prot, domid, pages); 61 61 } 62 - EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array); 62 + EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array); 63 63 64 64 /* Not used by XENFEAT_auto_translated guests. */ 65 - int xen_remap_domain_mfn_range(struct vm_area_struct *vma, 65 + int xen_remap_domain_gfn_range(struct vm_area_struct *vma, 66 66 unsigned long addr, 67 - xen_pfn_t mfn, int nr, 67 + xen_pfn_t gfn, int nr, 68 68 pgprot_t prot, unsigned domid, 69 69 struct page **pages) 70 70 { 71 71 return -ENOSYS; 72 72 } 73 - EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range); 73 + EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range); 74 74 75 - int xen_unmap_domain_mfn_range(struct vm_area_struct *vma, 75 + int xen_unmap_domain_gfn_range(struct vm_area_struct *vma, 76 76 int nr, struct page **pages) 77 77 { 78 78 return xen_xlate_unmap_gfn_range(vma, nr, pages); 79 79 } 80 - EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range); 80 + EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range); 81 81 82 82 static void xen_percpu_init(void) 83 83 {
+16 -16
arch/x86/xen/mmu.c
··· 2812 2812 return 0; 2813 2813 } 2814 2814 2815 - static int do_remap_mfn(struct vm_area_struct *vma, 2815 + static int do_remap_gfn(struct vm_area_struct *vma, 2816 2816 unsigned long addr, 2817 - xen_pfn_t *mfn, int nr, 2817 + xen_pfn_t *gfn, int nr, 2818 2818 int *err_ptr, pgprot_t prot, 2819 2819 unsigned domid, 2820 2820 struct page **pages) ··· 2830 2830 if (xen_feature(XENFEAT_auto_translated_physmap)) { 2831 2831 #ifdef CONFIG_XEN_PVH 2832 2832 /* We need to update the local page tables and the xen HAP */ 2833 - return xen_xlate_remap_gfn_array(vma, addr, mfn, nr, err_ptr, 2833 + return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr, 2834 2834 prot, domid, pages); 2835 2835 #else 2836 2836 return -EINVAL; 2837 2837 #endif 2838 2838 } 2839 2839 2840 - rmd.mfn = mfn; 2840 + rmd.mfn = gfn; 2841 2841 rmd.prot = prot; 2842 2842 /* We use the err_ptr to indicate if there we are doing a contigious 2843 2843 * mapping or a discontigious mapping. */ ··· 2865 2865 batch_left, &done, domid); 2866 2866 2867 2867 /* 2868 - * @err_ptr may be the same buffer as @mfn, so 2869 - * only clear it after each chunk of @mfn is 2868 + * @err_ptr may be the same buffer as @gfn, so 2869 + * only clear it after each chunk of @gfn is 2870 2870 * used. 2871 2871 */ 2872 2872 if (err_ptr) { ··· 2896 2896 return err < 0 ? 
err : mapped; 2897 2897 } 2898 2898 2899 - int xen_remap_domain_mfn_range(struct vm_area_struct *vma, 2899 + int xen_remap_domain_gfn_range(struct vm_area_struct *vma, 2900 2900 unsigned long addr, 2901 - xen_pfn_t mfn, int nr, 2901 + xen_pfn_t gfn, int nr, 2902 2902 pgprot_t prot, unsigned domid, 2903 2903 struct page **pages) 2904 2904 { 2905 - return do_remap_mfn(vma, addr, &mfn, nr, NULL, prot, domid, pages); 2905 + return do_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid, pages); 2906 2906 } 2907 - EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range); 2907 + EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range); 2908 2908 2909 - int xen_remap_domain_mfn_array(struct vm_area_struct *vma, 2909 + int xen_remap_domain_gfn_array(struct vm_area_struct *vma, 2910 2910 unsigned long addr, 2911 - xen_pfn_t *mfn, int nr, 2911 + xen_pfn_t *gfn, int nr, 2912 2912 int *err_ptr, pgprot_t prot, 2913 2913 unsigned domid, struct page **pages) 2914 2914 { ··· 2917 2917 * cause of "wrong memory was mapped in". 2918 2918 */ 2919 2919 BUG_ON(err_ptr == NULL); 2920 - return do_remap_mfn(vma, addr, mfn, nr, err_ptr, prot, domid, pages); 2920 + return do_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid, pages); 2921 2921 } 2922 - EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array); 2922 + EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array); 2923 2923 2924 2924 2925 2925 /* Returns: 0 success */ 2926 - int xen_unmap_domain_mfn_range(struct vm_area_struct *vma, 2926 + int xen_unmap_domain_gfn_range(struct vm_area_struct *vma, 2927 2927 int numpgs, struct page **pages) 2928 2928 { 2929 2929 if (!pages || !xen_feature(XENFEAT_auto_translated_physmap)) ··· 2935 2935 return -EINVAL; 2936 2936 #endif 2937 2937 } 2938 - EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range); 2938 + EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
+22 -22
drivers/xen/privcmd.c
··· 193 193 return ret; 194 194 } 195 195 196 - struct mmap_mfn_state { 196 + struct mmap_gfn_state { 197 197 unsigned long va; 198 198 struct vm_area_struct *vma; 199 199 domid_t domain; 200 200 }; 201 201 202 - static int mmap_mfn_range(void *data, void *state) 202 + static int mmap_gfn_range(void *data, void *state) 203 203 { 204 204 struct privcmd_mmap_entry *msg = data; 205 - struct mmap_mfn_state *st = state; 205 + struct mmap_gfn_state *st = state; 206 206 struct vm_area_struct *vma = st->vma; 207 207 int rc; 208 208 ··· 216 216 ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end)) 217 217 return -EINVAL; 218 218 219 - rc = xen_remap_domain_mfn_range(vma, 219 + rc = xen_remap_domain_gfn_range(vma, 220 220 msg->va & PAGE_MASK, 221 221 msg->mfn, msg->npages, 222 222 vma->vm_page_prot, ··· 236 236 struct vm_area_struct *vma; 237 237 int rc; 238 238 LIST_HEAD(pagelist); 239 - struct mmap_mfn_state state; 239 + struct mmap_gfn_state state; 240 240 241 241 /* We only support privcmd_ioctl_mmap_batch for auto translated. */ 242 242 if (xen_feature(XENFEAT_auto_translated_physmap)) ··· 273 273 274 274 rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry), 275 275 &pagelist, 276 - mmap_mfn_range, &state); 276 + mmap_gfn_range, &state); 277 277 278 278 279 279 out_up: ··· 299 299 int global_error; 300 300 int version; 301 301 302 - /* User-space mfn array to store errors in the second pass for V1. */ 303 - xen_pfn_t __user *user_mfn; 302 + /* User-space gfn array to store errors in the second pass for V1. */ 303 + xen_pfn_t __user *user_gfn; 304 304 /* User-space int array to store errors in the second pass for V2. */ 305 305 int __user *user_err; 306 306 }; 307 307 308 - /* auto translated dom0 note: if domU being created is PV, then mfn is 309 - * mfn(addr on bus). If it's auto xlated, then mfn is pfn (input to HAP). 308 + /* auto translated dom0 note: if domU being created is PV, then gfn is 309 + * mfn(addr on bus). 
If it's auto xlated, then gfn is pfn (input to HAP). 310 310 */ 311 311 static int mmap_batch_fn(void *data, int nr, void *state) 312 312 { 313 - xen_pfn_t *mfnp = data; 313 + xen_pfn_t *gfnp = data; 314 314 struct mmap_batch_state *st = state; 315 315 struct vm_area_struct *vma = st->vma; 316 316 struct page **pages = vma->vm_private_data; ··· 321 321 cur_pages = &pages[st->index]; 322 322 323 323 BUG_ON(nr < 0); 324 - ret = xen_remap_domain_mfn_array(st->vma, st->va & PAGE_MASK, mfnp, nr, 325 - (int *)mfnp, st->vma->vm_page_prot, 324 + ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr, 325 + (int *)gfnp, st->vma->vm_page_prot, 326 326 st->domain, cur_pages); 327 327 328 328 /* Adjust the global_error? */ ··· 347 347 348 348 if (st->version == 1) { 349 349 if (err) { 350 - xen_pfn_t mfn; 350 + xen_pfn_t gfn; 351 351 352 - ret = get_user(mfn, st->user_mfn); 352 + ret = get_user(gfn, st->user_gfn); 353 353 if (ret < 0) 354 354 return ret; 355 355 /* 356 356 * V1 encodes the error codes in the 32bit top 357 - * nibble of the mfn (with its known 357 + * nibble of the gfn (with its known 358 358 * limitations vis-a-vis 64 bit callers). 359 359 */ 360 - mfn |= (err == -ENOENT) ? 360 + gfn |= (err == -ENOENT) ? 361 361 PRIVCMD_MMAPBATCH_PAGED_ERROR : 362 362 PRIVCMD_MMAPBATCH_MFN_ERROR; 363 - return __put_user(mfn, st->user_mfn++); 363 + return __put_user(gfn, st->user_gfn++); 364 364 } else 365 - st->user_mfn++; 365 + st->user_gfn++; 366 366 } else { /* st->version == 2 */ 367 367 if (err) 368 368 return __put_user(err, st->user_err++); ··· 388 388 return 0; 389 389 } 390 390 391 - /* Allocate pfns that are then mapped with gmfns from foreign domid. Update 391 + /* Allocate pfns that are then mapped with gfns from foreign domid. Update 392 392 * the vma with the page info to use later. 393 393 * Returns: 0 if success, otherwise -errno 394 394 */ ··· 526 526 527 527 if (state.global_error) { 528 528 /* Write back errors in second pass. 
*/ 529 - state.user_mfn = (xen_pfn_t *)m.arr; 529 + state.user_gfn = (xen_pfn_t *)m.arr; 530 530 state.user_err = m.err; 531 531 ret = traverse_pages_block(m.num, sizeof(xen_pfn_t), 532 532 &pagelist, mmap_return_errors, &state); ··· 587 587 if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages) 588 588 return; 589 589 590 - rc = xen_unmap_domain_mfn_range(vma, numpgs, pages); 590 + rc = xen_unmap_domain_gfn_range(vma, numpgs, pages); 591 591 if (rc == 0) 592 592 free_xenballooned_pages(numpgs, pages); 593 593 else
+9 -9
drivers/xen/xlate_mmu.c
··· 38 38 #include <xen/interface/xen.h> 39 39 #include <xen/interface/memory.h> 40 40 41 - /* map fgmfn of domid to lpfn in the current domain */ 42 - static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn, 41 + /* map fgfn of domid to lpfn in the current domain */ 42 + static int map_foreign_page(unsigned long lpfn, unsigned long fgfn, 43 43 unsigned int domid) 44 44 { 45 45 int rc; ··· 49 49 .size = 1, 50 50 .space = XENMAPSPACE_gmfn_foreign, 51 51 }; 52 - xen_ulong_t idx = fgmfn; 52 + xen_ulong_t idx = fgfn; 53 53 xen_pfn_t gpfn = lpfn; 54 54 int err = 0; 55 55 ··· 62 62 } 63 63 64 64 struct remap_data { 65 - xen_pfn_t *fgmfn; /* foreign domain's gmfn */ 65 + xen_pfn_t *fgfn; /* foreign domain's gfn */ 66 66 pgprot_t prot; 67 67 domid_t domid; 68 68 struct vm_area_struct *vma; 69 69 int index; 70 70 struct page **pages; 71 - struct xen_remap_mfn_info *info; 71 + struct xen_remap_gfn_info *info; 72 72 int *err_ptr; 73 73 int mapped; 74 74 }; ··· 82 82 pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot)); 83 83 int rc; 84 84 85 - rc = map_foreign_page(pfn, *info->fgmfn, info->domid); 85 + rc = map_foreign_page(pfn, *info->fgfn, info->domid); 86 86 *info->err_ptr++ = rc; 87 87 if (!rc) { 88 88 set_pte_at(info->vma->vm_mm, addr, ptep, pte); 89 89 info->mapped++; 90 90 } 91 - info->fgmfn++; 91 + info->fgfn++; 92 92 93 93 return 0; 94 94 } 95 95 96 96 int xen_xlate_remap_gfn_array(struct vm_area_struct *vma, 97 97 unsigned long addr, 98 - xen_pfn_t *mfn, int nr, 98 + xen_pfn_t *gfn, int nr, 99 99 int *err_ptr, pgprot_t prot, 100 100 unsigned domid, 101 101 struct page **pages) ··· 108 108 x86 PVOPS */ 109 109 BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO))); 110 110 111 - data.fgmfn = mfn; 111 + data.fgfn = gfn; 112 112 data.prot = prot; 113 113 data.domid = domid; 114 114 data.vma = vma;
+4
include/uapi/xen/privcmd.h
··· 44 44 45 45 struct privcmd_mmap_entry { 46 46 __u64 va; 47 + /* 48 + * This should be a GFN. It's not possible to change the name because 49 + * it's exposed to the user-space. 50 + */ 47 51 __u64 mfn; 48 52 __u64 npages; 49 53 };
+5 -5
include/xen/xen-ops.h
··· 30 30 struct vm_area_struct; 31 31 32 32 /* 33 - * xen_remap_domain_mfn_array() - map an array of foreign frames 33 + * xen_remap_domain_gfn_array() - map an array of foreign frames 34 34 * @vma: VMA to map the pages into 35 35 * @addr: Address at which to map the pages 36 36 * @gfn: Array of GFNs to map ··· 46 46 * Returns the number of successfully mapped frames, or a -ve error 47 47 * code. 48 48 */ 49 - int xen_remap_domain_mfn_array(struct vm_area_struct *vma, 49 + int xen_remap_domain_gfn_array(struct vm_area_struct *vma, 50 50 unsigned long addr, 51 51 xen_pfn_t *gfn, int nr, 52 52 int *err_ptr, pgprot_t prot, 53 53 unsigned domid, 54 54 struct page **pages); 55 55 56 - /* xen_remap_domain_mfn_range() - map a range of foreign frames 56 + /* xen_remap_domain_gfn_range() - map a range of foreign frames 57 57 * @vma: VMA to map the pages into 58 58 * @addr: Address at which to map the pages 59 59 * @gfn: First GFN to map. ··· 65 65 * Returns the number of successfully mapped frames, or a -ve error 66 66 * code. 67 67 */ 68 - int xen_remap_domain_mfn_range(struct vm_area_struct *vma, 68 + int xen_remap_domain_gfn_range(struct vm_area_struct *vma, 69 69 unsigned long addr, 70 70 xen_pfn_t gfn, int nr, 71 71 pgprot_t prot, unsigned domid, 72 72 struct page **pages); 73 - int xen_unmap_domain_mfn_range(struct vm_area_struct *vma, 73 + int xen_unmap_domain_gfn_range(struct vm_area_struct *vma, 74 74 int numpgs, struct page **pages); 75 75 int xen_xlate_remap_gfn_array(struct vm_area_struct *vma, 76 76 unsigned long addr,