Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xen: Use correctly the Xen memory terminologies

Based on include/xen/mm.h [1], Linux is mistakenly using MFN when GFN
is meant, I suspect this is because the first support for Xen was for
PV. This resulted in some misimplementation of helpers on ARM and
confused developers about the expected behavior.

For instance, with pfn_to_mfn, we expect to get an MFN based on the name.
However, if we look at the implementation on x86, it's actually returning a GFN.

For clarity and to avoid new confusion, replace any reference to mfn with
gfn in any helpers used by PV drivers. The x86 code will still keep some
references to pfn_to_mfn, which may be used by all kinds of guests.
No changes have been made in the hypercall fields, even
though they may be invalid, in order to keep them the same as the definition
in the xen repo.

Note that page_to_mfn has been renamed to xen_page_to_gfn to avoid a
name too close to the KVM function gfn_to_page.

Take also the opportunity to simplify simple construction such
as pfn_to_mfn(page_to_pfn(page)) into xen_page_to_gfn. More complex clean up
will come in follow-up patches.

[1] http://xenbits.xen.org/gitweb/?p=xen.git;a=commitdiff;h=e758ed14f390342513405dd766e874934573e6cb

Signed-off-by: Julien Grall <julien.grall@citrix.com>
Reviewed-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Acked-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
Acked-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>

+81 -47
+7 -6
arch/arm/include/asm/xen/page.h
··· 34 34 unsigned long __pfn_to_mfn(unsigned long pfn); 35 35 extern struct rb_root phys_to_mach; 36 36 37 - static inline unsigned long pfn_to_mfn(unsigned long pfn) 37 + /* Pseudo-physical <-> Guest conversion */ 38 + static inline unsigned long pfn_to_gfn(unsigned long pfn) 38 39 { 39 40 return pfn; 40 41 } 41 42 42 - static inline unsigned long mfn_to_pfn(unsigned long mfn) 43 + static inline unsigned long gfn_to_pfn(unsigned long gfn) 43 44 { 44 - return mfn; 45 + return gfn; 45 46 } 46 47 47 48 /* Pseudo-physical <-> BUS conversion */ ··· 66 65 67 66 #define bfn_to_local_pfn(bfn) bfn_to_pfn(bfn) 68 67 69 - /* VIRT <-> MACHINE conversion */ 70 - #define virt_to_mfn(v) (pfn_to_mfn(virt_to_pfn(v))) 71 - #define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) 68 + /* VIRT <-> GUEST conversion */ 69 + #define virt_to_gfn(v) (pfn_to_gfn(virt_to_pfn(v))) 70 + #define gfn_to_virt(m) (__va(gfn_to_pfn(m) << PAGE_SHIFT)) 72 71 73 72 /* Only used in PV code. But ARM guests are always HVM. */ 74 73 static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr)
+33 -2
arch/x86/include/asm/xen/page.h
··· 101 101 { 102 102 unsigned long mfn; 103 103 104 + /* 105 + * Some x86 code are still using pfn_to_mfn instead of 106 + * pfn_to_gfn. This will have to be removed when we figure 107 + * out which call. 108 + */ 104 109 if (xen_feature(XENFEAT_auto_translated_physmap)) 105 110 return pfn; 106 111 ··· 152 147 { 153 148 unsigned long pfn; 154 149 150 + /* 151 + * Some x86 code are still using mfn_to_pfn instead of 152 + * gfn_to_pfn. This will have to be removed when we figure 153 + * out which call. 154 + */ 155 155 if (xen_feature(XENFEAT_auto_translated_physmap)) 156 156 return mfn; 157 157 ··· 186 176 return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset); 187 177 } 188 178 179 + /* Pseudo-physical <-> Guest conversion */ 180 + static inline unsigned long pfn_to_gfn(unsigned long pfn) 181 + { 182 + if (xen_feature(XENFEAT_auto_translated_physmap)) 183 + return pfn; 184 + else 185 + return pfn_to_mfn(pfn); 186 + } 187 + 188 + static inline unsigned long gfn_to_pfn(unsigned long gfn) 189 + { 190 + if (xen_feature(XENFEAT_auto_translated_physmap)) 191 + return gfn; 192 + else 193 + return mfn_to_pfn(gfn); 194 + } 195 + 189 196 /* Pseudo-physical <-> Bus conversion */ 190 - #define pfn_to_bfn(pfn) pfn_to_mfn(pfn) 191 - #define bfn_to_pfn(bfn) mfn_to_pfn(bfn) 197 + #define pfn_to_bfn(pfn) pfn_to_gfn(pfn) 198 + #define bfn_to_pfn(bfn) gfn_to_pfn(bfn) 192 199 193 200 /* 194 201 * We detect special mappings in one of two ways: ··· 245 218 #define virt_to_pfn(v) (PFN_DOWN(__pa(v))) 246 219 #define virt_to_mfn(v) (pfn_to_mfn(virt_to_pfn(v))) 247 220 #define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) 221 + 222 + /* VIRT <-> GUEST conversion */ 223 + #define virt_to_gfn(v) (pfn_to_gfn(virt_to_pfn(v))) 224 + #define gfn_to_virt(g) (__va(gfn_to_pfn(g) << PAGE_SHIFT)) 248 225 249 226 static inline unsigned long pte_mfn(pte_t pte) 250 227 {
+1 -1
arch/x86/xen/smp.c
··· 453 453 } 454 454 #endif 455 455 ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs); 456 - ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir)); 456 + ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(swapper_pg_dir)); 457 457 if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt)) 458 458 BUG(); 459 459
+3 -3
drivers/block/xen-blkfront.c
··· 250 250 struct blkfront_info *info) 251 251 { 252 252 struct grant *gnt_list_entry; 253 - unsigned long buffer_mfn; 253 + unsigned long buffer_gfn; 254 254 255 255 BUG_ON(list_empty(&info->grants)); 256 256 gnt_list_entry = list_first_entry(&info->grants, struct grant, ··· 269 269 BUG_ON(!pfn); 270 270 gnt_list_entry->pfn = pfn; 271 271 } 272 - buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn); 272 + buffer_gfn = pfn_to_gfn(gnt_list_entry->pfn); 273 273 gnttab_grant_foreign_access_ref(gnt_list_entry->gref, 274 274 info->xbdev->otherend_id, 275 - buffer_mfn, 0); 275 + buffer_gfn, 0); 276 276 return gnt_list_entry; 277 277 } 278 278
+2 -2
drivers/input/misc/xen-kbdfront.c
··· 232 232 struct xenbus_transaction xbt; 233 233 234 234 ret = gnttab_grant_foreign_access(dev->otherend_id, 235 - virt_to_mfn(info->page), 0); 235 + virt_to_gfn(info->page), 0); 236 236 if (ret < 0) 237 237 return ret; 238 238 info->gref = ret; ··· 255 255 goto error_irqh; 256 256 } 257 257 ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu", 258 - virt_to_mfn(info->page)); 258 + virt_to_gfn(info->page)); 259 259 if (ret) 260 260 goto error_xenbus; 261 261 ret = xenbus_printf(xbt, dev->nodename, "page-gref", "%u", info->gref);
+2 -2
drivers/net/xen-netback/netback.c
··· 314 314 } else { 315 315 copy_gop->source.domid = DOMID_SELF; 316 316 copy_gop->source.u.gmfn = 317 - virt_to_mfn(page_address(page)); 317 + virt_to_gfn(page_address(page)); 318 318 } 319 319 copy_gop->source.offset = offset; 320 320 ··· 1296 1296 queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset; 1297 1297 1298 1298 queue->tx_copy_ops[*copy_ops].dest.u.gmfn = 1299 - virt_to_mfn(skb->data); 1299 + virt_to_gfn(skb->data); 1300 1300 queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF; 1301 1301 queue->tx_copy_ops[*copy_ops].dest.offset = 1302 1302 offset_in_page(skb->data);
+7 -5
drivers/net/xen-netfront.c
··· 291 291 struct sk_buff *skb; 292 292 unsigned short id; 293 293 grant_ref_t ref; 294 - unsigned long pfn; 294 + unsigned long gfn; 295 295 struct xen_netif_rx_request *req; 296 296 297 297 skb = xennet_alloc_one_rx_buffer(queue); ··· 307 307 BUG_ON((signed short)ref < 0); 308 308 queue->grant_rx_ref[id] = ref; 309 309 310 - pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0])); 310 + gfn = xen_page_to_gfn(skb_frag_page(&skb_shinfo(skb)->frags[0])); 311 311 312 312 req = RING_GET_REQUEST(&queue->rx, req_prod); 313 313 gnttab_grant_foreign_access_ref(ref, 314 314 queue->info->xbdev->otherend_id, 315 - pfn_to_mfn(pfn), 315 + gfn, 316 316 0); 317 317 318 318 req->id = id; ··· 430 430 ref = gnttab_claim_grant_reference(&queue->gref_tx_head); 431 431 BUG_ON((signed short)ref < 0); 432 432 433 - gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, 434 - page_to_mfn(page), GNTMAP_readonly); 433 + gnttab_grant_foreign_access_ref(ref, 434 + queue->info->xbdev->otherend_id, 435 + xen_page_to_gfn(page), 436 + GNTMAP_readonly); 435 437 436 438 queue->tx_skbs[id].skb = skb; 437 439 queue->grant_tx_page[id] = page;
+5 -5
drivers/scsi/xen-scsifront.c
··· 377 377 unsigned int data_len = scsi_bufflen(sc); 378 378 unsigned int data_grants = 0, seg_grants = 0; 379 379 struct scatterlist *sg; 380 - unsigned long mfn; 381 380 struct scsiif_request_segment *seg; 382 381 383 382 ring_req->nr_segments = 0; ··· 419 420 ref = gnttab_claim_grant_reference(&gref_head); 420 421 BUG_ON(ref == -ENOSPC); 421 422 422 - mfn = pfn_to_mfn(page_to_pfn(page)); 423 423 gnttab_grant_foreign_access_ref(ref, 424 - info->dev->otherend_id, mfn, 1); 424 + info->dev->otherend_id, 425 + xen_page_to_gfn(page), 1); 425 426 shadow->gref[ref_cnt] = ref; 426 427 ring_req->seg[ref_cnt].gref = ref; 427 428 ring_req->seg[ref_cnt].offset = (uint16_t)off; ··· 453 454 ref = gnttab_claim_grant_reference(&gref_head); 454 455 BUG_ON(ref == -ENOSPC); 455 456 456 - mfn = pfn_to_mfn(page_to_pfn(page)); 457 457 gnttab_grant_foreign_access_ref(ref, 458 - info->dev->otherend_id, mfn, grant_ro); 458 + info->dev->otherend_id, 459 + xen_page_to_gfn(page), 460 + grant_ro); 459 461 460 462 shadow->gref[ref_cnt] = ref; 461 463 seg->gref = ref;
+3 -2
drivers/tty/hvc/hvc_xen.c
··· 265 265 return 0; 266 266 } 267 267 info->evtchn = xen_start_info->console.domU.evtchn; 268 - info->intf = mfn_to_virt(xen_start_info->console.domU.mfn); 268 + /* GFN == MFN for PV guest */ 269 + info->intf = gfn_to_virt(xen_start_info->console.domU.mfn); 269 270 info->vtermno = HVC_COOKIE; 270 271 271 272 spin_lock(&xencons_lock); ··· 391 390 if (IS_ERR(info->hvc)) 392 391 return PTR_ERR(info->hvc); 393 392 if (xen_pv_domain()) 394 - mfn = virt_to_mfn(info->intf); 393 + mfn = virt_to_gfn(info->intf); 395 394 else 396 395 mfn = __pa(info->intf) >> PAGE_SHIFT; 397 396 ret = gnttab_alloc_grant_references(1, &gref_head);
+2 -2
drivers/video/fbdev/xen-fbfront.c
··· 539 539 540 540 static unsigned long vmalloc_to_mfn(void *address) 541 541 { 542 - return pfn_to_mfn(vmalloc_to_pfn(address)); 542 + return pfn_to_gfn(vmalloc_to_pfn(address)); 543 543 } 544 544 545 545 static void xenfb_init_shared_page(struct xenfb_info *info, ··· 586 586 goto unbind_irq; 587 587 } 588 588 ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu", 589 - virt_to_mfn(info->page)); 589 + virt_to_gfn(info->page)); 590 590 if (ret) 591 591 goto error_xenbus; 592 592 ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
+1 -1
drivers/xen/balloon.c
··· 441 441 /* Update direct mapping, invalidate P2M, and add to balloon. */ 442 442 for (i = 0; i < nr_pages; i++) { 443 443 pfn = frame_list[i]; 444 - frame_list[i] = pfn_to_mfn(pfn); 444 + frame_list[i] = pfn_to_gfn(pfn); 445 445 page = pfn_to_page(pfn); 446 446 447 447 #ifdef CONFIG_XEN_HAVE_PVMMU
+1 -1
drivers/xen/events/events_base.c
··· 1688 1688 struct physdev_pirq_eoi_gmfn eoi_gmfn; 1689 1689 1690 1690 pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO); 1691 - eoi_gmfn.gmfn = virt_to_mfn(pirq_eoi_map); 1691 + eoi_gmfn.gmfn = virt_to_gfn(pirq_eoi_map); 1692 1692 rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn); 1693 1693 /* TODO: No PVH support for PIRQ EOI */ 1694 1694 if (rc != 0) {
+2 -2
drivers/xen/events/events_fifo.c
··· 111 111 for (i = 0; i < EVTCHN_FIFO_MAX_QUEUES; i++) 112 112 q->head[i] = 0; 113 113 114 - init_control.control_gfn = virt_to_mfn(control_block); 114 + init_control.control_gfn = virt_to_gfn(control_block); 115 115 init_control.offset = 0; 116 116 init_control.vcpu = cpu; 117 117 ··· 167 167 /* Mask all events in this page before adding it. */ 168 168 init_array_page(array_page); 169 169 170 - expand_array.array_gfn = virt_to_mfn(array_page); 170 + expand_array.array_gfn = virt_to_gfn(array_page); 171 171 172 172 ret = HYPERVISOR_event_channel_op(EVTCHNOP_expand_array, &expand_array); 173 173 if (ret < 0)
+2 -1
drivers/xen/gntalloc.c
··· 142 142 143 143 /* Grant foreign access to the page. */ 144 144 rc = gnttab_grant_foreign_access(op->domid, 145 - pfn_to_mfn(page_to_pfn(gref->page)), readonly); 145 + xen_page_to_gfn(gref->page), 146 + readonly); 146 147 if (rc < 0) 147 148 goto undo; 148 149 gref_ids[i] = gref->gref_id = rc;
+1 -1
drivers/xen/manage.c
··· 80 80 * is resuming in a new domain. 81 81 */ 82 82 si->cancelled = HYPERVISOR_suspend(xen_pv_domain() 83 - ? virt_to_mfn(xen_start_info) 83 + ? virt_to_gfn(xen_start_info) 84 84 : 0); 85 85 86 86 xen_arch_post_suspend(si->cancelled);
+2 -2
drivers/xen/tmem.c
··· 131 131 static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid, 132 132 u32 index, unsigned long pfn) 133 133 { 134 - unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn; 134 + unsigned long gmfn = pfn_to_gfn(pfn); 135 135 136 136 return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index, 137 137 gmfn, 0, 0, 0); ··· 140 140 static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid, 141 141 u32 index, unsigned long pfn) 142 142 { 143 - unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn; 143 + unsigned long gmfn = pfn_to_gfn(pfn); 144 144 145 145 return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index, 146 146 gmfn, 0, 0, 0);
+1 -1
drivers/xen/xenbus/xenbus_client.c
··· 380 380 381 381 for (i = 0; i < nr_pages; i++) { 382 382 err = gnttab_grant_foreign_access(dev->otherend_id, 383 - virt_to_mfn(vaddr), 0); 383 + virt_to_gfn(vaddr), 0); 384 384 if (err < 0) { 385 385 xenbus_dev_fatal(dev, err, 386 386 "granting access to ring page");
+1 -1
drivers/xen/xenbus/xenbus_dev_backend.c
··· 49 49 goto out_err; 50 50 51 51 gnttab_grant_foreign_access_ref(GNTTAB_RESERVED_XENSTORE, domid, 52 - virt_to_mfn(xen_store_interface), 0 /* writable */); 52 + virt_to_gfn(xen_store_interface), 0 /* writable */); 53 53 54 54 arg.dom = DOMID_SELF; 55 55 arg.remote_dom = domid;
+3 -5
drivers/xen/xenbus/xenbus_probe.c
··· 711 711 if (!page) 712 712 goto out_err; 713 713 714 - xen_store_mfn = xen_start_info->store_mfn = 715 - pfn_to_mfn(virt_to_phys((void *)page) >> 716 - PAGE_SHIFT); 714 + xen_store_mfn = xen_start_info->store_mfn = virt_to_gfn((void *)page); 717 715 718 716 /* Next allocate a local port which xenstored can bind to */ 719 717 alloc_unbound.dom = DOMID_SELF; ··· 785 787 err = xenstored_local_init(); 786 788 if (err) 787 789 goto out_error; 788 - xen_store_interface = mfn_to_virt(xen_store_mfn); 790 + xen_store_interface = gfn_to_virt(xen_store_mfn); 789 791 break; 790 792 case XS_PV: 791 793 xen_store_evtchn = xen_start_info->store_evtchn; 792 794 xen_store_mfn = xen_start_info->store_mfn; 793 - xen_store_interface = mfn_to_virt(xen_store_mfn); 795 + xen_store_interface = gfn_to_virt(xen_store_mfn); 794 796 break; 795 797 case XS_HVM: 796 798 err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
+2 -2
include/xen/page.h
··· 3 3 4 4 #include <asm/xen/page.h> 5 5 6 - static inline unsigned long page_to_mfn(struct page *page) 6 + static inline unsigned long xen_page_to_gfn(struct page *page) 7 7 { 8 - return pfn_to_mfn(page_to_pfn(page)); 8 + return pfn_to_gfn(page_to_pfn(page)); 9 9 } 10 10 11 11 struct xen_memory_region {