Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xen: privcmd: support autotranslated physmap guests.

PVH and ARM only support the batch interface. To map a foreign page to
a process, the PFN must be allocated and the autotranslated path uses
ballooning for that purpose.

The returned PFN is then mapped to the foreign page.
xen_unmap_domain_mfn_range() is introduced to unmap these pages via the
privcmd close call.

Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
[v1: Fix up privcmd_close]
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
[v2: used for ARM too]

Authored by Mukesh Rathor; committed by Ian Campbell.
d71f5139 9a032e39

+67 -2
+67 -2
drivers/xen/privcmd.c
··· 33 33 #include <xen/features.h> 34 34 #include <xen/page.h> 35 35 #include <xen/xen-ops.h> 36 + #include <xen/balloon.h> 36 37 37 38 #include "privcmd.h" 38 39 39 40 MODULE_LICENSE("GPL"); 41 + 42 + #define PRIV_VMA_LOCKED ((void *)1) 40 43 41 44 #ifndef HAVE_ARCH_PRIVCMD_MMAP 42 45 static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma); ··· 202 199 if (!xen_initial_domain()) 203 200 return -EPERM; 204 201 202 + /* We only support privcmd_ioctl_mmap_batch for auto translated. */ 203 + if (xen_feature(XENFEAT_auto_translated_physmap)) 204 + return -ENOSYS; 205 + 205 206 if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd))) 206 207 return -EFAULT; 207 208 ··· 253 246 domid_t domain; 254 247 unsigned long va; 255 248 struct vm_area_struct *vma; 249 + int index; 256 250 /* A tristate: 257 251 * 0 for no errors 258 252 * 1 if at least one error has happened (and no ··· 268 260 xen_pfn_t __user *user_mfn; 269 261 }; 270 262 263 + /* auto translated dom0 note: if domU being created is PV, then mfn is 264 + * mfn(addr on bus). If it's auto xlated, then mfn is pfn (input to HAP). 265 + */ 271 266 static int mmap_batch_fn(void *data, void *state) 272 267 { 273 268 xen_pfn_t *mfnp = data; 274 269 struct mmap_batch_state *st = state; 270 + struct vm_area_struct *vma = st->vma; 271 + struct page **pages = vma->vm_private_data; 272 + struct page *cur_page = NULL; 275 273 int ret; 274 + 275 + if (xen_feature(XENFEAT_auto_translated_physmap)) 276 + cur_page = pages[st->index++]; 276 277 277 278 ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1, 278 279 st->vma->vm_page_prot, st->domain, 279 - NULL); 280 + &cur_page); 280 281 281 282 /* Store error code for second pass. */ 282 283 *(st->err++) = ret; ··· 319 302 PRIVCMD_MMAPBATCH_PAGED_ERROR : 320 303 PRIVCMD_MMAPBATCH_MFN_ERROR; 321 304 return __put_user(*mfnp, st->user_mfn++); 305 + } 306 + 307 + /* Allocate pfns that are then mapped with gmfns from foreign domid. 
Update 308 + * the vma with the page info to use later. 309 + * Returns: 0 if success, otherwise -errno 310 + */ 311 + static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs) 312 + { 313 + int rc; 314 + struct page **pages; 315 + 316 + pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL); 317 + if (pages == NULL) 318 + return -ENOMEM; 319 + 320 + rc = alloc_xenballooned_pages(numpgs, pages, 0); 321 + if (rc != 0) { 322 + pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__, 323 + numpgs, rc); 324 + kfree(pages); 325 + return -ENOMEM; 326 + } 327 + BUG_ON(vma->vm_private_data != PRIV_VMA_LOCKED); 328 + vma->vm_private_data = pages; 329 + 330 + return 0; 322 331 } 323 332 324 333 static struct vm_operations_struct privcmd_vm_ops; ··· 414 371 up_write(&mm->mmap_sem); 415 372 goto out; 416 373 } 374 + if (xen_feature(XENFEAT_auto_translated_physmap)) { 375 + ret = alloc_empty_pages(vma, m.num); 376 + if (ret < 0) { 377 + up_write(&mm->mmap_sem); 378 + goto out; 379 + } 380 + } 417 381 418 382 state.domain = m.dom; 419 383 state.vma = vma; 420 384 state.va = m.addr; 385 + state.index = 0; 421 386 state.global_error = 0; 422 387 state.err = err_array; 423 388 ··· 490 439 return ret; 491 440 } 492 441 442 + static void privcmd_close(struct vm_area_struct *vma) 443 + { 444 + struct page **pages = vma->vm_private_data; 445 + int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 446 + 447 + if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages) 448 + return; 449 + 450 + xen_unmap_domain_mfn_range(vma, numpgs, pages); 451 + free_xenballooned_pages(numpgs, pages); 452 + kfree(pages); 453 + } 454 + 493 455 static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 494 456 { 495 457 printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n", ··· 513 449 } 514 450 515 451 static struct vm_operations_struct privcmd_vm_ops = { 452 + .close = privcmd_close, 516 453 .fault = privcmd_fault 517 454 }; 518 455 ··· 531 466 
532 467 static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma) 533 468 { 534 - return (xchg(&vma->vm_private_data, (void *)1) == NULL); 469 + return !cmpxchg(&vma->vm_private_data, NULL, PRIV_VMA_LOCKED); 535 470 } 536 471 537 472 const struct file_operations xen_privcmd_fops = {