Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: Use page_private()/set_page_private() apis

Besides using an established api, this allows using kvm in older kernels.

Signed-off-by: Markus Rechberger <markus.rechberger@amd.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>

Authored by Markus Rechberger and committed by Avi Kivity
5972e953 9d8f549d

+20 -20
+1 -1
drivers/kvm/kvm.h
··· 523 { 524 struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT); 525 526 - return (struct kvm_mmu_page *)page->private; 527 } 528 529 static inline u16 read_fs(void)
··· 523 { 524 struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT); 525 526 + return (struct kvm_mmu_page *)page_private(page); 527 } 528 529 static inline u16 read_fs(void)
+1 -1
drivers/kvm/kvm_main.c
··· 670 | __GFP_ZERO); 671 if (!new.phys_mem[i]) 672 goto out_free; 673 - new.phys_mem[i]->private = 0; 674 } 675 } 676
··· 670 | __GFP_ZERO); 671 if (!new.phys_mem[i]) 672 goto out_free; 673 + set_page_private(new.phys_mem[i],0); 674 } 675 } 676
+18 -18
drivers/kvm/mmu.c
··· 298 if (!is_rmap_pte(*spte)) 299 return; 300 page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT); 301 - if (!page->private) { 302 rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte); 303 - page->private = (unsigned long)spte; 304 - } else if (!(page->private & 1)) { 305 rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte); 306 desc = mmu_alloc_rmap_desc(vcpu); 307 - desc->shadow_ptes[0] = (u64 *)page->private; 308 desc->shadow_ptes[1] = spte; 309 - page->private = (unsigned long)desc | 1; 310 } else { 311 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte); 312 - desc = (struct kvm_rmap_desc *)(page->private & ~1ul); 313 while (desc->shadow_ptes[RMAP_EXT-1] && desc->more) 314 desc = desc->more; 315 if (desc->shadow_ptes[RMAP_EXT-1]) { ··· 337 if (j != 0) 338 return; 339 if (!prev_desc && !desc->more) 340 - page->private = (unsigned long)desc->shadow_ptes[0]; 341 else 342 if (prev_desc) 343 prev_desc->more = desc->more; 344 else 345 - page->private = (unsigned long)desc->more | 1; 346 mmu_free_rmap_desc(vcpu, desc); 347 } 348 ··· 356 if (!is_rmap_pte(*spte)) 357 return; 358 page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT); 359 - if (!page->private) { 360 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte); 361 BUG(); 362 - } else if (!(page->private & 1)) { 363 rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte); 364 - if ((u64 *)page->private != spte) { 365 printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n", 366 spte, *spte); 367 BUG(); 368 } 369 - page->private = 0; 370 } else { 371 rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte); 372 - desc = (struct kvm_rmap_desc *)(page->private & ~1ul); 373 prev_desc = NULL; 374 while (desc) { 375 for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) ··· 398 BUG_ON(!slot); 399 page = gfn_to_page(slot, gfn); 400 401 - while (page->private) { 402 - if (!(page->private & 1)) 403 - spte = (u64 *)page->private; 404 else { 405 - desc = (struct kvm_rmap_desc 
*)(page->private & ~1ul); 406 spte = desc->shadow_ptes[0]; 407 } 408 BUG_ON(!spte); ··· 1218 INIT_LIST_HEAD(&page_header->link); 1219 if ((page = alloc_page(GFP_KERNEL)) == NULL) 1220 goto error_1; 1221 - page->private = (unsigned long)page_header; 1222 page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT; 1223 memset(__va(page_header->page_hpa), 0, PAGE_SIZE); 1224 list_add(&page_header->link, &vcpu->free_pages);
··· 298 if (!is_rmap_pte(*spte)) 299 return; 300 page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT); 301 + if (!page_private(page)) { 302 rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte); 303 + set_page_private(page,(unsigned long)spte); 304 + } else if (!(page_private(page) & 1)) { 305 rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte); 306 desc = mmu_alloc_rmap_desc(vcpu); 307 + desc->shadow_ptes[0] = (u64 *)page_private(page); 308 desc->shadow_ptes[1] = spte; 309 + set_page_private(page,(unsigned long)desc | 1); 310 } else { 311 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte); 312 + desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul); 313 while (desc->shadow_ptes[RMAP_EXT-1] && desc->more) 314 desc = desc->more; 315 if (desc->shadow_ptes[RMAP_EXT-1]) { ··· 337 if (j != 0) 338 return; 339 if (!prev_desc && !desc->more) 340 + set_page_private(page,(unsigned long)desc->shadow_ptes[0]); 341 else 342 if (prev_desc) 343 prev_desc->more = desc->more; 344 else 345 + set_page_private(page,(unsigned long)desc->more | 1); 346 mmu_free_rmap_desc(vcpu, desc); 347 } 348 ··· 356 if (!is_rmap_pte(*spte)) 357 return; 358 page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT); 359 + if (!page_private(page)) { 360 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte); 361 BUG(); 362 + } else if (!(page_private(page) & 1)) { 363 rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte); 364 + if ((u64 *)page_private(page) != spte) { 365 printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n", 366 spte, *spte); 367 BUG(); 368 } 369 + set_page_private(page,0); 370 } else { 371 rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte); 372 + desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul); 373 prev_desc = NULL; 374 while (desc) { 375 for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) ··· 398 BUG_ON(!slot); 399 page = gfn_to_page(slot, gfn); 400 401 + while (page_private(page)) { 402 + if (!(page_private(page) & 1)) 403 + spte = (u64 *)page_private(page); 404 else { 405 + desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul); 406 spte = desc->shadow_ptes[0]; 407 } 408 BUG_ON(!spte); ··· 1218 INIT_LIST_HEAD(&page_header->link); 1219 if ((page = alloc_page(GFP_KERNEL)) == NULL) 1220 goto error_1; 1221 + set_page_private(page, (unsigned long)page_header); 1222 page_header->page_hpa = (hpa_t)page_to_pfn(page) << PAGE_SHIFT; 1223 memset(__va(page_header->page_hpa), 0, PAGE_SIZE); 1224 list_add(&page_header->link, &vcpu->free_pages);