/*P:700
 * The pagetable code, on the other hand, still shows the scars of
 * previous encounters.  It's functional, and as neat as it can be in the
 * circumstances, but be wary, for these things are subtle and break easily.
 * The Guest provides a virtual to physical mapping, but we can neither trust
 * it nor use it: we verify and convert it here then point the CPU to the
 * converted Guest pages when running the Guest.
:*/

/* Copyright (C) Rusty Russell IBM Corporation 2006.
 * GPL v2 and any later version */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/percpu.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/bootparam.h>
#include "lg.h"

/*M:008
 * We hold a reference to pages, which prevents them from being swapped.
 * It'd be nice to have a callback in the "struct mm_struct" when Linux wants
 * to swap out.  If we had this, and a shrinker callback to trim PTE pages, we
 * could probably consider launching Guests as non-root.
:*/

/*H:300
 * The Page Table Code
 *
 * We use two-level page tables for the Guest, or three-level with PAE.  If
 * you're not entirely comfortable with virtual addresses, physical addresses
 * and page tables then I recommend you review arch/x86/lguest/boot.c's "Page
 * Table Handling" (with diagrams!).
 *
 * The Guest keeps page tables, but we maintain the actual ones here: these are
 * called "shadow" page tables.  Which is a very Guest-centric name: these are
 * the real page tables the CPU uses, although we keep them up to date to
 * reflect the Guest's.  (See what I mean about weird naming?  Since when do
 * shadows reflect anything?)
 *
 * Anyway, this is the most complicated part of the Host code.  There are seven
 * parts to this:
 *  (i) Looking up a page table entry when the Guest faults,
 *  (ii) Making sure the Guest stack is mapped,
 *  (iii) Setting up a page table entry when the Guest tells us one has changed,
 *  (iv) Switching page tables,
 *  (v) Flushing (throwing away) page tables,
 *  (vi) Mapping the Switcher when the Guest is about to run,
 *  (vii) Setting up the page tables initially.
:*/

/*
 * The Switcher uses the complete top PTE page.  That's 1024 PTE entries (4MB)
 * or 512 PTE entries with PAE (2MB).
 */
#define SWITCHER_PGD_INDEX	(PTRS_PER_PGD - 1)

/*
 * For PAE we need the PMD index as well.  We use the last 2MB, so we
 * will need the last pmd entry of the last pmd page.
 */
#ifdef CONFIG_X86_PAE
#define SWITCHER_PMD_INDEX	(PTRS_PER_PMD - 1)
#define RESERVE_MEM		2U
#define CHECK_GPGD_MASK		_PAGE_PRESENT
#else
#define RESERVE_MEM		4U
#define CHECK_GPGD_MASK		_PAGE_TABLE
#endif

/*
 * We actually need a separate PTE page for each CPU.  Remember that after the
 * Switcher code itself comes two pages for each CPU, and we don't want this
 * CPU's guest to see the pages of any other CPU.
 */
static DEFINE_PER_CPU(pte_t *, switcher_pte_pages);
#define switcher_pte_page(cpu)	per_cpu(switcher_pte_pages, cpu)
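
/*
 * Before diving into the helpers, a purely illustrative, compiled-out
 * sketch (not part of the driver): how a 32-bit virtual address splits
 * up in the two-level, non-PAE case.  With 4kB pages the top 10 bits
 * pick the PGD entry, the next 10 bits pick the PTE within that page,
 * and the bottom 12 bits are the offset into the page.
 */
#if 0
static unsigned long example_split_vaddr(unsigned long vaddr)
{
	unsigned int pgd_i = vaddr >> PGDIR_SHIFT;	/* top 10 bits */
	unsigned int pte_i = (vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	unsigned long off = vaddr & ~PAGE_MASK;		/* low 12 bits */

	/* e.g. 0xC0101234 -> pgd 0x300, pte 0x101, offset 0x234 */
	return ((unsigned long)pgd_i << PGDIR_SHIFT)
		| ((unsigned long)pte_i << PAGE_SHIFT) | off; /* == vaddr */
}
#endif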

/*H:320
 * The page table code is curly enough to need helper functions to keep it
 * clear and clean.  The kernel itself provides many of them; one advantage
 * of insisting that the Guest and Host use the same CONFIG_PAE setting.
 *
 * There are two functions which return pointers to the shadow (aka "real")
 * page tables.
 *
 * spgd_addr() takes the virtual address and returns a pointer to the top-level
 * page directory entry (PGD) for that address.  Since we keep track of several
 * page tables, the "i" argument tells us which one we're interested in (it's
 * usually the current one).
 */
static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)
{
	unsigned int index = pgd_index(vaddr);

#ifndef CONFIG_X86_PAE
	/* We kill any Guest trying to touch the Switcher addresses. */
	if (index >= SWITCHER_PGD_INDEX) {
		kill_guest(cpu, "attempt to access switcher pages");
		index = 0;
	}
#endif
	/* Return a pointer to the index'th pgd entry for the i'th page table. */
	return &cpu->lg->pgdirs[i].pgdir[index];
}

#ifdef CONFIG_X86_PAE
/*
 * This routine then takes the PGD entry given above, which contains the
 * address of the PMD page.  It then returns a pointer to the PMD entry for the
 * given address.
 */
static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
{
	unsigned int index = pmd_index(vaddr);
	pmd_t *page;

	/* We kill any Guest trying to touch the Switcher addresses. */
	if (pgd_index(vaddr) == SWITCHER_PGD_INDEX &&
	    index >= SWITCHER_PMD_INDEX) {
		kill_guest(cpu, "attempt to access switcher pages");
		index = 0;
	}

	/* You should never call this if the PGD entry wasn't valid */
	BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
	page = __va(pgd_pfn(spgd) << PAGE_SHIFT);

	return &page[index];
}
#endif

/*
 * This routine then takes the page directory entry returned above, which
 * contains the address of the page table entry (PTE) page.  It then returns a
 * pointer to the PTE entry for the given address.
 */
static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
{
#ifdef CONFIG_X86_PAE
	pmd_t *pmd = spmd_addr(cpu, spgd, vaddr);
	pte_t *page = __va(pmd_pfn(*pmd) << PAGE_SHIFT);

	/* You should never call this if the PMD entry wasn't valid */
	BUG_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT));
#else
	pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
	/* You should never call this if the PGD entry wasn't valid */
	BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
#endif

	return &page[pte_index(vaddr)];
}

/*
 * These functions are just like the above two, except they access the Guest
 * page tables.  Hence they return a Guest address.
 */
static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
{
	unsigned int index = vaddr >> PGDIR_SHIFT;
	return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t);
}

#ifdef CONFIG_X86_PAE
/* Follow the PGD to the PMD. */
static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr)
{
	unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
	BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
	return gpage + pmd_index(vaddr) * sizeof(pmd_t);
}

/* Follow the PMD to the PTE. */
static unsigned long gpte_addr(struct lg_cpu *cpu,
			       pmd_t gpmd, unsigned long vaddr)
{
	unsigned long gpage = pmd_pfn(gpmd) << PAGE_SHIFT;

	BUG_ON(!(pmd_flags(gpmd) & _PAGE_PRESENT));
	return gpage + pte_index(vaddr) * sizeof(pte_t);
}
#else
/* Follow the PGD to the PTE (no mid-level for !PAE). */
static unsigned long gpte_addr(struct lg_cpu *cpu,
			       pgd_t gpgd, unsigned long vaddr)
{
	unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;

	BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
	return gpage + pte_index(vaddr) * sizeof(pte_t);
}
#endif
/*:*/
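
/*
 * The asymmetry above is worth spelling out with a compiled-out sketch
 * (non-PAE shapes, and it assumes the entries are present; the real
 * callers check first): the s*_addr() helpers hand back kernel pointers
 * we can simply dereference, while the g*_addr() helpers hand back
 * addresses in Guest memory which must go through lgread()/lgwrite().
 */
#if 0
static pte_t example_read_both(struct lg_cpu *cpu, unsigned long vaddr)
{
	/* Shadow walk: real kernel pointers, plain dereference. */
	pgd_t *spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
	pte_t spte = *spte_addr(cpu, *spgd, vaddr);

	/* Guest walk: addresses in Guest memory, read via lgread(). */
	pgd_t gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
	pte_t gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t);

	(void)spte;
	return gpte;
}
#endif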

/*M:014
 * get_pfn is slow: we could probably try to grab batches of pages here as
 * an optimization (ie. pre-faulting).
:*/

/*H:350
 * This routine takes a page number given by the Guest and converts it to
 * an actual, physical page number.  It can fail for several reasons: the
 * virtual address might not be mapped by the Launcher, the write flag is set
 * and the page is read-only, or the write flag was set and the page was
 * shared so had to be copied, but we ran out of memory.
 *
 * This holds a reference to the page, so release_pte() is careful to put that
 * back.
 */
static unsigned long get_pfn(unsigned long virtpfn, int write)
{
	struct page *page;

	/* gup me one page at this address please! */
	if (get_user_pages_fast(virtpfn << PAGE_SHIFT, 1, write, &page) == 1)
		return page_to_pfn(page);

	/* This value indicates failure. */
	return -1UL;
}

/*H:340
 * Converting a Guest page table entry to a shadow (ie. real) page table
 * entry can be a little tricky.  The flags are (almost) the same, but the
 * Guest PTE contains a virtual page number: the CPU needs the real page
 * number.
 */
static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write)
{
	unsigned long pfn, base, flags;

	/*
	 * The Guest sets the global flag, because it thinks that it is using
	 * PGE.  We only told it to use PGE so it would tell us whether it was
	 * flushing a kernel mapping or a userspace mapping.  We don't actually
	 * use the global bit, so throw it away.
	 */
	flags = (pte_flags(gpte) & ~_PAGE_GLOBAL);

	/* The Guest's pages are offset inside the Launcher. */
	base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE;

	/*
	 * We need a temporary "unsigned long" variable to hold the answer from
	 * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't
	 * fit in spte.pfn.  get_pfn() finds the real physical number of the
	 * page, given the virtual number.
	 */
	pfn = get_pfn(base + pte_pfn(gpte), write);
	if (pfn == -1UL) {
		kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte));
		/*
		 * When we destroy the Guest, we'll go through the shadow page
		 * tables and release_pte() them.  Make sure we don't think
		 * this one is valid!
		 */
		flags = 0;
	}
	/* Now we assemble our shadow PTE from the page number and flags. */
	return pfn_pte(pfn, __pgprot(flags));
}
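
/*
 * A worked example of that translation, with made-up numbers (this is
 * illustration, not part of the driver): suppose the Launcher mapped
 * Guest memory at its virtual address 0x40000000, so base ==
 * 0x40000000/PAGE_SIZE == 0x40000.  A Guest PTE naming guest-physical
 * frame 5 really refers to the Launcher page at virtual frame
 * 0x40000 + 5 == 0x40005, so get_pfn(0x40005, write) pins that page
 * and returns the hardware frame number the shadow PTE needs.
 */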

/*H:460 And to complete the chain, release_pte() looks like this: */
static void release_pte(pte_t pte)
{
	/*
	 * Remember that get_user_pages_fast() took a reference to the page, in
	 * get_pfn()?  We have to put it back now.
	 */
	if (pte_flags(pte) & _PAGE_PRESENT)
		put_page(pte_page(pte));
}
/*:*/

static void check_gpte(struct lg_cpu *cpu, pte_t gpte)
{
	if ((pte_flags(gpte) & _PAGE_PSE) ||
	    pte_pfn(gpte) >= cpu->lg->pfn_limit)
		kill_guest(cpu, "bad page table entry");
}

static void check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
{
	if ((pgd_flags(gpgd) & ~CHECK_GPGD_MASK) ||
	    (pgd_pfn(gpgd) >= cpu->lg->pfn_limit))
		kill_guest(cpu, "bad page directory entry");
}

#ifdef CONFIG_X86_PAE
static void check_gpmd(struct lg_cpu *cpu, pmd_t gpmd)
{
	if ((pmd_flags(gpmd) & ~_PAGE_TABLE) ||
	    (pmd_pfn(gpmd) >= cpu->lg->pfn_limit))
		kill_guest(cpu, "bad page middle directory entry");
}
#endif
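
/*
 * Why all three guards exist: a frame number at or above pfn_limit
 * would reach past the Guest's memory into the Launcher itself (the
 * check in demand_page() below spells this out for PTEs), and a
 * _PAGE_PSE "4MB page" entry would bypass the PTE level we verify, so
 * we refuse it outright.
 */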
383 */ 384 set_pmd(spmd, __pmd(__pa(ptepage) | pmd_flags(gpmd))); 385 } 386 387 /* 388 * OK, now we look at the lower level in the Guest page table: keep its 389 * address, because we might update it later. 390 */ 391 gpte_ptr = gpte_addr(cpu, gpmd, vaddr); 392#else 393 /* 394 * OK, now we look at the lower level in the Guest page table: keep its 395 * address, because we might update it later. 396 */ 397 gpte_ptr = gpte_addr(cpu, gpgd, vaddr); 398#endif 399 400 /* Read the actual PTE value. */ 401 gpte = lgread(cpu, gpte_ptr, pte_t); 402 403 /* If this page isn't in the Guest page tables, we can't page it in. */ 404 if (!(pte_flags(gpte) & _PAGE_PRESENT)) 405 return false; 406 407 /* 408 * Check they're not trying to write to a page the Guest wants 409 * read-only (bit 2 of errcode == write). 410 */ 411 if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW)) 412 return false; 413 414 /* User access to a kernel-only page? (bit 3 == user access) */ 415 if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER)) 416 return false; 417 418 /* 419 * Check that the Guest PTE flags are OK, and the page number is below 420 * the pfn_limit (ie. not mapping the Launcher binary). 421 */ 422 check_gpte(cpu, gpte); 423 424 /* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */ 425 gpte = pte_mkyoung(gpte); 426 if (errcode & 2) 427 gpte = pte_mkdirty(gpte); 428 429 /* Get the pointer to the shadow PTE entry we're going to set. */ 430 spte = spte_addr(cpu, *spgd, vaddr); 431 432 /* 433 * If there was a valid shadow PTE entry here before, we release it. 434 * This can happen with a write to a previously read-only entry. 435 */ 436 release_pte(*spte); 437 438 /* 439 * If this is a write, we insist that the Guest page is writable (the 440 * final arg to gpte_to_spte()). 441 */ 442 if (pte_dirty(gpte)) 443 *spte = gpte_to_spte(cpu, gpte, 1); 444 else 445 /* 446 * If this is a read, don't set the "writable" bit in the page 447 * table entry, even if the Guest says it's writable. That way 448 * we will come back here when a write does actually occur, so 449 * we can update the Guest's _PAGE_DIRTY flag. 450 */ 451 set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0)); 452 453 /* 454 * Finally, we write the Guest PTE entry back: we've set the 455 * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. 456 */ 457 lgwrite(cpu, gpte_ptr, pte_t, gpte); 458 459 /* 460 * The fault is fixed, the page table is populated, the mapping 461 * manipulated, the result returned and the code complete. A small 462 * delay and a trace of alliteration are the only indications the Guest 463 * has that a page fault occurred at all. 464 */ 465 return true; 466} 467 468/*H:360 469 * (ii) Making sure the Guest stack is mapped. 470 * 471 * Remember that direct traps into the Guest need a mapped Guest kernel stack. 472 * pin_stack_pages() calls us here: we could simply call demand_page(), but as 473 * we've seen that logic is quite long, and usually the stack pages are already 474 * mapped, so it's overkill. 475 * 476 * This is a quick version which answers the question: is this virtual address 477 * mapped by the shadow page tables, and is it writable? 478 */ 479static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr) 480{ 481 pgd_t *spgd; 482 unsigned long flags; 483 484#ifdef CONFIG_X86_PAE 485 pmd_t *spmd; 486#endif 487 /* Look at the current top level entry: is it present? 

/*H:360
 * (ii) Making sure the Guest stack is mapped.
 *
 * Remember that direct traps into the Guest need a mapped Guest kernel stack.
 * pin_stack_pages() calls us here: we could simply call demand_page(), but as
 * we've seen that logic is quite long, and usually the stack pages are already
 * mapped, so it's overkill.
 *
 * This is a quick version which answers the question: is this virtual address
 * mapped by the shadow page tables, and is it writable?
 */
static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr)
{
	pgd_t *spgd;
	unsigned long flags;

#ifdef CONFIG_X86_PAE
	pmd_t *spmd;
#endif
	/* Look at the current top level entry: is it present? */
	spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
	if (!(pgd_flags(*spgd) & _PAGE_PRESENT))
		return false;

#ifdef CONFIG_X86_PAE
	spmd = spmd_addr(cpu, *spgd, vaddr);
	if (!(pmd_flags(*spmd) & _PAGE_PRESENT))
		return false;
#endif

	/*
	 * Check the flags on the pte entry itself: it must be present and
	 * writable.
	 */
	flags = pte_flags(*(spte_addr(cpu, *spgd, vaddr)));

	return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
}

/*
 * So, when pin_stack_pages() asks us to pin a page, we check if it's already
 * in the page tables, and if not, we call demand_page() with error code 2
 * (meaning "write").
 */
void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
{
	if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2))
		kill_guest(cpu, "bad stack page %#lx", vaddr);
}
/*:*/

#ifdef CONFIG_X86_PAE
static void release_pmd(pmd_t *spmd)
{
	/* If the entry's not present, there's nothing to release. */
	if (pmd_flags(*spmd) & _PAGE_PRESENT) {
		unsigned int i;
		pte_t *ptepage = __va(pmd_pfn(*spmd) << PAGE_SHIFT);
		/* For each entry in the page, we might need to release it. */
		for (i = 0; i < PTRS_PER_PTE; i++)
			release_pte(ptepage[i]);
		/* Now we can free the page of PTEs */
		free_page((long)ptepage);
		/* And zero out the PMD entry so we never release it twice. */
		set_pmd(spmd, __pmd(0));
	}
}

static void release_pgd(pgd_t *spgd)
{
	/* If the entry's not present, there's nothing to release. */
	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
		unsigned int i;
		pmd_t *pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);

		for (i = 0; i < PTRS_PER_PMD; i++)
			release_pmd(&pmdpage[i]);

		/* Now we can free the page of PMDs */
		free_page((long)pmdpage);
		/* And zero out the PGD entry so we never release it twice. */
		set_pgd(spgd, __pgd(0));
	}
}

#else /* !CONFIG_X86_PAE */
/*H:450
 * If we chase down the release_pgd() code, the non-PAE version looks like
 * this.  The PAE version is almost identical, but instead of calling
 * release_pte() it calls release_pmd(), which looks much like this.
 */
static void release_pgd(pgd_t *spgd)
{
	/* If the entry's not present, there's nothing to release. */
	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
		unsigned int i;
		/*
		 * Converting the pfn to find the actual PTE page is easy: turn
		 * the page number into a physical address, then convert to a
		 * virtual address (easy for kernel pages like this one).
		 */
		pte_t *ptepage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
		/* For each entry in the page, we might need to release it. */
		for (i = 0; i < PTRS_PER_PTE; i++)
			release_pte(ptepage[i]);
		/* Now we can free the page of PTEs */
		free_page((long)ptepage);
		/* And zero out the PGD entry so we never release it twice. */
		*spgd = __pgd(0);
	}
}
#endif

/*H:445
 * We saw flush_user_mappings() twice: once from the flush_user_mappings()
 * hypercall and once in new_pgdir() when we re-used a top-level pgdir page.
 * It simply releases every PTE page from 0 up to the Guest's kernel address.
 */
static void flush_user_mappings(struct lguest *lg, int idx)
{
	unsigned int i;
	/* Release every pgd entry up to the kernel's address. */
	for (i = 0; i < pgd_index(lg->kernel_address); i++)
		release_pgd(lg->pgdirs[idx].pgdir + i);
}
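
/*
 * To make that loop bound concrete (illustrative numbers, not from the
 * code): with the usual 32-bit PAGE_OFFSET of 0xC0000000 and non-PAE
 * 4MB-per-entry PGDs, pgd_index(kernel_address) is 0xC0000000 >> 22 ==
 * 768, so the loop above releases the 768 user entries and never
 * touches the kernel entries (nor the Switcher slot at 1023).
 */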

/*H:440
 * (v) Flushing (throwing away) page tables,
 *
 * The Guest has a hypercall to throw away the page tables: it's used when a
 * large number of mappings have been changed.
 */
void guest_pagetable_flush_user(struct lg_cpu *cpu)
{
	/* Drop the userspace part of the current page table. */
	flush_user_mappings(cpu->lg, cpu->cpu_pgd);
}
/*:*/

/* We walk down the Guest page tables to get a guest-physical address. */
unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
{
	pgd_t gpgd;
	pte_t gpte;
#ifdef CONFIG_X86_PAE
	pmd_t gpmd;
#endif
	/* First step: get the top-level Guest page table entry. */
	gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
	/* Toplevel not present?  We can't map it in. */
	if (!(pgd_flags(gpgd) & _PAGE_PRESENT)) {
		kill_guest(cpu, "Bad address %#lx", vaddr);
		return -1UL;
	}

#ifdef CONFIG_X86_PAE
	gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
	if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
		kill_guest(cpu, "Bad address %#lx", vaddr);
	gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t);
#else
	gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t);
#endif
	if (!(pte_flags(gpte) & _PAGE_PRESENT))
		kill_guest(cpu, "Bad address %#lx", vaddr);

	return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
}
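
/*
 * A worked example of that last line, with made-up numbers (not part
 * of the driver): if vaddr is 0xC0101234 and the walk ends in a Guest
 * PTE naming frame 0x1234, the guest-physical address is
 *
 *	0x1234 * PAGE_SIZE | (0xC0101234 & ~PAGE_MASK)
 *	  == 0x01234000    |  0x234
 *	  == 0x01234234
 */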

/*
 * We keep several page tables.  This is a simple routine to find the page
 * table (if any) corresponding to this top-level address the Guest has given
 * us.
 */
static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
{
	unsigned int i;
	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
		if (lg->pgdirs[i].pgdir && lg->pgdirs[i].gpgdir == pgtable)
			break;
	return i;
}

/*H:435
 * And this is us, creating the new page directory.  If we really do
 * allocate a new one (and so the kernel parts are not there), we set
 * blank_pgdir.
 */
static unsigned int new_pgdir(struct lg_cpu *cpu,
			      unsigned long gpgdir,
			      int *blank_pgdir)
{
	unsigned int next;
#ifdef CONFIG_X86_PAE
	pmd_t *pmd_table;
#endif

	/*
	 * We pick one entry at random to throw out.  Choosing the Least
	 * Recently Used might be better, but this is easy.
	 */
	next = random32() % ARRAY_SIZE(cpu->lg->pgdirs);
	/* If it's never been allocated at all before, try now. */
	if (!cpu->lg->pgdirs[next].pgdir) {
		cpu->lg->pgdirs[next].pgdir =
					(pgd_t *)get_zeroed_page(GFP_KERNEL);
		/* If the allocation fails, just keep using the one we have */
		if (!cpu->lg->pgdirs[next].pgdir)
			next = cpu->cpu_pgd;
		else {
#ifdef CONFIG_X86_PAE
			/*
			 * In PAE mode, allocate a pmd page and populate the
			 * last pgd entry.
			 */
			pmd_table = (pmd_t *)get_zeroed_page(GFP_KERNEL);
			if (!pmd_table) {
				free_page((long)cpu->lg->pgdirs[next].pgdir);
				/* Don't leave a pointer to the freed page. */
				cpu->lg->pgdirs[next].pgdir = NULL;
				next = cpu->cpu_pgd;
			} else {
				set_pgd(cpu->lg->pgdirs[next].pgdir +
					SWITCHER_PGD_INDEX,
					__pgd(__pa(pmd_table) | _PAGE_PRESENT));
				/*
				 * This is a blank page, so there are no kernel
				 * mappings: caller must map the stack!
				 */
				*blank_pgdir = 1;
			}
#else
			*blank_pgdir = 1;
#endif
		}
	}
	/* Record which Guest toplevel this shadows. */
	cpu->lg->pgdirs[next].gpgdir = gpgdir;
	/* Release all the non-kernel mappings. */
	flush_user_mappings(cpu->lg, next);

	return next;
}

/*H:430
 * (iv) Switching page tables
 *
 * Now we've seen all the page table setting and manipulation, let's see
 * what happens when the Guest changes page tables (ie. changes the top-level
 * pgdir).  This occurs on almost every context switch.
 */
void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
{
	int newpgdir, repin = 0;

	/* Look to see if we have this one already. */
	newpgdir = find_pgdir(cpu->lg, pgtable);
	/*
	 * If not, we allocate or mug an existing one: if it's a fresh one,
	 * repin gets set to 1.
	 */
	if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs))
		newpgdir = new_pgdir(cpu, pgtable, &repin);
	/* Change the current pgd index to the new one. */
	cpu->cpu_pgd = newpgdir;
	/* If it was completely blank, we map in the Guest kernel stack */
	if (repin)
		pin_stack_pages(cpu);
}

/*H:470
 * Finally, a routine which throws away everything: all PGD entries in all
 * the shadow page tables, including the Guest's kernel mappings.  This is used
 * when we destroy the Guest.
 */
static void release_all_pagetables(struct lguest *lg)
{
	unsigned int i, j;

	/* Every shadow pagetable this Guest has */
	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
		if (lg->pgdirs[i].pgdir) {
#ifdef CONFIG_X86_PAE
			pgd_t *spgd;
			pmd_t *pmdpage;
			unsigned int k;

			/* Get the last pmd page. */
			spgd = lg->pgdirs[i].pgdir + SWITCHER_PGD_INDEX;
			pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);

			/*
			 * And release the pmd entries of that pmd page,
			 * except for the switcher pmd.
			 */
			for (k = 0; k < SWITCHER_PMD_INDEX; k++)
				release_pmd(&pmdpage[k]);
#endif
			/* Every PGD entry except the Switcher at the top */
			for (j = 0; j < SWITCHER_PGD_INDEX; j++)
				release_pgd(lg->pgdirs[i].pgdir + j);
		}
}

/*
 * We also throw away everything when a Guest tells us it's changed a kernel
 * mapping.  Since kernel mappings are in every page table, it's easiest to
 * throw them all away.  This traps the Guest in amber for a while as
 * everything faults back in, but it's rare.
 */
void guest_pagetable_clear_all(struct lg_cpu *cpu)
{
	release_all_pagetables(cpu->lg);
	/* We need the Guest kernel stack mapped again. */
	pin_stack_pages(cpu);
}
/*:*/

/*M:009
 * Since we throw away all mappings when a kernel mapping changes, our
 * performance sucks for guests using highmem.  In fact, a guest with
 * PAGE_OFFSET 0xc0000000 (the default) and more than about 700MB of RAM is
 * usually slower than a Guest with less memory.
 *
 * This, of course, cannot be fixed.  It would take some kind of... well, I
 * don't know, but the term "puissant code-fu" comes to mind.
:*/

/*H:420
 * This is the routine which actually sets the page table entry for the
 * "idx"'th shadow page table.
 *
 * Normally, we can just throw out the old entry and replace it with 0: if they
 * use it demand_page() will put the new entry in.  We need to do this anyway:
 * The Guest expects _PAGE_ACCESSED to be set on its PTE the first time a page
 * is read from, and _PAGE_DIRTY when it's written to.
 *
 * But Avi Kivity pointed out that most Operating Systems (Linux included) set
 * these bits on PTEs immediately anyway.  This is done to save the CPU from
 * having to update them, but it helps us the same way: if they set
 * _PAGE_ACCESSED then we can put a read-only PTE entry in immediately, and if
 * they set _PAGE_DIRTY then we can put a writable PTE entry in immediately.
 */
static void do_set_pte(struct lg_cpu *cpu, int idx,
		       unsigned long vaddr, pte_t gpte)
{
	/* Look up the matching shadow page directory entry. */
	pgd_t *spgd = spgd_addr(cpu, idx, vaddr);
#ifdef CONFIG_X86_PAE
	pmd_t *spmd;
#endif

	/* If the top level isn't present, there's no entry to update. */
	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
#ifdef CONFIG_X86_PAE
		spmd = spmd_addr(cpu, *spgd, vaddr);
		if (pmd_flags(*spmd) & _PAGE_PRESENT) {
#endif
			/* Otherwise, start by releasing the existing entry. */
			pte_t *spte = spte_addr(cpu, *spgd, vaddr);
			release_pte(*spte);

			/*
			 * If they're setting this entry as dirty or accessed,
			 * we might as well put that entry they've given us in
			 * now.  This shaves 10% off a copy-on-write
			 * micro-benchmark.
			 */
			if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
				check_gpte(cpu, gpte);
				set_pte(spte,
					gpte_to_spte(cpu, gpte,
						pte_flags(gpte) & _PAGE_DIRTY));
			} else {
				/*
				 * Otherwise kill it and we can demand_page()
				 * it in later.
				 */
				set_pte(spte, __pte(0));
			}
#ifdef CONFIG_X86_PAE
		}
#endif
	}
}

/*H:410
 * Updating a PTE entry is a little trickier.
 *
 * We keep track of several different page tables (the Guest uses one for each
 * process, so it makes sense to cache at least a few).  Each of these has
 * identical kernel parts: ie. every mapping above PAGE_OFFSET is the same for
 * all processes.  So when the page table above that address changes, we update
 * all the page tables, not just the current one.  This is rare.
 *
 * The benefit is that when we have to track a new page table, we can keep all
 * the kernel mappings.  This speeds up context switches immensely.
 */
void guest_set_pte(struct lg_cpu *cpu,
		   unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
{
	/*
	 * Kernel mappings must be changed on all top levels.  Slow, but doesn't
	 * happen often.
	 */
	if (vaddr >= cpu->lg->kernel_address) {
		unsigned int i;
		for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++)
			if (cpu->lg->pgdirs[i].pgdir)
				do_set_pte(cpu, i, vaddr, gpte);
	} else {
		/* Is this page table one we have a shadow for? */
		int pgdir = find_pgdir(cpu->lg, gpgdir);
		if (pgdir != ARRAY_SIZE(cpu->lg->pgdirs))
			/* If so, do the update. */
			do_set_pte(cpu, pgdir, vaddr, gpte);
	}
}
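
/*
 * To summarize the decision do_set_pte() and demand_page() share:
 *
 *	Guest PTE has _PAGE_DIRTY	-> install a writable shadow PTE now
 *	Guest PTE has _PAGE_ACCESSED	-> install a read-only shadow PTE now
 *	neither bit set			-> leave the shadow empty and let
 *					   demand_page() fill it in (and set
 *					   the bits) on first touch
 */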

/*H:400
 * (iii) Setting up a page table entry when the Guest tells us one has changed.
 *
 * Just like we did in interrupts_and_traps.c, it makes sense for us to deal
 * with the other side of page tables while we're here: what happens when the
 * Guest asks for a page table to be updated?
 *
 * We already saw that demand_page() will fill in the shadow page tables when
 * needed, so we can simply remove shadow page table entries whenever the Guest
 * tells us they've changed.  When the Guest tries to use the new entry it will
 * fault and demand_page() will fix it up.
 *
 * So with that in mind here's our code to update a (top-level) PGD entry:
 */
void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx)
{
	int pgdir;

	if (idx >= SWITCHER_PGD_INDEX)
		return;

	/* If they're talking about a page table we have a shadow for... */
	pgdir = find_pgdir(lg, gpgdir);
	if (pgdir < ARRAY_SIZE(lg->pgdirs))
		/* ... throw it away. */
		release_pgd(lg->pgdirs[pgdir].pgdir + idx);
}

#ifdef CONFIG_X86_PAE
/* For setting a mid-level, we just throw everything away.  It's easy. */
void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx)
{
	guest_pagetable_clear_all(&lg->cpus[0]);
}
#endif

/*H:505
 * To get through boot, we construct simple identity page mappings (which
 * set virtual == physical) and linear mappings which will get the Guest far
 * enough into the boot to create its own.  The linear mapping means we
 * simplify the Guest boot, but it makes assumptions about the Guest's
 * PAGE_OFFSET, as you'll see.
 *
 * We lay them out of the way, just below the initrd (which is why we need to
 * know its size here).
 */
static unsigned long setup_pagetables(struct lguest *lg,
				      unsigned long mem,
				      unsigned long initrd_size)
{
	pgd_t __user *pgdir;
	pte_t __user *linear;
	unsigned long mem_base = (unsigned long)lg->mem_base;
	unsigned int mapped_pages, i, linear_pages;
#ifdef CONFIG_X86_PAE
	pmd_t __user *pmds;
	unsigned int j;
	pgd_t pgd;
	pmd_t pmd;
#else
	unsigned int phys_linear;
#endif

	/*
	 * We have mapped_pages frames to map, so we need linear_pages page
	 * tables to map them.
	 */
	mapped_pages = mem / PAGE_SIZE;
	linear_pages = (mapped_pages + PTRS_PER_PTE - 1) / PTRS_PER_PTE;

	/* We put the toplevel page directory page at the top of memory. */
	pgdir = (pgd_t *)(mem + mem_base - initrd_size - PAGE_SIZE);

	/* Now we use the next linear_pages pages as pte pages */
	linear = (void *)pgdir - linear_pages * PAGE_SIZE;

#ifdef CONFIG_X86_PAE
	/*
	 * And the single mid page goes below that.  We only use one, but
	 * that's enough to map 1G, which definitely gets us through boot.
	 */
	pmds = (void *)linear - PAGE_SIZE;
#endif
	/*
	 * Linear mapping is easy: put every page's address into the
	 * mapping in order.
	 */
	for (i = 0; i < mapped_pages; i++) {
		pte_t pte;
		pte = pfn_pte(i, __pgprot(_PAGE_PRESENT|_PAGE_RW|_PAGE_USER));
		if (copy_to_user(&linear[i], &pte, sizeof(pte)) != 0)
			return -EFAULT;
	}

#ifdef CONFIG_X86_PAE
	/*
	 * Make the Guest PMD entries point to the corresponding place in the
	 * linear mapping (up to one page worth of PMD).
	 */
	for (i = j = 0; i < mapped_pages && j < PTRS_PER_PMD;
	     i += PTRS_PER_PTE, j++) {
		pmd = pfn_pmd(((unsigned long)&linear[i] - mem_base)/PAGE_SIZE,
			      __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER));

		if (copy_to_user(&pmds[j], &pmd, sizeof(pmd)) != 0)
			return -EFAULT;
	}

	/* One PGD entry, pointing to that PMD page. */
	pgd = __pgd(((unsigned long)pmds - mem_base) | _PAGE_PRESENT);
	/* Copy it in as the first PGD entry (ie. addresses 0-1G). */
	if (copy_to_user(&pgdir[0], &pgd, sizeof(pgd)) != 0)
		return -EFAULT;
	/*
	 * And the other PGD entry to make the linear mapping at PAGE_OFFSET
	 */
	if (copy_to_user(&pgdir[KERNEL_PGD_BOUNDARY], &pgd, sizeof(pgd)))
		return -EFAULT;
#else
	/*
	 * The top level points to the linear page table pages above.
	 * We set up the identity and linear mappings here.
	 */
	phys_linear = (unsigned long)linear - mem_base;
	for (i = 0; i < mapped_pages; i += PTRS_PER_PTE) {
		pgd_t pgd;
		/*
		 * Create a PGD entry which points to the right part of the
		 * linear PTE pages.
		 */
		pgd = __pgd((phys_linear + i * sizeof(pte_t)) |
			    (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER));

		/*
		 * Copy it into the PGD page at 0 and PAGE_OFFSET.
		 */
		if (copy_to_user(&pgdir[i / PTRS_PER_PTE], &pgd, sizeof(pgd))
		    || copy_to_user(&pgdir[pgd_index(PAGE_OFFSET)
					   + i / PTRS_PER_PTE],
				    &pgd, sizeof(pgd)))
			return -EFAULT;
	}
#endif

	/*
	 * We return the top level (guest-physical) address: we remember where
	 * this is to write it into lguest_data when the Guest initializes.
	 */
	return (unsigned long)pgdir - mem_base;
}
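
/*
 * A worked example of that layout, with made-up numbers (not part of
 * the driver): a 64MB Guest (non-PAE) has mapped_pages == 16384, so it
 * needs linear_pages == 16384/1024 == 16 PTE pages.  With a 4MB initrd,
 * counting down from the top of Guest memory:
 *
 *	0x04000000   top of Guest memory
 *	0x03C00000   initrd (4MB)
 *	0x03BFF000   pgdir (one page)
 *	0x03BEF000   16 linear PTE pages
 *
 * and setup_pagetables() returns 0x03BFF000, the pgdir's guest-physical
 * address.
 */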

/*H:500
 * (vii) Setting up the page tables initially.
 *
 * When a Guest is first created, the Launcher tells us where the toplevel of
 * its first page table is.  We set some things up here:
 */
int init_guest_pagetable(struct lguest *lg)
{
	u64 mem;
	u32 initrd_size;
	struct boot_params __user *boot = (struct boot_params *)lg->mem_base;
#ifdef CONFIG_X86_PAE
	pgd_t *pgd;
	pmd_t *pmd_table;
#endif
	/*
	 * Get the Guest memory size and the ramdisk size from the boot header
	 * located at lg->mem_base (Guest address 0).
	 */
	if (copy_from_user(&mem, &boot->e820_map[0].size, sizeof(mem))
	    || get_user(initrd_size, &boot->hdr.ramdisk_size))
		return -EFAULT;

	/*
	 * We start on the first shadow page table, and give it a blank PGD
	 * page.
	 */
	lg->pgdirs[0].gpgdir = setup_pagetables(lg, mem, initrd_size);
	if (IS_ERR_VALUE(lg->pgdirs[0].gpgdir))
		return lg->pgdirs[0].gpgdir;
	lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL);
	if (!lg->pgdirs[0].pgdir)
		return -ENOMEM;

#ifdef CONFIG_X86_PAE
	/* For PAE, we also create the initial mid-level. */
	pgd = lg->pgdirs[0].pgdir;
	pmd_table = (pmd_t *) get_zeroed_page(GFP_KERNEL);
	if (!pmd_table)
		return -ENOMEM;

	set_pgd(pgd + SWITCHER_PGD_INDEX,
		__pgd(__pa(pmd_table) | _PAGE_PRESENT));
#endif

	/* This is the current page table. */
	lg->cpus[0].cpu_pgd = 0;
	return 0;
}

/*H:508 When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
void page_table_guest_data_init(struct lg_cpu *cpu)
{
	/* We get the kernel address: above this is all kernel memory. */
	if (get_user(cpu->lg->kernel_address,
		     &cpu->lg->lguest_data->kernel_address)
		/*
		 * We tell the Guest that it can't use the top 2 or 4 MB
		 * of virtual addresses used by the Switcher.
		 */
	    || put_user(RESERVE_MEM * 1024 * 1024,
			&cpu->lg->lguest_data->reserve_mem)
	    || put_user(cpu->lg->pgdirs[0].gpgdir,
			&cpu->lg->lguest_data->pgdir))
		kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);

	/*
	 * In flush_user_mappings() we loop from 0 to
	 * "pgd_index(lg->kernel_address)".  This assumes it won't hit the
	 * Switcher mappings, so check that now.
	 */
#ifdef CONFIG_X86_PAE
	if (pgd_index(cpu->lg->kernel_address) == SWITCHER_PGD_INDEX &&
	    pmd_index(cpu->lg->kernel_address) == SWITCHER_PMD_INDEX)
#else
	if (pgd_index(cpu->lg->kernel_address) >= SWITCHER_PGD_INDEX)
#endif
		kill_guest(cpu, "bad kernel address %#lx",
			   cpu->lg->kernel_address);
}

/* When a Guest dies, our cleanup is fairly simple. */
void free_guest_pagetable(struct lguest *lg)
{
	unsigned int i;

	/* Throw away all page table pages. */
	release_all_pagetables(lg);
	/* Now free the top levels: free_page() can handle 0 just fine. */
	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
		free_page((long)lg->pgdirs[i].pgdir);
}

/*H:480
 * (vi) Mapping the Switcher when the Guest is about to run.
 *
 * The Switcher and the two pages for this CPU need to be visible in the
 * Guest (and not the pages for other CPUs).  We have the appropriate PTE pages
 * for each CPU already set up; we just need to hook them in now that we know
 * which Guest is about to run on this CPU.
 */
void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
{
	pte_t *switcher_pte_page = __this_cpu_read(switcher_pte_pages);
	pte_t regs_pte;

#ifdef CONFIG_X86_PAE
	pmd_t switcher_pmd;
	pmd_t *pmd_table;

	switcher_pmd = pfn_pmd(__pa(switcher_pte_page) >> PAGE_SHIFT,
			       PAGE_KERNEL_EXEC);

	/* Figure out where the pmd page is, by reading the PGD, and converting
	 * it to a virtual address. */
	pmd_table = __va(pgd_pfn(cpu->lg->
			pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX])
								<< PAGE_SHIFT);
	/* Now write it into the shadow page table. */
	set_pmd(&pmd_table[SWITCHER_PMD_INDEX], switcher_pmd);
#else
	pgd_t switcher_pgd;

	/*
	 * Make the last PGD entry for this Guest point to the Switcher's PTE
	 * page for this CPU (with appropriate flags).
	 */
	switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL_EXEC);

	cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;

#endif
	/*
	 * We also change the Switcher PTE page.  When we're running the Guest,
	 * we want the Guest's "regs" page to appear where the first Switcher
	 * page for this CPU is.  This is an optimization: when the Switcher
	 * saves the Guest registers, it saves them into the first page of this
	 * CPU's "struct lguest_pages": if we make sure the Guest's register
	 * page is already mapped there, we don't have to copy them out
	 * again.
	 */
	regs_pte = pfn_pte(__pa(cpu->regs_page) >> PAGE_SHIFT, PAGE_KERNEL);
	set_pte(&switcher_pte_page[pte_index((unsigned long)pages)], regs_pte);
}
/*:*/

static void free_switcher_pte_pages(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		free_page((long)switcher_pte_page(i));
}
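
/*
 * An illustrative map of one CPU's Switcher PTE page, as filled in by
 * populate_switcher_pte_page() below (with the current pages == 1; the
 * entry numbers follow directly from "i = pages + cpu*2"):
 *
 *	pte[0]		the Switcher code itself (read-only)
 *	pte[1 + cpu*2]	this CPU's "struct lguest_pages" regs page (RW)
 *	pte[2 + cpu*2]	this CPU's "struct lguest_ro_state" (read-only)
 *
 * Every other entry is left empty, so a Guest running on this CPU can
 * never see another CPU's pages.
 */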

/*H:520
 * Setting up the Switcher PTE page for given CPU is fairly easy, given
 * the CPU number and the "struct page"s for the Switcher code itself.
 *
 * Currently the Switcher is less than a page long, so "pages" is always 1.
 */
static __init void populate_switcher_pte_page(unsigned int cpu,
					      struct page *switcher_page[],
					      unsigned int pages)
{
	unsigned int i;
	pte_t *pte = switcher_pte_page(cpu);

	/* The first entries are easy: they map the Switcher code. */
	for (i = 0; i < pages; i++) {
		set_pte(&pte[i], mk_pte(switcher_page[i],
				__pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
	}

	/* The only other thing we map is this CPU's pair of pages. */
	i = pages + cpu*2;

	/* First page (Guest registers) is writable from the Guest */
	set_pte(&pte[i], pfn_pte(page_to_pfn(switcher_page[i]),
			 __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW)));

	/*
	 * The second page contains the "struct lguest_ro_state", and is
	 * read-only.
	 */
	set_pte(&pte[i+1], pfn_pte(page_to_pfn(switcher_page[i+1]),
			   __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
}

/*
 * We've made it through the page table code.  Perhaps our tired brains are
 * still processing the details, or perhaps we're simply glad it's over.
 *
 * If nothing else, note that all this complexity in keeping the shadow page
 * tables in sync with the Guest's is for one reason: for most Guests this
 * page table dance determines how bad performance will be.  This is why Xen
 * uses exotic direct Guest pagetable manipulation, and why both Intel and AMD
 * have implemented shadow page table support directly into hardware.
 *
 * There is just one file remaining in the Host.
 */

/*H:510
 * At boot or module load time, init_pagetables() allocates and populates
 * the Switcher PTE page for each CPU.
 */
__init int init_pagetables(struct page **switcher_page, unsigned int pages)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		switcher_pte_page(i) = (pte_t *)get_zeroed_page(GFP_KERNEL);
		if (!switcher_pte_page(i)) {
			free_switcher_pte_pages();
			return -ENOMEM;
		}
		populate_switcher_pte_page(i, switcher_page, pages);
	}
	return 0;
}
/*:*/

/* Cleaning up simply involves freeing the PTE page for each CPU. */
void free_pagetables(void)
{
	free_switcher_pte_pages();
}