Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.30 (805 lines, 30 kB)
/*P:700 The pagetable code, on the other hand, still shows the scars of
 * previous encounters. It's functional, and as neat as it can be in the
 * circumstances, but be wary, for these things are subtle and break easily.
 * The Guest provides a virtual to physical mapping, but we can neither trust
 * it nor use it: we verify and convert it here then point the CPU to the
 * converted Guest pages when running the Guest. :*/

/* Copyright (C) Rusty Russell IBM Corporation 2006.
 * GPL v2 and any later version */
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/percpu.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/bootparam.h>
#include "lg.h"

/*M:008 We hold a reference to pages, which prevents them from being swapped.
 * It'd be nice to have a callback in the "struct mm_struct" when Linux wants
 * to swap out. If we had this, and a shrinker callback to trim PTE pages, we
 * could probably consider launching Guests as non-root. :*/

/*H:300
 * The Page Table Code
 *
 * We use two-level page tables for the Guest. If you're not entirely
 * comfortable with virtual addresses, physical addresses and page tables then
 * I recommend you review arch/x86/lguest/boot.c's "Page Table Handling" (with
 * diagrams!).
 *
 * The Guest keeps page tables, but we maintain the actual ones here: these are
 * called "shadow" page tables. Which is a very Guest-centric name: these are
 * the real page tables the CPU uses, although we keep them up to date to
 * reflect the Guest's. (See what I mean about weird naming? Since when do
 * shadows reflect anything?)
 *
 * Anyway, this is the most complicated part of the Host code. There are seven
 * parts to this:
 *  (i) Looking up a page table entry when the Guest faults,
 *  (ii) Making sure the Guest stack is mapped,
 *  (iii) Setting up a page table entry when the Guest tells us one has changed,
 *  (iv) Switching page tables,
 *  (v) Flushing (throwing away) page tables,
 *  (vi) Mapping the Switcher when the Guest is about to run,
 *  (vii) Setting up the page tables initially.
 :*/
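/* [Editor's illustration, not part of the original file: here's how a
 * two-level lookup decomposes a 32-bit virtual address. Take
 * vaddr = 0xC0101234:
 *
 *	pgd index = vaddr >> 22           = 0x300 (top 10 bits)
 *	pte index = (vaddr >> 12) & 0x3FF = 0x101 (middle 10 bits)
 *	offset    = vaddr & 0xFFF         = 0x234 (bottom 12 bits)
 *
 * The PGD entry at index 0x300 points to a PTE page; the PTE at index 0x101
 * in that page gives the physical frame; add the offset and you have the
 * physical address. spgd_addr() and spte_addr() below do exactly this
 * arithmetic against the shadow tables.] */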
/* 1024 entries in a page table page map 1024 pages: 4MB. The Switcher is
 * conveniently placed at the top 4MB, so it uses a separate, complete PTE
 * page. */
#define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1)

/* We actually need a separate PTE page for each CPU. Remember that after the
 * Switcher code itself comes two pages for each CPU, and we don't want this
 * CPU's guest to see the pages of any other CPU. */
static DEFINE_PER_CPU(pte_t *, switcher_pte_pages);
#define switcher_pte_page(cpu) per_cpu(switcher_pte_pages, cpu)

/*H:320 The page table code is curly enough to need helper functions to keep it
 * clear and clean.
 *
 * There are two functions which return pointers to the shadow (aka "real")
 * page tables.
 *
 * spgd_addr() takes the virtual address and returns a pointer to the top-level
 * page directory entry (PGD) for that address. Since we keep track of several
 * page tables, the "i" argument tells us which one we're interested in (it's
 * usually the current one). */
static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)
{
	unsigned int index = pgd_index(vaddr);

	/* We kill any Guest trying to touch the Switcher addresses. */
	if (index >= SWITCHER_PGD_INDEX) {
		kill_guest(cpu, "attempt to access switcher pages");
		index = 0;
	}
	/* Return a pointer to the index'th pgd entry for the i'th page table. */
	return &cpu->lg->pgdirs[i].pgdir[index];
}

/* This routine then takes the page directory entry returned above, which
 * contains the address of the page table entry (PTE) page. It then returns a
 * pointer to the PTE entry for the given address. */
static pte_t *spte_addr(pgd_t spgd, unsigned long vaddr)
{
	pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
	/* You should never call this if the PGD entry wasn't valid */
	BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
	return &page[(vaddr >> PAGE_SHIFT) % PTRS_PER_PTE];
}

/* These two functions are just like the above two, except they access the
 * Guest page tables. Hence they return a Guest address. */
static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
{
	unsigned int index = vaddr >> (PGDIR_SHIFT);
	return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t);
}

static unsigned long gpte_addr(pgd_t gpgd, unsigned long vaddr)
{
	unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
	BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
	return gpage + ((vaddr >> PAGE_SHIFT) % PTRS_PER_PTE) * sizeof(pte_t);
}
/*:*/

/*M:014 get_pfn is slow: we could probably try to grab batches of pages here as
 * an optimization (ie. pre-faulting). :*/

/*H:350 This routine takes a page number given by the Guest and converts it to
 * an actual, physical page number. It can fail for several reasons: the
 * virtual address might not be mapped by the Launcher, the write flag is set
 * and the page is read-only, or the write flag was set and the page was
 * shared so had to be copied, but we ran out of memory.
 *
 * This holds a reference to the page, so release_pte() is careful to put that
 * back. */
static unsigned long get_pfn(unsigned long virtpfn, int write)
{
	struct page *page;

	/* gup me one page at this address please! */
	if (get_user_pages_fast(virtpfn << PAGE_SHIFT, 1, write, &page) == 1)
		return page_to_pfn(page);

	/* This value indicates failure. */
	return -1UL;
}

/*H:340 Converting a Guest page table entry to a shadow (ie. real) page table
 * entry can be a little tricky. The flags are (almost) the same, but the
 * Guest PTE contains a virtual page number: the CPU needs the real page
 * number. */
static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write)
{
	unsigned long pfn, base, flags;

	/* The Guest sets the global flag, because it thinks that it is using
	 * PGE. We only told it to use PGE so it would tell us whether it was
	 * flushing a kernel mapping or a userspace mapping. We don't actually
	 * use the global bit, so throw it away. */
	flags = (pte_flags(gpte) & ~_PAGE_GLOBAL);

	/* The Guest's pages are offset inside the Launcher. */
	base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE;

	/* We need a temporary "unsigned long" variable to hold the answer from
	 * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't
	 * fit in spte.pfn. get_pfn() finds the real physical number of the
	 * page, given the virtual number. */
	pfn = get_pfn(base + pte_pfn(gpte), write);
	if (pfn == -1UL) {
		kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte));
		/* When we destroy the Guest, we'll go through the shadow page
		 * tables and release_pte() them. Make sure we don't think
		 * this one is valid! */
		flags = 0;
	}
	/* Now we assemble our shadow PTE from the page number and flags. */
	return pfn_pte(pfn, __pgprot(flags));
}
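/* [Editor's illustration, not part of the original file: a worked example of
 * the conversion above. Suppose the Launcher mapped Guest memory at
 * mem_base = 0x10000000 (256MB into its own address space; the value is made
 * up for the example), and the Guest PTE refers to Guest page 5. Then:
 *
 *	base = 0x10000000 / 4096 = 0x10000
 *	get_pfn(0x10000 + 5, write) looks up the Launcher's virtual page
 *	0x10005 and returns the physical frame backing it.
 *
 * The shadow PTE combines that real frame with the Guest's flags (minus
 * _PAGE_GLOBAL).] */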
/*H:460 And to complete the chain, release_pte() looks like this: */
static void release_pte(pte_t pte)
{
	/* Remember that get_user_pages_fast() took a reference to the page, in
	 * get_pfn()? We have to put it back now. */
	if (pte_flags(pte) & _PAGE_PRESENT)
		put_page(pfn_to_page(pte_pfn(pte)));
}
/*:*/

static void check_gpte(struct lg_cpu *cpu, pte_t gpte)
{
	if ((pte_flags(gpte) & _PAGE_PSE) ||
	    pte_pfn(gpte) >= cpu->lg->pfn_limit)
		kill_guest(cpu, "bad page table entry");
}

static void check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
{
	if ((pgd_flags(gpgd) & ~_PAGE_TABLE) ||
	    (pgd_pfn(gpgd) >= cpu->lg->pfn_limit))
		kill_guest(cpu, "bad page directory entry");
}

/*H:330
 * (i) Looking up a page table entry when the Guest faults.
 *
 * We saw this call in run_guest(): when we see a page fault in the Guest, we
 * come here. That's because we only set up the shadow page tables lazily as
 * they're needed, so we get page faults all the time and quietly fix them up
 * and return to the Guest without it knowing.
 *
 * If we fixed up the fault (ie. we mapped the address), this routine returns
 * true. Otherwise, it was a real fault and we need to tell the Guest. */
bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
{
	pgd_t gpgd;
	pgd_t *spgd;
	unsigned long gpte_ptr;
	pte_t gpte;
	pte_t *spte;

	/* First step: get the top-level Guest page table entry. */
	gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
	/* Toplevel not present? We can't map it in. */
	if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
		return false;

	/* Now look at the matching shadow entry. */
	spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
	if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) {
		/* No shadow entry: allocate a new shadow PTE page. */
		unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
		/* This is not really the Guest's fault, but killing it is
		 * simple for this corner case. */
		if (!ptepage) {
			kill_guest(cpu, "out of memory allocating pte page");
			return false;
		}
		/* We check that the Guest pgd is OK. */
		check_gpgd(cpu, gpgd);
		/* And we copy the flags to the shadow PGD entry. The page
		 * number in the shadow PGD is the page we just allocated. */
		*spgd = __pgd(__pa(ptepage) | pgd_flags(gpgd));
	}

	/* OK, now we look at the lower level in the Guest page table: keep its
	 * address, because we might update it later. */
	gpte_ptr = gpte_addr(gpgd, vaddr);
	gpte = lgread(cpu, gpte_ptr, pte_t);

	/* If this page isn't in the Guest page tables, we can't page it in. */
	if (!(pte_flags(gpte) & _PAGE_PRESENT))
		return false;

	/* Check they're not trying to write to a page the Guest wants
	 * read-only (error code bit 1, value 2, means it was a write). */
	if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW))
		return false;

	/* User access to a kernel-only page? (Error code bit 2, value 4,
	 * means a user-mode access.) */
	if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))
		return false;

	/* Check that the Guest PTE flags are OK, and the page number is below
	 * the pfn_limit (ie. not mapping the Launcher binary). */
	check_gpte(cpu, gpte);

	/* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
	gpte = pte_mkyoung(gpte);
	if (errcode & 2)
		gpte = pte_mkdirty(gpte);

	/* Get the pointer to the shadow PTE entry we're going to set. */
	spte = spte_addr(*spgd, vaddr);
	/* If there was a valid shadow PTE entry here before, we release it.
	 * This can happen with a write to a previously read-only entry. */
	release_pte(*spte);

	/* If this is a write, we insist that the Guest page is writable (the
	 * final arg to gpte_to_spte()). */
	if (pte_dirty(gpte))
		*spte = gpte_to_spte(cpu, gpte, 1);
	else
		/* If this is a read, don't set the "writable" bit in the page
		 * table entry, even if the Guest says it's writable. That way
		 * we will come back here when a write does actually occur, so
		 * we can update the Guest's _PAGE_DIRTY flag. */
		*spte = gpte_to_spte(cpu, pte_wrprotect(gpte), 0);

	/* Finally, we write the Guest PTE entry back: we've set the
	 * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */
	lgwrite(cpu, gpte_ptr, pte_t, gpte);

	/* The fault is fixed, the page table is populated, the mapping
	 * manipulated, the result returned and the code complete. A small
	 * delay and a trace of alliteration are the only indications the Guest
	 * has that a page fault occurred at all. */
	return true;
}
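/* [Editor's illustration, not part of the original file: the write-protect
 * trick above in action. Say the Guest reads vaddr 0x08048000, whose Guest
 * PTE is present and writable but not yet dirty:
 *
 *   1. Read fault (errcode & 2 == 0): we install a *read-only* shadow PTE
 *      via pte_wrprotect(), and set _PAGE_ACCESSED in the Guest PTE.
 *   2. The Guest later writes the same page: the CPU faults again because
 *      the shadow PTE is read-only (errcode & 2 != 0).
 *   3. This time pte_mkdirty() runs, the old shadow PTE is released, and a
 *      writable shadow PTE goes in; the Guest PTE gets _PAGE_DIRTY.
 *
 * Two faults instead of one, but the Guest's dirty bit stays honest.] */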
/*H:360
 * (ii) Making sure the Guest stack is mapped.
 *
 * Remember that direct traps into the Guest need a mapped Guest kernel stack.
 * pin_stack_pages() calls us here: we could simply call demand_page(), but as
 * we've seen that logic is quite long, and usually the stack pages are already
 * mapped, so it's overkill.
 *
 * This is a quick version which answers the question: is this virtual address
 * mapped by the shadow page tables, and is it writable? */
static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr)
{
	pgd_t *spgd;
	unsigned long flags;

	/* Look at the current top level entry: is it present? */
	spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
	if (!(pgd_flags(*spgd) & _PAGE_PRESENT))
		return false;

	/* Check the flags on the pte entry itself: it must be present and
	 * writable. */
	flags = pte_flags(*(spte_addr(*spgd, vaddr)));

	return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
}

/* So, when pin_stack_pages() asks us to pin a page, we check if it's already
 * in the page tables, and if not, we call demand_page() with error code 2
 * (meaning "write"). */
void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
{
	if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2))
		kill_guest(cpu, "bad stack page %#lx", vaddr);
}

/*H:450 If we chase down the release_pgd() code, it looks like this: */
static void release_pgd(struct lguest *lg, pgd_t *spgd)
{
	/* If the entry's not present, there's nothing to release. */
	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
		unsigned int i;
		/* Converting the pfn to find the actual PTE page is easy: turn
		 * the page number into a physical address, then convert to a
		 * virtual address (easy for kernel pages like this one). */
		pte_t *ptepage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);
		/* For each entry in the page, we might need to release it. */
		for (i = 0; i < PTRS_PER_PTE; i++)
			release_pte(ptepage[i]);
		/* Now we can free the page of PTEs */
		free_page((long)ptepage);
		/* And zero out the PGD entry so we never release it twice. */
		*spgd = __pgd(0);
	}
}

/*H:445 We saw flush_user_mappings() twice: once from the flush_user_mappings()
 * hypercall and once in new_pgdir() when we re-used a top-level pgdir page.
 * It simply releases every PTE page from 0 up to the Guest's kernel address. */
static void flush_user_mappings(struct lguest *lg, int idx)
{
	unsigned int i;
	/* Release every pgd entry up to the kernel's address. */
	for (i = 0; i < pgd_index(lg->kernel_address); i++)
		release_pgd(lg, lg->pgdirs[idx].pgdir + i);
}
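/* [Editor's illustration, not part of the original file: some concrete
 * numbers for the loop above. With the usual PAGE_OFFSET of 0xC0000000 as
 * the Guest's kernel_address:
 *
 *	pgd_index(0xC0000000) = 0xC0000000 >> 22 = 768
 *
 * so flush_user_mappings() releases PGD entries 0 through 767 (the userspace
 * 3GB) and leaves entries 768 through 1022 (kernel mappings) and 1023 (the
 * Switcher) alone.] */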
/*H:440 (v) Flushing (throwing away) page tables.
 *
 * The Guest has a hypercall to throw away the page tables: it's used when a
 * large number of mappings have been changed. */
void guest_pagetable_flush_user(struct lg_cpu *cpu)
{
	/* Drop the userspace part of the current page table. */
	flush_user_mappings(cpu->lg, cpu->cpu_pgd);
}
/*:*/

/* We walk down the guest page tables to get a guest-physical address */
unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
{
	pgd_t gpgd;
	pte_t gpte;

	/* First step: get the top-level Guest page table entry. */
	gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
	/* Toplevel not present? We can't map it in. */
	if (!(pgd_flags(gpgd) & _PAGE_PRESENT)) {
		kill_guest(cpu, "Bad address %#lx", vaddr);
		return -1UL;
	}

	gpte = lgread(cpu, gpte_addr(gpgd, vaddr), pte_t);
	if (!(pte_flags(gpte) & _PAGE_PRESENT))
		kill_guest(cpu, "Bad address %#lx", vaddr);

	return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
}
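/* [Editor's illustration, not part of the original file: guest_pa() in
 * numbers. If the Guest PTE for vaddr 0xC0102ABC holds page frame 0x1234,
 * the guest-physical address is:
 *
 *	0x1234 * 0x1000 | (0xC0102ABC & 0xFFF) = 0x1234ABC
 *
 * ie. frame number times page size, plus the offset within the page.] */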
/* We keep several page tables. This is a simple routine to find the page
 * table (if any) corresponding to this top-level address the Guest has given
 * us. */
static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
{
	unsigned int i;
	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
		if (lg->pgdirs[i].pgdir && lg->pgdirs[i].gpgdir == pgtable)
			break;
	return i;
}

/*H:435 And this is us, creating the new page directory. If we really do
 * allocate a new one (and so the kernel parts are not there), we set
 * blank_pgdir. */
static unsigned int new_pgdir(struct lg_cpu *cpu,
			      unsigned long gpgdir,
			      int *blank_pgdir)
{
	unsigned int next;

	/* We pick one entry at random to throw out. Choosing the Least
	 * Recently Used might be better, but this is easy. */
	next = random32() % ARRAY_SIZE(cpu->lg->pgdirs);
	/* If it's never been allocated at all before, try now. */
	if (!cpu->lg->pgdirs[next].pgdir) {
		cpu->lg->pgdirs[next].pgdir =
					(pgd_t *)get_zeroed_page(GFP_KERNEL);
		/* If the allocation fails, just keep using the one we have */
		if (!cpu->lg->pgdirs[next].pgdir)
			next = cpu->cpu_pgd;
		else
			/* This is a blank page, so there are no kernel
			 * mappings: caller must map the stack! */
			*blank_pgdir = 1;
	}
	/* Record which Guest toplevel this shadows. */
	cpu->lg->pgdirs[next].gpgdir = gpgdir;
	/* Release all the non-kernel mappings. */
	flush_user_mappings(cpu->lg, next);

	return next;
}

/*H:430 (iv) Switching page tables
 *
 * Now we've seen all the page table setting and manipulation, let's see what
 * happens when the Guest changes page tables (ie. changes the top-level
 * pgdir). This occurs on almost every context switch. */
void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
{
	int newpgdir, repin = 0;

	/* Look to see if we have this one already. */
	newpgdir = find_pgdir(cpu->lg, pgtable);
	/* If not, we allocate or mug an existing one: if it's a fresh one,
	 * repin gets set to 1. */
	if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs))
		newpgdir = new_pgdir(cpu, pgtable, &repin);
	/* Change the current pgd index to the new one. */
	cpu->cpu_pgd = newpgdir;
	/* If it was completely blank, we map in the Guest kernel stack */
	if (repin)
		pin_stack_pages(cpu);
}
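/* [Editor's illustration, not part of the original file: tracing one Guest
 * context switch through the code above. The Guest loads a new top-level
 * page table at guest-physical address X:
 *
 *   - find_pgdir() scans the pgdirs[] cache for gpgdir == X. A hit means we
 *     already shadow it: just point cpu_pgd at that slot and run.
 *   - A miss returns ARRAY_SIZE(pgdirs), so new_pgdir() evicts a random slot
 *     (or allocates a blank one), records X, and flushes the slot's
 *     userspace mappings, which fault back in on demand.
 *   - Only a freshly allocated (blank) shadow needs pin_stack_pages(), since
 *     it has no kernel mappings at all yet.] */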
/*H:470 Finally, a routine which throws away everything: all PGD entries in all
 * the shadow page tables, including the Guest's kernel mappings. This is used
 * when we destroy the Guest. */
static void release_all_pagetables(struct lguest *lg)
{
	unsigned int i, j;

	/* Every shadow pagetable this Guest has */
	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
		if (lg->pgdirs[i].pgdir)
			/* Every PGD entry except the Switcher at the top */
			for (j = 0; j < SWITCHER_PGD_INDEX; j++)
				release_pgd(lg, lg->pgdirs[i].pgdir + j);
}

/* We also throw away everything when a Guest tells us it's changed a kernel
 * mapping. Since kernel mappings are in every page table, it's easiest to
 * throw them all away. This traps the Guest in amber for a while as
 * everything faults back in, but it's rare. */
void guest_pagetable_clear_all(struct lg_cpu *cpu)
{
	release_all_pagetables(cpu->lg);
	/* We need the Guest kernel stack mapped again. */
	pin_stack_pages(cpu);
}
/*:*/
/*M:009 Since we throw away all mappings when a kernel mapping changes, our
 * performance sucks for guests using highmem. In fact, a guest with
 * PAGE_OFFSET 0xc0000000 (the default) and more than about 700MB of RAM is
 * usually slower than a Guest with less memory.
 *
 * This, of course, cannot be fixed. It would take some kind of... well, I
 * don't know, but the term "puissant code-fu" comes to mind. :*/

/*H:420 This is the routine which actually sets the page table entry for the
 * "idx"'th shadow page table.
 *
 * Normally, we can just throw out the old entry and replace it with 0: if they
 * use it demand_page() will put the new entry in. We need to do this anyway:
 * The Guest expects _PAGE_ACCESSED to be set on its PTE the first time a page
 * is read from, and _PAGE_DIRTY when it's written to.
 *
 * But Avi Kivity pointed out that most Operating Systems (Linux included) set
 * these bits on PTEs immediately anyway. This is done to save the CPU from
 * having to update them, but it helps us the same way: if they set
 * _PAGE_ACCESSED then we can put a read-only PTE entry in immediately, and if
 * they set _PAGE_DIRTY then we can put a writable PTE entry in immediately.
 */
static void do_set_pte(struct lg_cpu *cpu, int idx,
		       unsigned long vaddr, pte_t gpte)
{
	/* Look up the matching shadow page directory entry. */
	pgd_t *spgd = spgd_addr(cpu, idx, vaddr);

	/* If the top level isn't present, there's no entry to update. */
	if (pgd_flags(*spgd) & _PAGE_PRESENT) {
		/* Otherwise, we start by releasing the existing entry. */
		pte_t *spte = spte_addr(*spgd, vaddr);
		release_pte(*spte);

		/* If they're setting this entry as dirty or accessed, we might
		 * as well put that entry they've given us in now. This shaves
		 * 10% off a copy-on-write micro-benchmark. */
		if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
			check_gpte(cpu, gpte);
			*spte = gpte_to_spte(cpu, gpte,
					     pte_flags(gpte) & _PAGE_DIRTY);
		} else
			/* Otherwise kill it and we can demand_page() it in
			 * later. */
			*spte = __pte(0);
	}
}

/*H:410 Updating a PTE entry is a little trickier.
 *
 * We keep track of several different page tables (the Guest uses one for each
 * process, so it makes sense to cache at least a few). Each of these has
 * identical kernel parts: ie. every mapping above PAGE_OFFSET is the same for
 * all processes. So when the page table above that address changes, we update
 * all the page tables, not just the current one. This is rare.
 *
 * The benefit is that when we have to track a new page table, we can keep all
 * the kernel mappings. This speeds up context switch immensely. */
void guest_set_pte(struct lg_cpu *cpu,
		   unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
{
	/* Kernel mappings must be changed on all top levels. Slow, but doesn't
	 * happen often. */
	if (vaddr >= cpu->lg->kernel_address) {
		unsigned int i;
		for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++)
			if (cpu->lg->pgdirs[i].pgdir)
				do_set_pte(cpu, i, vaddr, gpte);
	} else {
		/* Is this page table one we have a shadow for? */
		int pgdir = find_pgdir(cpu->lg, gpgdir);
		if (pgdir != ARRAY_SIZE(cpu->lg->pgdirs))
			/* If so, do the update. */
			do_set_pte(cpu, pgdir, vaddr, gpte);
	}
}

/*H:400
 * (iii) Setting up a page table entry when the Guest tells us one has changed.
 *
 * Just like we did in interrupts_and_traps.c, it makes sense for us to deal
 * with the other side of page tables while we're here: what happens when the
 * Guest asks for a page table to be updated?
 *
 * We already saw that demand_page() will fill in the shadow page tables when
 * needed, so we can simply remove shadow page table entries whenever the Guest
 * tells us they've changed. When the Guest tries to use the new entry it will
 * fault and demand_page() will fix it up.
 *
 * So with that in mind here's our code to update a (top-level) PGD entry:
 */
void guest_set_pmd(struct lguest *lg, unsigned long gpgdir, u32 idx)
{
	int pgdir;

	/* The kernel seems to try to initialize this early on: we ignore its
	 * attempts to map over the Switcher. */
	if (idx >= SWITCHER_PGD_INDEX)
		return;

	/* If they're talking about a page table we have a shadow for... */
	pgdir = find_pgdir(lg, gpgdir);
	if (pgdir < ARRAY_SIZE(lg->pgdirs))
		/* ... throw it away. */
		release_pgd(lg, lg->pgdirs[pgdir].pgdir + idx);
}
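/* [Editor's illustration, not part of the original file: why guest_set_pte()
 * branches on kernel_address. Suppose kernel_address is 0xC0000000 and we
 * happen to cache four shadow page tables (the real count is
 * ARRAY_SIZE(pgdirs)):
 *
 *   - guest_set_pte(..., vaddr = 0xC0100000, ...) touches a kernel mapping,
 *     which every process shares, so do_set_pte() runs on all four shadows.
 *   - guest_set_pte(..., vaddr = 0x08048000, ...) is a userspace mapping,
 *     private to one process, so only the shadow matching gpgdir (if we
 *     have one) is updated.] */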
/* Once we know how much memory we have, we can construct simple identity
 * mappings (which set virtual == physical) and linear mappings which will
 * get the Guest far enough into the boot to create its own.
 *
 * We lay them out of the way, just below the initrd (which is why we need to
 * know its size here). */
static unsigned long setup_pagetables(struct lguest *lg,
				      unsigned long mem,
				      unsigned long initrd_size)
{
	pgd_t __user *pgdir;
	pte_t __user *linear;
	unsigned int mapped_pages, i, linear_pages, phys_linear;
	unsigned long mem_base = (unsigned long)lg->mem_base;

	/* We have mapped_pages frames to map, so we need
	 * linear_pages page tables to map them. */
	mapped_pages = mem / PAGE_SIZE;
	linear_pages = (mapped_pages + PTRS_PER_PTE - 1) / PTRS_PER_PTE;

	/* We put the toplevel page directory page at the top of memory. */
	pgdir = (pgd_t *)(mem + mem_base - initrd_size - PAGE_SIZE);

	/* Now we use the next linear_pages pages as pte pages */
	linear = (void *)pgdir - linear_pages * PAGE_SIZE;

	/* Linear mapping is easy: put every page's address into the
	 * mapping in order. */
	for (i = 0; i < mapped_pages; i++) {
		pte_t pte;
		pte = pfn_pte(i, __pgprot(_PAGE_PRESENT|_PAGE_RW|_PAGE_USER));
		if (copy_to_user(&linear[i], &pte, sizeof(pte)) != 0)
			return -EFAULT;
	}

	/* The top level points to the linear page table pages above.
	 * We set up the identity and linear mappings here. */
	phys_linear = (unsigned long)linear - mem_base;
	for (i = 0; i < mapped_pages; i += PTRS_PER_PTE) {
		pgd_t pgd;
		pgd = __pgd((phys_linear + i * sizeof(pte_t)) |
			    (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER));

		if (copy_to_user(&pgdir[i / PTRS_PER_PTE], &pgd, sizeof(pgd))
		    || copy_to_user(&pgdir[pgd_index(PAGE_OFFSET)
					   + i / PTRS_PER_PTE],
				    &pgd, sizeof(pgd)))
			return -EFAULT;
	}

	/* We return the top level (guest-physical) address: remember where
	 * this is. */
	return (unsigned long)pgdir - mem_base;
}
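/* [Editor's illustration, not part of the original file: the boot layout
 * setup_pagetables() produces for a hypothetical 64MB Guest with a 4MB
 * initrd. mem = 64MB gives:
 *
 *	mapped_pages = 64MB / 4KB   = 16384
 *	linear_pages = 16384 / 1024 = 16 PTE pages
 *
 * Layout, from the top of Guest memory down: 4MB of initrd, then one PGD
 * page, then 16 PTE pages. Each of the 16 PGD entries is written twice: at
 * indices 0..15 (identity, virtual == physical) and at indices 768..783
 * (the linear map at PAGE_OFFSET 0xC0000000), enough for the Guest kernel
 * to boot far enough to build its own page tables.] */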
/*H:500 (vii) Setting up the page tables initially.
 *
 * When a Guest is first created, the Launcher tells us where the toplevel of
 * its first page table is. We set some things up here: */
int init_guest_pagetable(struct lguest *lg)
{
	u64 mem;
	u32 initrd_size;
	struct boot_params __user *boot = (struct boot_params *)lg->mem_base;

	/* Get the Guest memory size and the ramdisk size from the boot header
	 * located at lg->mem_base (Guest address 0). */
	if (copy_from_user(&mem, &boot->e820_map[0].size, sizeof(mem))
	    || get_user(initrd_size, &boot->hdr.ramdisk_size))
		return -EFAULT;

	/* We start on the first shadow page table, and give it a blank PGD
	 * page. */
	lg->pgdirs[0].gpgdir = setup_pagetables(lg, mem, initrd_size);
	if (IS_ERR_VALUE(lg->pgdirs[0].gpgdir))
		return lg->pgdirs[0].gpgdir;
	lg->pgdirs[0].pgdir = (pgd_t *)get_zeroed_page(GFP_KERNEL);
	if (!lg->pgdirs[0].pgdir)
		return -ENOMEM;
	lg->cpus[0].cpu_pgd = 0;
	return 0;
}

/* When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
void page_table_guest_data_init(struct lg_cpu *cpu)
{
	/* We get the kernel address: above this is all kernel memory. */
	if (get_user(cpu->lg->kernel_address,
		     &cpu->lg->lguest_data->kernel_address)
	    /* We tell the Guest that it can't use the top 4MB of virtual
	     * addresses used by the Switcher. */
	    || put_user(4U*1024*1024, &cpu->lg->lguest_data->reserve_mem)
	    || put_user(cpu->lg->pgdirs[0].gpgdir, &cpu->lg->lguest_data->pgdir))
		kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);

	/* In flush_user_mappings() we loop from 0 to
	 * "pgd_index(lg->kernel_address)". This assumes it won't hit the
	 * Switcher mappings, so check that now. */
	if (pgd_index(cpu->lg->kernel_address) >= SWITCHER_PGD_INDEX)
		kill_guest(cpu, "bad kernel address %#lx",
			   cpu->lg->kernel_address);
}

/* When a Guest dies, our cleanup is fairly simple. */
void free_guest_pagetable(struct lguest *lg)
{
	unsigned int i;

	/* Throw away all page table pages. */
	release_all_pagetables(lg);
	/* Now free the top levels: free_page() can handle 0 just fine. */
	for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
		free_page((long)lg->pgdirs[i].pgdir);
}

/*H:480 (vi) Mapping the Switcher when the Guest is about to run.
 *
 * The Switcher and the two pages for this CPU need to be visible in the
 * Guest (and not the pages for other CPUs). We have the appropriate PTE pages
 * for each CPU already set up, we just need to hook them in now we know which
 * Guest is about to run on this CPU. */
void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
{
	pte_t *switcher_pte_page = __get_cpu_var(switcher_pte_pages);
	pgd_t switcher_pgd;
	pte_t regs_pte;
	unsigned long pfn;

	/* Make the last PGD entry for this Guest point to the Switcher's PTE
	 * page for this CPU (with appropriate flags). */
	switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL);

	cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;

	/* We also change the Switcher PTE page. When we're running the Guest,
	 * we want the Guest's "regs" page to appear where the first Switcher
	 * page for this CPU is. This is an optimization: when the Switcher
	 * saves the Guest registers, it saves them into the first page of this
	 * CPU's "struct lguest_pages": if we make sure the Guest's register
	 * page is already mapped there, we don't have to copy them out
	 * again. */
	pfn = __pa(cpu->regs_page) >> PAGE_SHIFT;
	regs_pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL));
	switcher_pte_page[(unsigned long)pages/PAGE_SIZE%PTRS_PER_PTE] = regs_pte;
}
/*:*/
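/* [Editor's illustration, not part of the original file: the index math in
 * the last line above. SWITCHER_PGD_INDEX is PTRS_PER_PGD - 1 = 1023, so the
 * Switcher's PTE page covers the top 4MB of virtual space, from 0xFFC00000
 * up. If this CPU's "struct lguest_pages" sits at, say, 0xFFC03000 within
 * that region (a made-up address, consistent with CPU 1 below), then:
 *
 *	(unsigned long)pages / PAGE_SIZE % PTRS_PER_PTE
 *		= 0xFFC03000 / 0x1000 % 1024 = 3
 *
 * so PTE slot 3 of the Switcher PTE page gets remapped to point at the
 * Guest's regs page.] */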
static void free_switcher_pte_pages(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		free_page((long)switcher_pte_page(i));
}

/*H:520 Setting up the Switcher PTE page for a given CPU is fairly easy, given
 * the CPU number and the "struct page"s for the Switcher code itself.
 *
 * Currently the Switcher is less than a page long, so "pages" is always 1. */
static __init void populate_switcher_pte_page(unsigned int cpu,
					      struct page *switcher_page[],
					      unsigned int pages)
{
	unsigned int i;
	pte_t *pte = switcher_pte_page(cpu);

	/* The first entries are easy: they map the Switcher code. */
	for (i = 0; i < pages; i++) {
		pte[i] = mk_pte(switcher_page[i],
				__pgprot(_PAGE_PRESENT|_PAGE_ACCESSED));
	}

	/* The only other thing we map is this CPU's pair of pages. */
	i = pages + cpu*2;

	/* First page (Guest registers) is writable from the Guest */
	pte[i] = pfn_pte(page_to_pfn(switcher_page[i]),
			 __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW));

	/* The second page contains the "struct lguest_ro_state", and is
	 * read-only. */
	pte[i+1] = pfn_pte(page_to_pfn(switcher_page[i+1]),
			   __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED));
}

/* We've made it through the page table code. Perhaps our tired brains are
 * still processing the details, or perhaps we're simply glad it's over.
 *
 * If nothing else, note that all this complexity in keeping shadow page tables
 * in sync with the Guest's page tables is for one reason: for most Guests this
 * page table dance determines how bad performance will be. This is why Xen
 * uses exotic direct Guest pagetable manipulation, and why both Intel and AMD
 * have implemented shadow page table support directly in hardware.
 *
 * There is just one file remaining in the Host. */

/*H:510 At boot or module load time, init_pagetables() allocates and populates
 * the Switcher PTE page for each CPU. */
__init int init_pagetables(struct page **switcher_page, unsigned int pages)
{
	unsigned int i;

	for_each_possible_cpu(i) {
		switcher_pte_page(i) = (pte_t *)get_zeroed_page(GFP_KERNEL);
		if (!switcher_pte_page(i)) {
			free_switcher_pte_pages();
			return -ENOMEM;
		}
		populate_switcher_pte_page(i, switcher_page, pages);
	}
	return 0;
}
/*:*/

/* Cleaning up simply involves freeing the PTE page for each CPU. */
void free_pagetables(void)
{
	free_switcher_pte_pages();
}
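/* [Editor's illustration, not part of the original file: the per-CPU layout
 * populate_switcher_pte_page() builds. With one page of Switcher code
 * (pages == 1), CPU 1's PTE page looks like:
 *
 *	PTE[0]  Switcher code          (read-only)
 *	PTE[3]  CPU 1 regs page        (read-write; i = 1 + 1*2 = 3)
 *	PTE[4]  CPU 1 lguest_ro_state  (read-only)
 *
 * Everything else is left unmapped, so a Guest running on CPU 1 can never
 * see another CPU's Switcher pages.] */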