Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

lguest: map Switcher text whenever we allocate a new pagetable.

It's always the same, so no need to put it in the PTE every time we're
about to run. Keep a flag to track whether the pagetable has the
Switcher entries allocated, and when allocating always initialize the
Switcher text PTE.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>

+33 -10
+1
drivers/lguest/lg.h
··· 16 16 17 17 struct pgdir { 18 18 unsigned long gpgdir; 19 + bool switcher_mapped; 19 20 pgd_t *pgdir; 20 21 }; 21 22
+32 -10
drivers/lguest/page_tables.c
··· 736 736 737 737 /*H:501 738 738 * We do need the Switcher code mapped at all times, so we allocate that 739 - * part of the Guest page table here, and populate it when we're about to run 740 - * the guest. 739 + * part of the Guest page table here. We map the Switcher code immediately, 740 + * but defer mapping of the guest register page and IDT/LDT etc page until 741 + * just before we run the guest in map_switcher_in_guest(). 742 + * 743 + * We *could* do this setup in map_switcher_in_guest(), but at that point 744 + * we've interrupts disabled, and allocating pages like that is fraught: we 745 + * can't sleep if we need to free up some memory. 741 746 */ 742 747 static bool allocate_switcher_mapping(struct lg_cpu *cpu) 743 748 { 744 749 int i; 745 750 746 751 for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) { 747 - if (!find_spte(cpu, switcher_addr + i * PAGE_SIZE, true, 748 - CHECK_GPGD_MASK, _PAGE_TABLE)) 752 + pte_t *pte = find_spte(cpu, switcher_addr + i * PAGE_SIZE, true, 753 + CHECK_GPGD_MASK, _PAGE_TABLE); 754 + if (!pte) 749 755 return false; 756 + 757 + /* 758 + * Map the switcher page if not already there. It might 759 + * already be there because we call allocate_switcher_mapping() 760 + * in guest_set_pgd() just in case it did discard our Switcher 761 + * mapping, but it probably didn't. 762 + */ 763 + if (i == 0 && !(pte_flags(*pte) & _PAGE_PRESENT)) { 764 + /* Get a reference to the Switcher page. */ 765 + get_page(lg_switcher_pages[0]); 766 + /* Create a read-only, exectuable, kernel-style PTE */ 767 + set_pte(pte, 768 + mk_pte(lg_switcher_pages[0], PAGE_KERNEL_RX)); 769 + } 750 770 } 771 + cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped = true; 751 772 return true; 752 773 } 753 774 ··· 789 768 /* Every PGD entry. 
*/ 790 769 for (j = 0; j < PTRS_PER_PGD; j++) 791 770 release_pgd(lg->pgdirs[i].pgdir + j); 771 + lg->pgdirs[i].switcher_mapped = false; 792 772 } 793 773 } 794 774 ··· 849 827 if (repin) 850 828 pin_stack_pages(cpu); 851 829 852 - if (!allocate_switcher_mapping(cpu)) 853 - kill_guest(cpu, "Cannot populate switcher mapping"); 830 + if (!cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped) { 831 + if (!allocate_switcher_mapping(cpu)) 832 + kill_guest(cpu, "Cannot populate switcher mapping"); 833 + } 854 834 } 855 835 /*:*/ 856 836 ··· 1100 1076 struct page *percpu_switcher_page, *regs_page; 1101 1077 pte_t *pte; 1102 1078 1103 - /* Code page should always be mapped, and executable. */ 1104 - pte = find_spte(cpu, switcher_addr, false, 0, 0); 1105 - get_page(lg_switcher_pages[0]); 1106 - set_pte(pte, mk_pte(lg_switcher_pages[0], PAGE_KERNEL_RX)); 1079 + /* Switcher page should always be mapped! */ 1080 + BUG_ON(!cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped); 1107 1081 1108 1082 /* Clear all the Switcher mappings for any other CPUs. */ 1109 1083 /* FIXME: This is dumb: update only when Host CPU changes. */