Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm/arm64: KVM: Always have merged page tables

We're in a position where we can now always have "merged" page
tables, where both the runtime mapping and the idmap coexist.

This results in some code being removed, but there is more to come.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>

Authored by Marc Zyngier and committed by Christoffer Dall
0535a3e2 d1745910

+42 -65
+35 -41
arch/arm/kvm/mmu.c
··· 492 492 493 493 if (boot_hyp_pgd) { 494 494 unmap_hyp_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE); 495 - unmap_hyp_range(boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); 496 495 free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order); 497 496 boot_hyp_pgd = NULL; 498 497 } 499 498 500 499 if (hyp_pgd) 501 - unmap_hyp_range(hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); 500 + unmap_hyp_range(hyp_pgd, hyp_idmap_start, PAGE_SIZE); 502 501 503 502 mutex_unlock(&kvm_hyp_pgd_mutex); 504 503 } ··· 1690 1691 if (__kvm_cpu_uses_extended_idmap()) 1691 1692 return virt_to_phys(merged_hyp_pgd); 1692 1693 else 1693 - return virt_to_phys(boot_hyp_pgd); 1694 + return virt_to_phys(hyp_pgd); 1694 1695 } 1695 1696 1696 1697 phys_addr_t kvm_get_idmap_vector(void) ··· 1701 1702 phys_addr_t kvm_get_idmap_start(void) 1702 1703 { 1703 1704 return hyp_idmap_start; 1705 + } 1706 + 1707 + static int kvm_map_idmap_text(pgd_t *pgd) 1708 + { 1709 + int err; 1710 + 1711 + /* Create the idmap in the boot page tables */ 1712 + err = __create_hyp_mappings(pgd, 1713 + hyp_idmap_start, hyp_idmap_end, 1714 + __phys_to_pfn(hyp_idmap_start), 1715 + PAGE_HYP_EXEC); 1716 + if (err) 1717 + kvm_err("Failed to idmap %lx-%lx\n", 1718 + hyp_idmap_start, hyp_idmap_end); 1719 + 1720 + return err; 1704 1721 } 1705 1722 1706 1723 int kvm_mmu_init(void) ··· 1734 1719 BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK); 1735 1720 1736 1721 hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order); 1737 - boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order); 1738 - 1739 - if (!hyp_pgd || !boot_hyp_pgd) { 1722 + if (!hyp_pgd) { 1740 1723 kvm_err("Hyp mode PGD not allocated\n"); 1741 1724 err = -ENOMEM; 1742 1725 goto out; 1743 1726 } 1744 1727 1745 - /* Create the idmap in the boot page tables */ 1746 - err = __create_hyp_mappings(boot_hyp_pgd, 1747 - hyp_idmap_start, hyp_idmap_end, 1748 - __phys_to_pfn(hyp_idmap_start), 1749 - PAGE_HYP_EXEC); 1750 - 1751 - if (err) { 1752 - 
kvm_err("Failed to idmap %lx-%lx\n", 1753 - hyp_idmap_start, hyp_idmap_end); 1754 - goto out; 1755 - } 1756 - 1757 1728 if (__kvm_cpu_uses_extended_idmap()) { 1729 + boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1730 + hyp_pgd_order); 1731 + if (!boot_hyp_pgd) { 1732 + kvm_err("Hyp boot PGD not allocated\n"); 1733 + err = -ENOMEM; 1734 + goto out; 1735 + } 1736 + 1737 + err = kvm_map_idmap_text(boot_hyp_pgd); 1738 + if (err) 1739 + goto out; 1740 + 1758 1741 merged_hyp_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); 1759 1742 if (!merged_hyp_pgd) { 1760 1743 kvm_err("Failed to allocate extra HYP pgd\n"); ··· 1760 1747 } 1761 1748 __kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd, 1762 1749 hyp_idmap_start); 1763 - return 0; 1764 - } 1765 - 1766 - /* Map the very same page at the trampoline VA */ 1767 - err = __create_hyp_mappings(boot_hyp_pgd, 1768 - TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE, 1769 - __phys_to_pfn(hyp_idmap_start), 1770 - PAGE_HYP_EXEC); 1771 - if (err) { 1772 - kvm_err("Failed to map trampoline @%lx into boot HYP pgd\n", 1773 - TRAMPOLINE_VA); 1774 - goto out; 1775 - } 1776 - 1777 - /* Map the same page again into the runtime page tables */ 1778 - err = __create_hyp_mappings(hyp_pgd, 1779 - TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE, 1780 - __phys_to_pfn(hyp_idmap_start), 1781 - PAGE_HYP_EXEC); 1782 - if (err) { 1783 - kvm_err("Failed to map trampoline @%lx into runtime HYP pgd\n", 1784 - TRAMPOLINE_VA); 1785 - goto out; 1750 + } else { 1751 + err = kvm_map_idmap_text(hyp_pgd); 1752 + if (err) 1753 + goto out; 1786 1754 } 1787 1755 1788 1756 return 0;
+7 -24
arch/arm64/kvm/reset.c
··· 133 133 return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq); 134 134 } 135 135 136 - extern char __hyp_idmap_text_start[]; 137 - 138 136 unsigned long kvm_hyp_reset_entry(void) 139 137 { 140 - if (!__kvm_cpu_uses_extended_idmap()) { 141 - unsigned long offset; 142 - 143 - /* 144 - * Find the address of __kvm_hyp_reset() in the trampoline page. 145 - * This is present in the running page tables, and the boot page 146 - * tables, so we call the code here to start the trampoline 147 - * dance in reverse. 148 - */ 149 - offset = (unsigned long)__kvm_hyp_reset 150 - - ((unsigned long)__hyp_idmap_text_start & PAGE_MASK); 151 - 152 - return TRAMPOLINE_VA + offset; 153 - } else { 154 - /* 155 - * KVM is running with merged page tables, which don't have the 156 - * trampoline page mapped. We know the idmap is still mapped, 157 - * but can't be called into directly. Use 158 - * __extended_idmap_trampoline to do the call. 159 - */ 160 - return (unsigned long)kvm_ksym_ref(__extended_idmap_trampoline); 161 - } 138 + /* 139 + * KVM is running with merged page tables, which don't have the 140 + * trampoline page mapped. We know the idmap is still mapped, 141 + * but can't be called into directly. Use 142 + * __extended_idmap_trampoline to do the call. 143 + */ 144 + return (unsigned long)kvm_ksym_ref(__extended_idmap_trampoline); 162 145 }