/*
 * Provenance: Linux kernel mirror (for testing)
 *   git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
 *   tags: kernel, os, linux — at v4.7-rc2 (187 lines, 4.4 kB)
 */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sysreg.h>

	.text
	/*
	 * Placed in the HYP identity-map section: this code runs while the
	 * EL2 MMU is being turned on/off, so it must be reachable at its
	 * physical address.
	 */
	.pushsection	.hyp.idmap.text, "ax"

	/* 2^11 = 2 kB alignment, as required for a vector base address */
	.align	11

/*
 * __kvm_hyp_init - temporary EL2 exception vector table for KVM init.
 *
 * Only the "Synchronous 64-bit EL1" vector is serviced (a synchronous
 * exception from the 64-bit EL1 host — presumably an HVC — lands in
 * __do_hyp_init below); every other vector parks the CPU in __invalid.
 * NOTE(review): the ventry macro is defined outside this file; it is
 * assumed to emit one properly aligned vector slot per use — confirm
 * against asm/assembler.h.
 */
ENTRY(__kvm_hyp_init)
	ventry	__invalid		// Synchronous EL2t
	ventry	__invalid		// IRQ EL2t
	ventry	__invalid		// FIQ EL2t
	ventry	__invalid		// Error EL2t

	ventry	__invalid		// Synchronous EL2h
	ventry	__invalid		// IRQ EL2h
	ventry	__invalid		// FIQ EL2h
	ventry	__invalid		// Error EL2h

	ventry	__do_hyp_init		// Synchronous 64-bit EL1
	ventry	__invalid		// IRQ 64-bit EL1
	ventry	__invalid		// FIQ 64-bit EL1
	ventry	__invalid		// Error 64-bit EL1

	ventry	__invalid		// Synchronous 32-bit EL1
	ventry	__invalid		// IRQ 32-bit EL1
	ventry	__invalid		// FIQ 32-bit EL1
	ventry	__invalid		// Error 32-bit EL1

__invalid:
	b	.			// unexpected vector: spin forever
	/*
	 * __do_hyp_init - configure EL2 translation and enable its MMU.
	 *
	 * Reached through the "Synchronous 64-bit EL1" vector above.
	 *
	 * x0: HYP boot pgd
	 * x1: HYP pgd
	 * x2: HYP stack
	 * x3: HYP vectors
	 */
__do_hyp_init:

	/* Start on the boot pgd; we may switch to the runtime pgd below */
	msr	ttbr0_el2, x0

	/*
	 * Build TCR_EL2 in x4: inherit the fields shared with TCR_EL1
	 * (masked down to the bits valid at EL2), then set the RES1 bits.
	 */
	mrs	x4, tcr_el1
	ldr	x5, =TCR_EL2_MASK
	and	x4, x4, x5
	mov	x5, #TCR_EL2_RES1
	orr	x4, x4, x5

#ifndef CONFIG_ARM64_VA_BITS_48
	/*
	 * If we are running with VA_BITS < 48, we may be running with an extra
	 * level of translation in the ID map. This is only the case if system
	 * RAM is out of range for the currently configured page size and number
	 * of translation levels, in which case we will also need the extra
	 * level for the HYP ID map, or we won't be able to enable the EL2 MMU.
	 *
	 * However, at EL2, there is only one TTBR register, and we can't switch
	 * between translation tables *and* update TCR_EL2.T0SZ at the same
	 * time. Bottom line: we need the extra level in *both* our translation
	 * tables.
	 *
	 * So use the same T0SZ value we use for the ID map.
	 */
	ldr_l	x5, idmap_t0sz
	bfi	x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
#endif
	/*
	 * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS bits in
	 * TCR_EL2 (bits [18:16]).
	 */
	mrs	x5, ID_AA64MMFR0_EL1
	bfi	x4, x5, #16, #3

	msr	tcr_el2, x4

	/* EL2 memory attributes: reuse EL1's MAIR wholesale */
	mrs	x4, mair_el1
	msr	mair_el2, x4
	isb

	/* Invalidate the stale TLBs from Bootloader */
	tlbi	alle2
	dsb	sy

	/* Turn the EL2 MMU on, keeping only the current endianness bit */
	mrs	x4, sctlr_el2
	and	x4, x4, #SCTLR_ELx_EE	// preserve endianness of EL2
	ldr	x5, =SCTLR_ELx_FLAGS
	orr	x4, x4, x5
	msr	sctlr_el2, x4
	isb

	/* Skip the trampoline dance if we merged the boot and runtime PGDs */
	cmp	x0, x1
	b.eq	merged

	/*
	 * MMU is now enabled. Get ready for the trampoline dance: jump to
	 * the copy of `target` mapped in the trampoline page by keeping
	 * its in-page offset and substituting the TRAMPOLINE_VA page base.
	 */
	ldr	x4, =TRAMPOLINE_VA
	adr	x5, target
	bfi	x4, x5, #0, #PAGE_SHIFT
	br	x4

target: /* We're now in the trampoline code, switch page tables */
	msr	ttbr0_el2, x1
	isb

	/* Invalidate the old TLBs */
	tlbi	alle2
	dsb	sy

merged:
	/* Set the stack and new vectors (both converted to HYP VAs) */
	kern_hyp_va	x2
	mov	sp, x2
	kern_hyp_va	x3
	msr	vbar_el2, x3

	/* Hello, World! */
	eret
ENDPROC(__kvm_hyp_init)

	/*
	 * Reset kvm back to the hyp stub. This is the trampoline dance in
	 * reverse. If kvm used an extended idmap, __extended_idmap_trampoline
	 * calls this code directly in the idmap. In this case switching to the
	 * boot tables is a no-op.
	 *
	 * x0: HYP boot pgd
	 * x1: HYP phys_idmap_start
	 */
ENTRY(__kvm_hyp_reset)
	/* We're in trampoline code in VA, switch back to boot page tables */
	msr	ttbr0_el2, x0
	isb

	/* Ensure the PA branch doesn't find a stale tlb entry or stale code. */
	ic	iallu
	tlbi	alle2
	dsb	sy
	isb

	/* Branch into PA space: splice 1f's page offset into phys_idmap_start */
	adr	x0, 1f
	bfi	x1, x0, #0, #PAGE_SHIFT
	br	x1

	/* We're now in idmap, disable MMU */
1:	mrs	x0, sctlr_el2
	ldr	x1, =SCTLR_ELx_FLAGS
	bic	x0, x0, x1		// Clear SCTL_M and etc
	msr	sctlr_el2, x0
	isb

	/* Invalidate the old TLBs */
	tlbi	alle2
	dsb	sy

	/* Install stub vectors, handing EL2 back to the hyp stub */
	adr_l	x0, __hyp_stub_vectors
	msr	vbar_el2, x0

	eret
ENDPROC(__kvm_hyp_reset)

	/* Literal pool for the ldr =const pseudo-instructions above */
	.ltorg

	.popsection