Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

at v3.9-rc5 · 373 lines · 11 kB
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <trace/events/kvm.h>

#include "trace.h"

#define VCPU_NR_MODES		6
#define VCPU_REG_OFFSET_USR	0
#define VCPU_REG_OFFSET_FIQ	1
#define VCPU_REG_OFFSET_IRQ	2
#define VCPU_REG_OFFSET_SVC	3
#define VCPU_REG_OFFSET_ABT	4
#define VCPU_REG_OFFSET_UND	5
#define REG_OFFSET(_reg) \
	(offsetof(struct kvm_regs, _reg) / sizeof(u32))

#define USR_REG_OFFSET(_num) REG_OFFSET(usr_regs.uregs[_num])

static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = {
	/* USR/SYS Registers */
	[VCPU_REG_OFFSET_USR] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
	},

	/* FIQ Registers */
	[VCPU_REG_OFFSET_FIQ] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7),
		REG_OFFSET(fiq_regs[0]), /* r8 */
		REG_OFFSET(fiq_regs[1]), /* r9 */
		REG_OFFSET(fiq_regs[2]), /* r10 */
		REG_OFFSET(fiq_regs[3]), /* r11 */
		REG_OFFSET(fiq_regs[4]), /* r12 */
		REG_OFFSET(fiq_regs[5]), /* r13 */
		REG_OFFSET(fiq_regs[6]), /* r14 */
	},

	/* IRQ Registers */
	[VCPU_REG_OFFSET_IRQ] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(irq_regs[0]), /* r13 */
		REG_OFFSET(irq_regs[1]), /* r14 */
	},

	/* SVC Registers */
	[VCPU_REG_OFFSET_SVC] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(svc_regs[0]), /* r13 */
		REG_OFFSET(svc_regs[1]), /* r14 */
	},

	/* ABT Registers */
	[VCPU_REG_OFFSET_ABT] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(abt_regs[0]), /* r13 */
		REG_OFFSET(abt_regs[1]), /* r14 */
	},

	/* UND Registers */
	[VCPU_REG_OFFSET_UND] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(und_regs[0]), /* r13 */
		REG_OFFSET(und_regs[1]), /* r14 */
	},
};

/*
 * Return a pointer to the register numbered reg_num, banked appropriately
 * for the current mode of the virtual CPU.
 */
u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
{
	u32 *reg_array = (u32 *)&vcpu->arch.regs;
	u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK;

	switch (mode) {
	case USR_MODE ... SVC_MODE:
		mode &= ~MODE32_BIT; /* 0 ... 3 */
		break;

	case ABT_MODE:
		mode = VCPU_REG_OFFSET_ABT;
		break;

	case UND_MODE:
		mode = VCPU_REG_OFFSET_UND;
		break;

	case SYSTEM_MODE:
		mode = VCPU_REG_OFFSET_USR;
		break;

	default:
		BUG();
	}

	return reg_array + vcpu_reg_offsets[mode][reg_num];
}
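
/*
 * Editor's sketch (not part of emulate.c): a standalone, compilable
 * illustration of the offsetof() trick behind REG_OFFSET() and the
 * vcpu_reg_offsets table above. struct demo_regs, DEMO_REG_OFFSET() and
 * main() are hypothetical stand-ins for struct kvm_regs and vcpu_reg();
 * they only show how a struct member becomes an index into the same
 * structure viewed as a flat array of 32-bit words.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_regs {
	uint32_t usr_regs[15];		/* r0-r14 as seen in USR mode */
	uint32_t svc_regs[2];		/* banked r13/r14 for SVC mode */
};

#define DEMO_REG_OFFSET(_reg) \
	(offsetof(struct demo_regs, _reg) / sizeof(uint32_t))

int main(void)
{
	struct demo_regs regs = { .svc_regs = { 0x1000, 0x2000 } };
	uint32_t *flat = (uint32_t *)&regs;	/* same cast as in vcpu_reg() */

	/* SVC-banked r13 sits at a fixed u32 index: prints index=15, 0x1000. */
	printf("SVC r13: index=%zu value=0x%x\n",
	       DEMO_REG_OFFSET(svc_regs[0]),
	       (unsigned)flat[DEMO_REG_OFFSET(svc_regs[0])]);
	return 0;
}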

/*
 * Return the SPSR for the current mode of the virtual CPU.
 */
u32 *vcpu_spsr(struct kvm_vcpu *vcpu)
{
	u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK;
	switch (mode) {
	case SVC_MODE:
		return &vcpu->arch.regs.KVM_ARM_SVC_spsr;
	case ABT_MODE:
		return &vcpu->arch.regs.KVM_ARM_ABT_spsr;
	case UND_MODE:
		return &vcpu->arch.regs.KVM_ARM_UND_spsr;
	case IRQ_MODE:
		return &vcpu->arch.regs.KVM_ARM_IRQ_spsr;
	case FIQ_MODE:
		return &vcpu->arch.regs.KVM_ARM_FIQ_spsr;
	default:
		BUG();
	}
}

/**
 * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
 * @vcpu: the vcpu pointer
 * @run: the kvm_run structure pointer
 *
 * Simply sets the wait_for_interrupts flag on the vcpu structure, which will
 * halt execution of world-switches and schedule other host processes until
 * there is an incoming IRQ or FIQ to the VM.
 */
int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	trace_kvm_wfi(*vcpu_pc(vcpu));
	kvm_vcpu_block(vcpu);
	return 1;
}

/**
 * kvm_adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
 * @vcpu: The VCPU pointer
 *
 * When exceptions occur while instructions are executed in Thumb IF-THEN
 * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
 * to do this little bit of work manually. The fields map like this:
 *
 * IT[7:0] -> CPSR[26:25],CPSR[15:10]
 */
static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
{
	unsigned long itbits, cond;
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_arm = !(cpsr & PSR_T_BIT);

	BUG_ON(is_arm && (cpsr & PSR_IT_MASK));

	if (!(cpsr & PSR_IT_MASK))
		return;

	cond = (cpsr & 0xe000) >> 13;
	itbits = (cpsr & 0x1c00) >> (10 - 2);
	itbits |= (cpsr & (0x3 << 25)) >> 25;

	/* Perform ITAdvance (see page A-52 in ARM DDI 0406C) */
	if ((itbits & 0x7) == 0)
		itbits = cond = 0;
	else
		itbits = (itbits << 1) & 0x1f;

	cpsr &= ~PSR_IT_MASK;
	cpsr |= cond << 13;
	cpsr |= (itbits & 0x1c) << (10 - 2);
	cpsr |= (itbits & 0x3) << 25;
	*vcpu_cpsr(vcpu) = cpsr;
}
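
/*
 * Editor's sketch (not part of emulate.c): a standalone walk-through of the
 * ITAdvance bit-shuffling done by kvm_adjust_itstate() above. The demo_*
 * helpers and DEMO_PSR_IT_MASK are local stand-ins (the real PSR_IT_MASK
 * comes from asm/ptrace.h); the mask value follows the field mapping in the
 * comment above: IT[7:0] -> CPSR[26:25],CPSR[15:10].
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PSR_IT_MASK	0x0600fc00	/* CPSR[26:25] | CPSR[15:10] */

static uint32_t demo_cpsr_from_it(uint32_t it)
{
	return ((it & 0xfc) << 8) | ((it & 0x3) << 25);
}

static uint32_t demo_it_from_cpsr(uint32_t cpsr)
{
	return ((cpsr >> 8) & 0xfc) | ((cpsr >> 25) & 0x3);
}

/* The same ITAdvance steps as kvm_adjust_itstate(), on a bare CPSR value. */
static uint32_t demo_it_advance(uint32_t cpsr)
{
	uint32_t itbits, cond;

	if (!(cpsr & DEMO_PSR_IT_MASK))
		return cpsr;			/* not inside an IT block */

	cond = (cpsr & 0xe000) >> 13;
	itbits = (cpsr & 0x1c00) >> (10 - 2);
	itbits |= (cpsr & (0x3 << 25)) >> 25;

	if ((itbits & 0x7) == 0)		/* last instruction of block */
		itbits = cond = 0;
	else
		itbits = (itbits << 1) & 0x1f;

	cpsr &= ~DEMO_PSR_IT_MASK;
	cpsr |= cond << 13;
	cpsr |= (itbits & 0x1c) << (10 - 2);
	cpsr |= (itbits & 0x3) << 25;
	return cpsr;
}

int main(void)
{
	/* ITSTATE 0xa6: cond base 0b101, a three-instruction IT block. */
	uint32_t cpsr = demo_cpsr_from_it(0xa6);

	/* Prints 0xa6, 0xac, 0xb8; the block is then done (ITSTATE == 0). */
	while (demo_it_from_cpsr(cpsr) != 0) {
		printf("ITSTATE = 0x%02x\n", (unsigned)demo_it_from_cpsr(cpsr));
		cpsr = demo_it_advance(cpsr);
	}
	return 0;
}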

/**
 * kvm_skip_instr - skip a trapped instruction and proceed to the next
 * @vcpu: The vcpu pointer
 * @is_wide_instr: true if the trapped instruction was 32 bits wide
 */
void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	bool is_thumb;

	is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT);
	if (is_thumb && !is_wide_instr)
		*vcpu_pc(vcpu) += 2;
	else
		*vcpu_pc(vcpu) += 4;
	kvm_adjust_itstate(vcpu);
}


/******************************************************************************
 * Inject exceptions into the guest
 */

static u32 exc_vector_base(struct kvm_vcpu *vcpu)
{
	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
	u32 vbar = vcpu->arch.cp15[c12_VBAR];

	if (sctlr & SCTLR_V)
		return 0xffff0000;
	else /* always have security exceptions */
		return vbar;
}

/**
 * kvm_inject_undefined - inject an undefined exception into the guest
 * @vcpu: The VCPU to receive the undefined exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 *
 * Modelled after TakeUndefInstrException() pseudocode.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	u32 new_lr_value;
	u32 new_spsr_value;
	u32 cpsr = *vcpu_cpsr(vcpu);
	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
	bool is_thumb = (cpsr & PSR_T_BIT);
	u32 vect_offset = 4;
	u32 return_offset = (is_thumb) ? 2 : 4;

	new_spsr_value = cpsr;
	new_lr_value = *vcpu_pc(vcpu) - return_offset;

	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | UND_MODE;
	*vcpu_cpsr(vcpu) |= PSR_I_BIT;
	*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);

	if (sctlr & SCTLR_TE)
		*vcpu_cpsr(vcpu) |= PSR_T_BIT;
	if (sctlr & SCTLR_EE)
		*vcpu_cpsr(vcpu) |= PSR_E_BIT;

	/* Note: These now point to UND banked copies */
	*vcpu_spsr(vcpu) = cpsr;
	*vcpu_reg(vcpu, 14) = new_lr_value;

	/* Branch to exception vector */
	*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
}
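
/*
 * Editor's sketch (not part of emulate.c): the vector address computation
 * shared by the injection paths, as a standalone function. demo_vector_addr()
 * is hypothetical; it mirrors exc_vector_base() plus the per-exception offset
 * used above (4 = undefined, 12 = prefetch abort, 16 = data abort). SCTLR.V
 * is bit 13 on ARMv7; the constant is defined locally here.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_SCTLR_V	(1u << 13)	/* SCTLR.V: high exception vectors */

static uint32_t demo_vector_addr(uint32_t sctlr, uint32_t vbar,
				 uint32_t vect_offset)
{
	/* High vectors ignore VBAR entirely, exactly as exc_vector_base(). */
	uint32_t base = (sctlr & DEMO_SCTLR_V) ? 0xffff0000 : vbar;

	return base + vect_offset;
}

int main(void)
{
	/* Undefined exception with high vectors: 0xffff0004. */
	printf("und:  0x%08x\n", (unsigned)demo_vector_addr(DEMO_SCTLR_V, 0, 4));
	/* Data abort with VBAR at 0x80000000: 0x80000010. */
	printf("dabt: 0x%08x\n", (unsigned)demo_vector_addr(0, 0x80000000u, 16));
	return 0;
}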

/*
 * Modelled after TakeDataAbortException() and TakePrefetchAbortException()
 * pseudocode.
 */
static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
{
	u32 new_lr_value;
	u32 new_spsr_value;
	u32 cpsr = *vcpu_cpsr(vcpu);
	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
	bool is_thumb = (cpsr & PSR_T_BIT);
	u32 vect_offset;
	u32 return_offset = (is_thumb) ? 4 : 0;
	bool is_lpae;

	new_spsr_value = cpsr;
	new_lr_value = *vcpu_pc(vcpu) + return_offset;

	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | ABT_MODE;
	*vcpu_cpsr(vcpu) |= PSR_I_BIT | PSR_A_BIT;
	*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);

	if (sctlr & SCTLR_TE)
		*vcpu_cpsr(vcpu) |= PSR_T_BIT;
	if (sctlr & SCTLR_EE)
		*vcpu_cpsr(vcpu) |= PSR_E_BIT;

	/* Note: These now point to ABT banked copies */
	*vcpu_spsr(vcpu) = cpsr;
	*vcpu_reg(vcpu, 14) = new_lr_value;

	if (is_pabt)
		vect_offset = 12;
	else
		vect_offset = 16;

	/* Branch to exception vector */
	*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;

	if (is_pabt) {
		/* Set IFAR and IFSR */
		vcpu->arch.cp15[c6_IFAR] = addr;
		is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
		/* Always give debug fault for now - should give guest a clue */
		if (is_lpae)
			vcpu->arch.cp15[c5_IFSR] = 1 << 9 | 0x22;
		else
			vcpu->arch.cp15[c5_IFSR] = 2;
	} else { /* !iabt */
		/* Set DFAR and DFSR */
		vcpu->arch.cp15[c6_DFAR] = addr;
		is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
		/* Always give debug fault for now - should give guest a clue */
		if (is_lpae)
			vcpu->arch.cp15[c5_DFSR] = 1 << 9 | 0x22;
		else
			vcpu->arch.cp15[c5_DFSR] = 2;
	}
}

/**
 * kvm_inject_dabt - inject a data abort into the guest
 * @vcpu: The VCPU to receive the data abort
 * @addr: The address to report in the DFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt(vcpu, false, addr);
}

/**
 * kvm_inject_pabt - inject a prefetch abort into the guest
 * @vcpu: The VCPU to receive the prefetch abort
 * @addr: The address to report in the IFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt(vcpu, true, addr);
}
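
/*
 * Editor's sketch (not part of emulate.c): how a caller might use the
 * injection helpers above. demo_handle_guest_abort() and its parameters are
 * hypothetical; only kvm_inject_pabt()/kvm_inject_dabt() come from this
 * file. The idea: when a guest access cannot be resolved by the host, the
 * abort is reflected back so the guest takes it through its own vectors.
 */
static int demo_handle_guest_abort(struct kvm_vcpu *vcpu, bool is_iabt,
				   unsigned long fault_addr)
{
	if (is_iabt)
		kvm_inject_pabt(vcpu, fault_addr);	/* sets IFAR/IFSR */
	else
		kvm_inject_dabt(vcpu, fault_addr);	/* sets DFAR/DFSR */

	return 1;	/* resume the guest; it enters its abort handler */
}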