Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v3.18-rc4 (1304 lines, 36 kB)
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_mmu.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <trace/events/kvm.h>
#include <asm/vfp.h>
#include "../vfp/vfpinstr.h"

#include "trace.h"
#include "coproc.h"


/******************************************************************************
 * Co-processor emulation
 *****************************************************************************/

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/*
 * kvm_vcpu_arch.cp15 holds cp15 registers as an array of u32, but some
 * of cp15 registers can be viewed either as couple of two u32 registers
 * or one u64 register. Current u64 register encoding is that least
 * significant u32 word is followed by most significant u32 word.
 */
static inline void vcpu_cp15_reg64_set(struct kvm_vcpu *vcpu,
				       const struct coproc_reg *r,
				       u64 val)
{
	vcpu->arch.cp15[r->reg] = val & 0xffffffff;
	vcpu->arch.cp15[r->reg + 1] = val >> 32;
}

static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu,
				      const struct coproc_reg *r)
{
	u64 val;

	val = vcpu->arch.cp15[r->reg + 1];
	val = val << 32;
	val = val | vcpu->arch.cp15[r->reg];
	return val;
}

int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	/*
	 * We can get here, if the host has been built without VFPv3 support,
	 * but the guest attempted a floating point operation.
	 */
	kvm_inject_undefined(vcpu);
	return 1;
}

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

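/*
 * Example: assuming MPIDR_LEVEL_BITS covers one 8-bit affinity field (as on
 * ARMv7), the computation below puts vcpu_id / 4 into Aff1 and vcpu_id % 4
 * into Aff0, so vcpu_id 5 reads back as core 1 of virtual cluster 1.
 */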
static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	/*
	 * Compute guest MPIDR. We build a virtual cluster out of the
	 * vcpu_id, but we read the 'U' bit from the underlying
	 * hardware directly.
	 */
	vcpu->arch.cp15[c0_MPIDR] = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) |
				     ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) |
				     (vcpu->vcpu_id & 3));
}

/* TRM entries A7:4.3.31 A15:4.3.28 - RO WI */
static bool access_actlr(struct kvm_vcpu *vcpu,
			 const struct coproc_params *p,
			 const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	*vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR];
	return true;
}

/* TRM entries A7:4.3.56, A15:4.3.60 - R/O. */
static bool access_cbar(struct kvm_vcpu *vcpu,
			const struct coproc_params *p,
			const struct coproc_reg *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p);
	return read_zero(vcpu, p);
}

/* TRM entries A7:4.3.49, A15:4.3.48 - R/O WI */
static bool access_l2ctlr(struct kvm_vcpu *vcpu,
			  const struct coproc_params *p,
			  const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	*vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR];
	return true;
}

static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	u32 l2ctlr, ncores;

	asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
	l2ctlr &= ~(3 << 24);
	ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
	/* How many cores in the current cluster and the next ones */
	ncores -= (vcpu->vcpu_id & ~3);
	/* Cap it to the maximum number of cores in a single cluster */
	ncores = min(ncores, 3U);
	l2ctlr |= (ncores & 3) << 24;

	vcpu->arch.cp15[c9_L2CTLR] = l2ctlr;
}

static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	u32 actlr;

	/* ACTLR contains SMP bit: make sure you create all cpus first! */
	asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
	/* Make the SMP bit consistent with the guest configuration */
	if (atomic_read(&vcpu->kvm->online_vcpus) > 1)
		actlr |= 1U << 6;
	else
		actlr &= ~(1U << 6);

	vcpu->arch.cp15[c1_ACTLR] = actlr;
}

/*
 * TRM entries: A7:4.3.50, A15:4.3.49
 * R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored).
 */
static bool access_l2ectlr(struct kvm_vcpu *vcpu,
			   const struct coproc_params *p,
			   const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	*vcpu_reg(vcpu, p->Rt1) = 0;
	return true;
}

/* See note at ARM ARM B1.14.4 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			const struct coproc_params *p,
			const struct coproc_reg *r)
{
	unsigned long val;
	int cpu;

	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	cpu = get_cpu();

	cpumask_setall(&vcpu->arch.require_dcache_flush);
	cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);

	/* If we were already preempted, take the long way around */
	if (cpu != vcpu->arch.last_pcpu) {
		flush_cache_all();
		goto done;
	}

	val = *vcpu_reg(vcpu, p->Rt1);

	switch (p->CRm) {
	case 6:		/* Upgrade DCISW to DCCISW, as per HCR.SWIO */
	case 14:	/* DCCISW */
		asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
		break;

	case 10:	/* DCCSW */
		asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val));
		break;
	}

done:
	put_cpu();

	return true;
}

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  const struct coproc_params *p,
			  const struct coproc_reg *r)
{
	BUG_ON(!p->is_write);

	vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1);
	if (p->is_64bit)
		vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2);

	return true;
}

/*
 * SCTLR accessor. Only called as long as HCR_TVM is set. If the
 * guest enables the MMU, we stop trapping the VM sys_regs and leave
 * it in complete control of the caches.
 *
 * Used by the cpu-specific code.
 */
bool access_sctlr(struct kvm_vcpu *vcpu,
		  const struct coproc_params *p,
		  const struct coproc_reg *r)
{
	access_vm_reg(vcpu, p, r);

	if (vcpu_has_cache_enabled(vcpu)) {	/* MMU+Caches enabled? */
		vcpu->arch.hcr &= ~HCR_TVM;
		stage2_flush_vm(vcpu->kvm);
	}

	return true;
}

/*
 * We could trap ID_DFR0 and tell the guest we don't support performance
 * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
 * NAKed, so it will read the PMCR anyway.
 *
 * Therefore we tell the guest we have 0 counters. Unfortunately, we
 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 * all PM registers, which doesn't crash the guest kernel at least.
 */
static bool pm_fake(struct kvm_vcpu *vcpu,
		    const struct coproc_params *p,
		    const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

#define access_pmcr pm_fake
#define access_pmcntenset pm_fake
#define access_pmcntenclr pm_fake
#define access_pmovsr pm_fake
#define access_pmselr pm_fake
#define access_pmceid0 pm_fake
#define access_pmceid1 pm_fake
#define access_pmccntr pm_fake
#define access_pmxevtyper pm_fake
#define access_pmxevcntr pm_fake
#define access_pmuserenr pm_fake
#define access_pmintenset pm_fake
#define access_pmintenclr pm_fake

/* Architected CP15 registers.
 * CRn denotes the primary register number, but is copied to the CRm in the
 * user space API for 64-bit register access in line with the terminology used
 * in the ARM ARM.
 * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
 *            registers preceding 32-bit ones.
 */
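/*
 * Each entry below follows the coproc_reg layout used throughout this file:
 * the encoding (CRn/CRm/Op1/Op2, or CRm64/Op1 for 64-bit registers), the
 * register size (is32/is64), an optional access handler invoked on a trapped
 * guest access, an optional reset handler, the index into vcpu->arch.cp15
 * where the value is kept, and (for reset_val entries) the reset value.
 */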
static const struct coproc_reg cp15_regs[] = {
	/* MPIDR: we use VMPIDR for guest access. */
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32,
			NULL, reset_mpidr, c0_MPIDR },

	/* CSSELR: swapped by interrupt.S. */
	{ CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32,
			NULL, reset_unknown, c0_CSSELR },

	/* ACTLR: trapped by HCR.TAC bit. */
	{ CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32,
			access_actlr, reset_actlr, c1_ACTLR },

	/* CPACR: swapped by interrupt.S. */
	{ CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32,
			NULL, reset_val, c1_CPACR, 0x00000000 },

	/* TTBR0/TTBR1/TTBCR: swapped by interrupt.S. */
	{ CRm64( 2), Op1( 0), is64, access_vm_reg, reset_unknown64, c2_TTBR0 },
	{ CRn(2), CRm( 0), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c2_TTBR0 },
	{ CRn(2), CRm( 0), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c2_TTBR1 },
	{ CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
			access_vm_reg, reset_val, c2_TTBCR, 0x00000000 },
	{ CRm64( 2), Op1( 1), is64, access_vm_reg, reset_unknown64, c2_TTBR1 },


	/* DACR: swapped by interrupt.S. */
	{ CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c3_DACR },

	/* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */
	{ CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c5_DFSR },
	{ CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c5_IFSR },
	{ CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c5_ADFSR },
	{ CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c5_AIFSR },

	/* DFAR/IFAR: swapped by interrupt.S. */
	{ CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c6_DFAR },
	{ CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
			access_vm_reg, reset_unknown, c6_IFAR },

	/* PAR swapped by interrupt.S */
	{ CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw},
	{ CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw},
	{ CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw},
	/*
	 * L2CTLR access (guest wants to know #CPUs).
	 */
	{ CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32,
			access_l2ctlr, reset_l2ctlr, c9_L2CTLR },
	{ CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr},

	/*
	 * Dummy performance monitor implementation.
	 */
	{ CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0},
	{ CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper},
	{ CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset},
	{ CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr},

	/* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */
	{ CRn(10), CRm( 2), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c10_PRRR},
	{ CRn(10), CRm( 2), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c10_NMRR},

	/* AMAIR0/AMAIR1: swapped by interrupt.S. */
	{ CRn(10), CRm( 3), Op1( 0), Op2( 0), is32,
			access_vm_reg, reset_unknown, c10_AMAIR0},
	{ CRn(10), CRm( 3), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_unknown, c10_AMAIR1},

	/* VBAR: swapped by interrupt.S. */
	{ CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
			NULL, reset_val, c12_VBAR, 0x00000000 },

	/* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
	{ CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
			access_vm_reg, reset_val, c13_CID, 0x00000000 },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 2), is32,
			NULL, reset_unknown, c13_TID_URW },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 3), is32,
			NULL, reset_unknown, c13_TID_URO },
	{ CRn(13), CRm( 0), Op1( 0), Op2( 4), is32,
			NULL, reset_unknown, c13_TID_PRIV },

	/* CNTKCTL: swapped by interrupt.S. */
	{ CRn(14), CRm( 1), Op1( 0), Op2( 0), is32,
			NULL, reset_val, c14_CNTKCTL, 0x00000000 },

	/* The Configuration Base Address Register. */
	{ CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
};

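/*
 * Entries without a cp15 array index (the DC*SW operations and CBAR above)
 * are trapped and emulated but never saved; index_to_coproc_reg() and
 * walk_cp15() below filter them out of the userspace register list.
 */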
/* Target specific emulation tables */
static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
{
	unsigned int i;

	for (i = 1; i < table->num; i++)
		BUG_ON(cmp_reg(&table->table[i-1],
			       &table->table[i]) >= 0);

	target_tables[table->target] = table;
}

/* Get specific register table for this target. */
static const struct coproc_reg *get_target_table(unsigned target, size_t *num)
{
	struct kvm_coproc_target_table *table;

	table = target_tables[target];
	*num = table->num;
	return table->table;
}

static const struct coproc_reg *find_reg(const struct coproc_params *params,
					 const struct coproc_reg table[],
					 unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		const struct coproc_reg *r = &table[i];

		if (params->is_64bit != r->is_64)
			continue;
		if (params->CRn != r->CRn)
			continue;
		if (params->CRm != r->CRm)
			continue;
		if (params->Op1 != r->Op1)
			continue;
		if (params->Op2 != r->Op2)
			continue;

		return r;
	}
	return NULL;
}

static int emulate_cp15(struct kvm_vcpu *vcpu,
			const struct coproc_params *params)
{
	size_t num;
	const struct coproc_reg *table, *r;

	trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn,
				   params->CRm, params->Op2, params->is_write);

	table = get_target_table(vcpu->arch.target, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));

	if (likely(r)) {
		/* If we don't have an accessor, we should never get here! */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			return 1;
		}
		/* If access function fails, it should complain. */
	} else {
		kvm_err("Unsupported guest CP15 access at: %08lx\n",
			*vcpu_pc(vcpu));
		print_cp_instr(params);
	}
	kvm_inject_undefined(vcpu);
	return 1;
}

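/*
 * kvm_handle_cp15_64() and kvm_handle_cp15_32() below decode the HSR fields
 * describing a trapped coprocessor access: bit 0 is the direction (0 for a
 * guest write, i.e. MCRR/MCR), Rt sits at bits [9:5], and the remaining
 * Op1/Op2/CRn/CRm fields follow the 64-bit (MCRR/MRRC) and 32-bit (MCR/MRC)
 * layouts extracted in each handler.
 */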
/**
 * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct coproc_params params;

	params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
	params.is_64bit = true;

	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf;
	params.Op2 = 0;
	params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
	params.CRm = 0;

	return emulate_cp15(vcpu, &params);
}

static void reset_coproc_regs(struct kvm_vcpu *vcpu,
			      const struct coproc_reg *table, size_t num)
{
	unsigned long i;

	for (i = 0; i < num; i++)
		if (table[i].reset)
			table[i].reset(vcpu, &table[i]);
}

/**
 * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct coproc_params params;

	params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
	params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
	params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
	params.is_64bit = false;

	params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
	params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 14) & 0x7;
	params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
	params.Rt2 = 0;

	return emulate_cp15(vcpu, &params);
}

/******************************************************************************
 * Userspace API
 *****************************************************************************/

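/*
 * Userspace identifies a cp15 register by a 64-bit KVM_GET_ONE_REG/
 * KVM_SET_ONE_REG id. For 32-bit registers the id packs
 * KVM_REG_ARM | KVM_REG_SIZE_U32 | (15 << KVM_REG_ARM_COPROC_SHIFT) together
 * with CRn, CRm, Op1 and Op2 each shifted into its KVM_REG_ARM_* field; for
 * 64-bit registers only CRm and Op1 are encoded, with CRm carrying what the
 * kernel tables call CRn. index_to_params() below unpacks such an id and
 * cp15_to_index() rebuilds it.
 */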
static bool index_to_params(u64 id, struct coproc_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U32:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			   | KVM_REG_ARM_COPROC_MASK
			   | KVM_REG_ARM_32_CRN_MASK
			   | KVM_REG_ARM_CRM_MASK
			   | KVM_REG_ARM_OPC1_MASK
			   | KVM_REG_ARM_32_OPC2_MASK))
			return false;

		params->is_64bit = false;
		params->CRn = ((id & KVM_REG_ARM_32_CRN_MASK)
			       >> KVM_REG_ARM_32_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
			       >> KVM_REG_ARM_CRM_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
			       >> KVM_REG_ARM_OPC1_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM_32_OPC2_MASK)
			       >> KVM_REG_ARM_32_OPC2_SHIFT);
		return true;
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			   | KVM_REG_ARM_COPROC_MASK
			   | KVM_REG_ARM_CRM_MASK
			   | KVM_REG_ARM_OPC1_MASK))
			return false;
		params->is_64bit = true;
		/* CRm to CRn: see cp15_to_index for details */
		params->CRn = ((id & KVM_REG_ARM_CRM_MASK)
			       >> KVM_REG_ARM_CRM_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
			       >> KVM_REG_ARM_OPC1_SHIFT);
		params->Op2 = 0;
		params->CRm = 0;
		return true;
	default:
		return false;
	}
}

/* Decode an index value, and find the cp15 coproc_reg entry. */
static const struct coproc_reg *index_to_coproc_reg(struct kvm_vcpu *vcpu,
						    u64 id)
{
	size_t num;
	const struct coproc_reg *table, *r;
	struct coproc_params params;

	/* We only do cp15 for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT != 15)
		return NULL;

	if (!index_to_params(id, &params))
		return NULL;

	table = get_target_table(vcpu->arch.target, &num);
	r = find_reg(&params, table, num);
	if (!r)
		r = find_reg(&params, cp15_regs, ARRAY_SIZE(cp15_regs));

	/* Not saved in the cp15 array? */
	if (r && !r->reg)
		r = NULL;

	return r;
}

/*
 * These are the invariant cp15 registers: we let the guest see the host
 * versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */
/* Unfortunately, there's no register-argument for mrc, so generate. */
#define FUNCTION_FOR32(crn, crm, op1, op2, name)			\
	static void get_##name(struct kvm_vcpu *v,			\
			       const struct coproc_reg *r)		\
	{								\
		u32 val;						\
									\
		asm volatile("mrc p15, " __stringify(op1)		\
			     ", %0, c" __stringify(crn)			\
			     ", c" __stringify(crm)			\
			     ", " __stringify(op2) "\n" : "=r" (val));	\
		((struct coproc_reg *)r)->val = val;			\
	}

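/*
 * For example, FUNCTION_FOR32(0, 0, 0, 0, MIDR) below expands to a
 * get_MIDR() helper that executes "mrc p15, 0, %0, c0, c0, 0" on the host
 * and stashes the result in the table entry's ->val field.
 */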
FUNCTION_FOR32(0, 0, 0, 0, MIDR)
FUNCTION_FOR32(0, 0, 0, 1, CTR)
FUNCTION_FOR32(0, 0, 0, 2, TCMTR)
FUNCTION_FOR32(0, 0, 0, 3, TLBTR)
FUNCTION_FOR32(0, 0, 0, 6, REVIDR)
FUNCTION_FOR32(0, 1, 0, 0, ID_PFR0)
FUNCTION_FOR32(0, 1, 0, 1, ID_PFR1)
FUNCTION_FOR32(0, 1, 0, 2, ID_DFR0)
FUNCTION_FOR32(0, 1, 0, 3, ID_AFR0)
FUNCTION_FOR32(0, 1, 0, 4, ID_MMFR0)
FUNCTION_FOR32(0, 1, 0, 5, ID_MMFR1)
FUNCTION_FOR32(0, 1, 0, 6, ID_MMFR2)
FUNCTION_FOR32(0, 1, 0, 7, ID_MMFR3)
FUNCTION_FOR32(0, 2, 0, 0, ID_ISAR0)
FUNCTION_FOR32(0, 2, 0, 1, ID_ISAR1)
FUNCTION_FOR32(0, 2, 0, 2, ID_ISAR2)
FUNCTION_FOR32(0, 2, 0, 3, ID_ISAR3)
FUNCTION_FOR32(0, 2, 0, 4, ID_ISAR4)
FUNCTION_FOR32(0, 2, 0, 5, ID_ISAR5)
FUNCTION_FOR32(0, 0, 1, 1, CLIDR)
FUNCTION_FOR32(0, 0, 1, 7, AIDR)

/* ->val is filled in by kvm_coproc_table_init() */
static struct coproc_reg invariant_cp15[] = {
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 0), is32, NULL, get_MIDR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 1), is32, NULL, get_CTR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 2), is32, NULL, get_TCMTR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR },
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR },

	{ CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 3), is32, NULL, get_ID_AFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 4), is32, NULL, get_ID_MMFR0 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 5), is32, NULL, get_ID_MMFR1 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 6), is32, NULL, get_ID_MMFR2 },
	{ CRn( 0), CRm( 1), Op1( 0), Op2( 7), is32, NULL, get_ID_MMFR3 },

	{ CRn( 0), CRm( 2), Op1( 0), Op2( 0), is32, NULL, get_ID_ISAR0 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 1), is32, NULL, get_ID_ISAR1 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 2), is32, NULL, get_ID_ISAR2 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 },
	{ CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 },

	{ CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR },
	{ CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },
};

/*
 * Reads a register value from a userspace address to a kernel
 * variable. Make sure that register size matches sizeof(*__val).
 */
static int reg_from_user(void *val, const void __user *uaddr, u64 id)
{
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

/*
 * Writes a register value to a userspace address from a kernel variable.
 * Make sure that register size matches sizeof(*__val).
 */
static int reg_to_user(void __user *uaddr, const void *val, u64 id)
{
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_invariant_cp15(u64 id, void __user *uaddr)
{
	struct coproc_params params;
	const struct coproc_reg *r;
	int ret;

	if (!index_to_params(id, &params))
		return -ENOENT;

	r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
	if (!r)
		return -ENOENT;

	ret = -ENOENT;
	if (KVM_REG_SIZE(id) == 4) {
		u32 val = r->val;

		ret = reg_to_user(uaddr, &val, id);
	} else if (KVM_REG_SIZE(id) == 8) {
		ret = reg_to_user(uaddr, &r->val, id);
	}
	return ret;
}

static int set_invariant_cp15(u64 id, void __user *uaddr)
{
	struct coproc_params params;
	const struct coproc_reg *r;
	int err;
	u64 val;

	if (!index_to_params(id, &params))
		return -ENOENT;
	r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
	if (!r)
		return -ENOENT;

	err = -ENOENT;
	if (KVM_REG_SIZE(id) == 4) {
		u32 val32;

		err = reg_from_user(&val32, uaddr, id);
		if (!err)
			val = val32;
	} else if (KVM_REG_SIZE(id) == 8) {
		err = reg_from_user(&val, uaddr, id);
	}
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}

static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure noone else changes CSSELR during this! */
	local_irq_disable();
	/* Put value into CSSELR */
	asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (csselr));
	isb();
	/* Read result out of CCSIDR */
	asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr));
	local_irq_enable();

	return ccsidr;
}

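/*
 * The "demux" register class exposes one CCSIDR value per valid CSSELR
 * index. Like the invariant registers, userspace can read them, but a set
 * only succeeds if the value written back matches the host's.
 */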
static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}

static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}

#ifdef CONFIG_VFPv3
static const int vfp_sysregs[] = { KVM_REG_ARM_VFP_FPEXC,
				   KVM_REG_ARM_VFP_FPSCR,
				   KVM_REG_ARM_VFP_FPINST,
				   KVM_REG_ARM_VFP_FPINST2,
				   KVM_REG_ARM_VFP_MVFR0,
				   KVM_REG_ARM_VFP_MVFR1,
				   KVM_REG_ARM_VFP_FPSID };

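/*
 * MVFR0's A_SIMD field advertises the size of the VFP register bank: a value
 * of 2 means the full set of 32 double-precision registers is implemented,
 * otherwise only 16 are.
 */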
static unsigned int num_fp_regs(void)
{
	if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) >> MVFR0_A_SIMD_BIT) == 2)
		return 32;
	else
		return 16;
}

static unsigned int num_vfp_regs(void)
{
	/* Normal FP regs + control regs. */
	return num_fp_regs() + ARRAY_SIZE(vfp_sysregs);
}

static int copy_vfp_regids(u64 __user *uindices)
{
	unsigned int i;
	const u64 u32reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP;
	const u64 u64reg = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;

	for (i = 0; i < num_fp_regs(); i++) {
		if (put_user((u64reg | KVM_REG_ARM_VFP_BASE_REG) + i,
			     uindices))
			return -EFAULT;
		uindices++;
	}

	for (i = 0; i < ARRAY_SIZE(vfp_sysregs); i++) {
		if (put_user(u32reg | vfp_sysregs[i], uindices))
			return -EFAULT;
		uindices++;
	}

	return num_vfp_regs();
}

static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
	u32 val;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	if (vfpid < num_fp_regs()) {
		if (KVM_REG_SIZE(id) != 8)
			return -ENOENT;
		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpregs[vfpid],
				   id);
	}

	/* FP control registers are all 32 bit. */
	if (KVM_REG_SIZE(id) != 4)
		return -ENOENT;

	switch (vfpid) {
	case KVM_REG_ARM_VFP_FPEXC:
		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpexc, id);
	case KVM_REG_ARM_VFP_FPSCR:
		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpscr, id);
	case KVM_REG_ARM_VFP_FPINST:
		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst, id);
	case KVM_REG_ARM_VFP_FPINST2:
		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst2, id);
	case KVM_REG_ARM_VFP_MVFR0:
		val = fmrx(MVFR0);
		return reg_to_user(uaddr, &val, id);
	case KVM_REG_ARM_VFP_MVFR1:
		val = fmrx(MVFR1);
		return reg_to_user(uaddr, &val, id);
	case KVM_REG_ARM_VFP_FPSID:
		val = fmrx(FPSID);
		return reg_to_user(uaddr, &val, id);
	default:
		return -ENOENT;
	}
}

static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
{
	u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
	u32 val;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	if (vfpid < num_fp_regs()) {
		if (KVM_REG_SIZE(id) != 8)
			return -ENOENT;
		return reg_from_user(&vcpu->arch.vfp_guest.fpregs[vfpid],
				     uaddr, id);
	}

	/* FP control registers are all 32 bit. */
	if (KVM_REG_SIZE(id) != 4)
		return -ENOENT;

	switch (vfpid) {
	case KVM_REG_ARM_VFP_FPEXC:
		return reg_from_user(&vcpu->arch.vfp_guest.fpexc, uaddr, id);
	case KVM_REG_ARM_VFP_FPSCR:
		return reg_from_user(&vcpu->arch.vfp_guest.fpscr, uaddr, id);
	case KVM_REG_ARM_VFP_FPINST:
		return reg_from_user(&vcpu->arch.vfp_guest.fpinst, uaddr, id);
	case KVM_REG_ARM_VFP_FPINST2:
		return reg_from_user(&vcpu->arch.vfp_guest.fpinst2, uaddr, id);
	/* These are invariant. */
	case KVM_REG_ARM_VFP_MVFR0:
		if (reg_from_user(&val, uaddr, id))
			return -EFAULT;
		if (val != fmrx(MVFR0))
			return -EINVAL;
		return 0;
	case KVM_REG_ARM_VFP_MVFR1:
		if (reg_from_user(&val, uaddr, id))
			return -EFAULT;
		if (val != fmrx(MVFR1))
			return -EINVAL;
		return 0;
	case KVM_REG_ARM_VFP_FPSID:
		if (reg_from_user(&val, uaddr, id))
			return -EFAULT;
		if (val != fmrx(FPSID))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}
#else /* !CONFIG_VFPv3 */
static unsigned int num_vfp_regs(void)
{
	return 0;
}

static int copy_vfp_regids(u64 __user *uindices)
{
	return 0;
}

static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	return -ENOENT;
}

static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
{
	return -ENOENT;
}
#endif /* !CONFIG_VFPv3 */

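/*
 * Entry points for the KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls: demux
 * (CCSIDR) and VFP ids are handled by the helpers above, anything found in
 * the cp15 tables is read from or written to vcpu->arch.cp15, and whatever
 * remains is checked against the invariant registers.
 */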
int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct coproc_reg *r;
	void __user *uaddr = (void __user *)(long)reg->addr;
	int ret;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
		return vfp_get_reg(vcpu, reg->id, uaddr);

	r = index_to_coproc_reg(vcpu, reg->id);
	if (!r)
		return get_invariant_cp15(reg->id, uaddr);

	ret = -ENOENT;
	if (KVM_REG_SIZE(reg->id) == 8) {
		u64 val;

		val = vcpu_cp15_reg64_get(vcpu, r);
		ret = reg_to_user(uaddr, &val, reg->id);
	} else if (KVM_REG_SIZE(reg->id) == 4) {
		ret = reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id);
	}

	return ret;
}

int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct coproc_reg *r;
	void __user *uaddr = (void __user *)(long)reg->addr;
	int ret;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
		return vfp_set_reg(vcpu, reg->id, uaddr);

	r = index_to_coproc_reg(vcpu, reg->id);
	if (!r)
		return set_invariant_cp15(reg->id, uaddr);

	ret = -ENOENT;
	if (KVM_REG_SIZE(reg->id) == 8) {
		u64 val;

		ret = reg_from_user(&val, uaddr, reg->id);
		if (!ret)
			vcpu_cp15_reg64_set(vcpu, r, val);
	} else if (KVM_REG_SIZE(reg->id) == 4) {
		ret = reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id);
	}

	return ret;
}

static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}

static u64 cp15_to_index(const struct coproc_reg *reg)
{
	u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT);
	if (reg->is_64) {
		val |= KVM_REG_SIZE_U64;
		val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
		/*
		 * CRn always denotes the primary coproc. reg. nr. for the
		 * in-kernel representation, but the user space API uses the
		 * CRm for the encoding, because it is modelled after the
		 * MRRC/MCRR instructions: see the ARM ARM rev. c page
		 * B3-1445
		 */
		val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT);
	} else {
		val |= KVM_REG_SIZE_U32;
		val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
		val |= (reg->Op2 << KVM_REG_ARM_32_OPC2_SHIFT);
		val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
		val |= (reg->CRn << KVM_REG_ARM_32_CRN_SHIFT);
	}
	return val;
}

static bool copy_reg_to_user(const struct coproc_reg *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(cp15_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}

/* Assumed ordered tables, see kvm_coproc_table_init. */
static int walk_cp15(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct coproc_reg *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, &num);
	end1 = i1 + num;
	i2 = cp15_regs;
	end2 = cp15_regs + ARRAY_SIZE(cp15_regs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	while (i1 || i2) {
		int cmp = cmp_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0) {
			/* Ignore registers we trap but don't save. */
			if (i1->reg) {
				if (!copy_reg_to_user(i1, &uind))
					return -EFAULT;
				total++;
			}
		} else {
			/* Ignore registers we trap but don't save. */
			if (i2->reg) {
				if (!copy_reg_to_user(i2, &uind))
					return -EFAULT;
				total++;
			}
		}

		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}

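/*
 * Note that walk_cp15() doubles as a counter: copy_reg_to_user() succeeds
 * without writing anything when the output pointer is NULL, which is how
 * kvm_arm_num_coproc_regs() sizes the register list below.
 */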
unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_cp15)
		+ num_demux_regs()
		+ num_vfp_regs()
		+ walk_cp15(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) {
		if (put_user(cp15_to_index(&invariant_cp15[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_cp15(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	err = copy_vfp_regids(uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}

void kvm_coproc_table_init(void)
{
	unsigned int i;

	/* Make sure tables are unique and in order. */
	for (i = 1; i < ARRAY_SIZE(cp15_regs); i++)
		BUG_ON(cmp_reg(&cp15_regs[i-1], &cp15_regs[i]) >= 0);

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++)
		invariant_cp15[i].reset(NULL, &invariant_cp15[i]);

	/*
	 * CLIDR format is awkward, so clean it up. See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (cache_levels));
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}

/**
 * kvm_reset_coprocs - sets cp15 registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct coproc_reg *table;

	/* Catch someone adding a register without putting in reset entry. */
	memset(vcpu->arch.cp15, 0x42, sizeof(vcpu->arch.cp15));

	/* Generic chip reset first (so target could override). */
	reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));

	table = get_target_table(vcpu->arch.target, &num);
	reset_coproc_regs(vcpu, table, num);

	for (num = 1; num < NR_CP15_REGS; num++)
		if (vcpu->arch.cp15[num] == 0x42424242)
			panic("Didn't reset vcpu->arch.cp15[%zi]", num);
}