Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, at tag v4.14
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/bsearch.h>
#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_mmu.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <trace/events/kvm.h>
#include <asm/vfp.h>
#include "../vfp/vfpinstr.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "coproc.h"


/******************************************************************************
 * Co-processor emulation
 *****************************************************************************/

static bool write_to_read_only(struct kvm_vcpu *vcpu,
                               const struct coproc_params *params)
{
        WARN_ONCE(1, "CP15 write to read-only register\n");
        print_cp_instr(params);
        kvm_inject_undefined(vcpu);
        return false;
}

static bool read_from_write_only(struct kvm_vcpu *vcpu,
                                 const struct coproc_params *params)
{
        WARN_ONCE(1, "CP15 read from write-only register\n");
        print_cp_instr(params);
        kvm_inject_undefined(vcpu);
        return false;
}

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/*
 * kvm_vcpu_arch.cp15 holds cp15 registers as an array of u32, but some
 * of the cp15 registers can be viewed either as a pair of two u32
 * registers or as one u64 register. The current u64 register encoding
 * is that the least significant u32 word is followed by the most
 * significant u32 word.
 */
static inline void vcpu_cp15_reg64_set(struct kvm_vcpu *vcpu,
                                       const struct coproc_reg *r,
                                       u64 val)
{
        vcpu_cp15(vcpu, r->reg) = val & 0xffffffff;
        vcpu_cp15(vcpu, r->reg + 1) = val >> 32;
}

static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu,
                                      const struct coproc_reg *r)
{
        u64 val;

        val = vcpu_cp15(vcpu, r->reg + 1);
        val = val << 32;
        val = val | vcpu_cp15(vcpu, r->reg);
        return val;
}
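
/*
 * For example, storing the 64-bit value 0x0000000100000002 through
 * vcpu_cp15_reg64_set() leaves 0x00000002 in vcpu_cp15(vcpu, r->reg)
 * (the least significant word) and 0x00000001 in
 * vcpu_cp15(vcpu, r->reg + 1); vcpu_cp15_reg64_get() reassembles the
 * same u64 from those two words.
 */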

int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        kvm_inject_undefined(vcpu);
        return 1;
}

int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        /*
         * We can get here if the host has been built without VFPv3
         * support, but the guest attempted a floating point operation.
         */
        kvm_inject_undefined(vcpu);
        return 1;
}

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        kvm_inject_undefined(vcpu);
        return 1;
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
        /*
         * Compute the guest MPIDR. We build a virtual cluster out of the
         * vcpu_id, but we read the 'U' bit from the underlying
         * hardware directly.
         */
        vcpu_cp15(vcpu, c0_MPIDR) = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) |
                                     ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) |
                                     (vcpu->vcpu_id & 3));
}
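
/*
 * With the encoding above, vcpus are grouped four to a virtual cluster:
 * vcpu_id 5, for instance, shows up in the guest's MPIDR with affinity
 * level 1 (cluster) = 1 and affinity level 0 (core) = 1.
 */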

/* TRM entries A7:4.3.31 A15:4.3.28 - RO WI */
static bool access_actlr(struct kvm_vcpu *vcpu,
                         const struct coproc_params *p,
                         const struct coproc_reg *r)
{
        if (p->is_write)
                return ignore_write(vcpu, p);

        *vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c1_ACTLR);
        return true;
}

/* TRM entries A7:4.3.56, A15:4.3.60 - R/O. */
static bool access_cbar(struct kvm_vcpu *vcpu,
                        const struct coproc_params *p,
                        const struct coproc_reg *r)
{
        if (p->is_write)
                return write_to_read_only(vcpu, p);
        return read_zero(vcpu, p);
}

/* TRM entries A7:4.3.49, A15:4.3.48 - R/O WI */
static bool access_l2ctlr(struct kvm_vcpu *vcpu,
                          const struct coproc_params *p,
                          const struct coproc_reg *r)
{
        if (p->is_write)
                return ignore_write(vcpu, p);

        *vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c9_L2CTLR);
        return true;
}

static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
        u32 l2ctlr, ncores;

        asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
        l2ctlr &= ~(3 << 24);
        ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
        /* How many cores in the current cluster and the next ones */
        ncores -= (vcpu->vcpu_id & ~3);
        /* Cap it to the maximum number of cores in a single cluster */
        ncores = min(ncores, 3U);
        l2ctlr |= (ncores & 3) << 24;

        vcpu_cp15(vcpu, c9_L2CTLR) = l2ctlr;
}
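
/*
 * Worked example: in a 4-vcpu guest, vcpu 1 sits in the first virtual
 * cluster, so ncores = (4 - 1) - 0 = 3 and bits [25:24] of the guest's
 * L2CTLR read back as 3, i.e. four cores present in the cluster.
 */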

static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
        u32 actlr;

        /* ACTLR contains SMP bit: make sure you create all cpus first! */
        asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
        /* Make the SMP bit consistent with the guest configuration */
        if (atomic_read(&vcpu->kvm->online_vcpus) > 1)
                actlr |= 1U << 6;
        else
                actlr &= ~(1U << 6);

        vcpu_cp15(vcpu, c1_ACTLR) = actlr;
}

/*
 * TRM entries: A7:4.3.50, A15:4.3.49
 * R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored).
 */
static bool access_l2ectlr(struct kvm_vcpu *vcpu,
                           const struct coproc_params *p,
                           const struct coproc_reg *r)
{
        if (p->is_write)
                return ignore_write(vcpu, p);

        *vcpu_reg(vcpu, p->Rt1) = 0;
        return true;
}

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
                        const struct coproc_params *p,
                        const struct coproc_reg *r)
{
        if (!p->is_write)
                return read_from_write_only(vcpu, p);

        kvm_set_way_flush(vcpu);
        return true;
}

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 *
 * Used by the cpu-specific code.
 */
bool access_vm_reg(struct kvm_vcpu *vcpu,
                   const struct coproc_params *p,
                   const struct coproc_reg *r)
{
        bool was_enabled = vcpu_has_cache_enabled(vcpu);

        BUG_ON(!p->is_write);

        vcpu_cp15(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt1);
        if (p->is_64bit)
                vcpu_cp15(vcpu, r->reg + 1) = *vcpu_reg(vcpu, p->Rt2);

        kvm_toggle_cache(vcpu, was_enabled);
        return true;
}

static bool access_gic_sgi(struct kvm_vcpu *vcpu,
                           const struct coproc_params *p,
                           const struct coproc_reg *r)
{
        u64 reg;

        if (!p->is_write)
                return read_from_write_only(vcpu, p);

        reg = (u64)*vcpu_reg(vcpu, p->Rt2) << 32;
        reg |= *vcpu_reg(vcpu, p->Rt1);

        vgic_v3_dispatch_sgi(vcpu, reg);

        return true;
}

static bool access_gic_sre(struct kvm_vcpu *vcpu,
                           const struct coproc_params *p,
                           const struct coproc_reg *r)
{
        if (p->is_write)
                return ignore_write(vcpu, p);

        *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;

        return true;
}

/*
 * We could trap ID_DFR0 and tell the guest we don't support performance
 * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
 * NAKed, so it will read the PMCR anyway.
 *
 * Therefore we tell the guest we have 0 counters. Unfortunately, we
 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 * all PM registers, which doesn't crash the guest kernel at least.
 */
static bool trap_raz_wi(struct kvm_vcpu *vcpu,
                        const struct coproc_params *p,
                        const struct coproc_reg *r)
{
        if (p->is_write)
                return ignore_write(vcpu, p);
        else
                return read_zero(vcpu, p);
}
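
/*
 * Concretely: with trap_raz_wi, a guest MRC of any of the PM registers
 * aliased below reads as zero, and a guest MCR write to them is
 * silently discarded.
 */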

#define access_pmcr trap_raz_wi
#define access_pmcntenset trap_raz_wi
#define access_pmcntenclr trap_raz_wi
#define access_pmovsr trap_raz_wi
#define access_pmselr trap_raz_wi
#define access_pmceid0 trap_raz_wi
#define access_pmceid1 trap_raz_wi
#define access_pmccntr trap_raz_wi
#define access_pmxevtyper trap_raz_wi
#define access_pmxevcntr trap_raz_wi
#define access_pmuserenr trap_raz_wi
#define access_pmintenset trap_raz_wi
#define access_pmintenclr trap_raz_wi

/*
 * Architected CP15 registers.
 * CRn denotes the primary register number, but is copied to the CRm in the
 * user space API for 64-bit register access in line with the terminology used
 * in the ARM ARM.
 * Important: Must be sorted ascending by CRn, CRm, Op1, Op2 and with 64-bit
 * registers preceding 32-bit ones.
 */
static const struct coproc_reg cp15_regs[] = {
        /* MPIDR: we use VMPIDR for guest access. */
        { CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32,
                        NULL, reset_mpidr, c0_MPIDR },

        /* CSSELR: swapped by interrupt.S. */
        { CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32,
                        NULL, reset_unknown, c0_CSSELR },

        /* ACTLR: trapped by HCR.TAC bit. */
        { CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32,
                        access_actlr, reset_actlr, c1_ACTLR },

        /* CPACR: swapped by interrupt.S. */
        { CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32,
                        NULL, reset_val, c1_CPACR, 0x00000000 },

        /* TTBR0/TTBR1/TTBCR: swapped by interrupt.S. */
        { CRm64( 2), Op1( 0), is64, access_vm_reg, reset_unknown64, c2_TTBR0 },
        { CRn(2), CRm( 0), Op1( 0), Op2( 0), is32,
                        access_vm_reg, reset_unknown, c2_TTBR0 },
        { CRn(2), CRm( 0), Op1( 0), Op2( 1), is32,
                        access_vm_reg, reset_unknown, c2_TTBR1 },
        { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
                        access_vm_reg, reset_val, c2_TTBCR, 0x00000000 },
        { CRm64( 2), Op1( 1), is64, access_vm_reg, reset_unknown64, c2_TTBR1 },

        /* DACR: swapped by interrupt.S. */
        { CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32,
                        access_vm_reg, reset_unknown, c3_DACR },

        /* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */
        { CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32,
                        access_vm_reg, reset_unknown, c5_DFSR },
        { CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32,
                        access_vm_reg, reset_unknown, c5_IFSR },
        { CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32,
                        access_vm_reg, reset_unknown, c5_ADFSR },
        { CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32,
                        access_vm_reg, reset_unknown, c5_AIFSR },

        /* DFAR/IFAR: swapped by interrupt.S. */
        { CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32,
                        access_vm_reg, reset_unknown, c6_DFAR },
        { CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32,
                        access_vm_reg, reset_unknown, c6_IFAR },

        /* PAR swapped by interrupt.S */
        { CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },

        /*
         * DC{C,I,CI}SW operations:
         */
        { CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw},
        { CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw},
        { CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw},
        /*
         * L2CTLR access (guest wants to know #CPUs).
         */
        { CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32,
                        access_l2ctlr, reset_l2ctlr, c9_L2CTLR },
        { CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr},

        /*
         * Dummy performance monitor implementation.
         */
        { CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr},
        { CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset},
        { CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr},
        { CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr},
        { CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr},
        { CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0},
        { CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1},
        { CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr},
        { CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper},
        { CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr},
        { CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr},
        { CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset},
        { CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr},

        /* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */
        { CRn(10), CRm( 2), Op1( 0), Op2( 0), is32,
                        access_vm_reg, reset_unknown, c10_PRRR},
        { CRn(10), CRm( 2), Op1( 0), Op2( 1), is32,
                        access_vm_reg, reset_unknown, c10_NMRR},

        /* AMAIR0/AMAIR1: swapped by interrupt.S. */
        { CRn(10), CRm( 3), Op1( 0), Op2( 0), is32,
                        access_vm_reg, reset_unknown, c10_AMAIR0},
        { CRn(10), CRm( 3), Op1( 0), Op2( 1), is32,
                        access_vm_reg, reset_unknown, c10_AMAIR1},

        /* ICC_SGI1R */
        { CRm64(12), Op1( 0), is64, access_gic_sgi},

        /* VBAR: swapped by interrupt.S. */
        { CRn(12), CRm( 0), Op1( 0), Op2( 0), is32,
                        NULL, reset_val, c12_VBAR, 0x00000000 },

        /* ICC_SRE */
        { CRn(12), CRm(12), Op1( 0), Op2(5), is32, access_gic_sre },

        /* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */
        { CRn(13), CRm( 0), Op1( 0), Op2( 1), is32,
                        access_vm_reg, reset_val, c13_CID, 0x00000000 },
        { CRn(13), CRm( 0), Op1( 0), Op2( 2), is32,
                        NULL, reset_unknown, c13_TID_URW },
        { CRn(13), CRm( 0), Op1( 0), Op2( 3), is32,
                        NULL, reset_unknown, c13_TID_URO },
        { CRn(13), CRm( 0), Op1( 0), Op2( 4), is32,
                        NULL, reset_unknown, c13_TID_PRIV },

        /* CNTKCTL: swapped by interrupt.S. */
        { CRn(14), CRm( 1), Op1( 0), Op2( 0), is32,
                        NULL, reset_val, c14_CNTKCTL, 0x00000000 },

        /* The Configuration Base Address Register. */
        { CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
};
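
/*
 * For instance, the 64-bit TTBR0 entry (CRm64(2), Op1 0) must precede
 * the 32-bit TTBR0 entry (CRn 2, CRm 0, Op1 0, Op2 0) above;
 * check_reg_table() below rejects any table that violates this
 * ordering at init time.
 */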

static int check_reg_table(const struct coproc_reg *table, unsigned int n)
{
        unsigned int i;

        for (i = 1; i < n; i++) {
                if (cmp_reg(&table[i-1], &table[i]) >= 0) {
                        kvm_err("reg table %p out of order (%d)\n", table, i - 1);
                        return 1;
                }
        }

        return 0;
}

/* Target specific emulation tables */
static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
{
        BUG_ON(check_reg_table(table->table, table->num));
        target_tables[table->target] = table;
}

/* Get specific register table for this target. */
static const struct coproc_reg *get_target_table(unsigned target, size_t *num)
{
        struct kvm_coproc_target_table *table;

        table = target_tables[target];
        *num = table->num;
        return table->table;
}

#define reg_to_match_value(x)                                   \
        ({                                                      \
                unsigned long val;                              \
                val = (x)->CRn << 11;                           \
                val |= (x)->CRm << 7;                           \
                val |= (x)->Op1 << 4;                           \
                val |= (x)->Op2 << 1;                           \
                val |= !(x)->is_64bit;                          \
                val;                                            \
        })
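
/*
 * Example: the 32-bit TTBCR (CRn 2, CRm 0, Op1 0, Op2 2) packs to
 * (2 << 11) | (0 << 7) | (0 << 4) | (2 << 1) | 1 = 0x1005, while the
 * 64-bit TTBR0 (CRn 2 via CRm64, everything else 0) packs to
 * 2 << 11 = 0x1000, so it sorts first, matching the ordering rule
 * required of the tables above.
 */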

static int match_reg(const void *key, const void *elt)
{
        const unsigned long pval = (unsigned long)key;
        const struct coproc_reg *r = elt;

        return pval - reg_to_match_value(r);
}

static const struct coproc_reg *find_reg(const struct coproc_params *params,
                                         const struct coproc_reg table[],
                                         unsigned int num)
{
        unsigned long pval = reg_to_match_value(params);

        return bsearch((void *)pval, table, num, sizeof(table[0]), match_reg);
}

static int emulate_cp15(struct kvm_vcpu *vcpu,
                        const struct coproc_params *params)
{
        size_t num;
        const struct coproc_reg *table, *r;

        trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn,
                                   params->CRm, params->Op2, params->is_write);

        table = get_target_table(vcpu->arch.target, &num);

        /* Search target-specific then generic table. */
        r = find_reg(params, table, num);
        if (!r)
                r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));

        if (likely(r)) {
                /* If we don't have an accessor, we should never get here! */
                BUG_ON(!r->access);

                if (likely(r->access(vcpu, params, r))) {
                        /* Skip instruction, since it was emulated */
                        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
                }
        } else {
                /* If access function fails, it should complain. */
                kvm_err("Unsupported guest CP15 access at: %08lx\n",
                        *vcpu_pc(vcpu));
                print_cp_instr(params);
                kvm_inject_undefined(vcpu);
        }

        return 1;
}

static struct coproc_params decode_64bit_hsr(struct kvm_vcpu *vcpu)
{
        struct coproc_params params;

        params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
        params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
        params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
        params.is_64bit = true;

        params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf;
        params.Op2 = 0;
        params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
        params.CRm = 0;

        return params;
}
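
/*
 * Note that for a 64-bit (MCRR/MRRC) access the CRm field of the HSR
 * (bits [4:1]) is stored in params.CRn: the register tables above put
 * the CRm64() primary number in the CRn slot for 64-bit registers, so
 * this is what find_reg() must match against.
 */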

/**
 * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run: The kvm_run struct
 */
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct coproc_params params = decode_64bit_hsr(vcpu);

        return emulate_cp15(vcpu, &params);
}

/**
 * kvm_handle_cp14_64 -- handles a mrrc/mcrr trap on a guest CP14 access
 * @vcpu: The VCPU pointer
 * @run: The kvm_run struct
 */
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct coproc_params params = decode_64bit_hsr(vcpu);

        /* raz_wi cp14 */
        trap_raz_wi(vcpu, &params, NULL);

        /* handled */
        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
        return 1;
}

static void reset_coproc_regs(struct kvm_vcpu *vcpu,
                              const struct coproc_reg *table, size_t num)
{
        unsigned long i;

        for (i = 0; i < num; i++)
                if (table[i].reset)
                        table[i].reset(vcpu, &table[i]);
}

static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu)
{
        struct coproc_params params;

        params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf;
        params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf;
        params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0);
        params.is_64bit = false;

        params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf;
        params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 14) & 0x7;
        params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7;
        params.Rt2 = 0;

        return params;
}

/**
 * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run: The kvm_run struct
 */
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct coproc_params params = decode_32bit_hsr(vcpu);

        return emulate_cp15(vcpu, &params);
}

/**
 * kvm_handle_cp14_32 -- handles a mrc/mcr trap on a guest CP14 access
 * @vcpu: The VCPU pointer
 * @run: The kvm_run struct
 */
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        struct coproc_params params = decode_32bit_hsr(vcpu);

        /* raz_wi cp14 */
        trap_raz_wi(vcpu, &params, NULL);

        /* handled */
        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
        return 1;
}

/******************************************************************************
 * Userspace API
 *****************************************************************************/

static bool index_to_params(u64 id, struct coproc_params *params)
{
        switch (id & KVM_REG_SIZE_MASK) {
        case KVM_REG_SIZE_U32:
                /* Any unused index bits mean it's not valid. */
                if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
                           | KVM_REG_ARM_COPROC_MASK
                           | KVM_REG_ARM_32_CRN_MASK
                           | KVM_REG_ARM_CRM_MASK
                           | KVM_REG_ARM_OPC1_MASK
                           | KVM_REG_ARM_32_OPC2_MASK))
                        return false;

                params->is_64bit = false;
                params->CRn = ((id & KVM_REG_ARM_32_CRN_MASK)
                               >> KVM_REG_ARM_32_CRN_SHIFT);
                params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
                               >> KVM_REG_ARM_CRM_SHIFT);
                params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
                               >> KVM_REG_ARM_OPC1_SHIFT);
                params->Op2 = ((id & KVM_REG_ARM_32_OPC2_MASK)
                               >> KVM_REG_ARM_32_OPC2_SHIFT);
                return true;
        case KVM_REG_SIZE_U64:
                /* Any unused index bits mean it's not valid. */
                if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
                           | KVM_REG_ARM_COPROC_MASK
                           | KVM_REG_ARM_CRM_MASK
                           | KVM_REG_ARM_OPC1_MASK))
                        return false;
                params->is_64bit = true;
                /* CRm to CRn: see cp15_to_index for details */
                params->CRn = ((id & KVM_REG_ARM_CRM_MASK)
                               >> KVM_REG_ARM_CRM_SHIFT);
                params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
                               >> KVM_REG_ARM_OPC1_SHIFT);
                params->Op2 = 0;
                params->CRm = 0;
                return true;
        default:
                return false;
        }
}
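
/*
 * For example, the 32-bit TTBCR (CRn 2, CRm 0, Op1 0, Op2 2) is exposed
 * to userspace as the index
 *   KVM_REG_ARM | KVM_REG_SIZE_U32 | (15 << KVM_REG_ARM_COPROC_SHIFT) |
 *   (2 << KVM_REG_ARM_32_CRN_SHIFT) | (2 << KVM_REG_ARM_32_OPC2_SHIFT)
 * (see cp15_to_index() below), which index_to_params() decodes back
 * into the coproc_params above.
 */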

/* Decode an index value, and find the cp15 coproc_reg entry. */
static const struct coproc_reg *index_to_coproc_reg(struct kvm_vcpu *vcpu,
                                                    u64 id)
{
        size_t num;
        const struct coproc_reg *table, *r;
        struct coproc_params params;

        /* We only do cp15 for now. */
        if ((id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT != 15)
                return NULL;

        if (!index_to_params(id, &params))
                return NULL;

        table = get_target_table(vcpu->arch.target, &num);
        r = find_reg(&params, table, num);
        if (!r)
                r = find_reg(&params, cp15_regs, ARRAY_SIZE(cp15_regs));

        /* Not saved in the cp15 array? */
        if (r && !r->reg)
                r = NULL;

        return r;
}

/*
 * These are the invariant cp15 registers: we let the guest see the host
 * versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

/* Unfortunately, there's no register-argument for mrc, so generate one
 * accessor function per register. */
#define FUNCTION_FOR32(crn, crm, op1, op2, name)                        \
        static void get_##name(struct kvm_vcpu *v,                     \
                               const struct coproc_reg *r)             \
        {                                                               \
                u32 val;                                                \
                                                                        \
                asm volatile("mrc p15, " __stringify(op1)               \
                             ", %0, c" __stringify(crn)                 \
                             ", c" __stringify(crm)                     \
                             ", " __stringify(op2) "\n" : "=r" (val));  \
                ((struct coproc_reg *)r)->val = val;                    \
        }

FUNCTION_FOR32(0, 0, 0, 0, MIDR)
FUNCTION_FOR32(0, 0, 0, 1, CTR)
FUNCTION_FOR32(0, 0, 0, 2, TCMTR)
FUNCTION_FOR32(0, 0, 0, 3, TLBTR)
FUNCTION_FOR32(0, 0, 0, 6, REVIDR)
FUNCTION_FOR32(0, 1, 0, 0, ID_PFR0)
FUNCTION_FOR32(0, 1, 0, 1, ID_PFR1)
FUNCTION_FOR32(0, 1, 0, 2, ID_DFR0)
FUNCTION_FOR32(0, 1, 0, 3, ID_AFR0)
FUNCTION_FOR32(0, 1, 0, 4, ID_MMFR0)
FUNCTION_FOR32(0, 1, 0, 5, ID_MMFR1)
FUNCTION_FOR32(0, 1, 0, 6, ID_MMFR2)
FUNCTION_FOR32(0, 1, 0, 7, ID_MMFR3)
FUNCTION_FOR32(0, 2, 0, 0, ID_ISAR0)
FUNCTION_FOR32(0, 2, 0, 1, ID_ISAR1)
FUNCTION_FOR32(0, 2, 0, 2, ID_ISAR2)
FUNCTION_FOR32(0, 2, 0, 3, ID_ISAR3)
FUNCTION_FOR32(0, 2, 0, 4, ID_ISAR4)
FUNCTION_FOR32(0, 2, 0, 5, ID_ISAR5)
FUNCTION_FOR32(0, 0, 1, 1, CLIDR)
FUNCTION_FOR32(0, 0, 1, 7, AIDR)
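
/*
 * FUNCTION_FOR32(0, 0, 0, 0, MIDR), for instance, expands to a
 * get_MIDR() helper that executes "mrc p15, 0, %0, c0, c0, 0" and
 * stashes the host's MIDR value in the table entry's ->val field.
 */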

/* ->val is filled in by kvm_invariant_coproc_table_init() */
static struct coproc_reg invariant_cp15[] = {
        { CRn( 0), CRm( 0), Op1( 0), Op2( 0), is32, NULL, get_MIDR },
        { CRn( 0), CRm( 0), Op1( 0), Op2( 1), is32, NULL, get_CTR },
        { CRn( 0), CRm( 0), Op1( 0), Op2( 2), is32, NULL, get_TCMTR },
        { CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR },
        { CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR },

        { CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR },
        { CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },

        { CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 3), is32, NULL, get_ID_AFR0 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 4), is32, NULL, get_ID_MMFR0 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 5), is32, NULL, get_ID_MMFR1 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 6), is32, NULL, get_ID_MMFR2 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 7), is32, NULL, get_ID_MMFR3 },

        { CRn( 0), CRm( 2), Op1( 0), Op2( 0), is32, NULL, get_ID_ISAR0 },
        { CRn( 0), CRm( 2), Op1( 0), Op2( 1), is32, NULL, get_ID_ISAR1 },
        { CRn( 0), CRm( 2), Op1( 0), Op2( 2), is32, NULL, get_ID_ISAR2 },
        { CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 },
        { CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 },
        { CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 },
};

/*
 * Reads a register value from a userspace address to a kernel
 * variable. Make sure that register size matches sizeof(*val).
 */
static int reg_from_user(void *val, const void __user *uaddr, u64 id)
{
        if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
                return -EFAULT;
        return 0;
}

/*
 * Writes a register value to a userspace address from a kernel variable.
 * Make sure that register size matches sizeof(*val).
 */
static int reg_to_user(void __user *uaddr, const void *val, u64 id)
{
        if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
                return -EFAULT;
        return 0;
}

static int get_invariant_cp15(u64 id, void __user *uaddr)
{
        struct coproc_params params;
        const struct coproc_reg *r;
        int ret;

        if (!index_to_params(id, &params))
                return -ENOENT;

        r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
        if (!r)
                return -ENOENT;

        ret = -ENOENT;
        if (KVM_REG_SIZE(id) == 4) {
                u32 val = r->val;

                ret = reg_to_user(uaddr, &val, id);
        } else if (KVM_REG_SIZE(id) == 8) {
                ret = reg_to_user(uaddr, &r->val, id);
        }
        return ret;
}

static int set_invariant_cp15(u64 id, void __user *uaddr)
{
        struct coproc_params params;
        const struct coproc_reg *r;
        int err;
        u64 val;

        if (!index_to_params(id, &params))
                return -ENOENT;
        r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
        if (!r)
                return -ENOENT;

        err = -ENOENT;
        if (KVM_REG_SIZE(id) == 4) {
                u32 val32;

                err = reg_from_user(&val32, uaddr, id);
                if (!err)
                        val = val32;
        } else if (KVM_REG_SIZE(id) == 8) {
                err = reg_from_user(&val, uaddr, id);
        }
        if (err)
                return err;

        /* This is what we mean by invariant: you can't change it. */
        if (r->val != val)
                return -EINVAL;

        return 0;
}

static bool is_valid_cache(u32 val)
{
        u32 level, ctype;

        if (val >= CSSELR_MAX)
                return false;

        /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
        level = (val >> 1);
        ctype = (cache_levels >> (level * 3)) & 7;

        switch (ctype) {
        case 0: /* No cache */
                return false;
        case 1: /* Instruction cache only */
                return (val & 1);
        case 2: /* Data cache only */
        case 4: /* Unified cache */
                return !(val & 1);
        case 3: /* Separate instruction and data caches */
                return true;
        default: /* Reserved: we can't know instruction or data. */
                return false;
        }
}
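
/*
 * Example: CSSELR value 1 (level bits 0, InD bit set) selects the L1
 * instruction cache, so it is only valid when CLIDR reports Ctype1 as
 * "instruction only" (1) or "separate instruction and data" (3).
 */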

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
        u32 ccsidr;

        /* Make sure no one else changes CSSELR during this! */
        local_irq_disable();
        /* Put value into CSSELR */
        asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (csselr));
        isb();
        /* Read result out of CCSIDR */
        asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr));
        local_irq_enable();

        return ccsidr;
}

static int demux_c15_get(u64 id, void __user *uaddr)
{
        u32 val;
        u32 __user *uval = uaddr;

        /* Fail if we have unknown bits set. */
        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
                return -ENOENT;

        switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
        case KVM_REG_ARM_DEMUX_ID_CCSIDR:
                if (KVM_REG_SIZE(id) != 4)
                        return -ENOENT;
                val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
                        >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
                if (!is_valid_cache(val))
                        return -ENOENT;

                return put_user(get_ccsidr(val), uval);
        default:
                return -ENOENT;
        }
}

static int demux_c15_set(u64 id, void __user *uaddr)
{
        u32 val, newval;
        u32 __user *uval = uaddr;

        /* Fail if we have unknown bits set. */
        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
                return -ENOENT;

        switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
        case KVM_REG_ARM_DEMUX_ID_CCSIDR:
                if (KVM_REG_SIZE(id) != 4)
                        return -ENOENT;
                val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
                        >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
                if (!is_valid_cache(val))
                        return -ENOENT;

                if (get_user(newval, uval))
                        return -EFAULT;

                /* This is also invariant: you can't change it. */
                if (newval != get_ccsidr(val))
                        return -EINVAL;
                return 0;
        default:
                return -ENOENT;
        }
}

#ifdef CONFIG_VFPv3
static const int vfp_sysregs[] = { KVM_REG_ARM_VFP_FPEXC,
                                   KVM_REG_ARM_VFP_FPSCR,
                                   KVM_REG_ARM_VFP_FPINST,
                                   KVM_REG_ARM_VFP_FPINST2,
                                   KVM_REG_ARM_VFP_MVFR0,
                                   KVM_REG_ARM_VFP_MVFR1,
                                   KVM_REG_ARM_VFP_FPSID };

static unsigned int num_fp_regs(void)
{
        if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) >> MVFR0_A_SIMD_BIT) == 2)
                return 32;
        else
                return 16;
}

static unsigned int num_vfp_regs(void)
{
        /* Normal FP regs + control regs. */
        return num_fp_regs() + ARRAY_SIZE(vfp_sysregs);
}
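
/*
 * Example: on a VFPv3-D32 host, MVFR0's A_SIMD field reads 2, so the
 * guest is offered all 32 double-precision registers plus the seven
 * control registers listed above; a D16-only host offers 16 + 7.
 */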

static int copy_vfp_regids(u64 __user *uindices)
{
        unsigned int i;
        const u64 u32reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP;
        const u64 u64reg = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP;

        for (i = 0; i < num_fp_regs(); i++) {
                if (put_user((u64reg | KVM_REG_ARM_VFP_BASE_REG) + i,
                             uindices))
                        return -EFAULT;
                uindices++;
        }

        for (i = 0; i < ARRAY_SIZE(vfp_sysregs); i++) {
                if (put_user(u32reg | vfp_sysregs[i], uindices))
                        return -EFAULT;
                uindices++;
        }

        return num_vfp_regs();
}

static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
        u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
        u32 val;

        /* Fail if we have unknown bits set. */
        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
                return -ENOENT;

        if (vfpid < num_fp_regs()) {
                if (KVM_REG_SIZE(id) != 8)
                        return -ENOENT;
                return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpregs[vfpid],
                                   id);
        }

        /* FP control registers are all 32 bit. */
        if (KVM_REG_SIZE(id) != 4)
                return -ENOENT;

        switch (vfpid) {
        case KVM_REG_ARM_VFP_FPEXC:
                return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpexc, id);
        case KVM_REG_ARM_VFP_FPSCR:
                return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpscr, id);
        case KVM_REG_ARM_VFP_FPINST:
                return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst, id);
        case KVM_REG_ARM_VFP_FPINST2:
                return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst2, id);
        case KVM_REG_ARM_VFP_MVFR0:
                val = fmrx(MVFR0);
                return reg_to_user(uaddr, &val, id);
        case KVM_REG_ARM_VFP_MVFR1:
                val = fmrx(MVFR1);
                return reg_to_user(uaddr, &val, id);
        case KVM_REG_ARM_VFP_FPSID:
                val = fmrx(FPSID);
                return reg_to_user(uaddr, &val, id);
        default:
                return -ENOENT;
        }
}

static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
{
        u32 vfpid = (id & KVM_REG_ARM_VFP_MASK);
        u32 val;

        /* Fail if we have unknown bits set. */
        if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
                   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
                return -ENOENT;

        if (vfpid < num_fp_regs()) {
                if (KVM_REG_SIZE(id) != 8)
                        return -ENOENT;
                return reg_from_user(&vcpu->arch.ctxt.vfp.fpregs[vfpid],
                                     uaddr, id);
        }

        /* FP control registers are all 32 bit. */
        if (KVM_REG_SIZE(id) != 4)
                return -ENOENT;

        switch (vfpid) {
        case KVM_REG_ARM_VFP_FPEXC:
                return reg_from_user(&vcpu->arch.ctxt.vfp.fpexc, uaddr, id);
        case KVM_REG_ARM_VFP_FPSCR:
                return reg_from_user(&vcpu->arch.ctxt.vfp.fpscr, uaddr, id);
        case KVM_REG_ARM_VFP_FPINST:
                return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst, uaddr, id);
        case KVM_REG_ARM_VFP_FPINST2:
                return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst2, uaddr, id);
        /* These are invariant. */
        case KVM_REG_ARM_VFP_MVFR0:
                if (reg_from_user(&val, uaddr, id))
                        return -EFAULT;
                if (val != fmrx(MVFR0))
                        return -EINVAL;
                return 0;
        case KVM_REG_ARM_VFP_MVFR1:
                if (reg_from_user(&val, uaddr, id))
                        return -EFAULT;
                if (val != fmrx(MVFR1))
                        return -EINVAL;
                return 0;
        case KVM_REG_ARM_VFP_FPSID:
                if (reg_from_user(&val, uaddr, id))
                        return -EFAULT;
                if (val != fmrx(FPSID))
                        return -EINVAL;
                return 0;
        default:
                return -ENOENT;
        }
}
#else /* !CONFIG_VFPv3 */
static unsigned int num_vfp_regs(void)
{
        return 0;
}

static int copy_vfp_regids(u64 __user *uindices)
{
        return 0;
}

static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
        return -ENOENT;
}

static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr)
{
        return -ENOENT;
}
#endif /* !CONFIG_VFPv3 */

int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        const struct coproc_reg *r;
        void __user *uaddr = (void __user *)(long)reg->addr;
        int ret;

        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
                return demux_c15_get(reg->id, uaddr);

        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
                return vfp_get_reg(vcpu, reg->id, uaddr);

        r = index_to_coproc_reg(vcpu, reg->id);
        if (!r)
                return get_invariant_cp15(reg->id, uaddr);

        ret = -ENOENT;
        if (KVM_REG_SIZE(reg->id) == 8) {
                u64 val;

                val = vcpu_cp15_reg64_get(vcpu, r);
                ret = reg_to_user(uaddr, &val, reg->id);
        } else if (KVM_REG_SIZE(reg->id) == 4) {
                ret = reg_to_user(uaddr, &vcpu_cp15(vcpu, r->reg), reg->id);
        }

        return ret;
}

int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
        const struct coproc_reg *r;
        void __user *uaddr = (void __user *)(long)reg->addr;
        int ret;

        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
                return demux_c15_set(reg->id, uaddr);

        if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP)
                return vfp_set_reg(vcpu, reg->id, uaddr);

        r = index_to_coproc_reg(vcpu, reg->id);
        if (!r)
                return set_invariant_cp15(reg->id, uaddr);

        ret = -ENOENT;
        if (KVM_REG_SIZE(reg->id) == 8) {
                u64 val;

                ret = reg_from_user(&val, uaddr, reg->id);
                if (!ret)
                        vcpu_cp15_reg64_set(vcpu, r, val);
        } else if (KVM_REG_SIZE(reg->id) == 4) {
                ret = reg_from_user(&vcpu_cp15(vcpu, r->reg), uaddr, reg->id);
        }

        return ret;
}
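
/*
 * kvm_arm_coproc_get_reg() and kvm_arm_coproc_set_reg() above back the
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG vcpu ioctls for coprocessor state.
 * A minimal userspace sketch (illustrative only; "index" would come
 * from KVM_GET_REG_LIST):
 *
 *	u32 value;
 *	struct kvm_one_reg one_reg = {
 *		.id   = index,
 *		.addr = (u64)(unsigned long)&value,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &one_reg);
 */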

static unsigned int num_demux_regs(void)
{
        unsigned int i, count = 0;

        for (i = 0; i < CSSELR_MAX; i++)
                if (is_valid_cache(i))
                        count++;

        return count;
}

static int write_demux_regids(u64 __user *uindices)
{
        u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
        unsigned int i;

        val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
        for (i = 0; i < CSSELR_MAX; i++) {
                if (!is_valid_cache(i))
                        continue;
                if (put_user(val | i, uindices))
                        return -EFAULT;
                uindices++;
        }
        return 0;
}

static u64 cp15_to_index(const struct coproc_reg *reg)
{
        u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT);

        if (reg->is_64bit) {
                val |= KVM_REG_SIZE_U64;
                val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
                /*
                 * CRn always denotes the primary coproc. reg. nr. for the
                 * in-kernel representation, but the user space API uses the
                 * CRm for the encoding, because it is modelled after the
                 * MRRC/MCRR instructions: see the ARM ARM rev. c page
                 * B3-1445
                 */
                val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT);
        } else {
                val |= KVM_REG_SIZE_U32;
                val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
                val |= (reg->Op2 << KVM_REG_ARM_32_OPC2_SHIFT);
                val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
                val |= (reg->CRn << KVM_REG_ARM_32_CRN_SHIFT);
        }
        return val;
}

static bool copy_reg_to_user(const struct coproc_reg *reg, u64 __user **uind)
{
        if (!*uind)
                return true;

        if (put_user(cp15_to_index(reg), *uind))
                return false;

        (*uind)++;
        return true;
}

/* Assumed ordered tables, see kvm_coproc_table_init. */
static int walk_cp15(struct kvm_vcpu *vcpu, u64 __user *uind)
{
        const struct coproc_reg *i1, *i2, *end1, *end2;
        unsigned int total = 0;
        size_t num;

        /* We check for duplicates here, to allow arch-specific overrides. */
        i1 = get_target_table(vcpu->arch.target, &num);
        end1 = i1 + num;
        i2 = cp15_regs;
        end2 = cp15_regs + ARRAY_SIZE(cp15_regs);

        BUG_ON(i1 == end1 || i2 == end2);

        /* Walk carefully, as both tables may refer to the same register. */
        while (i1 || i2) {
                int cmp = cmp_reg(i1, i2);
                /* target-specific overrides generic entry. */
                if (cmp <= 0) {
                        /* Ignore registers we trap but don't save. */
                        if (i1->reg) {
                                if (!copy_reg_to_user(i1, &uind))
                                        return -EFAULT;
                                total++;
                        }
                } else {
                        /* Ignore registers we trap but don't save. */
                        if (i2->reg) {
                                if (!copy_reg_to_user(i2, &uind))
                                        return -EFAULT;
                                total++;
                        }
                }

                if (cmp <= 0 && ++i1 == end1)
                        i1 = NULL;
                if (cmp >= 0 && ++i2 == end2)
                        i2 = NULL;
        }
        return total;
}

unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu)
{
        return ARRAY_SIZE(invariant_cp15)
                + num_demux_regs()
                + num_vfp_regs()
                + walk_cp15(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
        unsigned int i;
        int err;

        /* Then give them all the invariant registers' indices. */
        for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) {
                if (put_user(cp15_to_index(&invariant_cp15[i]), uindices))
                        return -EFAULT;
                uindices++;
        }

        err = walk_cp15(vcpu, uindices);
        if (err < 0)
                return err;
        uindices += err;

        err = copy_vfp_regids(uindices);
        if (err < 0)
                return err;
        uindices += err;

        return write_demux_regids(uindices);
}

void kvm_coproc_table_init(void)
{
        unsigned int i;

        /* Make sure tables are unique and in order. */
        BUG_ON(check_reg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
        BUG_ON(check_reg_table(invariant_cp15, ARRAY_SIZE(invariant_cp15)));

        /* We abuse the reset function to overwrite the table itself. */
        for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++)
                invariant_cp15[i].reset(NULL, &invariant_cp15[i]);

        /*
         * CLIDR format is awkward, so clean it up. See ARM B4.1.20:
         *
         *   If software reads the Cache Type fields from Ctype1
         *   upwards, once it has seen a value of 0b000, no caches
         *   exist at further-out levels of the hierarchy. So, for
         *   example, if Ctype3 is the first Cache Type field with a
         *   value of 0b000, the values of Ctype4 to Ctype7 must be
         *   ignored.
         */
        asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (cache_levels));
        for (i = 0; i < 7; i++)
                if (((cache_levels >> (i*3)) & 7) == 0)
                        break;
        /* Clear all higher bits. */
        cache_levels &= (1 << (i*3))-1;
}

/**
 * kvm_reset_coprocs - sets cp15 registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
{
        size_t num;
        const struct coproc_reg *table;

        /* Catch someone adding a register without putting in a reset entry. */
        memset(vcpu->arch.ctxt.cp15, 0x42, sizeof(vcpu->arch.ctxt.cp15));

        /* Generic chip reset first (so target could override). */
        reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));

        table = get_target_table(vcpu->arch.target, &num);
        reset_coproc_regs(vcpu, table, num);

        for (num = 1; num < NR_CP15_REGS; num++)
                if (vcpu_cp15(vcpu, num) == 0x42424242)
                        panic("Didn't reset vcpu_cp15(vcpu, %zi)", num);
}