Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: ARM: User space API for getting/setting co-proc registers

The following three ioctls are implemented:
- KVM_GET_REG_LIST
- KVM_GET_ONE_REG
- KVM_SET_ONE_REG

Now that we have a table for all the cp15 registers, we can drive a
generic API.

The register IDs carry the following encoding:

ARM registers are mapped using the lower 32 bits. The upper 16 bits of
that range hold the register group type, or coprocessor number:

ARM 32-bit CP15 registers have the following id bit patterns:
0x4002 0000 000F <zero:1> <crn:4> <crm:4> <opc1:4> <opc2:3>

ARM 64-bit CP15 registers have the following id bit patterns:
0x4003 0000 000F <zero:1> <zero:4> <crm:4> <opc1:4> <zero:3>

For future-proofing, we need to tell QEMU about the CP15 registers the
host lets the guest access.

It will need this information to restore a current guest on a future
CPU, or perhaps on a future KVM which allows some of these to be changed.

We use a separate table for these, as they're only for the userspace API.

Reviewed-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Christoffer Dall <c.dall@virtualopensystems.com>

+350 -4
+5
Documentation/virtual/kvm/api.txt
··· 1799 1799 ARM core registers have the following id bit patterns: 1800 1800 0x4002 0000 0010 <index into the kvm_regs struct:16> 1801 1801 1802 + ARM 32-bit CP15 registers have the following id bit patterns: 1803 + 0x4002 0000 000F <zero:1> <crn:4> <crm:4> <opc1:4> <opc2:3> 1804 + 1805 + ARM 64-bit CP15 registers have the following id bit patterns: 1806 + 0x4003 0000 000F <zero:1> <zero:4> <crm:4> <opc1:4> <zero:3> 1802 1807 1803 1808 1804 1809 4.69 KVM_GET_ONE_REG
+9
arch/arm/include/asm/kvm_coproc.h
··· 34 34 int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run); 35 35 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run); 36 36 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run); 37 + 38 + unsigned long kvm_arm_num_guest_msrs(struct kvm_vcpu *vcpu); 39 + int kvm_arm_copy_msrindices(struct kvm_vcpu *vcpu, u64 __user *uindices); 37 40 void kvm_coproc_table_init(void); 41 + 42 + struct kvm_one_reg; 43 + int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices); 44 + int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); 45 + int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); 46 + unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu); 38 47 #endif /* __ARM_KVM_COPROC_H__ */
+4
arch/arm/include/asm/kvm_host.h
··· 27 27 #define KVM_MEMORY_SLOTS 32 28 28 #define KVM_PRIVATE_MEM_SLOTS 4 29 29 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 30 + #define KVM_HAVE_ONE_REG 30 31 31 32 #define KVM_VCPU_MAX_FEATURES 0 32 33 ··· 137 136 int kvm_unmap_hva_range(struct kvm *kvm, 138 137 unsigned long start, unsigned long end); 139 138 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); 139 + 140 + unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); 141 + int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); 140 142 141 143 /* We do not have shadow page tables, hence the empty hooks */ 142 144 static inline int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+327
arch/arm/kvm/coproc.c
··· 18 18 */ 19 19 #include <linux/mm.h> 20 20 #include <linux/kvm_host.h> 21 + #include <linux/uaccess.h> 21 22 #include <asm/kvm_arm.h> 22 23 #include <asm/kvm_host.h> 23 24 #include <asm/kvm_emulate.h> ··· 348 347 return emulate_cp15(vcpu, &params); 349 348 } 350 349 350 + /****************************************************************************** 351 + * Userspace API 352 + *****************************************************************************/ 353 + 354 + static bool index_to_params(u64 id, struct coproc_params *params) 355 + { 356 + switch (id & KVM_REG_SIZE_MASK) { 357 + case KVM_REG_SIZE_U32: 358 + /* Any unused index bits means it's not valid. */ 359 + if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK 360 + | KVM_REG_ARM_COPROC_MASK 361 + | KVM_REG_ARM_32_CRN_MASK 362 + | KVM_REG_ARM_CRM_MASK 363 + | KVM_REG_ARM_OPC1_MASK 364 + | KVM_REG_ARM_32_OPC2_MASK)) 365 + return false; 366 + 367 + params->is_64bit = false; 368 + params->CRn = ((id & KVM_REG_ARM_32_CRN_MASK) 369 + >> KVM_REG_ARM_32_CRN_SHIFT); 370 + params->CRm = ((id & KVM_REG_ARM_CRM_MASK) 371 + >> KVM_REG_ARM_CRM_SHIFT); 372 + params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK) 373 + >> KVM_REG_ARM_OPC1_SHIFT); 374 + params->Op2 = ((id & KVM_REG_ARM_32_OPC2_MASK) 375 + >> KVM_REG_ARM_32_OPC2_SHIFT); 376 + return true; 377 + case KVM_REG_SIZE_U64: 378 + /* Any unused index bits means it's not valid. */ 379 + if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK 380 + | KVM_REG_ARM_COPROC_MASK 381 + | KVM_REG_ARM_CRM_MASK 382 + | KVM_REG_ARM_OPC1_MASK)) 383 + return false; 384 + params->is_64bit = true; 385 + params->CRm = ((id & KVM_REG_ARM_CRM_MASK) 386 + >> KVM_REG_ARM_CRM_SHIFT); 387 + params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK) 388 + >> KVM_REG_ARM_OPC1_SHIFT); 389 + params->Op2 = 0; 390 + params->CRn = 0; 391 + return true; 392 + default: 393 + return false; 394 + } 395 + } 396 + 397 + /* Decode an index value, and find the cp15 coproc_reg entry. 
*/ 398 + static const struct coproc_reg *index_to_coproc_reg(struct kvm_vcpu *vcpu, 399 + u64 id) 400 + { 401 + size_t num; 402 + const struct coproc_reg *table, *r; 403 + struct coproc_params params; 404 + 405 + /* We only do cp15 for now. */ 406 + if ((id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT != 15) 407 + return NULL; 408 + 409 + if (!index_to_params(id, &params)) 410 + return NULL; 411 + 412 + table = get_target_table(vcpu->arch.target, &num); 413 + r = find_reg(&params, table, num); 414 + if (!r) 415 + r = find_reg(&params, cp15_regs, ARRAY_SIZE(cp15_regs)); 416 + 417 + /* Not saved in the cp15 array? */ 418 + if (r && !r->reg) 419 + r = NULL; 420 + 421 + return r; 422 + } 423 + 424 + /* 425 + * These are the invariant cp15 registers: we let the guest see the host 426 + * versions of these, so they're part of the guest state. 427 + * 428 + * A future CPU may provide a mechanism to present different values to 429 + * the guest, or a future kvm may trap them. 430 + */ 431 + /* Unfortunately, there's no register-argument for mrc, so generate. 
*/ 432 + #define FUNCTION_FOR32(crn, crm, op1, op2, name) \ 433 + static void get_##name(struct kvm_vcpu *v, \ 434 + const struct coproc_reg *r) \ 435 + { \ 436 + u32 val; \ 437 + \ 438 + asm volatile("mrc p15, " __stringify(op1) \ 439 + ", %0, c" __stringify(crn) \ 440 + ", c" __stringify(crm) \ 441 + ", " __stringify(op2) "\n" : "=r" (val)); \ 442 + ((struct coproc_reg *)r)->val = val; \ 443 + } 444 + 445 + FUNCTION_FOR32(0, 0, 0, 0, MIDR) 446 + FUNCTION_FOR32(0, 0, 0, 1, CTR) 447 + FUNCTION_FOR32(0, 0, 0, 2, TCMTR) 448 + FUNCTION_FOR32(0, 0, 0, 3, TLBTR) 449 + FUNCTION_FOR32(0, 0, 0, 6, REVIDR) 450 + FUNCTION_FOR32(0, 1, 0, 0, ID_PFR0) 451 + FUNCTION_FOR32(0, 1, 0, 1, ID_PFR1) 452 + FUNCTION_FOR32(0, 1, 0, 2, ID_DFR0) 453 + FUNCTION_FOR32(0, 1, 0, 3, ID_AFR0) 454 + FUNCTION_FOR32(0, 1, 0, 4, ID_MMFR0) 455 + FUNCTION_FOR32(0, 1, 0, 5, ID_MMFR1) 456 + FUNCTION_FOR32(0, 1, 0, 6, ID_MMFR2) 457 + FUNCTION_FOR32(0, 1, 0, 7, ID_MMFR3) 458 + FUNCTION_FOR32(0, 2, 0, 0, ID_ISAR0) 459 + FUNCTION_FOR32(0, 2, 0, 1, ID_ISAR1) 460 + FUNCTION_FOR32(0, 2, 0, 2, ID_ISAR2) 461 + FUNCTION_FOR32(0, 2, 0, 3, ID_ISAR3) 462 + FUNCTION_FOR32(0, 2, 0, 4, ID_ISAR4) 463 + FUNCTION_FOR32(0, 2, 0, 5, ID_ISAR5) 464 + FUNCTION_FOR32(0, 0, 1, 1, CLIDR) 465 + FUNCTION_FOR32(0, 0, 1, 7, AIDR) 466 + 467 + /* ->val is filled in by kvm_invariant_coproc_table_init() */ 468 + static struct coproc_reg invariant_cp15[] = { 469 + { CRn( 0), CRm( 0), Op1( 0), Op2( 0), is32, NULL, get_MIDR }, 470 + { CRn( 0), CRm( 0), Op1( 0), Op2( 1), is32, NULL, get_CTR }, 471 + { CRn( 0), CRm( 0), Op1( 0), Op2( 2), is32, NULL, get_TCMTR }, 472 + { CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR }, 473 + { CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR }, 474 + 475 + { CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 }, 476 + { CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 }, 477 + { CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 }, 478 + { CRn( 0), CRm( 1), 
Op1( 0), Op2( 3), is32, NULL, get_ID_AFR0 }, 479 + { CRn( 0), CRm( 1), Op1( 0), Op2( 4), is32, NULL, get_ID_MMFR0 }, 480 + { CRn( 0), CRm( 1), Op1( 0), Op2( 5), is32, NULL, get_ID_MMFR1 }, 481 + { CRn( 0), CRm( 1), Op1( 0), Op2( 6), is32, NULL, get_ID_MMFR2 }, 482 + { CRn( 0), CRm( 1), Op1( 0), Op2( 7), is32, NULL, get_ID_MMFR3 }, 483 + 484 + { CRn( 0), CRm( 2), Op1( 0), Op2( 0), is32, NULL, get_ID_ISAR0 }, 485 + { CRn( 0), CRm( 2), Op1( 0), Op2( 1), is32, NULL, get_ID_ISAR1 }, 486 + { CRn( 0), CRm( 2), Op1( 0), Op2( 2), is32, NULL, get_ID_ISAR2 }, 487 + { CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 }, 488 + { CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 }, 489 + { CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 }, 490 + 491 + { CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR }, 492 + { CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR }, 493 + }; 494 + 495 + static int reg_from_user(void *val, const void __user *uaddr, u64 id) 496 + { 497 + /* This Just Works because we are little endian. */ 498 + if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0) 499 + return -EFAULT; 500 + return 0; 501 + } 502 + 503 + static int reg_to_user(void __user *uaddr, const void *val, u64 id) 504 + { 505 + /* This Just Works because we are little endian. 
*/ 506 + if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0) 507 + return -EFAULT; 508 + return 0; 509 + } 510 + 511 + static int get_invariant_cp15(u64 id, void __user *uaddr) 512 + { 513 + struct coproc_params params; 514 + const struct coproc_reg *r; 515 + 516 + if (!index_to_params(id, &params)) 517 + return -ENOENT; 518 + 519 + r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15)); 520 + if (!r) 521 + return -ENOENT; 522 + 523 + return reg_to_user(uaddr, &r->val, id); 524 + } 525 + 526 + static int set_invariant_cp15(u64 id, void __user *uaddr) 527 + { 528 + struct coproc_params params; 529 + const struct coproc_reg *r; 530 + int err; 531 + u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */ 532 + 533 + if (!index_to_params(id, &params)) 534 + return -ENOENT; 535 + r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15)); 536 + if (!r) 537 + return -ENOENT; 538 + 539 + err = reg_from_user(&val, uaddr, id); 540 + if (err) 541 + return err; 542 + 543 + /* This is what we mean by invariant: you can't change it. */ 544 + if (r->val != val) 545 + return -EINVAL; 546 + 547 + return 0; 548 + } 549 + 550 + int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) 551 + { 552 + const struct coproc_reg *r; 553 + void __user *uaddr = (void __user *)(long)reg->addr; 554 + 555 + r = index_to_coproc_reg(vcpu, reg->id); 556 + if (!r) 557 + return get_invariant_cp15(reg->id, uaddr); 558 + 559 + /* Note: copies two regs if size is 64 bit. 
*/ 560 + return reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id); 561 + } 562 + 563 + int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) 564 + { 565 + const struct coproc_reg *r; 566 + void __user *uaddr = (void __user *)(long)reg->addr; 567 + 568 + r = index_to_coproc_reg(vcpu, reg->id); 569 + if (!r) 570 + return set_invariant_cp15(reg->id, uaddr); 571 + 572 + /* Note: copies two regs if size is 64 bit */ 573 + return reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id); 574 + } 575 + 576 + static u64 cp15_to_index(const struct coproc_reg *reg) 577 + { 578 + u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT); 579 + if (reg->is_64) { 580 + val |= KVM_REG_SIZE_U64; 581 + val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); 582 + val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT); 583 + } else { 584 + val |= KVM_REG_SIZE_U32; 585 + val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); 586 + val |= (reg->Op2 << KVM_REG_ARM_32_OPC2_SHIFT); 587 + val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT); 588 + val |= (reg->CRn << KVM_REG_ARM_32_CRN_SHIFT); 589 + } 590 + return val; 591 + } 592 + 593 + static bool copy_reg_to_user(const struct coproc_reg *reg, u64 __user **uind) 594 + { 595 + if (!*uind) 596 + return true; 597 + 598 + if (put_user(cp15_to_index(reg), *uind)) 599 + return false; 600 + 601 + (*uind)++; 602 + return true; 603 + } 604 + 605 + /* Assumed ordered tables, see kvm_coproc_table_init. */ 606 + static int walk_cp15(struct kvm_vcpu *vcpu, u64 __user *uind) 607 + { 608 + const struct coproc_reg *i1, *i2, *end1, *end2; 609 + unsigned int total = 0; 610 + size_t num; 611 + 612 + /* We check for duplicates here, to allow arch-specific overrides. */ 613 + i1 = get_target_table(vcpu->arch.target, &num); 614 + end1 = i1 + num; 615 + i2 = cp15_regs; 616 + end2 = cp15_regs + ARRAY_SIZE(cp15_regs); 617 + 618 + BUG_ON(i1 == end1 || i2 == end2); 619 + 620 + /* Walk carefully, as both tables may refer to the same register. 
*/ 621 + while (i1 || i2) { 622 + int cmp = cmp_reg(i1, i2); 623 + /* target-specific overrides generic entry. */ 624 + if (cmp <= 0) { 625 + /* Ignore registers we trap but don't save. */ 626 + if (i1->reg) { 627 + if (!copy_reg_to_user(i1, &uind)) 628 + return -EFAULT; 629 + total++; 630 + } 631 + } else { 632 + /* Ignore registers we trap but don't save. */ 633 + if (i2->reg) { 634 + if (!copy_reg_to_user(i2, &uind)) 635 + return -EFAULT; 636 + total++; 637 + } 638 + } 639 + 640 + if (cmp <= 0 && ++i1 == end1) 641 + i1 = NULL; 642 + if (cmp >= 0 && ++i2 == end2) 643 + i2 = NULL; 644 + } 645 + return total; 646 + } 647 + 648 + unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu) 649 + { 650 + return ARRAY_SIZE(invariant_cp15) 651 + + walk_cp15(vcpu, (u64 __user *)NULL); 652 + } 653 + 654 + int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) 655 + { 656 + unsigned int i; 657 + int err; 658 + 659 + /* Then give them all the invariant registers' indices. */ 660 + for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) { 661 + if (put_user(cp15_to_index(&invariant_cp15[i]), uindices)) 662 + return -EFAULT; 663 + uindices++; 664 + } 665 + 666 + err = walk_cp15(vcpu, uindices); 667 + if (err > 0) 668 + err = 0; 669 + return err; 670 + } 671 + 351 672 void kvm_coproc_table_init(void) 352 673 { 353 674 unsigned int i; ··· 677 354 /* Make sure tables are unique and in order. */ 678 355 for (i = 1; i < ARRAY_SIZE(cp15_regs); i++) 679 356 BUG_ON(cmp_reg(&cp15_regs[i-1], &cp15_regs[i]) >= 0); 357 + 358 + /* We abuse the reset function to overwrite the table itself. */ 359 + for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) 360 + invariant_cp15[i].reset(NULL, &invariant_cp15[i]); 680 361 } 681 362 682 363 /**
+5 -4
arch/arm/kvm/guest.c
··· 26 26 #include <asm/kvm.h> 27 27 #include <asm/kvm_asm.h> 28 28 #include <asm/kvm_emulate.h> 29 + #include <asm/kvm_coproc.h> 29 30 30 31 #define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM } 31 32 #define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU } ··· 120 119 */ 121 120 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) 122 121 { 123 - return num_core_regs(); 122 + return num_core_regs() + kvm_arm_num_coproc_regs(vcpu); 124 123 } 125 124 126 125 /** ··· 139 138 uindices++; 140 139 } 141 140 142 - return 0; 141 + return kvm_arm_copy_coproc_indices(vcpu, uindices); 143 142 } 144 143 145 144 int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) ··· 152 151 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) 153 152 return get_core_reg(vcpu, reg); 154 153 155 - return -EINVAL; 154 + return kvm_arm_coproc_get_reg(vcpu, reg); 156 155 } 157 156 158 157 int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) ··· 165 164 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) 166 165 return set_core_reg(vcpu, reg); 167 166 168 - return -EINVAL; 167 + return kvm_arm_coproc_set_reg(vcpu, reg); 169 168 } 170 169 171 170 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,