Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'kvm-s390-next-20150318' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into queue

KVM: s390: Features and fixes for 4.1 (kvm/next)

1. Fixes
2. Implement access register mode in KVM
3. Provide a userspace post handler for the STSI instruction
4. Provide an interface for compliant memory accesses
5. Provide an interface for getting/setting the guest storage key
6. Fixup for the vector facility patches: do not announce the
vector facility in the guest for old QEMUs.

1-5 were initially shown as RFC in

http://www.spinics.net/lists/kvm/msg114720.html

some small review changes
- added some ACKs
- have the AR mode patches first
- get rid of unnecessary AR_INVAL define
- typos and language

6. two new patches
The two new patches fix up the vector support patches that were
introduced in the last pull request for QEMU versions that don't
know about vector support and guests that do. (We announce the
facility bit, but don't enable the facility, so vector-aware guests
will crash on vector instructions).

authored by

Marcelo Tosatti and committed by
Marcelo Tosatti
bbf4aef8 0a4e6be9

+755 -127
+132
Documentation/virtual/kvm/api.txt
··· 2716 2716 eax, ebx, ecx, edx: the values returned by the cpuid instruction for 2717 2717 this function/index combination 2718 2718 2719 + 4.89 KVM_S390_MEM_OP 2720 + 2721 + Capability: KVM_CAP_S390_MEM_OP 2722 + Architectures: s390 2723 + Type: vcpu ioctl 2724 + Parameters: struct kvm_s390_mem_op (in) 2725 + Returns: = 0 on success, 2726 + < 0 on generic error (e.g. -EFAULT or -ENOMEM), 2727 + > 0 if an exception occurred while walking the page tables 2728 + 2729 + Read or write data from/to the logical (virtual) memory of a VPCU. 2730 + 2731 + Parameters are specified via the following structure: 2732 + 2733 + struct kvm_s390_mem_op { 2734 + __u64 gaddr; /* the guest address */ 2735 + __u64 flags; /* flags */ 2736 + __u32 size; /* amount of bytes */ 2737 + __u32 op; /* type of operation */ 2738 + __u64 buf; /* buffer in userspace */ 2739 + __u8 ar; /* the access register number */ 2740 + __u8 reserved[31]; /* should be set to 0 */ 2741 + }; 2742 + 2743 + The type of operation is specified in the "op" field. It is either 2744 + KVM_S390_MEMOP_LOGICAL_READ for reading from logical memory space or 2745 + KVM_S390_MEMOP_LOGICAL_WRITE for writing to logical memory space. The 2746 + KVM_S390_MEMOP_F_CHECK_ONLY flag can be set in the "flags" field to check 2747 + whether the corresponding memory access would create an access exception 2748 + (without touching the data in the memory at the destination). In case an 2749 + access exception occurred while walking the MMU tables of the guest, the 2750 + ioctl returns a positive error number to indicate the type of exception. 2751 + This exception is also raised directly at the corresponding VCPU if the 2752 + flag KVM_S390_MEMOP_F_INJECT_EXCEPTION is set in the "flags" field. 2753 + 2754 + The start address of the memory region has to be specified in the "gaddr" 2755 + field, and the length of the region in the "size" field. 
"buf" is the buffer 2756 + supplied by the userspace application where the read data should be written 2757 + to for KVM_S390_MEMOP_LOGICAL_READ, or where the data that should be written 2758 + is stored for a KVM_S390_MEMOP_LOGICAL_WRITE. "buf" is unused and can be NULL 2759 + when KVM_S390_MEMOP_F_CHECK_ONLY is specified. "ar" designates the access 2760 + register number to be used. 2761 + 2762 + The "reserved" field is meant for future extensions. It is not used by 2763 + KVM with the currently defined set of flags. 2764 + 2765 + 4.90 KVM_S390_GET_SKEYS 2766 + 2767 + Capability: KVM_CAP_S390_SKEYS 2768 + Architectures: s390 2769 + Type: vm ioctl 2770 + Parameters: struct kvm_s390_skeys 2771 + Returns: 0 on success, KVM_S390_GET_KEYS_NONE if guest is not using storage 2772 + keys, negative value on error 2773 + 2774 + This ioctl is used to get guest storage key values on the s390 2775 + architecture. The ioctl takes parameters via the kvm_s390_skeys struct. 2776 + 2777 + struct kvm_s390_skeys { 2778 + __u64 start_gfn; 2779 + __u64 count; 2780 + __u64 skeydata_addr; 2781 + __u32 flags; 2782 + __u32 reserved[9]; 2783 + }; 2784 + 2785 + The start_gfn field is the number of the first guest frame whose storage keys 2786 + you want to get. 2787 + 2788 + The count field is the number of consecutive frames (starting from start_gfn) 2789 + whose storage keys to get. The count field must be at least 1 and the maximum 2790 + allowed value is defined as KVM_S390_SKEYS_ALLOC_MAX. Values outside this range 2791 + will cause the ioctl to return -EINVAL. 2792 + 2793 + The skeydata_addr field is the address to a buffer large enough to hold count 2794 + bytes. This buffer will be filled with storage key data by the ioctl. 
2795 + 2796 + 4.91 KVM_S390_SET_SKEYS 2797 + 2798 + Capability: KVM_CAP_S390_SKEYS 2799 + Architectures: s390 2800 + Type: vm ioctl 2801 + Parameters: struct kvm_s390_skeys 2802 + Returns: 0 on success, negative value on error 2803 + 2804 + This ioctl is used to set guest storage key values on the s390 2805 + architecture. The ioctl takes parameters via the kvm_s390_skeys struct. 2806 + See section on KVM_S390_GET_SKEYS for struct definition. 2807 + 2808 + The start_gfn field is the number of the first guest frame whose storage keys 2809 + you want to set. 2810 + 2811 + The count field is the number of consecutive frames (starting from start_gfn) 2812 + whose storage keys to get. The count field must be at least 1 and the maximum 2813 + allowed value is defined as KVM_S390_SKEYS_ALLOC_MAX. Values outside this range 2814 + will cause the ioctl to return -EINVAL. 2815 + 2816 + The skeydata_addr field is the address to a buffer containing count bytes of 2817 + storage keys. Each byte in the buffer will be set as the storage key for a 2818 + single frame starting at start_gfn for count frames. 2819 + 2820 + Note: If any architecturally invalid key value is found in the given data then 2821 + the ioctl will return -EINVAL. 2822 + 2719 2823 5. The kvm_run structure 2720 2824 ------------------------ 2721 2825 ··· 3362 3258 Allows use of the vector registers introduced with z13 processor, and 3363 3259 provides for the synchronization between host and user space. Will 3364 3260 return -EINVAL if the machine does not support vectors. 3261 + 3262 + 7.4 KVM_CAP_S390_USER_STSI 3263 + 3264 + Architectures: s390 3265 + Parameters: none 3266 + 3267 + This capability allows post-handlers for the STSI instruction. After 3268 + initial handling in the kernel, KVM exits to user space with 3269 + KVM_EXIT_S390_STSI to allow user space to insert further data. 
3270 + 3271 + Before exiting to userspace, kvm handlers should fill in s390_stsi field of 3272 + vcpu->run: 3273 + struct { 3274 + __u64 addr; 3275 + __u8 ar; 3276 + __u8 reserved; 3277 + __u8 fc; 3278 + __u8 sel1; 3279 + __u16 sel2; 3280 + } s390_stsi; 3281 + 3282 + @addr - guest address of STSI SYSIB 3283 + @fc - function code 3284 + @sel1 - selector 1 3285 + @sel2 - selector 2 3286 + @ar - access register number 3287 + 3288 + KVM handlers should exit to userspace with rc = -EREMOTE.
+1 -1
arch/s390/include/asm/kvm_host.h
··· 562 562 int css_support; 563 563 int use_irqchip; 564 564 int use_cmma; 565 - int use_vectors; 566 565 int user_cpu_state_ctrl; 567 566 int user_sigp; 567 + int user_stsi; 568 568 struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS]; 569 569 wait_queue_head_t ipte_wq; 570 570 int ipte_lock_count;
+2 -2
arch/s390/kvm/diag.c
··· 77 77 78 78 if (vcpu->run->s.regs.gprs[rx] & 7) 79 79 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 80 - rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], &parm, sizeof(parm)); 80 + rc = read_guest(vcpu, vcpu->run->s.regs.gprs[rx], rx, &parm, sizeof(parm)); 81 81 if (rc) 82 82 return kvm_s390_inject_prog_cond(vcpu, rc); 83 83 if (parm.parm_version != 2 || parm.parm_len < 5 || parm.code != 0x258) ··· 230 230 231 231 int kvm_s390_handle_diag(struct kvm_vcpu *vcpu) 232 232 { 233 - int code = kvm_s390_get_base_disp_rs(vcpu) & 0xffff; 233 + int code = kvm_s390_get_base_disp_rs(vcpu, NULL) & 0xffff; 234 234 235 235 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 236 236 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
+242 -52
arch/s390/kvm/gaccess.c
··· 10 10 #include <asm/pgtable.h> 11 11 #include "kvm-s390.h" 12 12 #include "gaccess.h" 13 + #include <asm/switch_to.h> 13 14 14 15 union asce { 15 16 unsigned long val; ··· 208 207 unsigned long pfra : 52; /* Page-Frame Real Address */ 209 208 }; 210 209 210 + union alet { 211 + u32 val; 212 + struct { 213 + u32 reserved : 7; 214 + u32 p : 1; 215 + u32 alesn : 8; 216 + u32 alen : 16; 217 + }; 218 + }; 219 + 220 + union ald { 221 + u32 val; 222 + struct { 223 + u32 : 1; 224 + u32 alo : 24; 225 + u32 all : 7; 226 + }; 227 + }; 228 + 229 + struct ale { 230 + unsigned long i : 1; /* ALEN-Invalid Bit */ 231 + unsigned long : 5; 232 + unsigned long fo : 1; /* Fetch-Only Bit */ 233 + unsigned long p : 1; /* Private Bit */ 234 + unsigned long alesn : 8; /* Access-List-Entry Sequence Number */ 235 + unsigned long aleax : 16; /* Access-List-Entry Authorization Index */ 236 + unsigned long : 32; 237 + unsigned long : 1; 238 + unsigned long asteo : 25; /* ASN-Second-Table-Entry Origin */ 239 + unsigned long : 6; 240 + unsigned long astesn : 32; /* ASTE Sequence Number */ 241 + } __packed; 242 + 243 + struct aste { 244 + unsigned long i : 1; /* ASX-Invalid Bit */ 245 + unsigned long ato : 29; /* Authority-Table Origin */ 246 + unsigned long : 1; 247 + unsigned long b : 1; /* Base-Space Bit */ 248 + unsigned long ax : 16; /* Authorization Index */ 249 + unsigned long atl : 12; /* Authority-Table Length */ 250 + unsigned long : 2; 251 + unsigned long ca : 1; /* Controlled-ASN Bit */ 252 + unsigned long ra : 1; /* Reusable-ASN Bit */ 253 + unsigned long asce : 64; /* Address-Space-Control Element */ 254 + unsigned long ald : 32; 255 + unsigned long astesn : 32; 256 + /* .. 
more fields there */ 257 + } __packed; 211 258 212 259 int ipte_lock_held(struct kvm_vcpu *vcpu) 213 260 { ··· 356 307 ipte_unlock_simple(vcpu); 357 308 } 358 309 359 - static unsigned long get_vcpu_asce(struct kvm_vcpu *vcpu) 310 + static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar, 311 + int write) 360 312 { 313 + union alet alet; 314 + struct ale ale; 315 + struct aste aste; 316 + unsigned long ald_addr, authority_table_addr; 317 + union ald ald; 318 + int eax, rc; 319 + u8 authority_table; 320 + 321 + if (ar >= NUM_ACRS) 322 + return -EINVAL; 323 + 324 + save_access_regs(vcpu->run->s.regs.acrs); 325 + alet.val = vcpu->run->s.regs.acrs[ar]; 326 + 327 + if (ar == 0 || alet.val == 0) { 328 + asce->val = vcpu->arch.sie_block->gcr[1]; 329 + return 0; 330 + } else if (alet.val == 1) { 331 + asce->val = vcpu->arch.sie_block->gcr[7]; 332 + return 0; 333 + } 334 + 335 + if (alet.reserved) 336 + return PGM_ALET_SPECIFICATION; 337 + 338 + if (alet.p) 339 + ald_addr = vcpu->arch.sie_block->gcr[5]; 340 + else 341 + ald_addr = vcpu->arch.sie_block->gcr[2]; 342 + ald_addr &= 0x7fffffc0; 343 + 344 + rc = read_guest_real(vcpu, ald_addr + 16, &ald.val, sizeof(union ald)); 345 + if (rc) 346 + return rc; 347 + 348 + if (alet.alen / 8 > ald.all) 349 + return PGM_ALEN_TRANSLATION; 350 + 351 + if (0x7fffffff - ald.alo * 128 < alet.alen * 16) 352 + return PGM_ADDRESSING; 353 + 354 + rc = read_guest_real(vcpu, ald.alo * 128 + alet.alen * 16, &ale, 355 + sizeof(struct ale)); 356 + if (rc) 357 + return rc; 358 + 359 + if (ale.i == 1) 360 + return PGM_ALEN_TRANSLATION; 361 + if (ale.alesn != alet.alesn) 362 + return PGM_ALE_SEQUENCE; 363 + 364 + rc = read_guest_real(vcpu, ale.asteo * 64, &aste, sizeof(struct aste)); 365 + if (rc) 366 + return rc; 367 + 368 + if (aste.i) 369 + return PGM_ASTE_VALIDITY; 370 + if (aste.astesn != ale.astesn) 371 + return PGM_ASTE_SEQUENCE; 372 + 373 + if (ale.p == 1) { 374 + eax = (vcpu->arch.sie_block->gcr[8] >> 16) & 0xffff; 375 + if 
(ale.aleax != eax) { 376 + if (eax / 16 > aste.atl) 377 + return PGM_EXTENDED_AUTHORITY; 378 + 379 + authority_table_addr = aste.ato * 4 + eax / 4; 380 + 381 + rc = read_guest_real(vcpu, authority_table_addr, 382 + &authority_table, 383 + sizeof(u8)); 384 + if (rc) 385 + return rc; 386 + 387 + if ((authority_table & (0x40 >> ((eax & 3) * 2))) == 0) 388 + return PGM_EXTENDED_AUTHORITY; 389 + } 390 + } 391 + 392 + if (ale.fo == 1 && write) 393 + return PGM_PROTECTION; 394 + 395 + asce->val = aste.asce; 396 + return 0; 397 + } 398 + 399 + struct trans_exc_code_bits { 400 + unsigned long addr : 52; /* Translation-exception Address */ 401 + unsigned long fsi : 2; /* Access Exception Fetch/Store Indication */ 402 + unsigned long : 6; 403 + unsigned long b60 : 1; 404 + unsigned long b61 : 1; 405 + unsigned long as : 2; /* ASCE Identifier */ 406 + }; 407 + 408 + enum { 409 + FSI_UNKNOWN = 0, /* Unknown wether fetch or store */ 410 + FSI_STORE = 1, /* Exception was due to store operation */ 411 + FSI_FETCH = 2 /* Exception was due to fetch operation */ 412 + }; 413 + 414 + static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce, 415 + ar_t ar, int write) 416 + { 417 + int rc; 418 + psw_t *psw = &vcpu->arch.sie_block->gpsw; 419 + struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm; 420 + struct trans_exc_code_bits *tec_bits; 421 + 422 + memset(pgm, 0, sizeof(*pgm)); 423 + tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code; 424 + tec_bits->fsi = write ? 
FSI_STORE : FSI_FETCH; 425 + tec_bits->as = psw_bits(*psw).as; 426 + 427 + if (!psw_bits(*psw).t) { 428 + asce->val = 0; 429 + asce->r = 1; 430 + return 0; 431 + } 432 + 361 433 switch (psw_bits(vcpu->arch.sie_block->gpsw).as) { 362 434 case PSW_AS_PRIMARY: 363 - return vcpu->arch.sie_block->gcr[1]; 435 + asce->val = vcpu->arch.sie_block->gcr[1]; 436 + return 0; 364 437 case PSW_AS_SECONDARY: 365 - return vcpu->arch.sie_block->gcr[7]; 438 + asce->val = vcpu->arch.sie_block->gcr[7]; 439 + return 0; 366 440 case PSW_AS_HOME: 367 - return vcpu->arch.sie_block->gcr[13]; 441 + asce->val = vcpu->arch.sie_block->gcr[13]; 442 + return 0; 443 + case PSW_AS_ACCREG: 444 + rc = ar_translation(vcpu, asce, ar, write); 445 + switch (rc) { 446 + case PGM_ALEN_TRANSLATION: 447 + case PGM_ALE_SEQUENCE: 448 + case PGM_ASTE_VALIDITY: 449 + case PGM_ASTE_SEQUENCE: 450 + case PGM_EXTENDED_AUTHORITY: 451 + vcpu->arch.pgm.exc_access_id = ar; 452 + break; 453 + case PGM_PROTECTION: 454 + tec_bits->b60 = 1; 455 + tec_bits->b61 = 1; 456 + break; 457 + } 458 + if (rc > 0) 459 + pgm->code = rc; 460 + return rc; 368 461 } 369 462 return 0; 370 463 } ··· 521 330 * @vcpu: virtual cpu 522 331 * @gva: guest virtual address 523 332 * @gpa: points to where guest physical (absolute) address should be stored 333 + * @asce: effective asce 524 334 * @write: indicates if access is a write access 525 335 * 526 336 * Translate a guest virtual address into a guest absolute address by means ··· 537 345 * by the architecture 538 346 */ 539 347 static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva, 540 - unsigned long *gpa, int write) 348 + unsigned long *gpa, const union asce asce, 349 + int write) 541 350 { 542 351 union vaddress vaddr = {.addr = gva}; 543 352 union raddress raddr = {.addr = gva}; ··· 547 354 union ctlreg0 ctlreg0; 548 355 unsigned long ptr; 549 356 int edat1, edat2; 550 - union asce asce; 551 357 552 358 ctlreg0.val = vcpu->arch.sie_block->gcr[0]; 553 359 edat1 = 
ctlreg0.edat && test_kvm_facility(vcpu->kvm, 8); 554 360 edat2 = edat1 && test_kvm_facility(vcpu->kvm, 78); 555 - asce.val = get_vcpu_asce(vcpu); 556 361 if (asce.r) 557 362 goto real_address; 558 363 ptr = asce.origin * 4096; ··· 697 506 return (ga & ~0x11fful) == 0; 698 507 } 699 508 700 - static int low_address_protection_enabled(struct kvm_vcpu *vcpu) 509 + static int low_address_protection_enabled(struct kvm_vcpu *vcpu, 510 + const union asce asce) 701 511 { 702 512 union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]}; 703 513 psw_t *psw = &vcpu->arch.sie_block->gpsw; 704 - union asce asce; 705 514 706 515 if (!ctlreg0.lap) 707 516 return 0; 708 - asce.val = get_vcpu_asce(vcpu); 709 517 if (psw_bits(*psw).t && asce.p) 710 518 return 0; 711 519 return 1; 712 520 } 713 521 714 - struct trans_exc_code_bits { 715 - unsigned long addr : 52; /* Translation-exception Address */ 716 - unsigned long fsi : 2; /* Access Exception Fetch/Store Indication */ 717 - unsigned long : 7; 718 - unsigned long b61 : 1; 719 - unsigned long as : 2; /* ASCE Identifier */ 720 - }; 721 - 722 - enum { 723 - FSI_UNKNOWN = 0, /* Unknown wether fetch or store */ 724 - FSI_STORE = 1, /* Exception was due to store operation */ 725 - FSI_FETCH = 2 /* Exception was due to fetch operation */ 726 - }; 727 - 728 522 static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, 729 523 unsigned long *pages, unsigned long nr_pages, 730 - int write) 524 + const union asce asce, int write) 731 525 { 732 526 struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm; 733 527 psw_t *psw = &vcpu->arch.sie_block->gpsw; 734 528 struct trans_exc_code_bits *tec_bits; 735 529 int lap_enabled, rc; 736 530 737 - memset(pgm, 0, sizeof(*pgm)); 738 531 tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code; 739 - tec_bits->fsi = write ? 
FSI_STORE : FSI_FETCH; 740 - tec_bits->as = psw_bits(*psw).as; 741 - lap_enabled = low_address_protection_enabled(vcpu); 532 + lap_enabled = low_address_protection_enabled(vcpu, asce); 742 533 while (nr_pages) { 743 534 ga = kvm_s390_logical_to_effective(vcpu, ga); 744 535 tec_bits->addr = ga >> PAGE_SHIFT; ··· 730 557 } 731 558 ga &= PAGE_MASK; 732 559 if (psw_bits(*psw).t) { 733 - rc = guest_translate(vcpu, ga, pages, write); 560 + rc = guest_translate(vcpu, ga, pages, asce, write); 734 561 if (rc < 0) 735 562 return rc; 736 563 if (rc == PGM_PROTECTION) ··· 751 578 return 0; 752 579 } 753 580 754 - int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data, 581 + int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data, 755 582 unsigned long len, int write) 756 583 { 757 584 psw_t *psw = &vcpu->arch.sie_block->gpsw; ··· 764 591 765 592 if (!len) 766 593 return 0; 767 - /* Access register mode is not supported yet. */ 768 - if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG) 769 - return -EOPNOTSUPP; 594 + rc = get_vcpu_asce(vcpu, &asce, ar, write); 595 + if (rc) 596 + return rc; 770 597 nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1; 771 598 pages = pages_array; 772 599 if (nr_pages > ARRAY_SIZE(pages_array)) 773 600 pages = vmalloc(nr_pages * sizeof(unsigned long)); 774 601 if (!pages) 775 602 return -ENOMEM; 776 - asce.val = get_vcpu_asce(vcpu); 777 603 need_ipte_lock = psw_bits(*psw).t && !asce.r; 778 604 if (need_ipte_lock) 779 605 ipte_lock(vcpu); 780 - rc = guest_page_range(vcpu, ga, pages, nr_pages, write); 606 + rc = guest_page_range(vcpu, ga, pages, nr_pages, asce, write); 781 607 for (idx = 0; idx < nr_pages && !rc; idx++) { 782 608 gpa = *(pages + idx) + (ga & ~PAGE_MASK); 783 609 _len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len); ··· 824 652 * Note: The IPTE lock is not taken during this function, so the caller 825 653 * has to take care of this. 
826 654 */ 827 - int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, 655 + int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar, 828 656 unsigned long *gpa, int write) 829 657 { 830 658 struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm; ··· 833 661 union asce asce; 834 662 int rc; 835 663 836 - /* Access register mode is not supported yet. */ 837 - if (psw_bits(*psw).t && psw_bits(*psw).as == PSW_AS_ACCREG) 838 - return -EOPNOTSUPP; 839 - 840 664 gva = kvm_s390_logical_to_effective(vcpu, gva); 841 - memset(pgm, 0, sizeof(*pgm)); 842 665 tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code; 843 - tec->as = psw_bits(*psw).as; 844 - tec->fsi = write ? FSI_STORE : FSI_FETCH; 666 + rc = get_vcpu_asce(vcpu, &asce, ar, write); 845 667 tec->addr = gva >> PAGE_SHIFT; 846 - if (is_low_address(gva) && low_address_protection_enabled(vcpu)) { 668 + if (rc) 669 + return rc; 670 + if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) { 847 671 if (write) { 848 672 rc = pgm->code = PGM_PROTECTION; 849 673 return rc; 850 674 } 851 675 } 852 676 853 - asce.val = get_vcpu_asce(vcpu); 854 677 if (psw_bits(*psw).t && !asce.r) { /* Use DAT? 
*/ 855 - rc = guest_translate(vcpu, gva, gpa, write); 678 + rc = guest_translate(vcpu, gva, gpa, asce, write); 856 679 if (rc > 0) { 857 680 if (rc == PGM_PROTECTION) 858 681 tec->b61 = 1; ··· 864 697 } 865 698 866 699 /** 867 - * kvm_s390_check_low_addr_protection - check for low-address protection 868 - * @ga: Guest address 700 + * check_gva_range - test a range of guest virtual addresses for accessibility 701 + */ 702 + int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar, 703 + unsigned long length, int is_write) 704 + { 705 + unsigned long gpa; 706 + unsigned long currlen; 707 + int rc = 0; 708 + 709 + ipte_lock(vcpu); 710 + while (length > 0 && !rc) { 711 + currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE)); 712 + rc = guest_translate_address(vcpu, gva, ar, &gpa, is_write); 713 + gva += currlen; 714 + length -= currlen; 715 + } 716 + ipte_unlock(vcpu); 717 + 718 + return rc; 719 + } 720 + 721 + /** 722 + * kvm_s390_check_low_addr_prot_real - check for low-address protection 723 + * @gra: Guest real address 869 724 * 870 725 * Checks whether an address is subject to low-address protection and set 871 726 * up vcpu->arch.pgm accordingly if necessary. 872 727 * 873 728 * Return: 0 if no protection exception, or PGM_PROTECTION if protected. 
874 729 */ 875 - int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga) 730 + int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra) 876 731 { 877 732 struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm; 878 733 psw_t *psw = &vcpu->arch.sie_block->gpsw; 879 734 struct trans_exc_code_bits *tec_bits; 735 + union ctlreg0 ctlreg0 = {.val = vcpu->arch.sie_block->gcr[0]}; 880 736 881 - if (!is_low_address(ga) || !low_address_protection_enabled(vcpu)) 737 + if (!ctlreg0.lap || !is_low_address(gra)) 882 738 return 0; 883 739 884 740 memset(pgm, 0, sizeof(*pgm)); 885 741 tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code; 886 742 tec_bits->fsi = FSI_STORE; 887 743 tec_bits->as = psw_bits(*psw).as; 888 - tec_bits->addr = ga >> PAGE_SHIFT; 744 + tec_bits->addr = gra >> PAGE_SHIFT; 889 745 pgm->code = PGM_PROTECTION; 890 746 891 747 return pgm->code;
+12 -9
arch/s390/kvm/gaccess.h
··· 156 156 } 157 157 158 158 int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, 159 - unsigned long *gpa, int write); 159 + ar_t ar, unsigned long *gpa, int write); 160 + int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar, 161 + unsigned long length, int is_write); 160 162 161 - int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data, 163 + int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data, 162 164 unsigned long len, int write); 163 165 164 166 int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, ··· 170 168 * write_guest - copy data from kernel space to guest space 171 169 * @vcpu: virtual cpu 172 170 * @ga: guest address 171 + * @ar: access register 173 172 * @data: source address in kernel space 174 173 * @len: number of bytes to copy 175 174 * ··· 179 176 * If DAT is off data will be copied to guest real or absolute memory. 180 177 * If DAT is on data will be copied to the address space as specified by 181 178 * the address space bits of the PSW: 182 - * Primary, secondory or home space (access register mode is currently not 183 - * implemented). 179 + * Primary, secondary, home space or access register mode. 184 180 * The addressing mode of the PSW is also inspected, so that address wrap 185 181 * around is taken into account for 24-, 31- and 64-bit addressing mode, 186 182 * if the to be copied data crosses page boundaries in guest address space. ··· 212 210 * if data has been changed in guest space in case of an exception. 
213 211 */ 214 212 static inline __must_check 215 - int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data, 213 + int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data, 216 214 unsigned long len) 217 215 { 218 - return access_guest(vcpu, ga, data, len, 1); 216 + return access_guest(vcpu, ga, ar, data, len, 1); 219 217 } 220 218 221 219 /** 222 220 * read_guest - copy data from guest space to kernel space 223 221 * @vcpu: virtual cpu 224 222 * @ga: guest address 223 + * @ar: access register 225 224 * @data: destination address in kernel space 226 225 * @len: number of bytes to copy 227 226 * ··· 232 229 * data will be copied from guest space to kernel space. 233 230 */ 234 231 static inline __must_check 235 - int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, void *data, 232 + int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data, 236 233 unsigned long len) 237 234 { 238 - return access_guest(vcpu, ga, data, len, 0); 235 + return access_guest(vcpu, ga, ar, data, len, 0); 239 236 } 240 237 241 238 /** ··· 333 330 void ipte_lock(struct kvm_vcpu *vcpu); 334 331 void ipte_unlock(struct kvm_vcpu *vcpu); 335 332 int ipte_lock_held(struct kvm_vcpu *vcpu); 336 - int kvm_s390_check_low_addr_protection(struct kvm_vcpu *vcpu, unsigned long ga); 333 + int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra); 337 334 338 335 #endif /* __KVM_S390_GACCESS_H */
+2 -2
arch/s390/kvm/intercept.c
··· 320 320 321 321 /* Make sure that the source is paged-in */ 322 322 rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg2], 323 - &srcaddr, 0); 323 + reg2, &srcaddr, 0); 324 324 if (rc) 325 325 return kvm_s390_inject_prog_cond(vcpu, rc); 326 326 rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0); ··· 329 329 330 330 /* Make sure that the destination is paged-in */ 331 331 rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg1], 332 - &dstaddr, 1); 332 + reg1, &dstaddr, 1); 333 333 if (rc) 334 334 return kvm_s390_inject_prog_cond(vcpu, rc); 335 335 rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
+222 -22
arch/s390/kvm/kvm-s390.c
··· 25 25 #include <linux/random.h> 26 26 #include <linux/slab.h> 27 27 #include <linux/timer.h> 28 + #include <linux/vmalloc.h> 28 29 #include <asm/asm-offsets.h> 29 30 #include <asm/lowcore.h> 30 31 #include <asm/pgtable.h> ··· 38 37 #define CREATE_TRACE_POINTS 39 38 #include "trace.h" 40 39 #include "trace-s390.h" 40 + 41 + #define MEM_OP_MAX_SIZE 65536 /* Maximum transfer size for KVM_S390_MEM_OP */ 41 42 42 43 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU 43 44 ··· 107 104 unsigned long kvm_s390_fac_list_mask[] = { 108 105 0xff82fffbf4fc2000UL, 109 106 0x005c000000000000UL, 110 - 0x4000000000000000UL, 111 107 }; 112 108 113 109 unsigned long kvm_s390_fac_list_mask_size(void) ··· 177 175 case KVM_CAP_VM_ATTRIBUTES: 178 176 case KVM_CAP_MP_STATE: 179 177 case KVM_CAP_S390_USER_SIGP: 178 + case KVM_CAP_S390_USER_STSI: 179 + case KVM_CAP_S390_SKEYS: 180 180 r = 1; 181 + break; 182 + case KVM_CAP_S390_MEM_OP: 183 + r = MEM_OP_MAX_SIZE; 181 184 break; 182 185 case KVM_CAP_NR_VCPUS: 183 186 case KVM_CAP_MAX_VCPUS: ··· 278 271 r = 0; 279 272 break; 280 273 case KVM_CAP_S390_VECTOR_REGISTERS: 281 - kvm->arch.use_vectors = MACHINE_HAS_VX; 282 - r = MACHINE_HAS_VX ? 0 : -EINVAL; 274 + if (MACHINE_HAS_VX) { 275 + set_kvm_facility(kvm->arch.model.fac->mask, 129); 276 + set_kvm_facility(kvm->arch.model.fac->list, 129); 277 + r = 0; 278 + } else 279 + r = -EINVAL; 280 + break; 281 + case KVM_CAP_S390_USER_STSI: 282 + kvm->arch.user_stsi = 1; 283 + r = 0; 283 284 break; 284 285 default: 285 286 r = -EINVAL; ··· 733 718 return ret; 734 719 } 735 720 721 + static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) 722 + { 723 + uint8_t *keys; 724 + uint64_t hva; 725 + unsigned long curkey; 726 + int i, r = 0; 727 + 728 + if (args->flags != 0) 729 + return -EINVAL; 730 + 731 + /* Is this guest using storage keys? 
*/ 732 + if (!mm_use_skey(current->mm)) 733 + return KVM_S390_GET_SKEYS_NONE; 734 + 735 + /* Enforce sane limit on memory allocation */ 736 + if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) 737 + return -EINVAL; 738 + 739 + keys = kmalloc_array(args->count, sizeof(uint8_t), 740 + GFP_KERNEL | __GFP_NOWARN); 741 + if (!keys) 742 + keys = vmalloc(sizeof(uint8_t) * args->count); 743 + if (!keys) 744 + return -ENOMEM; 745 + 746 + for (i = 0; i < args->count; i++) { 747 + hva = gfn_to_hva(kvm, args->start_gfn + i); 748 + if (kvm_is_error_hva(hva)) { 749 + r = -EFAULT; 750 + goto out; 751 + } 752 + 753 + curkey = get_guest_storage_key(current->mm, hva); 754 + if (IS_ERR_VALUE(curkey)) { 755 + r = curkey; 756 + goto out; 757 + } 758 + keys[i] = curkey; 759 + } 760 + 761 + r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys, 762 + sizeof(uint8_t) * args->count); 763 + if (r) 764 + r = -EFAULT; 765 + out: 766 + kvfree(keys); 767 + return r; 768 + } 769 + 770 + static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args) 771 + { 772 + uint8_t *keys; 773 + uint64_t hva; 774 + int i, r = 0; 775 + 776 + if (args->flags != 0) 777 + return -EINVAL; 778 + 779 + /* Enforce sane limit on memory allocation */ 780 + if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX) 781 + return -EINVAL; 782 + 783 + keys = kmalloc_array(args->count, sizeof(uint8_t), 784 + GFP_KERNEL | __GFP_NOWARN); 785 + if (!keys) 786 + keys = vmalloc(sizeof(uint8_t) * args->count); 787 + if (!keys) 788 + return -ENOMEM; 789 + 790 + r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr, 791 + sizeof(uint8_t) * args->count); 792 + if (r) { 793 + r = -EFAULT; 794 + goto out; 795 + } 796 + 797 + /* Enable storage key handling for the guest */ 798 + s390_enable_skey(); 799 + 800 + for (i = 0; i < args->count; i++) { 801 + hva = gfn_to_hva(kvm, args->start_gfn + i); 802 + if (kvm_is_error_hva(hva)) { 803 + r = -EFAULT; 804 + goto out; 805 + } 806 + 807 + /* Lowest order 
bit is reserved */ 808 + if (keys[i] & 0x01) { 809 + r = -EINVAL; 810 + goto out; 811 + } 812 + 813 + r = set_guest_storage_key(current->mm, hva, 814 + (unsigned long)keys[i], 0); 815 + if (r) 816 + goto out; 817 + } 818 + out: 819 + kvfree(keys); 820 + return r; 821 + } 822 + 736 823 long kvm_arch_vm_ioctl(struct file *filp, 737 824 unsigned int ioctl, unsigned long arg) 738 825 { ··· 892 775 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr))) 893 776 break; 894 777 r = kvm_s390_vm_has_attr(kvm, &attr); 778 + break; 779 + } 780 + case KVM_S390_GET_SKEYS: { 781 + struct kvm_s390_skeys args; 782 + 783 + r = -EFAULT; 784 + if (copy_from_user(&args, argp, 785 + sizeof(struct kvm_s390_skeys))) 786 + break; 787 + r = kvm_s390_get_skeys(kvm, &args); 788 + break; 789 + } 790 + case KVM_S390_SET_SKEYS: { 791 + struct kvm_s390_skeys args; 792 + 793 + r = -EFAULT; 794 + if (copy_from_user(&args, argp, 795 + sizeof(struct kvm_s390_skeys))) 796 + break; 797 + r = kvm_s390_set_skeys(kvm, &args); 895 798 break; 896 799 } 897 800 default: ··· 1034 897 1035 898 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long)); 1036 899 if (!kvm->arch.dbf) 1037 - goto out_nodbf; 900 + goto out_err; 1038 901 1039 902 /* 1040 903 * The architectural maximum amount of facilities is 16 kbit. To store ··· 1046 909 kvm->arch.model.fac = 1047 910 (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA); 1048 911 if (!kvm->arch.model.fac) 1049 - goto out_nofac; 912 + goto out_err; 1050 913 1051 914 /* Populate the facility mask initially. 
*/ 1052 915 memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list, ··· 1066 929 kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff; 1067 930 1068 931 if (kvm_s390_crypto_init(kvm) < 0) 1069 - goto out_crypto; 932 + goto out_err; 1070 933 1071 934 spin_lock_init(&kvm->arch.float_int.lock); 1072 935 INIT_LIST_HEAD(&kvm->arch.float_int.list); ··· 1081 944 } else { 1082 945 kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1); 1083 946 if (!kvm->arch.gmap) 1084 - goto out_nogmap; 947 + goto out_err; 1085 948 kvm->arch.gmap->private = kvm; 1086 949 kvm->arch.gmap->pfault_enabled = 0; 1087 950 } 1088 951 1089 952 kvm->arch.css_support = 0; 1090 953 kvm->arch.use_irqchip = 0; 1091 - kvm->arch.use_vectors = 0; 1092 954 kvm->arch.epoch = 0; 1093 955 1094 956 spin_lock_init(&kvm->arch.start_stop_lock); 1095 957 1096 958 return 0; 1097 - out_nogmap: 1098 - kfree(kvm->arch.crypto.crycb); 1099 - out_crypto: 1100 - free_page((unsigned long)kvm->arch.model.fac); 1101 - out_nofac: 1102 - debug_unregister(kvm->arch.dbf); 1103 - out_nodbf: 1104 - free_page((unsigned long)(kvm->arch.sca)); 1105 959 out_err: 960 + kfree(kvm->arch.crypto.crycb); 961 + free_page((unsigned long)kvm->arch.model.fac); 962 + debug_unregister(kvm->arch.dbf); 963 + free_page((unsigned long)(kvm->arch.sca)); 1106 964 return rc; 1107 965 } 1108 966 ··· 1189 1057 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 1190 1058 { 1191 1059 save_fp_ctl(&vcpu->arch.host_fpregs.fpc); 1192 - if (vcpu->kvm->arch.use_vectors) 1060 + if (test_kvm_facility(vcpu->kvm, 129)) 1193 1061 save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs); 1194 1062 else 1195 1063 save_fp_regs(vcpu->arch.host_fpregs.fprs); 1196 1064 save_access_regs(vcpu->arch.host_acrs); 1197 - if (vcpu->kvm->arch.use_vectors) { 1065 + if (test_kvm_facility(vcpu->kvm, 129)) { 1198 1066 restore_fp_ctl(&vcpu->run->s.regs.fpc); 1199 1067 restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs); 1200 1068 } else { ··· 1210 1078 { 1211 1079 
atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); 1212 1080 gmap_disable(vcpu->arch.gmap); 1213 - if (vcpu->kvm->arch.use_vectors) { 1081 + if (test_kvm_facility(vcpu->kvm, 129)) { 1214 1082 save_fp_ctl(&vcpu->run->s.regs.fpc); 1215 1083 save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs); 1216 1084 } else { ··· 1219 1087 } 1220 1088 save_access_regs(vcpu->run->s.regs.acrs); 1221 1089 restore_fp_ctl(&vcpu->arch.host_fpregs.fpc); 1222 - if (vcpu->kvm->arch.use_vectors) 1090 + if (test_kvm_facility(vcpu->kvm, 129)) 1223 1091 restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs); 1224 1092 else 1225 1093 restore_fp_regs(vcpu->arch.host_fpregs.fprs); ··· 1319 1187 vcpu->arch.sie_block->eca |= 1; 1320 1188 if (sclp_has_sigpif()) 1321 1189 vcpu->arch.sie_block->eca |= 0x10000000U; 1322 - if (vcpu->kvm->arch.use_vectors) { 1190 + if (test_kvm_facility(vcpu->kvm, 129)) { 1323 1191 vcpu->arch.sie_block->eca |= 0x00020000; 1324 1192 vcpu->arch.sie_block->ecd |= 0x20000000; 1325 1193 } ··· 1912 1780 * to look up the current opcode to get the length of the instruction 1913 1781 * to be able to forward the PSW. 
1914 1782 */ 1915 - rc = read_guest(vcpu, psw->addr, &opcode, 1); 1783 + rc = read_guest(vcpu, psw->addr, 0, &opcode, 1); 1916 1784 if (rc) 1917 1785 return kvm_s390_inject_prog_cond(vcpu, rc); 1918 1786 psw->addr = __rewind_psw(*psw, -insn_length(opcode)); ··· 2321 2189 return r; 2322 2190 } 2323 2191 2192 + static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu, 2193 + struct kvm_s390_mem_op *mop) 2194 + { 2195 + void __user *uaddr = (void __user *)mop->buf; 2196 + void *tmpbuf = NULL; 2197 + int r, srcu_idx; 2198 + const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION 2199 + | KVM_S390_MEMOP_F_CHECK_ONLY; 2200 + 2201 + if (mop->flags & ~supported_flags) 2202 + return -EINVAL; 2203 + 2204 + if (mop->size > MEM_OP_MAX_SIZE) 2205 + return -E2BIG; 2206 + 2207 + if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) { 2208 + tmpbuf = vmalloc(mop->size); 2209 + if (!tmpbuf) 2210 + return -ENOMEM; 2211 + } 2212 + 2213 + srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 2214 + 2215 + switch (mop->op) { 2216 + case KVM_S390_MEMOP_LOGICAL_READ: 2217 + if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { 2218 + r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false); 2219 + break; 2220 + } 2221 + r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size); 2222 + if (r == 0) { 2223 + if (copy_to_user(uaddr, tmpbuf, mop->size)) 2224 + r = -EFAULT; 2225 + } 2226 + break; 2227 + case KVM_S390_MEMOP_LOGICAL_WRITE: 2228 + if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) { 2229 + r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true); 2230 + break; 2231 + } 2232 + if (copy_from_user(tmpbuf, uaddr, mop->size)) { 2233 + r = -EFAULT; 2234 + break; 2235 + } 2236 + r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size); 2237 + break; 2238 + default: 2239 + r = -EINVAL; 2240 + } 2241 + 2242 + srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); 2243 + 2244 + if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0) 2245 + kvm_s390_inject_prog_irq(vcpu, 
&vcpu->arch.pgm); 2246 + 2247 + vfree(tmpbuf); 2248 + return r; 2249 + } 2250 + 2324 2251 long kvm_arch_vcpu_ioctl(struct file *filp, 2325 2252 unsigned int ioctl, unsigned long arg) 2326 2253 { ··· 2477 2286 if (copy_from_user(&cap, argp, sizeof(cap))) 2478 2287 break; 2479 2288 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); 2289 + break; 2290 + } 2291 + case KVM_S390_MEM_OP: { 2292 + struct kvm_s390_mem_op mem_op; 2293 + 2294 + if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0) 2295 + r = kvm_s390_guest_mem_op(vcpu, &mem_op); 2296 + else 2297 + r = -EFAULT; 2480 2298 break; 2481 2299 } 2482 2300 default:
+33 -5
arch/s390/kvm/kvm-s390.h
··· 70 70 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu); 71 71 } 72 72 73 - static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu) 73 + typedef u8 __bitwise ar_t; 74 + 75 + static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, ar_t *ar) 74 76 { 75 77 u32 base2 = vcpu->arch.sie_block->ipb >> 28; 76 78 u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); 79 + 80 + if (ar) 81 + *ar = base2; 77 82 78 83 return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; 79 84 } 80 85 81 86 static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu, 82 - u64 *address1, u64 *address2) 87 + u64 *address1, u64 *address2, 88 + ar_t *ar_b1, ar_t *ar_b2) 83 89 { 84 90 u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28; 85 91 u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16; ··· 94 88 95 89 *address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1; 96 90 *address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; 91 + 92 + if (ar_b1) 93 + *ar_b1 = base1; 94 + if (ar_b2) 95 + *ar_b2 = base2; 97 96 } 98 97 99 98 static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2) ··· 109 98 *r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16; 110 99 } 111 100 112 - static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu) 101 + static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, ar_t *ar) 113 102 { 114 103 u32 base2 = vcpu->arch.sie_block->ipb >> 28; 115 104 u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) + ··· 118 107 if (disp2 & 0x80000) 119 108 disp2+=0xfff00000; 120 109 110 + if (ar) 111 + *ar = base2; 112 + 121 113 return (base2 ? 
vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2; 122 114 } 123 115 124 - static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu) 116 + static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, ar_t *ar) 125 117 { 126 118 u32 base2 = vcpu->arch.sie_block->ipb >> 28; 127 119 u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); 120 + 121 + if (ar) 122 + *ar = base2; 128 123 129 124 return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2; 130 125 } ··· 142 125 vcpu->arch.sie_block->gpsw.mask |= cc << 44; 143 126 } 144 127 145 - /* test availability of facility in a kvm intance */ 128 + /* test availability of facility in a kvm instance */ 146 129 static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr) 147 130 { 148 131 return __test_facility(nr, kvm->arch.model.fac->mask) && 149 132 __test_facility(nr, kvm->arch.model.fac->list); 133 + } 134 + 135 + static inline int set_kvm_facility(u64 *fac_list, unsigned long nr) 136 + { 137 + unsigned char *ptr; 138 + 139 + if (nr >= MAX_FACILITY_BIT) 140 + return -EINVAL; 141 + ptr = (unsigned char *) fac_list + (nr >> 3); 142 + *ptr |= (0x80UL >> (nr & 7)); 143 + return 0; 150 144 } 151 145 152 146 /* are cpu states controlled by user space */
+61 -32
arch/s390/kvm/priv.c
··· 36 36 struct kvm_vcpu *cpup; 37 37 s64 hostclk, val; 38 38 int i, rc; 39 + ar_t ar; 39 40 u64 op2; 40 41 41 42 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 42 43 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 43 44 44 - op2 = kvm_s390_get_base_disp_s(vcpu); 45 + op2 = kvm_s390_get_base_disp_s(vcpu, &ar); 45 46 if (op2 & 7) /* Operand must be on a doubleword boundary */ 46 47 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 47 - rc = read_guest(vcpu, op2, &val, sizeof(val)); 48 + rc = read_guest(vcpu, op2, ar, &val, sizeof(val)); 48 49 if (rc) 49 50 return kvm_s390_inject_prog_cond(vcpu, rc); 50 51 ··· 69 68 u64 operand2; 70 69 u32 address; 71 70 int rc; 71 + ar_t ar; 72 72 73 73 vcpu->stat.instruction_spx++; 74 74 75 75 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 76 76 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 77 77 78 - operand2 = kvm_s390_get_base_disp_s(vcpu); 78 + operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); 79 79 80 80 /* must be word boundary */ 81 81 if (operand2 & 3) 82 82 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 83 83 84 84 /* get the value */ 85 - rc = read_guest(vcpu, operand2, &address, sizeof(address)); 85 + rc = read_guest(vcpu, operand2, ar, &address, sizeof(address)); 86 86 if (rc) 87 87 return kvm_s390_inject_prog_cond(vcpu, rc); 88 88 ··· 109 107 u64 operand2; 110 108 u32 address; 111 109 int rc; 110 + ar_t ar; 112 111 113 112 vcpu->stat.instruction_stpx++; 114 113 115 114 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 116 115 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 117 116 118 - operand2 = kvm_s390_get_base_disp_s(vcpu); 117 + operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); 119 118 120 119 /* must be word boundary */ 121 120 if (operand2 & 3) ··· 125 122 address = kvm_s390_get_prefix(vcpu); 126 123 127 124 /* get the value */ 128 - rc = write_guest(vcpu, operand2, &address, sizeof(address)); 125 + rc = write_guest(vcpu, 
operand2, ar, &address, sizeof(address)); 129 126 if (rc) 130 127 return kvm_s390_inject_prog_cond(vcpu, rc); 131 128 ··· 139 136 u16 vcpu_id = vcpu->vcpu_id; 140 137 u64 ga; 141 138 int rc; 139 + ar_t ar; 142 140 143 141 vcpu->stat.instruction_stap++; 144 142 145 143 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 146 144 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 147 145 148 - ga = kvm_s390_get_base_disp_s(vcpu); 146 + ga = kvm_s390_get_base_disp_s(vcpu, &ar); 149 147 150 148 if (ga & 1) 151 149 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 152 150 153 - rc = write_guest(vcpu, ga, &vcpu_id, sizeof(vcpu_id)); 151 + rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id)); 154 152 if (rc) 155 153 return kvm_s390_inject_prog_cond(vcpu, rc); 156 154 ··· 211 207 kvm_s390_get_regs_rre(vcpu, NULL, &reg2); 212 208 addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; 213 209 addr = kvm_s390_logical_to_effective(vcpu, addr); 214 - if (kvm_s390_check_low_addr_protection(vcpu, addr)) 210 + if (kvm_s390_check_low_addr_prot_real(vcpu, addr)) 215 211 return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); 216 212 addr = kvm_s390_real_to_abs(vcpu, addr); 217 213 ··· 235 231 u32 tpi_data[3]; 236 232 int rc; 237 233 u64 addr; 234 + ar_t ar; 238 235 239 - addr = kvm_s390_get_base_disp_s(vcpu); 236 + addr = kvm_s390_get_base_disp_s(vcpu, &ar); 240 237 if (addr & 3) 241 238 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 242 239 ··· 256 251 * provided area. 
257 252 */ 258 253 len = sizeof(tpi_data) - 4; 259 - rc = write_guest(vcpu, addr, &tpi_data, len); 254 + rc = write_guest(vcpu, addr, ar, &tpi_data, len); 260 255 if (rc) { 261 256 rc = kvm_s390_inject_prog_cond(vcpu, rc); 262 257 goto reinject_interrupt; ··· 400 395 psw_compat_t new_psw; 401 396 u64 addr; 402 397 int rc; 398 + ar_t ar; 403 399 404 400 if (gpsw->mask & PSW_MASK_PSTATE) 405 401 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 406 402 407 - addr = kvm_s390_get_base_disp_s(vcpu); 403 + addr = kvm_s390_get_base_disp_s(vcpu, &ar); 408 404 if (addr & 7) 409 405 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 410 406 411 - rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw)); 407 + rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw)); 412 408 if (rc) 413 409 return kvm_s390_inject_prog_cond(vcpu, rc); 414 410 if (!(new_psw.mask & PSW32_MASK_BASE)) ··· 427 421 psw_t new_psw; 428 422 u64 addr; 429 423 int rc; 424 + ar_t ar; 430 425 431 426 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 432 427 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 433 428 434 - addr = kvm_s390_get_base_disp_s(vcpu); 429 + addr = kvm_s390_get_base_disp_s(vcpu, &ar); 435 430 if (addr & 7) 436 431 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 437 - rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw)); 432 + rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw)); 438 433 if (rc) 439 434 return kvm_s390_inject_prog_cond(vcpu, rc); 440 435 vcpu->arch.sie_block->gpsw = new_psw; ··· 449 442 u64 stidp_data = vcpu->arch.stidp_data; 450 443 u64 operand2; 451 444 int rc; 445 + ar_t ar; 452 446 453 447 vcpu->stat.instruction_stidp++; 454 448 455 449 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 456 450 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 457 451 458 - operand2 = kvm_s390_get_base_disp_s(vcpu); 452 + operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); 459 453 460 454 if (operand2 & 7) 461 
455 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); 462 456 463 - rc = write_guest(vcpu, operand2, &stidp_data, sizeof(stidp_data)); 457 + rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data)); 464 458 if (rc) 465 459 return kvm_s390_inject_prog_cond(vcpu, rc); 466 460 ··· 496 488 ASCEBC(mem->vm[0].cpi, 16); 497 489 } 498 490 491 + static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, ar_t ar, 492 + u8 fc, u8 sel1, u16 sel2) 493 + { 494 + vcpu->run->exit_reason = KVM_EXIT_S390_STSI; 495 + vcpu->run->s390_stsi.addr = addr; 496 + vcpu->run->s390_stsi.ar = ar; 497 + vcpu->run->s390_stsi.fc = fc; 498 + vcpu->run->s390_stsi.sel1 = sel1; 499 + vcpu->run->s390_stsi.sel2 = sel2; 500 + } 501 + 499 502 static int handle_stsi(struct kvm_vcpu *vcpu) 500 503 { 501 504 int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28; ··· 515 496 unsigned long mem = 0; 516 497 u64 operand2; 517 498 int rc = 0; 499 + ar_t ar; 518 500 519 501 vcpu->stat.instruction_stsi++; 520 502 VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2); ··· 538 518 return 0; 539 519 } 540 520 541 - operand2 = kvm_s390_get_base_disp_s(vcpu); 521 + operand2 = kvm_s390_get_base_disp_s(vcpu, &ar); 542 522 543 523 if (operand2 & 0xfff) 544 524 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); ··· 562 542 break; 563 543 } 564 544 565 - rc = write_guest(vcpu, operand2, (void *)mem, PAGE_SIZE); 545 + rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE); 566 546 if (rc) { 567 547 rc = kvm_s390_inject_prog_cond(vcpu, rc); 568 548 goto out; 549 + } 550 + if (vcpu->kvm->arch.user_stsi) { 551 + insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2); 552 + rc = -EREMOTE; 569 553 } 570 554 trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2); 571 555 free_page(mem); 572 556 kvm_s390_set_psw_cc(vcpu, 0); 573 557 vcpu->run->s.regs.gprs[0] = 0; 574 - return 0; 558 + return rc; 575 559 out_no_data: 576 560 kvm_s390_set_psw_cc(vcpu, 3); 577 561 
out: ··· 704 680 } 705 681 706 682 if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) { 707 - if (kvm_s390_check_low_addr_protection(vcpu, start)) 683 + if (kvm_s390_check_low_addr_prot_real(vcpu, start)) 708 684 return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm); 709 685 } 710 686 ··· 810 786 int reg, rc, nr_regs; 811 787 u32 ctl_array[16]; 812 788 u64 ga; 789 + ar_t ar; 813 790 814 791 vcpu->stat.instruction_lctl++; 815 792 816 793 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 817 794 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 818 795 819 - ga = kvm_s390_get_base_disp_rs(vcpu); 796 + ga = kvm_s390_get_base_disp_rs(vcpu, &ar); 820 797 821 798 if (ga & 3) 822 799 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); ··· 826 801 trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga); 827 802 828 803 nr_regs = ((reg3 - reg1) & 0xf) + 1; 829 - rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32)); 804 + rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32)); 830 805 if (rc) 831 806 return kvm_s390_inject_prog_cond(vcpu, rc); 832 807 reg = reg1; ··· 849 824 int reg, rc, nr_regs; 850 825 u32 ctl_array[16]; 851 826 u64 ga; 827 + ar_t ar; 852 828 853 829 vcpu->stat.instruction_stctl++; 854 830 855 831 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 856 832 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 857 833 858 - ga = kvm_s390_get_base_disp_rs(vcpu); 834 + ga = kvm_s390_get_base_disp_rs(vcpu, &ar); 859 835 860 836 if (ga & 3) 861 837 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); ··· 872 846 break; 873 847 reg = (reg + 1) % 16; 874 848 } while (1); 875 - rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u32)); 849 + rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32)); 876 850 return rc ? 
kvm_s390_inject_prog_cond(vcpu, rc) : 0; 877 851 } 878 852 ··· 883 857 int reg, rc, nr_regs; 884 858 u64 ctl_array[16]; 885 859 u64 ga; 860 + ar_t ar; 886 861 887 862 vcpu->stat.instruction_lctlg++; 888 863 889 864 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 890 865 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 891 866 892 - ga = kvm_s390_get_base_disp_rsy(vcpu); 867 + ga = kvm_s390_get_base_disp_rsy(vcpu, &ar); 893 868 894 869 if (ga & 7) 895 870 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); ··· 899 872 trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga); 900 873 901 874 nr_regs = ((reg3 - reg1) & 0xf) + 1; 902 - rc = read_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64)); 875 + rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64)); 903 876 if (rc) 904 877 return kvm_s390_inject_prog_cond(vcpu, rc); 905 878 reg = reg1; ··· 921 894 int reg, rc, nr_regs; 922 895 u64 ctl_array[16]; 923 896 u64 ga; 897 + ar_t ar; 924 898 925 899 vcpu->stat.instruction_stctg++; 926 900 927 901 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 928 902 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 929 903 930 - ga = kvm_s390_get_base_disp_rsy(vcpu); 904 + ga = kvm_s390_get_base_disp_rsy(vcpu, &ar); 931 905 932 906 if (ga & 7) 933 907 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); ··· 944 916 break; 945 917 reg = (reg + 1) % 16; 946 918 } while (1); 947 - rc = write_guest(vcpu, ga, ctl_array, nr_regs * sizeof(u64)); 919 + rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64)); 948 920 return rc ? 
kvm_s390_inject_prog_cond(vcpu, rc) : 0; 949 921 } 950 922 ··· 969 941 unsigned long hva, gpa; 970 942 int ret = 0, cc = 0; 971 943 bool writable; 944 + ar_t ar; 972 945 973 946 vcpu->stat.instruction_tprot++; 974 947 975 948 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 976 949 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 977 950 978 - kvm_s390_get_base_disp_sse(vcpu, &address1, &address2); 951 + kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL); 979 952 980 953 /* we only handle the Linux memory detection case: 981 954 * access key == 0 ··· 985 956 return -EOPNOTSUPP; 986 957 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) 987 958 ipte_lock(vcpu); 988 - ret = guest_translate_address(vcpu, address1, &gpa, 1); 959 + ret = guest_translate_address(vcpu, address1, ar, &gpa, 1); 989 960 if (ret == PGM_PROTECTION) { 990 961 /* Write protected? Try again with read-only... */ 991 962 cc = 1; 992 - ret = guest_translate_address(vcpu, address1, &gpa, 0); 963 + ret = guest_translate_address(vcpu, address1, ar, &gpa, 0); 993 964 } 994 965 if (ret) { 995 966 if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
+2 -2
arch/s390/kvm/sigp.c
··· 434 434 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 435 435 return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); 436 436 437 - order_code = kvm_s390_get_base_disp_rs(vcpu); 437 + order_code = kvm_s390_get_base_disp_rs(vcpu, NULL); 438 438 if (handle_sigp_order_in_user_space(vcpu, order_code)) 439 439 return -EOPNOTSUPP; 440 440 ··· 476 476 int r3 = vcpu->arch.sie_block->ipa & 0x000f; 477 477 u16 cpu_addr = vcpu->run->s.regs.gprs[r3]; 478 478 struct kvm_vcpu *dest_vcpu; 479 - u8 order_code = kvm_s390_get_base_disp_rs(vcpu); 479 + u8 order_code = kvm_s390_get_base_disp_rs(vcpu, NULL); 480 480 481 481 trace_kvm_s390_handle_sigp_pei(vcpu, order_code, cpu_addr); 482 482
+46
include/uapi/linux/kvm.h
··· 147 147 148 148 #define KVM_PIT_SPEAKER_DUMMY 1 149 149 150 + struct kvm_s390_skeys { 151 + __u64 start_gfn; 152 + __u64 count; 153 + __u64 skeydata_addr; 154 + __u32 flags; 155 + __u32 reserved[9]; 156 + }; 157 + #define KVM_S390_GET_SKEYS_NONE 1 158 + #define KVM_S390_SKEYS_MAX 1048576 159 + 150 160 #define KVM_EXIT_UNKNOWN 0 151 161 #define KVM_EXIT_EXCEPTION 1 152 162 #define KVM_EXIT_IO 2 ··· 182 172 #define KVM_EXIT_S390_TSCH 22 183 173 #define KVM_EXIT_EPR 23 184 174 #define KVM_EXIT_SYSTEM_EVENT 24 175 + #define KVM_EXIT_S390_STSI 25 185 176 186 177 /* For KVM_EXIT_INTERNAL_ERROR */ 187 178 /* Emulate instruction failed. */ ··· 320 309 __u32 type; 321 310 __u64 flags; 322 311 } system_event; 312 + /* KVM_EXIT_S390_STSI */ 313 + struct { 314 + __u64 addr; 315 + __u8 ar; 316 + __u8 reserved; 317 + __u8 fc; 318 + __u8 sel1; 319 + __u16 sel2; 320 + } s390_stsi; 323 321 /* Fix the size of the union. */ 324 322 char padding[256]; 325 323 }; ··· 384 364 __u8 usermode; 385 365 __u8 pad[5]; 386 366 }; 367 + 368 + /* for KVM_S390_MEM_OP */ 369 + struct kvm_s390_mem_op { 370 + /* in */ 371 + __u64 gaddr; /* the guest address */ 372 + __u64 flags; /* flags */ 373 + __u32 size; /* amount of bytes */ 374 + __u32 op; /* type of operation */ 375 + __u64 buf; /* buffer in userspace */ 376 + __u8 ar; /* the access register number */ 377 + __u8 reserved[31]; /* should be set to 0 */ 378 + }; 379 + /* types for kvm_s390_mem_op->op */ 380 + #define KVM_S390_MEMOP_LOGICAL_READ 0 381 + #define KVM_S390_MEMOP_LOGICAL_WRITE 1 382 + /* flags for kvm_s390_mem_op->flags */ 383 + #define KVM_S390_MEMOP_F_CHECK_ONLY (1ULL << 0) 384 + #define KVM_S390_MEMOP_F_INJECT_EXCEPTION (1ULL << 1) 387 385 388 386 /* for KVM_INTERRUPT */ 389 387 struct kvm_interrupt { ··· 799 761 #define KVM_CAP_CHECK_EXTENSION_VM 105 800 762 #define KVM_CAP_S390_USER_SIGP 106 801 763 #define KVM_CAP_S390_VECTOR_REGISTERS 107 764 + #define KVM_CAP_S390_MEM_OP 108 765 + #define KVM_CAP_S390_USER_STSI 109 766 + 
#define KVM_CAP_S390_SKEYS 110 802 767 803 768 #ifdef KVM_CAP_IRQ_ROUTING 804 769 ··· 1177 1136 #define KVM_ARM_VCPU_INIT _IOW(KVMIO, 0xae, struct kvm_vcpu_init) 1178 1137 #define KVM_ARM_PREFERRED_TARGET _IOR(KVMIO, 0xaf, struct kvm_vcpu_init) 1179 1138 #define KVM_GET_REG_LIST _IOWR(KVMIO, 0xb0, struct kvm_reg_list) 1139 + /* Available with KVM_CAP_S390_MEM_OP */ 1140 + #define KVM_S390_MEM_OP _IOW(KVMIO, 0xb1, struct kvm_s390_mem_op) 1141 + /* Available with KVM_CAP_S390_SKEYS */ 1142 + #define KVM_S390_GET_SKEYS _IOW(KVMIO, 0xb2, struct kvm_s390_skeys) 1143 + #define KVM_S390_SET_SKEYS _IOW(KVMIO, 0xb3, struct kvm_s390_skeys) 1180 1144 1181 1145 #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) 1182 1146 #define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1)