Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'kvm-s390-next-6.13-1' of https://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD

- second part of the ucontrol selftest
- cpumodel sanity check selftest
- gen17 cpumodel changes

+737 -13
+1
arch/s390/include/asm/kvm_host.h
··· 356 356 #define ECD_MEF 0x08000000 357 357 #define ECD_ETOKENF 0x02000000 358 358 #define ECD_ECC 0x00200000 359 + #define ECD_HMAC 0x00004000 359 360 __u32 ecd; /* 0x01c8 */ 360 361 __u8 reserved1cc[18]; /* 0x01cc */ 361 362 __u64 pp; /* 0x01de */
+2 -1
arch/s390/include/uapi/asm/kvm.h
··· 469 469 __u8 kdsa[16]; /* with MSA9 */ 470 470 __u8 sortl[32]; /* with STFLE.150 */ 471 471 __u8 dfltcc[32]; /* with STFLE.151 */ 472 - __u8 reserved[1728]; 472 + __u8 pfcr[16]; /* with STFLE.201 */ 473 + __u8 reserved[1712]; 473 474 }; 474 475 475 476 #define KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST 6
+41 -2
arch/s390/kvm/kvm-s390.c
··· 348 348 return cc == 0; 349 349 } 350 350 351 + static __always_inline void pfcr_query(u8 (*query)[16]) 352 + { 353 + asm volatile( 354 + " lghi 0,0\n" 355 + " .insn rsy,0xeb0000000016,0,0,%[query]\n" 356 + : [query] "=QS" (*query) 357 + : 358 + : "cc", "0"); 359 + } 360 + 351 361 static __always_inline void __sortl_query(u8 (*query)[32]) 352 362 { 353 363 asm volatile( ··· 438 428 439 429 if (test_facility(151)) /* DFLTCC */ 440 430 __dfltcc_query(&kvm_s390_available_subfunc.dfltcc); 431 + 432 + if (test_facility(201)) /* PFCR */ 433 + pfcr_query(&kvm_s390_available_subfunc.pfcr); 441 434 442 435 if (MACHINE_HAS_ESOP) 443 436 allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP); ··· 811 798 if (test_facility(192)) { 812 799 set_kvm_facility(kvm->arch.model.fac_mask, 192); 813 800 set_kvm_facility(kvm->arch.model.fac_list, 192); 801 + } 802 + if (test_facility(198)) { 803 + set_kvm_facility(kvm->arch.model.fac_mask, 198); 804 + set_kvm_facility(kvm->arch.model.fac_list, 198); 805 + } 806 + if (test_facility(199)) { 807 + set_kvm_facility(kvm->arch.model.fac_mask, 199); 808 + set_kvm_facility(kvm->arch.model.fac_list, 199); 814 809 } 815 810 r = 0; 816 811 } else ··· 1564 1543 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1], 1565 1544 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2], 1566 1545 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]); 1546 + VM_EVENT(kvm, 3, "GET: guest PFCR subfunc 0x%16.16lx.%16.16lx", 1547 + ((unsigned long *) &kvm_s390_available_subfunc.pfcr)[0], 1548 + ((unsigned long *) &kvm_s390_available_subfunc.pfcr)[1]); 1567 1549 1568 1550 return 0; 1569 1551 } ··· 1781 1757 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1], 1782 1758 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2], 1783 1759 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]); 1760 + VM_EVENT(kvm, 3, "GET: guest PFCR subfunc 0x%16.16lx.%16.16lx", 1761 + ((unsigned long *) &kvm_s390_available_subfunc.pfcr)[0], 1762 + ((unsigned long *) 
&kvm_s390_available_subfunc.pfcr)[1]); 1784 1763 1785 1764 return 0; 1786 1765 } ··· 1852 1825 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1], 1853 1826 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2], 1854 1827 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]); 1828 + VM_EVENT(kvm, 3, "GET: host PFCR subfunc 0x%16.16lx.%16.16lx", 1829 + ((unsigned long *) &kvm_s390_available_subfunc.pfcr)[0], 1830 + ((unsigned long *) &kvm_s390_available_subfunc.pfcr)[1]); 1855 1831 1856 1832 return 0; 1857 1833 } ··· 3804 3774 3805 3775 } 3806 3776 3777 + static bool kvm_has_pckmo_hmac(struct kvm *kvm) 3778 + { 3779 + /* At least one HMAC subfunction must be present */ 3780 + return kvm_has_pckmo_subfunc(kvm, 118) || 3781 + kvm_has_pckmo_subfunc(kvm, 122); 3782 + } 3783 + 3807 3784 static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu) 3808 3785 { 3809 3786 /* ··· 3823 3786 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd; 3824 3787 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA); 3825 3788 vcpu->arch.sie_block->eca &= ~ECA_APIE; 3826 - vcpu->arch.sie_block->ecd &= ~ECD_ECC; 3789 + vcpu->arch.sie_block->ecd &= ~(ECD_ECC | ECD_HMAC); 3827 3790 3828 3791 if (vcpu->kvm->arch.crypto.apie) 3829 3792 vcpu->arch.sie_block->eca |= ECA_APIE; ··· 3831 3794 /* Set up protected key support */ 3832 3795 if (vcpu->kvm->arch.crypto.aes_kw) { 3833 3796 vcpu->arch.sie_block->ecb3 |= ECB3_AES; 3834 - /* ecc is also wrapped with AES key */ 3797 + /* ecc/hmac is also wrapped with AES key */ 3835 3798 if (kvm_has_pckmo_ecc(vcpu->kvm)) 3836 3799 vcpu->arch.sie_block->ecd |= ECD_ECC; 3800 + if (kvm_has_pckmo_hmac(vcpu->kvm)) 3801 + vcpu->arch.sie_block->ecd |= ECD_HMAC; 3837 3802 } 3838 3803 3839 3804 if (vcpu->kvm->arch.crypto.dea_kw)
+2 -1
arch/s390/kvm/vsie.c
··· 335 335 /* we may only allow it if enabled for guest 2 */ 336 336 ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 & 337 337 (ECB3_AES | ECB3_DEA); 338 - ecd_flags = scb_o->ecd & vcpu->arch.sie_block->ecd & ECD_ECC; 338 + ecd_flags = scb_o->ecd & vcpu->arch.sie_block->ecd & 339 + (ECD_ECC | ECD_HMAC); 339 340 if (!ecb3_flags && !ecd_flags) 340 341 goto end; 341 342
+2
arch/s390/tools/gen_facilities.c
··· 109 109 15, /* AP Facilities Test */ 110 110 156, /* etoken facility */ 111 111 165, /* nnpa facility */ 112 + 170, /* ineffective-nonconstrained-transaction facility */ 112 113 193, /* bear enhancement facility */ 113 114 194, /* rdp enhancement facility */ 114 115 196, /* processor activity instrumentation facility */ 115 116 197, /* processor activity instrumentation extension 1 */ 117 + 201, /* concurrent-functions facility */ 116 118 -1 /* END */ 117 119 } 118 120 },
+2 -1
tools/arch/s390/include/uapi/asm/kvm.h
··· 469 469 __u8 kdsa[16]; /* with MSA9 */ 470 470 __u8 sortl[32]; /* with STFLE.150 */ 471 471 __u8 dfltcc[32]; /* with STFLE.151 */ 472 - __u8 reserved[1728]; 472 + __u8 pfcr[16]; /* with STFLE.201 */ 473 + __u8 reserved[1712]; 473 474 }; 474 475 475 476 #define KVM_S390_VM_CPU_PROCESSOR_UV_FEAT_GUEST 6
+2
tools/testing/selftests/kvm/Makefile
··· 55 55 LIBKVM_s390x += lib/s390x/diag318_test_handler.c 56 56 LIBKVM_s390x += lib/s390x/processor.c 57 57 LIBKVM_s390x += lib/s390x/ucall.c 58 + LIBKVM_s390x += lib/s390x/facility.c 58 59 59 60 LIBKVM_riscv += lib/riscv/handlers.S 60 61 LIBKVM_riscv += lib/riscv/processor.c ··· 190 189 TEST_GEN_PROGS_s390x += s390x/tprot 191 190 TEST_GEN_PROGS_s390x += s390x/cmma_test 192 191 TEST_GEN_PROGS_s390x += s390x/debug_test 192 + TEST_GEN_PROGS_s390x += s390x/cpumodel_subfuncs_test 193 193 TEST_GEN_PROGS_s390x += s390x/shared_zeropage_test 194 194 TEST_GEN_PROGS_s390x += s390x/ucontrol_test 195 195 TEST_GEN_PROGS_s390x += demand_paging_test
+50
tools/testing/selftests/kvm/include/s390x/facility.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only */ 2 + /* 3 + * Copyright IBM Corp. 2024 4 + * 5 + * Authors: 6 + * Hariharan Mari <hari55@linux.ibm.com> 7 + * 8 + * Get the facility bits with the STFLE instruction 9 + */ 10 + 11 + #ifndef SELFTEST_KVM_FACILITY_H 12 + #define SELFTEST_KVM_FACILITY_H 13 + 14 + #include <linux/bitops.h> 15 + 16 + /* alt_stfle_fac_list[16] + stfle_fac_list[16] */ 17 + #define NB_STFL_DOUBLEWORDS 32 18 + 19 + extern uint64_t stfl_doublewords[NB_STFL_DOUBLEWORDS]; 20 + extern bool stfle_flag; 21 + 22 + static inline bool test_bit_inv(unsigned long nr, const unsigned long *ptr) 23 + { 24 + return test_bit(nr ^ (BITS_PER_LONG - 1), ptr); 25 + } 26 + 27 + static inline void stfle(uint64_t *fac, unsigned int nb_doublewords) 28 + { 29 + register unsigned long r0 asm("0") = nb_doublewords - 1; 30 + 31 + asm volatile(" .insn s,0xb2b00000,0(%1)\n" 32 + : "+d" (r0) 33 + : "a" (fac) 34 + : "memory", "cc"); 35 + } 36 + 37 + static inline void setup_facilities(void) 38 + { 39 + stfle(stfl_doublewords, NB_STFL_DOUBLEWORDS); 40 + stfle_flag = true; 41 + } 42 + 43 + static inline bool test_facility(int nr) 44 + { 45 + if (!stfle_flag) 46 + setup_facilities(); 47 + return test_bit_inv(nr, stfl_doublewords); 48 + } 49 + 50 + #endif
+6
tools/testing/selftests/kvm/include/s390x/processor.h
··· 32 32 barrier(); 33 33 } 34 34 35 + /* Get the instruction length */ 36 + static inline int insn_length(unsigned char code) 37 + { 38 + return ((((int)code + 64) >> 7) + 1) << 1; 39 + } 40 + 35 41 #endif
+14
tools/testing/selftests/kvm/lib/s390x/facility.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright IBM Corp. 2024 4 + * 5 + * Authors: 6 + * Hariharan Mari <hari55@linux.ibm.com> 7 + * 8 + * Contains the definition for the global variables to have the test facility feature. 9 + */ 10 + 11 + #include "facility.h" 12 + 13 + uint64_t stfl_doublewords[NB_STFL_DOUBLEWORDS]; 14 + bool stfle_flag;
+301
tools/testing/selftests/kvm/s390x/cpumodel_subfuncs_test.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * Copyright IBM Corp. 2024 4 + * 5 + * Authors: 6 + * Hariharan Mari <hari55@linux.ibm.com> 7 + * 8 + * The tests compare the result of the KVM ioctl for obtaining CPU subfunction data with those 9 + * from an ASM block performing the same CPU subfunction. Currently KVM doesn't mask instruction 10 + * query data reported via the CPU Model, allowing us to directly compare it with the data 11 + * acquired through executing the queries in the test. 12 + */ 13 + 14 + #include <stdio.h> 15 + #include <stdlib.h> 16 + #include <string.h> 17 + #include <sys/ioctl.h> 18 + #include "facility.h" 19 + 20 + #include "kvm_util.h" 21 + 22 + #define PLO_FUNCTION_MAX 256 23 + 24 + /* Query available CPU subfunctions */ 25 + struct kvm_s390_vm_cpu_subfunc cpu_subfunc; 26 + 27 + static void get_cpu_machine_subfuntions(struct kvm_vm *vm, 28 + struct kvm_s390_vm_cpu_subfunc *cpu_subfunc) 29 + { 30 + int r; 31 + 32 + r = __kvm_device_attr_get(vm->fd, KVM_S390_VM_CPU_MODEL, 33 + KVM_S390_VM_CPU_MACHINE_SUBFUNC, cpu_subfunc); 34 + 35 + TEST_ASSERT(!r, "Get cpu subfunctions failed r=%d errno=%d", r, errno); 36 + } 37 + 38 + static inline int plo_test_bit(unsigned char nr) 39 + { 40 + unsigned long function = nr | 0x100; 41 + int cc; 42 + 43 + asm volatile(" lgr 0,%[function]\n" 44 + /* Parameter registers are ignored for "test bit" */ 45 + " plo 0,0,0,0(0)\n" 46 + " ipm %0\n" 47 + " srl %0,28\n" 48 + : "=d" (cc) 49 + : [function] "d" (function) 50 + : "cc", "0"); 51 + return cc == 0; 52 + } 53 + 54 + /* Testing Perform Locked Operation (PLO) CPU subfunction's ASM block */ 55 + static void test_plo_asm_block(u8 (*query)[32]) 56 + { 57 + for (int i = 0; i < PLO_FUNCTION_MAX; ++i) { 58 + if (plo_test_bit(i)) 59 + (*query)[i >> 3] |= 0x80 >> (i & 7); 60 + } 61 + } 62 + 63 + /* Testing Crypto Compute Message Authentication Code (KMAC) CPU subfunction's ASM block */ 64 + static void test_kmac_asm_block(u8 (*query)[16]) 65 + { 66 + asm 
volatile(" la %%r1,%[query]\n" 67 + " xgr %%r0,%%r0\n" 68 + " .insn rre,0xb91e0000,0,2\n" 69 + : [query] "=R" (*query) 70 + : 71 + : "cc", "r0", "r1"); 72 + } 73 + 74 + /* Testing Crypto Cipher Message with Chaining (KMC) CPU subfunction's ASM block */ 75 + static void test_kmc_asm_block(u8 (*query)[16]) 76 + { 77 + asm volatile(" la %%r1,%[query]\n" 78 + " xgr %%r0,%%r0\n" 79 + " .insn rre,0xb92f0000,2,4\n" 80 + : [query] "=R" (*query) 81 + : 82 + : "cc", "r0", "r1"); 83 + } 84 + 85 + /* Testing Crypto Cipher Message (KM) CPU subfunction's ASM block */ 86 + static void test_km_asm_block(u8 (*query)[16]) 87 + { 88 + asm volatile(" la %%r1,%[query]\n" 89 + " xgr %%r0,%%r0\n" 90 + " .insn rre,0xb92e0000,2,4\n" 91 + : [query] "=R" (*query) 92 + : 93 + : "cc", "r0", "r1"); 94 + } 95 + 96 + /* Testing Crypto Compute Intermediate Message Digest (KIMD) CPU subfunction's ASM block */ 97 + static void test_kimd_asm_block(u8 (*query)[16]) 98 + { 99 + asm volatile(" la %%r1,%[query]\n" 100 + " xgr %%r0,%%r0\n" 101 + " .insn rre,0xb93e0000,0,2\n" 102 + : [query] "=R" (*query) 103 + : 104 + : "cc", "r0", "r1"); 105 + } 106 + 107 + /* Testing Crypto Compute Last Message Digest (KLMD) CPU subfunction's ASM block */ 108 + static void test_klmd_asm_block(u8 (*query)[16]) 109 + { 110 + asm volatile(" la %%r1,%[query]\n" 111 + " xgr %%r0,%%r0\n" 112 + " .insn rre,0xb93f0000,0,2\n" 113 + : [query] "=R" (*query) 114 + : 115 + : "cc", "r0", "r1"); 116 + } 117 + 118 + /* Testing Crypto Cipher Message with Counter (KMCTR) CPU subfunction's ASM block */ 119 + static void test_kmctr_asm_block(u8 (*query)[16]) 120 + { 121 + asm volatile(" la %%r1,%[query]\n" 122 + " xgr %%r0,%%r0\n" 123 + " .insn rrf,0xb92d0000,2,4,6,0\n" 124 + : [query] "=R" (*query) 125 + : 126 + : "cc", "r0", "r1"); 127 + } 128 + 129 + /* Testing Crypto Cipher Message with Cipher Feedback (KMF) CPU subfunction's ASM block */ 130 + static void test_kmf_asm_block(u8 (*query)[16]) 131 + { 132 + asm volatile(" la 
%%r1,%[query]\n" 133 + " xgr %%r0,%%r0\n" 134 + " .insn rre,0xb92a0000,2,4\n" 135 + : [query] "=R" (*query) 136 + : 137 + : "cc", "r0", "r1"); 138 + } 139 + 140 + /* Testing Crypto Cipher Message with Output Feedback (KMO) CPU subfunction's ASM block */ 141 + static void test_kmo_asm_block(u8 (*query)[16]) 142 + { 143 + asm volatile(" la %%r1,%[query]\n" 144 + " xgr %%r0,%%r0\n" 145 + " .insn rre,0xb92b0000,2,4\n" 146 + : [query] "=R" (*query) 147 + : 148 + : "cc", "r0", "r1"); 149 + } 150 + 151 + /* Testing Crypto Perform Cryptographic Computation (PCC) CPU subfunction's ASM block */ 152 + static void test_pcc_asm_block(u8 (*query)[16]) 153 + { 154 + asm volatile(" la %%r1,%[query]\n" 155 + " xgr %%r0,%%r0\n" 156 + " .insn rre,0xb92c0000,0,0\n" 157 + : [query] "=R" (*query) 158 + : 159 + : "cc", "r0", "r1"); 160 + } 161 + 162 + /* Testing Crypto Perform Random Number Operation (PRNO) CPU subfunction's ASM block */ 163 + static void test_prno_asm_block(u8 (*query)[16]) 164 + { 165 + asm volatile(" la %%r1,%[query]\n" 166 + " xgr %%r0,%%r0\n" 167 + " .insn rre,0xb93c0000,2,4\n" 168 + : [query] "=R" (*query) 169 + : 170 + : "cc", "r0", "r1"); 171 + } 172 + 173 + /* Testing Crypto Cipher Message with Authentication (KMA) CPU subfunction's ASM block */ 174 + static void test_kma_asm_block(u8 (*query)[16]) 175 + { 176 + asm volatile(" la %%r1,%[query]\n" 177 + " xgr %%r0,%%r0\n" 178 + " .insn rrf,0xb9290000,2,4,6,0\n" 179 + : [query] "=R" (*query) 180 + : 181 + : "cc", "r0", "r1"); 182 + } 183 + 184 + /* Testing Crypto Compute Digital Signature Authentication (KDSA) CPU subfunction's ASM block */ 185 + static void test_kdsa_asm_block(u8 (*query)[16]) 186 + { 187 + asm volatile(" la %%r1,%[query]\n" 188 + " xgr %%r0,%%r0\n" 189 + " .insn rre,0xb93a0000,0,2\n" 190 + : [query] "=R" (*query) 191 + : 192 + : "cc", "r0", "r1"); 193 + } 194 + 195 + /* Testing Sort Lists (SORTL) CPU subfunction's ASM block */ 196 + static void test_sortl_asm_block(u8 (*query)[32]) 197 + { 198 + 
asm volatile(" lghi 0,0\n" 199 + " la 1,%[query]\n" 200 + " .insn rre,0xb9380000,2,4\n" 201 + : [query] "=R" (*query) 202 + : 203 + : "cc", "0", "1"); 204 + } 205 + 206 + /* Testing Deflate Conversion Call (DFLTCC) CPU subfunction's ASM block */ 207 + static void test_dfltcc_asm_block(u8 (*query)[32]) 208 + { 209 + asm volatile(" lghi 0,0\n" 210 + " la 1,%[query]\n" 211 + " .insn rrf,0xb9390000,2,4,6,0\n" 212 + : [query] "=R" (*query) 213 + : 214 + : "cc", "0", "1"); 215 + } 216 + 217 + /* 218 + * Testing Perform Function with Concurrent Results (PFCR) 219 + * CPU subfunctions's ASM block 220 + */ 221 + static void test_pfcr_asm_block(u8 (*query)[16]) 222 + { 223 + asm volatile(" lghi 0,0\n" 224 + " .insn rsy,0xeb0000000016,0,0,%[query]\n" 225 + : [query] "=QS" (*query) 226 + : 227 + : "cc", "0"); 228 + } 229 + 230 + typedef void (*testfunc_t)(u8 (*array)[]); 231 + 232 + struct testdef { 233 + const char *subfunc_name; 234 + u8 *subfunc_array; 235 + size_t array_size; 236 + testfunc_t test; 237 + int facility_bit; 238 + } testlist[] = { 239 + /* 240 + * PLO was introduced in the very first 64-bit machine generation. 241 + * Hence it is assumed PLO is always installed in Z Arch. 
242 + */ 243 + { "PLO", cpu_subfunc.plo, sizeof(cpu_subfunc.plo), test_plo_asm_block, 1 }, 244 + /* MSA - Facility bit 17 */ 245 + { "KMAC", cpu_subfunc.kmac, sizeof(cpu_subfunc.kmac), test_kmac_asm_block, 17 }, 246 + { "KMC", cpu_subfunc.kmc, sizeof(cpu_subfunc.kmc), test_kmc_asm_block, 17 }, 247 + { "KM", cpu_subfunc.km, sizeof(cpu_subfunc.km), test_km_asm_block, 17 }, 248 + { "KIMD", cpu_subfunc.kimd, sizeof(cpu_subfunc.kimd), test_kimd_asm_block, 17 }, 249 + { "KLMD", cpu_subfunc.klmd, sizeof(cpu_subfunc.klmd), test_klmd_asm_block, 17 }, 250 + /* MSA - Facility bit 77 */ 251 + { "KMCTR", cpu_subfunc.kmctr, sizeof(cpu_subfunc.kmctr), test_kmctr_asm_block, 77 }, 252 + { "KMF", cpu_subfunc.kmf, sizeof(cpu_subfunc.kmf), test_kmf_asm_block, 77 }, 253 + { "KMO", cpu_subfunc.kmo, sizeof(cpu_subfunc.kmo), test_kmo_asm_block, 77 }, 254 + { "PCC", cpu_subfunc.pcc, sizeof(cpu_subfunc.pcc), test_pcc_asm_block, 77 }, 255 + /* MSA5 - Facility bit 57 */ 256 + { "PPNO", cpu_subfunc.ppno, sizeof(cpu_subfunc.ppno), test_prno_asm_block, 57 }, 257 + /* MSA8 - Facility bit 146 */ 258 + { "KMA", cpu_subfunc.kma, sizeof(cpu_subfunc.kma), test_kma_asm_block, 146 }, 259 + /* MSA9 - Facility bit 155 */ 260 + { "KDSA", cpu_subfunc.kdsa, sizeof(cpu_subfunc.kdsa), test_kdsa_asm_block, 155 }, 261 + /* SORTL - Facility bit 150 */ 262 + { "SORTL", cpu_subfunc.sortl, sizeof(cpu_subfunc.sortl), test_sortl_asm_block, 150 }, 263 + /* DFLTCC - Facility bit 151 */ 264 + { "DFLTCC", cpu_subfunc.dfltcc, sizeof(cpu_subfunc.dfltcc), test_dfltcc_asm_block, 151 }, 265 + /* Concurrent-function facility - Facility bit 201 */ 266 + { "PFCR", cpu_subfunc.pfcr, sizeof(cpu_subfunc.pfcr), test_pfcr_asm_block, 201 }, 267 + }; 268 + 269 + int main(int argc, char *argv[]) 270 + { 271 + struct kvm_vm *vm; 272 + int idx; 273 + 274 + ksft_print_header(); 275 + 276 + vm = vm_create(1); 277 + 278 + memset(&cpu_subfunc, 0, sizeof(cpu_subfunc)); 279 + get_cpu_machine_subfuntions(vm, &cpu_subfunc); 280 + 281 + 
ksft_set_plan(ARRAY_SIZE(testlist)); 282 + for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) { 283 + if (test_facility(testlist[idx].facility_bit)) { 284 + u8 *array = malloc(testlist[idx].array_size); 285 + 286 + testlist[idx].test((u8 (*)[testlist[idx].array_size])array); 287 + 288 + TEST_ASSERT_EQ(memcmp(testlist[idx].subfunc_array, 289 + array, testlist[idx].array_size), 0); 290 + 291 + ksft_test_result_pass("%s\n", testlist[idx].subfunc_name); 292 + free(array); 293 + } else { 294 + ksft_test_result_skip("%s feature is not available\n", 295 + testlist[idx].subfunc_name); 296 + } 297 + } 298 + 299 + kvm_vm_free(vm); 300 + ksft_finished(); 301 + }
+314 -8
tools/testing/selftests/kvm/s390x/ucontrol_test.c
··· 16 16 #include <linux/capability.h> 17 17 #include <linux/sizes.h> 18 18 19 + #define PGM_SEGMENT_TRANSLATION 0x10 20 + 19 21 #define VM_MEM_SIZE (4 * SZ_1M) 22 + #define VM_MEM_EXT_SIZE (2 * SZ_1M) 23 + #define VM_MEM_MAX_M ((VM_MEM_SIZE + VM_MEM_EXT_SIZE) / SZ_1M) 20 24 21 25 /* so directly declare capget to check caps without libcap */ 22 26 int capget(cap_user_header_t header, cap_user_data_t data); ··· 62 58 " j 0b\n" 63 59 ); 64 60 61 + /* Test program manipulating memory */ 62 + extern char test_mem_asm[]; 63 + asm("test_mem_asm:\n" 64 + "xgr %r0, %r0\n" 65 + 66 + "0:\n" 67 + " ahi %r0,1\n" 68 + " st %r1,0(%r5,%r6)\n" 69 + 70 + " xgr %r1,%r1\n" 71 + " l %r1,0(%r5,%r6)\n" 72 + " ahi %r0,1\n" 73 + " diag 0,0,0x44\n" 74 + 75 + " j 0b\n" 76 + ); 77 + 78 + /* Test program manipulating storage keys */ 79 + extern char test_skey_asm[]; 80 + asm("test_skey_asm:\n" 81 + "xgr %r0, %r0\n" 82 + 83 + "0:\n" 84 + " ahi %r0,1\n" 85 + " st %r1,0(%r5,%r6)\n" 86 + 87 + " iske %r1,%r6\n" 88 + " ahi %r0,1\n" 89 + " diag 0,0,0x44\n" 90 + 91 + " sske %r1,%r6\n" 92 + " xgr %r1,%r1\n" 93 + " iske %r1,%r6\n" 94 + " ahi %r0,1\n" 95 + " diag 0,0,0x44\n" 96 + 97 + " rrbe %r1,%r6\n" 98 + " iske %r1,%r6\n" 99 + " ahi %r0,1\n" 100 + " diag 0,0,0x44\n" 101 + 102 + " j 0b\n" 103 + ); 104 + 65 105 FIXTURE(uc_kvm) 66 106 { 67 107 struct kvm_s390_sie_block *sie_block; ··· 115 67 uintptr_t base_hva; 116 68 uintptr_t code_hva; 117 69 int kvm_run_size; 70 + vm_paddr_t pgd; 118 71 void *vm_mem; 119 72 int vcpu_fd; 120 73 int kvm_fd; ··· 165 116 self->base_gpa = 0; 166 117 self->code_gpa = self->base_gpa + (3 * SZ_1M); 167 118 168 - self->vm_mem = aligned_alloc(SZ_1M, VM_MEM_SIZE); 119 + self->vm_mem = aligned_alloc(SZ_1M, VM_MEM_MAX_M * SZ_1M); 169 120 ASSERT_NE(NULL, self->vm_mem) TH_LOG("malloc failed %u", errno); 170 121 self->base_hva = (uintptr_t)self->vm_mem; 171 122 self->code_hva = self->base_hva - self->base_gpa + self->code_gpa; ··· 271 222 close(kvm_fd); 272 223 } 273 224 274 - /* 
verify SIEIC exit 225 + /* calculate host virtual addr from guest physical addr */ 226 + static void *gpa2hva(FIXTURE_DATA(uc_kvm) *self, u64 gpa) 227 + { 228 + return (void *)(self->base_hva - self->base_gpa + gpa); 229 + } 230 + 231 + /* map / make additional memory available */ 232 + static int uc_map_ext(FIXTURE_DATA(uc_kvm) *self, u64 vcpu_addr, u64 length) 233 + { 234 + struct kvm_s390_ucas_mapping map = { 235 + .user_addr = (u64)gpa2hva(self, vcpu_addr), 236 + .vcpu_addr = vcpu_addr, 237 + .length = length, 238 + }; 239 + pr_info("ucas map %p %p 0x%llx", 240 + (void *)map.user_addr, (void *)map.vcpu_addr, map.length); 241 + return ioctl(self->vcpu_fd, KVM_S390_UCAS_MAP, &map); 242 + } 243 + 244 + /* unmap previously mapped memory */ 245 + static int uc_unmap_ext(FIXTURE_DATA(uc_kvm) *self, u64 vcpu_addr, u64 length) 246 + { 247 + struct kvm_s390_ucas_mapping map = { 248 + .user_addr = (u64)gpa2hva(self, vcpu_addr), 249 + .vcpu_addr = vcpu_addr, 250 + .length = length, 251 + }; 252 + pr_info("ucas unmap %p %p 0x%llx", 253 + (void *)map.user_addr, (void *)map.vcpu_addr, map.length); 254 + return ioctl(self->vcpu_fd, KVM_S390_UCAS_UNMAP, &map); 255 + } 256 + 257 + /* handle ucontrol exit by mapping the accessed segment */ 258 + static void uc_handle_exit_ucontrol(FIXTURE_DATA(uc_kvm) *self) 259 + { 260 + struct kvm_run *run = self->run; 261 + u64 seg_addr; 262 + int rc; 263 + 264 + TEST_ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason); 265 + switch (run->s390_ucontrol.pgm_code) { 266 + case PGM_SEGMENT_TRANSLATION: 267 + seg_addr = run->s390_ucontrol.trans_exc_code & ~(SZ_1M - 1); 268 + pr_info("ucontrol pic segment translation 0x%llx, mapping segment 0x%lx\n", 269 + run->s390_ucontrol.trans_exc_code, seg_addr); 270 + /* map / make additional memory available */ 271 + rc = uc_map_ext(self, seg_addr, SZ_1M); 272 + TEST_ASSERT_EQ(0, rc); 273 + break; 274 + default: 275 + TEST_FAIL("UNEXPECTED PGM CODE %d", run->s390_ucontrol.pgm_code); 276 + } 277 + } 278 + 
279 + /* 280 + * Handle the SIEIC exit 275 281 * * fail on codes not expected in the test cases 282 + * Returns if interception is handled / execution can be continued 276 283 */ 277 - static bool uc_handle_sieic(FIXTURE_DATA(uc_kvm) * self) 284 + static void uc_skey_enable(FIXTURE_DATA(uc_kvm) *self) 285 + { 286 + struct kvm_s390_sie_block *sie_block = self->sie_block; 287 + 288 + /* disable KSS */ 289 + sie_block->cpuflags &= ~CPUSTAT_KSS; 290 + /* disable skey inst interception */ 291 + sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE); 292 + } 293 + 294 + /* 295 + * Handle the instruction intercept 296 + * Returns if interception is handled / execution can be continued 297 + */ 298 + static bool uc_handle_insn_ic(FIXTURE_DATA(uc_kvm) *self) 299 + { 300 + struct kvm_s390_sie_block *sie_block = self->sie_block; 301 + int ilen = insn_length(sie_block->ipa >> 8); 302 + struct kvm_run *run = self->run; 303 + 304 + switch (run->s390_sieic.ipa) { 305 + case 0xB229: /* ISKE */ 306 + case 0xB22b: /* SSKE */ 307 + case 0xB22a: /* RRBE */ 308 + uc_skey_enable(self); 309 + 310 + /* rewind to reexecute intercepted instruction */ 311 + run->psw_addr = run->psw_addr - ilen; 312 + pr_info("rewind guest addr to 0x%.16llx\n", run->psw_addr); 313 + return true; 314 + default: 315 + return false; 316 + } 317 + } 318 + 319 + /* 320 + * Handle the SIEIC exit 321 + * * fail on codes not expected in the test cases 322 + * Returns if interception is handled / execution can be continued 323 + */ 324 + static bool uc_handle_sieic(FIXTURE_DATA(uc_kvm) *self) 278 325 { 279 326 struct kvm_s390_sie_block *sie_block = self->sie_block; 280 327 struct kvm_run *run = self->run; 281 328 282 329 /* check SIE interception code */ 283 - pr_info("sieic: 0x%.2x 0x%.4x 0x%.4x\n", 330 + pr_info("sieic: 0x%.2x 0x%.4x 0x%.8x\n", 284 331 run->s390_sieic.icptcode, 285 332 run->s390_sieic.ipa, 286 333 run->s390_sieic.ipb); ··· 384 239 case ICPT_INST: 385 240 /* end execution in caller on intercepted 
instruction */ 386 241 pr_info("sie instruction interception\n"); 387 - return false; 242 + return uc_handle_insn_ic(self); 243 + case ICPT_KSS: 244 + uc_skey_enable(self); 245 + return true; 388 246 case ICPT_OPEREXC: 389 247 /* operation exception */ 390 248 TEST_FAIL("sie exception on %.4x%.8x", sie_block->ipa, sie_block->ipb); ··· 398 250 } 399 251 400 252 /* verify VM state on exit */ 401 - static bool uc_handle_exit(FIXTURE_DATA(uc_kvm) * self) 253 + static bool uc_handle_exit(FIXTURE_DATA(uc_kvm) *self) 402 254 { 403 255 struct kvm_run *run = self->run; 404 256 405 257 switch (run->exit_reason) { 258 + case KVM_EXIT_S390_UCONTROL: 259 + /** check program interruption code 260 + * handle page fault --> ucas map 261 + */ 262 + uc_handle_exit_ucontrol(self); 263 + break; 406 264 case KVM_EXIT_S390_SIEIC: 407 265 return uc_handle_sieic(self); 408 266 default: ··· 418 264 } 419 265 420 266 /* run the VM until interrupted */ 421 - static int uc_run_once(FIXTURE_DATA(uc_kvm) * self) 267 + static int uc_run_once(FIXTURE_DATA(uc_kvm) *self) 422 268 { 423 269 int rc; 424 270 ··· 429 275 return rc; 430 276 } 431 277 432 - static void uc_assert_diag44(FIXTURE_DATA(uc_kvm) * self) 278 + static void uc_assert_diag44(FIXTURE_DATA(uc_kvm) *self) 433 279 { 434 280 struct kvm_s390_sie_block *sie_block = self->sie_block; 435 281 ··· 438 284 TEST_ASSERT_EQ(ICPT_INST, sie_block->icptcode); 439 285 TEST_ASSERT_EQ(0x8300, sie_block->ipa); 440 286 TEST_ASSERT_EQ(0x440000, sie_block->ipb); 287 + } 288 + 289 + TEST_F(uc_kvm, uc_no_user_region) 290 + { 291 + struct kvm_userspace_memory_region region = { 292 + .slot = 1, 293 + .guest_phys_addr = self->code_gpa, 294 + .memory_size = VM_MEM_EXT_SIZE, 295 + .userspace_addr = (uintptr_t)self->code_hva, 296 + }; 297 + struct kvm_userspace_memory_region2 region2 = { 298 + .slot = 1, 299 + .guest_phys_addr = self->code_gpa, 300 + .memory_size = VM_MEM_EXT_SIZE, 301 + .userspace_addr = (uintptr_t)self->code_hva, 302 + }; 303 + 304 + 
ASSERT_EQ(-1, ioctl(self->vm_fd, KVM_SET_USER_MEMORY_REGION, &region)); 305 + ASSERT_EQ(EINVAL, errno); 306 + 307 + ASSERT_EQ(-1, ioctl(self->vm_fd, KVM_SET_USER_MEMORY_REGION2, &region2)); 308 + ASSERT_EQ(EINVAL, errno); 309 + } 310 + 311 + TEST_F(uc_kvm, uc_map_unmap) 312 + { 313 + struct kvm_sync_regs *sync_regs = &self->run->s.regs; 314 + struct kvm_run *run = self->run; 315 + const u64 disp = 1; 316 + int rc; 317 + 318 + /* copy test_mem_asm to code_hva / code_gpa */ 319 + TH_LOG("copy code %p to vm mapped memory %p / %p", 320 + &test_mem_asm, (void *)self->code_hva, (void *)self->code_gpa); 321 + memcpy((void *)self->code_hva, &test_mem_asm, PAGE_SIZE); 322 + 323 + /* DAT disabled + 64 bit mode */ 324 + run->psw_mask = 0x0000000180000000ULL; 325 + run->psw_addr = self->code_gpa; 326 + 327 + /* set register content for test_mem_asm to access not mapped memory*/ 328 + sync_regs->gprs[1] = 0x55; 329 + sync_regs->gprs[5] = self->base_gpa; 330 + sync_regs->gprs[6] = VM_MEM_SIZE + disp; 331 + run->kvm_dirty_regs |= KVM_SYNC_GPRS; 332 + 333 + /* run and expect to fail with ucontrol pic segment translation */ 334 + ASSERT_EQ(0, uc_run_once(self)); 335 + ASSERT_EQ(1, sync_regs->gprs[0]); 336 + ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason); 337 + 338 + ASSERT_EQ(PGM_SEGMENT_TRANSLATION, run->s390_ucontrol.pgm_code); 339 + ASSERT_EQ(self->base_gpa + VM_MEM_SIZE, run->s390_ucontrol.trans_exc_code); 340 + 341 + /* fail to map memory with not segment aligned address */ 342 + rc = uc_map_ext(self, self->base_gpa + VM_MEM_SIZE + disp, VM_MEM_EXT_SIZE); 343 + ASSERT_GT(0, rc) 344 + TH_LOG("ucas map for non segment address should fail but didn't; " 345 + "result %d not expected, %s", rc, strerror(errno)); 346 + 347 + /* map / make additional memory available */ 348 + rc = uc_map_ext(self, self->base_gpa + VM_MEM_SIZE, VM_MEM_EXT_SIZE); 349 + ASSERT_EQ(0, rc) 350 + TH_LOG("ucas map result %d not expected, %s", rc, strerror(errno)); 351 + ASSERT_EQ(0, uc_run_once(self)); 
352 + ASSERT_EQ(false, uc_handle_exit(self)); 353 + uc_assert_diag44(self); 354 + 355 + /* assert registers and memory are in expected state */ 356 + ASSERT_EQ(2, sync_regs->gprs[0]); 357 + ASSERT_EQ(0x55, sync_regs->gprs[1]); 358 + ASSERT_EQ(0x55, *(u32 *)gpa2hva(self, self->base_gpa + VM_MEM_SIZE + disp)); 359 + 360 + /* unmap and run loop again */ 361 + rc = uc_unmap_ext(self, self->base_gpa + VM_MEM_SIZE, VM_MEM_EXT_SIZE); 362 + ASSERT_EQ(0, rc) 363 + TH_LOG("ucas unmap result %d not expected, %s", rc, strerror(errno)); 364 + ASSERT_EQ(0, uc_run_once(self)); 365 + ASSERT_EQ(3, sync_regs->gprs[0]); 366 + ASSERT_EQ(KVM_EXIT_S390_UCONTROL, run->exit_reason); 367 + ASSERT_EQ(PGM_SEGMENT_TRANSLATION, run->s390_ucontrol.pgm_code); 368 + /* handle ucontrol exit and remap memory after previous map and unmap */ 369 + ASSERT_EQ(true, uc_handle_exit(self)); 441 370 } 442 371 443 372 TEST_F(uc_kvm, uc_gprs) ··· 564 327 ASSERT_EQ(0, ioctl(self->vcpu_fd, KVM_GET_REGS, &regs)); 565 328 ASSERT_EQ(1, regs.gprs[0]); 566 329 ASSERT_EQ(1, sync_regs->gprs[0]); 330 + } 331 + 332 + TEST_F(uc_kvm, uc_skey) 333 + { 334 + struct kvm_s390_sie_block *sie_block = self->sie_block; 335 + struct kvm_sync_regs *sync_regs = &self->run->s.regs; 336 + u64 test_vaddr = VM_MEM_SIZE - (SZ_1M / 2); 337 + struct kvm_run *run = self->run; 338 + const u8 skeyvalue = 0x34; 339 + 340 + /* copy test_skey_asm to code_hva / code_gpa */ 341 + TH_LOG("copy code %p to vm mapped memory %p / %p", 342 + &test_skey_asm, (void *)self->code_hva, (void *)self->code_gpa); 343 + memcpy((void *)self->code_hva, &test_skey_asm, PAGE_SIZE); 344 + 345 + /* set register content for test_skey_asm to access not mapped memory */ 346 + sync_regs->gprs[1] = skeyvalue; 347 + sync_regs->gprs[5] = self->base_gpa; 348 + sync_regs->gprs[6] = test_vaddr; 349 + run->kvm_dirty_regs |= KVM_SYNC_GPRS; 350 + 351 + /* DAT disabled + 64 bit mode */ 352 + run->psw_mask = 0x0000000180000000ULL; 353 + run->psw_addr = self->code_gpa; 354 + 355 + 
ASSERT_EQ(0, uc_run_once(self)); 356 + ASSERT_EQ(true, uc_handle_exit(self)); 357 + ASSERT_EQ(1, sync_regs->gprs[0]); 358 + 359 + /* ISKE */ 360 + ASSERT_EQ(0, uc_run_once(self)); 361 + 362 + /* 363 + * Bail out and skip the test after uc_skey_enable was executed but iske 364 + * is still intercepted. Instructions are not handled by the kernel. 365 + * Thus there is no need to test this here. 366 + */ 367 + TEST_ASSERT_EQ(0, sie_block->cpuflags & CPUSTAT_KSS); 368 + TEST_ASSERT_EQ(0, sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)); 369 + TEST_ASSERT_EQ(KVM_EXIT_S390_SIEIC, self->run->exit_reason); 370 + TEST_ASSERT_EQ(ICPT_INST, sie_block->icptcode); 371 + TEST_REQUIRE(sie_block->ipa != 0xb229); 372 + 373 + /* ISKE contd. */ 374 + ASSERT_EQ(false, uc_handle_exit(self)); 375 + ASSERT_EQ(2, sync_regs->gprs[0]); 376 + /* assert initial skey (ACC = 0, R & C = 1) */ 377 + ASSERT_EQ(0x06, sync_regs->gprs[1]); 378 + uc_assert_diag44(self); 379 + 380 + /* SSKE + ISKE */ 381 + sync_regs->gprs[1] = skeyvalue; 382 + run->kvm_dirty_regs |= KVM_SYNC_GPRS; 383 + ASSERT_EQ(0, uc_run_once(self)); 384 + ASSERT_EQ(false, uc_handle_exit(self)); 385 + ASSERT_EQ(3, sync_regs->gprs[0]); 386 + ASSERT_EQ(skeyvalue, sync_regs->gprs[1]); 387 + uc_assert_diag44(self); 388 + 389 + /* RRBE + ISKE */ 390 + sync_regs->gprs[1] = skeyvalue; 391 + run->kvm_dirty_regs |= KVM_SYNC_GPRS; 392 + ASSERT_EQ(0, uc_run_once(self)); 393 + ASSERT_EQ(false, uc_handle_exit(self)); 394 + ASSERT_EQ(4, sync_regs->gprs[0]); 395 + /* assert R reset but rest of skey unchanged */ 396 + ASSERT_EQ(skeyvalue & 0xfa, sync_regs->gprs[1]); 397 + ASSERT_EQ(0, sync_regs->gprs[1] & 0x04); 398 + uc_assert_diag44(self); 567 399 } 568 400 569 401 TEST_HARNESS_MAIN