Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: PPC: Book3S HV: Use accessors for VCPU registers

Introduce accessor generator macros for Book3S HV VCPU registers. Use
the accessor functions to replace direct accesses to these registers.

This will be important later for Nested APIv2 support which requires
additional functionality for accessing and modifying VCPU state.

Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20230914030600.16993-7-jniethe5@gmail.com

authored by

Jordan Niethe and committed by
Michael Ellerman
ebc88ea7 c8ae9b3c

+139 -72
+3 -2
arch/powerpc/kvm/book3s_64_mmu_radix.c
··· 15 15 16 16 #include <asm/kvm_ppc.h> 17 17 #include <asm/kvm_book3s.h> 18 + #include "book3s_hv.h" 18 19 #include <asm/page.h> 19 20 #include <asm/mmu.h> 20 21 #include <asm/pgalloc.h> ··· 295 294 } else { 296 295 if (!(pte & _PAGE_PRIVILEGED)) { 297 296 /* Check AMR/IAMR to see if strict mode is in force */ 298 - if (vcpu->arch.amr & (1ul << 62)) 297 + if (kvmppc_get_amr_hv(vcpu) & (1ul << 62)) 299 298 gpte->may_read = 0; 300 - if (vcpu->arch.amr & (1ul << 63)) 299 + if (kvmppc_get_amr_hv(vcpu) & (1ul << 63)) 301 300 gpte->may_write = 0; 302 301 if (vcpu->arch.iamr & (1ul << 62)) 303 302 gpte->may_execute = 0;
+78 -70
arch/powerpc/kvm/book3s_hv.c
··· 868 868 /* Guests can't breakpoint the hypervisor */ 869 869 if ((value1 & CIABR_PRIV) == CIABR_PRIV_HYPER) 870 870 return H_P3; 871 - vcpu->arch.ciabr = value1; 871 + kvmppc_set_ciabr_hv(vcpu, value1); 872 872 return H_SUCCESS; 873 873 case H_SET_MODE_RESOURCE_SET_DAWR0: 874 874 if (!kvmppc_power8_compatible(vcpu)) ··· 879 879 return H_UNSUPPORTED_FLAG_START; 880 880 if (value2 & DABRX_HYP) 881 881 return H_P4; 882 - vcpu->arch.dawr0 = value1; 883 - vcpu->arch.dawrx0 = value2; 882 + kvmppc_set_dawr0_hv(vcpu, value1); 883 + kvmppc_set_dawrx0_hv(vcpu, value2); 884 884 return H_SUCCESS; 885 885 case H_SET_MODE_RESOURCE_SET_DAWR1: 886 886 if (!kvmppc_power8_compatible(vcpu)) ··· 895 895 return H_UNSUPPORTED_FLAG_START; 896 896 if (value2 & DABRX_HYP) 897 897 return H_P4; 898 - vcpu->arch.dawr1 = value1; 899 - vcpu->arch.dawrx1 = value2; 898 + kvmppc_set_dawr1_hv(vcpu, value1); 899 + kvmppc_set_dawrx1_hv(vcpu, value2); 900 900 return H_SUCCESS; 901 901 case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE: 902 902 /* ··· 1548 1548 if (!(vcpu->arch.hfscr_permitted & HFSCR_PM)) 1549 1549 return EMULATE_FAIL; 1550 1550 1551 - vcpu->arch.hfscr |= HFSCR_PM; 1551 + kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_PM); 1552 1552 1553 1553 return RESUME_GUEST; 1554 1554 } ··· 1558 1558 if (!(vcpu->arch.hfscr_permitted & HFSCR_EBB)) 1559 1559 return EMULATE_FAIL; 1560 1560 1561 - vcpu->arch.hfscr |= HFSCR_EBB; 1561 + kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_EBB); 1562 1562 1563 1563 return RESUME_GUEST; 1564 1564 } ··· 1568 1568 if (!(vcpu->arch.hfscr_permitted & HFSCR_TM)) 1569 1569 return EMULATE_FAIL; 1570 1570 1571 - vcpu->arch.hfscr |= HFSCR_TM; 1571 + kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_TM); 1572 1572 1573 1573 return RESUME_GUEST; 1574 1574 } ··· 1867 1867 * Otherwise, we just generate a program interrupt to the guest. 
1868 1868 */ 1869 1869 case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: { 1870 - u64 cause = vcpu->arch.hfscr >> 56; 1870 + u64 cause = kvmppc_get_hfscr_hv(vcpu) >> 56; 1871 1871 1872 1872 r = EMULATE_FAIL; 1873 1873 if (cpu_has_feature(CPU_FTR_ARCH_300)) { ··· 2211 2211 *val = get_reg_val(id, vcpu->arch.dabrx); 2212 2212 break; 2213 2213 case KVM_REG_PPC_DSCR: 2214 - *val = get_reg_val(id, vcpu->arch.dscr); 2214 + *val = get_reg_val(id, kvmppc_get_dscr_hv(vcpu)); 2215 2215 break; 2216 2216 case KVM_REG_PPC_PURR: 2217 - *val = get_reg_val(id, vcpu->arch.purr); 2217 + *val = get_reg_val(id, kvmppc_get_purr_hv(vcpu)); 2218 2218 break; 2219 2219 case KVM_REG_PPC_SPURR: 2220 - *val = get_reg_val(id, vcpu->arch.spurr); 2220 + *val = get_reg_val(id, kvmppc_get_spurr_hv(vcpu)); 2221 2221 break; 2222 2222 case KVM_REG_PPC_AMR: 2223 - *val = get_reg_val(id, vcpu->arch.amr); 2223 + *val = get_reg_val(id, kvmppc_get_amr_hv(vcpu)); 2224 2224 break; 2225 2225 case KVM_REG_PPC_UAMOR: 2226 - *val = get_reg_val(id, vcpu->arch.uamor); 2226 + *val = get_reg_val(id, kvmppc_get_uamor_hv(vcpu)); 2227 2227 break; 2228 2228 case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1: 2229 2229 i = id - KVM_REG_PPC_MMCR0; 2230 - *val = get_reg_val(id, vcpu->arch.mmcr[i]); 2230 + *val = get_reg_val(id, kvmppc_get_mmcr_hv(vcpu, i)); 2231 2231 break; 2232 2232 case KVM_REG_PPC_MMCR2: 2233 - *val = get_reg_val(id, vcpu->arch.mmcr[2]); 2233 + *val = get_reg_val(id, kvmppc_get_mmcr_hv(vcpu, 2)); 2234 2234 break; 2235 2235 case KVM_REG_PPC_MMCRA: 2236 - *val = get_reg_val(id, vcpu->arch.mmcra); 2236 + *val = get_reg_val(id, kvmppc_get_mmcra_hv(vcpu)); 2237 2237 break; 2238 2238 case KVM_REG_PPC_MMCRS: 2239 2239 *val = get_reg_val(id, vcpu->arch.mmcrs); 2240 2240 break; 2241 2241 case KVM_REG_PPC_MMCR3: 2242 - *val = get_reg_val(id, vcpu->arch.mmcr[3]); 2242 + *val = get_reg_val(id, kvmppc_get_mmcr_hv(vcpu, 3)); 2243 2243 break; 2244 2244 case KVM_REG_PPC_PMC1 ... 
KVM_REG_PPC_PMC8: 2245 2245 i = id - KVM_REG_PPC_PMC1; 2246 - *val = get_reg_val(id, vcpu->arch.pmc[i]); 2246 + *val = get_reg_val(id, kvmppc_get_pmc_hv(vcpu, i)); 2247 2247 break; 2248 2248 case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2: 2249 2249 i = id - KVM_REG_PPC_SPMC1; 2250 2250 *val = get_reg_val(id, vcpu->arch.spmc[i]); 2251 2251 break; 2252 2252 case KVM_REG_PPC_SIAR: 2253 - *val = get_reg_val(id, vcpu->arch.siar); 2253 + *val = get_reg_val(id, kvmppc_get_siar_hv(vcpu)); 2254 2254 break; 2255 2255 case KVM_REG_PPC_SDAR: 2256 - *val = get_reg_val(id, vcpu->arch.sdar); 2256 + *val = get_reg_val(id, kvmppc_get_sdar_hv(vcpu)); 2257 2257 break; 2258 2258 case KVM_REG_PPC_SIER: 2259 - *val = get_reg_val(id, vcpu->arch.sier[0]); 2259 + *val = get_reg_val(id, kvmppc_get_sier_hv(vcpu, 0)); 2260 2260 break; 2261 2261 case KVM_REG_PPC_SIER2: 2262 - *val = get_reg_val(id, vcpu->arch.sier[1]); 2262 + *val = get_reg_val(id, kvmppc_get_sier_hv(vcpu, 1)); 2263 2263 break; 2264 2264 case KVM_REG_PPC_SIER3: 2265 - *val = get_reg_val(id, vcpu->arch.sier[2]); 2265 + *val = get_reg_val(id, kvmppc_get_sier_hv(vcpu, 2)); 2266 2266 break; 2267 2267 case KVM_REG_PPC_IAMR: 2268 - *val = get_reg_val(id, vcpu->arch.iamr); 2268 + *val = get_reg_val(id, kvmppc_get_iamr_hv(vcpu)); 2269 2269 break; 2270 2270 case KVM_REG_PPC_PSPB: 2271 - *val = get_reg_val(id, vcpu->arch.pspb); 2271 + *val = get_reg_val(id, kvmppc_get_pspb_hv(vcpu)); 2272 2272 break; 2273 2273 case KVM_REG_PPC_DPDES: 2274 2274 /* ··· 2286 2286 *val = get_reg_val(id, kvmppc_get_vtb(vcpu)); 2287 2287 break; 2288 2288 case KVM_REG_PPC_DAWR: 2289 - *val = get_reg_val(id, vcpu->arch.dawr0); 2289 + *val = get_reg_val(id, kvmppc_get_dawr0_hv(vcpu)); 2290 2290 break; 2291 2291 case KVM_REG_PPC_DAWRX: 2292 - *val = get_reg_val(id, vcpu->arch.dawrx0); 2292 + *val = get_reg_val(id, kvmppc_get_dawrx0_hv(vcpu)); 2293 2293 break; 2294 2294 case KVM_REG_PPC_DAWR1: 2295 - *val = get_reg_val(id, vcpu->arch.dawr1); 2295 + *val = 
get_reg_val(id, kvmppc_get_dawr1_hv(vcpu)); 2296 2296 break; 2297 2297 case KVM_REG_PPC_DAWRX1: 2298 - *val = get_reg_val(id, vcpu->arch.dawrx1); 2298 + *val = get_reg_val(id, kvmppc_get_dawrx1_hv(vcpu)); 2299 2299 break; 2300 2300 case KVM_REG_PPC_CIABR: 2301 - *val = get_reg_val(id, vcpu->arch.ciabr); 2301 + *val = get_reg_val(id, kvmppc_get_ciabr_hv(vcpu)); 2302 2302 break; 2303 2303 case KVM_REG_PPC_CSIGR: 2304 2304 *val = get_reg_val(id, vcpu->arch.csigr); ··· 2316 2316 *val = get_reg_val(id, vcpu->arch.acop); 2317 2317 break; 2318 2318 case KVM_REG_PPC_WORT: 2319 - *val = get_reg_val(id, vcpu->arch.wort); 2319 + *val = get_reg_val(id, kvmppc_get_wort_hv(vcpu)); 2320 2320 break; 2321 2321 case KVM_REG_PPC_TIDR: 2322 2322 *val = get_reg_val(id, vcpu->arch.tid); ··· 2349 2349 *val = get_reg_val(id, kvmppc_get_lpcr(vcpu)); 2350 2350 break; 2351 2351 case KVM_REG_PPC_PPR: 2352 - *val = get_reg_val(id, vcpu->arch.ppr); 2352 + *val = get_reg_val(id, kvmppc_get_ppr_hv(vcpu)); 2353 2353 break; 2354 2354 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2355 2355 case KVM_REG_PPC_TFHAR: ··· 2429 2429 case KVM_REG_PPC_PTCR: 2430 2430 *val = get_reg_val(id, vcpu->kvm->arch.l1_ptcr); 2431 2431 break; 2432 + case KVM_REG_PPC_FSCR: 2433 + *val = get_reg_val(id, kvmppc_get_fscr_hv(vcpu)); 2434 + break; 2432 2435 default: 2433 2436 r = -EINVAL; 2434 2437 break; ··· 2460 2457 vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP; 2461 2458 break; 2462 2459 case KVM_REG_PPC_DSCR: 2463 - vcpu->arch.dscr = set_reg_val(id, *val); 2460 + kvmppc_set_dscr_hv(vcpu, set_reg_val(id, *val)); 2464 2461 break; 2465 2462 case KVM_REG_PPC_PURR: 2466 - vcpu->arch.purr = set_reg_val(id, *val); 2463 + kvmppc_set_purr_hv(vcpu, set_reg_val(id, *val)); 2467 2464 break; 2468 2465 case KVM_REG_PPC_SPURR: 2469 - vcpu->arch.spurr = set_reg_val(id, *val); 2466 + kvmppc_set_spurr_hv(vcpu, set_reg_val(id, *val)); 2470 2467 break; 2471 2468 case KVM_REG_PPC_AMR: 2472 - vcpu->arch.amr = set_reg_val(id, *val); 2469 + 
kvmppc_set_amr_hv(vcpu, set_reg_val(id, *val)); 2473 2470 break; 2474 2471 case KVM_REG_PPC_UAMOR: 2475 - vcpu->arch.uamor = set_reg_val(id, *val); 2472 + kvmppc_set_uamor_hv(vcpu, set_reg_val(id, *val)); 2476 2473 break; 2477 2474 case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1: 2478 2475 i = id - KVM_REG_PPC_MMCR0; 2479 - vcpu->arch.mmcr[i] = set_reg_val(id, *val); 2476 + kvmppc_set_mmcr_hv(vcpu, i, set_reg_val(id, *val)); 2480 2477 break; 2481 2478 case KVM_REG_PPC_MMCR2: 2482 - vcpu->arch.mmcr[2] = set_reg_val(id, *val); 2479 + kvmppc_set_mmcr_hv(vcpu, 2, set_reg_val(id, *val)); 2483 2480 break; 2484 2481 case KVM_REG_PPC_MMCRA: 2485 - vcpu->arch.mmcra = set_reg_val(id, *val); 2482 + kvmppc_set_mmcra_hv(vcpu, set_reg_val(id, *val)); 2486 2483 break; 2487 2484 case KVM_REG_PPC_MMCRS: 2488 2485 vcpu->arch.mmcrs = set_reg_val(id, *val); ··· 2492 2489 break; 2493 2490 case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8: 2494 2491 i = id - KVM_REG_PPC_PMC1; 2495 - vcpu->arch.pmc[i] = set_reg_val(id, *val); 2492 + kvmppc_set_pmc_hv(vcpu, i, set_reg_val(id, *val)); 2496 2493 break; 2497 2494 case KVM_REG_PPC_SPMC1 ... 
KVM_REG_PPC_SPMC2: 2498 2495 i = id - KVM_REG_PPC_SPMC1; 2499 2496 vcpu->arch.spmc[i] = set_reg_val(id, *val); 2500 2497 break; 2501 2498 case KVM_REG_PPC_SIAR: 2502 - vcpu->arch.siar = set_reg_val(id, *val); 2499 + kvmppc_set_siar_hv(vcpu, set_reg_val(id, *val)); 2503 2500 break; 2504 2501 case KVM_REG_PPC_SDAR: 2505 - vcpu->arch.sdar = set_reg_val(id, *val); 2502 + kvmppc_set_sdar_hv(vcpu, set_reg_val(id, *val)); 2506 2503 break; 2507 2504 case KVM_REG_PPC_SIER: 2508 - vcpu->arch.sier[0] = set_reg_val(id, *val); 2505 + kvmppc_set_sier_hv(vcpu, 0, set_reg_val(id, *val)); 2509 2506 break; 2510 2507 case KVM_REG_PPC_SIER2: 2511 - vcpu->arch.sier[1] = set_reg_val(id, *val); 2508 + kvmppc_set_sier_hv(vcpu, 1, set_reg_val(id, *val)); 2512 2509 break; 2513 2510 case KVM_REG_PPC_SIER3: 2514 - vcpu->arch.sier[2] = set_reg_val(id, *val); 2511 + kvmppc_set_sier_hv(vcpu, 2, set_reg_val(id, *val)); 2515 2512 break; 2516 2513 case KVM_REG_PPC_IAMR: 2517 - vcpu->arch.iamr = set_reg_val(id, *val); 2514 + kvmppc_set_iamr_hv(vcpu, set_reg_val(id, *val)); 2518 2515 break; 2519 2516 case KVM_REG_PPC_PSPB: 2520 - vcpu->arch.pspb = set_reg_val(id, *val); 2517 + kvmppc_set_pspb_hv(vcpu, set_reg_val(id, *val)); 2521 2518 break; 2522 2519 case KVM_REG_PPC_DPDES: 2523 2520 if (cpu_has_feature(CPU_FTR_ARCH_300)) ··· 2529 2526 kvmppc_set_vtb(vcpu, set_reg_val(id, *val)); 2530 2527 break; 2531 2528 case KVM_REG_PPC_DAWR: 2532 - vcpu->arch.dawr0 = set_reg_val(id, *val); 2529 + kvmppc_set_dawr0_hv(vcpu, set_reg_val(id, *val)); 2533 2530 break; 2534 2531 case KVM_REG_PPC_DAWRX: 2535 - vcpu->arch.dawrx0 = set_reg_val(id, *val) & ~DAWRX_HYP; 2532 + kvmppc_set_dawrx0_hv(vcpu, set_reg_val(id, *val) & ~DAWRX_HYP); 2536 2533 break; 2537 2534 case KVM_REG_PPC_DAWR1: 2538 - vcpu->arch.dawr1 = set_reg_val(id, *val); 2535 + kvmppc_set_dawr1_hv(vcpu, set_reg_val(id, *val)); 2539 2536 break; 2540 2537 case KVM_REG_PPC_DAWRX1: 2541 - vcpu->arch.dawrx1 = set_reg_val(id, *val) & ~DAWRX_HYP; 2538 + 
kvmppc_set_dawrx1_hv(vcpu, set_reg_val(id, *val) & ~DAWRX_HYP); 2542 2539 break; 2543 2540 case KVM_REG_PPC_CIABR: 2544 - vcpu->arch.ciabr = set_reg_val(id, *val); 2541 + kvmppc_set_ciabr_hv(vcpu, set_reg_val(id, *val)); 2545 2542 /* Don't allow setting breakpoints in hypervisor code */ 2546 - if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER) 2547 - vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */ 2543 + if ((kvmppc_get_ciabr_hv(vcpu) & CIABR_PRIV) == CIABR_PRIV_HYPER) 2544 + kvmppc_set_ciabr_hv(vcpu, kvmppc_get_ciabr_hv(vcpu) & ~CIABR_PRIV); 2548 2545 break; 2549 2546 case KVM_REG_PPC_CSIGR: 2550 2547 vcpu->arch.csigr = set_reg_val(id, *val); ··· 2562 2559 vcpu->arch.acop = set_reg_val(id, *val); 2563 2560 break; 2564 2561 case KVM_REG_PPC_WORT: 2565 - vcpu->arch.wort = set_reg_val(id, *val); 2562 + kvmppc_set_wort_hv(vcpu, set_reg_val(id, *val)); 2566 2563 break; 2567 2564 case KVM_REG_PPC_TIDR: 2568 2565 vcpu->arch.tid = set_reg_val(id, *val); ··· 2623 2620 kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false); 2624 2621 break; 2625 2622 case KVM_REG_PPC_PPR: 2626 - vcpu->arch.ppr = set_reg_val(id, *val); 2623 + kvmppc_set_ppr_hv(vcpu, set_reg_val(id, *val)); 2627 2624 break; 2628 2625 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2629 2626 case KVM_REG_PPC_TFHAR: ··· 2706 2703 break; 2707 2704 case KVM_REG_PPC_PTCR: 2708 2705 vcpu->kvm->arch.l1_ptcr = set_reg_val(id, *val); 2706 + break; 2707 + case KVM_REG_PPC_FSCR: 2708 + kvmppc_set_fscr_hv(vcpu, set_reg_val(id, *val)); 2709 2709 break; 2710 2710 default: 2711 2711 r = -EINVAL; ··· 2927 2921 vcpu->arch.shared_big_endian = false; 2928 2922 #endif 2929 2923 #endif 2930 - vcpu->arch.mmcr[0] = MMCR0_FC; 2924 + kvmppc_set_mmcr_hv(vcpu, 0, MMCR0_FC); 2925 + 2931 2926 if (cpu_has_feature(CPU_FTR_ARCH_31)) { 2932 - vcpu->arch.mmcr[0] |= MMCR0_PMCCEXT; 2933 - vcpu->arch.mmcra = MMCRA_BHRB_DISABLE; 2927 + kvmppc_set_mmcr_hv(vcpu, 0, kvmppc_get_mmcr_hv(vcpu, 0) | MMCR0_PMCCEXT); 2928 + kvmppc_set_mmcra_hv(vcpu, 
MMCRA_BHRB_DISABLE); 2934 2929 } 2935 2930 2936 - vcpu->arch.ctrl = CTRL_RUNLATCH; 2931 + kvmppc_set_ctrl_hv(vcpu, CTRL_RUNLATCH); 2937 2932 /* default to host PVR, since we can't spoof it */ 2938 2933 kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR)); 2939 2934 spin_lock_init(&vcpu->arch.vpa_update_lock); ··· 2950 2943 * don't set the HFSCR_MSGP bit, and that causes those instructions 2951 2944 * to trap and then we emulate them. 2952 2945 */ 2953 - vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB | 2954 - HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP; 2946 + kvmppc_set_hfscr_hv(vcpu, HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB | 2947 + HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP); 2955 2948 2956 2949 /* On POWER10 and later, allow prefixed instructions */ 2957 2950 if (cpu_has_feature(CPU_FTR_ARCH_31)) 2958 - vcpu->arch.hfscr |= HFSCR_PREFIX; 2951 + kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_PREFIX); 2959 2952 2960 2953 if (cpu_has_feature(CPU_FTR_HVMODE)) { 2961 - vcpu->arch.hfscr &= mfspr(SPRN_HFSCR); 2954 + kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) & mfspr(SPRN_HFSCR)); 2955 + 2962 2956 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2963 2957 if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) 2964 - vcpu->arch.hfscr |= HFSCR_TM; 2958 + kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) | HFSCR_TM); 2965 2959 #endif 2966 2960 } 2967 2961 if (cpu_has_feature(CPU_FTR_TM_COMP)) 2968 2962 vcpu->arch.hfscr |= HFSCR_TM; 2969 2963 2970 - vcpu->arch.hfscr_permitted = vcpu->arch.hfscr; 2964 + vcpu->arch.hfscr_permitted = kvmppc_get_hfscr_hv(vcpu); 2971 2965 2972 2966 /* 2973 2967 * PM, EBB, TM are demand-faulted so start with it clear. 
2974 2968 */ 2975 - vcpu->arch.hfscr &= ~(HFSCR_PM | HFSCR_EBB | HFSCR_TM); 2969 + kvmppc_set_hfscr_hv(vcpu, kvmppc_get_hfscr_hv(vcpu) & ~(HFSCR_PM | HFSCR_EBB | HFSCR_TM)); 2976 2970 2977 2971 kvmppc_mmu_book3s_hv_init(vcpu); 2978 2972 ··· 4856 4848 msr |= MSR_VSX; 4857 4849 if ((cpu_has_feature(CPU_FTR_TM) || 4858 4850 cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) && 4859 - (vcpu->arch.hfscr & HFSCR_TM)) 4851 + (kvmppc_get_hfscr_hv(vcpu) & HFSCR_TM)) 4860 4852 msr |= MSR_TM; 4861 4853 msr = msr_check_and_set(msr); 4862 4854
+58
arch/powerpc/kvm/book3s_hv.h
··· 50 50 #define start_timing(vcpu, next) do {} while (0) 51 51 #define end_timing(vcpu) do {} while (0) 52 52 #endif 53 + 54 + #define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_SET(reg, size) \ 55 + static inline void kvmppc_set_##reg ##_hv(struct kvm_vcpu *vcpu, u##size val) \ 56 + { \ 57 + vcpu->arch.reg = val; \ 58 + } 59 + 60 + #define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_GET(reg, size) \ 61 + static inline u##size kvmppc_get_##reg ##_hv(struct kvm_vcpu *vcpu) \ 62 + { \ 63 + return vcpu->arch.reg; \ 64 + } 65 + 66 + #define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(reg, size) \ 67 + KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_SET(reg, size) \ 68 + KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_GET(reg, size) \ 69 + 70 + #define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_SET(reg, size) \ 71 + static inline void kvmppc_set_##reg ##_hv(struct kvm_vcpu *vcpu, int i, u##size val) \ 72 + { \ 73 + vcpu->arch.reg[i] = val; \ 74 + } 75 + 76 + #define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_GET(reg, size) \ 77 + static inline u##size kvmppc_get_##reg ##_hv(struct kvm_vcpu *vcpu, int i) \ 78 + { \ 79 + return vcpu->arch.reg[i]; \ 80 + } 81 + 82 + #define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(reg, size) \ 83 + KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_SET(reg, size) \ 84 + KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_GET(reg, size) \ 85 + 86 + KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(mmcra, 64) 87 + KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(hfscr, 64) 88 + KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(fscr, 64) 89 + KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dscr, 64) 90 + KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(purr, 64) 91 + KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(spurr, 64) 92 + KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(amr, 64) 93 + KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(uamor, 64) 94 + KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(siar, 64) 95 + KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(sdar, 64) 96 + KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(iamr, 64) 97 + KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr0, 64) 98 + KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr1, 64) 99 + KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawrx0, 64) 100 + KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawrx1, 64) 101 + 
KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ciabr, 64) 102 + KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(wort, 64) 103 + KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ppr, 64) 104 + KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ctrl, 64) 105 + 106 + KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(mmcr, 64) 107 + KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(sier, 64) 108 + KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(pmc, 32) 109 + 110 + KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(pspb, 32)