Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
arch/powerpc/include/asm/kvm_book3s.h at tag v6.7-rc2 (694 lines, 24 kB)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_H__
#define __ASM_KVM_BOOK3S_H__

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/guest-state-buffer.h>

struct kvmppc_bat {
        u64 raw;
        u32 bepi;
        u32 bepi_mask;
        u32 brpn;
        u8 wimg;
        u8 pp;
        bool vs : 1;
        bool vp : 1;
};

struct kvmppc_sid_map {
        u64 guest_vsid;
        u64 guest_esid;
        u64 host_vsid;
        bool valid : 1;
};

#define SID_MAP_BITS    9
#define SID_MAP_NUM     (1 << SID_MAP_BITS)
#define SID_MAP_MASK    (SID_MAP_NUM - 1)

#ifdef CONFIG_PPC_BOOK3S_64
#define SID_CONTEXTS    1
#else
#define SID_CONTEXTS    128
#define VSID_POOL_SIZE  (SID_CONTEXTS * 16)
#endif

struct hpte_cache {
        struct hlist_node list_pte;
        struct hlist_node list_pte_long;
        struct hlist_node list_vpte;
        struct hlist_node list_vpte_long;
#ifdef CONFIG_PPC_BOOK3S_64
        struct hlist_node list_vpte_64k;
#endif
        struct rcu_head rcu_head;
        u64 host_vpn;
        u64 pfn;
        ulong slot;
        struct kvmppc_pte pte;
        int pagesize;
};

/*
 * Struct for a virtual core.
 * Note: entry_exit_map combines a bitmap of threads that have entered
 * in the bottom 8 bits and a bitmap of threads that have exited in the
 * next 8 bits. This is so that we can atomically set the entry bit
 * iff the exit map is 0 without taking a lock.
 */
struct kvmppc_vcore {
        int n_runnable;
        int num_threads;
        int entry_exit_map;
        int napping_threads;
        int first_vcpuid;
        u16 pcpu;
        u16 last_cpu;
        u8 vcore_state;
        u8 in_guest;
        struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
        struct list_head preempt_list;
        spinlock_t lock;
        struct rcuwait wait;
        spinlock_t stoltb_lock;         /* protects stolen_tb and preempt_tb */
        u64 stolen_tb;
        u64 preempt_tb;
        struct kvm_vcpu *runner;
        struct kvm *kvm;
        u64 tb_offset;                  /* guest timebase - host timebase */
        u64 tb_offset_applied;          /* timebase offset currently in force */
        ulong lpcr;
        u32 arch_compat;
        ulong pcr;
        ulong dpdes;                    /* doorbell state (POWER8) */
        ulong vtb;                      /* virtual timebase */
        ulong conferring_threads;
        unsigned int halt_poll_ns;
        atomic_t online_count;
};
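
/*
 * Editorial sketch (not part of the original header): the lock-free
 * entry protocol described in the comment above can be implemented
 * with a cmpxchg loop along these lines, where a thread sets its entry
 * bit only while the exit half (bits 8-15) is still zero:
 *
 *      do {
 *              old = vc->entry_exit_map;
 *              if (old & 0xff00)       // some thread has already exited
 *                      return false;
 *              new = old | (1 << thread_nr);
 *      } while (cmpxchg(&vc->entry_exit_map, old, new) != old);
 *
 * The real entry/exit code lives in the HV run path, not in this
 * header; the snippet is only an illustration of the documented rule.
 */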

struct kvmppc_vcpu_book3s {
        struct kvmppc_sid_map sid_map[SID_MAP_NUM];
        struct {
                u64 esid;
                u64 vsid;
        } slb_shadow[64];
        u8 slb_shadow_max;
        struct kvmppc_bat ibat[8];
        struct kvmppc_bat dbat[8];
        u64 hid[6];
        u64 gqr[8];
        u64 sdr1;
        u64 hior;
        u64 msr_mask;
        u64 vtb;
#ifdef CONFIG_PPC_BOOK3S_32
        u32 vsid_pool[VSID_POOL_SIZE];
        u32 vsid_next;
#else
        u64 proto_vsid_first;
        u64 proto_vsid_max;
        u64 proto_vsid_next;
#endif
        int context_id[SID_CONTEXTS];

        bool hior_explicit;             /* HIOR is set by ioctl, not PVR */

        struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
        struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
        struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
        struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
#ifdef CONFIG_PPC_BOOK3S_64
        struct hlist_head hpte_hash_vpte_64k[HPTEG_HASH_NUM_VPTE_64K];
#endif
        int hpte_cache_count;
        spinlock_t mmu_lock;
};

#define VSID_REAL       0x07ffffffffc00000ULL
#define VSID_BAT        0x07ffffffffb00000ULL
#define VSID_64K        0x0800000000000000ULL
#define VSID_1T         0x1000000000000000ULL
#define VSID_REAL_DR    0x2000000000000000ULL
#define VSID_REAL_IR    0x4000000000000000ULL
#define VSID_PR         0x8000000000000000ULL

extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
                               bool iswrite);
extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
extern int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
                        unsigned long addr, unsigned long status);
extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
                        unsigned long slb_v, unsigned long valid);
extern int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
                        unsigned long gpa, gva_t ea, int is_store);

extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte);
extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern int kvmppc_mmu_hpte_sysinit(void);
extern void kvmppc_mmu_hpte_sysexit(void);
extern int kvmppc_mmu_hv_init(void);
extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);

extern int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
                        unsigned long ea, unsigned long dsisr);
extern unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
                        gva_t eaddr, void *to, void *from,
                        unsigned long n);
extern long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
                        void *to, unsigned long n);
extern long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
                        void *from, unsigned long n);
extern int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
                        struct kvmppc_pte *gpte, u64 root,
                        u64 *pte_ret_p);
extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
                        struct kvmppc_pte *gpte, u64 table,
                        int table_index, u64 *pte_ret_p);
extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
                        struct kvmppc_pte *gpte, bool data, bool iswrite);
extern void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
                        unsigned int pshift, u64 lpid);
extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
                        unsigned int shift,
                        const struct kvm_memory_slot *memslot,
                        u64 lpid);
extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested,
                        bool writing, unsigned long gpa,
                        u64 lpid);
extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
                        unsigned long gpa,
                        struct kvm_memory_slot *memslot,
                        bool writing, bool kvm_ro,
                        pte_t *inserted_pte, unsigned int *levelp);
extern int kvmppc_init_vm_radix(struct kvm *kvm);
extern void kvmppc_free_radix(struct kvm *kvm);
extern void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd,
                        u64 lpid);
extern int kvmppc_radix_init(void);
extern void kvmppc_radix_exit(void);
extern void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
                        unsigned long gfn);
extern bool kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
                        unsigned long gfn);
extern bool kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
                        unsigned long gfn);
extern long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
                        struct kvm_memory_slot *memslot, unsigned long *map);
extern void kvmppc_radix_flush_memslot(struct kvm *kvm,
                        const struct kvm_memory_slot *memslot);
extern int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);

/* XXX remove this export when load_last_inst() is generic */
extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
                        unsigned int vec);
extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
                        bool upper, u32 val);
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
extern int kvmppc_emulate_paired_single(struct kvm_vcpu *vcpu);
extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
                        bool writing, bool *writable);
extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
                        unsigned long *rmap, long pte_index, int realmode);
extern void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot,
                        unsigned long gfn, unsigned long psize);
extern void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
                        unsigned long pte_index);
void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
                        unsigned long pte_index);
extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
                        unsigned long *nb_ret);
extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr,
                        unsigned long gpa, bool dirty);
extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
                        long pte_index, unsigned long pteh, unsigned long ptel,
                        pgd_t *pgdir, bool realmode, unsigned long *idx_ret);
extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
                        unsigned long pte_index, unsigned long avpn,
                        unsigned long *hpret);
extern long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm,
                        struct kvm_memory_slot *memslot, unsigned long *map);
extern void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa,
                        struct kvm_memory_slot *memslot,
                        unsigned long *map);
extern unsigned long kvmppc_filter_lpcr_hv(struct kvm *kvm,
                        unsigned long lpcr);
extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
                        unsigned long mask);
extern void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr);

extern int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu);
extern int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu);
extern void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu);

extern void kvmppc_entry_trampoline(void);
extern void kvmppc_hv_entry_trampoline(void);
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
extern void kvmppc_pr_init_default_hcalls(struct kvm *kvm);
extern int kvmppc_hcall_impl_pr(unsigned long cmd);
extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);

long kvmppc_read_intr(void);
void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr);
void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu);
#else
static inline void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {}
#endif

extern unsigned long nested_capabilities;
long kvmhv_nested_init(void);
void kvmhv_nested_exit(void);
void kvmhv_vm_nested_init(struct kvm *kvm);
long kvmhv_set_partition_table(struct kvm_vcpu *vcpu);
long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu);
void kvmhv_set_ptbl_entry(u64 lpid, u64 dw0, u64 dw1);
void kvmhv_release_all_nested(struct kvm *kvm);
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
                        unsigned long type, unsigned long pg_sizes,
                        unsigned long start, unsigned long end);
int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu,
                        u64 time_limit, unsigned long lpcr);
void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
                        struct hv_guest_state *hr);
long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu);

void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE

extern struct static_key_false __kvmhv_is_nestedv2;

static inline bool kvmhv_is_nestedv2(void)
{
        return static_branch_unlikely(&__kvmhv_is_nestedv2);
}

static inline bool kvmhv_is_nestedv1(void)
{
        return !static_branch_likely(&__kvmhv_is_nestedv2);
}

#else

static inline bool kvmhv_is_nestedv2(void)
{
        return false;
}

static inline bool kvmhv_is_nestedv1(void)
{
        return false;
}

#endif

int __kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs);
int __kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs);
int __kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden);
int __kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden);

static inline int kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu,
                                               struct pt_regs *regs)
{
        if (kvmhv_is_nestedv2())
                return __kvmhv_nestedv2_reload_ptregs(vcpu, regs);
        return 0;
}

static inline int kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu,
                                                   struct pt_regs *regs)
{
        if (kvmhv_is_nestedv2())
                return __kvmhv_nestedv2_mark_dirty_ptregs(vcpu, regs);
        return 0;
}
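
/*
 * Editorial note: with CONFIG_KVM_BOOK3S_HV_POSSIBLE unset, the
 * static-branch helpers above make every kvmhv_nestedv2_*() wrapper
 * compile down to "return 0". The register accessors further down use
 * them in one consistent pattern: setters mark the shadowed value
 * dirty so it is pushed out through the guest-state buffer, while
 * getters reload the cached value before reading it.
 */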

static inline int kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden)
{
        if (kvmhv_is_nestedv2())
                return __kvmhv_nestedv2_mark_dirty(vcpu, iden);
        return 0;
}

static inline int kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden)
{
        if (kvmhv_is_nestedv2())
                return __kvmhv_nestedv2_cached_reload(vcpu, iden);
        return 0;
}

extern int kvm_irq_bypass;

static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.book3s;
}

/* Also add subarch specific defines */

#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
#include <asm/kvm_book3s_32.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/kvm_book3s_64.h>
#endif

static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
        vcpu->arch.regs.gpr[num] = val;
        kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_GPR(num));
}

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
        WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_GPR(num)) < 0);
        return vcpu->arch.regs.gpr[num];
}

static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
        vcpu->arch.regs.ccr = val;
        kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CR);
}

static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
        WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_CR) < 0);
        return vcpu->arch.regs.ccr;
}

static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
{
        vcpu->arch.regs.xer = val;
        kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_XER);
}

static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
        WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_XER) < 0);
        return vcpu->arch.regs.xer;
}

static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
        vcpu->arch.regs.ctr = val;
        kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CTR);
}

static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
        WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_CTR) < 0);
        return vcpu->arch.regs.ctr;
}

static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
        vcpu->arch.regs.link = val;
        kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LR);
}

static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
        WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_LR) < 0);
        return vcpu->arch.regs.link;
}

static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
        vcpu->arch.regs.nip = val;
        kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_NIA);
}

static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
        WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_NIA) < 0);
        return vcpu->arch.regs.nip;
}

static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu);
static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
{
        return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE);
}

static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault_dar;
}

static inline u64 kvmppc_get_fpr(struct kvm_vcpu *vcpu, int i)
{
        WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(i)) < 0);
        return vcpu->arch.fp.fpr[i][TS_FPROFFSET];
}

static inline void kvmppc_set_fpr(struct kvm_vcpu *vcpu, int i, u64 val)
{
        vcpu->arch.fp.fpr[i][TS_FPROFFSET] = val;
        kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(i));
}
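
/*
 * Editorial note (an assumption based on the powerpc thread_fp_state
 * layout, not stated in this header): fp.fpr[i][] holds both
 * doublewords of VSR i, and TS_FPROFFSET selects the doubleword that
 * carries the classic FP scalar. The kvmppc_get_vsx_fpr() and
 * kvmppc_set_vsx_fpr() helpers below take the doubleword index j
 * explicitly instead.
 */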

static inline u64 kvmppc_get_fpscr(struct kvm_vcpu *vcpu)
{
        WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_FPSCR) < 0);
        return vcpu->arch.fp.fpscr;
}

static inline void kvmppc_set_fpscr(struct kvm_vcpu *vcpu, u64 val)
{
        vcpu->arch.fp.fpscr = val;
        kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_FPSCR);
}

static inline u64 kvmppc_get_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j)
{
        WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(i)) < 0);
        return vcpu->arch.fp.fpr[i][j];
}

static inline void kvmppc_set_vsx_fpr(struct kvm_vcpu *vcpu, int i, int j,
                                      u64 val)
{
        vcpu->arch.fp.fpr[i][j] = val;
        kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(i));
}

#ifdef CONFIG_ALTIVEC
static inline void kvmppc_get_vsx_vr(struct kvm_vcpu *vcpu, int i, vector128 *v)
{
        WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSRS(32 + i)) < 0);
        *v = vcpu->arch.vr.vr[i];
}

static inline void kvmppc_set_vsx_vr(struct kvm_vcpu *vcpu, int i,
                                     vector128 *val)
{
        vcpu->arch.vr.vr[i] = *val;
        kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSRS(32 + i));
}

static inline u32 kvmppc_get_vscr(struct kvm_vcpu *vcpu)
{
        WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_VSCR) < 0);
        return vcpu->arch.vr.vscr.u[3];
}

static inline void kvmppc_set_vscr(struct kvm_vcpu *vcpu, u32 val)
{
        vcpu->arch.vr.vscr.u[3] = val;
        kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_VSCR);
}
#endif

#define KVMPPC_BOOK3S_VCPU_ACCESSOR_SET(reg, size, iden)                \
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
{                                                                       \
        vcpu->arch.reg = val;                                           \
        kvmhv_nestedv2_mark_dirty(vcpu, iden);                          \
}

#define KVMPPC_BOOK3S_VCPU_ACCESSOR_GET(reg, size, iden)                \
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)           \
{                                                                       \
        WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0);          \
        return vcpu->arch.reg;                                          \
}

#define KVMPPC_BOOK3S_VCPU_ACCESSOR(reg, size, iden)                    \
        KVMPPC_BOOK3S_VCPU_ACCESSOR_SET(reg, size, iden)                \
        KVMPPC_BOOK3S_VCPU_ACCESSOR_GET(reg, size, iden)

KVMPPC_BOOK3S_VCPU_ACCESSOR(pid, 32, KVMPPC_GSID_PIDR)
KVMPPC_BOOK3S_VCPU_ACCESSOR(tar, 64, KVMPPC_GSID_TAR)
KVMPPC_BOOK3S_VCPU_ACCESSOR(ebbhr, 64, KVMPPC_GSID_EBBHR)
KVMPPC_BOOK3S_VCPU_ACCESSOR(ebbrr, 64, KVMPPC_GSID_EBBRR)
KVMPPC_BOOK3S_VCPU_ACCESSOR(bescr, 64, KVMPPC_GSID_BESCR)
KVMPPC_BOOK3S_VCPU_ACCESSOR(ic, 64, KVMPPC_GSID_IC)
KVMPPC_BOOK3S_VCPU_ACCESSOR(vrsave, 64, KVMPPC_GSID_VRSAVE)
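
/*
 * Editorial example: KVMPPC_BOOK3S_VCPU_ACCESSOR(tar, 64, KVMPPC_GSID_TAR)
 * above expands to this setter/getter pair:
 *
 *      static inline void kvmppc_set_tar(struct kvm_vcpu *vcpu, u64 val)
 *      {
 *              vcpu->arch.tar = val;
 *              kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_TAR);
 *      }
 *
 *      static inline u64 kvmppc_get_tar(struct kvm_vcpu *vcpu)
 *      {
 *              WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_TAR) < 0);
 *              return vcpu->arch.tar;
 *      }
 */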

#define KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(reg, size, iden)               \
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
{                                                                       \
        vcpu->arch.vcore->reg = val;                                    \
        kvmhv_nestedv2_mark_dirty(vcpu, iden);                          \
}

#define KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(reg, size, iden)               \
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)           \
{                                                                       \
        WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden) < 0);          \
        return vcpu->arch.vcore->reg;                                   \
}

#define KVMPPC_BOOK3S_VCORE_ACCESSOR(reg, size, iden)                   \
        KVMPPC_BOOK3S_VCORE_ACCESSOR_SET(reg, size, iden)               \
        KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(reg, size, iden)

KVMPPC_BOOK3S_VCORE_ACCESSOR(vtb, 64, KVMPPC_GSID_VTB)
KVMPPC_BOOK3S_VCORE_ACCESSOR(tb_offset, 64, KVMPPC_GSID_TB_OFFSET)
KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(arch_compat, 32, KVMPPC_GSID_LOGICAL_PVR)
KVMPPC_BOOK3S_VCORE_ACCESSOR_GET(lpcr, 64, KVMPPC_GSID_LPCR)

static inline u64 kvmppc_get_dec_expires(struct kvm_vcpu *vcpu)
{
        WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_TB_OFFSET) < 0);
        WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_DEC_EXPIRY_TB) < 0);
        return vcpu->arch.dec_expires;
}

static inline void kvmppc_set_dec_expires(struct kvm_vcpu *vcpu, u64 val)
{
        vcpu->arch.dec_expires = val;
        WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_TB_OFFSET) < 0);
        kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_DEC_EXPIRY_TB);
}

/* Expiry time of vcpu DEC relative to host TB */
static inline u64 kvmppc_dec_expires_host_tb(struct kvm_vcpu *vcpu)
{
        return kvmppc_get_dec_expires(vcpu) - kvmppc_get_tb_offset(vcpu);
}

static inline bool is_kvmppc_resume_guest(int r)
{
        return (r == RESUME_GUEST || r == RESUME_GUEST_NV);
}

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm);
static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu)
{
        /* Only PR KVM supports the magic page */
        return !is_kvmppc_hv_enabled(vcpu->kvm);
}

extern int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu);
extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu);

/* Magic register values loaded into r3 and r4 before the 'sc' assembly
 * instruction for the OSI hypercalls */
#define OSI_SC_MAGIC_R3         0x113724FA
#define OSI_SC_MAGIC_R4         0x77810F9B

#define INS_DCBZ                0x7c0007ec
/* TO = 31 for unconditional trap */
#define INS_TW                  0x7fe00008

#define SPLIT_HACK_MASK         0xff000000
#define SPLIT_HACK_OFFS         0xfb000000

/*
 * This packs a VCPU ID from the [0..KVM_MAX_VCPU_IDS) space down to the
 * [0..KVM_MAX_VCPUS) space, using knowledge of the guest's core stride
 * (but not its actual threading mode, which is not available) to avoid
 * collisions.
 *
 * The implementation leaves VCPU IDs from the range [0..KVM_MAX_VCPUS) (block
 * 0) unchanged: if the guest is filling each VCORE completely then it will be
 * using consecutive IDs and it will fill the space without any packing.
 *
 * For higher VCPU IDs, the packed ID is based on the VCPU ID modulo
 * KVM_MAX_VCPUS (effectively masking off the top bits) and then an offset is
 * added to avoid collisions.
 *
 * VCPU IDs in the range [KVM_MAX_VCPUS..(KVM_MAX_VCPUS*2)) (block 1) are only
 * possible if the guest is leaving at least 1/2 of each VCORE empty, so IDs
 * can be safely packed into the second half of each VCORE by adding an offset
 * of (stride / 2).
 *
 * Similarly, if VCPU IDs in the range [(KVM_MAX_VCPUS*2)..(KVM_MAX_VCPUS*4))
 * (blocks 2 and 3) are seen, the guest must be leaving at least 3/4 of each
 * VCORE empty so packed IDs can be offset by (stride / 4) and (stride * 3 / 4).
 *
 * Finally, VCPU IDs from blocks 5..7 will only be seen if the guest is using a
 * stride of 8 and 1 thread per core so the remaining offsets of 1, 5, 3 and 7
 * must be free to use.
 *
 * (The offsets for each block are stored in block_offsets[], indexed by the
 * block number if the stride is 8. For cases where the guest's stride is less
 * than 8, we can re-use the block_offsets array by multiplying the block
 * number by (MAX_SMT_THREADS / stride) to reach the correct entry.)
 */
static inline u32 kvmppc_pack_vcpu_id(struct kvm *kvm, u32 id)
{
        const int block_offsets[MAX_SMT_THREADS] = {0, 4, 2, 6, 1, 5, 3, 7};
        int stride = kvm->arch.emul_smt_mode;
        int block = (id / KVM_MAX_VCPUS) * (MAX_SMT_THREADS / stride);
        u32 packed_id;

        if (WARN_ONCE(block >= MAX_SMT_THREADS, "VCPU ID too large to pack"))
                return 0;
        packed_id = (id % KVM_MAX_VCPUS) + block_offsets[block];
        if (WARN_ONCE(packed_id >= KVM_MAX_VCPUS, "VCPU ID packing failed"))
                return 0;
        return packed_id;
}

#endif /* __ASM_KVM_BOOK3S_H__ */
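
For readers tracing the ID-packing arithmetic, here is a minimal standalone sketch (user-space, for illustration only). It copies the arithmetic of kvmppc_pack_vcpu_id() with assumed values KVM_MAX_VCPUS = 2048 and MAX_SMT_THREADS = 8; the kernel's actual constants and the function above are authoritative.

#include <assert.h>
#include <stdio.h>

/* Assumed values for illustration only; the kernel defines its own. */
#define KVM_MAX_VCPUS   2048
#define MAX_SMT_THREADS 8

static const int block_offsets[MAX_SMT_THREADS] = {0, 4, 2, 6, 1, 5, 3, 7};

/* Standalone copy of the packing arithmetic from kvmppc_pack_vcpu_id(). */
static unsigned int pack_vcpu_id(int stride, unsigned int id)
{
        int block = (id / KVM_MAX_VCPUS) * (MAX_SMT_THREADS / stride);
        unsigned int packed = (id % KVM_MAX_VCPUS) + block_offsets[block];

        assert(block < MAX_SMT_THREADS);
        assert(packed < KVM_MAX_VCPUS);
        return packed;
}

int main(void)
{
        /* Block 0 IDs are left unchanged. */
        printf("%u\n", pack_vcpu_id(8, 5));                     /* 5 */
        /* Block 1 with stride 8: offset by stride/2 = 4. */
        printf("%u\n", pack_vcpu_id(8, KVM_MAX_VCPUS + 8));     /* 12 */
        /* Block 2 with stride 8: offset by stride/4 = 2. */
        printf("%u\n", pack_vcpu_id(8, 2 * KVM_MAX_VCPUS + 8)); /* 10 */
        return 0;
}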