Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: PPC: Book3s HV: Hold LPIDs in an unsigned long

The LPID register is 32 bits long. The host keeps the lpids for each
guest in an unsigned word in struct kvm_arch. Currently, LPIDs are already
limited by mmu_lpid_bits and KVM_MAX_NESTED_GUESTS_SHIFT.

The nestedv2 API returns a 64 bit "Guest ID" to be used by the L1 host
for each L2 guest. This value is used as an lpid, e.g. it is the
parameter used by H_RPT_INVALIDATE. To minimize needless special casing
it makes sense to keep this "Guest ID" in struct kvm_arch::lpid.

This means that struct kvm_arch::lpid is too small so prepare for this
and make it an unsigned long. This is not a problem for the KVM-HV and
nestedv1 cases as their lpid values are already limited to valid ranges
so in those contexts the lpid can be used as an unsigned word safely as
needed.

In the PAPR, the H_RPT_INVALIDATE pid/lpid parameter is already
specified as an unsigned long so change pseries_rpt_invalidate() to
match that. Update the callers of pseries_rpt_invalidate() to also take
an unsigned long if they take an lpid value.

Signed-off-by: Jordan Niethe <jniethe5@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20230914030600.16993-10-jniethe5@gmail.com

authored by

Jordan Niethe and committed by
Michael Ellerman
dfcaacc8 6ccbbc33

+26 -26
+5 -5
arch/powerpc/include/asm/kvm_book3s.h
··· 191 191 extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, 192 192 struct kvmppc_pte *gpte, bool data, bool iswrite); 193 193 extern void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr, 194 - unsigned int pshift, unsigned int lpid); 194 + unsigned int pshift, u64 lpid); 195 195 extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa, 196 196 unsigned int shift, 197 197 const struct kvm_memory_slot *memslot, 198 - unsigned int lpid); 198 + u64 lpid); 199 199 extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested, 200 200 bool writing, unsigned long gpa, 201 - unsigned int lpid); 201 + u64 lpid); 202 202 extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu, 203 203 unsigned long gpa, 204 204 struct kvm_memory_slot *memslot, ··· 207 207 extern int kvmppc_init_vm_radix(struct kvm *kvm); 208 208 extern void kvmppc_free_radix(struct kvm *kvm); 209 209 extern void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, 210 - unsigned int lpid); 210 + u64 lpid); 211 211 extern int kvmppc_radix_init(void); 212 212 extern void kvmppc_radix_exit(void); 213 213 extern void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, ··· 300 300 void kvmhv_vm_nested_init(struct kvm *kvm); 301 301 long kvmhv_set_partition_table(struct kvm_vcpu *vcpu); 302 302 long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu); 303 - void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1); 303 + void kvmhv_set_ptbl_entry(u64 lpid, u64 dw0, u64 dw1); 304 304 void kvmhv_release_all_nested(struct kvm *kvm); 305 305 long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu); 306 306 long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
+1 -1
arch/powerpc/include/asm/kvm_book3s_64.h
··· 624 624 625 625 extern int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, 626 626 unsigned long gpa, unsigned int level, 627 - unsigned long mmu_seq, unsigned int lpid, 627 + unsigned long mmu_seq, u64 lpid, 628 628 unsigned long *rmapp, struct rmap_nested **n_rmap); 629 629 extern void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp, 630 630 struct rmap_nested **n_rmap);
+1 -1
arch/powerpc/include/asm/kvm_host.h
··· 276 276 #define KVMPPC_SECURE_INIT_ABORT 0x4 /* H_SVM_INIT_ABORT issued */ 277 277 278 278 struct kvm_arch { 279 - unsigned int lpid; 279 + u64 lpid; 280 280 unsigned int smt_mode; /* # vcpus per virtual core */ 281 281 unsigned int emul_smt_mode; /* emualted SMT mode, on P9 */ 282 282 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+2 -2
arch/powerpc/include/asm/plpar_wrappers.h
··· 355 355 * error recovery of killing the process/guest will be eventually 356 356 * needed. 357 357 */ 358 - static inline long pseries_rpt_invalidate(u32 pid, u64 target, u64 type, 358 + static inline long pseries_rpt_invalidate(u64 pid, u64 target, u64 type, 359 359 u64 page_sizes, u64 start, u64 end) 360 360 { 361 361 long rc; ··· 401 401 return 0; 402 402 } 403 403 404 - static inline long pseries_rpt_invalidate(u32 pid, u64 target, u64 type, 404 + static inline long pseries_rpt_invalidate(u64 pid, u64 target, u64 type, 405 405 u64 page_sizes, u64 start, u64 end) 406 406 { 407 407 return 0;
+1 -1
arch/powerpc/kvm/book3s_64_mmu_hv.c
··· 121 121 kvm->arch.hpt = *info; 122 122 kvm->arch.sdr1 = __pa(info->virt) | (info->order - 18); 123 123 124 - pr_debug("KVM guest htab at %lx (order %ld), LPID %x\n", 124 + pr_debug("KVM guest htab at %lx (order %ld), LPID %llx\n", 125 125 info->virt, (long)info->order, kvm->arch.lpid); 126 126 } 127 127
+11 -11
arch/powerpc/kvm/book3s_64_mmu_radix.c
··· 308 308 } 309 309 310 310 void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr, 311 - unsigned int pshift, unsigned int lpid) 311 + unsigned int pshift, u64 lpid) 312 312 { 313 313 unsigned long psize = PAGE_SIZE; 314 314 int psi; ··· 345 345 pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc); 346 346 } 347 347 348 - static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid) 348 + static void kvmppc_radix_flush_pwc(struct kvm *kvm, u64 lpid) 349 349 { 350 350 long rc; 351 351 ··· 418 418 void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa, 419 419 unsigned int shift, 420 420 const struct kvm_memory_slot *memslot, 421 - unsigned int lpid) 421 + u64 lpid) 422 422 423 423 { 424 424 unsigned long old; ··· 469 469 * (or 4kB) mappings (of sub-pages of the same 2MB page). 470 470 */ 471 471 static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full, 472 - unsigned int lpid) 472 + u64 lpid) 473 473 { 474 474 if (full) { 475 475 memset(pte, 0, sizeof(long) << RADIX_PTE_INDEX_SIZE); ··· 490 490 } 491 491 492 492 static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full, 493 - unsigned int lpid) 493 + u64 lpid) 494 494 { 495 495 unsigned long im; 496 496 pmd_t *p = pmd; ··· 519 519 } 520 520 521 521 static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud, 522 - unsigned int lpid) 522 + u64 lpid) 523 523 { 524 524 unsigned long iu; 525 525 pud_t *p = pud; ··· 540 540 pud_free(kvm->mm, pud); 541 541 } 542 542 543 - void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid) 543 + void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, u64 lpid) 544 544 { 545 545 unsigned long ig; 546 546 ··· 567 567 } 568 568 569 569 static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd, 570 - unsigned long gpa, unsigned int lpid) 570 + unsigned long gpa, u64 lpid) 571 571 { 572 572 pte_t *pte = pte_offset_kernel(pmd, 0); 573 573 ··· 583 583 } 584 584 585 585 
static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud, 586 - unsigned long gpa, unsigned int lpid) 586 + unsigned long gpa, u64 lpid) 587 587 { 588 588 pmd_t *pmd = pmd_offset(pud, 0); 589 589 ··· 609 609 610 610 int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, 611 611 unsigned long gpa, unsigned int level, 612 - unsigned long mmu_seq, unsigned int lpid, 612 + unsigned long mmu_seq, u64 lpid, 613 613 unsigned long *rmapp, struct rmap_nested **n_rmap) 614 614 { 615 615 pgd_t *pgd; ··· 786 786 } 787 787 788 788 bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested, bool writing, 789 - unsigned long gpa, unsigned int lpid) 789 + unsigned long gpa, u64 lpid) 790 790 { 791 791 unsigned long pgflags; 792 792 unsigned int shift;
+2 -2
arch/powerpc/kvm/book3s_hv_nested.c
··· 478 478 } 479 479 } 480 480 481 - static void kvmhv_flush_lpid(unsigned int lpid) 481 + static void kvmhv_flush_lpid(u64 lpid) 482 482 { 483 483 long rc; 484 484 ··· 500 500 pr_err("KVM: TLB LPID invalidation hcall failed, rc=%ld\n", rc); 501 501 } 502 502 503 - void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1) 503 + void kvmhv_set_ptbl_entry(u64 lpid, u64 dw0, u64 dw1) 504 504 { 505 505 if (!kvmhv_on_pseries()) { 506 506 mmu_partition_table_set_entry(lpid, dw0, dw1, true);
+1 -1
arch/powerpc/kvm/book3s_hv_uvmem.c
··· 858 858 } 859 859 860 860 kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE; 861 - pr_info("LPID %d went secure\n", kvm->arch.lpid); 861 + pr_info("LPID %lld went secure\n", kvm->arch.lpid); 862 862 863 863 out: 864 864 srcu_read_unlock(&kvm->srcu, srcu_idx);
+2 -2
arch/powerpc/kvm/book3s_xive.c
··· 884 884 } 885 885 886 886 if (single_escalation) 887 - name = kasprintf(GFP_KERNEL, "kvm-%d-%d", 887 + name = kasprintf(GFP_KERNEL, "kvm-%lld-%d", 888 888 vcpu->kvm->arch.lpid, xc->server_num); 889 889 else 890 - name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d", 890 + name = kasprintf(GFP_KERNEL, "kvm-%lld-%d-%d", 891 891 vcpu->kvm->arch.lpid, xc->server_num, prio); 892 892 if (!name) { 893 893 pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",