Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

RISC-V: KVM: Implement SBI STA extension

Add a select SCHED_INFO to the KVM config in order to get run_delay
info. Then implement SBI STA's set-steal-time-shmem function and
kvm_riscv_vcpu_record_steal_time() to provide the steal-time info
to guests.

Reviewed-by: Anup Patel <anup@brainfault.org>
Reviewed-by: Atish Patra <atishp@rivosinc.com>
Signed-off-by: Andrew Jones <ajones@ventanamicro.com>
Signed-off-by: Anup Patel <anup@brainfault.org>

Authored by Andrew Jones; committed by Anup Patel.
e9f12b5f f61ce890

+95 -2
+1
arch/riscv/kvm/Kconfig
··· 32 32 select KVM_XFER_TO_GUEST_WORK 33 33 select MMU_NOTIFIER 34 34 select PREEMPT_NOTIFIERS 35 + select SCHED_INFO 35 36 help 36 37 Support hosting virtualized guest machines. 37 38
+94 -2
arch/riscv/kvm/vcpu_sbi_sta.c
··· 6 6 #include <linux/kconfig.h> 7 7 #include <linux/kernel.h> 8 8 #include <linux/kvm_host.h> 9 + #include <linux/mm.h> 10 + #include <linux/sizes.h> 9 11 12 + #include <asm/bug.h> 13 + #include <asm/current.h> 10 14 #include <asm/kvm_vcpu_sbi.h> 15 + #include <asm/page.h> 11 16 #include <asm/sbi.h> 17 + #include <asm/uaccess.h> 12 18 13 19 void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu) 14 20 { ··· 25 19 void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu) 26 20 { 27 21 gpa_t shmem = vcpu->arch.sta.shmem; 22 + u64 last_steal = vcpu->arch.sta.last_steal; 23 + u32 *sequence_ptr, sequence; 24 + u64 *steal_ptr, steal; 25 + unsigned long hva; 26 + gfn_t gfn; 28 27 29 28 if (shmem == INVALID_GPA) 30 29 return; 30 + 31 + /* 32 + * shmem is 64-byte aligned (see the enforcement in 33 + * kvm_sbi_sta_steal_time_set_shmem()) and the size of sbi_sta_struct 34 + * is 64 bytes, so we know all its offsets are in the same page. 35 + */ 36 + gfn = shmem >> PAGE_SHIFT; 37 + hva = kvm_vcpu_gfn_to_hva(vcpu, gfn); 38 + 39 + if (WARN_ON(kvm_is_error_hva(hva))) { 40 + vcpu->arch.sta.shmem = INVALID_GPA; 41 + return; 42 + } 43 + 44 + sequence_ptr = (u32 *)(hva + offset_in_page(shmem) + 45 + offsetof(struct sbi_sta_struct, sequence)); 46 + steal_ptr = (u64 *)(hva + offset_in_page(shmem) + 47 + offsetof(struct sbi_sta_struct, steal)); 48 + 49 + if (WARN_ON(get_user(sequence, sequence_ptr))) 50 + return; 51 + 52 + sequence = le32_to_cpu(sequence); 53 + sequence += 1; 54 + 55 + if (WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr))) 56 + return; 57 + 58 + if (!WARN_ON(get_user(steal, steal_ptr))) { 59 + steal = le64_to_cpu(steal); 60 + vcpu->arch.sta.last_steal = READ_ONCE(current->sched_info.run_delay); 61 + steal += vcpu->arch.sta.last_steal - last_steal; 62 + WARN_ON(put_user(cpu_to_le64(steal), steal_ptr)); 63 + } 64 + 65 + sequence += 1; 66 + WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr)); 67 + 68 + kvm_vcpu_mark_page_dirty(vcpu, gfn); 31 69 } 32 70 33 71 
static int kvm_sbi_sta_steal_time_set_shmem(struct kvm_vcpu *vcpu) 34 72 { 35 - return SBI_ERR_FAILURE; 73 + struct kvm_cpu_context *cp = &vcpu->arch.guest_context; 74 + unsigned long shmem_phys_lo = cp->a0; 75 + unsigned long shmem_phys_hi = cp->a1; 76 + u32 flags = cp->a2; 77 + struct sbi_sta_struct zero_sta = {0}; 78 + unsigned long hva; 79 + bool writable; 80 + gpa_t shmem; 81 + int ret; 82 + 83 + if (flags != 0) 84 + return SBI_ERR_INVALID_PARAM; 85 + 86 + if (shmem_phys_lo == SBI_STA_SHMEM_DISABLE && 87 + shmem_phys_hi == SBI_STA_SHMEM_DISABLE) { 88 + vcpu->arch.sta.shmem = INVALID_GPA; 89 + return 0; 90 + } 91 + 92 + if (shmem_phys_lo & (SZ_64 - 1)) 93 + return SBI_ERR_INVALID_PARAM; 94 + 95 + shmem = shmem_phys_lo; 96 + 97 + if (shmem_phys_hi != 0) { 98 + if (IS_ENABLED(CONFIG_32BIT)) 99 + shmem |= ((gpa_t)shmem_phys_hi << 32); 100 + else 101 + return SBI_ERR_INVALID_ADDRESS; 102 + } 103 + 104 + hva = kvm_vcpu_gfn_to_hva_prot(vcpu, shmem >> PAGE_SHIFT, &writable); 105 + if (kvm_is_error_hva(hva) || !writable) 106 + return SBI_ERR_INVALID_ADDRESS; 107 + 108 + ret = kvm_vcpu_write_guest(vcpu, shmem, &zero_sta, sizeof(zero_sta)); 109 + if (ret) 110 + return SBI_ERR_FAILURE; 111 + 112 + vcpu->arch.sta.shmem = shmem; 113 + vcpu->arch.sta.last_steal = current->sched_info.run_delay; 114 + 115 + return 0; 36 116 } 37 117 38 118 static int kvm_sbi_ext_sta_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, ··· 144 52 145 53 static unsigned long kvm_sbi_ext_sta_probe(struct kvm_vcpu *vcpu) 146 54 { 147 - return 0; 55 + return !!sched_info_on(); 148 56 } 149 57 150 58 const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta = {