Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'loongarch-kvm-6.20' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson into HEAD

LoongArch KVM changes for v6.20

1. Add more CPUCFG mask bits.
2. Improve feature detection.
3. Add FPU/LBT delayed-load support.
4. Set a default return value in the KVM I/O bus ops.
5. Add paravirt preempt feature support (sketched below).
6. Add a KVM steal-time test case under tools/testing/selftests.
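
Item 5 adds a paravirt "preempted" hint: the host sets a KVM_VCPU_PREEMPTED bit in the shared kvm_steal_time area when it schedules a vCPU out, and the guest exposes that bit through vcpu_is_preempted() (see the paravirt.c and kvm/vcpu.c hunks below). A minimal, purely illustrative sketch of how a spin-wait heuristic might consume the hint; the helper name and owner_cpu parameter are hypothetical, only vcpu_is_preempted() comes from this series:

	/*
	 * Hypothetical caller: busy-waiting on a resource whose holder runs on
	 * owner_cpu is pointless if the host has preempted that vCPU, so the
	 * caller should yield instead of burning host CPU time.
	 */
	static inline bool worth_spinning_on(int owner_cpu)
	{
		if (owner_cpu < 0)
			return true;	/* owner unknown, keep spinning briefly */

		return !vcpu_is_preempted(owner_cpu);
	}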

+319 -116
+9
arch/loongarch/include/asm/kvm_host.h
··· 37 37 #define KVM_REQ_TLB_FLUSH_GPA KVM_ARCH_REQ(0) 38 38 #define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(1) 39 39 #define KVM_REQ_PMU KVM_ARCH_REQ(2) 40 + #define KVM_REQ_AUX_LOAD KVM_ARCH_REQ(3) 40 41 41 42 #define KVM_GUESTDBG_SW_BP_MASK \ 42 43 (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP) ··· 165 164 166 165 #define LOONGARCH_PV_FEAT_UPDATED BIT_ULL(63) 167 166 #define LOONGARCH_PV_FEAT_MASK (BIT(KVM_FEATURE_IPI) | \ 167 + BIT(KVM_FEATURE_PREEMPT) | \ 168 168 BIT(KVM_FEATURE_STEAL_TIME) | \ 169 169 BIT(KVM_FEATURE_USER_HCALL) | \ 170 170 BIT(KVM_FEATURE_VIRT_EXTIOI)) ··· 202 200 203 201 /* Which auxiliary state is loaded (KVM_LARCH_*) */ 204 202 unsigned int aux_inuse; 203 + unsigned int aux_ldtype; 205 204 206 205 /* FPU state */ 207 206 struct loongarch_fpu fpu FPU_ALIGN; ··· 255 252 u64 guest_addr; 256 253 u64 last_steal; 257 254 struct gfn_to_hva_cache cache; 255 + u8 preempted; 258 256 } st; 259 257 }; 260 258 ··· 267 263 static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned long val) 268 264 { 269 265 csr->csrs[reg] = val; 266 + } 267 + 268 + static inline bool kvm_guest_has_msgint(struct kvm_vcpu_arch *arch) 269 + { 270 + return arch->cpucfg[1] & CPUCFG1_MSGINT; 270 271 } 271 272 272 273 static inline bool kvm_guest_has_fpu(struct kvm_vcpu_arch *arch)
+3 -1
arch/loongarch/include/asm/kvm_para.h
··· 37 37 __u64 steal; 38 38 __u32 version; 39 39 __u32 flags; 40 - __u32 pad[12]; 40 + __u8 preempted; 41 + __u8 pad[47]; 41 42 }; 43 + #define KVM_VCPU_PREEMPTED (1 << 0) 42 44 43 45 /* 44 46 * Hypercall interface for KVM hypervisor
+1
arch/loongarch/include/asm/loongarch.h
··· 690 690 #define LOONGARCH_CSR_ISR3 0xa3 691 691 692 692 #define LOONGARCH_CSR_IRR 0xa4 693 + #define LOONGARCH_CSR_IPR 0xa5 693 694 694 695 #define LOONGARCH_CSR_PRID 0xc0 695 696
+4
arch/loongarch/include/asm/qspinlock.h
··· 34 34 return true; 35 35 } 36 36 37 + #define vcpu_is_preempted vcpu_is_preempted 38 + 39 + bool vcpu_is_preempted(int cpu); 40 + 37 41 #endif /* CONFIG_PARAVIRT */ 38 42 39 43 #include <asm-generic/qspinlock.h>
+1
arch/loongarch/include/uapi/asm/kvm.h
··· 105 105 #define KVM_LOONGARCH_VM_FEAT_PV_STEALTIME 7 106 106 #define KVM_LOONGARCH_VM_FEAT_PTW 8 107 107 #define KVM_LOONGARCH_VM_FEAT_MSGINT 9 108 + #define KVM_LOONGARCH_VM_FEAT_PV_PREEMPT 10 108 109 109 110 /* Device Control API on vcpu fd */ 110 111 #define KVM_LOONGARCH_VCPU_CPUCFG 0
+1
arch/loongarch/include/uapi/asm/kvm_para.h
··· 15 15 #define CPUCFG_KVM_FEATURE (CPUCFG_KVM_BASE + 4) 16 16 #define KVM_FEATURE_IPI 1 17 17 #define KVM_FEATURE_STEAL_TIME 2 18 + #define KVM_FEATURE_PREEMPT 3 18 19 /* BIT 24 - 31 are features configurable by user space vmm */ 19 20 #define KVM_FEATURE_VIRT_EXTIOI 24 20 21 #define KVM_FEATURE_USER_HCALL 25
+20 -1
arch/loongarch/kernel/paravirt.c
··· 12 12 struct static_key paravirt_steal_enabled; 13 13 struct static_key paravirt_steal_rq_enabled; 14 14 static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64); 15 + static DEFINE_STATIC_KEY_FALSE(virt_preempt_key); 15 16 DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key); 16 17 17 18 static u64 native_steal_clock(int cpu) ··· 268 267 269 268 return 0; 270 269 } 270 + 271 + bool vcpu_is_preempted(int cpu) 272 + { 273 + struct kvm_steal_time *src; 274 + 275 + if (!static_branch_unlikely(&virt_preempt_key)) 276 + return false; 277 + 278 + src = &per_cpu(steal_time, cpu); 279 + return !!(src->preempted & KVM_VCPU_PREEMPTED); 280 + } 281 + EXPORT_SYMBOL(vcpu_is_preempted); 271 282 #endif 272 283 273 284 static void pv_cpu_reboot(void *unused) ··· 321 308 pr_err("Failed to install cpu hotplug callbacks\n"); 322 309 return r; 323 310 } 311 + 312 + if (kvm_para_has_feature(KVM_FEATURE_PREEMPT)) 313 + static_branch_enable(&virt_preempt_key); 324 314 #endif 325 315 326 316 static_call_update(pv_steal_clock, paravt_steal_clock); ··· 334 318 static_key_slow_inc(&paravirt_steal_rq_enabled); 335 319 #endif 336 320 337 - pr_info("Using paravirt steal-time\n"); 321 + if (static_key_enabled(&virt_preempt_key)) 322 + pr_info("Using paravirt steal-time with preempt enabled\n"); 323 + else 324 + pr_info("Using paravirt steal-time with preempt disabled\n"); 338 325 339 326 return 0; 340 327 }
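
The hunk above only flips virt_preempt_key when the hypervisor advertises the feature. On LoongArch the paravirt feature word is exposed through CPUCFG (CPUCFG_KVM_FEATURE, see the uapi/asm/kvm_para.h hunk above), so kvm_para_has_feature(KVM_FEATURE_PREEMPT) amounts to a CPUCFG read. A simplified sketch of that probe, assuming the existing kvm_para helpers keep their current shape:

	/* Simplified feature probe; the real code goes through kvm_para_has_feature(). */
	static bool guest_has_pv_preempt(void)
	{
		unsigned int feature = read_cpucfg(CPUCFG_KVM_FEATURE);

		return feature & BIT(KVM_FEATURE_PREEMPT);
	}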
+17 -4
arch/loongarch/kvm/exit.c
··· 754 754 return RESUME_HOST; 755 755 } 756 756 757 - kvm_own_fpu(vcpu); 757 + vcpu->arch.aux_ldtype = KVM_LARCH_FPU; 758 + kvm_make_request(KVM_REQ_AUX_LOAD, vcpu); 758 759 759 760 return RESUME_GUEST; 760 761 } ··· 793 792 */ 794 793 static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu, int ecode) 795 794 { 796 - if (kvm_own_lsx(vcpu)) 795 + if (!kvm_guest_has_lsx(&vcpu->arch)) 797 796 kvm_queue_exception(vcpu, EXCCODE_INE, 0); 797 + else { 798 + vcpu->arch.aux_ldtype = KVM_LARCH_LSX; 799 + kvm_make_request(KVM_REQ_AUX_LOAD, vcpu); 800 + } 798 801 799 802 return RESUME_GUEST; 800 803 } ··· 813 808 */ 814 809 static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu, int ecode) 815 810 { 816 - if (kvm_own_lasx(vcpu)) 811 + if (!kvm_guest_has_lasx(&vcpu->arch)) 817 812 kvm_queue_exception(vcpu, EXCCODE_INE, 0); 813 + else { 814 + vcpu->arch.aux_ldtype = KVM_LARCH_LASX; 815 + kvm_make_request(KVM_REQ_AUX_LOAD, vcpu); 816 + } 818 817 819 818 return RESUME_GUEST; 820 819 } 821 820 822 821 static int kvm_handle_lbt_disabled(struct kvm_vcpu *vcpu, int ecode) 823 822 { 824 - if (kvm_own_lbt(vcpu)) 823 + if (!kvm_guest_has_lbt(&vcpu->arch)) 825 824 kvm_queue_exception(vcpu, EXCCODE_INE, 0); 825 + else { 826 + vcpu->arch.aux_ldtype = KVM_LARCH_LBT; 827 + kvm_make_request(KVM_REQ_AUX_LOAD, vcpu); 828 + } 826 829 827 830 return RESUME_GUEST; 828 831 }
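
These handlers implement the delayed load from item 3: instead of restoring FPU/LSX/LASX/LBT state directly in the exception handler, they record the unit in aux_ldtype and raise KVM_REQ_AUX_LOAD; the request is serviced just before re-entering the guest (see the kvm_check_requests() hunk in kvm/vcpu.c below), in a context that is already non-preemptible, which is presumably why the kvm_own_*() helpers drop their preempt_disable()/preempt_enable() pairs. Condensed from the hunks in this merge, the two halves of the pattern are:

	/* Exit side (this file): remember what faulted and request a reload. */
	vcpu->arch.aux_ldtype = KVM_LARCH_FPU;
	kvm_make_request(KVM_REQ_AUX_LOAD, vcpu);

	/* Entry side (kvm/vcpu.c): perform the reload right before guest entry. */
	if (kvm_check_request(KVM_REQ_AUX_LOAD, vcpu)) {
		switch (vcpu->arch.aux_ldtype) {
		case KVM_LARCH_FPU:
			kvm_own_fpu(vcpu);
			break;
		/* ... KVM_LARCH_LSX/LASX/LBT are handled the same way ... */
		}
		vcpu->arch.aux_ldtype = 0;
	}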
+17 -26
arch/loongarch/kvm/intc/eiointc.c
··· 119 119 static int loongarch_eiointc_read(struct kvm_vcpu *vcpu, struct loongarch_eiointc *s, 120 120 gpa_t addr, unsigned long *val) 121 121 { 122 - int index, ret = 0; 122 + int index; 123 123 u64 data = 0; 124 124 gpa_t offset; 125 125 ··· 150 150 data = s->coremap[index]; 151 151 break; 152 152 default: 153 - ret = -EINVAL; 154 153 break; 155 154 } 156 155 *val = data; 157 156 158 - return ret; 157 + return 0; 159 158 } 160 159 161 160 static int kvm_eiointc_read(struct kvm_vcpu *vcpu, 162 161 struct kvm_io_device *dev, 163 162 gpa_t addr, int len, void *val) 164 163 { 165 - int ret = -EINVAL; 166 164 unsigned long flags, data, offset; 167 165 struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc; 168 166 169 167 if (!eiointc) { 170 168 kvm_err("%s: eiointc irqchip not valid!\n", __func__); 171 - return -EINVAL; 169 + return 0; 172 170 } 173 171 174 172 if (addr & (len - 1)) { 175 173 kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len); 176 - return -EINVAL; 174 + return 0; 177 175 } 178 176 179 177 offset = addr & 0x7; 180 178 addr -= offset; 181 179 vcpu->stat.eiointc_read_exits++; 182 180 spin_lock_irqsave(&eiointc->lock, flags); 183 - ret = loongarch_eiointc_read(vcpu, eiointc, addr, &data); 181 + loongarch_eiointc_read(vcpu, eiointc, addr, &data); 184 182 spin_unlock_irqrestore(&eiointc->lock, flags); 185 - if (ret) 186 - return ret; 187 183 188 184 data = data >> (offset * 8); 189 185 switch (len) { ··· 204 208 struct loongarch_eiointc *s, 205 209 gpa_t addr, u64 value, u64 field_mask) 206 210 { 207 - int index, irq, ret = 0; 211 + int index, irq; 208 212 u8 cpu; 209 213 u64 data, old, mask; 210 214 gpa_t offset; ··· 283 287 eiointc_update_sw_coremap(s, index * 8, data, sizeof(data), true); 284 288 break; 285 289 default: 286 - ret = -EINVAL; 287 290 break; 288 291 } 289 292 290 - return ret; 293 + return 0; 291 294 } 292 295 293 296 static int kvm_eiointc_write(struct kvm_vcpu *vcpu, 294 297 struct kvm_io_device *dev, 295 298 gpa_t addr, int len, const void *val) 296 299 { 297 - int ret = -EINVAL; 298 300 unsigned long flags, value; 299 301 struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc; 300 302 301 303 if (!eiointc) { 302 304 kvm_err("%s: eiointc irqchip not valid!\n", __func__); 303 - return -EINVAL; 305 + return 0; 304 306 } 305 307 306 308 if (addr & (len - 1)) { 307 309 kvm_err("%s: eiointc not aligned addr %llx len %d\n", __func__, addr, len); 308 - return -EINVAL; 310 + return 0; 309 311 } 310 312 311 313 vcpu->stat.eiointc_write_exits++; ··· 311 317 switch (len) { 312 318 case 1: 313 319 value = *(unsigned char *)val; 314 - ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, 0xFF); 320 + loongarch_eiointc_write(vcpu, eiointc, addr, value, 0xFF); 315 321 break; 316 322 case 2: 317 323 value = *(unsigned short *)val; 318 - ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, USHRT_MAX); 324 + loongarch_eiointc_write(vcpu, eiointc, addr, value, USHRT_MAX); 319 325 break; 320 326 case 4: 321 327 value = *(unsigned int *)val; 322 - ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, UINT_MAX); 328 + loongarch_eiointc_write(vcpu, eiointc, addr, value, UINT_MAX); 323 329 break; 324 330 default: 325 331 value = *(unsigned long *)val; 326 - ret = loongarch_eiointc_write(vcpu, eiointc, addr, value, ULONG_MAX); 332 + loongarch_eiointc_write(vcpu, eiointc, addr, value, ULONG_MAX); 327 333 break; 328 334 } 329 335 spin_unlock_irqrestore(&eiointc->lock, flags); 330 336 331 - return ret; 337 + return 0; 332 338 } 333 339 
334 340 static const struct kvm_io_device_ops kvm_eiointc_ops = { ··· 346 352 347 353 if (!eiointc) { 348 354 kvm_err("%s: eiointc irqchip not valid!\n", __func__); 349 - return -EINVAL; 355 + return 0; 350 356 } 351 357 352 358 addr -= EIOINTC_VIRT_BASE; ··· 370 376 struct kvm_io_device *dev, 371 377 gpa_t addr, int len, const void *val) 372 378 { 373 - int ret = 0; 374 379 unsigned long flags; 375 380 u32 value = *(u32 *)val; 376 381 struct loongarch_eiointc *eiointc = vcpu->kvm->arch.eiointc; 377 382 378 383 if (!eiointc) { 379 384 kvm_err("%s: eiointc irqchip not valid!\n", __func__); 380 - return -EINVAL; 385 + return 0; 381 386 } 382 387 383 388 addr -= EIOINTC_VIRT_BASE; 384 389 spin_lock_irqsave(&eiointc->lock, flags); 385 390 switch (addr) { 386 391 case EIOINTC_VIRT_FEATURES: 387 - ret = -EPERM; 388 392 break; 389 393 case EIOINTC_VIRT_CONFIG: 390 394 /* 391 395 * eiointc features can only be set at disabled status 392 396 */ 393 397 if ((eiointc->status & BIT(EIOINTC_ENABLE)) && value) { 394 - ret = -EPERM; 395 398 break; 396 399 } 397 400 eiointc->status = value & eiointc->features; ··· 398 407 } 399 408 spin_unlock_irqrestore(&eiointc->lock, flags); 400 409 401 - return ret; 410 + return 0; 402 411 } 403 412 404 413 static const struct kvm_io_device_ops kvm_eiointc_virt_ops = {
+8 -18
arch/loongarch/kvm/intc/ipi.c
··· 111 111 vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu); 112 112 if (unlikely(vcpu == NULL)) { 113 113 kvm_err("%s: invalid target cpu: %d\n", __func__, cpu); 114 - return -EINVAL; 114 + return 0; 115 115 } 116 116 mailbox = ((data & 0xffffffff) >> 2) & 0x7; 117 117 offset = IOCSR_IPI_BUF_20 + mailbox * 4; ··· 145 145 srcu_read_unlock(&vcpu->kvm->srcu, idx); 146 146 if (unlikely(ret)) { 147 147 kvm_err("%s: : read data from addr %llx failed\n", __func__, addr); 148 - return ret; 148 + return 0; 149 149 } 150 150 /* Construct the mask by scanning the bit 27-30 */ 151 151 for (i = 0; i < 4; i++) { ··· 162 162 if (unlikely(ret)) 163 163 kvm_err("%s: : write data to addr %llx failed\n", __func__, addr); 164 164 165 - return ret; 165 + return 0; 166 166 } 167 167 168 168 static int any_send(struct kvm *kvm, uint64_t data) ··· 174 174 vcpu = kvm_get_vcpu_by_cpuid(kvm, cpu); 175 175 if (unlikely(vcpu == NULL)) { 176 176 kvm_err("%s: invalid target cpu: %d\n", __func__, cpu); 177 - return -EINVAL; 177 + return 0; 178 178 } 179 179 offset = data & 0xffff; 180 180 ··· 183 183 184 184 static int loongarch_ipi_readl(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *val) 185 185 { 186 - int ret = 0; 187 186 uint32_t offset; 188 187 uint64_t res = 0; 189 188 ··· 201 202 spin_unlock(&vcpu->arch.ipi_state.lock); 202 203 break; 203 204 case IOCSR_IPI_SET: 204 - res = 0; 205 - break; 206 205 case IOCSR_IPI_CLEAR: 207 - res = 0; 208 206 break; 209 207 case IOCSR_IPI_BUF_20 ... IOCSR_IPI_BUF_38 + 7: 210 208 if (offset + len > IOCSR_IPI_BUF_38 + 8) { 211 209 kvm_err("%s: invalid offset or len: offset = %d, len = %d\n", 212 210 __func__, offset, len); 213 - ret = -EINVAL; 214 211 break; 215 212 } 216 213 res = read_mailbox(vcpu, offset, len); 217 214 break; 218 215 default: 219 216 kvm_err("%s: unknown addr: %llx\n", __func__, addr); 220 - ret = -EINVAL; 221 217 break; 222 218 } 223 219 *(uint64_t *)val = res; 224 220 225 - return ret; 221 + return 0; 226 222 } 227 223 228 224 static int loongarch_ipi_writel(struct kvm_vcpu *vcpu, gpa_t addr, int len, const void *val) 229 225 { 230 - int ret = 0; 231 226 uint64_t data; 232 227 uint32_t offset; 233 228 ··· 232 239 233 240 switch (offset) { 234 241 case IOCSR_IPI_STATUS: 235 - ret = -EINVAL; 236 242 break; 237 243 case IOCSR_IPI_EN: 238 244 spin_lock(&vcpu->arch.ipi_state.lock); ··· 249 257 if (offset + len > IOCSR_IPI_BUF_38 + 8) { 250 258 kvm_err("%s: invalid offset or len: offset = %d, len = %d\n", 251 259 __func__, offset, len); 252 - ret = -EINVAL; 253 260 break; 254 261 } 255 262 write_mailbox(vcpu, offset, data, len); ··· 257 266 ipi_send(vcpu->kvm, data); 258 267 break; 259 268 case IOCSR_MAIL_SEND: 260 - ret = mail_send(vcpu->kvm, data); 269 + mail_send(vcpu->kvm, data); 261 270 break; 262 271 case IOCSR_ANY_SEND: 263 - ret = any_send(vcpu->kvm, data); 272 + any_send(vcpu->kvm, data); 264 273 break; 265 274 default: 266 275 kvm_err("%s: unknown addr: %llx\n", __func__, addr); 267 - ret = -EINVAL; 268 276 break; 269 277 } 270 278 271 - return ret; 279 + return 0; 272 280 } 273 281 274 282 static int kvm_ipi_read(struct kvm_vcpu *vcpu,
+14 -17
arch/loongarch/kvm/intc/pch_pic.c
··· 74 74 75 75 static int loongarch_pch_pic_read(struct loongarch_pch_pic *s, gpa_t addr, int len, void *val) 76 76 { 77 - int ret = 0, offset; 77 + int offset; 78 78 u64 data = 0; 79 79 void *ptemp; 80 80 ··· 121 121 data = s->isr; 122 122 break; 123 123 default: 124 - ret = -EINVAL; 124 + break; 125 125 } 126 126 spin_unlock(&s->lock); 127 127 128 - if (ret == 0) { 129 - offset = (addr - s->pch_pic_base) & 7; 130 - data = data >> (offset * 8); 131 - memcpy(val, &data, len); 132 - } 128 + offset = (addr - s->pch_pic_base) & 7; 129 + data = data >> (offset * 8); 130 + memcpy(val, &data, len); 133 131 134 - return ret; 132 + return 0; 135 133 } 136 134 137 135 static int kvm_pch_pic_read(struct kvm_vcpu *vcpu, 138 136 struct kvm_io_device *dev, 139 137 gpa_t addr, int len, void *val) 140 138 { 141 - int ret; 139 + int ret = 0; 142 140 struct loongarch_pch_pic *s = vcpu->kvm->arch.pch_pic; 143 141 144 142 if (!s) { 145 143 kvm_err("%s: pch pic irqchip not valid!\n", __func__); 146 - return -EINVAL; 144 + return ret; 147 145 } 148 146 149 147 if (addr & (len - 1)) { 150 148 kvm_err("%s: pch pic not aligned addr %llx len %d\n", __func__, addr, len); 151 - return -EINVAL; 149 + return ret; 152 150 } 153 151 154 152 /* statistics of pch pic reading */ ··· 159 161 static int loongarch_pch_pic_write(struct loongarch_pch_pic *s, gpa_t addr, 160 162 int len, const void *val) 161 163 { 162 - int ret = 0, offset; 164 + int offset; 163 165 u64 old, data, mask; 164 166 void *ptemp; 165 167 ··· 224 226 case PCH_PIC_ROUTE_ENTRY_START ... PCH_PIC_ROUTE_ENTRY_END: 225 227 break; 226 228 default: 227 - ret = -EINVAL; 228 229 break; 229 230 } 230 231 spin_unlock(&s->lock); 231 232 232 - return ret; 233 + return 0; 233 234 } 234 235 235 236 static int kvm_pch_pic_write(struct kvm_vcpu *vcpu, 236 237 struct kvm_io_device *dev, 237 238 gpa_t addr, int len, const void *val) 238 239 { 239 - int ret; 240 + int ret = 0; 240 241 struct loongarch_pch_pic *s = vcpu->kvm->arch.pch_pic; 241 242 242 243 if (!s) { 243 244 kvm_err("%s: pch pic irqchip not valid!\n", __func__); 244 - return -EINVAL; 245 + return ret; 245 246 } 246 247 247 248 if (addr & (len - 1)) { 248 249 kvm_err("%s: pch pic not aligned addr %llx len %d\n", __func__, addr, len); 249 - return -EINVAL; 250 + return ret; 250 251 } 251 252 252 253 /* statistics of pch pic writing */
+2 -2
arch/loongarch/kvm/interrupt.c
··· 32 32 if (priority < EXCCODE_INT_NUM) 33 33 irq = priority_to_irq[priority]; 34 34 35 - if (cpu_has_msgint && (priority == INT_AVEC)) { 35 + if (kvm_guest_has_msgint(&vcpu->arch) && (priority == INT_AVEC)) { 36 36 set_gcsr_estat(irq); 37 37 return 1; 38 38 } ··· 64 64 if (priority < EXCCODE_INT_NUM) 65 65 irq = priority_to_irq[priority]; 66 66 67 - if (cpu_has_msgint && (priority == INT_AVEC)) { 67 + if (kvm_guest_has_msgint(&vcpu->arch) && (priority == INT_AVEC)) { 68 68 clear_gcsr_estat(irq); 69 69 return 1; 70 70 }
+8
arch/loongarch/kvm/main.c
··· 192 192 set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR2); 193 193 set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL3); 194 194 set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR3); 195 + 196 + if (cpu_has_msgint) { 197 + set_gcsr_hw_flag(LOONGARCH_CSR_IPR); 198 + set_gcsr_hw_flag(LOONGARCH_CSR_ISR0); 199 + set_gcsr_hw_flag(LOONGARCH_CSR_ISR1); 200 + set_gcsr_hw_flag(LOONGARCH_CSR_ISR2); 201 + set_gcsr_hw_flag(LOONGARCH_CSR_ISR3); 202 + } 195 203 } 196 204 197 205 static void kvm_update_vpid(struct kvm_vcpu *vcpu, int cpu)
+99 -26
arch/loongarch/kvm/vcpu.c
··· 181 181 } 182 182 183 183 st = (struct kvm_steal_time __user *)ghc->hva; 184 + if (kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_PREEMPT)) { 185 + unsafe_put_user(0, &st->preempted, out); 186 + vcpu->arch.st.preempted = 0; 187 + } 188 + 184 189 unsafe_get_user(version, &st->version, out); 185 190 if (version & 1) 186 191 version += 1; /* first time write, random junk */ ··· 237 232 kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa); 238 233 vcpu->arch.flush_gpa = INVALID_GPA; 239 234 } 235 + 236 + if (kvm_check_request(KVM_REQ_AUX_LOAD, vcpu)) { 237 + switch (vcpu->arch.aux_ldtype) { 238 + case KVM_LARCH_FPU: 239 + kvm_own_fpu(vcpu); 240 + break; 241 + case KVM_LARCH_LSX: 242 + kvm_own_lsx(vcpu); 243 + break; 244 + case KVM_LARCH_LASX: 245 + kvm_own_lasx(vcpu); 246 + break; 247 + case KVM_LARCH_LBT: 248 + kvm_own_lbt(vcpu); 249 + break; 250 + default: 251 + break; 252 + } 253 + 254 + vcpu->arch.aux_ldtype = 0; 255 + } 240 256 } 241 257 242 258 /* ··· 678 652 679 653 static int _kvm_get_cpucfg_mask(int id, u64 *v) 680 654 { 655 + unsigned int config; 656 + 681 657 if (id < 0 || id >= KVM_MAX_CPUCFG_REGS) 682 658 return -EINVAL; 683 659 ··· 712 684 if (cpu_has_ptw) 713 685 *v |= CPUCFG2_PTW; 714 686 687 + config = read_cpucfg(LOONGARCH_CPUCFG2); 688 + *v |= config & (CPUCFG2_FRECIPE | CPUCFG2_DIV32 | CPUCFG2_LAM_BH); 689 + *v |= config & (CPUCFG2_LAMCAS | CPUCFG2_LLACQ_SCREL | CPUCFG2_SCQ); 715 690 return 0; 716 691 case LOONGARCH_CPUCFG3: 717 - *v = GENMASK(16, 0); 692 + *v = GENMASK(23, 0); 693 + 694 + /* VM does not support memory order and SFB setting */ 695 + config = read_cpucfg(LOONGARCH_CPUCFG3); 696 + *v &= config & ~(CPUCFG3_SFB); 697 + *v &= config & ~(CPUCFG3_ALDORDER_CAP | CPUCFG3_ASTORDER_CAP | CPUCFG3_SLDORDER_CAP); 718 698 return 0; 719 699 case LOONGARCH_CPUCFG4: 720 700 case LOONGARCH_CPUCFG5: ··· 753 717 static int kvm_check_cpucfg(int id, u64 val) 754 718 { 755 719 int ret; 720 + u32 host; 756 721 u64 mask = 0; 757 722 758 723 ret = _kvm_get_cpucfg_mask(id, &mask); ··· 783 746 /* LASX architecturally implies LSX and FP but val does not satisfy that */ 784 747 return -EINVAL; 785 748 return 0; 749 + case LOONGARCH_CPUCFG3: 750 + host = read_cpucfg(LOONGARCH_CPUCFG3); 751 + if ((val & CPUCFG3_RVAMAX) > (host & CPUCFG3_RVAMAX)) 752 + return -EINVAL; 753 + if ((val & CPUCFG3_SPW_LVL) > (host & CPUCFG3_SPW_LVL)) 754 + return -EINVAL; 755 + return 0; 786 756 case LOONGARCH_CPUCFG6: 787 757 if (val & CPUCFG6_PMP) { 788 - u32 host = read_cpucfg(LOONGARCH_CPUCFG6); 758 + host = read_cpucfg(LOONGARCH_CPUCFG6); 789 759 if ((val & CPUCFG6_PMBITS) != (host & CPUCFG6_PMBITS)) 790 760 return -EINVAL; 791 761 if ((val & CPUCFG6_PMNUM) > (host & CPUCFG6_PMNUM)) ··· 1330 1286 #ifdef CONFIG_CPU_HAS_LBT 1331 1287 int kvm_own_lbt(struct kvm_vcpu *vcpu) 1332 1288 { 1333 - if (!kvm_guest_has_lbt(&vcpu->arch)) 1334 - return -EINVAL; 1335 - 1336 - preempt_disable(); 1337 1289 if (!(vcpu->arch.aux_inuse & KVM_LARCH_LBT)) { 1338 1290 set_csr_euen(CSR_EUEN_LBTEN); 1339 1291 _restore_lbt(&vcpu->arch.lbt); 1340 1292 vcpu->arch.aux_inuse |= KVM_LARCH_LBT; 1341 1293 } 1342 - preempt_enable(); 1343 1294 1344 1295 return 0; 1345 1296 } ··· 1377 1338 /* Enable FPU and restore context */ 1378 1339 void kvm_own_fpu(struct kvm_vcpu *vcpu) 1379 1340 { 1380 - preempt_disable(); 1381 - 1382 1341 /* 1383 1342 * Enable FPU for guest 1384 1343 * Set FR and FRE according to guest context ··· 1387 1350 kvm_restore_fpu(&vcpu->arch.fpu); 1388 1351 vcpu->arch.aux_inuse |= KVM_LARCH_FPU; 1389 1352 trace_kvm_aux(vcpu, 
KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU); 1390 - 1391 - preempt_enable(); 1392 1353 } 1393 1354 1394 1355 #ifdef CONFIG_CPU_HAS_LSX 1395 1356 /* Enable LSX and restore context */ 1396 1357 int kvm_own_lsx(struct kvm_vcpu *vcpu) 1397 1358 { 1398 - if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch)) 1399 - return -EINVAL; 1400 - 1401 - preempt_disable(); 1402 - 1403 1359 /* Enable LSX for guest */ 1404 1360 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); 1405 1361 set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN); ··· 1414 1384 1415 1385 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX); 1416 1386 vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU; 1417 - preempt_enable(); 1418 1387 1419 1388 return 0; 1420 1389 } ··· 1423 1394 /* Enable LASX and restore context */ 1424 1395 int kvm_own_lasx(struct kvm_vcpu *vcpu) 1425 1396 { 1426 - if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch)) 1427 - return -EINVAL; 1428 - 1429 - preempt_disable(); 1430 - 1431 1397 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); 1432 1398 set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN); 1433 1399 switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) { ··· 1444 1420 1445 1421 trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX); 1446 1422 vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU; 1447 - preempt_enable(); 1448 1423 1449 1424 return 0; 1450 1425 } ··· 1684 1661 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2); 1685 1662 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3); 1686 1663 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL); 1687 - if (cpu_has_msgint) { 1664 + 1665 + if (kvm_guest_has_msgint(&vcpu->arch)) { 1666 + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_IPR); 1688 1667 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR0); 1689 1668 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR1); 1690 1669 kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ISR2); ··· 1781 1756 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1); 1782 1757 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2); 1783 1758 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3); 1784 - if (cpu_has_msgint) { 1759 + 1760 + if (kvm_guest_has_msgint(&vcpu->arch)) { 1761 + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_IPR); 1785 1762 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR0); 1786 1763 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR1); 1787 1764 kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ISR2); ··· 1800 1773 return 0; 1801 1774 } 1802 1775 1776 + static void kvm_vcpu_set_pv_preempted(struct kvm_vcpu *vcpu) 1777 + { 1778 + gpa_t gpa; 1779 + struct gfn_to_hva_cache *ghc; 1780 + struct kvm_memslots *slots; 1781 + struct kvm_steal_time __user *st; 1782 + 1783 + gpa = vcpu->arch.st.guest_addr; 1784 + if (!(gpa & KVM_STEAL_PHYS_VALID)) 1785 + return; 1786 + 1787 + /* vCPU may be preempted for many times */ 1788 + if (vcpu->arch.st.preempted) 1789 + return; 1790 + 1791 + /* This happens on process exit */ 1792 + if (unlikely(current->mm != vcpu->kvm->mm)) 1793 + return; 1794 + 1795 + gpa &= KVM_STEAL_PHYS_MASK; 1796 + ghc = &vcpu->arch.st.cache; 1797 + slots = kvm_memslots(vcpu->kvm); 1798 + if (slots->generation != ghc->generation || gpa != ghc->gpa) { 1799 + if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st))) { 1800 + ghc->gpa = INVALID_GPA; 1801 + return; 1802 + } 1803 + } 1804 + 1805 + st = (struct kvm_steal_time __user *)ghc->hva; 1806 + unsafe_put_user(KVM_VCPU_PREEMPTED, &st->preempted, out); 1807 + vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED; 1808 + out: 1809 + 
mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); 1810 + } 1811 + 1803 1812 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) 1804 1813 { 1805 - int cpu; 1814 + int cpu, idx; 1806 1815 unsigned long flags; 1816 + 1817 + if (vcpu->preempted && kvm_guest_has_pv_feature(vcpu, KVM_FEATURE_PREEMPT)) { 1818 + /* 1819 + * Take the srcu lock as memslots will be accessed to check 1820 + * the gfn cache generation against the memslots generation. 1821 + */ 1822 + idx = srcu_read_lock(&vcpu->kvm->srcu); 1823 + kvm_vcpu_set_pv_preempted(vcpu); 1824 + srcu_read_unlock(&vcpu->kvm->srcu, idx); 1825 + } 1807 1826 1808 1827 local_irq_save(flags); 1809 1828 cpu = smp_processor_id();
+18 -21
arch/loongarch/kvm/vm.c
··· 29 29 { 30 30 unsigned long val; 31 31 32 + if (cpu_has_lsx) 33 + kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_LSX); 34 + if (cpu_has_lasx) 35 + kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_LASX); 36 + if (cpu_has_lbt_x86) 37 + kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_X86BT); 38 + if (cpu_has_lbt_arm) 39 + kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_ARMBT); 40 + if (cpu_has_lbt_mips) 41 + kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_MIPSBT); 42 + if (cpu_has_ptw) 43 + kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PTW); 44 + if (cpu_has_msgint) 45 + kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_MSGINT); 46 + 32 47 val = read_csr_gcfg(); 33 48 if (val & CSR_GCFG_GPMP) 34 49 kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PMU); ··· 52 37 kvm->arch.pv_features = BIT(KVM_FEATURE_IPI); 53 38 kvm->arch.kvm_features = BIT(KVM_LOONGARCH_VM_FEAT_PV_IPI); 54 39 if (kvm_pvtime_supported()) { 40 + kvm->arch.pv_features |= BIT(KVM_FEATURE_PREEMPT); 55 41 kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME); 42 + kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PV_PREEMPT); 56 43 kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PV_STEALTIME); 57 44 } 58 45 } ··· 148 131 { 149 132 switch (attr->attr) { 150 133 case KVM_LOONGARCH_VM_FEAT_LSX: 151 - if (cpu_has_lsx) 152 - return 0; 153 - return -ENXIO; 154 134 case KVM_LOONGARCH_VM_FEAT_LASX: 155 - if (cpu_has_lasx) 156 - return 0; 157 - return -ENXIO; 158 135 case KVM_LOONGARCH_VM_FEAT_X86BT: 159 - if (cpu_has_lbt_x86) 160 - return 0; 161 - return -ENXIO; 162 136 case KVM_LOONGARCH_VM_FEAT_ARMBT: 163 - if (cpu_has_lbt_arm) 164 - return 0; 165 - return -ENXIO; 166 137 case KVM_LOONGARCH_VM_FEAT_MIPSBT: 167 - if (cpu_has_lbt_mips) 168 - return 0; 169 - return -ENXIO; 170 138 case KVM_LOONGARCH_VM_FEAT_PTW: 171 - if (cpu_has_ptw) 172 - return 0; 173 - return -ENXIO; 174 139 case KVM_LOONGARCH_VM_FEAT_MSGINT: 175 - if (cpu_has_msgint) 176 - return 0; 177 - return -ENXIO; 178 140 case KVM_LOONGARCH_VM_FEAT_PMU: 179 141 case KVM_LOONGARCH_VM_FEAT_PV_IPI: 142 + case KVM_LOONGARCH_VM_FEAT_PV_PREEMPT: 180 143 case KVM_LOONGARCH_VM_FEAT_PV_STEALTIME: 181 144 if (kvm_vm_support(&kvm->arch, attr->attr)) 182 145 return 0;
+1
tools/testing/selftests/kvm/Makefile.kvm
··· 228 228 TEST_GEN_PROGS_loongarch += memslot_modification_stress_test 229 229 TEST_GEN_PROGS_loongarch += memslot_perf_test 230 230 TEST_GEN_PROGS_loongarch += set_memory_region_test 231 + TEST_GEN_PROGS_loongarch += steal_time 231 232 232 233 SPLIT_TESTS += arch_timer 233 234 SPLIT_TESTS += get-reg-list
+96
tools/testing/selftests/kvm/steal_time.c
··· 301 301 pr_info("\n"); 302 302 } 303 303 304 + #elif defined(__loongarch__) 305 + 306 + /* steal_time must have 64-byte alignment */ 307 + #define STEAL_TIME_SIZE ((sizeof(struct kvm_steal_time) + 63) & ~63) 308 + #define KVM_STEAL_PHYS_VALID BIT_ULL(0) 309 + 310 + struct kvm_steal_time { 311 + __u64 steal; 312 + __u32 version; 313 + __u32 flags; 314 + __u8 preempted; 315 + __u8 pad[47]; 316 + }; 317 + 318 + static void check_status(struct kvm_steal_time *st) 319 + { 320 + GUEST_ASSERT(!(READ_ONCE(st->version) & 1)); 321 + GUEST_ASSERT_EQ(READ_ONCE(st->flags), 0); 322 + GUEST_ASSERT_EQ(READ_ONCE(st->preempted), 0); 323 + } 324 + 325 + static void guest_code(int cpu) 326 + { 327 + uint32_t version; 328 + struct kvm_steal_time *st = st_gva[cpu]; 329 + 330 + memset(st, 0, sizeof(*st)); 331 + GUEST_SYNC(0); 332 + 333 + check_status(st); 334 + WRITE_ONCE(guest_stolen_time[cpu], st->steal); 335 + version = READ_ONCE(st->version); 336 + check_status(st); 337 + GUEST_SYNC(1); 338 + 339 + check_status(st); 340 + GUEST_ASSERT(version < READ_ONCE(st->version)); 341 + WRITE_ONCE(guest_stolen_time[cpu], st->steal); 342 + check_status(st); 343 + GUEST_DONE(); 344 + } 345 + 346 + static bool is_steal_time_supported(struct kvm_vcpu *vcpu) 347 + { 348 + int err; 349 + uint64_t val; 350 + struct kvm_device_attr attr = { 351 + .group = KVM_LOONGARCH_VCPU_CPUCFG, 352 + .attr = CPUCFG_KVM_FEATURE, 353 + .addr = (uint64_t)&val, 354 + }; 355 + 356 + err = __vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &attr); 357 + if (err) 358 + return false; 359 + 360 + err = __vcpu_ioctl(vcpu, KVM_GET_DEVICE_ATTR, &attr); 361 + if (err) 362 + return false; 363 + 364 + return val & BIT(KVM_FEATURE_STEAL_TIME); 365 + } 366 + 367 + static void steal_time_init(struct kvm_vcpu *vcpu, uint32_t i) 368 + { 369 + int err; 370 + uint64_t st_gpa; 371 + struct kvm_vm *vm = vcpu->vm; 372 + struct kvm_device_attr attr = { 373 + .group = KVM_LOONGARCH_VCPU_PVTIME_CTRL, 374 + .attr = KVM_LOONGARCH_VCPU_PVTIME_GPA, 375 + .addr = (uint64_t)&st_gpa, 376 + }; 377 + 378 + /* ST_GPA_BASE is identity mapped */ 379 + st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE); 380 + sync_global_to_guest(vm, st_gva[i]); 381 + 382 + err = __vcpu_ioctl(vcpu, KVM_HAS_DEVICE_ATTR, &attr); 383 + TEST_ASSERT(err == 0, "No PV stealtime Feature"); 384 + 385 + st_gpa = (unsigned long)st_gva[i] | KVM_STEAL_PHYS_VALID; 386 + err = __vcpu_ioctl(vcpu, KVM_SET_DEVICE_ATTR, &attr); 387 + TEST_ASSERT(err == 0, "Fail to set PV stealtime GPA"); 388 + } 389 + 390 + static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpu_idx) 391 + { 392 + struct kvm_steal_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpu_idx]); 393 + 394 + ksft_print_msg("VCPU%d:\n", vcpu_idx); 395 + ksft_print_msg(" steal: %lld\n", st->steal); 396 + ksft_print_msg(" flags: %d\n", st->flags); 397 + ksft_print_msg(" version: %d\n", st->version); 398 + ksft_print_msg(" preempted: %d\n", st->preempted); 399 + } 304 400 #endif 305 401 306 402 static void *do_steal_time(void *arg)