Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

MIPS: KVM: Add base guest MSA support

Add base code for supporting the MIPS SIMD Architecture (MSA) in MIPS
KVM guests. MSA cannot yet be enabled in the guest, we're just laying
the groundwork.

As with the FPU, whether the guest's MSA context is loaded is stored in
another bit in the fpu_inuse vcpu member. This allows MSA to be disabled
when the guest disables it, while keeping the MSA context loaded so it
doesn't have to be reloaded if the guest re-enables it.

New assembly code is added for saving and restoring the MSA context,
restoring only the upper half of the MSA context (for when the FPU context
is already loaded) and for saving/clearing and restoring MSACSR (which
can itself cause an MSA FP exception depending on the value). The MSACSR
is restored before returning to the guest if MSA is already enabled, and
the existing FP exception die notifier is extended to catch the possible
MSA FP exception and step over the ctcmsa instruction.

The helper function kvm_own_msa() is added to enable MSA and restore
the MSA context if it isn't already loaded, which will be used in a
later patch when the guest attempts to use MSA for the first time and
triggers an MSA disabled exception.

The existing FPU helpers are extended to handle MSA. kvm_lose_fpu()
saves the full MSA context if it is loaded (which includes the FPU
context) and both kvm_lose_fpu() and kvm_drop_fpu() disable MSA.

kvm_own_fpu() also needs to lose any MSA context if FR=0, since there
would be a risk of getting reserved instruction exceptions if CU1 is
enabled and we later try to save the MSA context. We shouldn't usually
hit this case since it will be handled when emulating CU1 changes,
however there's nothing to stop the guest modifying the Status register
directly via the comm page, which will cause this case to get hit.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Paul Burton <paul.burton@imgtec.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Gleb Natapov <gleb@kernel.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org

+323 -19
+20 -1
arch/mips/include/asm/kvm_host.h
··· 360 360 }; 361 361 362 362 #define KVM_MIPS_FPU_FPU 0x1 363 + #define KVM_MIPS_FPU_MSA 0x2 363 364 364 365 #define KVM_MIPS_GUEST_TLB_SIZE 64 365 366 struct kvm_vcpu_arch { ··· 433 432 int wait; 434 433 435 434 u8 fpu_enabled; 435 + u8 msa_enabled; 436 436 }; 437 437 438 438 ··· 578 576 kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP; 579 577 } 580 578 579 + static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu) 580 + { 581 + return (!__builtin_constant_p(cpu_has_msa) || cpu_has_msa) && 582 + vcpu->msa_enabled; 583 + } 584 + 585 + static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu) 586 + { 587 + return kvm_mips_guest_can_have_msa(vcpu) && 588 + kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA; 589 + } 590 + 581 591 struct kvm_mips_callbacks { 582 592 int (*handle_cop_unusable)(struct kvm_vcpu *vcpu); 583 593 int (*handle_tlb_mod)(struct kvm_vcpu *vcpu); ··· 633 619 /* Trampoline ASM routine to start running in "Guest" context */ 634 620 extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu); 635 621 636 - /* FPU context management */ 622 + /* FPU/MSA context management */ 637 623 void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu); 638 624 void __kvm_restore_fpu(struct kvm_vcpu_arch *vcpu); 639 625 void __kvm_restore_fcsr(struct kvm_vcpu_arch *vcpu); 626 + void __kvm_save_msa(struct kvm_vcpu_arch *vcpu); 627 + void __kvm_restore_msa(struct kvm_vcpu_arch *vcpu); 628 + void __kvm_restore_msa_upper(struct kvm_vcpu_arch *vcpu); 629 + void __kvm_restore_msacsr(struct kvm_vcpu_arch *vcpu); 640 630 void kvm_own_fpu(struct kvm_vcpu *vcpu); 631 + void kvm_own_msa(struct kvm_vcpu *vcpu); 641 632 void kvm_drop_fpu(struct kvm_vcpu *vcpu); 642 633 void kvm_lose_fpu(struct kvm_vcpu *vcpu); 643 634
+1
arch/mips/kernel/asm-offsets.c
··· 440 440 OFFSET(VCPU_FPR31, kvm_vcpu_arch, fpu.fpr[31]); 441 441 442 442 OFFSET(VCPU_FCR31, kvm_vcpu_arch, fpu.fcr31); 443 + OFFSET(VCPU_MSA_CSR, kvm_vcpu_arch, fpu.msacsr); 443 444 BLANK(); 444 445 445 446 OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0);
+4 -2
arch/mips/kvm/Makefile
··· 1 1 # Makefile for KVM support for MIPS 2 2 # 3 3 4 - common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) 4 + common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) 5 5 6 6 EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm 7 7 8 - kvm-objs := $(common-objs) mips.o emulate.o locore.o \ 8 + common-objs-$(CONFIG_CPU_HAS_MSA) += msa.o 9 + 10 + kvm-objs := $(common-objs-y) mips.o emulate.o locore.o \ 9 11 interrupt.o stats.o commpage.o \ 10 12 dyntrans.o trap_emul.o fpu.o 11 13
+21
arch/mips/kvm/locore.S
··· 36 36 #define PT_HOST_USERLOCAL PT_EPC 37 37 38 38 #define CP0_DDATA_LO $28,3 39 + #define CP0_CONFIG3 $16,3 40 + #define CP0_CONFIG5 $16,5 39 41 #define CP0_EBASE $15,1 40 42 41 43 #define CP0_INTCTL $12,1 ··· 371 369 .set pop 372 370 .set noat 373 371 1: 372 + 373 + #ifdef CONFIG_CPU_HAS_MSA 374 + /* 375 + * If MSA is enabled, save MSACSR and clear it so that later 376 + * instructions don't trigger MSAFPE for pending exceptions. 377 + */ 378 + mfc0 t0, CP0_CONFIG3 379 + ext t0, t0, 28, 1 /* MIPS_CONF3_MSAP */ 380 + beqz t0, 1f 381 + nop 382 + mfc0 t0, CP0_CONFIG5 383 + ext t0, t0, 27, 1 /* MIPS_CONF5_MSAEN */ 384 + beqz t0, 1f 385 + nop 386 + _cfcmsa t0, MSA_CSR 387 + sw t0, VCPU_MSA_CSR(k1) 388 + _ctcmsa MSA_CSR, zero 389 + 1: 390 + #endif 374 391 375 392 /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */ 376 393 .set at
+116 -16
arch/mips/kvm/mips.c
··· 1295 1295 1296 1296 if (ret == RESUME_GUEST) { 1297 1297 /* 1298 - * If FPU is enabled (i.e. the guest's FPU context is live), 1299 - * restore FCR31. 1298 + * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context 1299 + * is live), restore FCR31 / MSACSR. 1300 1300 * 1301 1301 * This should be before returning to the guest exception 1302 - * vector, as it may well cause an FP exception if there are 1303 - * pending exception bits unmasked. (see 1302 + * vector, as it may well cause an [MSA] FP exception if there 1303 + * are pending exception bits unmasked. (see 1304 1304 * kvm_mips_csr_die_notifier() for how that is handled). 1305 1305 */ 1306 1306 if (kvm_mips_guest_has_fpu(&vcpu->arch) && 1307 1307 read_c0_status() & ST0_CU1) 1308 1308 __kvm_restore_fcsr(&vcpu->arch); 1309 + 1310 + if (kvm_mips_guest_has_msa(&vcpu->arch) && 1311 + read_c0_config5() & MIPS_CONF5_MSAEN) 1312 + __kvm_restore_msacsr(&vcpu->arch); 1309 1313 } 1310 1314 1311 1315 /* Disable HTW before returning to guest or host */ ··· 1326 1322 1327 1323 preempt_disable(); 1328 1324 1325 + sr = kvm_read_c0_guest_status(cop0); 1326 + 1327 + /* 1328 + * If MSA state is already live, it is undefined how it interacts with 1329 + * FR=0 FPU state, and we don't want to hit reserved instruction 1330 + * exceptions trying to save the MSA state later when CU=1 && FR=1, so 1331 + * play it safe and save it first. 1332 + * 1333 + * In theory we shouldn't ever hit this case since kvm_lose_fpu() should 1334 + * get called when guest CU1 is set, however we can't trust the guest 1335 + * not to clobber the status register directly via the commpage. 
1336 + */ 1337 + if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) && 1338 + vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) 1339 + kvm_lose_fpu(vcpu); 1340 + 1329 1341 /* 1330 1342 * Enable FPU for guest 1331 1343 * We set FR and FRE according to guest context 1332 1344 */ 1333 - sr = kvm_read_c0_guest_status(cop0); 1334 1345 change_c0_status(ST0_CU1 | ST0_FR, sr); 1335 1346 if (cpu_has_fre) { 1336 1347 cfg5 = kvm_read_c0_guest_config5(cop0); ··· 1362 1343 preempt_enable(); 1363 1344 } 1364 1345 1365 - /* Drop FPU without saving it */ 1346 + #ifdef CONFIG_CPU_HAS_MSA 1347 + /* Enable MSA for guest and restore context */ 1348 + void kvm_own_msa(struct kvm_vcpu *vcpu) 1349 + { 1350 + struct mips_coproc *cop0 = vcpu->arch.cop0; 1351 + unsigned int sr, cfg5; 1352 + 1353 + preempt_disable(); 1354 + 1355 + /* 1356 + * Enable FPU if enabled in guest, since we're restoring FPU context 1357 + * anyway. We set FR and FRE according to guest context. 1358 + */ 1359 + if (kvm_mips_guest_has_fpu(&vcpu->arch)) { 1360 + sr = kvm_read_c0_guest_status(cop0); 1361 + 1362 + /* 1363 + * If FR=0 FPU state is already live, it is undefined how it 1364 + * interacts with MSA state, so play it safe and save it first. 
1365 + */ 1366 + if (!(sr & ST0_FR) && 1367 + (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | 1368 + KVM_MIPS_FPU_MSA)) == KVM_MIPS_FPU_FPU) 1369 + kvm_lose_fpu(vcpu); 1370 + 1371 + change_c0_status(ST0_CU1 | ST0_FR, sr); 1372 + if (sr & ST0_CU1 && cpu_has_fre) { 1373 + cfg5 = kvm_read_c0_guest_config5(cop0); 1374 + change_c0_config5(MIPS_CONF5_FRE, cfg5); 1375 + } 1376 + } 1377 + 1378 + /* Enable MSA for guest */ 1379 + set_c0_config5(MIPS_CONF5_MSAEN); 1380 + enable_fpu_hazard(); 1381 + 1382 + switch (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA)) { 1383 + case KVM_MIPS_FPU_FPU: 1384 + /* 1385 + * Guest FPU state already loaded, only restore upper MSA state 1386 + */ 1387 + __kvm_restore_msa_upper(&vcpu->arch); 1388 + vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA; 1389 + break; 1390 + case 0: 1391 + /* Neither FPU or MSA already active, restore full MSA state */ 1392 + __kvm_restore_msa(&vcpu->arch); 1393 + vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA; 1394 + if (kvm_mips_guest_has_fpu(&vcpu->arch)) 1395 + vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU; 1396 + break; 1397 + default: 1398 + break; 1399 + } 1400 + 1401 + preempt_enable(); 1402 + } 1403 + #endif 1404 + 1405 + /* Drop FPU & MSA without saving it */ 1366 1406 void kvm_drop_fpu(struct kvm_vcpu *vcpu) 1367 1407 { 1368 1408 preempt_disable(); 1409 + if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) { 1410 + disable_msa(); 1411 + vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_MSA; 1412 + } 1369 1413 if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) { 1370 1414 clear_c0_status(ST0_CU1 | ST0_FR); 1371 1415 vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU; ··· 1436 1354 preempt_enable(); 1437 1355 } 1438 1356 1439 - /* Save and disable FPU */ 1357 + /* Save and disable FPU & MSA */ 1440 1358 void kvm_lose_fpu(struct kvm_vcpu *vcpu) 1441 1359 { 1442 1360 /* 1443 - * FPU gets disabled in root context (hardware) when it is disabled in 1444 - * guest context (software), but the register state in the hardware may 1445 - * still 
be in use. This is why we explicitly re-enable the hardware 1361 + * FPU & MSA get disabled in root context (hardware) when it is disabled 1362 + * in guest context (software), but the register state in the hardware 1363 + * may still be in use. This is why we explicitly re-enable the hardware 1446 1364 * before saving. 1447 1365 */ 1448 1366 1449 1367 preempt_disable(); 1450 - if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) { 1368 + if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) { 1369 + set_c0_config5(MIPS_CONF5_MSAEN); 1370 + enable_fpu_hazard(); 1371 + 1372 + __kvm_save_msa(&vcpu->arch); 1373 + 1374 + /* Disable MSA & FPU */ 1375 + disable_msa(); 1376 + if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) 1377 + clear_c0_status(ST0_CU1 | ST0_FR); 1378 + vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA); 1379 + } else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) { 1451 1380 set_c0_status(ST0_CU1); 1452 1381 enable_fpu_hazard(); 1453 1382 ··· 1472 1379 } 1473 1380 1474 1381 /* 1475 - * Step over a specific ctc1 to FCSR which is used to restore guest FCSR state 1476 - * and may trigger a "harmless" FP exception if cause bits are set in the value 1477 - * being written. 1382 + * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are 1383 + * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP 1384 + * exception if cause bits are set in the value being written. 
1478 1385 */ 1479 1386 static int kvm_mips_csr_die_notify(struct notifier_block *self, 1480 1387 unsigned long cmd, void *ptr) ··· 1483 1390 struct pt_regs *regs = args->regs; 1484 1391 unsigned long pc; 1485 1392 1486 - /* Only interested in FPE */ 1487 - if (cmd != DIE_FP) 1393 + /* Only interested in FPE and MSAFPE */ 1394 + if (cmd != DIE_FP && cmd != DIE_MSAFP) 1488 1395 return NOTIFY_DONE; 1489 1396 1490 1397 /* Return immediately if guest context isn't active */ ··· 1499 1406 case DIE_FP: 1500 1407 /* match 2nd instruction in __kvm_restore_fcsr */ 1501 1408 if (pc != (unsigned long)&__kvm_restore_fcsr + 4) 1409 + return NOTIFY_DONE; 1410 + break; 1411 + case DIE_MSAFP: 1412 + /* match 2nd/3rd instruction in __kvm_restore_msacsr */ 1413 + if (!cpu_has_msa || 1414 + pc < (unsigned long)&__kvm_restore_msacsr + 4 || 1415 + pc > (unsigned long)&__kvm_restore_msacsr + 8) 1502 1416 return NOTIFY_DONE; 1503 1417 break; 1504 1418 }
+161
arch/mips/kvm/msa.S
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * MIPS SIMD Architecture (MSA) context handling code for KVM. 7 + * 8 + * Copyright (C) 2015 Imagination Technologies Ltd. 9 + */ 10 + 11 + #include <asm/asm.h> 12 + #include <asm/asm-offsets.h> 13 + #include <asm/asmmacro.h> 14 + #include <asm/regdef.h> 15 + 16 + .set noreorder 17 + .set noat 18 + 19 + LEAF(__kvm_save_msa) 20 + st_d 0, VCPU_FPR0, a0 21 + st_d 1, VCPU_FPR1, a0 22 + st_d 2, VCPU_FPR2, a0 23 + st_d 3, VCPU_FPR3, a0 24 + st_d 4, VCPU_FPR4, a0 25 + st_d 5, VCPU_FPR5, a0 26 + st_d 6, VCPU_FPR6, a0 27 + st_d 7, VCPU_FPR7, a0 28 + st_d 8, VCPU_FPR8, a0 29 + st_d 9, VCPU_FPR9, a0 30 + st_d 10, VCPU_FPR10, a0 31 + st_d 11, VCPU_FPR11, a0 32 + st_d 12, VCPU_FPR12, a0 33 + st_d 13, VCPU_FPR13, a0 34 + st_d 14, VCPU_FPR14, a0 35 + st_d 15, VCPU_FPR15, a0 36 + st_d 16, VCPU_FPR16, a0 37 + st_d 17, VCPU_FPR17, a0 38 + st_d 18, VCPU_FPR18, a0 39 + st_d 19, VCPU_FPR19, a0 40 + st_d 20, VCPU_FPR20, a0 41 + st_d 21, VCPU_FPR21, a0 42 + st_d 22, VCPU_FPR22, a0 43 + st_d 23, VCPU_FPR23, a0 44 + st_d 24, VCPU_FPR24, a0 45 + st_d 25, VCPU_FPR25, a0 46 + st_d 26, VCPU_FPR26, a0 47 + st_d 27, VCPU_FPR27, a0 48 + st_d 28, VCPU_FPR28, a0 49 + st_d 29, VCPU_FPR29, a0 50 + st_d 30, VCPU_FPR30, a0 51 + st_d 31, VCPU_FPR31, a0 52 + jr ra 53 + nop 54 + END(__kvm_save_msa) 55 + 56 + LEAF(__kvm_restore_msa) 57 + ld_d 0, VCPU_FPR0, a0 58 + ld_d 1, VCPU_FPR1, a0 59 + ld_d 2, VCPU_FPR2, a0 60 + ld_d 3, VCPU_FPR3, a0 61 + ld_d 4, VCPU_FPR4, a0 62 + ld_d 5, VCPU_FPR5, a0 63 + ld_d 6, VCPU_FPR6, a0 64 + ld_d 7, VCPU_FPR7, a0 65 + ld_d 8, VCPU_FPR8, a0 66 + ld_d 9, VCPU_FPR9, a0 67 + ld_d 10, VCPU_FPR10, a0 68 + ld_d 11, VCPU_FPR11, a0 69 + ld_d 12, VCPU_FPR12, a0 70 + ld_d 13, VCPU_FPR13, a0 71 + ld_d 14, VCPU_FPR14, a0 72 + ld_d 15, VCPU_FPR15, a0 73 + ld_d 16, VCPU_FPR16, a0 74 + ld_d 
17, VCPU_FPR17, a0 75 + ld_d 18, VCPU_FPR18, a0 76 + ld_d 19, VCPU_FPR19, a0 77 + ld_d 20, VCPU_FPR20, a0 78 + ld_d 21, VCPU_FPR21, a0 79 + ld_d 22, VCPU_FPR22, a0 80 + ld_d 23, VCPU_FPR23, a0 81 + ld_d 24, VCPU_FPR24, a0 82 + ld_d 25, VCPU_FPR25, a0 83 + ld_d 26, VCPU_FPR26, a0 84 + ld_d 27, VCPU_FPR27, a0 85 + ld_d 28, VCPU_FPR28, a0 86 + ld_d 29, VCPU_FPR29, a0 87 + ld_d 30, VCPU_FPR30, a0 88 + ld_d 31, VCPU_FPR31, a0 89 + jr ra 90 + nop 91 + END(__kvm_restore_msa) 92 + 93 + .macro kvm_restore_msa_upper wr, off, base 94 + .set push 95 + .set noat 96 + #ifdef CONFIG_64BIT 97 + ld $1, \off(\base) 98 + insert_d \wr, 1 99 + #elif defined(CONFIG_CPU_LITTLE_ENDIAN) 100 + lw $1, \off(\base) 101 + insert_w \wr, 2 102 + lw $1, (\off+4)(\base) 103 + insert_w \wr, 3 104 + #else /* CONFIG_CPU_BIG_ENDIAN */ 105 + lw $1, (\off+4)(\base) 106 + insert_w \wr, 2 107 + lw $1, \off(\base) 108 + insert_w \wr, 3 109 + #endif 110 + .set pop 111 + .endm 112 + 113 + LEAF(__kvm_restore_msa_upper) 114 + kvm_restore_msa_upper 0, VCPU_FPR0 +8, a0 115 + kvm_restore_msa_upper 1, VCPU_FPR1 +8, a0 116 + kvm_restore_msa_upper 2, VCPU_FPR2 +8, a0 117 + kvm_restore_msa_upper 3, VCPU_FPR3 +8, a0 118 + kvm_restore_msa_upper 4, VCPU_FPR4 +8, a0 119 + kvm_restore_msa_upper 5, VCPU_FPR5 +8, a0 120 + kvm_restore_msa_upper 6, VCPU_FPR6 +8, a0 121 + kvm_restore_msa_upper 7, VCPU_FPR7 +8, a0 122 + kvm_restore_msa_upper 8, VCPU_FPR8 +8, a0 123 + kvm_restore_msa_upper 9, VCPU_FPR9 +8, a0 124 + kvm_restore_msa_upper 10, VCPU_FPR10+8, a0 125 + kvm_restore_msa_upper 11, VCPU_FPR11+8, a0 126 + kvm_restore_msa_upper 12, VCPU_FPR12+8, a0 127 + kvm_restore_msa_upper 13, VCPU_FPR13+8, a0 128 + kvm_restore_msa_upper 14, VCPU_FPR14+8, a0 129 + kvm_restore_msa_upper 15, VCPU_FPR15+8, a0 130 + kvm_restore_msa_upper 16, VCPU_FPR16+8, a0 131 + kvm_restore_msa_upper 17, VCPU_FPR17+8, a0 132 + kvm_restore_msa_upper 18, VCPU_FPR18+8, a0 133 + kvm_restore_msa_upper 19, VCPU_FPR19+8, a0 134 + kvm_restore_msa_upper 20, 
VCPU_FPR20+8, a0 135 + kvm_restore_msa_upper 21, VCPU_FPR21+8, a0 136 + kvm_restore_msa_upper 22, VCPU_FPR22+8, a0 137 + kvm_restore_msa_upper 23, VCPU_FPR23+8, a0 138 + kvm_restore_msa_upper 24, VCPU_FPR24+8, a0 139 + kvm_restore_msa_upper 25, VCPU_FPR25+8, a0 140 + kvm_restore_msa_upper 26, VCPU_FPR26+8, a0 141 + kvm_restore_msa_upper 27, VCPU_FPR27+8, a0 142 + kvm_restore_msa_upper 28, VCPU_FPR28+8, a0 143 + kvm_restore_msa_upper 29, VCPU_FPR29+8, a0 144 + kvm_restore_msa_upper 30, VCPU_FPR30+8, a0 145 + kvm_restore_msa_upper 31, VCPU_FPR31+8, a0 146 + jr ra 147 + nop 148 + END(__kvm_restore_msa_upper) 149 + 150 + LEAF(__kvm_restore_msacsr) 151 + lw t0, VCPU_MSA_CSR(a0) 152 + /* 153 + * The ctcmsa must stay at this offset in __kvm_restore_msacsr. 154 + * See kvm_mips_csr_die_notify() which handles t0 containing a value 155 + * which triggers an MSA FP Exception, which must be stepped over and 156 + * ignored since the set cause bits must remain there for the guest. 157 + */ 158 + _ctcmsa MSA_CSR, t0 159 + jr ra 160 + nop 161 + END(__kvm_restore_msacsr)