/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/code-patching-asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/export.h>
#include <asm/tm.h>
#include <asm/opal.h>
#include <asm/xive-regs.h>
#include <asm/thread_info.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>
#include <asm/cpuidle.h>
#include <asm/ultravisor-api.h>

/* Sign-extend HDEC if not on POWER9 */
#define EXTEND_HDEC(reg) \
BEGIN_FTR_SECTION; \
	extsw reg, reg; \
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
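
/*
 * Before POWER9 (CPU_FTR_ARCH_300) the HDEC register is only 32 bits
 * wide, hence the sign extension above for 64-bit compares; on POWER9
 * and later HDEC is wider and the extsw is patched out at boot.
 */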

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE 1
#define NAPPING_NOVCPU 2
#define NAPPING_UNSPLIT 3

/* Stack frame offsets for kvmppc_hv_entry */
#define SFS 208
#define STACK_SLOT_TRAP (SFS-4)
#define STACK_SLOT_SHORT_PATH (SFS-8)
#define STACK_SLOT_TID (SFS-16)
#define STACK_SLOT_PSSCR (SFS-24)
#define STACK_SLOT_PID (SFS-32)
#define STACK_SLOT_IAMR (SFS-40)
#define STACK_SLOT_CIABR (SFS-48)
#define STACK_SLOT_DAWR0 (SFS-56)
#define STACK_SLOT_DAWRX0 (SFS-64)
#define STACK_SLOT_HFSCR (SFS-72)
#define STACK_SLOT_AMR (SFS-80)
#define STACK_SLOT_UAMOR (SFS-88)
#define STACK_SLOT_DAWR1 (SFS-96)
#define STACK_SLOT_DAWRX1 (SFS-104)
#define STACK_SLOT_FSCR (SFS-112)
/* the following is used by the P9 short path */
#define STACK_SLOT_NVGPRS (SFS-152)	/* 18 gprs */

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr r0
	std r0, PPC_LR_STKOFF(r1)
	stdu r1, -112(r1)
	mfmsr r10
	std r10, HSTATE_HOST_MSR(r13)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li r0,MSR_RI
	andc r0,r10,r0
	li r6,MSR_IR | MSR_DR
	andc r6,r10,r6
	mtmsrd r0,1	/* clear RI in MSR */
	mtsrr0 r5
	mtsrr1 r6
	RFI_TO_KERNEL
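	/*
	 * SRR0/SRR1 are set up above so that this rfid lands at
	 * kvmppc_call_hv_entry with relocation off (MSR IR/DR clear),
	 * i.e. the entry code below runs in real mode.
	 */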

kvmppc_call_hv_entry:
	ld r4, HSTATE_KVM_VCPU(r13)
	bl kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld r5,HSTATE_DABR(r13)
	li r6,7
	mtspr SPRN_DABR,r5
	mtspr SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld r3,PACA_SPRG_VDSO(r13)
	mtspr SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	bl kvmhv_load_host_pmu

	/*
	 * Reload DEC. HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld r3, HSTATE_DECEXP(r13)
	mftb r4
	subf r4, r4, r3
	mtspr SPRN_DEC, r4

	/* hwthread_req may have got set by cede or no vcpu, so clear it */
	li r0, 0
	stb r0, HSTATE_HWTHREAD_REQ(r13)

	/*
	 * For external interrupts we need to call the Linux
	 * handler to process the interrupt. We do that by jumping
	 * to absolute address 0x500 for external interrupts.
	 * The [h]rfid at the end of the handler will return to
	 * the book3s_hv_interrupts.S code. For other interrupts
	 * we do the rfid to get back to the book3s_hv_interrupts.S
	 * code here.
	 */
	ld r8, 112+PPC_LR_STKOFF(r1)
	addi r1, r1, 112
	ld r7, HSTATE_HOST_MSR(r13)

	/* Return the trap number on this thread as the return value */
	mr r3, r12

	/*
	 * If we came back from the guest via a relocation-on interrupt,
	 * we will be in virtual mode at this point, which makes it a
	 * little easier to get back to the caller.
	 */
	mfmsr r0
	andi. r0, r0, MSR_IR	/* in real mode? */
	bne .Lvirt_return

	/* RFI into the highmem handler */
	mfmsr r6
	li r0, MSR_RI
	andc r6, r6, r0
	mtmsrd r6, 1	/* Clear RI in MSR */
	mtsrr0 r8
	mtsrr1 r7
	RFI_TO_KERNEL

	/* Virtual-mode return */
.Lvirt_return:
	mtlr r8
	blr

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	/* HDEC may be larger than DEC for arch >= v3.00, but since the */
	/* HDEC value came from DEC in the first place, it will fit */
	mfspr r3, SPRN_HDEC
	mtspr SPRN_DEC, r3
	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld r5, HSTATE_KVM_VCORE(r13)
65:	lbz r0, VCORE_IN_GUEST(r5)
	cmpwi r0, 0
	beq 65b
	/* Set LPCR. */
	ld r8,VCORE_LPCR(r5)
	mtspr SPRN_LPCR,r8
	isync
	/* set our bit in napping_threads */
	ld r5, HSTATE_KVM_VCORE(r13)
	lbz r7, HSTATE_PTID(r13)
	li r0, 1
	sld r0, r0, r7
	addi r6, r5, VCORE_NAPPING_THREADS
1:	lwarx r3, 0, r6
	or r3, r3, r0
	stwcx. r3, 0, r6
	bne 1b
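	/*
	 * The lwarx/stwcx. pair above is the usual PPC atomic
	 * read-modify-write loop; roughly, in C terms:
	 *	do {
	 *		old = vc->napping_threads;
	 *	} while (cmpxchg(&vc->napping_threads, old,
	 *			 old | (1 << ptid)) != old);
	 * (vc/old/ptid are illustrative names, not symbols used here.)
	 */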
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li r12, 0
	lwz r7, VCORE_ENTRY_EXIT(r5)
	cmpwi r7, 0x100
	bge kvm_novcpu_exit	/* another thread already exiting */
	li r3, NAPPING_NOVCPU
	stb r3, HSTATE_NAPPING(r13)

	li r3, 0	/* Don't wake on privileged (OS) doorbell */
	b kvm_do_nap

/*
 * kvm_novcpu_wakeup
 *	Entered from kvm_start_guest if kvm_hstate.napping is set
 *	to NAPPING_NOVCPU
 *		r2 = kernel TOC
 *		r13 = paca
 */
kvm_novcpu_wakeup:
	ld r1, HSTATE_HOST_R1(r13)
	ld r5, HSTATE_KVM_VCORE(r13)
	li r0, 0
	stb r0, HSTATE_NAPPING(r13)

	/* check the wake reason */
	bl kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called
	 * a C routine in kvmppc_check_wake_reason.
	 *	r5 = VCORE
	 */
	ld r5, HSTATE_KVM_VCORE(r13)

	/* see if any other thread is already exiting */
	lwz r0, VCORE_ENTRY_EXIT(r5)
	cmpwi r0, 0x100
	bge kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz r7, HSTATE_PTID(r13)
	li r0, 1
	sld r0, r0, r7
	addi r6, r5, VCORE_NAPPING_THREADS
4:	lwarx r7, 0, r6
	andc r7, r7, r0
	stwcx. r7, 0, r6
	bne 4b

	/* See if the wake reason means we need to exit */
	cmpdi r3, 0
	bge kvm_novcpu_exit

	/* See if our timeslice has expired (HDEC is negative) */
	mfspr r0, SPRN_HDEC
	EXTEND_HDEC(r0)
	li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
	cmpdi r0, 0
	blt kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld r4, HSTATE_KVM_VCPU(r13)
	cmpdi r4, 0
	beq kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi r3, r4, VCPU_TB_RMENTRY
	bl kvmhv_start_timing
#endif
	b kvmppc_got_guest

kvm_novcpu_exit:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld r4, HSTATE_KVM_VCPU(r13)
	cmpdi r4, 0
	beq 13f
	addi r3, r4, VCPU_TB_RMEXIT
	bl kvmhv_accumulate_time
#endif
13:	mr r3, r12
	stw r12, STACK_SLOT_TRAP(r1)
	bl kvmhv_commence_exit
	nop
	b kvmhv_switch_to_host

/*
 * We come in here when woken from the Linux offline idle code.
 * Relocation is off.
 * r3 contains the SRR1 wakeup value, SRR1 is trashed.
 */
_GLOBAL(idle_kvm_start_guest)
	ld r4,PACAEMERGSP(r13)
	mfcr r5
	mflr r0
	std r1,0(r4)
	std r5,8(r4)
	std r0,16(r4)
	subi r1,r4,STACK_FRAME_OVERHEAD
	SAVE_NVGPRS(r1)

	/*
	 * Could avoid this and pass it through in r3. For now,
	 * code expects it to be in SRR1.
	 */
	mtspr SPRN_SRR1,r3

	li r0,0
	stb r0,PACA_FTRACE_ENABLED(r13)

	li r0,KVM_HWTHREAD_IN_KVM
	stb r0,HSTATE_HWTHREAD_STATE(r13)

	/* kvm cede / napping does not come through here */
	lbz r0,HSTATE_NAPPING(r13)
	twnei r0,0

	b 1f

kvm_unsplit_wakeup:
	li r0, 0
	stb r0, HSTATE_NAPPING(r13)

1:

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI. (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl kvmppc_check_wake_reason
	/*
	 * kvmppc_check_wake_reason could invoke a C routine, but we
	 * have no volatile registers to restore when we return.
	 */

	cmpdi r3, 0
	bge kvm_no_guest

	/* get vcore pointer, NULL if we have nothing to run */
	ld r5,HSTATE_KVM_VCORE(r13)
	cmpdi r5,0
	/* if we have no vcore to run, go back to sleep */
	beq kvm_no_guest

kvm_secondary_got_guest:

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld r6, PACA_DSCR_DEFAULT(r13)
	std r6, HSTATE_DSCR(r13)

	/* On thread 0 of a subcore, set HDEC to max */
	lbz r4, HSTATE_PTID(r13)
	cmpwi r4, 0
	bne 63f
	LOAD_REG_ADDR(r6, decrementer_max)
	ld r6, 0(r6)
	mtspr SPRN_HDEC, r6
BEGIN_FTR_SECTION
	/* and set per-LPAR registers, if doing dynamic micro-threading */
	ld r6, HSTATE_SPLIT_MODE(r13)
	cmpdi r6, 0
	beq 63f
	ld r0, KVM_SPLIT_RPR(r6)
	mtspr SPRN_RPR, r0
	ld r0, KVM_SPLIT_PMMAR(r6)
	mtspr SPRN_PMMAR, r0
	ld r0, KVM_SPLIT_LDBAR(r6)
	mtspr SPRN_LDBAR, r0
	isync
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
63:
	/* Order load of vcpu after load of vcore */
	lwsync
	ld r4, HSTATE_KVM_VCPU(r13)
	bl kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu and vcore pointers so we don't come back in early */
	li r0, 0
	std r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Once we clear HSTATE_KVM_VCORE(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory. This lwsync makes sure
	 * that that is true.
	 */
	lwsync
	std r0, HSTATE_KVM_VCORE(r13)

	/*
	 * All secondaries exiting the guest fall through this path.
	 * Before proceeding, check for an HMI interrupt and, if one is
	 * pending, invoke the opal HMI handler. By now the primary
	 * thread on this core/subcore has already done the partition
	 * switch and TB resync, so we are safe to call the handler.
	 */
	cmpwi r12, BOOK3S_INTERRUPT_HMI
	bne kvm_no_guest

	li r3,0	/* NULL argument */
	bl hmi_exception_realmode
/*
 * At this point we have finished executing in the guest.
 * We need to wait for hwthread_req to become zero, since
 * we may not turn on the MMU while hwthread_req is non-zero.
 * While waiting we also need to check if we get given a vcpu to run.
 */
kvm_no_guest:
	lbz r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi r3, 0
	bne 53f
	HMT_MEDIUM
	li r0, KVM_HWTHREAD_IN_KERNEL
	stb r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	sync
	lbz r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi r3, 0
	bne 54f

	/*
	 * Jump to idle_return_gpr_loss, which returns to the
	 * idle_kvm_start_guest caller.
	 */
	li r3, LPCR_PECE0
	mfspr r4, SPRN_LPCR
	rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr SPRN_LPCR, r4
	/* set up r3 for return */
	mfspr r3,SPRN_SRR1
	REST_NVGPRS(r1)
	addi r1, r1, STACK_FRAME_OVERHEAD
	ld r0, 16(r1)
	ld r5, 8(r1)
	ld r1, 0(r1)
	mtlr r0
	mtcr r5
	blr

53:
BEGIN_FTR_SECTION
	HMT_LOW
	ld r5, HSTATE_KVM_VCORE(r13)
	cmpdi r5, 0
	bne 60f
	ld r3, HSTATE_SPLIT_MODE(r13)
	cmpdi r3, 0
	beq kvm_no_guest
	lbz r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi r0, 0
	beq kvm_no_guest
	HMT_MEDIUM
	b kvm_unsplit_nap
60:	HMT_MEDIUM
	b kvm_secondary_got_guest
FTR_SECTION_ELSE
	HMT_LOW
	ld r5, HSTATE_KVM_VCORE(r13)
	cmpdi r5, 0
	beq kvm_no_guest
	HMT_MEDIUM
	b kvm_secondary_got_guest
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)

54:	li r0, KVM_HWTHREAD_IN_KVM
	stb r0, HSTATE_HWTHREAD_STATE(r13)
	b kvm_no_guest

/*
 * Here the primary thread is trying to return the core to
 * whole-core mode, so we need to nap.
 */
kvm_unsplit_nap:
	/*
	 * When secondaries are napping in kvm_unsplit_nap() with
	 * hwthread_req = 1, HMIs are ignored even though the subcores
	 * have already exited the guest. The HMI therefore keeps waking
	 * the secondaries from nap in a loop, and they always go back
	 * to nap since no vcore is assigned to them. This makes it
	 * impossible for the primary thread to get hold of the
	 * secondary threads, resulting in a soft lockup in the KVM path.
	 *
	 * Let us check if HMI is pending and handle it before we go to nap.
	 */
	cmpwi r12, BOOK3S_INTERRUPT_HMI
	bne 55f
	li r3, 0	/* NULL argument */
	bl hmi_exception_realmode
55:
	/*
	 * Ensure that secondary doesn't nap when it has
	 * its vcore pointer set.
	 */
	sync	/* matches smp_mb() before setting split_info.do_nap */
	ld r0, HSTATE_KVM_VCORE(r13)
	cmpdi r0, 0
	bne kvm_no_guest
	/* clear any pending message */
BEGIN_FTR_SECTION
	lis r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Set kvm_split_mode.napped[tid] = 1 */
	ld r3, HSTATE_SPLIT_MODE(r13)
	li r0, 1
	lhz r4, PACAPACAINDEX(r13)
	clrldi r4, r4, 61	/* micro-threading => P8 => 8 threads/core */
	addi r4, r4, KVM_SPLIT_NAPPED
	stbx r0, r3, r4
	/* Check the do_nap flag again after setting napped[] */
	sync
	lbz r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi r0, 0
	beq 57f
	li r3, NAPPING_UNSPLIT
	stb r3, HSTATE_NAPPING(r13)
	li r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
	mfspr r5, SPRN_LPCR
	rlwimi r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
	b kvm_nap_sequence

57:	li r0, 0
	stbx r0, r3, r4
	b kvm_no_guest

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer (or NULL)
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = TOC
	 * all other volatile GPRS = free
	 * Does not preserve non-volatile GPRs or CR fields
	 */
	mflr r0
	std r0, PPC_LR_STKOFF(r1)
	stdu r1, -SFS(r1)

	/* Save R1 in the PACA */
	std r1, HSTATE_HOST_R1(r13)

	li r6, KVM_GUEST_MODE_HOST_HV
	stb r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	cmpdi r4, 0
	beq 1f
	addi r3, r4, VCPU_TB_RMENTRY
	bl kvmhv_start_timing
1:
#endif

	ld r5, HSTATE_KVM_VCORE(r13)
	ld r9, VCORE_KVM(r5)	/* pointer to struct kvm */

	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Set bit in entry map iff exit map is zero. */
	li r7, 1
	lbz r6, HSTATE_PTID(r13)
	sld r7, r7, r6
	addi r8, r5, VCORE_ENTRY_EXIT
21:	lwarx r3, 0, r8
	cmpwi r3, 0x100	/* any threads starting to exit? */
	bge secondary_too_late	/* if so we're too late to the party */
	or r3, r3, r7
	stwcx. r3, 0, r8
	bne 21b
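	/*
	 * VCORE_ENTRY_EXIT packs two fields into one word: roughly, the
	 * low byte tracks threads that have entered and the next byte
	 * threads that are exiting, so any value >= 0x100 means some
	 * thread has already begun to exit.
	 */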

	/* Primary thread switches to guest partition. */
	cmpwi r6,0
	bne 10f

	lwz r7,KVM_LPID(r9)
BEGIN_FTR_SECTION
	ld r6,KVM_SDR1(r9)
	li r0,LPID_RSVD	/* switch to reserved LPID */
	mtspr SPRN_LPID,r0
	ptesync
	mtspr SPRN_SDR1,r6	/* switch to partition page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	mtspr SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB. */
	mr r3, r9	/* kvm pointer */
	lhz r4, PACAPACAINDEX(r13)	/* physical cpu number */
	li r5, 0	/* nested vcpu pointer */
	bl kvmppc_check_need_tlb_flush
	nop
	ld r5, HSTATE_KVM_VCORE(r13)

	/* Add timebase offset onto timebase */
22:	ld r8,VCORE_TB_OFFSET(r5)
	cmpdi r8,0
	beq 37f
	std r8, VCORE_TB_OFFSET_APPL(r5)
	mftb r6	/* current host timebase */
	add r8,r8,r6
	mtspr SPRN_TBU40,r8	/* update upper 40 bits */
	mftb r7	/* check if lower 24 bits overflowed */
	clrldi r6,r6,40
	clrldi r7,r7,40
	cmpld r7,r6
	bge 37f
	addis r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr SPRN_TBU40,r8
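	/*
	 * TBU40 writes only the upper 40 bits of the timebase, so the
	 * addition above can lose a carry out of the low 24 bits; the
	 * mftb/cmpld sequence detects that case and bumps the upper
	 * bits by one.
	 */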

	/* Load guest PCR value to select appropriate compat mode */
37:	ld r7, VCORE_PCR(r5)
	LOAD_REG_IMMEDIATE(r6, PCR_MASK)
	cmpld r7, r6
	beq 38f
	or r7, r7, r6
	mtspr SPRN_PCR, r7
38:

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	ld r8, VCORE_DPDES(r5)
	ld r7, VCORE_VTB(r5)
	mtspr SPRN_DPDES, r8
	mtspr SPRN_VTB, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Mark the subcore state as inside guest */
	bl kvmppc_subcore_enter_guest
	nop
	ld r5, HSTATE_KVM_VCORE(r13)
	ld r4, HSTATE_KVM_VCPU(r13)
	li r0,1
	stb r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Do we have a guest vcpu to run? */
10:	cmpdi r4, 0
	beq kvmppc_primary_no_guest
kvmppc_got_guest:
	/* Increment yield count if they have a VPA */
	ld r3, VCPU_VPA(r4)
	cmpdi r3, 0
	beq 25f
	li r6, LPPACA_YIELDCOUNT
	LWZX_BE r5, r3, r6
	addi r5, r5, 1
	STWX_BE r5, r3, r6
	li r6, 1
	stb r6, VCPU_VPA_DIRTY(r4)
25:

	/* Save purr/spurr */
	mfspr r5,SPRN_PURR
	mfspr r6,SPRN_SPURR
	std r5,HSTATE_PURR(r13)
	std r6,HSTATE_SPURR(r13)
	ld r7,VCPU_PURR(r4)
	ld r8,VCPU_SPURR(r4)
	mtspr SPRN_PURR,r7
	mtspr SPRN_SPURR,r8

	/* Save host values of some registers */
BEGIN_FTR_SECTION
	mfspr r5, SPRN_TIDR
	mfspr r6, SPRN_PSSCR
	mfspr r7, SPRN_PID
	std r5, STACK_SLOT_TID(r1)
	std r6, STACK_SLOT_PSSCR(r1)
	std r7, STACK_SLOT_PID(r1)
	mfspr r5, SPRN_HFSCR
	std r5, STACK_SLOT_HFSCR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
BEGIN_FTR_SECTION
	mfspr r5, SPRN_CIABR
	mfspr r6, SPRN_DAWR0
	mfspr r7, SPRN_DAWRX0
	mfspr r8, SPRN_IAMR
	std r5, STACK_SLOT_CIABR(r1)
	std r6, STACK_SLOT_DAWR0(r1)
	std r7, STACK_SLOT_DAWRX0(r1)
	std r8, STACK_SLOT_IAMR(r1)
	mfspr r5, SPRN_FSCR
	std r5, STACK_SLOT_FSCR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
BEGIN_FTR_SECTION
	mfspr r6, SPRN_DAWR1
	mfspr r7, SPRN_DAWRX1
	std r6, STACK_SLOT_DAWR1(r1)
	std r7, STACK_SLOT_DAWRX1(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S | CPU_FTR_DAWR1)

	mfspr r5, SPRN_AMR
	std r5, STACK_SLOT_AMR(r1)
	mfspr r6, SPRN_UAMOR
	std r6, STACK_SLOT_UAMOR(r1)

BEGIN_FTR_SECTION
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz r5,VCPU_DABRX(r4)
	ld r6,VCPU_DABR(r4)
	mtspr SPRN_DABRX,r5
	mtspr SPRN_DABR,r6
	isync
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Branch around the call if both CPU_FTR_TM and
 * CPU_FTR_P9_TM_HV_ASSIST are off.
 */
BEGIN_FTR_SECTION
	b 91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
	 */
	mr r3, r4
	ld r4, VCPU_MSR(r3)
	li r5, 0	/* don't preserve non-vol regs */
	bl kvmppc_restore_tm_hv
	nop
	ld r4, HSTATE_KVM_VCPU(r13)
91:
#endif

	/* Load guest PMU registers; r4 = vcpu pointer here */
	mr r3, r4
	bl kvmhv_load_guest_pmu

	/* Load up FP, VMX and VSX registers */
	ld r4, HSTATE_KVM_VCPU(r13)
	bl kvmppc_load_fp

	ld r14, VCPU_GPR(R14)(r4)
	ld r15, VCPU_GPR(R15)(r4)
	ld r16, VCPU_GPR(R16)(r4)
	ld r17, VCPU_GPR(R17)(r4)
	ld r18, VCPU_GPR(R18)(r4)
	ld r19, VCPU_GPR(R19)(r4)
	ld r20, VCPU_GPR(R20)(r4)
	ld r21, VCPU_GPR(R21)(r4)
	ld r22, VCPU_GPR(R22)(r4)
	ld r23, VCPU_GPR(R23)(r4)
	ld r24, VCPU_GPR(R24)(r4)
	ld r25, VCPU_GPR(R25)(r4)
	ld r26, VCPU_GPR(R26)(r4)
	ld r27, VCPU_GPR(R27)(r4)
	ld r28, VCPU_GPR(R28)(r4)
	ld r29, VCPU_GPR(R29)(r4)
	ld r30, VCPU_GPR(R30)(r4)
	ld r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */
	ld r5, VCPU_DSCR(r4)
	mtspr SPRN_DSCR, r5

BEGIN_FTR_SECTION
	/* Skip next section on POWER7 */
	b 8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Load up POWER8-specific registers */
	ld r5, VCPU_IAMR(r4)
	lwz r6, VCPU_PSPB(r4)
	ld r7, VCPU_FSCR(r4)
	mtspr SPRN_IAMR, r5
	mtspr SPRN_PSPB, r6
	mtspr SPRN_FSCR, r7
	/*
	 * Handle broken DAWR case by not writing it. This means we
	 * can still store the DAWR register for migration.
	 */
	LOAD_REG_ADDR(r5, dawr_force_enable)
	lbz r5, 0(r5)
	cmpdi r5, 0
	beq 1f
	ld r5, VCPU_DAWR0(r4)
	ld r6, VCPU_DAWRX0(r4)
	mtspr SPRN_DAWR0, r5
	mtspr SPRN_DAWRX0, r6
BEGIN_FTR_SECTION
	ld r5, VCPU_DAWR1(r4)
	ld r6, VCPU_DAWRX1(r4)
	mtspr SPRN_DAWR1, r5
	mtspr SPRN_DAWRX1, r6
END_FTR_SECTION_IFSET(CPU_FTR_DAWR1)
1:
	ld r7, VCPU_CIABR(r4)
	ld r8, VCPU_TAR(r4)
	mtspr SPRN_CIABR, r7
	mtspr SPRN_TAR, r8
	ld r5, VCPU_IC(r4)
	ld r8, VCPU_EBBHR(r4)
	mtspr SPRN_IC, r5
	mtspr SPRN_EBBHR, r8
	ld r5, VCPU_EBBRR(r4)
	ld r6, VCPU_BESCR(r4)
	lwz r7, VCPU_GUEST_PID(r4)
	ld r8, VCPU_WORT(r4)
	mtspr SPRN_EBBRR, r5
	mtspr SPRN_BESCR, r6
	mtspr SPRN_PID, r7
	mtspr SPRN_WORT, r8
BEGIN_FTR_SECTION
	/* POWER8-only registers */
	ld r5, VCPU_TCSCR(r4)
	ld r6, VCPU_ACOP(r4)
	ld r7, VCPU_CSIGR(r4)
	ld r8, VCPU_TACR(r4)
	mtspr SPRN_TCSCR, r5
	mtspr SPRN_ACOP, r6
	mtspr SPRN_CSIGR, r7
	mtspr SPRN_TACR, r8
	nop
FTR_SECTION_ELSE
	/* POWER9-only registers */
	ld r5, VCPU_TID(r4)
	ld r6, VCPU_PSSCR(r4)
	lbz r8, HSTATE_FAKE_SUSPEND(r13)
	oris r6, r6, PSSCR_EC@h	/* this makes the stop instruction trap to the HV */
	rldimi r6, r8, PSSCR_FAKE_SUSPEND_LG, 63 - PSSCR_FAKE_SUSPEND_LG
	ld r7, VCPU_HFSCR(r4)
	mtspr SPRN_TIDR, r5
	mtspr SPRN_PSSCR, r6
	mtspr SPRN_HFSCR, r7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
8:

	ld r5, VCPU_SPRG0(r4)
	ld r6, VCPU_SPRG1(r4)
	ld r7, VCPU_SPRG2(r4)
	ld r8, VCPU_SPRG3(r4)
	mtspr SPRN_SPRG0, r5
	mtspr SPRN_SPRG1, r6
	mtspr SPRN_SPRG2, r7
	mtspr SPRN_SPRG3, r8

	/* Load up DAR and DSISR */
	ld r5, VCPU_DAR(r4)
	lwz r6, VCPU_DSISR(r4)
	mtspr SPRN_DAR, r5
	mtspr SPRN_DSISR, r6

	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld r5,VCPU_AMR(r4)
	ld r6,VCPU_UAMOR(r4)
	li r7,-1
	mtspr SPRN_AMR,r5
	mtspr SPRN_UAMOR,r6
	mtspr SPRN_AMOR,r7

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz r5,VCPU_CTRL(r4)
	andi. r5,r5,1
	bne 4f
	mfspr r6,SPRN_CTRLF
	clrrdi r6,r6,1
	mtspr SPRN_CTRLT,r6
4:
	/* Secondary threads wait for primary to have done partition switch */
	ld r5, HSTATE_KVM_VCORE(r13)
	lbz r6, HSTATE_PTID(r13)
	cmpwi r6, 0
	beq 21f
	lbz r0, VCORE_IN_GUEST(r5)
	cmpwi r0, 0
	bne 21f
	HMT_LOW
20:	lwz r3, VCORE_ENTRY_EXIT(r5)
	cmpwi r3, 0x100
	bge no_switch_exit
	lbz r0, VCORE_IN_GUEST(r5)
	cmpwi r0, 0
	beq 20b
	HMT_MEDIUM
21:
	/* Set LPCR. */
	ld r8,VCORE_LPCR(r5)
	mtspr SPRN_LPCR,r8
	isync

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld r5,HSTATE_KVM_VCORE(r13)
	ld r6,VCORE_TB_OFFSET_APPL(r5)
	add r8,r8,r6
	mftb r7
	subf r3,r7,r8
	mtspr SPRN_DEC,r3

	/* Check if HDEC expires soon */
	mfspr r3, SPRN_HDEC
	EXTEND_HDEC(r3)
	cmpdi r3, 512	/* 1 microsecond */
	blt hdec_soon
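	/*
	 * The timebase on these CPUs ticks at 512 MHz, so 512 ticks is
	 * about 1 microsecond: if less than that remains in HDEC it is
	 * not worth entering the guest, and we take the exit path now.
	 */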

	ld r6, VCPU_KVM(r4)
	lbz r0, KVM_RADIX(r6)
	cmpwi r0, 0
	bne 9f

	/* For hash guest, clear out and reload the SLB */
BEGIN_MMU_FTR_SECTION
	/* Radix host won't have populated the SLB, so no need to clear */
	li r6, 0
	slbmte r6, r6
	PPC_SLBIA(6)
	ptesync
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)

	/* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
	lwz r5,VCPU_SLB_MAX(r4)
	cmpwi r5,0
	beq 9f
	mtctr r5
	addi r6,r4,VCPU_SLB
1:	ld r8,VCPU_SLB_E(r6)
	ld r9,VCPU_SLB_V(r6)
	slbmte r9,r8
	addi r6,r6,VCPU_SLB_SIZE
	bdnz 1b
9:

#ifdef CONFIG_KVM_XICS
	/* We are entering the guest on that thread, push VCPU to XIVE */
	ld r11, VCPU_XIVE_SAVED_STATE(r4)
	li r9, TM_QW1_OS
	lwz r8, VCPU_XIVE_CAM_WORD(r4)
	cmpwi r8, 0
	beq no_xive
	li r7, TM_QW1_OS + TM_WORD2
	mfmsr r0
	andi. r0, r0, MSR_DR	/* in real mode? */
	beq 2f
	ld r10, HSTATE_XIVE_TIMA_VIRT(r13)
	cmpldi cr1, r10, 0
	beq cr1, no_xive
	eieio
	stdx r11,r9,r10
	stwx r8,r7,r10
	b 3f
2:	ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
	cmpldi cr1, r10, 0
	beq cr1, no_xive
	eieio
	stdcix r11,r9,r10
	stwcix r8,r7,r10
3:	li r9, 1
	stb r9, VCPU_XIVE_PUSHED(r4)
	eieio

	/*
	 * We clear the irq_pending flag. There is a small chance of a
	 * race vs. the escalation interrupt happening on another
	 * processor setting it again, but the only consequence is to
	 * cause a spurious wakeup on the next H_CEDE, which is not an
	 * issue.
	 */
	li r0,0
	stb r0, VCPU_IRQ_PENDING(r4)

	/*
	 * In single escalation mode, if the escalation interrupt is
	 * on, we mask it.
	 */
	lbz r0, VCPU_XIVE_ESC_ON(r4)
	cmpwi cr1, r0,0
	beq cr1, 1f
	li r9, XIVE_ESB_SET_PQ_01
	beq 4f	/* in real mode? */
	ld r10, VCPU_XIVE_ESC_VADDR(r4)
	ldx r0, r10, r9
	b 5f
4:	ld r10, VCPU_XIVE_ESC_RADDR(r4)
	ldcix r0, r10, r9
5:	sync

	/* We have a possible subtle race here: The escalation interrupt might
	 * have fired and be on its way to the host queue while we mask it,
	 * and if we unmask it early enough (re-cede right away), there is
	 * a theoretical possibility that it fires again, thus landing in the
	 * target queue more than once, which is a big no-no.
	 *
	 * Fortunately, solving this is rather easy. If the above load setting
	 * PQ to 01 returns a previous value where P is set, then we know the
	 * escalation interrupt is somewhere on its way to the host. In that
	 * case we simply don't clear the xive_esc_on flag below. It will be
	 * eventually cleared by the handler for the escalation interrupt.
	 *
	 * Then, when doing a cede, we check that flag again before re-enabling
	 * the escalation interrupt, and if set, we abort the cede.
	 */
	andi. r0, r0, XIVE_ESB_VAL_P
	bne- 1f

	/* Now P is 0, we can clear the flag */
	li r0, 0
	stb r0, VCPU_XIVE_ESC_ON(r4)
1:
no_xive:
#endif /* CONFIG_KVM_XICS */

	li r0, 0
	stw r0, STACK_SLOT_SHORT_PATH(r1)

deliver_guest_interrupt:	/* r4 = vcpu, r13 = paca */
	/* Check if we can deliver an external or decrementer interrupt now */
	ld r0, VCPU_PENDING_EXC(r4)
BEGIN_FTR_SECTION
	/* On POWER9, also check for emulated doorbell interrupt */
	lbz r3, VCPU_DBELL_REQ(r4)
	or r0, r0, r3
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	cmpdi r0, 0
	beq 71f
	mr r3, r4
	bl kvmppc_guest_entry_inject_int
	ld r4, HSTATE_KVM_VCPU(r13)
71:
	ld r6, VCPU_SRR0(r4)
	ld r7, VCPU_SRR1(r4)
	mtspr SPRN_SRR0, r6
	mtspr SPRN_SRR1, r7

fast_guest_entry_c:
	ld r10, VCPU_PC(r4)
	ld r11, VCPU_MSR(r4)
	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl r11, r11, 63 - MSR_HV_LG, 1
	rotldi r11, r11, 1 + MSR_HV_LG
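	/*
	 * The rldicl/rotldi pair above rotates MSR_HV up to the top
	 * bit, masks it off, then rotates back: a compact way to clear
	 * one high MSR bit without loading a 64-bit mask constant.
	 */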
	ori r11, r11, MSR_ME

	ld r6, VCPU_CTR(r4)
	ld r7, VCPU_XER(r4)
	mtctr r6
	mtxer r7

/*
 * Required state:
 * R4 = vcpu
 * R10: value for HSRR0
 * R11: value for HSRR1
 * R13 = PACA
 */
fast_guest_return:
	li r0,0
	stb r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr SPRN_HSRR0,r10
	mtspr SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li r9, KVM_GUEST_MODE_GUEST_HV
	stb r9, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi r3, r4, VCPU_TB_GUEST
	bl kvmhv_accumulate_time
#endif

	/* Enter guest */

BEGIN_FTR_SECTION
	ld r5, VCPU_CFAR(r4)
	mtspr SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld r5, VCPU_LR(r4)
	mtlr r5

	ld r1, VCPU_GPR(R1)(r4)
	ld r5, VCPU_GPR(R5)(r4)
	ld r8, VCPU_GPR(R8)(r4)
	ld r9, VCPU_GPR(R9)(r4)
	ld r10, VCPU_GPR(R10)(r4)
	ld r11, VCPU_GPR(R11)(r4)
	ld r12, VCPU_GPR(R12)(r4)
	ld r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

/* Put a canary value into HDSISR so we can check for it later */
BEGIN_FTR_SECTION
	li r0, 0x7fff
	mtspr SPRN_HDSISR, r0
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld r6, VCPU_KVM(r4)
	lbz r7, KVM_SECURE_GUEST(r6)
	cmpdi r7, 0
	ld r6, VCPU_GPR(R6)(r4)
	ld r7, VCPU_GPR(R7)(r4)
	bne ret_to_ultra

	ld r0, VCPU_CR(r4)
	mtcr r0

	ld r0, VCPU_GPR(R0)(r4)
	ld r2, VCPU_GPR(R2)(r4)
	ld r3, VCPU_GPR(R3)(r4)
	ld r4, VCPU_GPR(R4)(r4)
	HRFI_TO_GUEST
	b .
/*
 * Use the UV_RETURN ultracall to return control back to the Ultravisor after
 * processing a hypercall or interrupt that was forwarded (a.k.a. reflected)
 * to the Hypervisor.
 *
 * All registers have already been loaded, except:
 *	R0 = hcall result
 *	R2 = SRR1, so UV can detect a synthesized interrupt (if any)
 *	R3 = UV_RETURN
 */
ret_to_ultra:
	ld r0, VCPU_CR(r4)
	mtcr r0

	ld r0, VCPU_GPR(R3)(r4)
	mfspr r2, SPRN_SRR1
	li r3, 0
	ori r3, r3, UV_RETURN
	ld r4, VCPU_GPR(R4)(r4)
	sc 2
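	/*
	 * sc 2 is a system call with LEV=2, i.e. the ultracall
	 * instruction: control transfers to the ultravisor, which
	 * takes care of resuming the secure guest.
	 */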

/*
 * Enter the guest on a P9 or later system where we have exactly
 * one vcpu per vcore and we don't need to go to real mode
 * (which implies that host and guest are both using radix MMU mode).
 * r3 = vcpu pointer
 * Most SPRs and all the VSRs have been loaded already.
 */
_GLOBAL(__kvmhv_vcpu_entry_p9)
EXPORT_SYMBOL_GPL(__kvmhv_vcpu_entry_p9)
	mflr r0
	std r0, PPC_LR_STKOFF(r1)
	stdu r1, -SFS(r1)

	li r0, 1
	stw r0, STACK_SLOT_SHORT_PATH(r1)

	std r3, HSTATE_KVM_VCPU(r13)
	mfcr r4
	stw r4, SFS+8(r1)

	std r1, HSTATE_HOST_R1(r13)

	reg = 14
	.rept 18
	std reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
	reg = reg + 1
	.endr

	reg = 14
	.rept 18
	ld reg, __VCPU_GPR(reg)(r3)
	reg = reg + 1
	.endr
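	/*
	 * The two .rept blocks above expand to 18 std/ld pairs each:
	 * save host r14-r31 into the stack frame, then load the guest
	 * values from the vcpu struct.
	 */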

	mfmsr r10
	std r10, HSTATE_HOST_MSR(r13)

	mr r4, r3
	b fast_guest_entry_c
guest_exit_short_path:
	/*
	 * Malicious or buggy radix guests may have inserted SLB entries
	 * (only 0..3 because radix always runs with UPRT=1), so these must
	 * be cleared here to avoid side-channels. slbmte is used rather
	 * than slbia, as it won't clear cached translations.
	 */
	li r0,0
	slbmte r0,r0
	li r4,1
	slbmte r0,r4
	li r4,2
	slbmte r0,r4
	li r4,3
	slbmte r0,r4

	li r0, KVM_GUEST_MODE_NONE
	stb r0, HSTATE_IN_GUEST(r13)

	reg = 14
	.rept 18
	std reg, __VCPU_GPR(reg)(r9)
	reg = reg + 1
	.endr

	reg = 14
	.rept 18
	ld reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
	reg = reg + 1
	.endr

	lwz r4, SFS+8(r1)
	mtcr r4

	mr r3, r12	/* trap number */

	addi r1, r1, SFS
	ld r0, PPC_LR_STKOFF(r1)
	mtlr r0

	/* If we are in real mode, do a rfid to get back to the caller */
	mfmsr r4
	andi. r5, r4, MSR_IR
	bnelr
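	/*
	 * Real-mode return: copy the transactional-state (TS) bits of
	 * the current MSR into the host MSR below so that the rfid
	 * does not wipe out a suspended transaction.
	 */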
	rldicl r5, r4, 64 - MSR_TS_S_LG, 62	/* extract TS field */
	mtspr SPRN_SRR0, r0
	ld r10, HSTATE_HOST_MSR(r13)
	rldimi r10, r5, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	mtspr SPRN_SRR1, r10
	RFI_TO_KERNEL
	b .

secondary_too_late:
	li r12, 0
	stw r12, STACK_SLOT_TRAP(r1)
	cmpdi r4, 0
	beq 11f
	stw r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi r3, r4, VCPU_TB_RMEXIT
	bl kvmhv_accumulate_time
#endif
11:	b kvmhv_switch_to_host

no_switch_exit:
	HMT_MEDIUM
	li r12, 0
	b 12f
hdec_soon:
	li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
12:	stw r12, VCPU_TRAP(r4)
	mr r9, r4
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi r3, r4, VCPU_TB_RMEXIT
	bl kvmhv_accumulate_time
#endif
	b guest_bypass

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12 = (guest CR << 32) | interrupt vector
	 * R13 = PACA
	 * guest R12 saved in shadow VCPU SCRATCH0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std r9, HSTATE_SCRATCH2(r13)
	lbz r9, HSTATE_IN_GUEST(r13)
	cmpwi r9, KVM_GUEST_MODE_HOST_HV
	beq kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi r9, KVM_GUEST_MODE_GUEST
	ld r9, HSTATE_SCRATCH2(r13)
	beq kvmppc_interrupt_pr
#endif
	/* We're now back in the host but in guest MMU context */
	li r9, KVM_GUEST_MODE_HOST_HV
	stb r9, HSTATE_IN_GUEST(r13)

	ld r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std r0, VCPU_GPR(R0)(r9)
	std r1, VCPU_GPR(R1)(r9)
	std r2, VCPU_GPR(R2)(r9)
	std r3, VCPU_GPR(R3)(r9)
	std r4, VCPU_GPR(R4)(r9)
	std r5, VCPU_GPR(R5)(r9)
	std r6, VCPU_GPR(R6)(r9)
	std r7, VCPU_GPR(R7)(r9)
	std r8, VCPU_GPR(R8)(r9)
	ld r0, HSTATE_SCRATCH2(r13)
	std r0, VCPU_GPR(R9)(r9)
	std r10, VCPU_GPR(R10)(r9)
	std r11, VCPU_GPR(R11)(r9)
	ld r3, HSTATE_SCRATCH0(r13)
	std r3, VCPU_GPR(R12)(r9)
	/* CR is in the high half of r12 */
	srdi r4, r12, 32
	std r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld r3, HSTATE_CFAR(r13)
	std r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld r4, HSTATE_PPR(r13)
	std r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld r1, HSTATE_HOST_R1(r13)
	ld r2, PACATOC(r13)

	mfspr r10, SPRN_SRR0
	mfspr r11, SPRN_SRR1
	std r10, VCPU_SRR0(r9)
	std r11, VCPU_SRR1(r9)
	/* trap is in the low half of r12, clear CR from the high half */
	clrldi r12, r12, 32
	andi. r0, r12, 2	/* need to read HSRR0/1? */
	beq 1f
	mfspr r10, SPRN_HSRR0
	mfspr r11, SPRN_HSRR1
	clrrdi r12, r12, 2
1:	std r10, VCPU_PC(r9)
	std r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr r4
	std r3, VCPU_GPR(R13)(r9)
	std r4, VCPU_LR(r9)

	stw r12,VCPU_TRAP(r9)

	/*
	 * Now that we have saved away SRR0/1 and HSRR0/1,
	 * interrupts are recoverable in principle, so set MSR_RI.
	 * This becomes important for relocation-on interrupts from
	 * the guest, which we can get in radix mode on POWER9.
	 */
	li r0, MSR_RI
	mtmsrd r0, 1

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi r3, r9, VCPU_TB_RMINTR
	mr r4, r9
	bl kvmhv_accumulate_time
	ld r5, VCPU_GPR(R5)(r9)
	ld r6, VCPU_GPR(R6)(r9)
	ld r7, VCPU_GPR(R7)(r9)
	ld r8, VCPU_GPR(R8)(r9)
#endif

	/* Save HEIR (HV emulation assist reg) in emul_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li r3,KVM_INST_FETCH_FAILED
	stw r3,VCPU_LAST_INST(r9)
	cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne 11f
	mfspr r3,SPRN_HEIR
11:	stw r3,VCPU_HEIR(r9)

	/* these are volatile across C function calls */
	mfctr r3
	mfxer r4
	std r3, VCPU_CTR(r9)
	std r4, VCPU_XER(r9)

	/* Save more register state */
	mfdar r3
	mfdsisr r4
	std r3, VCPU_DAR(r9)
	stw r4, VCPU_DSISR(r9)

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq kvmppc_hdsi
	std r3, VCPU_FAULT_DAR(r9)
	stw r4, VCPU_FAULT_DSISR(r9)
	cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq kvmppc_hisi

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* For softpatch interrupt, go off and do TM instruction emulation */
	cmpwi r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
	beq kvmppc_tm_emul
#endif

	/* See if this is a leftover HDEC interrupt */
	cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne 2f
	mfspr r3,SPRN_HDEC
	EXTEND_HDEC(r3)
	cmpdi r3,0
	mr r4,r9
	bge fast_guest_return
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
	beq hcall_try_real_mode

	/* Hypervisor doorbell - exit only if host IPI flag set */
	cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
	bne 3f
BEGIN_FTR_SECTION
	PPC_MSGSYNC
	lwsync
	/* always exit if we're running a nested guest */
	ld r0, VCPU_NESTED(r9)
	cmpdi r0, 0
	bne guest_exit_cont
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	lbz r0, HSTATE_HOST_IPI(r13)
	cmpwi r0, 0
	beq maybe_reenter_guest
	b guest_exit_cont
3:
	/* If it's a hypervisor facility unavailable interrupt, save HFSCR */
	cmpwi r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL
	bne 14f
	mfspr r3, SPRN_HFSCR
	std r3, VCPU_HFSCR(r9)
	b guest_exit_cont
14:
	/* External interrupt ? */
	cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
	beq kvmppc_guest_external
	/* See if it is a machine check */
	cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq machine_check_realmode
	/* Or a hypervisor maintenance interrupt */
	cmpwi r12, BOOK3S_INTERRUPT_HMI
	beq hmi_realmode

guest_exit_cont:	/* r9 = vcpu, r12 = trap, r13 = paca */

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi r3, r9, VCPU_TB_RMEXIT
	mr r4, r9
	bl kvmhv_accumulate_time
#endif
#ifdef CONFIG_KVM_XICS
	/* We are exiting, pull the VP from the XIVE */
	lbz r0, VCPU_XIVE_PUSHED(r9)
	cmpwi cr0, r0, 0
	beq 1f
	li r7, TM_SPC_PULL_OS_CTX
	li r6, TM_QW1_OS
	mfmsr r0
	andi. r0, r0, MSR_DR	/* in real mode? */
	beq 2f
	ld r10, HSTATE_XIVE_TIMA_VIRT(r13)
	cmpldi cr0, r10, 0
	beq 1f
	/* First load to pull the context, we ignore the value */
	eieio
	lwzx r11, r7, r10
	/* Second load to recover the context state (Words 0 and 1) */
	ldx r11, r6, r10
	b 3f
2:	ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
	cmpldi cr0, r10, 0
	beq 1f
	/* First load to pull the context, we ignore the value */
	eieio
	lwzcix r11, r7, r10
	/* Second load to recover the context state (Words 0 and 1) */
	ldcix r11, r6, r10
3:	std r11, VCPU_XIVE_SAVED_STATE(r9)
	/* Fixup some of the state for the next load */
	li r10, 0
	li r0, 0xff
	stb r10, VCPU_XIVE_PUSHED(r9)
	stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
	stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
	eieio
1:
#endif /* CONFIG_KVM_XICS */

	/*
	 * Possibly flush the link stack here, before we do a blr in
	 * guest_exit_short_path.
	 */
1:	nop
	patch_site 1b patch__call_kvm_flush_link_stack

	/* If we came in through the P9 short path, go back out to C now */
	lwz r0, STACK_SLOT_SHORT_PATH(r1)
	cmpwi r0, 0
	bne guest_exit_short_path

	/* For hash guest, read the guest SLB and save it away */
	ld r5, VCPU_KVM(r9)
	lbz r0, KVM_RADIX(r5)
	li r5, 0
	cmpwi r0, 0
	bne 0f	/* for radix, save 0 entries */
	lwz r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr r0
	li r6,0
	addi r7,r9,VCPU_SLB
1:	slbmfee r8,r6
	andis. r0,r8,SLB_ESID_V@h
	beq 2f
	add r8,r8,r6	/* put index in */
	slbmfev r3,r6
	std r8,VCPU_SLB_E(r7)
	std r3,VCPU_SLB_V(r7)
	addi r7,r7,VCPU_SLB_SIZE
	addi r5,r5,1
2:	addi r6,r6,1
	bdnz 1b
	/* Finally clear out the SLB */
	li r0,0
	slbmte r0,r0
	PPC_SLBIA(6)
	ptesync
	stw r5,VCPU_SLB_MAX(r9)

	/* load host SLB entries */
BEGIN_MMU_FTR_SECTION
	b guest_bypass
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	ld r8,PACA_SLBSHADOWPTR(r13)

	.rept SLB_NUM_BOLTED
	li r3, SLBSHADOW_SAVEAREA
	LDX_BE r5, r8, r3
	addi r3, r3, 8
	LDX_BE r6, r8, r3
	andis. r7,r5,SLB_ESID_V@h
	beq 1f
	slbmte r6,r5
1:	addi r8,r8,16
	.endr
	b guest_bypass

0:	/*
	 * Sanitise radix guest SLB, see guest_exit_short_path comment.
	 * We clear vcpu->arch.slb_max to match earlier behaviour.
	 */
	li r0,0
	stw r0,VCPU_SLB_MAX(r9)
	slbmte r0,r0
	li r4,1
	slbmte r0,r4
	li r4,2
	slbmte r0,r4
	li r4,3
	slbmte r0,r4

guest_bypass:
	stw r12, STACK_SLOT_TRAP(r1)

	/* Save DEC */
	/* Do this before kvmhv_commence_exit so we know TB is guest TB */
	ld r3, HSTATE_KVM_VCORE(r13)
	mfspr r5,SPRN_DEC
	mftb r6
	/* On P9, if the guest has large decr enabled, don't sign extend */
BEGIN_FTR_SECTION
	ld r4, VCORE_LPCR(r3)
	andis. r4, r4, LPCR_LD@h
	bne 16f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	extsw r5,r5
16:	add r5,r5,r6
	/* r5 is a guest timebase value here, convert to host TB */
	ld r4,VCORE_TB_OFFSET_APPL(r3)
	subf r5,r4,r5
	std r5,VCPU_DEC_EXPIRES(r9)

	/* Increment exit count, poke other threads to exit */
	mr r3, r12
	bl kvmhv_commence_exit
	nop
	ld r9, HSTATE_KVM_VCPU(r13)

	/* Stop others sending VCPU interrupts to this physical CPU */
	li r0, -1
	stw r0, VCPU_CPU(r9)
	stw r0, VCPU_THREAD_CPU(r9)

	/* Save guest CTRL register, set runlatch to 1 */
	mfspr r6,SPRN_CTRLF
	stw r6,VCPU_CTRL(r9)
	andi. r0,r6,1
	bne 4f
	ori r6,r6,1
	mtspr SPRN_CTRLT,r6
4:
	/*
	 * Save the guest PURR/SPURR
	 */
	mfspr r5,SPRN_PURR
	mfspr r6,SPRN_SPURR
	ld r7,VCPU_PURR(r9)
	ld r8,VCPU_SPURR(r9)
	std r5,VCPU_PURR(r9)
	std r6,VCPU_SPURR(r9)
	subf r5,r7,r5
	subf r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld r3,HSTATE_PURR(r13)
	ld r4,HSTATE_SPURR(r13)
	add r3,r3,r5
	add r4,r4,r6
	mtspr SPRN_PURR,r3
	mtspr SPRN_SPURR,r4

BEGIN_FTR_SECTION
	b 8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Save POWER8-specific registers */
	mfspr r5, SPRN_IAMR
	mfspr r6, SPRN_PSPB
	mfspr r7, SPRN_FSCR
	std r5, VCPU_IAMR(r9)
	stw r6, VCPU_PSPB(r9)
	std r7, VCPU_FSCR(r9)
	mfspr r5, SPRN_IC
	mfspr r7, SPRN_TAR
	std r5, VCPU_IC(r9)
	std r7, VCPU_TAR(r9)
	mfspr r8, SPRN_EBBHR
	std r8, VCPU_EBBHR(r9)
	mfspr r5, SPRN_EBBRR
	mfspr r6, SPRN_BESCR
	mfspr r7, SPRN_PID
	mfspr r8, SPRN_WORT
	std r5, VCPU_EBBRR(r9)
	std r6, VCPU_BESCR(r9)
	stw r7, VCPU_GUEST_PID(r9)
	std r8, VCPU_WORT(r9)
BEGIN_FTR_SECTION
	mfspr r5, SPRN_TCSCR
	mfspr r6, SPRN_ACOP
	mfspr r7, SPRN_CSIGR
	mfspr r8, SPRN_TACR
	std r5, VCPU_TCSCR(r9)
	std r6, VCPU_ACOP(r9)
	std r7, VCPU_CSIGR(r9)
	std r8, VCPU_TACR(r9)
FTR_SECTION_ELSE
	mfspr r5, SPRN_TIDR
	mfspr r6, SPRN_PSSCR
	std r5, VCPU_TID(r9)
	rldicl r6, r6, 4, 50	/* r6 &= PSSCR_GUEST_VIS */
	rotldi r6, r6, 60
	std r6, VCPU_PSSCR(r9)
	/* Restore host HFSCR value */
	ld r7, STACK_SLOT_HFSCR(r1)
	mtspr SPRN_HFSCR, r7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
BEGIN_FTR_SECTION
	ld r5, STACK_SLOT_FSCR(r1)
	mtspr SPRN_FSCR, r5
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/*
	 * Restore various registers to 0, where non-zero values
	 * set by the guest could disrupt the host.
	 */
	li r0, 0
	mtspr SPRN_PSPB, r0
	mtspr SPRN_WORT, r0
BEGIN_FTR_SECTION
	mtspr SPRN_TCSCR, r0
	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
	li r0, 1
	sldi r0, r0, 31
	mtspr SPRN_MMCRS, r0
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	/* Save and restore AMR, IAMR and UAMOR before turning on the MMU */
	ld r8, STACK_SLOT_IAMR(r1)
	mtspr SPRN_IAMR, r8

8:	/* Power7 jumps back in here */
	mfspr r5,SPRN_AMR
	mfspr r6,SPRN_UAMOR
	std r5,VCPU_AMR(r9)
	std r6,VCPU_UAMOR(r9)
	ld r5,STACK_SLOT_AMR(r1)
	ld r6,STACK_SLOT_UAMOR(r1)
	mtspr SPRN_AMR, r5
	mtspr SPRN_UAMOR, r6

	/* Switch DSCR back to host value */
	mfspr r8, SPRN_DSCR
	ld r7, HSTATE_DSCR(r13)
	std r8, VCPU_DSCR(r9)
	mtspr SPRN_DSCR, r7

	/* Save non-volatile GPRs */
	std r14, VCPU_GPR(R14)(r9)
	std r15, VCPU_GPR(R15)(r9)
	std r16, VCPU_GPR(R16)(r9)
	std r17, VCPU_GPR(R17)(r9)
	std r18, VCPU_GPR(R18)(r9)
	std r19, VCPU_GPR(R19)(r9)
	std r20, VCPU_GPR(R20)(r9)
	std r21, VCPU_GPR(R21)(r9)
	std r22, VCPU_GPR(R22)(r9)
	std r23, VCPU_GPR(R23)(r9)
	std r24, VCPU_GPR(R24)(r9)
	std r25, VCPU_GPR(R25)(r9)
	std r26, VCPU_GPR(R26)(r9)
	std r27, VCPU_GPR(R27)(r9)
	std r28, VCPU_GPR(R28)(r9)
	std r29, VCPU_GPR(R29)(r9)
	std r30, VCPU_GPR(R30)(r9)
	std r31, VCPU_GPR(R31)(r9)

	/* Save SPRGs */
	mfspr r3, SPRN_SPRG0
	mfspr r4, SPRN_SPRG1
	mfspr r5, SPRN_SPRG2
	mfspr r6, SPRN_SPRG3
	std r3, VCPU_SPRG0(r9)
	std r4, VCPU_SPRG1(r9)
	std r5, VCPU_SPRG2(r9)
	std r6, VCPU_SPRG3(r9)

	/* save FP state */
	mr r3, r9
	bl kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Branch around the call if both CPU_FTR_TM and
 * CPU_FTR_P9_TM_HV_ASSIST are off.
 */
BEGIN_FTR_SECTION
	b 91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
	 */
	mr r3, r9
	ld r4, VCPU_MSR(r3)
	li r5, 0	/* don't preserve non-vol regs */
	bl kvmppc_save_tm_hv
	nop
	ld r9, HSTATE_KVM_VCPU(r13)
91:
#endif

	/* Increment yield count if they have a VPA */
	ld r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi r8, 0
	beq 25f
	li r4, LPPACA_YIELDCOUNT
	LWZX_BE r3, r8, r4
	addi r3, r3, 1
	STWX_BE r3, r8, r4
	li r3, 1
	stb r3, VCPU_VPA_DIRTY(r9)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	mr r3, r9
	li r4, 1
	beq 21f	/* if no VPA, save PMU stuff anyway */
	lbz r4, LPPACA_PMCINUSE(r8)
21:	bl kvmhv_save_guest_pmu
	ld r9, HSTATE_KVM_VCPU(r13)

	/* Restore host values of some registers */
BEGIN_FTR_SECTION
	ld r5, STACK_SLOT_CIABR(r1)
	ld r6, STACK_SLOT_DAWR0(r1)
	ld r7, STACK_SLOT_DAWRX0(r1)
	mtspr SPRN_CIABR, r5
	/*
	 * If the DAWR doesn't work, it's ok to write these here as
	 * this value should always be zero
	 */
	mtspr SPRN_DAWR0, r6
	mtspr SPRN_DAWRX0, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
BEGIN_FTR_SECTION
	ld r6, STACK_SLOT_DAWR1(r1)
	ld r7, STACK_SLOT_DAWRX1(r1)
	mtspr SPRN_DAWR1, r6
	mtspr SPRN_DAWRX1, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S | CPU_FTR_DAWR1)
BEGIN_FTR_SECTION
	ld r5, STACK_SLOT_TID(r1)
	ld r6, STACK_SLOT_PSSCR(r1)
	ld r7, STACK_SLOT_PID(r1)
	mtspr SPRN_TIDR, r5
	mtspr SPRN_PSSCR, r6
	mtspr SPRN_PID, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

#ifdef CONFIG_PPC_RADIX_MMU
	/*
	 * Are we running hash or radix ?
	 */
	ld r5, VCPU_KVM(r9)
	lbz r0, KVM_RADIX(r5)
	cmpwi cr2, r0, 0
	beq cr2, 2f

	/*
	 * Radix: do eieio; tlbsync; ptesync sequence in case we
	 * interrupted the guest between a tlbie and a ptesync.
	 */
	eieio
	tlbsync
	ptesync

BEGIN_FTR_SECTION
	/* Radix: Handle the case where the guest used an illegal PID */
	LOAD_REG_ADDR(r4, mmu_base_pid)
	lwz r3, VCPU_GUEST_PID(r9)
	lwz r5, 0(r4)
	cmpw cr0,r3,r5
	blt 2f

	/*
	 * Illegal PID, the HW might have prefetched and cached in the TLB
	 * some translations for the LPID 0 / guest PID combination which
	 * Linux doesn't know about, so we need to flush that PID out of
	 * the TLB. First we need to set LPIDR to 0 so tlbiel applies to
	 * the right context.
	 */
	li r0,0
	mtspr SPRN_LPID,r0
	isync

	/* Then do a congruence class local flush */
	ld r6,VCPU_KVM(r9)
	lwz r0,KVM_TLB_SETS(r6)
	mtctr r0
	li r7,0x400	/* IS field = 0b01 */
	ptesync
	sldi r0,r3,32	/* RS has PID */
1:	PPC_TLBIEL(7,0,2,1,1)	/* RIC=2, PRS=1, R=1 */
	addi r7,r7,0x1000
	bdnz 1b
	ptesync
END_FTR_SECTION_IFSET(CPU_FTR_P9_RADIX_PREFETCH_BUG)

2:
#endif /* CONFIG_PPC_RADIX_MMU */

	/*
	 * cp_abort is required if the processor supports local copy-paste
	 * to clear the copy buffer that was under control of the guest.
	 */
BEGIN_FTR_SECTION
	PPC_CP_ABORT
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31)

	/*
	 * POWER7/POWER8 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 * Here STACK_SLOT_TRAP(r1) contains the trap number.
	 */
kvmhv_switch_to_host:
	/* Secondary threads wait for primary to do partition switch */
	ld r5,HSTATE_KVM_VCORE(r13)
	ld r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz r3,HSTATE_PTID(r13)
	cmpwi r3,0
	beq 15f
	HMT_LOW
13:	lbz r3,VCORE_IN_GUEST(r5)
	cmpwi r3,0
	bne 13b
	HMT_MEDIUM
	b 16f

	/* Primary thread waits for all the secondaries to exit guest */
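	/*
	 * The rlwinm extracts the exit half (bits 8-15) of
	 * VCORE_ENTRY_EXIT and the clrldi the entry half (bits 0-7);
	 * we spin until they match, i.e. until every thread that
	 * entered the guest has also exited it.
	 */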
15:	lwz r3,VCORE_ENTRY_EXIT(r5)
	rlwinm r0,r3,32-8,0xff
	clrldi r3,r3,56
	cmpw r3,r0
	bne 15b
	isync

	/* Did we actually switch to the guest at all? */
	lbz r6, VCORE_IN_GUEST(r5)
	cmpwi r6, 0
	beq 19f

	/* Primary thread switches back to host partition */
	lwz r7,KVM_HOST_LPID(r4)
BEGIN_FTR_SECTION
	ld r6,KVM_HOST_SDR1(r4)
	li r8,LPID_RSVD	/* switch to reserved LPID */
	mtspr SPRN_LPID,r8
	ptesync
	mtspr SPRN_SDR1,r6	/* switch to host page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	mtspr SPRN_LPID,r7
	isync

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	mfspr r7, SPRN_DPDES
	mfspr r8, SPRN_VTB
	std r7, VCORE_DPDES(r5)
	std r8, VCORE_VTB(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	li r8, 0
	mtspr SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Subtract timebase offset from timebase */
	ld r8, VCORE_TB_OFFSET_APPL(r5)
	cmpdi r8,0
	beq 17f
	li r0, 0
	std r0, VCORE_TB_OFFSET_APPL(r5)
	mftb r6	/* current guest timebase */
	subf r8,r8,r6
	mtspr SPRN_TBU40,r8	/* update upper 40 bits */
	mftb r7	/* check if lower 24 bits overflowed */
	clrldi r6,r6,40
	clrldi r7,r7,40
	cmpld r7,r6
	bge 17f
	addis r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr SPRN_TBU40,r8

17:
	/*
	 * If this is an HMI, we called kvmppc_realmode_hmi_handler
	 * above, which may or may not have already called
	 * kvmppc_subcore_exit_guest. Fortunately, all that
	 * kvmppc_subcore_exit_guest does is clear a flag, so calling
	 * it again here is benign even if kvmppc_realmode_hmi_handler
	 * has already called it.
	 */
	bl kvmppc_subcore_exit_guest
	nop
30:	ld r5,HSTATE_KVM_VCORE(r13)
	ld r4,VCORE_KVM(r5)	/* pointer to struct kvm */

	/* Reset PCR */
	ld r0, VCORE_PCR(r5)
	LOAD_REG_IMMEDIATE(r6, PCR_MASK)
	cmpld r0, r6
	beq 18f
	mtspr SPRN_PCR, r6
18:
	/* Signal secondary CPUs to continue */
	li r0, 0
	stb r0,VCORE_IN_GUEST(r5)
19:	lis r8,0x7fff	/* MAX_INT@h */
	mtspr SPRN_HDEC,r8

16:	ld r8,KVM_HOST_LPCR(r4)
	mtspr SPRN_LPCR,r8
	isync

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Finish timing, if we have a vcpu */
	ld r4, HSTATE_KVM_VCPU(r13)
	cmpdi r4, 0
	li r3, 0
	beq 2f
	bl kvmhv_accumulate_time
2:
#endif
	/* Unset guest mode */
	li r0, KVM_GUEST_MODE_NONE
	stb r0, HSTATE_IN_GUEST(r13)

	lwz r12, STACK_SLOT_TRAP(r1)	/* return trap # in r12 */
	ld r0, SFS+PPC_LR_STKOFF(r1)
	addi r1, r1, SFS
	mtlr r0
	blr

.balign 32
.global kvm_flush_link_stack
kvm_flush_link_stack:
	/* Save LR into r0 */
	mflr r0

	/* Flush the link stack. On Power8 it's up to 32 entries in size. */
	.rept 32
	bl .+4
	.endr

	/* And on Power9 it's up to 64. */
BEGIN_FTR_SECTION
	.rept 32
	bl .+4
	.endr
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	/* Restore LR */
	mtlr r0
	blr

kvmppc_guest_external:
	/* External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now
	 */
	bl kvmppc_read_intr

	/*
	 * Restore the active volatile registers after returning from
	 * a C function.
	 */
	ld r9, HSTATE_KVM_VCPU(r13)
	li r12, BOOK3S_INTERRUPT_EXTERNAL

	/*
	 * kvmppc_read_intr return codes:
	 *
	 * Exit to host (r3 > 0)
	 *   1 An interrupt is pending that needs to be handled by the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *
	 *   2 Passthrough that needs completion in the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *     However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
	 *     to indicate to the host to complete handling the interrupt
	 *
	 * Before returning to the guest, we check if any CPU is heading out
	 * to the host and, if so, we head out also. If no CPUs are heading
	 * out, the return values <= 0 below apply.
2044 * Return to guest (r3 <= 0)
2045 * 0 No external interrupt is pending
2046 * -1 A guest wakeup IPI (which has now been cleared)
2047 * In either case, we return to guest to deliver any pending
2048 * guest interrupts.
2049 *
2050 * -2 A PCI passthrough external interrupt was handled
2051 * (interrupt was delivered directly to guest)
2052 * Return to guest to deliver any pending guest interrupts.
2053 */
2054
2055 cmpdi r3, 1
2056 ble 1f
2057
2058 /* Return code = 2 */
2059 li r12, BOOK3S_INTERRUPT_HV_RM_HARD
2060 stw r12, VCPU_TRAP(r9)
2061 b guest_exit_cont
2062
20631: /* Return code <= 1 */
2064 cmpdi r3, 0
2065 bgt guest_exit_cont
2066
2067 /* Return code <= 0 */
2068maybe_reenter_guest:
2069 ld r5, HSTATE_KVM_VCORE(r13)
2070 lwz r0, VCORE_ENTRY_EXIT(r5)
2071 cmpwi r0, 0x100
2072 mr r4, r9
2073 blt deliver_guest_interrupt
2074 b guest_exit_cont

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Softpatch interrupt for transactional memory emulation cases
 * on POWER9 DD2.2. This is early in the guest exit path - we
 * haven't saved registers or done a treclaim yet.
 */
kvmppc_tm_emul:
	/* Save instruction image in HEIR */
	mfspr	r3, SPRN_HEIR
	stw	r3, VCPU_HEIR(r9)

	/*
	 * The cases we want to handle here are those where the guest
	 * is in real suspend mode and is trying to transition to
	 * transactional mode.
	 */
	lbz	r0, HSTATE_FAKE_SUSPEND(r13)
	cmpwi	r0, 0		/* keep exiting guest if in fake suspend */
	bne	guest_exit_cont
	rldicl	r3, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r3, 1		/* or if not in suspend state */
	bne	guest_exit_cont

	/* Call C code to do the emulation */
	mr	r3, r9
	bl	kvmhv_p9_tm_emulation_early
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
	cmpwi	r3, 0
	beq	guest_exit_cont		/* continue exiting if not handled */
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	b	fast_interrupt_c_return	/* go back to guest if handled */
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
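/*
 * A loose C sketch of the filter above (guest_exit()/reenter_guest() are
 * placeholders for the branch targets; MSR_TM_SUSPENDED() tests the two
 * transaction-state bits for the suspended value):
 *
 *	vcpu->arch.emul_inst = mfspr(SPRN_HEIR);
 *	if (local_paca->kvm_hstate.fake_suspend ||
 *	    !MSR_TM_SUSPENDED(guest_msr))
 *		return guest_exit();		// not a case we emulate here
 *	if (kvmhv_p9_tm_emulation_early(vcpu))
 *		return reenter_guest();		// emulated, resume the guest
 *	return guest_exit();			// punt to the full handler
 */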

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path. In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
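/*
 * Schematically, the routine below decides (a hedged C paraphrase, with
 * the kvmppc_hpte_hv_fault() argument list elided):
 *
 *	if (kvm_is_radix(kvm)) {
 *		save DAR/DSISR/ASDR and exit to host;	// radix: no HPT search
 *	} else if (!(dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) {
 *		synthesize a DSI for the guest;
 *	} else switch (kvmppc_hpte_hv_fault(vcpu, ...)) {
 *	case 0:  retry the instruction;
 *	case -1: exit to host (handle in kernel mode);
 *	case -2: fetch the instruction word for MMIO emulation;
 *	default: synthesize a DSI with the returned DSISR value;
 *	}
 */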
kvmppc_hdsi:
	ld	r3, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r3)
	mfspr	r4, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
BEGIN_FTR_SECTION
	/* Look for DSISR canary. If we find it, retry instruction */
	cmpdi	r6, 0x7fff
	beq	6f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	cmpwi	r0, 0
	bne	.Lradix_hdsi		/* on radix, just save DAR/DSISR/ASDR */
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	beq	3f
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_ASDR		/* on POWER9, use ASDR to get VSID */
	b	4f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrrdi	r0, r4, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_DATA_SEGMENT
	bne	7f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	6f
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	beq	2f

	/* Synthesize a DSI (or DSegI) for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
	mr	r6, r3
1:	li	r0, BOOK3S_INTERRUPT_DATA_STORAGE
	mtspr	SPRN_DSISR, r6
7:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	mr	r10, r0
	bl	kvmppc_msr_interrupt
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	ld	r8, VCPU_XER(r9)
	mtctr	r7
	mtxer	r8
	mr	r4, r9
	b	fast_guest_return

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	b	4b

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	mfmsr	r3
	ori	r4, r3, MSR_DR		/* Enable paging for data */
	mtmsrd	r4
	lwz	r8, 0(r10)
	mtmsrd	r3

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)
	b	guest_exit_cont

.Lradix_hdsi:
	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)
.Lradix_hisi:
	mfspr	r5, SPRN_ASDR
	std	r5, VCPU_FAULT_GPA(r9)
	b	guest_exit_cont

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
kvmppc_hisi:
	ld	r3, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r3)
	cmpwi	r0, 0
	bne	.Lradix_hisi		/* for radix, just save ASDR */
	andis.	r0, r11, SRR1_ISI_NOPT@h
	beq	1f
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	beq	3f
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_ASDR		/* on POWER9, use ASDR to get VSID */
	b	4f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrrdi	r0, r10, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_INST_SEGMENT
	bne	7f			/* if no SLB entry found */
4:
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	mr	r4, r10
	mr	r6, r11
	li	r7, 0			/* instruction fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont

	/* Synthesize an ISI (or ISegI) for the guest */
	mr	r11, r3
1:	li	r0, BOOK3S_INTERRUPT_INST_STORAGE
7:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	mr	r10, r0
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
	b	4b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 *
 * r5 - r8 contain hcall args,
 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
 */
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	andi.	r0,r11,MSR_PR
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	/* sc 1 from nested guest - give it to L1 to handle */
	ld	r0, VCPU_NESTED(r9)
	cmpdi	r0, 0
	bne	guest_exit_cont
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	guest_exit_cont
	/* See if this hcall is enabled for in-kernel handling */
	ld	r4, VCPU_KVM(r9)
	srdi	r0, r3, 8	/* r0 = (r3 / 4) >> 6 */
	sldi	r0, r0, 3	/* index into kvm->arch.enabled_hcalls[] */
	add	r4, r4, r0
	ld	r0, KVM_ENABLED_HCALLS(r4)
	rlwinm	r4, r3, 32-2, 0x3f	/* r4 = (r3 / 4) & 0x3f */
	srd	r0, r0, r4
	andi.	r0, r0, 1
	beq	guest_exit_cont
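	/*
	 * The bit arithmetic above is a bitmap test, one bit per possible
	 * hcall number (hcall numbers are multiples of 4); in C
	 * (hypothetical helper, mirroring the asm exactly):
	 *
	 *	bool hcall_enabled(struct kvm *kvm, unsigned long req)
	 *	{
	 *		unsigned long word = (req / 4) >> 6;	// 64 hcalls per word
	 *		unsigned long bit  = (req / 4) & 0x3f;
	 *		return (kvm->arch.enabled_hcalls[word] >> bit) & 1;
	 *	}
	 */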
	/* Get pointer to handler, if any, and call it */
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwax	r3,r3,r4
	cmpwi	r3,0
	beq	guest_exit_cont
	add	r12,r3,r4
	mtctr	r12
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return
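	/*
	 * hcall_real_table is a table of 32-bit offsets relative to the
	 * table itself. Since hcall numbers are multiples of 4 and entries
	 * are 4 bytes, the hcall number doubles as the byte offset of its
	 * .long entry. Roughly, in C (sketch only; "arg4" and the handler
	 * cast are placeholders):
	 *
	 *	s32 off = *(s32 *)((char *)hcall_real_table + req);
	 *	if (!off)
	 *		return guest_exit();	// no real-mode handler
	 *	handler = (void *)((char *)hcall_real_table + off);
	 *	ret = handler(vcpu, arg4, ...);
	 *	if (ret == H_TOO_HARD)
	 *		return guest_exit();	// punt to virtual mode
	 *	guest r3 = ret;			// H_SUCCESS etc.
	 *	return reenter_guest();
	 */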

sc_1_fast_return:
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	li	r10, BOOK3S_INTERRUPT_SYSCALL
	bl	kvmppc_msr_interrupt
	mr	r4,r9
	b	fast_guest_return

	/* We've attempted a real mode hcall, but it has been punted back
	 * to userspace. We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)

	b	guest_exit_cont

	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	DOTSYM(kvmppc_h_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_enter) - hcall_real_table
	.long	DOTSYM(kvmppc_h_read) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
	.long	DOTSYM(kvmppc_h_protect) - hcall_real_table
#ifdef CONFIG_SPAPR_TCE_IOMMU
	.long	DOTSYM(kvmppc_h_get_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
#else
	.long	0		/* 0x1c */
	.long	0		/* 0x20 */
#endif
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_page_init) - hcall_real_table
	.long	0		/* 0x30 */
	.long	0		/* 0x34 */
	.long	0		/* 0x38 */
	.long	0		/* 0x3c */
	.long	0		/* 0x40 */
	.long	0		/* 0x44 */
	.long	0		/* 0x48 */
	.long	0		/* 0x4c */
	.long	0		/* 0x50 */
	.long	0		/* 0x54 */
	.long	0		/* 0x58 */
	.long	0		/* 0x5c */
	.long	0		/* 0x60 */
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
#else
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
#endif
	.long	0		/* 0x78 */
	.long	0		/* 0x7c */
	.long	0		/* 0x80 */
	.long	0		/* 0x84 */
	.long	0		/* 0x88 */
	.long	0		/* 0x8c */
	.long	0		/* 0x90 */
	.long	0		/* 0x94 */
	.long	0		/* 0x98 */
	.long	0		/* 0x9c */
	.long	0		/* 0xa0 */
	.long	0		/* 0xa4 */
	.long	0		/* 0xa8 */
	.long	0		/* 0xac */
	.long	0		/* 0xb0 */
	.long	0		/* 0xb4 */
	.long	0		/* 0xb8 */
	.long	0		/* 0xbc */
	.long	0		/* 0xc0 */
	.long	0		/* 0xc4 */
	.long	0		/* 0xc8 */
	.long	0		/* 0xcc */
	.long	0		/* 0xd0 */
	.long	0		/* 0xd4 */
	.long	0		/* 0xd8 */
	.long	0		/* 0xdc */
	.long	DOTSYM(kvmppc_h_cede) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
	.long	0		/* 0xe8 */
	.long	0		/* 0xec */
	.long	0		/* 0xf0 */
	.long	0		/* 0xf4 */
	.long	0		/* 0xf8 */
	.long	0		/* 0xfc */
	.long	0		/* 0x100 */
	.long	0		/* 0x104 */
	.long	0		/* 0x108 */
	.long	0		/* 0x10c */
	.long	0		/* 0x110 */
	.long	0		/* 0x114 */
	.long	0		/* 0x118 */
	.long	0		/* 0x11c */
	.long	0		/* 0x120 */
	.long	DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
	.long	0		/* 0x128 */
	.long	0		/* 0x12c */
	.long	0		/* 0x130 */
	.long	DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
#ifdef CONFIG_SPAPR_TCE_IOMMU
	.long	DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
#else
	.long	0		/* 0x138 */
	.long	0		/* 0x13c */
#endif
	.long	0		/* 0x140 */
	.long	0		/* 0x144 */
	.long	0		/* 0x148 */
	.long	0		/* 0x14c */
	.long	0		/* 0x150 */
	.long	0		/* 0x154 */
	.long	0		/* 0x158 */
	.long	0		/* 0x15c */
	.long	0		/* 0x160 */
	.long	0		/* 0x164 */
	.long	0		/* 0x168 */
	.long	0		/* 0x16c */
	.long	0		/* 0x170 */
	.long	0		/* 0x174 */
	.long	0		/* 0x178 */
	.long	0		/* 0x17c */
	.long	0		/* 0x180 */
	.long	0		/* 0x184 */
	.long	0		/* 0x188 */
	.long	0		/* 0x18c */
	.long	0		/* 0x190 */
	.long	0		/* 0x194 */
	.long	0		/* 0x198 */
	.long	0		/* 0x19c */
	.long	0		/* 0x1a0 */
	.long	0		/* 0x1a4 */
	.long	0		/* 0x1a8 */
	.long	0		/* 0x1ac */
	.long	0		/* 0x1b0 */
	.long	0		/* 0x1b4 */
	.long	0		/* 0x1b8 */
	.long	0		/* 0x1bc */
	.long	0		/* 0x1c0 */
	.long	0		/* 0x1c4 */
	.long	0		/* 0x1c8 */
	.long	0		/* 0x1cc */
	.long	0		/* 0x1d0 */
	.long	0		/* 0x1d4 */
	.long	0		/* 0x1d8 */
	.long	0		/* 0x1dc */
	.long	0		/* 0x1e0 */
	.long	0		/* 0x1e4 */
	.long	0		/* 0x1e8 */
	.long	0		/* 0x1ec */
	.long	0		/* 0x1f0 */
	.long	0		/* 0x1f4 */
	.long	0		/* 0x1f8 */
	.long	0		/* 0x1fc */
	.long	0		/* 0x200 */
	.long	0		/* 0x204 */
	.long	0		/* 0x208 */
	.long	0		/* 0x20c */
	.long	0		/* 0x210 */
	.long	0		/* 0x214 */
	.long	0		/* 0x218 */
	.long	0		/* 0x21c */
	.long	0		/* 0x220 */
	.long	0		/* 0x224 */
	.long	0		/* 0x228 */
	.long	0		/* 0x22c */
	.long	0		/* 0x230 */
	.long	0		/* 0x234 */
	.long	0		/* 0x238 */
	.long	0		/* 0x23c */
	.long	0		/* 0x240 */
	.long	0		/* 0x244 */
	.long	0		/* 0x248 */
	.long	0		/* 0x24c */
	.long	0		/* 0x250 */
	.long	0		/* 0x254 */
	.long	0		/* 0x258 */
	.long	0		/* 0x25c */
	.long	0		/* 0x260 */
	.long	0		/* 0x264 */
	.long	0		/* 0x268 */
	.long	0		/* 0x26c */
	.long	0		/* 0x270 */
	.long	0		/* 0x274 */
	.long	0		/* 0x278 */
	.long	0		/* 0x27c */
	.long	0		/* 0x280 */
	.long	0		/* 0x284 */
	.long	0		/* 0x288 */
	.long	0		/* 0x28c */
	.long	0		/* 0x290 */
	.long	0		/* 0x294 */
	.long	0		/* 0x298 */
	.long	0		/* 0x29c */
	.long	0		/* 0x2a0 */
	.long	0		/* 0x2a4 */
	.long	0		/* 0x2a8 */
	.long	0		/* 0x2ac */
	.long	0		/* 0x2b0 */
	.long	0		/* 0x2b4 */
	.long	0		/* 0x2b8 */
	.long	0		/* 0x2bc */
	.long	0		/* 0x2c0 */
	.long	0		/* 0x2c4 */
	.long	0		/* 0x2c8 */
	.long	0		/* 0x2cc */
	.long	0		/* 0x2d0 */
	.long	0		/* 0x2d4 */
	.long	0		/* 0x2d8 */
	.long	0		/* 0x2dc */
	.long	0		/* 0x2e0 */
	.long	0		/* 0x2e4 */
	.long	0		/* 0x2e8 */
	.long	0		/* 0x2ec */
	.long	0		/* 0x2f0 */
	.long	0		/* 0x2f4 */
	.long	0		/* 0x2f8 */
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
#else
	.long	0		/* 0x2fc - H_XIRR_X */
#endif
	.long	DOTSYM(kvmppc_h_random) - hcall_real_table
	.globl	hcall_real_table_end
hcall_real_table_end:

_GLOBAL(kvmppc_h_set_xdabr)
EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr)
	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
	beq	6f
	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
	andc.	r0, r5, r0
	beq	3f
6:	li	r3, H_PARAMETER
	blr

_GLOBAL(kvmppc_h_set_dabr)
EXPORT_SYMBOL_GPL(kvmppc_h_set_dabr)
	li	r5, DABRX_USER | DABRX_KERNEL
3:
BEGIN_FTR_SECTION
	b	2f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	stw	r5, VCPU_DABRX(r3)
	mtspr	SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	mfspr	r5, SPRN_DABR
	cmpd	r4, r5
	bne	1b
	isync
	li	r3,0
	blr
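	/*
	 * The write-and-verify loop above is, in effect (C sketch):
	 *
	 *	do {
	 *		mtspr(SPRN_DABR, dabr);
	 *	} while (mfspr(SPRN_DABR) != dabr);	// P7 erratum: retry
	 */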

2:
	LOAD_REG_ADDR(r11, dawr_force_enable)
	lbz	r11, 0(r11)
	cmpdi	r11, 0
	bne	3f
	li	r3, H_HARDWARE
	blr
3:
	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi	r5, r4, 2, DAWRX_WT
	clrrdi	r4, r4, 3
	std	r4, VCPU_DAWR0(r3)
	std	r5, VCPU_DAWRX0(r3)
	/*
	 * If we came in through the real mode hcall handler, then it is
	 * necessary to write the registers, since the return path won't.
	 * Otherwise it is sufficient to store them in the vcpu struct;
	 * they will be loaded the next time the vcpu is run.
	 */
	mfmsr	r6
	andi.	r6, r6, MSR_DR		/* in real mode? */
	bne	4f
	mtspr	SPRN_DAWR0, r4
	mtspr	SPRN_DAWRX0, r5
4:	li	r3, 0
	blr

_GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
	ori	r11,r11,MSR_EE
	std	r11,VCPU_MSR(r3)
	li	r0,1
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	cmpwi	r5,0
	bne	kvm_cede_prodded
	li	r12,0		/* set trap to 0 to say hcall is handled */
	stw	r12,VCPU_TRAP(r3)
	li	r0,H_SUCCESS
	std	r0,VCPU_GPR(R3)(r3)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	clrldi	r8,r8,56
	li	r0,1
	sld	r0,r0,r6
	addi	r6,r5,VCORE_NAPPING_THREADS
31:	lwarx	r4,0,r6
	or	r4,r4,r0
	cmpw	r4,r8
	beq	kvm_cede_exit
	stwcx.	r4,0,r6
	bne	31b
	/* order napping_threads update vs testing entry_exit_map */
	isync
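	/*
	 * The lwarx/stwcx. loop is a lock-free read-modify-write; a hedged
	 * C equivalent (assuming, per the asm, that the low byte of
	 * entry_exit_map is the mask of threads that entered the guest):
	 *
	 *	u32 old, all = vc->entry_exit_map & 0xff;
	 *	do {
	 *		old = vc->napping_threads;
	 *		if ((old | (1u << ptid)) == all)
	 *			return kvm_cede_exit();	// we'd be last awake
	 *	} while (cmpxchg(&vc->napping_threads, old,
	 *			 old | (1u << ptid)) != old);
	 */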
	li	r0,NAPPING_CEDE
	stb	r0,HSTATE_NAPPING(r13)
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	cmpwi	r7,0x100
	bge	33f		/* another thread already exiting */

/*
 * Although not specifically required by the architecture, POWER7
 * preserves the following registers in nap mode, even if an SMT mode
 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
 */
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)

	/* save FP state */
	bl	kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Branch around the call if both CPU_FTR_TM and
 * CPU_FTR_P9_TM_HV_ASSIST are off.
 */
BEGIN_FTR_SECTION
	b	91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
	 */
	ld	r3, HSTATE_KVM_VCPU(r13)
	ld	r4, VCPU_MSR(r3)
	li	r5, 0			/* don't preserve non-vol regs */
	bl	kvmppc_save_tm_hv
	nop
91:
#endif

	/*
	 * Set DEC to the smaller of DEC and HDEC, so that we wake
	 * no later than the end of our timeslice (HDEC interrupts
	 * don't wake us from nap).
	 */
	mfspr	r3, SPRN_DEC
	mfspr	r4, SPRN_HDEC
	mftb	r5
BEGIN_FTR_SECTION
	/* On P9 check whether the guest has large decrementer mode enabled */
	ld	r6, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_LPCR(r6)
	andis.	r6, r6, LPCR_LD@h
	bne	68f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	extsw	r3, r3
68:	EXTEND_HDEC(r4)
	cmpd	r3, r4
	ble	67f
	mtspr	SPRN_DEC, r4
67:
	/* save expiry time of guest decrementer */
	add	r3, r3, r5
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET_APPL(r5)
	subf	r3, r6, r3	/* convert to host TB value */
	std	r3, VCPU_DEC_EXPIRES(r4)
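	/*
	 * As a C sketch (hedged; dec/hdec are signed 64-bit values, and the
	 * sign extension only applies when the large decrementer is off):
	 *
	 *	s64 dec  = large_dec ? mfspr(DEC)  : (s64)(s32)mfspr(DEC);
	 *	s64 hdec = p9        ? mfspr(HDEC) : (s64)(s32)mfspr(HDEC);
	 *	if (dec > hdec)
	 *		mtspr(SPRN_DEC, hdec);	// wake by timeslice end
	 *	vcpu->arch.dec_expires = dec + mftb()		// guest TB
	 *				 - vc->tb_offset_applied; // as host TB
	 */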

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_CEDE
	bl	kvmhv_accumulate_time
#endif

	lis	r3, LPCR_PECEDP@h	/* Do wake on privileged doorbell */

	/* Go back to host stack */
	ld	r1, HSTATE_HOST_R1(r13)

	/*
	 * Take a nap until a decrementer or external or doorbell interrupt
	 * occurs, with PECE1 and PECE0 set in LPCR.
	 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
	 * Also clear the runlatch bit before napping.
	 */
kvm_do_nap:
	mfspr	r0, SPRN_CTRLF
	clrrdi	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	li	r0,1
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	mfspr	r5,SPRN_LPCR
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
BEGIN_FTR_SECTION
	ori	r5, r5, LPCR_PECEDH
	rlwimi	r5, r3, 0, LPCR_PECEDP
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

kvm_nap_sequence:		/* desired LPCR value in r5 */
BEGIN_FTR_SECTION
	/*
	 * PSSCR bits:	exit criterion = 1 (wakeup based on LPCR at sreset)
	 *		enable state loss = 1 (allow SMT mode switch)
	 *		requested level = 0 (just stop dispatching)
	 */
	lis	r3, (PSSCR_EC | PSSCR_ESL)@h
	/* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
	li	r4, LPCR_PECE_HVEE@higher
	sldi	r4, r4, 32
	or	r5, r5, r4
FTR_SECTION_ELSE
	li	r3, PNV_THREAD_NAP
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPCR,r5
	isync
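	/*
	 * In outline (hedged C; the *_mayloss routines are the real idle
	 * entry points, everything else paraphrases the asm above):
	 *
	 *	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
	 *		lpcr |= LPCR_PECE_HVEE;		// wake on HV irqs too
	 *		mtspr(SPRN_LPCR, lpcr);
	 *		isa300_idle_stop_mayloss(PSSCR_EC | PSSCR_ESL);
	 *	} else {
	 *		mtspr(SPRN_LPCR, lpcr);
	 *		isa206_idle_insn_mayloss(PNV_THREAD_NAP);
	 *	}
	 */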

BEGIN_FTR_SECTION
	bl	isa300_idle_stop_mayloss
FTR_SECTION_ELSE
	bl	isa206_idle_insn_mayloss
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)

	mfspr	r0, SPRN_CTRLF
	ori	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	mtspr	SPRN_SRR1, r3

	li	r0, 0
	stb	r0, PACA_FTRACE_ENABLED(r13)

	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)

	lbz	r0, HSTATE_NAPPING(r13)
	cmpwi	r0, NAPPING_CEDE
	beq	kvm_end_cede
	cmpwi	r0, NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup
BEGIN_FTR_SECTION
	cmpwi	r0, NAPPING_UNSPLIT
	beq	kvm_unsplit_wakeup
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	twi	31,0,0		/* Nap state must not be zero */

33:	mr	r4, r3
	li	r3, 0
	li	r12, 0
	b	34f

kvm_end_cede:
	/* Woken by external or decrementer interrupt */

	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMINTR
	bl	kvmhv_accumulate_time
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Branch around the call if both CPU_FTR_TM and
 * CPU_FTR_P9_TM_HV_ASSIST are off.
 */
BEGIN_FTR_SECTION
	b	91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
	 */
	mr	r3, r4
	ld	r4, VCPU_MSR(r3)
	li	r5, 0			/* don't preserve non-vol regs */
	bl	kvmppc_restore_tm_hv
	nop
	ld	r4, HSTATE_KVM_VCPU(r13)
91:
#endif

	/* load up FP state */
	bl	kvmppc_load_fp

	/* Restore guest decrementer */
	ld	r3, VCPU_DEC_EXPIRES(r4)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET_APPL(r5)
	add	r3, r3, r6	/* convert host TB to guest TB value */
	mftb	r7
	subf	r3, r7, r3
	mtspr	SPRN_DEC, r3

	/* Load NV GPRS */
	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	/*
	 * Restore the volatile registers, since we may have called a
	 * C routine in kvmppc_check_wake_reason.
	 * r4 = VCPU
	 * r3 tells us whether we need to return to host or not
	 * WARNING: r3 is checked further down; do not modify it
	 * until that check is done.
	 */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* clear our bit in vcore->napping_threads */
34:	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r7,HSTATE_PTID(r13)
	li	r0,1
	sld	r0,r0,r7
	addi	r6,r5,VCORE_NAPPING_THREADS
32:	lwarx	r7,0,r6
	andc	r7,r7,r0
	stwcx.	r7,0,r6
	bne	32b
	li	r0,0
	stb	r0,HSTATE_NAPPING(r13)

	/* See if the wake reason saved in r3 means we need to exit */
	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
	cmpdi	r3, 0
	bgt	guest_exit_cont
	b	maybe_reenter_guest

	/* cede when already previously prodded case */
kvm_cede_prodded:
	li	r0,0
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)
	li	r3,H_SUCCESS
	blr

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	ld	r9, HSTATE_KVM_VCPU(r13)
#ifdef CONFIG_KVM_XICS
	/* are we using XIVE with single escalation? */
	ld	r10, VCPU_XIVE_ESC_VADDR(r9)
	cmpdi	r10, 0
	beq	3f
	li	r6, XIVE_ESB_SET_PQ_00
	/*
	 * If we still have a pending escalation, abort the cede,
	 * and we must set PQ to 10 rather than 00 so that we don't
	 * potentially end up with two entries for the escalation
	 * interrupt in the XIVE interrupt queue. In that case
	 * we also don't want to set xive_esc_on to 1 here in
	 * case we race with xive_esc_irq().
	 */
	lbz	r5, VCPU_XIVE_ESC_ON(r9)
	cmpwi	r5, 0
	beq	4f
	li	r0, 0
	stb	r0, VCPU_CEDED(r9)
	/*
	 * The escalation interrupts are special as we don't EOI them.
	 * There is no need to use the load-after-store ordering offset
	 * to set PQ to 10 as we won't use StoreEOI.
	 */
	li	r6, XIVE_ESB_SET_PQ_10
	b	5f
4:	li	r0, 1
	stb	r0, VCPU_XIVE_ESC_ON(r9)
	/* make sure store to xive_esc_on is seen before xive_esc_irq runs */
	sync
5:	/* Enable XIVE escalation */
	mfmsr	r0
	andi.	r0, r0, MSR_DR		/* in real mode? */
	beq	1f
	ldx	r0, r10, r6
	b	2f
1:	ld	r10, VCPU_XIVE_ESC_RADDR(r9)
	ldcix	r0, r10, r6
2:	sync
#endif /* CONFIG_KVM_XICS */
3:	b	guest_exit_cont
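	/*
	 * The PQ update above is performed by an MMIO *load* from the ESB
	 * page at a mode-dependent offset; in loose C (field names mirror
	 * the asm offsets, not a real API):
	 *
	 *	if (vcpu->arch.xive_esc_on) {		// escalation pending
	 *		vcpu->arch.ceded = 0;		// abort the cede
	 *		off = XIVE_ESB_SET_PQ_10;	// leave it masked
	 *	} else {
	 *		vcpu->arch.xive_esc_on = 1;
	 *		smp_mb();			// order vs xive_esc_irq()
	 *		off = XIVE_ESB_SET_PQ_00;	// re-arm escalation
	 *	}
	 *	(void)load(esb_base + off);	// cache-inhibited in real mode
	 */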

	/* Try to do machine check recovery in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	kvmppc_realmode_machine_check
	nop
	/* all machine checks go to virtual mode for further handling */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	b	guest_exit_cont

/*
 * Call C code to handle a HMI in real mode.
 * Only the primary thread does the call, secondary threads are handled
 * by calling hmi_exception_realmode() after kvmppc_hv_entry returns.
 * r9 points to the vcpu on entry
 */
hmi_realmode:
	lbz	r0, HSTATE_PTID(r13)
	cmpwi	r0, 0
	bne	guest_exit_cont
	bl	kvmppc_realmode_hmi_handler
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_HMI
	b	guest_exit_cont

/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns (in r3):
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI or msgsnd)
 *	-2 if we handled a PCI passthrough interrupt (returned by
 *		kvmppc_read_intr only)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 * Modifies all volatile registers (since it may call a C function).
 * This routine calls kvmppc_read_intr, a C function, if an external
 * interrupt is pending.
 */
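/*
 * The wake reason lives in an SRR1 field; a hedged C paraphrase of the
 * decode below (the shift/mask follow the rlwinm, the constants are the
 * ISA wake reason codes):
 *
 *	reason = (srr1 >> 18) & (p8_or_later ? 0xf : 0xe);
 *	switch (reason) {
 *	case 0x8: return read_external_interrupt();	// may return -2..2
 *	case 0x6: return 0;				// decrementer
 *	case 0x5: return 0;				// privileged doorbell (P8)
 *	case 0x3: msgclr(); r12 = BOOK3S_INTERRUPT_H_DOORBELL;
 *		  return host_ipi ? 1 : -1;		// hypervisor doorbell
 *	case 0xa: r12 = BOOK3S_INTERRUPT_HMI; return 1;	// HMI
 *	default:  return 1;				// let the host sort it out
 *	}
 */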
kvmppc_check_wake_reason:
	mfspr	r6, SPRN_SRR1
BEGIN_FTR_SECTION
	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
FTR_SECTION_ELSE
	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 8			/* was it an external interrupt? */
	beq	7f			/* if so, see what it was */
	li	r3, 0
	li	r12, 0
	cmpwi	r6, 6			/* was it the decrementer? */
	beq	0f
BEGIN_FTR_SECTION
	cmpwi	r6, 5			/* privileged doorbell? */
	beq	0f
	cmpwi	r6, 3			/* hypervisor doorbell? */
	beq	3f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 0xa			/* Hypervisor maintenance? */
	beq	4f
	li	r3, 1			/* anything else, return 1 */
0:	blr

	/* hypervisor doorbell */
3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL

	/*
	 * Clear the doorbell as we will invoke the handler
	 * explicitly in the guest exit path.
	 */
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
	/* see if it's a host IPI */
	li	r3, 1
BEGIN_FTR_SECTION
	PPC_MSGSYNC
	lwsync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bnelr
	/* if not, return -1 */
	li	r3, -1
	blr

	/* Woken up due to Hypervisor maintenance interrupt */
4:	li	r12, BOOK3S_INTERRUPT_HMI
	li	r3, 1
	blr

	/* external interrupt - create a stack frame so we can call C */
7:	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -PPC_MIN_STKFRM(r1)
	bl	kvmppc_read_intr
	nop
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	cmpdi	r3, 1
	ble	1f

	/*
	 * Return code of 2 means PCI passthrough interrupt, but
	 * we need to return back to host to complete handling the
	 * interrupt. Trap reason is expected in r12 by guest
	 * exit code.
	 */
	li	r12, BOOK3S_INTERRUPT_HV_RM_HARD
1:
	ld	r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
	addi	r1, r1, PPC_MIN_STKFRM
	mtlr	r0
	blr

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_save_fp:
	mflr	r30
	mr	r31,r3
	mfmsr	r5
	ori	r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3,r3,VCPU_FPRS
	bl	store_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	store_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r31)
	mtlr	r30
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_load_fp:
	mflr	r30
	mr	r31,r4
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3,r4,VCPU_FPRS
	bl	load_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	load_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r31)
	mtspr	SPRN_VRSAVE,r7
	mtlr	r30
	mr	r4,r31
	blr

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save transactional state and TM-related registers.
 * Called with r3 pointing to the vcpu struct and r4 containing
 * the guest MSR value.
 * r5 is non-zero iff non-volatile register state needs to be maintained.
 * If r5 == 0, this can modify all checkpointed registers, but
 * restores r1 and r2 before exit.
 */
_GLOBAL_TOC(kvmppc_save_tm_hv)
EXPORT_SYMBOL_GPL(kvmppc_save_tm_hv)
	/* See if we need to handle fake suspend mode */
BEGIN_FTR_SECTION
	b	__kvmppc_save_tm
END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)

	lbz	r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */
	cmpwi	r0, 0
	beq	__kvmppc_save_tm

	/* The following code handles the fake_suspend = 1 case */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -PPC_MIN_STKFRM(r1)

	/* Turn on TM. */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	rldicl.	r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */
	beq	4f
BEGIN_FTR_SECTION
	bl	pnv_power9_force_smt4_catch
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
	nop

	/* We have to treclaim here because that's the only way to do S->N */
	li	r3, TM_CAUSE_KVM_RESCHED
	TRECLAIM(R3)

	/*
	 * We were in fake suspend, so we are not going to save the
	 * register state as the guest checkpointed state (since
	 * we already have it), therefore we can now use any volatile GPR.
	 * In fact treclaim in fake suspend state doesn't modify
	 * any registers.
	 */

BEGIN_FTR_SECTION
	bl	pnv_power9_force_smt4_release
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
	nop

4:
	mfspr	r3, SPRN_PSSCR
	/* PSSCR_FAKE_SUSPEND is a write-only bit, but clear it anyway */
	li	r0, PSSCR_FAKE_SUSPEND
	andc	r3, r3, r0
	mtspr	SPRN_PSSCR, r3

	/* Don't save TEXASR, use value from last exit in real suspend state */
	ld	r9, HSTATE_KVM_VCPU(r13)
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)

	addi	r1, r1, PPC_MIN_STKFRM
	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr
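	/*
	 * Condensed C view of the fake-suspend save path (hedged sketch;
	 * the helper names are the real symbols, control flow paraphrased):
	 *
	 *	if (!local_paca->kvm_hstate.fake_suspend)
	 *		return __kvmppc_save_tm(vcpu, guest_msr);
	 *	mtmsrd(mfmsr() | MSR_TM);
	 *	if (MSR_TM_SUSPENDED(mfmsr())) {	// did we hrfid in?
	 *		pnv_power9_force_smt4_catch();	// XER[SO] bug fence
	 *		treclaim(TM_CAUSE_KVM_RESCHED);	// only legal S -> N
	 *		pnv_power9_force_smt4_release();
	 *	}
	 *	mtspr(SPRN_PSSCR, mfspr(SPRN_PSSCR) & ~PSSCR_FAKE_SUSPEND);
	 *	vcpu->arch.tfhar = mfspr(SPRN_TFHAR);	// TEXASR kept from the
	 *	vcpu->arch.tfiar = mfspr(SPRN_TFIAR);	// last real-suspend exit
	 */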

/*
 * Restore transactional state and TM-related registers.
 * Called with r3 pointing to the vcpu struct
 * and r4 containing the guest MSR value.
 * r5 is non-zero iff non-volatile register state needs to be maintained.
 * This potentially modifies all checkpointed registers.
 * It restores r1 and r2 from the PACA.
 */
_GLOBAL_TOC(kvmppc_restore_tm_hv)
EXPORT_SYMBOL_GPL(kvmppc_restore_tm_hv)
	/*
	 * If we are doing TM emulation for the guest on a POWER9 DD2,
	 * then we don't actually do a trechkpt -- we either set up
	 * fake-suspend mode, or emulate a TM rollback.
	 */
BEGIN_FTR_SECTION
	b	__kvmppc_restore_tm
END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)

	li	r0, 0
	stb	r0, HSTATE_FAKE_SUSPEND(r13)

	/* Turn on TM so we can restore TM SPRs */
	mfmsr	r5
	li	r0, 1
	rldimi	r5, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r5

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r3)
	ld	r6, VCPU_TFIAR(r3)
	ld	r7, VCPU_TEXASR(r3)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7

	rldicl.	r5, r4, 64 - MSR_TS_S_LG, 62
	beqlr		/* TM not active in guest */

	/* Make sure the failure summary is set */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7

	cmpwi	r5, 1		/* check for suspended state */
	bgt	10f
	stb	r5, HSTATE_FAKE_SUSPEND(r13)
	b	9f		/* and return */
10:	stdu	r1, -PPC_MIN_STKFRM(r1)
	/* guest is in transactional state, so simulate rollback */
	bl	kvmhv_emulate_tm_rollback
	nop
	addi	r1, r1, PPC_MIN_STKFRM
9:	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr
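	/*
	 * Decision tree above, as a hedged C sketch (ts is the two-bit TS
	 * field of the guest MSR: 0 = none, 1 = suspended, 2 = transactional):
	 *
	 *	restore TFHAR/TFIAR/TEXASR;	// always context-switched
	 *	if (ts == 0)
	 *		return;			// TM not active in guest
	 *	texasr |= TEXASR_FS;		// failure summary must be set
	 *	if (ts == 1)
	 *		fake_suspend = 1;	// run guest in fake suspend
	 *	else
	 *		kvmhv_emulate_tm_rollback(vcpu);  // simulate rollback
	 */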
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * r12 is (CR << 32) | vector
 * r13 points to our PACA
 * r12 is saved in HSTATE_SCRATCH0(r13)
 * r9 is saved in HSTATE_SCRATCH2(r13)
 * r13 is saved in HSPRG1
 * cfar is saved in HSTATE_CFAR(r13)
 * ppr is saved in HSTATE_PPR(r13)
 */
kvmppc_bad_host_intr:
	/*
	 * Switch to the emergency stack, but start half-way down in
	 * case we were already on it.
	 */
	mr	r9, r1
	std	r1, PACAR1(r13)
	ld	r1, PACAEMERGSP(r13)
	subi	r1, r1, THREAD_SIZE/2 + INT_FRAME_SIZE
	std	r9, 0(r1)
	std	r0, GPR0(r1)
	std	r9, GPR1(r1)
	std	r2, GPR2(r1)
	SAVE_4GPRS(3, r1)
	SAVE_2GPRS(7, r1)
	srdi	r0, r12, 32
	clrldi	r12, r12, 32
	std	r0, _CCR(r1)
	std	r12, _TRAP(r1)
	andi.	r0, r12, 2
	beq	1f
	mfspr	r3, SPRN_HSRR0
	mfspr	r4, SPRN_HSRR1
	mfspr	r5, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
	b	2f
1:	mfspr	r3, SPRN_SRR0
	mfspr	r4, SPRN_SRR1
	mfspr	r5, SPRN_DAR
	mfspr	r6, SPRN_DSISR
2:	std	r3, _NIP(r1)
	std	r4, _MSR(r1)
	std	r5, _DAR(r1)
	std	r6, _DSISR(r1)
	ld	r9, HSTATE_SCRATCH2(r13)
	ld	r12, HSTATE_SCRATCH0(r13)
	GET_SCRATCH0(r0)
	SAVE_4GPRS(9, r1)
	std	r0, GPR13(r1)
	SAVE_NVGPRS(r1)
	ld	r5, HSTATE_CFAR(r13)
	std	r5, ORIG_GPR3(r1)
	mflr	r3
	mfctr	r4
	mfxer	r5
	lbz	r6, PACAIRQSOFTMASK(r13)
	std	r3, _LINK(r1)
	std	r4, _CTR(r1)
	std	r5, _XER(r1)
	std	r6, SOFTE(r1)
	ld	r2, PACATOC(r13)
	LOAD_REG_IMMEDIATE(3, 0x7265677368657265)	/* "regshere" eye-catcher */
	std	r3, STACK_FRAME_OVERHEAD-16(r1)

	/*
	 * On POWER9 do a minimal restore of the MMU and call C code,
	 * which will print a message and panic.
	 * XXX On POWER7 and POWER8, we just spin here since we don't
	 * know what the other threads are doing (and we don't want to
	 * coordinate with them) - but at least we now have register state
	 * in memory that we might be able to look at from another CPU.
	 */
BEGIN_FTR_SECTION
	b	.
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_KVM(r9)

	li	r0, 0
	mtspr	SPRN_AMR, r0
	mtspr	SPRN_IAMR, r0
	mtspr	SPRN_CIABR, r0
	mtspr	SPRN_DAWRX0, r0
BEGIN_FTR_SECTION
	mtspr	SPRN_DAWRX1, r0
END_FTR_SECTION_IFSET(CPU_FTR_DAWR1)

	/* Clear hash and radix guest SLB, see guest_exit_short_path comment. */
	slbmte	r0, r0
	PPC_SLBIA(6)

BEGIN_MMU_FTR_SECTION
	b	4f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)

	ptesync
	ld	r8, PACA_SLBSHADOWPTR(r13)
	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7, r5, SLB_ESID_V@h
	beq	3f
	slbmte	r6, r5
3:	addi	r8, r8, 16
	.endr

4:	lwz	r7, KVM_HOST_LPID(r10)
	mtspr	SPRN_LPID, r7
	mtspr	SPRN_PID, r0
	ld	r8, KVM_HOST_LPCR(r10)
	mtspr	SPRN_LPCR, r8
	isync
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)
	/*
	 * Turn the MMU back on and jump to the C handler. The bcl 20,31,.+4
	 * loads LR with the address of the next instruction, letting us
	 * compute label 9's address PC-relatively; ORing in the top two
	 * bits turns that real address into a kernel effective address.
	 */
	bcl	20, 31, .+4
5:	mflr	r3
	addi	r3, r3, 9f - 5b
	li	r4, -1
	rldimi	r3, r4, 62, 0	/* ensure 0xc000000000000000 bits are set */
	ld	r4, PACAKMSR(r13)
	mtspr	SPRN_SRR0, r3
	mtspr	SPRN_SRR1, r4
	RFI_TO_KERNEL
9:	addi	r3, r1, STACK_FRAME_OVERHEAD
	bl	kvmppc_bad_interrupt
	b	9b

/*
 * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken
 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
 * r11 has the guest MSR value (in/out)
 * r9 has a vcpu pointer (in)
 * r0 is used as a scratch register
 */
kvmppc_msr_interrupt:
	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r0, 2		/* Check if we are in transactional state.. */
	ld	r11, VCPU_INTR_MSR(r9)
	bne	1f
	/* ... if transactional, change to suspended */
	li	r0, 1
1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	blr
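	/*
	 * Equivalent C (sketch; TS field values: 0 = none, 1 = suspended,
	 * 2 = transactional):
	 *
	 *	ts = (old_msr >> MSR_TS_S_LG) & 3;
	 *	if (ts == 2)
	 *		ts = 1;			// transactional -> suspended
	 *	new_msr = vcpu->arch.intr_msr;	// TS field assumed clear here
	 *	new_msr |= (u64)ts << MSR_TS_S_LG;
	 */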

/*
 * Load up guest PMU state.  R3 points to the vcpu struct.
 */
_GLOBAL(kvmhv_load_guest_pmu)
EXPORT_SYMBOL_GPL(kvmhv_load_guest_pmu)
	mr	r4, r3
	mflr	r0
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
BEGIN_FTR_SECTION
	ld	r3, VCPU_MMCR(r4)
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r5, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCRA(r4)
	ld	r7, VCPU_SIAR(r4)
	ld	r8, VCPU_SDAR(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_SIAR, r7
	mtspr	SPRN_SDAR, r8
BEGIN_FTR_SECTION
	ld	r5, VCPU_MMCR + 24(r4)
	ld	r6, VCPU_SIER + 8(r4)
	ld	r7, VCPU_SIER + 16(r4)
	mtspr	SPRN_MMCR3, r5
	mtspr	SPRN_SIER2, r6
	mtspr	SPRN_SIER3, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31)
BEGIN_FTR_SECTION
	ld	r5, VCPU_MMCR + 16(r4)
	ld	r6, VCPU_SIER(r4)
	mtspr	SPRN_MMCR2, r5
	mtspr	SPRN_SIER, r6
BEGIN_FTR_SECTION_NESTED(96)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCRS(r4)
	mtspr	SPRN_SPMC1, r7
	mtspr	SPRN_SPMC2, r8
	mtspr	SPRN_MMCRS, r9
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
	mtlr	r0
	blr

/*
 * Reload host PMU state saved in the PACA by kvmhv_save_host_pmu.
 */
_GLOBAL(kvmhv_load_host_pmu)
EXPORT_SYMBOL_GPL(kvmhv_load_host_pmu)
	mflr	r0
	lbz	r4, PACA_PMCINUSE(r13)	/* is the host using the PMU? */
	cmpwi	r4, 0
	beq	23f			/* skip if not */
BEGIN_FTR_SECTION
	ld	r3, HSTATE_MMCR0(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r4, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_SIAR, r6
	mtspr	SPRN_SDAR, r7
BEGIN_FTR_SECTION
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
	mtspr	SPRN_MMCR2, r8
	mtspr	SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
BEGIN_FTR_SECTION
	ld	r5, HSTATE_MMCR3(r13)
	ld	r6, HSTATE_SIER2(r13)
	ld	r7, HSTATE_SIER3(r13)
	mtspr	SPRN_MMCR3, r5
	mtspr	SPRN_SIER2, r6
	mtspr	SPRN_SIER3, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31)
	mtspr	SPRN_MMCR0, r3
	isync
	mtlr	r0
23:	blr

/*
 * Save guest PMU state into the vcpu struct.
 * r3 = vcpu, r4 = full save flag (PMU in use flag set in VPA)
 */
_GLOBAL(kvmhv_save_guest_pmu)
EXPORT_SYMBOL_GPL(kvmhv_save_guest_pmu)
	mr	r9, r3
	mr	r8, r4
BEGIN_FTR_SECTION
	/*
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't seem
	 * to cause a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0. Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMAO] as well. On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
	li	r3, -1			/* set all freeze bits */
	clrrdi	r3, r3, 10
	mfspr	r10, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
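	/*
	 * The li -1 / clrrdi ,10 pair builds ~0x3ffULL: the top 54 bits are
	 * the freeze-condition bits (6 counters x 9 bits each), i.e. roughly
	 * mtspr(SPRN_MMCR2, ~0x3ffULL) in C.
	 */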
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
	/* Clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
	isync
	cmpwi	r8, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r7, SPRN_SIAR
	mfspr	r8, SPRN_SDAR
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCRA(r9)
BEGIN_FTR_SECTION
	std	r10, VCPU_MMCR + 16(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_MMCR3
	mfspr	r6, SPRN_SIER2
	mfspr	r7, SPRN_SIER3
	std	r5, VCPU_MMCR + 24(r9)
	std	r6, VCPU_SIER + 8(r9)
	std	r7, VCPU_SIER + 16(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_SIER
	std	r5, VCPU_SIER(r9)
BEGIN_FTR_SECTION_NESTED(96)
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCRS(r9)
	lis	r4, 0x8000
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22:	blr

/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt. Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
kvmppc_fix_pmao:
	li	r3, 0
	mtspr	SPRN_MMCR2, r3
	lis	r3, (MMCR0_PMXE | MMCR0_FCECE)@h
	ori	r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
	mtspr	SPRN_MMCR0, r3
	lis	r3, 0x7fff
	ori	r3, r3, 0xffff
	mtspr	SPRN_PMC6, r3
	isync
	blr

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/*
 * Start timing an activity
 * r3 = pointer to time accumulation struct, r4 = vcpu
 */
kvmhv_start_timing:
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET_APPL(r5)
	mftb	r5
	subf	r5, r6, r5	/* subtract current timebase offset */
	std	r3, VCPU_CUR_ACTIVITY(r4)
	std	r5, VCPU_ACTIVITY_START(r4)
	blr

/*
 * Accumulate time to one activity and start another.
 * r3 = pointer to new time accumulation struct, r4 = vcpu
 */
kvmhv_accumulate_time:
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r8, VCORE_TB_OFFSET_APPL(r5)
	ld	r5, VCPU_CUR_ACTIVITY(r4)
	ld	r6, VCPU_ACTIVITY_START(r4)
	std	r3, VCPU_CUR_ACTIVITY(r4)
	mftb	r7
	subf	r7, r8, r7	/* subtract current timebase offset */
	std	r7, VCPU_ACTIVITY_START(r4)
	cmpdi	r5, 0
	beqlr
	subf	r3, r6, r7
	ld	r8, TAS_SEQCOUNT(r5)
	cmpdi	r8, 0
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	lwsync
	ld	r7, TAS_TOTAL(r5)
	add	r7, r7, r3
	std	r7, TAS_TOTAL(r5)
	ld	r6, TAS_MIN(r5)
	ld	r7, TAS_MAX(r5)
	beq	3f
	cmpd	r3, r6
	bge	1f
3:	std	r3, TAS_MIN(r5)
1:	cmpd	r3, r7
	ble	2f
	std	r3, TAS_MAX(r5)
2:	lwsync
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	blr
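	/*
	 * The seqcount makes the total/min/max triple readable without a
	 * lock; a hedged C rendering (field names track the TAS_* offsets):
	 *
	 *	u64 now = mftb() - vc->tb_offset_applied;
	 *	struct acc *a = vcpu->arch.cur_activity;
	 *	u64 delta = now - vcpu->arch.cur_activity_start;
	 *	vcpu->arch.cur_activity = next;
	 *	vcpu->arch.cur_activity_start = now;
	 *	if (!a)
	 *		return;
	 *	bool first = (a->seqcount == 0);
	 *	a->seqcount++;			// odd: update in flight
	 *	smp_wmb();
	 *	a->total += delta;
	 *	if (first || delta < a->min)
	 *		a->min = delta;
	 *	if (delta > a->max)
	 *		a->max = delta;
	 *	smp_wmb();
	 *	a->seqcount++;			// even again: consistent
	 */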
#endif