/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/mmu-hash64.h>
#include <asm/tm.h>

#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
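/*
 * VCPU_GPRS_TM(reg) gives the offset of checkpointed GPR 'reg' within
 * the vcpu struct, on the assumption that the transactional-memory GPR
 * save area is a contiguous array of ULONG_SIZE slots at VCPU_GPR_TM.
 */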

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI

kvmppc_call_hv_entry:
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
BEGIN_FTR_SECTION
	ld	r3, HSTATE_MMCR0(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r4, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_SIAR, r6
	mtspr	SPRN_SDAR, r7
BEGIN_FTR_SECTION
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
	mtspr	SPRN_MMCR2, r8
	mtspr	SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
23:

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200).  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
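	/*
	 * The two instructions below pop the 112-byte stack frame pushed
	 * by kvmppc_hv_entry_trampoline and pick up the LR value it
	 * saved, so r8 holds the host return address.
	 */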
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	11f
	cmpwi	cr2, r12, BOOK3S_INTERRUPT_HMI
	beq	cr2, 14f		/* HMI check */

	/* RFI into the highmem handler, or branch to interrupt handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	beq	cr1, 13f		/* machine check */
	RFI

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500

13:	b	machine_check_fwnmi

14:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	b	hmi_exception_after_realmode

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	mfspr	r3, SPRN_HDEC
	mtspr	SPRN_DEC, r3
	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)
65:	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	65b
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
1:	lwarx	r3, 0, r6
	or	r3, r3, r0
	stwcx.	r3, 0, r6
	bne	1b
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r12, 0
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	cmpwi	r7, 0x100
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)

	li	r3, 0		/* Don't wake on privileged (OS) doorbell */
	b	kvm_do_nap

kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
4:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	4b

	/* See if the wake reason means we need to exit */
	cmpdi	r3, 0
	bge	kvm_novcpu_exit

	/* See if our timeslice has expired (HDEC is negative) */
	mfspr	r0, SPRN_HDEC
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
	cmpwi	r0, 0
	blt	kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
#endif
	b	kvmppc_got_guest

kvm_novcpu_exit:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	13f
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
13:	mr	r3, r12
	stw	r12, 112-4(r1)
	bl	kvmhv_commence_exit
	nop
	lwz	r12, 112-4(r1)
	b	kvmhv_switch_to_host

/*
 * We come in here when wakened from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:

	/* Set runlatch bit the minute you wake up from nap */
	mfspr	r0, SPRN_CTRLF
	ori	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	ld	r2,PACATOC(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	beq	kvm_end_cede
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	cmpdi	r3, 0
	bge	kvm_no_guest

	/* get vcpu pointer, NULL if we have no vcpu to run */
	ld	r4,HSTATE_KVM_VCPU(r13)
	cmpdi	r4,0
	/* if we have no vcpu to run, go back to sleep */
	beq	kvm_no_guest

kvm_secondary_got_guest:

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR(r13)
	std	r6, HSTATE_DSCR(r13)

	/* Order load of vcore, ptid etc. after load of vcpu */
	lwsync
	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu pointer so we don't come back in early */
	li	r0, 0
	/*
	 * Once we clear HSTATE_KVM_VCPU(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory.  This lwsync makes sure
	 * that that is true.
	 */
	lwsync
	std	r0, HSTATE_KVM_VCPU(r13)

/*
 * At this point we have finished executing in the guest.
 * We need to wait for hwthread_req to become zero, since
 * we may not turn on the MMU while hwthread_req is non-zero.
 * While waiting we also need to check if we get given a vcpu to run.
 */
kvm_no_guest:
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	53f
	HMT_MEDIUM
	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	sync
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	54f
/*
 * We jump to power7_wakeup_loss, which will return to the caller
 * of power7_nap in the powernv cpu offline loop.  The value we
 * put in r3 becomes the return value for power7_nap.
 */
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	li	r3, 0
	b	power7_wakeup_loss

53:	HMT_LOW
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	kvm_no_guest
	HMT_MEDIUM
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	b	kvm_no_guest

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer (or NULL)
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = TOC
	 * all other volatile GPRS = free
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	cmpdi	r4, 0
	beq	1f
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
1:
#endif
	/* Clear out SLB */
	li	r6,0
	slbmte	r6,r6
	slbia
	ptesync

	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Set bit in entry map iff exit map is zero. */
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r7, 1
	lbz	r6, HSTATE_PTID(r13)
	sld	r7, r7, r6
	addi	r9, r5, VCORE_ENTRY_EXIT
21:	lwarx	r3, 0, r9
	cmpwi	r3, 0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	or	r3, r3, r7
	stwcx.	r3, 0, r9
	bne	21b
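	/*
	 * The entry/exit word just updated packs two bitmaps: the low
	 * byte is the set of threads that have entered the guest, and
	 * the next byte is the set of threads that have started to exit,
	 * so any value >= 0x100 means an exit is already under way.
	 */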

	/* Primary thread switches to guest partition. */
	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */
	cmpwi	r6,0
	bne	10f
	ld	r6,KVM_SDR1(r9)
	lwz	r7,KVM_LPID(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	add	r6,r6,r9
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
	li	r0,1
	sld	r0,r0,r7
	ld	r7,0(r6)
	and.	r7,r7,r0
	beq	22f
23:	ldarx	r7,0,r6			/* if set, clear the bit */
	andc	r7,r7,r0
	stdcx.	r7,0,r6
	bne	23b
	/* Flush the TLB of any entries for this LPID */
	/* use arch 2.07S as a proxy for POWER8 */
BEGIN_FTR_SECTION
	li	r6,512			/* POWER8 has 512 sets */
FTR_SECTION_ELSE
	li	r6,128			/* POWER7 has 128 sets */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	mtctr	r6
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
28:	tlbiel	r7
	addi	r7,r7,0x1000
	bdnz	28b
	ptesync
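	/*
	 * Each tlbiel above (IS = 0b10) invalidates the entries of one
	 * TLB congruence class; adding 0x1000 steps the set-index field
	 * of r7, so the bdnz loop walks all 128 or 512 sets.
	 */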

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	mftb	r6		/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	cmpdi	r7, 0
	beq	38f
	mtspr	SPRN_PCR, r7
38:

BEGIN_FTR_SECTION
	/* DPDES is shared between threads */
	ld	r8, VCORE_DPDES(r5)
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Do we have a guest vcpu to run? */
10:	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest
kvmppc_got_guest:

	/* Load up guest SLB entries */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:
	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	li	r6, LPPACA_YIELDCOUNT
	LWZX_BE	r5, r3, r6
	addi	r5, r5, 1
	STWX_BE	r5, r3, r6
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8

BEGIN_FTR_SECTION
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
	isync
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	b	skip_tm
END_FTR_SECTION_IFCLR(CPU_FTR_TM)

	/* Turn on TM/FP/VSX/VMX so we can restore them. */
	mfmsr	r5
	li	r6, MSR_TM >> 32
	sldi	r6, r6, 32
	or	r5, r5, r6
	ori	r5, r5, MSR_FP
	oris	r5, r5, (MSR_VEC | MSR_VSX)@h
	mtmsrd	r5

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r4)
	ld	r6, VCPU_TFIAR(r4)
	ld	r7, VCPU_TEXASR(r4)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7

	ld	r5, VCPU_MSR(r4)
	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beq	skip_tm	/* TM not active in guest */

	/*
	 * Make sure the failure summary is set; otherwise we'll take a
	 * program check when we trechkpt.  It's possible that this was
	 * left unset by a kvmppc_set_one_reg() call, but we shouldn't
	 * let that crash the host.
	 */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7

	/*
	 * We need to load up the checkpointed state for the guest.
	 * We need to do this early as it will blow away any GPRs, VSRs and
	 * some SPRs.
	 */

	mr	r31, r4
	addi	r3, r31, VCPU_FPRS_TM
	bl	load_fp_state
	addi	r3, r31, VCPU_VRS_TM
	bl	load_vr_state
	mr	r4, r31
	lwz	r7, VCPU_VRSAVE_TM(r4)
	mtspr	SPRN_VRSAVE, r7

	ld	r5, VCPU_LR_TM(r4)
	lwz	r6, VCPU_CR_TM(r4)
	ld	r7, VCPU_CTR_TM(r4)
	ld	r8, VCPU_AMR_TM(r4)
	ld	r9, VCPU_TAR_TM(r4)
	mtlr	r5
	mtcr	r6
	mtctr	r7
	mtspr	SPRN_AMR, r8
	mtspr	SPRN_TAR, r9

	/*
	 * Load up PPR and DSCR values but don't put them in the actual SPRs
	 * till the last moment to avoid running with userspace PPR and DSCR for
	 * too long.
	 */
	ld	r29, VCPU_DSCR_TM(r4)
	ld	r30, VCPU_PPR_TM(r4)

	std	r2, PACATMSCRATCH(r13) /* Save TOC */

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* Load GPRs r0-r28 */
	reg = 0
	.rept	29
	ld	reg, VCPU_GPRS_TM(reg)(r31)
	reg = reg + 1
	.endr

	mtspr	SPRN_DSCR, r29
	mtspr	SPRN_PPR, r30

	/* Load final GPRs */
	ld	29, VCPU_GPRS_TM(29)(r31)
	ld	30, VCPU_GPRS_TM(30)(r31)
	ld	31, VCPU_GPRS_TM(31)(r31)

	/* TM checkpointed state is now setup.  All GPRs are now volatile. */
	TRECHKPT

	/* Now let's get back the state we need. */
	HMT_MEDIUM
	GET_PACA(r13)
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATMSCRATCH(r13)

	/* Set the MSR RI since we have our registers back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1
skip_tm:
#endif

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
BEGIN_FTR_SECTION
	ld	r3, VCPU_MMCR(r4)
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r5, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r7, VCPU_SIAR(r4)
	ld	r8, VCPU_SDAR(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_SIAR, r7
	mtspr	SPRN_SDAR, r8
BEGIN_FTR_SECTION
	ld	r5, VCPU_MMCR + 24(r4)
	ld	r6, VCPU_SIER(r4)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
	mtspr	SPRN_MMCR2, r5
	mtspr	SPRN_SIER, r6
	mtspr	SPRN_SPMC1, r7
	mtspr	SPRN_SPMC2, r8
	mtspr	SPRN_MMCRS, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5

BEGIN_FTR_SECTION
	/* Skip next section on POWER7 */
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	/* Load up POWER8-specific registers */
	ld	r5, VCPU_IAMR(r4)
	lwz	r6, VCPU_PSPB(r4)
	ld	r7, VCPU_FSCR(r4)
	mtspr	SPRN_IAMR, r5
	mtspr	SPRN_PSPB, r6
	mtspr	SPRN_FSCR, r7
	ld	r5, VCPU_DAWR(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_TAR(r4)
	mtspr	SPRN_DAWR, r5
	mtspr	SPRN_DAWRX, r6
	mtspr	SPRN_CIABR, r7
	mtspr	SPRN_TAR, r8
	ld	r5, VCPU_IC(r4)
	ld	r6, VCPU_VTB(r4)
	mtspr	SPRN_IC, r5
	mtspr	SPRN_VTB, r6
	ld	r8, VCPU_EBBHR(r4)
	mtspr	SPRN_EBBHR, r8
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r8, VCPU_TACR(r4)
	mtspr	SPRN_EBBRR, r5
	mtspr	SPRN_BESCR, r6
	mtspr	SPRN_CSIGR, r7
	mtspr	SPRN_TACR, r8
	ld	r5, VCPU_TCSCR(r4)
	ld	r6, VCPU_ACOP(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	ld	r8, VCPU_WORT(r4)
	mtspr	SPRN_TCSCR, r5
	mtspr	SPRN_ACOP, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_WORT, r8
8:

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET(r5)
	add	r8,r8,r6
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	stw	r3,VCPU_DEC(r4)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Secondary threads wait for primary to have done partition switch */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, HSTATE_PTID(r13)
	cmpwi	r6, 0
	beq	21f
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	bne	21f
	HMT_LOW
20:	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	20b
	HMT_MEDIUM
21:
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync

	/* Check if HDEC expires soon */
	mfspr	r3, SPRN_HDEC
	cmpwi	r3, 512		/* 1 microsecond */
	blt	hdec_soon

	ld	r6, VCPU_CTR(r4)
	lwz	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

deliver_guest_interrupt:
	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME

	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	cmpdi	cr1, r0, 0
	andi.	r8, r11, MSR_EE
	mfspr	r8, SPRN_LPCR
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	mtspr	SPRN_LPCR, r8
	isync
	beq	5f
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	bne	cr1, 12f
	mfspr	r0, SPRN_DEC
	cmpwi	r0, 0
	li	r0, BOOK3S_INTERRUPT_DECREMENTER
	bge	5f
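	/*
	 * At this point cr0 still reflects the guest's MSR_EE (from the
	 * andi. above) and cr1 reflects whether a level-triggered
	 * external is pending, so we only synthesize an external (cr1
	 * set) or decrementer (DEC negative) interrupt when the guest
	 * has interrupts enabled.
	 */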

12:	mtspr	SPRN_SRR0, r10
	mr	r10,r0
	mtspr	SPRN_SRR1, r11
	mr	r9, r4
	bl	kvmppc_msr_interrupt
5:

/*
 * Required state:
 * R4 = vcpu
 * R10: value for HSRR0
 * R11: value for HSRR1
 * R13 = PACA
 */
fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi	r3, r4, VCPU_TB_GUEST
	bl	kvmhv_accumulate_time
#endif

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr	SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)

	hrfid
	b	.

secondary_too_late:
	li	r12, 0
	cmpdi	r4, 0
	beq	11f
	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
11:	b	kvmhv_switch_to_host

hdec_soon:
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
	b	guest_exit_cont

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12		= interrupt vector
	 * R13		= PACA
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)

	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr
#endif
	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(R12)(r9)
	stw	r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	stw	r12,VCPU_TRAP(r9)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMINTR
	mr	r4, r9
	bl	kvmhv_accumulate_time
	ld	r5, VCPU_GPR(R5)(r9)
	ld	r6, VCPU_GPR(R6)(r9)
	ld	r7, VCPU_GPR(R7)(r9)
	ld	r8, VCPU_GPR(R8)(r9)
#endif

	/* Save HEIR (HV emulation assist reg) in emul_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
	stw	r3,VCPU_LAST_INST(r9)
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
11:	stw	r3,VCPU_HEIR(r9)

	/* these are volatile across C function calls */
	mfctr	r3
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	stw	r4, VCPU_XER(r9)

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,0
	mr	r4,r9
	bge	fast_guest_return
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Hypervisor doorbell - exit only if host IPI flag set */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	bne	3f
	lbz	r0, HSTATE_HOST_IPI(r13)
	beq	4f
	b	guest_exit_cont
3:
	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	guest_exit_cont

	/* External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now
	 */
	bl	kvmppc_read_intr
	cmpdi	r3, 0
	bgt	guest_exit_cont

	/* Check if any CPU is heading out to the host, if so head out too */
4:	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	mr	r4, r9
	blt	deliver_guest_interrupt

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save more register state */
	mfdar	r6
	mfdsisr	r7
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	mc_cont
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
mc_cont:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMEXIT
	mr	r4, r9
	bl	kvmhv_accumulate_time
#endif

	/* Increment exit count, poke other threads to exit */
	bl	kvmhv_commence_exit
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)
	lwz	r12, VCPU_TRAP(r9)

	/* Save guest CTRL register, set runlatch to 1 */
	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
	li	r5,0
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	stw	r5,VCPU_SLB_MAX(r9)
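	/*
	 * VCPU_SLB_MAX now holds the count of valid entries saved.  The
	 * loop above reads each hardware SLB slot with slbmfee/slbmfev,
	 * skips invalid ones, and folds the slot index into the low bits
	 * of the ESID word so that slbmte can reinsert the entry on the
	 * next guest entry.
	 */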

	/*
	 * Save the guest PURR/SPURR
	 */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4

	/* Save DEC */
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
	add	r5,r5,r6
	/* r5 is a guest timebase value here, convert to host TB */
	ld	r3,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_TB_OFFSET(r3)
	subf	r5,r4,r5
	std	r5,VCPU_DEC_EXPIRES(r9)

BEGIN_FTR_SECTION
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Save POWER8-specific registers */
	mfspr	r5, SPRN_IAMR
	mfspr	r6, SPRN_PSPB
	mfspr	r7, SPRN_FSCR
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	mfspr	r5, SPRN_IC
	mfspr	r6, SPRN_VTB
	mfspr	r7, SPRN_TAR
	std	r5, VCPU_IC(r9)
	std	r6, VCPU_VTB(r9)
	std	r7, VCPU_TAR(r9)
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	mfspr	r7, SPRN_CSIGR
	mfspr	r8, SPRN_TACR
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
	mfspr	r5, SPRN_TCSCR
	mfspr	r6, SPRN_ACOP
	mfspr	r7, SPRN_PID
	mfspr	r8, SPRN_WORT
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)
8:

	/* Save and reset AMR and UAMOR before turning on the MMU */
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6

	/* Switch DSCR back to host value */
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	mtspr	SPRN_DSCR, r7

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* save FP state */
	mr	r3, r9
	bl	kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	b	2f
END_FTR_SECTION_IFCLR(CPU_FTR_TM)
	/* Turn on TM. */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	ld	r5, VCPU_MSR(r9)
	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beq	1f	/* TM not active in guest. */

	li	r3, TM_CAUSE_KVM_RESCHED

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* All GPRs are volatile at this point. */
	TRECLAIM(R3)

	/* Temporarily store r13 and r9 so we have some regs to play with */
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9, PACATMSCRATCH(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Get a few more GPRs free. */
	std	r29, VCPU_GPRS_TM(29)(r9)
	std	r30, VCPU_GPRS_TM(30)(r9)
	std	r31, VCPU_GPRS_TM(31)(r9)

	/* Save away PPR and DSCR soon so don't run with user values. */
	mfspr	r31, SPRN_PPR
	HMT_MEDIUM
	mfspr	r30, SPRN_DSCR
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29

	/* Save all but r9, r13 & r29-r31 */
	reg = 0
	.rept	29
	.if (reg != 9) && (reg != 13)
	std	reg, VCPU_GPRS_TM(reg)(r9)
	.endif
	reg = reg + 1
	.endr
	/* ... now save r13 */
	GET_SCRATCH0(r4)
	std	r4, VCPU_GPRS_TM(13)(r9)
	/* ... and save r9 */
	ld	r4, PACATMSCRATCH(r13)
	std	r4, VCPU_GPRS_TM(9)(r9)

	/* Reload stack pointer and TOC. */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	/* Set MSR RI now we have r1 and r13 back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1

	/* Save away checkpointed SPRs. */
	std	r31, VCPU_PPR_TM(r9)
	std	r30, VCPU_DSCR_TM(r9)
	mflr	r5
	mfcr	r6
	mfctr	r7
	mfspr	r8, SPRN_AMR
	mfspr	r10, SPRN_TAR
	std	r5, VCPU_LR_TM(r9)
	stw	r6, VCPU_CR_TM(r9)
	std	r7, VCPU_CTR_TM(r9)
	std	r8, VCPU_AMR_TM(r9)
	std	r10, VCPU_TAR_TM(r9)

	/* Restore r12 as trap number. */
	lwz	r12, VCPU_TRAP(r9)

	/* Save FP/VSX. */
	addi	r3, r9, VCPU_FPRS_TM
	bl	store_fp_state
	addi	r3, r9, VCPU_VRS_TM
	bl	store_vr_state
	mfspr	r6, SPRN_VRSAVE
	stw	r6, VCPU_VRSAVE_TM(r9)
1:
	/*
	 * We need to save these SPRs after the treclaim so that the software
	 * error code is recorded correctly in the TEXASR.  Also the user may
	 * change these outside of a transaction, so they must always be
	 * context switched.
	 */
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	mfspr	r7, SPRN_TEXASR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)
	std	r7, VCPU_TEXASR(r9)
2:
#endif

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	li	r4, LPPACA_YIELDCOUNT
	LWZX_BE	r3, r8, r4
	addi	r3, r3, 1
	STWX_BE	r3, r8, r4
	li	r3, 1
	stb	r3, VCPU_VPA_DIRTY(r9)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
BEGIN_FTR_SECTION
	/*
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't seem
	 * to cause a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0.  Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMAO] as well.  On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
	li	r3, -1			/* set all freeze bits */
	clrrdi	r3, r3, 10
	mfspr	r10, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
	/* Clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r7, SPRN_SIAR
	mfspr	r8, SPRN_SDAR
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
BEGIN_FTR_SECTION
	std	r10, VCPU_MMCR + 24(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_SIER
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	std	r5, VCPU_SIER(r9)
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCR + 32(r9)
	lis	r4, 0x8000
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22:
	/* Clear out SLB */
	li	r5,0
	slbmte	r5,r5
	slbia
	ptesync

	/*
	 * POWER7/POWER8 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
kvmhv_switch_to_host:
	/* Secondary threads wait for primary to do partition switch */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	srwi	r0,r3,8
	clrldi	r3,r3,56
	cmpw	r3,r0
	bne	15b
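	/*
	 * The spin above waits until the exit map (bits 8-15 of the
	 * entry/exit word) equals the entry map (low 8 bits), i.e.
	 * until every thread that entered the guest has exited.
	 */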
	isync

	/* Primary thread switches back to host partition */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync

BEGIN_FTR_SECTION
	/* DPDES is shared between threads */
	mfspr	r7, SPRN_DPDES
	std	r7, VCORE_DPDES(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	li	r8, 0
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Subtract timebase offset from timebase */
	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	17f
	mftb	r6			/* current guest timebase */
	subf	r8,r8,r6
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	17f
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

	/* Reset PCR */
17:	ld	r0, VCORE_PCR(r5)
	cmpdi	r0, 0
	beq	18f
	li	r0, 0
	mtspr	SPRN_PCR, r0
18:
	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

16:	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync

	/* load host SLB entries */
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Finish timing, if we have a vcpu */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	li	r3, 0
	beq	2f
	bl	kvmhv_accumulate_time
2:
#endif
	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	ld	r0, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	mtlr	r0
	blr

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
kvmppc_hdsi:
	mfspr	r4, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	beq	3f
	clrrdi	r0, r4, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	6f
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	beq	2f

	/* Synthesize a DSI for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
	mr	r6, r3
1:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_DSISR, r6
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
	bl	kvmppc_msr_interrupt
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	lwz	r8, VCPU_XER(r9)
	mtctr	r7
	mtxer	r8
	mr	r4, r9
	b	fast_guest_return

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	b	4b

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	mfmsr	r3
	ori	r4, r3, MSR_DR		/* Enable paging for data */
	mtmsrd	r4
	lwz	r8, 0(r10)
	mtmsrd	r3

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)
	b	guest_exit_cont

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
kvmppc_hisi:
	andis.	r0, r11, SRR1_ISI_NOPT@h
	beq	1f
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	beq	3f
	clrrdi	r0, r10, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	mr	r4, r10
	mr	r6, r11
	li	r7, 0			/* instruction fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont

	/* Synthesize an ISI for the guest */
	mr	r11, r3
1:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
	b	4b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 *
 * r5 - r8 contain hcall args,
 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
 */
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	andi.	r0,r11,MSR_PR
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	guest_exit_cont
	/* See if this hcall is enabled for in-kernel handling */
	ld	r4, VCPU_KVM(r9)
	srdi	r0, r3, 8	/* r0 = (r3 / 4) >> 6 */
	sldi	r0, r0, 3	/* index into kvm->arch.enabled_hcalls[] */
	add	r4, r4, r0
	ld	r0, KVM_ENABLED_HCALLS(r4)
	rlwinm	r4, r3, 32-2, 0x3f	/* r4 = (r3 / 4) & 0x3f */
	srd	r0, r0, r4
	andi.	r0, r0, 1
	beq	guest_exit_cont
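	/*
	 * hcall numbers are multiples of 4, so r3/4 indexes the handler
	 * table below; the same value, split into a doubleword index
	 * (r3/4 >> 6) and a bit number (r3/4 & 0x3f), selects this
	 * hcall's bit in the kvm->arch.enabled_hcalls bitmap.
	 */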
	/* Get pointer to handler, if any, and call it */
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwax	r3,r3,r4
	cmpwi	r3,0
	beq	guest_exit_cont
	add	r12,r3,r4
	mtctr	r12
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return

sc_1_fast_return:
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	li	r10, BOOK3S_INTERRUPT_SYSCALL
	bl	kvmppc_msr_interrupt
	mr	r4,r9
	b	fast_guest_return

	/*
	 * We've attempted a real mode hcall, but the handler punted it
	 * back to userspace.  We need to restore some clobbered volatile
	 * registers before resuming the pass-it-to-qemu path.
	 */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)

	b	guest_exit_cont

	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	DOTSYM(kvmppc_h_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_enter) - hcall_real_table
	.long	DOTSYM(kvmppc_h_read) - hcall_real_table
	.long	0		/* 0x10 - H_CLEAR_MOD */
	.long	0		/* 0x14 - H_CLEAR_REF */
	.long	DOTSYM(kvmppc_h_protect) - hcall_real_table
	.long	DOTSYM(kvmppc_h_get_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_h_put_tce) - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
	.long	0		/* 0x2c */
	.long	0		/* 0x30 */
	.long	0		/* 0x34 */
	.long	0		/* 0x38 */
	.long	0		/* 0x3c */
	.long	0		/* 0x40 */
	.long	0		/* 0x44 */
	.long	0		/* 0x48 */
	.long	0		/* 0x4c */
	.long	0		/* 0x50 */
	.long	0		/* 0x54 */
	.long	0		/* 0x58 */
	.long	0		/* 0x5c */
	.long	0		/* 0x60 */
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
	.long	0		/* 0x70 - H_IPOLL */
	.long	DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
#else
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
#endif
	.long	0		/* 0x78 */
	.long	0		/* 0x7c */
	.long	0		/* 0x80 */
	.long	0		/* 0x84 */
	.long	0		/* 0x88 */
	.long	0		/* 0x8c */
	.long	0		/* 0x90 */
	.long	0		/* 0x94 */
	.long	0		/* 0x98 */
	.long	0		/* 0x9c */
	.long	0		/* 0xa0 */
	.long	0		/* 0xa4 */
	.long	0		/* 0xa8 */
	.long	0		/* 0xac */
	.long	0		/* 0xb0 */
	.long	0		/* 0xb4 */
	.long	0		/* 0xb8 */
	.long	0		/* 0xbc */
	.long	0		/* 0xc0 */
	.long	0		/* 0xc4 */
	.long	0		/* 0xc8 */
	.long	0		/* 0xcc */
	.long	0		/* 0xd0 */
	.long	0		/* 0xd4 */
	.long	0		/* 0xd8 */
	.long	0		/* 0xdc */
	.long	DOTSYM(kvmppc_h_cede) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
	.long	0		/* 0xe8 */
	.long	0		/* 0xec */
	.long	0		/* 0xf0 */
	.long	0		/* 0xf4 */
	.long	0		/* 0xf8 */
	.long	0		/* 0xfc */
	.long	0		/* 0x100 */
	.long	0		/* 0x104 */
	.long	0		/* 0x108 */
	.long	0		/* 0x10c */
	.long	0		/* 0x110 */
	.long	0		/* 0x114 */
	.long	0		/* 0x118 */
	.long	0		/* 0x11c */
	.long	0		/* 0x120 */
	.long	DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
	.long	0		/* 0x128 */
	.long	0		/* 0x12c */
	.long	0		/* 0x130 */
	.long	DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
	.long	0		/* 0x138 */
	.long	0		/* 0x13c */
	.long	0		/* 0x140 */
	.long	0		/* 0x144 */
	.long	0		/* 0x148 */
	.long	0		/* 0x14c */
	.long	0		/* 0x150 */
	.long	0		/* 0x154 */
	.long	0		/* 0x158 */
	.long	0		/* 0x15c */
	.long	0		/* 0x160 */
	.long	0		/* 0x164 */
	.long	0		/* 0x168 */
	.long	0		/* 0x16c */
	.long	0		/* 0x170 */
	.long	0		/* 0x174 */
	.long	0		/* 0x178 */
	.long	0		/* 0x17c */
	.long	0		/* 0x180 */
	.long	0		/* 0x184 */
	.long	0		/* 0x188 */
	.long	0		/* 0x18c */
	.long	0		/* 0x190 */
	.long	0		/* 0x194 */
	.long	0		/* 0x198 */
	.long	0		/* 0x19c */
	.long	0		/* 0x1a0 */
	.long	0		/* 0x1a4 */
	.long	0		/* 0x1a8 */
	.long	0		/* 0x1ac */
	.long	0		/* 0x1b0 */
	.long	0		/* 0x1b4 */
	.long	0		/* 0x1b8 */
	.long	0		/* 0x1bc */
	.long	0		/* 0x1c0 */
	.long	0		/* 0x1c4 */
	.long	0		/* 0x1c8 */
	.long	0		/* 0x1cc */
	.long	0		/* 0x1d0 */
	.long	0		/* 0x1d4 */
	.long	0		/* 0x1d8 */
	.long	0		/* 0x1dc */
	.long	0		/* 0x1e0 */
	.long	0		/* 0x1e4 */
	.long	0		/* 0x1e8 */
	.long	0		/* 0x1ec */
	.long	0		/* 0x1f0 */
	.long	0		/* 0x1f4 */
	.long	0		/* 0x1f8 */
	.long	0		/* 0x1fc */
	.long	0		/* 0x200 */
	.long	0		/* 0x204 */
	.long	0		/* 0x208 */
	.long	0		/* 0x20c */
	.long	0		/* 0x210 */
	.long	0		/* 0x214 */
	.long	0		/* 0x218 */
	.long	0		/* 0x21c */
	.long	0		/* 0x220 */
	.long	0		/* 0x224 */
	.long	0		/* 0x228 */
	.long	0		/* 0x22c */
	.long	0		/* 0x230 */
	.long	0		/* 0x234 */
	.long	0		/* 0x238 */
	.long	0		/* 0x23c */
	.long	0		/* 0x240 */
	.long	0		/* 0x244 */
	.long	0		/* 0x248 */
	.long	0		/* 0x24c */
	.long	0		/* 0x250 */
	.long	0		/* 0x254 */
	.long	0		/* 0x258 */
	.long	0		/* 0x25c */
	.long	0		/* 0x260 */
	.long	0		/* 0x264 */
	.long	0		/* 0x268 */
	.long	0		/* 0x26c */
	.long	0		/* 0x270 */
	.long	0		/* 0x274 */
	.long	0		/* 0x278 */
	.long	0		/* 0x27c */
	.long	0		/* 0x280 */
	.long	0		/* 0x284 */
	.long	0		/* 0x288 */
	.long	0		/* 0x28c */
	.long	0		/* 0x290 */
	.long	0		/* 0x294 */
	.long	0		/* 0x298 */
	.long	0		/* 0x29c */
	.long	0		/* 0x2a0 */
	.long	0		/* 0x2a4 */
	.long	0		/* 0x2a8 */
	.long	0		/* 0x2ac */
	.long	0		/* 0x2b0 */
	.long	0		/* 0x2b4 */
	.long	0		/* 0x2b8 */
	.long	0		/* 0x2bc */
	.long	0		/* 0x2c0 */
	.long	0		/* 0x2c4 */
	.long	0		/* 0x2c8 */
	.long	0		/* 0x2cc */
	.long	0		/* 0x2d0 */
	.long	0		/* 0x2d4 */
	.long	0		/* 0x2d8 */
	.long	0		/* 0x2dc */
	.long	0		/* 0x2e0 */
	.long	0		/* 0x2e4 */
	.long	0		/* 0x2e8 */
	.long	0		/* 0x2ec */
	.long	0		/* 0x2f0 */
	.long	0		/* 0x2f4 */
	.long	0		/* 0x2f8 */
	.long	0		/* 0x2fc */
	.long	DOTSYM(kvmppc_h_random) - hcall_real_table
	.globl	hcall_real_table_end
hcall_real_table_end:

_GLOBAL(kvmppc_h_set_xdabr)
	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
	beq	6f
	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
	andc.	r0, r5, r0
	beq	3f
6:	li	r3, H_PARAMETER
	blr

_GLOBAL(kvmppc_h_set_dabr)
	li	r5, DABRX_USER | DABRX_KERNEL
3:
BEGIN_FTR_SECTION
	b	2f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	stw	r5, VCPU_DABRX(r3)
	mtspr	SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	mfspr	r5, SPRN_DABR
	cmpd	r4, r5
	bne	1b
	isync
	li	r3,0
	blr

	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
2:	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi	r5, r4, 1, DAWRX_WT
	clrrdi	r4, r4, 3
	std	r4, VCPU_DAWR(r3)
	std	r5, VCPU_DAWRX(r3)
	mtspr	SPRN_DAWR, r4
	mtspr	SPRN_DAWRX, r5
	li	r3, 0
	blr

_GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
	ori	r11,r11,MSR_EE
	std	r11,VCPU_MSR(r3)
	li	r0,1
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	cmpwi	r5,0
	bne	kvm_cede_prodded
	li	r12,0		/* set trap to 0 to say hcall is handled */
	stw	r12,VCPU_TRAP(r3)
	li	r0,H_SUCCESS
	std	r0,VCPU_GPR(R3)(r3)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	clrldi	r8,r8,56
	li	r0,1
	sld	r0,r0,r6
	addi	r6,r5,VCORE_NAPPING_THREADS
31:	lwarx	r4,0,r6
	or	r4,r4,r0
	cmpw	r4,r8
	beq	kvm_cede_exit
	stwcx.	r4,0,r6
	bne	31b
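	/*
	 * If setting our bit makes the napping mask equal the entry map
	 * (r8), every thread that entered the guest is ceding, so the
	 * cmpw/beq above sends this last thread out via kvm_cede_exit
	 * instead of napping.
	 */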
2089 /* order napping_threads update vs testing entry_exit_map */
2090 isync
2091 li r0,NAPPING_CEDE
2092 stb r0,HSTATE_NAPPING(r13)
2093 lwz r7,VCORE_ENTRY_EXIT(r5)
2094 cmpwi r7,0x100
2095 bge 33f /* another thread already exiting */
2096
2097/*
2098 * Although not specifically required by the architecture, POWER7
2099 * preserves the following registers in nap mode, even if an SMT mode
2100 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
2101 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
2102 */
2103 /* Save non-volatile GPRs */
2104 std r14, VCPU_GPR(R14)(r3)
2105 std r15, VCPU_GPR(R15)(r3)
2106 std r16, VCPU_GPR(R16)(r3)
2107 std r17, VCPU_GPR(R17)(r3)
2108 std r18, VCPU_GPR(R18)(r3)
2109 std r19, VCPU_GPR(R19)(r3)
2110 std r20, VCPU_GPR(R20)(r3)
2111 std r21, VCPU_GPR(R21)(r3)
2112 std r22, VCPU_GPR(R22)(r3)
2113 std r23, VCPU_GPR(R23)(r3)
2114 std r24, VCPU_GPR(R24)(r3)
2115 std r25, VCPU_GPR(R25)(r3)
2116 std r26, VCPU_GPR(R26)(r3)
2117 std r27, VCPU_GPR(R27)(r3)
2118 std r28, VCPU_GPR(R28)(r3)
2119 std r29, VCPU_GPR(R29)(r3)
2120 std r30, VCPU_GPR(R30)(r3)
2121 std r31, VCPU_GPR(R31)(r3)
2122
2123 /* save FP state */
2124 bl kvmppc_save_fp
2125
2126 /*
2127 * Set DEC to the smaller of DEC and HDEC, so that we wake
2128 * no later than the end of our timeslice (HDEC interrupts
2129 * don't wake us from nap).
2130 */
2131 mfspr r3, SPRN_DEC
2132 mfspr r4, SPRN_HDEC
2133 mftb r5
2134 cmpw r3, r4
2135 ble 67f
2136 mtspr SPRN_DEC, r4
213767:
	/* save expiry time of guest decrementer */
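	/*
	 * DEC is a signed 32-bit count-down value, so the expiry time
	 * in guest timebase units is the current TB plus the
	 * sign-extended DEC; subtracting the vcore's timebase offset
	 * then converts it to a host TB value.
	 */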
	extsw	r3, r3
	add	r3, r3, r5
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET(r5)
	subf	r3, r6, r3	/* convert to host TB value */
	std	r3, VCPU_DEC_EXPIRES(r4)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_CEDE
	bl	kvmhv_accumulate_time
#endif

	lis	r3, LPCR_PECEDP@h	/* Do wake on privileged doorbell */

	/*
	 * Take a nap until a decrementer or external or doorbell interrupt
	 * occurs, with PECE1 and PECE0 set in LPCR.
	 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
	 * Also clear the runlatch bit before napping.
	 */
kvm_do_nap:
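	/* Clear the runlatch bit in CTRL to mark this thread as idle */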
	mfspr	r0, SPRN_CTRLF
	clrrdi	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	li	r0, 1
	stb	r0, HSTATE_HWTHREAD_REQ(r13)
	mfspr	r5, SPRN_LPCR
	ori	r5, r5, LPCR_PECE0 | LPCR_PECE1
BEGIN_FTR_SECTION
	ori	r5, r5, LPCR_PECEDH
	rlwimi	r5, r3, 0, LPCR_PECEDP
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_LPCR, r5
	isync
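	/*
	 * Idle-entry sequence: store to HSTATE_SCRATCH0, ptesync, then
	 * reload the same location and compare it with itself (the bne
	 * is never taken), ensuring the store has been performed before
	 * the thread executes nap.
	 */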
	li	r0, 0
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.

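	/*
	 * Another thread was already exiting before we could nap:
	 * recover the vcpu pointer into r4 and rejoin the wakeup path
	 * with no wake action (r3 = 0) and no trap (r12 = 0).
	 */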
33:	mr	r4, r3
	li	r3, 0
	li	r12, 0
	b	34f

kvm_end_cede:
	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMINTR
	bl	kvmhv_accumulate_time
#endif

	/* load up FP state */
	bl	kvmppc_load_fp

	/* Restore guest decrementer */
	ld	r3, VCPU_DEC_EXPIRES(r4)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET(r5)
	add	r3, r3, r6	/* convert host TB to guest TB value */
	mftb	r7
	subf	r3, r7, r3
	mtspr	SPRN_DEC, r3

	/* Load NV GPRS */
	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/* clear our bit in vcore->napping_threads */
34:	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
32:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	32b
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)

	/* See if the wake reason means we need to exit */
	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
	cmpdi	r3, 0
	bgt	guest_exit_cont

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	guest_exit_cont

	b	kvmppc_cede_reentry	/* if not go back to guest */

	/* handle cede when the vcpu has already been prodded */
kvm_cede_prodded:
	li	r0, 0
	stb	r0, VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0, VCPU_CEDED(r3)
	li	r3, H_SUCCESS
	blr

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	ld	r9, HSTATE_KVM_VCPU(r13)
	b	guest_exit_cont

	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	kvmppc_realmode_machine_check
	nop
	cmpdi	r3, 0		/* Did we handle the MCE? */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	/*
	 * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest
	 * through a machine check interrupt (set HSRR0 to 0x200).
	 * For handled (non-fatal) errors, go back to guest execution
	 * with the current HSRR0 instead of exiting the guest. This
	 * approach injects a machine check into the guest for a fatal
	 * error, causing the guest to crash.
	 *
	 * The old code returned to the host for unhandled errors, which
	 * caused the guest to hang with soft lockups and made it
	 * difficult to recover the guest instance.
	 */
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	bne	2f		/* Continue guest execution. */
	/* If not, deliver a machine check. SRR0/1 are already set */
	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	ld	r11, VCPU_MSR(r9)
	bl	kvmppc_msr_interrupt
2:	b	fast_interrupt_c_return

/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns (in r3):
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI or msgsnd)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 * Modifies r0, r6, r7, r8.
 */
kvmppc_check_wake_reason:
	mfspr	r6, SPRN_SRR1
BEGIN_FTR_SECTION
	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
FTR_SECTION_ELSE
	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 8			/* was it an external interrupt? */
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	kvmppc_read_intr	/* if so, see what it was */
	li	r3, 0
	li	r12, 0
	cmpwi	r6, 6			/* was it the decrementer? */
	beq	0f
BEGIN_FTR_SECTION
	cmpwi	r6, 5			/* privileged doorbell? */
	beq	0f
	cmpwi	r6, 3			/* hypervisor doorbell? */
	beq	3f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1			/* anything else, return 1 */
0:	blr

	/* hypervisor doorbell */
3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL
	/* see if it's a host IPI */
	li	r3, 1
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bnelr
	/* if not, clear it and return -1 */
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
	li	r3, -1
	blr

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 * Modifies r0, r6, r7, r8; returns the value in r3.
 */
kvmppc_read_intr:
	/* see if a host IPI is pending */
	li	r3, 1
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bne	1f

	/* Now read the interrupt from the ICP */
	ld	r6, HSTATE_XICS_PHYS(r13)
	li	r7, XICS_XIRR
	cmpdi	r6, 0
	beq-	1f
	lwzcix	r0, r6, r7
	/*
	 * Save the XIRR for later. Since we get it in reverse endian on
	 * LE systems, save it byte-reversed and fetch it back in host
	 * endian.
	 */
	li	r3, HSTATE_SAVED_XIRR
	STWX_BE	r0, r3, r13
#ifdef __LITTLE_ENDIAN__
	lwz	r3, HSTATE_SAVED_XIRR(r13)
#else
	mr	r3, r0
#endif
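	/* The low 24 bits of the XIRR are the XISR; zero means nothing pending */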
	rlwinm.	r3, r3, 0, 0xffffff
	sync
	beq	1f			/* if nothing pending in the ICP */

	/* We found something in the ICP...
	 *
	 * If it's not an IPI, stash it in the PACA and return to
	 * the host; we don't (yet) handle directing real external
	 * interrupts directly to the guest.
	 */
	cmpwi	r3, XICS_IPI		/* if there is, is it an IPI? */
	bne	42f

	/* It's an IPI, clear the MFRR and EOI it */
	li	r3, 0xff
	li	r8, XICS_MFRR
	stbcix	r3, r6, r8		/* clear the IPI */
	stwcix	r0, r6, r7		/* EOI it */
	sync

	/* We need to re-check the host IPI now in case it got set in the
	 * meantime. If it's clear, we bounce the interrupt to the
	 * guest.
	 */
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bne-	43f

	/* OK, it's an IPI for us */
	li	r12, 0
	li	r3, -1
1:	blr

42:	/* It's not an IPI and it's for the host. We saved a copy of the
	 * XIRR in the PACA earlier; it will be picked up by the host
	 * ICP driver.
	 */
	li	r3, 1
	b	1b

43:	/* We raced with the host; we need to resend that IPI, bummer */
	li	r0, IPI_PRIORITY
	stbcix	r0, r6, r8		/* set the IPI */
	sync
	li	r3, 1
	b	1b

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_save_fp:
	mflr	r30
	mr	r31, r3
	mfmsr	r5
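	/* Enable FP, and VMX/VSX where available, so we can access the registers */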
	ori	r8, r5, MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8, r8, MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8, r8, MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3, r3, VCPU_FPRS
	bl	store_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3, r31, VCPU_VRS
	bl	store_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6, SPRN_VRSAVE
	stw	r6, VCPU_VRSAVE(r31)
	mtlr	r30
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_load_fp:
	mflr	r30
	mr	r31, r4
	mfmsr	r9
	ori	r8, r9, MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8, r8, MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8, r8, MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3, r4, VCPU_FPRS
	bl	load_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3, r31, VCPU_VRS
	bl	load_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7, VCPU_VRSAVE(r31)
	mtspr	SPRN_VRSAVE, r7
	mtlr	r30
	mr	r4, r31
	blr

/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * For now just spin, but we should do something better.
 */
kvmppc_bad_host_intr:
	b	.

/*
 * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken
 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
 * r11 has the guest MSR value (in/out)
 * r9 has a vcpu pointer (in)
 * r0 is used as a scratch register
 */
kvmppc_msr_interrupt:
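	/*
	 * Extract the 2-bit MSR[TS] field. Interrupt delivery moves a
	 * thread from transactional (TS = 0b10) to suspended
	 * (TS = 0b01) state; any other TS value is carried over into
	 * the new MSR unchanged.
	 */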
	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r0, 2			/* Check if we are in transactional state... */
	ld	r11, VCPU_INTR_MSR(r9)
	bne	1f
	/* ... if transactional, change to suspended */
	li	r0, 1
1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	blr

/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt. Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
kvmppc_fix_pmao:
	li	r3, 0
	mtspr	SPRN_MMCR2, r3
	lis	r3, (MMCR0_PMXE | MMCR0_FCECE)@h
	ori	r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
	mtspr	SPRN_MMCR0, r3
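	/*
	 * Load PMC6 with 0x7fffffff, one below the overflow threshold,
	 * so the next counted event overflows it and raises the
	 * performance monitor interrupt that PMAO alone fails to
	 * deliver.
	 */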
	lis	r3, 0x7fff
	ori	r3, r3, 0xffff
	mtspr	SPRN_PMC6, r3
	isync
	blr

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/*
 * Start timing an activity
 * r3 = pointer to time accumulation struct, r4 = vcpu
 */
kvmhv_start_timing:
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, VCORE_IN_GUEST(r5)
	cmpwi	r6, 0
	beq	5f			/* if in guest, need to */
	ld	r6, VCORE_TB_OFFSET(r5)	/* subtract timebase offset */
5:	mftb	r5
	subf	r5, r6, r5
	std	r3, VCPU_CUR_ACTIVITY(r4)
	std	r5, VCPU_ACTIVITY_START(r4)
	blr

/*
 * Accumulate time to one activity and start another.
 * r3 = pointer to new time accumulation struct, r4 = vcpu
 */
kvmhv_accumulate_time:
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r8, VCORE_IN_GUEST(r5)
	cmpwi	r8, 0
	beq	4f			/* if in guest, need to */
	ld	r8, VCORE_TB_OFFSET(r5)	/* subtract timebase offset */
4:	ld	r5, VCPU_CUR_ACTIVITY(r4)
	ld	r6, VCPU_ACTIVITY_START(r4)
	std	r3, VCPU_CUR_ACTIVITY(r4)
	mftb	r7
	subf	r7, r8, r7
	std	r7, VCPU_ACTIVITY_START(r4)
	cmpdi	r5, 0
	beqlr
	subf	r3, r6, r7
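	/*
	 * Publish the update under a seqcount: bump the count to an odd
	 * value while the fields are being modified so a concurrent
	 * reader can detect a torn read and retry, then bump it back to
	 * even when done.
	 */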
	ld	r8, TAS_SEQCOUNT(r5)
	cmpdi	r8, 0
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	lwsync
	ld	r7, TAS_TOTAL(r5)
	add	r7, r7, r3
	std	r7, TAS_TOTAL(r5)
	ld	r6, TAS_MIN(r5)
	ld	r7, TAS_MAX(r5)
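	/*
	 * cr0 still holds the result of the seqcount-vs-zero compare
	 * above: on the very first sample, take the new delta as both
	 * the minimum and the maximum.
	 */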
	beq	3f
	cmpd	r3, r6
	bge	1f
3:	std	r3, TAS_MIN(r5)
1:	cmpd	r3, r7
	ble	2f
	std	r3, TAS_MAX(r5)
2:	lwsync
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	blr
#endif