Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: PPC: Book3S HV: Translate kvmhv_commence_exit to C

This replaces the assembler code for kvmhv_commence_exit() with C code
in book3s_hv_builtin.c. It also moves the IPI sending code that was
in book3s_hv_rm_xics.c into a new kvmhv_rm_send_ipi() function so it
can be used by kvmhv_commence_exit() as well as icp_rm_set_vcpu_irq().

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>

Authored by Paul Mackerras; committed by Alexander Graf.
eddb60fb 6af27c84

+75 -68
+2
arch/powerpc/include/asm/kvm_book3s_64.h
··· 438 438 439 439 extern void kvmppc_mmu_debugfs_init(struct kvm *kvm); 440 440 441 + extern void kvmhv_rm_send_ipi(int cpu); 442 + 441 443 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ 442 444 443 445 #endif /* __ASM_KVM_BOOK3S_64_H__ */
+63
arch/powerpc/kvm/book3s_hv_builtin.c
··· 22 22 #include <asm/kvm_ppc.h> 23 23 #include <asm/kvm_book3s.h> 24 24 #include <asm/archrandom.h> 25 + #include <asm/xics.h> 25 26 26 27 #define KVM_CMA_CHUNK_ORDER 18 27 28 ··· 184 183 return H_SUCCESS; 185 184 186 185 return H_HARDWARE; 186 + } 187 + 188 + static inline void rm_writeb(unsigned long paddr, u8 val) 189 + { 190 + __asm__ __volatile__("stbcix %0,0,%1" 191 + : : "r" (val), "r" (paddr) : "memory"); 192 + } 193 + 194 + /* 195 + * Send an interrupt to another CPU. 196 + * This can only be called in real mode. 197 + * The caller needs to include any barrier needed to order writes 198 + * to memory vs. the IPI/message. 199 + */ 200 + void kvmhv_rm_send_ipi(int cpu) 201 + { 202 + unsigned long xics_phys; 203 + 204 + /* Poke the target */ 205 + xics_phys = paca[cpu].kvm_hstate.xics_phys; 206 + rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY); 207 + } 208 + 209 + /* 210 + * The following functions are called from the assembly code 211 + * in book3s_hv_rmhandlers.S. 212 + */ 213 + static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active) 214 + { 215 + int cpu = vc->pcpu; 216 + 217 + /* Order setting of exit map vs. msgsnd/IPI */ 218 + smp_mb(); 219 + for (; active; active >>= 1, ++cpu) 220 + if (active & 1) 221 + kvmhv_rm_send_ipi(cpu); 222 + } 223 + 224 + void kvmhv_commence_exit(int trap) 225 + { 226 + struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore; 227 + int ptid = local_paca->kvm_hstate.ptid; 228 + int me, ee; 229 + 230 + /* Set our bit in the threads-exiting-guest map in the 0xff00 231 + bits of vcore->entry_exit_map */ 232 + me = 0x100 << ptid; 233 + do { 234 + ee = vc->entry_exit_map; 235 + } while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee); 236 + 237 + /* Are we the first here? */ 238 + if ((ee >> 8) != 0) 239 + return; 240 + 241 + /* 242 + * Trigger the other threads in this vcore to exit the guest. 243 + * If this is a hypervisor decrementer interrupt then they 244 + * will be already on their way out of the guest. 
245 + */ 246 + if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER) 247 + kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid)); 187 248 }
+2 -10
arch/powerpc/kvm/book3s_hv_rm_xics.c
··· 26 26 static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp, 27 27 u32 new_irq); 28 28 29 - static inline void rm_writeb(unsigned long paddr, u8 val) 30 - { 31 - __asm__ __volatile__("sync; stbcix %0,0,%1" 32 - : : "r" (val), "r" (paddr) : "memory"); 33 - } 34 - 35 29 /* -- ICS routines -- */ 36 30 static void ics_rm_check_resend(struct kvmppc_xics *xics, 37 31 struct kvmppc_ics *ics, struct kvmppc_icp *icp) ··· 54 60 struct kvm_vcpu *this_vcpu) 55 61 { 56 62 struct kvmppc_icp *this_icp = this_vcpu->arch.icp; 57 - unsigned long xics_phys; 58 63 int cpu; 59 64 60 65 /* Mark the target VCPU as having an interrupt pending */ ··· 76 83 /* In SMT cpu will always point to thread 0, we adjust it */ 77 84 cpu += vcpu->arch.ptid; 78 85 79 - /* Not too hard, then poke the target */ 80 - xics_phys = paca[cpu].kvm_hstate.xics_phys; 81 - rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY); 86 + smp_mb(); 87 + kvmhv_rm_send_ipi(cpu); 82 88 } 83 89 84 90 static void icp_rm_clr_vcpu_irq(struct kvm_vcpu *vcpu)
+8 -58
arch/powerpc/kvm/book3s_hv_rmhandlers.S
··· 264 264 addi r3, r4, VCPU_TB_RMEXIT 265 265 bl kvmhv_accumulate_time 266 266 #endif 267 - 13: bl kvmhv_commence_exit 267 + 13: mr r3, r12 268 + stw r12, 112-4(r1) 269 + bl kvmhv_commence_exit 270 + nop 271 + lwz r12, 112-4(r1) 268 272 b kvmhv_switch_to_host 269 273 270 274 /* ··· 1165 1161 1166 1162 /* Increment exit count, poke other threads to exit */ 1167 1163 bl kvmhv_commence_exit 1164 + nop 1165 + ld r9, HSTATE_KVM_VCPU(r13) 1166 + lwz r12, VCPU_TRAP(r9) 1168 1167 1169 1168 /* Save guest CTRL register, set runlatch to 1 */ 1170 1169 mfspr r6,SPRN_CTRLF ··· 1618 1611 1619 1612 ld r0, 112+PPC_LR_STKOFF(r1) 1620 1613 addi r1, r1, 112 1621 - mtlr r0 1622 - blr 1623 - 1624 - kvmhv_commence_exit: /* r12 = trap, r13 = paca, doesn't trash r9 */ 1625 - mflr r0 1626 - std r0, PPC_LR_STKOFF(r1) 1627 - stdu r1, -PPC_MIN_STKFRM(r1) 1628 - 1629 - /* Set our bit in the threads-exiting-guest map in the 0xff00 1630 - bits of vcore->entry_exit_map */ 1631 - ld r5, HSTATE_KVM_VCORE(r13) 1632 - lbz r4, HSTATE_PTID(r13) 1633 - li r7, 0x100 1634 - sld r7, r7, r4 1635 - addi r6, r5, VCORE_ENTRY_EXIT 1636 - 41: lwarx r3, 0, r6 1637 - or r0, r3, r7 1638 - stwcx. r0, 0, r6 1639 - bne 41b 1640 - isync /* order stwcx. vs. reading napping_threads */ 1641 - 1642 - /* 1643 - * At this point we have an interrupt that we have to pass 1644 - * up to the kernel or qemu; we can't handle it in real mode. 1645 - * Thus we have to do a partition switch, so we have to 1646 - * collect the other threads, if we are the first thread 1647 - * to take an interrupt. To do this, we send a message or 1648 - * IPI to all the threads that have their bit set in the entry 1649 - * map in vcore->entry_exit_map (other than ourselves). 1650 - * However, we don't need to bother if this is an HDEC 1651 - * interrupt, since the other threads will already be on their 1652 - * way here in that case. 1653 - */ 1654 - cmpwi r3,0x100 /* Are we the first here? 
*/ 1655 - bge 43f 1656 - cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER 1657 - beq 43f 1658 - 1659 - srwi r0,r7,8 1660 - andc. r3,r3,r0 /* no sense IPI'ing ourselves */ 1661 - beq 43f 1662 - /* Order entry/exit update vs. IPIs */ 1663 - sync 1664 - mulli r4,r4,PACA_SIZE /* get paca for thread 0 */ 1665 - subf r6,r4,r13 1666 - 42: andi. r0,r3,1 1667 - beq 44f 1668 - ld r8,HSTATE_XICS_PHYS(r6) /* get thread's XICS reg addr */ 1669 - li r0,IPI_PRIORITY 1670 - li r7,XICS_MFRR 1671 - stbcix r0,r7,r8 /* trigger the IPI */ 1672 - 44: srdi. r3,r3,1 1673 - addi r6,r6,PACA_SIZE 1674 - bne 42b 1675 - 1676 - 43: ld r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1) 1677 - addi r1, r1, PPC_MIN_STKFRM 1678 1614 mtlr r0 1679 1615 blr 1680 1616