Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/64s: Add workaround for P9 vector CI load issue

POWER9 DD2.1 and earlier have an issue where some cache inhibited
vector loads will return bad data. The workaround has two parts: a
firmware/microcode part that triggers HMI interrupts when hitting such
loads, and this patch, which then emulates the affected
instructions in Linux.

The affected instructions are limited to lxvd2x, lxvw4x, lxvb16x and
lxvh8x.

When an instruction triggers the HMI, all threads in the core will be
sent to the HMI handler, not just the one running the vector load.

In general, these spurious HMIs are detected by the emulation code and
we just return back to the running process. Unfortunately, if a
spurious interrupt occurs on a vector load that's to normal memory we
have no way to detect that it's spurious (unless we walk the page
tables, which is very expensive). In this case we emulate the load, but
we need to do so using a vector load itself to ensure that 128-bit
atomicity is preserved.

Some additional debugfs emulated-instruction counters are also added.

Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
[mpe: Switch CONFIG_PPC_BOOK3S_64 to CONFIG_VSX to unbreak the build]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>

authored by

Michael Neuling and committed by
Michael Ellerman
5080332c b9fde58d

+271 -5
+4
arch/powerpc/include/asm/emulated_ops.h
··· 55 55 struct ppc_emulated_entry mfdscr; 56 56 struct ppc_emulated_entry mtdscr; 57 57 struct ppc_emulated_entry lq_stq; 58 + struct ppc_emulated_entry lxvw4x; 59 + struct ppc_emulated_entry lxvh8x; 60 + struct ppc_emulated_entry lxvd2x; 61 + struct ppc_emulated_entry lxvb16x; 58 62 #endif 59 63 } ppc_emulated; 60 64
+1
arch/powerpc/include/asm/paca.h
··· 210 210 */ 211 211 u16 in_mce; 212 212 u8 hmi_event_available; /* HMI event is available */ 213 + u8 hmi_p9_special_emu; /* HMI P9 special emulation */ 213 214 #endif 214 215 215 216 /* Stuff for accurate time accounting */
+17
arch/powerpc/include/asm/uaccess.h
··· 173 173 174 174 extern long __get_user_bad(void); 175 175 176 + /* 177 + * This does an atomic 128 byte aligned load from userspace. 178 + * Upto caller to do enable_kernel_vmx() before calling! 179 + */ 180 + #define __get_user_atomic_128_aligned(kaddr, uaddr, err) \ 181 + __asm__ __volatile__( \ 182 + "1: lvx 0,0,%1 # get user\n" \ 183 + " stvx 0,0,%2 # put kernel\n" \ 184 + "2:\n" \ 185 + ".section .fixup,\"ax\"\n" \ 186 + "3: li %0,%3\n" \ 187 + " b 2b\n" \ 188 + ".previous\n" \ 189 + EX_TABLE(1b, 3b) \ 190 + : "=r" (err) \ 191 + : "b" (uaddr), "b" (kaddr), "i" (-EFAULT), "0" (err)) 192 + 176 193 #define __get_user_asm(x, addr, err, op) \ 177 194 __asm__ __volatile__( \ 178 195 "1: "op" %1,0(%2) # get_user\n" \
+12 -4
arch/powerpc/kernel/exceptions-64s.S
··· 1010 1010 EXCEPTION_PROLOG_COMMON_3(0xe60) 1011 1011 addi r3,r1,STACK_FRAME_OVERHEAD 1012 1012 BRANCH_LINK_TO_FAR(hmi_exception_realmode) /* Function call ABI */ 1013 + cmpdi cr0,r3,0 1014 + 1013 1015 /* Windup the stack. */ 1014 1016 /* Move original HSRR0 and HSRR1 into the respective regs */ 1015 1017 ld r9,_MSR(r1) ··· 1028 1026 REST_8GPRS(2, r1) 1029 1027 REST_GPR(10, r1) 1030 1028 ld r11,_CCR(r1) 1029 + REST_2GPRS(12, r1) 1030 + bne 1f 1031 1031 mtcr r11 1032 1032 REST_GPR(11, r1) 1033 - REST_2GPRS(12, r1) 1034 - /* restore original r1. */ 1033 + ld r1,GPR1(r1) 1034 + hrfid 1035 + 1036 + 1: mtcr r11 1037 + REST_GPR(11, r1) 1035 1038 ld r1,GPR1(r1) 1036 1039 1037 1040 /* ··· 1049 1042 EXCEPTION_PROLOG_0(PACA_EXGEN) 1050 1043 b tramp_real_hmi_exception 1051 1044 1052 - EXC_COMMON_ASYNC(hmi_exception_common, 0xe60, handle_hmi_exception) 1053 - 1045 + EXC_COMMON_BEGIN(hmi_exception_common) 1046 + EXCEPTION_COMMON(PACA_EXGEN, 0xe60, hmi_exception_common, handle_hmi_exception, 1047 + ret_from_except, FINISH_NAP;ADD_NVGPRS;ADD_RECONCILE;RUNLATCH_ON) 1054 1048 1055 1049 EXC_REAL_OOL_MASKABLE_HV(h_doorbell, 0xe80, 0x20) 1056 1050 EXC_VIRT_OOL_MASKABLE_HV(h_doorbell, 0x4e80, 0x20, 0xe80)
+29 -1
arch/powerpc/kernel/mce.c
··· 470 470 { 471 471 __this_cpu_inc(irq_stat.hmi_exceptions); 472 472 473 + #ifdef CONFIG_PPC_BOOK3S_64 474 + /* Workaround for P9 vector CI loads (see p9_hmi_special_emu) */ 475 + if (pvr_version_is(PVR_POWER9)) { 476 + unsigned long hmer = mfspr(SPRN_HMER); 477 + 478 + /* Do we have the debug bit set */ 479 + if (hmer & PPC_BIT(17)) { 480 + hmer &= ~PPC_BIT(17); 481 + mtspr(SPRN_HMER, hmer); 482 + 483 + /* 484 + * Now to avoid problems with soft-disable we 485 + * only do the emulation if we are coming from 486 + * user space 487 + */ 488 + if (user_mode(regs)) 489 + local_paca->hmi_p9_special_emu = 1; 490 + 491 + /* 492 + * Don't bother going to OPAL if that's the 493 + * only relevant bit. 494 + */ 495 + if (!(hmer & mfspr(SPRN_HMEER))) 496 + return local_paca->hmi_p9_special_emu; 497 + } 498 + } 499 + #endif /* CONFIG_PPC_BOOK3S_64 */ 500 + 473 501 wait_for_subcore_guest_exit(); 474 502 475 503 if (ppc_md.hmi_exception_early) ··· 505 477 506 478 wait_for_tb_resync(); 507 479 508 - return 0; 480 + return 1; 509 481 }
+201
arch/powerpc/kernel/traps.c
··· 37 37 #include <linux/kdebug.h> 38 38 #include <linux/ratelimit.h> 39 39 #include <linux/context_tracking.h> 40 + #include <linux/smp.h> 40 41 41 42 #include <asm/emulated_ops.h> 42 43 #include <asm/pgtable.h> ··· 700 699 die("System Management Interrupt", regs, SIGABRT); 701 700 } 702 701 702 + #ifdef CONFIG_VSX 703 + static void p9_hmi_special_emu(struct pt_regs *regs) 704 + { 705 + unsigned int ra, rb, t, i, sel, instr, rc; 706 + const void __user *addr; 707 + u8 vbuf[16], *vdst; 708 + unsigned long ea, msr, msr_mask; 709 + bool swap; 710 + 711 + if (__get_user_inatomic(instr, (unsigned int __user *)regs->nip)) 712 + return; 713 + 714 + /* 715 + * lxvb16x opcode: 0x7c0006d8 716 + * lxvd2x opcode: 0x7c000698 717 + * lxvh8x opcode: 0x7c000658 718 + * lxvw4x opcode: 0x7c000618 719 + */ 720 + if ((instr & 0xfc00073e) != 0x7c000618) { 721 + pr_devel("HMI vec emu: not vector CI %i:%s[%d] nip=%016lx" 722 + " instr=%08x\n", 723 + smp_processor_id(), current->comm, current->pid, 724 + regs->nip, instr); 725 + return; 726 + } 727 + 728 + /* Grab vector registers into the task struct */ 729 + msr = regs->msr; /* Grab msr before we flush the bits */ 730 + flush_vsx_to_thread(current); 731 + enable_kernel_altivec(); 732 + 733 + /* 734 + * Is userspace running with a different endian (this is rare but 735 + * not impossible) 736 + */ 737 + swap = (msr & MSR_LE) != (MSR_KERNEL & MSR_LE); 738 + 739 + /* Decode the instruction */ 740 + ra = (instr >> 16) & 0x1f; 741 + rb = (instr >> 11) & 0x1f; 742 + t = (instr >> 21) & 0x1f; 743 + if (instr & 1) 744 + vdst = (u8 *)&current->thread.vr_state.vr[t]; 745 + else 746 + vdst = (u8 *)&current->thread.fp_state.fpr[t][0]; 747 + 748 + /* Grab the vector address */ 749 + ea = regs->gpr[rb] + (ra ? 
regs->gpr[ra] : 0); 750 + if (is_32bit_task()) 751 + ea &= 0xfffffffful; 752 + addr = (__force const void __user *)ea; 753 + 754 + /* Check it */ 755 + if (!access_ok(VERIFY_READ, addr, 16)) { 756 + pr_devel("HMI vec emu: bad access %i:%s[%d] nip=%016lx" 757 + " instr=%08x addr=%016lx\n", 758 + smp_processor_id(), current->comm, current->pid, 759 + regs->nip, instr, (unsigned long)addr); 760 + return; 761 + } 762 + 763 + /* Read the vector */ 764 + rc = 0; 765 + if ((unsigned long)addr & 0xfUL) 766 + /* unaligned case */ 767 + rc = __copy_from_user_inatomic(vbuf, addr, 16); 768 + else 769 + __get_user_atomic_128_aligned(vbuf, addr, rc); 770 + if (rc) { 771 + pr_devel("HMI vec emu: page fault %i:%s[%d] nip=%016lx" 772 + " instr=%08x addr=%016lx\n", 773 + smp_processor_id(), current->comm, current->pid, 774 + regs->nip, instr, (unsigned long)addr); 775 + return; 776 + } 777 + 778 + pr_devel("HMI vec emu: emulated vector CI %i:%s[%d] nip=%016lx" 779 + " instr=%08x addr=%016lx\n", 780 + smp_processor_id(), current->comm, current->pid, regs->nip, 781 + instr, (unsigned long) addr); 782 + 783 + /* Grab instruction "selector" */ 784 + sel = (instr >> 6) & 3; 785 + 786 + /* 787 + * Check to make sure the facility is actually enabled. This 788 + * could happen if we get a false positive hit. 
789 + * 790 + * lxvd2x/lxvw4x always check MSR VSX sel = 0,2 791 + * lxvh8x/lxvb16x check MSR VSX or VEC depending on VSR used sel = 1,3 792 + */ 793 + msr_mask = MSR_VSX; 794 + if ((sel & 1) && (instr & 1)) /* lxvh8x & lxvb16x + VSR >= 32 */ 795 + msr_mask = MSR_VEC; 796 + if (!(msr & msr_mask)) { 797 + pr_devel("HMI vec emu: MSR fac clear %i:%s[%d] nip=%016lx" 798 + " instr=%08x msr:%016lx\n", 799 + smp_processor_id(), current->comm, current->pid, 800 + regs->nip, instr, msr); 801 + return; 802 + } 803 + 804 + /* Do logging here before we modify sel based on endian */ 805 + switch (sel) { 806 + case 0: /* lxvw4x */ 807 + PPC_WARN_EMULATED(lxvw4x, regs); 808 + break; 809 + case 1: /* lxvh8x */ 810 + PPC_WARN_EMULATED(lxvh8x, regs); 811 + break; 812 + case 2: /* lxvd2x */ 813 + PPC_WARN_EMULATED(lxvd2x, regs); 814 + break; 815 + case 3: /* lxvb16x */ 816 + PPC_WARN_EMULATED(lxvb16x, regs); 817 + break; 818 + } 819 + 820 + #ifdef __LITTLE_ENDIAN__ 821 + /* 822 + * An LE kernel stores the vector in the task struct as an LE 823 + * byte array (effectively swapping both the components and 824 + * the content of the components). Those instructions expect 825 + * the components to remain in ascending address order, so we 826 + * swap them back. 827 + * 828 + * If we are running a BE user space, the expectation is that 829 + * of a simple memcpy, so forcing the emulation to look like 830 + * a lxvb16x should do the trick. 
831 + */ 832 + if (swap) 833 + sel = 3; 834 + 835 + switch (sel) { 836 + case 0: /* lxvw4x */ 837 + for (i = 0; i < 4; i++) 838 + ((u32 *)vdst)[i] = ((u32 *)vbuf)[3-i]; 839 + break; 840 + case 1: /* lxvh8x */ 841 + for (i = 0; i < 8; i++) 842 + ((u16 *)vdst)[i] = ((u16 *)vbuf)[7-i]; 843 + break; 844 + case 2: /* lxvd2x */ 845 + for (i = 0; i < 2; i++) 846 + ((u64 *)vdst)[i] = ((u64 *)vbuf)[1-i]; 847 + break; 848 + case 3: /* lxvb16x */ 849 + for (i = 0; i < 16; i++) 850 + vdst[i] = vbuf[15-i]; 851 + break; 852 + } 853 + #else /* __LITTLE_ENDIAN__ */ 854 + /* On a big endian kernel, a BE userspace only needs a memcpy */ 855 + if (!swap) 856 + sel = 3; 857 + 858 + /* Otherwise, we need to swap the content of the components */ 859 + switch (sel) { 860 + case 0: /* lxvw4x */ 861 + for (i = 0; i < 4; i++) 862 + ((u32 *)vdst)[i] = cpu_to_le32(((u32 *)vbuf)[i]); 863 + break; 864 + case 1: /* lxvh8x */ 865 + for (i = 0; i < 8; i++) 866 + ((u16 *)vdst)[i] = cpu_to_le16(((u16 *)vbuf)[i]); 867 + break; 868 + case 2: /* lxvd2x */ 869 + for (i = 0; i < 2; i++) 870 + ((u64 *)vdst)[i] = cpu_to_le64(((u64 *)vbuf)[i]); 871 + break; 872 + case 3: /* lxvb16x */ 873 + memcpy(vdst, vbuf, 16); 874 + break; 875 + } 876 + #endif /* !__LITTLE_ENDIAN__ */ 877 + 878 + /* Go to next instruction */ 879 + regs->nip += 4; 880 + } 881 + #endif /* CONFIG_VSX */ 882 + 703 883 void handle_hmi_exception(struct pt_regs *regs) 704 884 { 705 885 struct pt_regs *old_regs; 706 886 707 887 old_regs = set_irq_regs(regs); 708 888 irq_enter(); 889 + 890 + #ifdef CONFIG_VSX 891 + /* Real mode flagged P9 special emu is needed */ 892 + if (local_paca->hmi_p9_special_emu) { 893 + local_paca->hmi_p9_special_emu = 0; 894 + 895 + /* 896 + * We don't want to take page faults while doing the 897 + * emulation, we just replay the instruction if necessary. 
898 + */ 899 + pagefault_disable(); 900 + p9_hmi_special_emu(regs); 901 + pagefault_enable(); 902 + } 903 + #endif /* CONFIG_VSX */ 709 904 710 905 if (ppc_md.handle_hmi_exception) 711 906 ppc_md.handle_hmi_exception(regs); ··· 2121 1924 WARN_EMULATED_SETUP(mfdscr), 2122 1925 WARN_EMULATED_SETUP(mtdscr), 2123 1926 WARN_EMULATED_SETUP(lq_stq), 1927 + WARN_EMULATED_SETUP(lxvw4x), 1928 + WARN_EMULATED_SETUP(lxvh8x), 1929 + WARN_EMULATED_SETUP(lxvd2x), 1930 + WARN_EMULATED_SETUP(lxvb16x), 2124 1931 #endif 2125 1932 }; 2126 1933
+7
arch/powerpc/platforms/powernv/smp.c
··· 49 49 50 50 static void pnv_smp_setup_cpu(int cpu) 51 51 { 52 + /* 53 + * P9 workaround for CI vector load (see traps.c), 54 + * enable the corresponding HMI interrupt 55 + */ 56 + if (pvr_version_is(PVR_POWER9)) 57 + mtspr(SPRN_HMEER, mfspr(SPRN_HMEER) | PPC_BIT(17)); 58 + 52 59 if (xive_enabled()) 53 60 xive_smp_setup_cpu(); 54 61 else if (cpu != boot_cpuid)