Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'powerpc-cve-2020-4788' into fixes

From Daniel's cover letter:

IBM Power9 processors can speculatively operate on data in the L1 cache
before it has been completely validated, via a way-prediction mechanism. It
is not possible for an attacker to determine the contents of impermissible
memory using this method, since these systems implement a combination of
hardware and software security measures to prevent scenarios where
protected data could be leaked.

However these measures don't address the scenario where an attacker induces
the operating system to speculatively execute instructions using data that
the attacker controls. This can be used for example to speculatively bypass
"kernel user access prevention" techniques, as discovered by Anthony
Steinhauser of Google's SafeSide project. This is not an attack by itself,
but there is a possibility it could be used in conjunction with
side-channels or other weaknesses in the privileged code to construct an
attack.

This issue can be mitigated by flushing the L1 cache when crossing privilege
boundaries of concern.

This patch series flushes the L1 cache on kernel entry (patch 2) and after the
kernel performs any user accesses (patch 3). It also adds a self-test and
performs some related cleanups.

+693 -147
+7
Documentation/admin-guide/kernel-parameters.txt
··· 2858 2858 mds=off [X86] 2859 2859 tsx_async_abort=off [X86] 2860 2860 kvm.nx_huge_pages=off [X86] 2861 + no_entry_flush [PPC] 2862 + no_uaccess_flush [PPC] 2861 2863 2862 2864 Exceptions: 2863 2865 This does not have any effect on ··· 3188 3186 3189 3187 noefi Disable EFI runtime services support. 3190 3188 3189 + no_entry_flush [PPC] Don't flush the L1-D cache when entering the kernel. 3190 + 3191 3191 noexec [IA-64] 3192 3192 3193 3193 noexec [X86] ··· 3238 3234 3239 3235 nospec_store_bypass_disable 3240 3236 [HW] Disable all mitigations for the Speculative Store Bypass vulnerability 3237 + 3238 + no_uaccess_flush 3239 + [PPC] Don't flush the L1-D cache after accessing user data. 3241 3240 3242 3241 noxsave [BUGS=X86] Disables x86 extended register state save 3243 3242 and restore using xsave. The kernel will fallback to
+42 -24
arch/powerpc/include/asm/book3s/64/kup-radix.h
··· 27 27 #endif 28 28 .endm 29 29 30 + #ifdef CONFIG_PPC_KUAP 30 31 .macro kuap_check_amr gpr1, gpr2 31 32 #ifdef CONFIG_PPC_KUAP_DEBUG 32 33 BEGIN_MMU_FTR_SECTION_NESTED(67) ··· 39 38 END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_RADIX_KUAP, 67) 40 39 #endif 41 40 .endm 41 + #endif 42 42 43 43 .macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr 44 44 #ifdef CONFIG_PPC_KUAP ··· 62 60 .endm 63 61 64 62 #else /* !__ASSEMBLY__ */ 63 + 64 + DECLARE_STATIC_KEY_FALSE(uaccess_flush_key); 65 65 66 66 #ifdef CONFIG_PPC_KUAP 67 67 ··· 107 103 108 104 static inline unsigned long get_kuap(void) 109 105 { 106 + /* 107 + * We return AMR_KUAP_BLOCKED when we don't support KUAP because 108 + * prevent_user_access_return needs to return AMR_KUAP_BLOCKED to 109 + * cause restore_user_access to do a flush. 110 + * 111 + * This has no effect in terms of actually blocking things on hash, 112 + * so it doesn't break anything. 113 + */ 110 114 if (!early_mmu_has_feature(MMU_FTR_RADIX_KUAP)) 111 - return 0; 115 + return AMR_KUAP_BLOCKED; 112 116 113 117 return mfspr(SPRN_AMR); 114 118 } ··· 134 122 mtspr(SPRN_AMR, value); 135 123 isync(); 136 124 } 125 + 126 + static inline bool 127 + bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write) 128 + { 129 + return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) && 130 + (regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)), 131 + "Bug: %s fault blocked by AMR!", is_write ? 
"Write" : "Read"); 132 + } 133 + #else /* CONFIG_PPC_KUAP */ 134 + static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr) { } 135 + 136 + static inline unsigned long kuap_get_and_check_amr(void) 137 + { 138 + return 0UL; 139 + } 140 + 141 + static inline unsigned long get_kuap(void) 142 + { 143 + return AMR_KUAP_BLOCKED; 144 + } 145 + 146 + static inline void set_kuap(unsigned long value) { } 147 + #endif /* !CONFIG_PPC_KUAP */ 137 148 138 149 static __always_inline void allow_user_access(void __user *to, const void __user *from, 139 150 unsigned long size, unsigned long dir) ··· 177 142 unsigned long size, unsigned long dir) 178 143 { 179 144 set_kuap(AMR_KUAP_BLOCKED); 145 + if (static_branch_unlikely(&uaccess_flush_key)) 146 + do_uaccess_flush(); 180 147 } 181 148 182 149 static inline unsigned long prevent_user_access_return(void) ··· 186 149 unsigned long flags = get_kuap(); 187 150 188 151 set_kuap(AMR_KUAP_BLOCKED); 152 + if (static_branch_unlikely(&uaccess_flush_key)) 153 + do_uaccess_flush(); 189 154 190 155 return flags; 191 156 } ··· 195 156 static inline void restore_user_access(unsigned long flags) 196 157 { 197 158 set_kuap(flags); 159 + if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED) 160 + do_uaccess_flush(); 198 161 } 199 - 200 - static inline bool 201 - bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write) 202 - { 203 - return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) && 204 - (regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)), 205 - "Bug: %s fault blocked by AMR!", is_write ? 
"Write" : "Read"); 206 - } 207 - #else /* CONFIG_PPC_KUAP */ 208 - static inline void kuap_restore_amr(struct pt_regs *regs, unsigned long amr) 209 - { 210 - } 211 - 212 - static inline void kuap_check_amr(void) 213 - { 214 - } 215 - 216 - static inline unsigned long kuap_get_and_check_amr(void) 217 - { 218 - return 0; 219 - } 220 - #endif /* CONFIG_PPC_KUAP */ 221 - 222 162 #endif /* __ASSEMBLY__ */ 223 163 224 164 #endif /* _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H */
+11 -1
arch/powerpc/include/asm/exception-64s.h
··· 57 57 nop; \ 58 58 nop 59 59 60 + #define ENTRY_FLUSH_SLOT \ 61 + ENTRY_FLUSH_FIXUP_SECTION; \ 62 + nop; \ 63 + nop; \ 64 + nop; 65 + 60 66 /* 61 67 * r10 must be free to use, r13 must be paca 62 68 */ 63 69 #define INTERRUPT_TO_KERNEL \ 64 - STF_ENTRY_BARRIER_SLOT 70 + STF_ENTRY_BARRIER_SLOT; \ 71 + ENTRY_FLUSH_SLOT 65 72 66 73 /* 67 74 * Macros for annotating the expected destination of (h)rfid ··· 144 137 RFSCV; \ 145 138 b rfscv_flush_fallback 146 139 140 + #else /* __ASSEMBLY__ */ 141 + /* Prototype for function defined in exceptions-64s.S */ 142 + void do_uaccess_flush(void); 147 143 #endif /* __ASSEMBLY__ */ 148 144 149 145 #endif /* _ASM_POWERPC_EXCEPTION_H */
+19
arch/powerpc/include/asm/feature-fixups.h
··· 205 205 FTR_ENTRY_OFFSET 955b-956b; \ 206 206 .popsection; 207 207 208 + #define UACCESS_FLUSH_FIXUP_SECTION \ 209 + 959: \ 210 + .pushsection __uaccess_flush_fixup,"a"; \ 211 + .align 2; \ 212 + 960: \ 213 + FTR_ENTRY_OFFSET 959b-960b; \ 214 + .popsection; 215 + 216 + #define ENTRY_FLUSH_FIXUP_SECTION \ 217 + 957: \ 218 + .pushsection __entry_flush_fixup,"a"; \ 219 + .align 2; \ 220 + 958: \ 221 + FTR_ENTRY_OFFSET 957b-958b; \ 222 + .popsection; 223 + 208 224 #define RFI_FLUSH_FIXUP_SECTION \ 209 225 951: \ 210 226 .pushsection __rfi_flush_fixup,"a"; \ ··· 253 237 #include <linux/types.h> 254 238 255 239 extern long stf_barrier_fallback; 240 + extern long entry_flush_fallback; 256 241 extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup; 257 242 extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup; 243 + extern long __start___uaccess_flush_fixup, __stop___uaccess_flush_fixup; 244 + extern long __start___entry_flush_fixup, __stop___entry_flush_fixup; 258 245 extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup; 259 246 extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup; 260 247 extern long __start__btb_flush_fixup, __stop__btb_flush_fixup;
+20 -6
arch/powerpc/include/asm/kup.h
··· 14 14 #define KUAP_CURRENT_WRITE 8 15 15 #define KUAP_CURRENT (KUAP_CURRENT_READ | KUAP_CURRENT_WRITE) 16 16 17 - #ifdef CONFIG_PPC64 17 + #ifdef CONFIG_PPC_BOOK3S_64 18 18 #include <asm/book3s/64/kup-radix.h> 19 19 #endif 20 20 #ifdef CONFIG_PPC_8xx ··· 35 35 .macro kuap_check current, gpr 36 36 .endm 37 37 38 + .macro kuap_check_amr gpr1, gpr2 39 + .endm 40 + 38 41 #endif 39 42 40 43 #else /* !__ASSEMBLY__ */ ··· 56 53 void setup_kuap(bool disabled); 57 54 #else 58 55 static inline void setup_kuap(bool disabled) { } 56 + 57 + static inline bool 58 + bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write) 59 + { 60 + return false; 61 + } 62 + 63 + static inline void kuap_check_amr(void) { } 64 + 65 + /* 66 + * book3s/64/kup-radix.h defines these functions for the !KUAP case to flush 67 + * the L1D cache after user accesses. Only include the empty stubs for other 68 + * platforms. 69 + */ 70 + #ifndef CONFIG_PPC_BOOK3S_64 59 71 static inline void allow_user_access(void __user *to, const void __user *from, 60 72 unsigned long size, unsigned long dir) { } 61 73 static inline void prevent_user_access(void __user *to, const void __user *from, 62 74 unsigned long size, unsigned long dir) { } 63 75 static inline unsigned long prevent_user_access_return(void) { return 0UL; } 64 76 static inline void restore_user_access(unsigned long flags) { } 65 - static inline bool 66 - bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write) 67 - { 68 - return false; 69 - } 77 + #endif /* CONFIG_PPC_BOOK3S_64 */ 70 78 #endif /* CONFIG_PPC_KUAP */ 71 79 72 80 static inline void allow_read_from_user(const void __user *from, unsigned long size)
+7
arch/powerpc/include/asm/security_features.h
··· 86 86 // Software required to flush link stack on context switch 87 87 #define SEC_FTR_FLUSH_LINK_STACK 0x0000000000001000ull 88 88 89 + // The L1-D cache should be flushed when entering the kernel 90 + #define SEC_FTR_L1D_FLUSH_ENTRY 0x0000000000004000ull 91 + 92 + // The L1-D cache should be flushed after user accesses from the kernel 93 + #define SEC_FTR_L1D_FLUSH_UACCESS 0x0000000000008000ull 89 94 90 95 // Features enabled by default 91 96 #define SEC_FTR_DEFAULT \ 92 97 (SEC_FTR_L1D_FLUSH_HV | \ 93 98 SEC_FTR_L1D_FLUSH_PR | \ 94 99 SEC_FTR_BNDS_CHK_SPEC_BAR | \ 100 + SEC_FTR_L1D_FLUSH_ENTRY | \ 101 + SEC_FTR_L1D_FLUSH_UACCESS | \ 95 102 SEC_FTR_FAVOUR_SECURITY) 96 103 97 104 #endif /* _ASM_POWERPC_SECURITY_FEATURES_H */
+4
arch/powerpc/include/asm/setup.h
··· 52 52 }; 53 53 54 54 void setup_rfi_flush(enum l1d_flush_type, bool enable); 55 + void setup_entry_flush(bool enable); 56 + void setup_uaccess_flush(bool enable); 55 57 void do_rfi_flush_fixups(enum l1d_flush_type types); 56 58 #ifdef CONFIG_PPC_BARRIER_NOSPEC 57 59 void setup_barrier_nospec(void); 58 60 #else 59 61 static inline void setup_barrier_nospec(void) { }; 60 62 #endif 63 + void do_uaccess_flush_fixups(enum l1d_flush_type types); 64 + void do_entry_flush_fixups(enum l1d_flush_type types); 61 65 void do_barrier_nospec_fixups(bool enable); 62 66 extern bool barrier_nospec_enabled; 63 67
+42 -38
arch/powerpc/kernel/exceptions-64s.S
··· 2952 2952 .endr 2953 2953 blr 2954 2954 2955 - TRAMP_REAL_BEGIN(rfi_flush_fallback) 2956 - SET_SCRATCH0(r13); 2957 - GET_PACA(r13); 2958 - std r1,PACA_EXRFI+EX_R12(r13) 2959 - ld r1,PACAKSAVE(r13) 2960 - std r9,PACA_EXRFI+EX_R9(r13) 2961 - std r10,PACA_EXRFI+EX_R10(r13) 2962 - std r11,PACA_EXRFI+EX_R11(r13) 2963 - mfctr r9 2955 + /* Clobbers r10, r11, ctr */ 2956 + .macro L1D_DISPLACEMENT_FLUSH 2964 2957 ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) 2965 2958 ld r11,PACA_L1D_FLUSH_SIZE(r13) 2966 2959 srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */ ··· 2964 2971 sync 2965 2972 2966 2973 /* 2967 - * The load adresses are at staggered offsets within cachelines, 2974 + * The load addresses are at staggered offsets within cachelines, 2968 2975 * which suits some pipelines better (on others it should not 2969 2976 * hurt). 2970 2977 */ ··· 2979 2986 ld r11,(0x80 + 8)*7(r10) 2980 2987 addi r10,r10,0x80*8 2981 2988 bdnz 1b 2989 + .endm 2982 2990 2991 + TRAMP_REAL_BEGIN(entry_flush_fallback) 2992 + std r9,PACA_EXRFI+EX_R9(r13) 2993 + std r10,PACA_EXRFI+EX_R10(r13) 2994 + std r11,PACA_EXRFI+EX_R11(r13) 2995 + mfctr r9 2996 + L1D_DISPLACEMENT_FLUSH 2997 + mtctr r9 2998 + ld r9,PACA_EXRFI+EX_R9(r13) 2999 + ld r10,PACA_EXRFI+EX_R10(r13) 3000 + ld r11,PACA_EXRFI+EX_R11(r13) 3001 + blr 3002 + 3003 + TRAMP_REAL_BEGIN(rfi_flush_fallback) 3004 + SET_SCRATCH0(r13); 3005 + GET_PACA(r13); 3006 + std r1,PACA_EXRFI+EX_R12(r13) 3007 + ld r1,PACAKSAVE(r13) 3008 + std r9,PACA_EXRFI+EX_R9(r13) 3009 + std r10,PACA_EXRFI+EX_R10(r13) 3010 + std r11,PACA_EXRFI+EX_R11(r13) 3011 + mfctr r9 3012 + L1D_DISPLACEMENT_FLUSH 2983 3013 mtctr r9 2984 3014 ld r9,PACA_EXRFI+EX_R9(r13) 2985 3015 ld r10,PACA_EXRFI+EX_R10(r13) ··· 3020 3004 std r10,PACA_EXRFI+EX_R10(r13) 3021 3005 std r11,PACA_EXRFI+EX_R11(r13) 3022 3006 mfctr r9 3023 - ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13) 3024 - ld r11,PACA_L1D_FLUSH_SIZE(r13) 3025 - srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */ 3026 - mtctr r11 3027 - 
DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */ 3028 - 3029 - /* order ld/st prior to dcbt stop all streams with flushing */ 3030 - sync 3031 - 3032 - /* 3033 - * The load adresses are at staggered offsets within cachelines, 3034 - * which suits some pipelines better (on others it should not 3035 - * hurt). 3036 - */ 3037 - 1: 3038 - ld r11,(0x80 + 8)*0(r10) 3039 - ld r11,(0x80 + 8)*1(r10) 3040 - ld r11,(0x80 + 8)*2(r10) 3041 - ld r11,(0x80 + 8)*3(r10) 3042 - ld r11,(0x80 + 8)*4(r10) 3043 - ld r11,(0x80 + 8)*5(r10) 3044 - ld r11,(0x80 + 8)*6(r10) 3045 - ld r11,(0x80 + 8)*7(r10) 3046 - addi r10,r10,0x80*8 3047 - bdnz 1b 3048 - 3007 + L1D_DISPLACEMENT_FLUSH 3049 3008 mtctr r9 3050 3009 ld r9,PACA_EXRFI+EX_R9(r13) 3051 3010 ld r10,PACA_EXRFI+EX_R10(r13) ··· 3071 3080 RFSCV 3072 3081 3073 3082 USE_TEXT_SECTION() 3074 - MASKED_INTERRUPT 3075 - MASKED_INTERRUPT hsrr=1 3083 + 3084 + _GLOBAL(do_uaccess_flush) 3085 + UACCESS_FLUSH_FIXUP_SECTION 3086 + nop 3087 + nop 3088 + nop 3089 + blr 3090 + L1D_DISPLACEMENT_FLUSH 3091 + blr 3092 + _ASM_NOKPROBE_SYMBOL(do_uaccess_flush) 3093 + EXPORT_SYMBOL(do_uaccess_flush) 3094 + 3095 + 3096 + MASKED_INTERRUPT 3097 + MASKED_INTERRUPT hsrr=1 3076 3098 3077 3099 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER 3078 3100 kvmppc_skip_interrupt:
+121 -1
arch/powerpc/kernel/setup_64.c
··· 945 945 static enum l1d_flush_type enabled_flush_types; 946 946 static void *l1d_flush_fallback_area; 947 947 static bool no_rfi_flush; 948 + static bool no_entry_flush; 949 + static bool no_uaccess_flush; 948 950 bool rfi_flush; 951 + bool entry_flush; 952 + bool uaccess_flush; 953 + DEFINE_STATIC_KEY_FALSE(uaccess_flush_key); 954 + EXPORT_SYMBOL(uaccess_flush_key); 949 955 950 956 static int __init handle_no_rfi_flush(char *p) 951 957 { ··· 960 954 return 0; 961 955 } 962 956 early_param("no_rfi_flush", handle_no_rfi_flush); 957 + 958 + static int __init handle_no_entry_flush(char *p) 959 + { 960 + pr_info("entry-flush: disabled on command line."); 961 + no_entry_flush = true; 962 + return 0; 963 + } 964 + early_param("no_entry_flush", handle_no_entry_flush); 965 + 966 + static int __init handle_no_uaccess_flush(char *p) 967 + { 968 + pr_info("uaccess-flush: disabled on command line."); 969 + no_uaccess_flush = true; 970 + return 0; 971 + } 972 + early_param("no_uaccess_flush", handle_no_uaccess_flush); 963 973 964 974 /* 965 975 * The RFI flush is not KPTI, but because users will see doco that says to use ··· 1006 984 do_rfi_flush_fixups(L1D_FLUSH_NONE); 1007 985 1008 986 rfi_flush = enable; 987 + } 988 + 989 + void entry_flush_enable(bool enable) 990 + { 991 + if (enable) { 992 + do_entry_flush_fixups(enabled_flush_types); 993 + on_each_cpu(do_nothing, NULL, 1); 994 + } else { 995 + do_entry_flush_fixups(L1D_FLUSH_NONE); 996 + } 997 + 998 + entry_flush = enable; 999 + } 1000 + 1001 + void uaccess_flush_enable(bool enable) 1002 + { 1003 + if (enable) { 1004 + do_uaccess_flush_fixups(enabled_flush_types); 1005 + static_branch_enable(&uaccess_flush_key); 1006 + on_each_cpu(do_nothing, NULL, 1); 1007 + } else { 1008 + static_branch_disable(&uaccess_flush_key); 1009 + do_uaccess_flush_fixups(L1D_FLUSH_NONE); 1010 + } 1011 + 1012 + uaccess_flush = enable; 1009 1013 } 1010 1014 1011 1015 static void __ref init_fallback_flush(void) ··· 1092 1044 1093 1045 
enabled_flush_types = types; 1094 1046 1095 - if (!no_rfi_flush && !cpu_mitigations_off()) 1047 + if (!cpu_mitigations_off() && !no_rfi_flush) 1096 1048 rfi_flush_enable(enable); 1049 + } 1050 + 1051 + void setup_entry_flush(bool enable) 1052 + { 1053 + if (cpu_mitigations_off()) 1054 + return; 1055 + 1056 + if (!no_entry_flush) 1057 + entry_flush_enable(enable); 1058 + } 1059 + 1060 + void setup_uaccess_flush(bool enable) 1061 + { 1062 + if (cpu_mitigations_off()) 1063 + return; 1064 + 1065 + if (!no_uaccess_flush) 1066 + uaccess_flush_enable(enable); 1097 1067 } 1098 1068 1099 1069 #ifdef CONFIG_DEBUG_FS ··· 1141 1075 1142 1076 DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n"); 1143 1077 1078 + static int entry_flush_set(void *data, u64 val) 1079 + { 1080 + bool enable; 1081 + 1082 + if (val == 1) 1083 + enable = true; 1084 + else if (val == 0) 1085 + enable = false; 1086 + else 1087 + return -EINVAL; 1088 + 1089 + /* Only do anything if we're changing state */ 1090 + if (enable != entry_flush) 1091 + entry_flush_enable(enable); 1092 + 1093 + return 0; 1094 + } 1095 + 1096 + static int entry_flush_get(void *data, u64 *val) 1097 + { 1098 + *val = entry_flush ? 1 : 0; 1099 + return 0; 1100 + } 1101 + 1102 + DEFINE_SIMPLE_ATTRIBUTE(fops_entry_flush, entry_flush_get, entry_flush_set, "%llu\n"); 1103 + 1104 + static int uaccess_flush_set(void *data, u64 val) 1105 + { 1106 + bool enable; 1107 + 1108 + if (val == 1) 1109 + enable = true; 1110 + else if (val == 0) 1111 + enable = false; 1112 + else 1113 + return -EINVAL; 1114 + 1115 + /* Only do anything if we're changing state */ 1116 + if (enable != uaccess_flush) 1117 + uaccess_flush_enable(enable); 1118 + 1119 + return 0; 1120 + } 1121 + 1122 + static int uaccess_flush_get(void *data, u64 *val) 1123 + { 1124 + *val = uaccess_flush ? 
1 : 0; 1125 + return 0; 1126 + } 1127 + 1128 + DEFINE_SIMPLE_ATTRIBUTE(fops_uaccess_flush, uaccess_flush_get, uaccess_flush_set, "%llu\n"); 1129 + 1144 1130 static __init int rfi_flush_debugfs_init(void) 1145 1131 { 1146 1132 debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush); 1133 + debugfs_create_file("entry_flush", 0600, powerpc_debugfs_root, NULL, &fops_entry_flush); 1134 + debugfs_create_file("uaccess_flush", 0600, powerpc_debugfs_root, NULL, &fops_uaccess_flush); 1147 1135 return 0; 1148 1136 } 1149 1137 device_initcall(rfi_flush_debugfs_init);
+1 -1
arch/powerpc/kernel/syscall_64.c
··· 2 2 3 3 #include <linux/err.h> 4 4 #include <asm/asm-prototypes.h> 5 - #include <asm/book3s/64/kup-radix.h> 5 + #include <asm/kup.h> 6 6 #include <asm/cputime.h> 7 7 #include <asm/hw_irq.h> 8 8 #include <asm/kprobes.h>
+14
arch/powerpc/kernel/vmlinux.lds.S
··· 132 132 } 133 133 134 134 . = ALIGN(8); 135 + __uaccess_flush_fixup : AT(ADDR(__uaccess_flush_fixup) - LOAD_OFFSET) { 136 + __start___uaccess_flush_fixup = .; 137 + *(__uaccess_flush_fixup) 138 + __stop___uaccess_flush_fixup = .; 139 + } 140 + 141 + . = ALIGN(8); 142 + __entry_flush_fixup : AT(ADDR(__entry_flush_fixup) - LOAD_OFFSET) { 143 + __start___entry_flush_fixup = .; 144 + *(__entry_flush_fixup) 145 + __stop___entry_flush_fixup = .; 146 + } 147 + 148 + . = ALIGN(8); 135 149 __stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) { 136 150 __start___stf_exit_barrier_fixup = .; 137 151 *(__stf_exit_barrier_fixup)
+104
arch/powerpc/lib/feature-fixups.c
··· 234 234 do_stf_exit_barrier_fixups(types); 235 235 } 236 236 237 + void do_uaccess_flush_fixups(enum l1d_flush_type types) 238 + { 239 + unsigned int instrs[4], *dest; 240 + long *start, *end; 241 + int i; 242 + 243 + start = PTRRELOC(&__start___uaccess_flush_fixup); 244 + end = PTRRELOC(&__stop___uaccess_flush_fixup); 245 + 246 + instrs[0] = 0x60000000; /* nop */ 247 + instrs[1] = 0x60000000; /* nop */ 248 + instrs[2] = 0x60000000; /* nop */ 249 + instrs[3] = 0x4e800020; /* blr */ 250 + 251 + i = 0; 252 + if (types == L1D_FLUSH_FALLBACK) { 253 + instrs[3] = 0x60000000; /* nop */ 254 + /* fallthrough to fallback flush */ 255 + } 256 + 257 + if (types & L1D_FLUSH_ORI) { 258 + instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */ 259 + instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/ 260 + } 261 + 262 + if (types & L1D_FLUSH_MTTRIG) 263 + instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */ 264 + 265 + for (i = 0; start < end; start++, i++) { 266 + dest = (void *)start + *start; 267 + 268 + pr_devel("patching dest %lx\n", (unsigned long)dest); 269 + 270 + patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0])); 271 + 272 + patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1])); 273 + patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2])); 274 + patch_instruction((struct ppc_inst *)(dest + 3), ppc_inst(instrs[3])); 275 + } 276 + 277 + printk(KERN_DEBUG "uaccess-flush: patched %d locations (%s flush)\n", i, 278 + (types == L1D_FLUSH_NONE) ? "no" : 279 + (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" : 280 + (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG) 281 + ? "ori+mttrig type" 282 + : "ori type" : 283 + (types & L1D_FLUSH_MTTRIG) ? 
"mttrig type" 284 + : "unknown"); 285 + } 286 + 287 + void do_entry_flush_fixups(enum l1d_flush_type types) 288 + { 289 + unsigned int instrs[3], *dest; 290 + long *start, *end; 291 + int i; 292 + 293 + start = PTRRELOC(&__start___entry_flush_fixup); 294 + end = PTRRELOC(&__stop___entry_flush_fixup); 295 + 296 + instrs[0] = 0x60000000; /* nop */ 297 + instrs[1] = 0x60000000; /* nop */ 298 + instrs[2] = 0x60000000; /* nop */ 299 + 300 + i = 0; 301 + if (types == L1D_FLUSH_FALLBACK) { 302 + instrs[i++] = 0x7d4802a6; /* mflr r10 */ 303 + instrs[i++] = 0x60000000; /* branch patched below */ 304 + instrs[i++] = 0x7d4803a6; /* mtlr r10 */ 305 + } 306 + 307 + if (types & L1D_FLUSH_ORI) { 308 + instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */ 309 + instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/ 310 + } 311 + 312 + if (types & L1D_FLUSH_MTTRIG) 313 + instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */ 314 + 315 + for (i = 0; start < end; start++, i++) { 316 + dest = (void *)start + *start; 317 + 318 + pr_devel("patching dest %lx\n", (unsigned long)dest); 319 + 320 + patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0])); 321 + 322 + if (types == L1D_FLUSH_FALLBACK) 323 + patch_branch((struct ppc_inst *)(dest + 1), (unsigned long)&entry_flush_fallback, 324 + BRANCH_SET_LINK); 325 + else 326 + patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1])); 327 + 328 + patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2])); 329 + } 330 + 331 + printk(KERN_DEBUG "entry-flush: patched %d locations (%s flush)\n", i, 332 + (types == L1D_FLUSH_NONE) ? "no" : 333 + (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" : 334 + (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG) 335 + ? "ori+mttrig type" 336 + : "ori type" : 337 + (types & L1D_FLUSH_MTTRIG) ? "mttrig type" 338 + : "unknown"); 339 + } 340 + 237 341 void do_rfi_flush_fixups(enum l1d_flush_type types) 238 342 { 239 343 unsigned int instrs[3], *dest;
+21 -3
arch/powerpc/platforms/powernv/setup.c
··· 98 98 security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR); 99 99 } 100 100 101 - static void pnv_setup_rfi_flush(void) 101 + static void pnv_setup_security_mitigations(void) 102 102 { 103 103 struct device_node *np, *fw_features; 104 104 enum l1d_flush_type type; ··· 122 122 type = L1D_FLUSH_ORI; 123 123 } 124 124 125 + /* 126 + * If we are non-Power9 bare metal, we don't need to flush on kernel 127 + * entry or after user access: they fix a P9 specific vulnerability. 128 + */ 129 + if (!pvr_version_is(PVR_POWER9)) { 130 + security_ftr_clear(SEC_FTR_L1D_FLUSH_ENTRY); 131 + security_ftr_clear(SEC_FTR_L1D_FLUSH_UACCESS); 132 + } 133 + 125 134 enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \ 126 135 (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) || \ 127 136 security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV)); 128 137 129 138 setup_rfi_flush(type, enable); 130 139 setup_count_cache_flush(); 140 + 141 + enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && 142 + security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY); 143 + setup_entry_flush(enable); 144 + 145 + enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && 146 + security_ftr_enabled(SEC_FTR_L1D_FLUSH_UACCESS); 147 + setup_uaccess_flush(enable); 148 + 149 + setup_stf_barrier(); 131 150 } 132 151 133 152 static void __init pnv_check_guarded_cores(void) ··· 175 156 { 176 157 set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); 177 158 178 - pnv_setup_rfi_flush(); 179 - setup_stf_barrier(); 159 + pnv_setup_security_mitigations(); 180 160 181 161 /* Initialize SMP */ 182 162 pnv_smp_init();
+2 -2
arch/powerpc/platforms/pseries/mobility.c
··· 349 349 350 350 cpus_read_unlock(); 351 351 352 - /* Possibly switch to a new RFI flush type */ 353 - pseries_setup_rfi_flush(); 352 + /* Possibly switch to a new L1 flush type */ 353 + pseries_setup_security_mitigations(); 354 354 355 355 /* Reinitialise system information for hv-24x7 */ 356 356 read_24x7_sys_info();
+1 -1
arch/powerpc/platforms/pseries/pseries.h
··· 111 111 112 112 int dlpar_workqueue_init(void); 113 113 114 - void pseries_setup_rfi_flush(void); 114 + void pseries_setup_security_mitigations(void); 115 115 void pseries_lpar_read_hblkrm_characteristics(void); 116 116 117 117 #endif /* _PSERIES_PSERIES_H */
+12 -3
arch/powerpc/platforms/pseries/setup.c
··· 542 542 security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR); 543 543 } 544 544 545 - void pseries_setup_rfi_flush(void) 545 + void pseries_setup_security_mitigations(void) 546 546 { 547 547 struct h_cpu_char_result result; 548 548 enum l1d_flush_type types; ··· 579 579 580 580 setup_rfi_flush(types, enable); 581 581 setup_count_cache_flush(); 582 + 583 + enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && 584 + security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY); 585 + setup_entry_flush(enable); 586 + 587 + enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && 588 + security_ftr_enabled(SEC_FTR_L1D_FLUSH_UACCESS); 589 + setup_uaccess_flush(enable); 590 + 591 + setup_stf_barrier(); 582 592 } 583 593 584 594 #ifdef CONFIG_PCI_IOV ··· 778 768 779 769 fwnmi_init(); 780 770 781 - pseries_setup_rfi_flush(); 782 - setup_stf_barrier(); 771 + pseries_setup_security_mitigations(); 783 772 pseries_lpar_read_hblkrm_characteristics(); 784 773 785 774 /* By default, only probe PCI (can be overridden by rtas_pci) */
+5
tools/testing/selftests/powerpc/include/utils.h
··· 42 42 int perf_event_disable(int fd); 43 43 int perf_event_reset(int fd); 44 44 45 + struct perf_event_read { 46 + __u64 nr; 47 + __u64 l1d_misses; 48 + }; 49 + 45 50 #if !defined(__GLIBC_PREREQ) || !__GLIBC_PREREQ(2, 30) 46 51 #include <unistd.h> 47 52 #include <sys/syscall.h>
+1
tools/testing/selftests/powerpc/security/.gitignore
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 2 rfi_flush 3 + entry_flush
+3 -1
tools/testing/selftests/powerpc/security/Makefile
··· 1 1 # SPDX-License-Identifier: GPL-2.0+ 2 2 3 - TEST_GEN_PROGS := rfi_flush spectre_v2 3 + TEST_GEN_PROGS := rfi_flush entry_flush spectre_v2 4 4 top_srcdir = ../../../../.. 5 5 6 6 CFLAGS += -I../../../../../usr/include ··· 11 11 12 12 $(OUTPUT)/spectre_v2: CFLAGS += -m64 13 13 $(OUTPUT)/spectre_v2: ../pmu/event.c branch_loops.S 14 + $(OUTPUT)/rfi_flush: flush_utils.c 15 + $(OUTPUT)/entry_flush: flush_utils.c
+139
tools/testing/selftests/powerpc/security/entry_flush.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + 3 + /* 4 + * Copyright 2018 IBM Corporation. 5 + */ 6 + 7 + #define __SANE_USERSPACE_TYPES__ 8 + 9 + #include <sys/types.h> 10 + #include <stdint.h> 11 + #include <malloc.h> 12 + #include <unistd.h> 13 + #include <signal.h> 14 + #include <stdlib.h> 15 + #include <string.h> 16 + #include <stdio.h> 17 + #include "utils.h" 18 + #include "flush_utils.h" 19 + 20 + int entry_flush_test(void) 21 + { 22 + char *p; 23 + int repetitions = 10; 24 + int fd, passes = 0, iter, rc = 0; 25 + struct perf_event_read v; 26 + __u64 l1d_misses_total = 0; 27 + unsigned long iterations = 100000, zero_size = 24 * 1024; 28 + unsigned long l1d_misses_expected; 29 + int rfi_flush_orig; 30 + int entry_flush, entry_flush_orig; 31 + 32 + SKIP_IF(geteuid() != 0); 33 + 34 + // The PMU event we use only works on Power7 or later 35 + SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06)); 36 + 37 + if (read_debugfs_file("powerpc/rfi_flush", &rfi_flush_orig) < 0) { 38 + perror("Unable to read powerpc/rfi_flush debugfs file"); 39 + SKIP_IF(1); 40 + } 41 + 42 + if (read_debugfs_file("powerpc/entry_flush", &entry_flush_orig) < 0) { 43 + perror("Unable to read powerpc/entry_flush debugfs file"); 44 + SKIP_IF(1); 45 + } 46 + 47 + if (rfi_flush_orig != 0) { 48 + if (write_debugfs_file("powerpc/rfi_flush", 0) < 0) { 49 + perror("error writing to powerpc/rfi_flush debugfs file"); 50 + FAIL_IF(1); 51 + } 52 + } 53 + 54 + entry_flush = entry_flush_orig; 55 + 56 + fd = perf_event_open_counter(PERF_TYPE_RAW, /* L1d miss */ 0x400f0, -1); 57 + FAIL_IF(fd < 0); 58 + 59 + p = (char *)memalign(zero_size, CACHELINE_SIZE); 60 + 61 + FAIL_IF(perf_event_enable(fd)); 62 + 63 + // disable L1 prefetching 64 + set_dscr(1); 65 + 66 + iter = repetitions; 67 + 68 + /* 69 + * We expect to see l1d miss for each cacheline access when entry_flush 70 + * is set. Allow a small variation on this. 
71 + */ 72 + l1d_misses_expected = iterations * (zero_size / CACHELINE_SIZE - 2); 73 + 74 + again: 75 + FAIL_IF(perf_event_reset(fd)); 76 + 77 + syscall_loop(p, iterations, zero_size); 78 + 79 + FAIL_IF(read(fd, &v, sizeof(v)) != sizeof(v)); 80 + 81 + if (entry_flush && v.l1d_misses >= l1d_misses_expected) 82 + passes++; 83 + else if (!entry_flush && v.l1d_misses < (l1d_misses_expected / 2)) 84 + passes++; 85 + 86 + l1d_misses_total += v.l1d_misses; 87 + 88 + while (--iter) 89 + goto again; 90 + 91 + if (passes < repetitions) { 92 + printf("FAIL (L1D misses with entry_flush=%d: %llu %c %lu) [%d/%d failures]\n", 93 + entry_flush, l1d_misses_total, entry_flush ? '<' : '>', 94 + entry_flush ? repetitions * l1d_misses_expected : 95 + repetitions * l1d_misses_expected / 2, 96 + repetitions - passes, repetitions); 97 + rc = 1; 98 + } else { 99 + printf("PASS (L1D misses with entry_flush=%d: %llu %c %lu) [%d/%d pass]\n", 100 + entry_flush, l1d_misses_total, entry_flush ? '>' : '<', 101 + entry_flush ? 
repetitions * l1d_misses_expected : 102 + repetitions * l1d_misses_expected / 2, 103 + passes, repetitions); 104 + } 105 + 106 + if (entry_flush == entry_flush_orig) { 107 + entry_flush = !entry_flush_orig; 108 + if (write_debugfs_file("powerpc/entry_flush", entry_flush) < 0) { 109 + perror("error writing to powerpc/entry_flush debugfs file"); 110 + return 1; 111 + } 112 + iter = repetitions; 113 + l1d_misses_total = 0; 114 + passes = 0; 115 + goto again; 116 + } 117 + 118 + perf_event_disable(fd); 119 + close(fd); 120 + 121 + set_dscr(0); 122 + 123 + if (write_debugfs_file("powerpc/rfi_flush", rfi_flush_orig) < 0) { 124 + perror("unable to restore original value of powerpc/rfi_flush debugfs file"); 125 + return 1; 126 + } 127 + 128 + if (write_debugfs_file("powerpc/entry_flush", entry_flush_orig) < 0) { 129 + perror("unable to restore original value of powerpc/entry_flush debugfs file"); 130 + return 1; 131 + } 132 + 133 + return rc; 134 + } 135 + 136 + int main(int argc, char *argv[]) 137 + { 138 + return test_harness(entry_flush_test, "entry_flush_test"); 139 + }
+70
tools/testing/selftests/powerpc/security/flush_utils.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + 3 + /* 4 + * Copyright 2018 IBM Corporation. 5 + */ 6 + 7 + #define __SANE_USERSPACE_TYPES__ 8 + 9 + #include <sys/types.h> 10 + #include <stdint.h> 11 + #include <unistd.h> 12 + #include <signal.h> 13 + #include <stdlib.h> 14 + #include <string.h> 15 + #include <stdio.h> 16 + #include "utils.h" 17 + #include "flush_utils.h" 18 + 19 + static inline __u64 load(void *addr) 20 + { 21 + __u64 tmp; 22 + 23 + asm volatile("ld %0,0(%1)" : "=r"(tmp) : "b"(addr)); 24 + 25 + return tmp; 26 + } 27 + 28 + void syscall_loop(char *p, unsigned long iterations, 29 + unsigned long zero_size) 30 + { 31 + for (unsigned long i = 0; i < iterations; i++) { 32 + for (unsigned long j = 0; j < zero_size; j += CACHELINE_SIZE) 33 + load(p + j); 34 + getppid(); 35 + } 36 + } 37 + 38 + static void sigill_handler(int signr, siginfo_t *info, void *unused) 39 + { 40 + static int warned; 41 + ucontext_t *ctx = (ucontext_t *)unused; 42 + unsigned long *pc = &UCONTEXT_NIA(ctx); 43 + 44 + /* mtspr 3,RS to check for move to DSCR below */ 45 + if ((*((unsigned int *)*pc) & 0xfc1fffff) == 0x7c0303a6) { 46 + if (!warned++) 47 + printf("WARNING: Skipping over dscr setup. Consider running 'ppc64_cpu --dscr=1' manually.\n"); 48 + *pc += 4; 49 + } else { 50 + printf("SIGILL at %p\n", pc); 51 + abort(); 52 + } 53 + } 54 + 55 + void set_dscr(unsigned long val) 56 + { 57 + static int init; 58 + struct sigaction sa; 59 + 60 + if (!init) { 61 + memset(&sa, 0, sizeof(sa)); 62 + sa.sa_sigaction = sigill_handler; 63 + sa.sa_flags = SA_SIGINFO; 64 + if (sigaction(SIGILL, &sa, NULL)) 65 + perror("sigill_handler"); 66 + init = 1; 67 + } 68 + 69 + asm volatile("mtspr %1,%0" : : "r" (val), "i" (SPRN_DSCR)); 70 + }
+17
tools/testing/selftests/powerpc/security/flush_utils.h
/* SPDX-License-Identifier: GPL-2.0+ */

/*
 * Copyright 2018 IBM Corporation.
 *
 * Interface to the helpers shared by the powerpc L1-D flush selftests
 * (rfi_flush and entry_flush).
 */

#ifndef _SELFTESTS_POWERPC_SECURITY_FLUSH_UTILS_H
#define _SELFTESTS_POWERPC_SECURITY_FLUSH_UTILS_H

/*
 * Stride (in bytes) used when touching the test buffer — one load per
 * line.  128 matches the L1d line size on the POWER CPUs these tests
 * target (NOTE(review): confirm if ported to other configurations).
 */
#define CACHELINE_SIZE 128

/*
 * Touch every CACHELINE_SIZE-byte line of the zero_size-byte buffer at p,
 * then make a syscall; repeat iterations times.  Run under a PMU counter
 * to measure L1d misses around kernel entry/exit.
 */
void syscall_loop(char *p, unsigned long iterations,
		  unsigned long zero_size);

/*
 * Write val to the DSCR SPR.  Installs a SIGILL handler on first use so
 * a missing/unavailable DSCR facility warns rather than kills the test.
 */
void set_dscr(unsigned long val);

#endif /* _SELFTESTS_POWERPC_SECURITY_FLUSH_UTILS_H */
+30 -66
tools/testing/selftests/powerpc/security/rfi_flush.c
··· 10 10 #include <stdint.h> 11 11 #include <malloc.h> 12 12 #include <unistd.h> 13 - #include <signal.h> 14 13 #include <stdlib.h> 15 14 #include <string.h> 16 15 #include <stdio.h> 17 16 #include "utils.h" 17 + #include "flush_utils.h" 18 18 19 - #define CACHELINE_SIZE 128 20 - 21 - struct perf_event_read { 22 - __u64 nr; 23 - __u64 l1d_misses; 24 - }; 25 - 26 - static inline __u64 load(void *addr) 27 - { 28 - __u64 tmp; 29 - 30 - asm volatile("ld %0,0(%1)" : "=r"(tmp) : "b"(addr)); 31 - 32 - return tmp; 33 - } 34 - 35 - static void syscall_loop(char *p, unsigned long iterations, 36 - unsigned long zero_size) 37 - { 38 - for (unsigned long i = 0; i < iterations; i++) { 39 - for (unsigned long j = 0; j < zero_size; j += CACHELINE_SIZE) 40 - load(p + j); 41 - getppid(); 42 - } 43 - } 44 - 45 - static void sigill_handler(int signr, siginfo_t *info, void *unused) 46 - { 47 - static int warned = 0; 48 - ucontext_t *ctx = (ucontext_t *)unused; 49 - unsigned long *pc = &UCONTEXT_NIA(ctx); 50 - 51 - /* mtspr 3,RS to check for move to DSCR below */ 52 - if ((*((unsigned int *)*pc) & 0xfc1fffff) == 0x7c0303a6) { 53 - if (!warned++) 54 - printf("WARNING: Skipping over dscr setup. 
Consider running 'ppc64_cpu --dscr=1' manually.\n"); 55 - *pc += 4; 56 - } else { 57 - printf("SIGILL at %p\n", pc); 58 - abort(); 59 - } 60 - } 61 - 62 - static void set_dscr(unsigned long val) 63 - { 64 - static int init = 0; 65 - struct sigaction sa; 66 - 67 - if (!init) { 68 - memset(&sa, 0, sizeof(sa)); 69 - sa.sa_sigaction = sigill_handler; 70 - sa.sa_flags = SA_SIGINFO; 71 - if (sigaction(SIGILL, &sa, NULL)) 72 - perror("sigill_handler"); 73 - init = 1; 74 - } 75 - 76 - asm volatile("mtspr %1,%0" : : "r" (val), "i" (SPRN_DSCR)); 77 - } 78 19 79 20 int rfi_flush_test(void) 80 21 { ··· 26 85 __u64 l1d_misses_total = 0; 27 86 unsigned long iterations = 100000, zero_size = 24 * 1024; 28 87 unsigned long l1d_misses_expected; 29 - int rfi_flush_org, rfi_flush; 88 + int rfi_flush_orig, rfi_flush; 89 + int have_entry_flush, entry_flush_orig; 30 90 31 91 SKIP_IF(geteuid() != 0); 32 92 33 93 // The PMU event we use only works on Power7 or later 34 94 SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06)); 35 95 36 - if (read_debugfs_file("powerpc/rfi_flush", &rfi_flush_org)) { 96 + if (read_debugfs_file("powerpc/rfi_flush", &rfi_flush_orig) < 0) { 37 97 perror("Unable to read powerpc/rfi_flush debugfs file"); 38 98 SKIP_IF(1); 39 99 } 40 100 41 - rfi_flush = rfi_flush_org; 101 + if (read_debugfs_file("powerpc/entry_flush", &entry_flush_orig) < 0) { 102 + have_entry_flush = 0; 103 + } else { 104 + have_entry_flush = 1; 105 + 106 + if (entry_flush_orig != 0) { 107 + if (write_debugfs_file("powerpc/entry_flush", 0) < 0) { 108 + perror("error writing to powerpc/entry_flush debugfs file"); 109 + return 1; 110 + } 111 + } 112 + } 113 + 114 + rfi_flush = rfi_flush_orig; 42 115 43 116 fd = perf_event_open_counter(PERF_TYPE_RAW, /* L1d miss */ 0x400f0, -1); 44 117 FAIL_IF(fd < 0); ··· 61 106 62 107 FAIL_IF(perf_event_enable(fd)); 63 108 109 + // disable L1 prefetching 64 110 set_dscr(1); 65 111 66 112 iter = repetitions; ··· 103 147 repetitions * l1d_misses_expected / 2, 104 148 passes, 
repetitions); 105 149 106 - if (rfi_flush == rfi_flush_org) { 107 - rfi_flush = !rfi_flush_org; 150 + if (rfi_flush == rfi_flush_orig) { 151 + rfi_flush = !rfi_flush_orig; 108 152 if (write_debugfs_file("powerpc/rfi_flush", rfi_flush) < 0) { 109 153 perror("error writing to powerpc/rfi_flush debugfs file"); 110 154 return 1; ··· 120 164 121 165 set_dscr(0); 122 166 123 - if (write_debugfs_file("powerpc/rfi_flush", rfi_flush_org) < 0) { 167 + if (write_debugfs_file("powerpc/rfi_flush", rfi_flush_orig) < 0) { 124 168 perror("unable to restore original value of powerpc/rfi_flush debugfs file"); 125 169 return 1; 170 + } 171 + 172 + if (have_entry_flush) { 173 + if (write_debugfs_file("powerpc/entry_flush", entry_flush_orig) < 0) { 174 + perror("unable to restore original value of powerpc/entry_flush " 175 + "debugfs file"); 176 + return 1; 177 + } 126 178 } 127 179 128 180 return rc;