Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/kuap: Add kuap_lock()

Add kuap_lock() and call it when entering interrupts from user.

It is called kuap_lock() as it is similar to kuap_save_and_lock()
without the save.

However, book3s/32 already has a kuap_lock(). Rename it to
kuap_lock_addr().

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/4437e2deb9f6f549f7089d45e9c6f96a7e77905a.1634627931.git.christophe.leroy@csgroup.eu

authored by

Christophe Leroy and committed by
Michael Ellerman
937fb700 2341964e

+28 -6
+9 -5
arch/powerpc/include/asm/book3s/32/kup.h
··· 57 57 void kuap_lock_all_ool(void); 58 58 void kuap_unlock_all_ool(void); 59 59 60 - static inline void kuap_lock(unsigned long addr, bool ool) 60 + static inline void kuap_lock_addr(unsigned long addr, bool ool) 61 61 { 62 62 if (likely(addr != KUAP_ALL)) 63 63 kuap_lock_one(addr); ··· 77 77 kuap_unlock_all_ool(); 78 78 } 79 79 80 + static inline void __kuap_lock(void) 81 + { 82 + } 83 + 80 84 static inline void __kuap_save_and_lock(struct pt_regs *regs) 81 85 { 82 86 unsigned long kuap = current->thread.kuap; ··· 90 86 return; 91 87 92 88 current->thread.kuap = KUAP_NONE; 93 - kuap_lock(kuap, false); 89 + kuap_lock_addr(kuap, false); 94 90 } 95 91 96 92 static inline void kuap_user_restore(struct pt_regs *regs) ··· 101 97 { 102 98 if (unlikely(kuap != KUAP_NONE)) { 103 99 current->thread.kuap = KUAP_NONE; 104 - kuap_lock(kuap, false); 100 + kuap_lock_addr(kuap, false); 105 101 } 106 102 107 103 if (likely(regs->kuap == KUAP_NONE)) ··· 143 139 return; 144 140 145 141 current->thread.kuap = KUAP_NONE; 146 - kuap_lock(kuap, true); 142 + kuap_lock_addr(kuap, true); 147 143 } 148 144 149 145 static inline unsigned long __prevent_user_access_return(void) ··· 152 148 153 149 if (flags != KUAP_NONE) { 154 150 current->thread.kuap = KUAP_NONE; 155 - kuap_lock(flags, true); 151 + kuap_lock_addr(flags, true); 156 152 } 157 153 158 154 return flags;
+4 -1
arch/powerpc/include/asm/interrupt.h
··· 140 140 trace_hardirqs_off(); 141 141 142 142 if (user_mode(regs)) 143 - account_cpu_user_entry(); 143 + kuap_lock(); 144 144 else 145 145 kuap_save_and_lock(regs); 146 + 147 + if (user_mode(regs)) 148 + account_cpu_user_entry(); 146 149 #endif 147 150 148 151 #ifdef CONFIG_PPC64
+9
arch/powerpc/include/asm/kup.h
··· 49 49 } 50 50 51 51 static inline void __kuap_assert_locked(void) { } 52 + static inline void __kuap_lock(void) { } 52 53 static inline void __kuap_save_and_lock(struct pt_regs *regs) { } 53 54 static inline void kuap_user_restore(struct pt_regs *regs) { } 54 55 static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr) { } ··· 92 91 } 93 92 94 93 #ifdef CONFIG_PPC32 94 + static __always_inline void kuap_lock(void) 95 + { 96 + if (kuap_is_disabled()) 97 + return; 98 + 99 + __kuap_lock(); 100 + } 101 + 95 102 static __always_inline void kuap_save_and_lock(struct pt_regs *regs) 96 103 { 97 104 if (kuap_is_disabled())
+4
arch/powerpc/include/asm/nohash/32/kup-8xx.h
··· 20 20 return static_branch_unlikely(&disable_kuap_key); 21 21 } 22 22 23 + static inline void __kuap_lock(void) 24 + { 25 + } 26 + 23 27 static inline void __kuap_save_and_lock(struct pt_regs *regs) 24 28 { 25 29 regs->kuap = mfspr(SPRN_MD_AP);
+2
arch/powerpc/kernel/interrupt.c
··· 81 81 { 82 82 syscall_fn f; 83 83 84 + kuap_lock(); 85 + 84 86 regs->orig_gpr3 = r3; 85 87 86 88 if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))