Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: move preemption disablement to prctl handlers

In the next patch, we will start reading sctlr_user from
mte_update_sctlr_user and subsequently writing a new value based on the
task's TCF setting and potentially the per-CPU TCF preference. This
means that we need to be careful to disable preemption around any
code sequences that read from sctlr_user and subsequently write to
sctlr_user and/or SCTLR_EL1, so that we don't end up writing a stale
value (based on the previous CPU's TCF preference) to either of them.

We currently have four such sequences, in the prctl handlers for
PR_SET_TAGGED_ADDR_CTRL and PR_PAC_SET_ENABLED_KEYS, as well as in
the task initialization code that resets the prctl settings. Change
the prctl handlers to disable preemption in the handlers themselves
rather than the functions that they call, and change the task
initialization code to call the respective prctl handlers instead of
setting sctlr_user directly.

As a result of this change, we no longer need the helper function
set_task_sctlr_el1, nor does its behavior make sense any more, so
remove it.

Signed-off-by: Peter Collingbourne <pcc@google.com>
Link: https://linux-review.googlesource.com/id/Ic0e8a0c00bb47d786c1e8011df0b7fe99bee4bb5
Link: https://lore.kernel.org/r/20210727205300.2554659-4-pcc@google.com
Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>

Authored by Peter Collingbourne and committed by Catalin Marinas (commit d2e0d8f9, parent 433c38f4).

+24 -29
+6 -6
arch/arm64/include/asm/pointer_auth.h
··· 10 10 #include <asm/memory.h> 11 11 #include <asm/sysreg.h> 12 12 13 + #define PR_PAC_ENABLED_KEYS_MASK \ 14 + (PR_PAC_APIAKEY | PR_PAC_APIBKEY | PR_PAC_APDAKEY | PR_PAC_APDBKEY) 15 + 13 16 #ifdef CONFIG_ARM64_PTR_AUTH 14 17 /* 15 18 * Each key is a 128-bit quantity which is split across a pair of 64-bit ··· 120 117 \ 121 118 /* enable all keys */ \ 122 119 if (system_supports_address_auth()) \ 123 - set_task_sctlr_el1(current->thread.sctlr_user | \ 124 - SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \ 125 - SCTLR_ELx_ENDA | SCTLR_ELx_ENDB); \ 120 + ptrauth_set_enabled_keys(current, \ 121 + PR_PAC_ENABLED_KEYS_MASK, \ 122 + PR_PAC_ENABLED_KEYS_MASK); \ 126 123 } while (0) 127 124 128 125 #define ptrauth_thread_switch_user(tsk) \ ··· 148 145 #define ptrauth_thread_init_kernel(tsk) 149 146 #define ptrauth_thread_switch_kernel(tsk) 150 147 #endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */ 151 - 152 - #define PR_PAC_ENABLED_KEYS_MASK \ 153 - (PR_PAC_APIAKEY | PR_PAC_APIBKEY | PR_PAC_APDAKEY | PR_PAC_APDBKEY) 154 148 155 149 #endif /* __ASM_POINTER_AUTH_H */
+1 -1
arch/arm64/include/asm/processor.h
··· 259 259 260 260 unsigned long get_wchan(struct task_struct *p); 261 261 262 - void set_task_sctlr_el1(u64 sctlr); 262 + void update_sctlr_el1(u64 sctlr); 263 263 264 264 /* Thread switching */ 265 265 extern struct task_struct *cpu_switch_to(struct task_struct *prev,
+4 -4
arch/arm64/kernel/mte.c
··· 218 218 write_sysreg_s(0, SYS_TFSRE0_EL1); 219 219 clear_thread_flag(TIF_MTE_ASYNC_FAULT); 220 220 /* disable tag checking and reset tag generation mask */ 221 - current->thread.mte_ctrl = MTE_CTRL_GCR_USER_EXCL_MASK; 222 - mte_update_sctlr_user(current); 223 - set_task_sctlr_el1(current->thread.sctlr_user); 221 + set_mte_ctrl(current, 0); 224 222 } 225 223 226 224 void mte_thread_switch(struct task_struct *next) ··· 276 278 277 279 task->thread.mte_ctrl = mte_ctrl; 278 280 if (task == current) { 281 + preempt_disable(); 279 282 mte_update_sctlr_user(task); 280 - set_task_sctlr_el1(task->thread.sctlr_user); 283 + update_sctlr_el1(task->thread.sctlr_user); 284 + preempt_enable(); 281 285 } 282 286 283 287 return 0;
+6 -4
arch/arm64/kernel/pointer_auth.c
··· 67 67 int ptrauth_set_enabled_keys(struct task_struct *tsk, unsigned long keys, 68 68 unsigned long enabled) 69 69 { 70 - u64 sctlr = tsk->thread.sctlr_user; 70 + u64 sctlr; 71 71 72 72 if (!system_supports_address_auth()) 73 73 return -EINVAL; ··· 78 78 if ((keys & ~PR_PAC_ENABLED_KEYS_MASK) || (enabled & ~keys)) 79 79 return -EINVAL; 80 80 81 + preempt_disable(); 82 + sctlr = tsk->thread.sctlr_user; 81 83 sctlr &= ~arg_to_enxx_mask(keys); 82 84 sctlr |= arg_to_enxx_mask(enabled); 85 + tsk->thread.sctlr_user = sctlr; 83 86 if (tsk == current) 84 - set_task_sctlr_el1(sctlr); 85 - else 86 - tsk->thread.sctlr_user = sctlr; 87 + update_sctlr_el1(sctlr); 88 + preempt_enable(); 87 89 88 90 return 0; 89 91 }
+7 -14
arch/arm64/kernel/process.c
··· 477 477 set_tsk_thread_flag(next, TIF_NOTIFY_RESUME); 478 478 } 479 479 480 - static void update_sctlr_el1(u64 sctlr) 480 + /* 481 + * __switch_to() checks current->thread.sctlr_user as an optimisation. Therefore 482 + * this function must be called with preemption disabled and the update to 483 + * sctlr_user must be made in the same preemption disabled block so that 484 + * __switch_to() does not see the variable update before the SCTLR_EL1 one. 485 + */ 486 + void update_sctlr_el1(u64 sctlr) 481 487 { 482 488 /* 483 489 * EnIA must not be cleared while in the kernel as this is necessary for ··· 493 487 494 488 /* ISB required for the kernel uaccess routines when setting TCF0. */ 495 489 isb(); 496 - } 497 - 498 - void set_task_sctlr_el1(u64 sctlr) 499 - { 500 - /* 501 - * __switch_to() checks current->thread.sctlr as an 502 - * optimisation. Disable preemption so that it does not see 503 - * the variable update before the SCTLR_EL1 one. 504 - */ 505 - preempt_disable(); 506 - current->thread.sctlr_user = sctlr; 507 - update_sctlr_el1(sctlr); 508 - preempt_enable(); 509 490 } 510 491 511 492 /*