Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: Remove logic to kill 32-bit tasks on 64-bit-only cores

The scheduler now knows enough about these braindead systems to place
32-bit tasks accordingly, so throw out the safety checks and allow the
ret-to-user path to avoid do_notify_resume() if there is nothing to do.

Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Link: https://lore.kernel.org/r/20210730112443.23245-16-will@kernel.org

authored by Will Deacon and committed by Peter Zijlstra
94f9c00f ead7de46

+1 -39
+1 -13
arch/arm64/kernel/process.c
··· 469 469 write_sysreg(val, cntkctl_el1); 470 470 } 471 471 472 - static void compat_thread_switch(struct task_struct *next) 473 - { 474 - if (!is_compat_thread(task_thread_info(next))) 475 - return; 476 - 477 - if (static_branch_unlikely(&arm64_mismatched_32bit_el0)) 478 - set_tsk_thread_flag(next, TIF_NOTIFY_RESUME); 479 - } 480 - 481 472 static void update_sctlr_el1(u64 sctlr) 482 473 { 483 474 /* ··· 510 519 ssbs_thread_switch(next); 511 520 erratum_1418040_thread_switch(prev, next); 512 521 ptrauth_thread_switch_user(next); 513 - compat_thread_switch(next); 514 522 515 523 /* 516 524 * Complete any pending TLB or cache maintenance on this CPU in case ··· 611 621 * at the point of execve(), although we try a bit harder to 612 622 * honour the cpuset hierarchy. 613 623 */ 614 - if (static_branch_unlikely(&arm64_mismatched_32bit_el0)) { 624 + if (static_branch_unlikely(&arm64_mismatched_32bit_el0)) 615 625 force_compatible_cpus_allowed_ptr(current); 616 - set_tsk_thread_flag(current, TIF_NOTIFY_RESUME); 617 - } 618 626 } else if (static_branch_unlikely(&arm64_mismatched_32bit_el0)) { 619 627 relax_compatible_cpus_allowed_ptr(current); 620 628 }
-26
arch/arm64/kernel/signal.c
··· 912 912 restore_saved_sigmask(); 913 913 } 914 914 915 - static bool cpu_affinity_invalid(struct pt_regs *regs) 916 - { 917 - if (!compat_user_mode(regs)) 918 - return false; 919 - 920 - /* 921 - * We're preemptible, but a reschedule will cause us to check the 922 - * affinity again. 923 - */ 924 - return !cpumask_test_cpu(raw_smp_processor_id(), 925 - system_32bit_el0_cpumask()); 926 - } 927 - 928 915 asmlinkage void do_notify_resume(struct pt_regs *regs, 929 916 unsigned long thread_flags) 930 917 { ··· 939 952 if (thread_flags & _TIF_NOTIFY_RESUME) { 940 953 tracehook_notify_resume(regs); 941 954 rseq_handle_notify_resume(NULL, regs); 942 - 943 - /* 944 - * If we reschedule after checking the affinity 945 - * then we must ensure that TIF_NOTIFY_RESUME 946 - * is set so that we check the affinity again. 947 - * Since tracehook_notify_resume() clears the 948 - * flag, ensure that the compiler doesn't move 949 - * it after the affinity check. 950 - */ 951 - barrier(); 952 - 953 - if (cpu_affinity_invalid(regs)) 954 - force_sig(SIGKILL); 955 955 } 956 956 957 957 if (thread_flags & _TIF_FOREIGN_FPSTATE)