Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'arch-cleanup-2020-10-22' of git://git.kernel.dk/linux-block

Pull arch task_work cleanups from Jens Axboe:
"Two cleanups that don't fit other categories:

- Finally get the task_work_add() cleanup done properly, so we don't
have random 0/1/false/true/TWA_SIGNAL confusing use cases. Updates
all callers, and also fixes up the documentation for
task_work_add().

- While working on some TIF related changes for 5.11, this
TIF_NOTIFY_RESUME cleanup fell out of that. Remove some arch
duplication for how that is handled"

* tag 'arch-cleanup-2020-10-22' of git://git.kernel.dk/linux-block:
task_work: cleanup notification modes
tracehook: clear TIF_NOTIFY_RESUME in tracehook_notify_resume()

+64 -76
-1
arch/alpha/kernel/signal.c
··· 531 531 do_signal(regs, r0, r19); 532 532 r0 = 0; 533 533 } else { 534 - clear_thread_flag(TIF_NOTIFY_RESUME); 535 534 tracehook_notify_resume(regs); 536 535 } 537 536 }
+1 -1
arch/arc/kernel/signal.c
··· 394 394 * ASM glue guarantees that this is only called when returning to 395 395 * user mode 396 396 */ 397 - if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME)) 397 + if (test_thread_flag(TIF_NOTIFY_RESUME)) 398 398 tracehook_notify_resume(regs); 399 399 }
-1
arch/arm/kernel/signal.c
··· 669 669 } else if (thread_flags & _TIF_UPROBE) { 670 670 uprobe_notify_resume(regs); 671 671 } else { 672 - clear_thread_flag(TIF_NOTIFY_RESUME); 673 672 tracehook_notify_resume(regs); 674 673 rseq_handle_notify_resume(NULL, regs); 675 674 }
-1
arch/arm64/kernel/signal.c
··· 946 946 do_signal(regs); 947 947 948 948 if (thread_flags & _TIF_NOTIFY_RESUME) { 949 - clear_thread_flag(TIF_NOTIFY_RESUME); 950 949 tracehook_notify_resume(regs); 951 950 rseq_handle_notify_resume(NULL, regs); 952 951 }
+1 -3
arch/c6x/kernel/signal.c
··· 316 316 if (thread_info_flags & (1 << TIF_SIGPENDING)) 317 317 do_signal(regs, syscall); 318 318 319 - if (thread_info_flags & (1 << TIF_NOTIFY_RESUME)) { 320 - clear_thread_flag(TIF_NOTIFY_RESUME); 319 + if (thread_info_flags & (1 << TIF_NOTIFY_RESUME)) 321 320 tracehook_notify_resume(regs); 322 - } 323 321 }
-1
arch/csky/kernel/signal.c
··· 261 261 do_signal(regs); 262 262 263 263 if (thread_info_flags & _TIF_NOTIFY_RESUME) { 264 - clear_thread_flag(TIF_NOTIFY_RESUME); 265 264 tracehook_notify_resume(regs); 266 265 rseq_handle_notify_resume(NULL, regs); 267 266 }
+1 -3
arch/h8300/kernel/signal.c
··· 282 282 if (thread_info_flags & _TIF_SIGPENDING) 283 283 do_signal(regs); 284 284 285 - if (thread_info_flags & _TIF_NOTIFY_RESUME) { 286 - clear_thread_flag(TIF_NOTIFY_RESUME); 285 + if (thread_info_flags & _TIF_NOTIFY_RESUME) 287 286 tracehook_notify_resume(regs); 288 - } 289 287 }
-1
arch/hexagon/kernel/process.c
··· 180 180 } 181 181 182 182 if (thread_info_flags & _TIF_NOTIFY_RESUME) { 183 - clear_thread_flag(TIF_NOTIFY_RESUME); 184 183 tracehook_notify_resume(regs); 185 184 return 1; 186 185 }
+1 -1
arch/ia64/kernel/process.c
··· 176 176 ia64_do_signal(scr, in_syscall); 177 177 } 178 178 179 - if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME)) { 179 + if (test_thread_flag(TIF_NOTIFY_RESUME)) { 180 180 local_irq_enable(); /* force interrupt enable */ 181 181 tracehook_notify_resume(&scr->pt); 182 182 }
+1 -1
arch/m68k/kernel/signal.c
··· 1136 1136 if (test_thread_flag(TIF_SIGPENDING)) 1137 1137 do_signal(regs); 1138 1138 1139 - if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME)) 1139 + if (test_thread_flag(TIF_NOTIFY_RESUME)) 1140 1140 tracehook_notify_resume(regs); 1141 1141 }
+1 -1
arch/microblaze/kernel/signal.c
··· 316 316 if (test_thread_flag(TIF_SIGPENDING)) 317 317 do_signal(regs, in_syscall); 318 318 319 - if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME)) 319 + if (test_thread_flag(TIF_NOTIFY_RESUME)) 320 320 tracehook_notify_resume(regs); 321 321 }
-1
arch/mips/kernel/signal.c
··· 907 907 do_signal(regs); 908 908 909 909 if (thread_info_flags & _TIF_NOTIFY_RESUME) { 910 - clear_thread_flag(TIF_NOTIFY_RESUME); 911 910 tracehook_notify_resume(regs); 912 911 rseq_handle_notify_resume(NULL, regs); 913 912 }
+1 -3
arch/nds32/kernel/signal.c
··· 379 379 if (thread_flags & _TIF_SIGPENDING) 380 380 do_signal(regs); 381 381 382 - if (thread_flags & _TIF_NOTIFY_RESUME) { 383 - clear_thread_flag(TIF_NOTIFY_RESUME); 382 + if (thread_flags & _TIF_NOTIFY_RESUME) 384 383 tracehook_notify_resume(regs); 385 - } 386 384 }
+1 -1
arch/nios2/kernel/signal.c
··· 317 317 */ 318 318 return restart; 319 319 } 320 - } else if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME)) 320 + } else if (test_thread_flag(TIF_NOTIFY_RESUME)) 321 321 tracehook_notify_resume(regs); 322 322 323 323 return 0;
-1
arch/openrisc/kernel/signal.c
··· 311 311 } 312 312 syscall = 0; 313 313 } else { 314 - clear_thread_flag(TIF_NOTIFY_RESUME); 315 314 tracehook_notify_resume(regs); 316 315 } 317 316 }
+1 -3
arch/parisc/kernel/signal.c
··· 606 606 if (test_thread_flag(TIF_SIGPENDING)) 607 607 do_signal(regs, in_syscall); 608 608 609 - if (test_thread_flag(TIF_NOTIFY_RESUME)) { 610 - clear_thread_flag(TIF_NOTIFY_RESUME); 609 + if (test_thread_flag(TIF_NOTIFY_RESUME)) 611 610 tracehook_notify_resume(regs); 612 - } 613 611 }
-1
arch/powerpc/kernel/signal.c
··· 324 324 } 325 325 326 326 if (thread_info_flags & _TIF_NOTIFY_RESUME) { 327 - clear_thread_flag(TIF_NOTIFY_RESUME); 328 327 tracehook_notify_resume(regs); 329 328 rseq_handle_notify_resume(NULL, regs); 330 329 }
+1 -3
arch/riscv/kernel/signal.c
··· 313 313 if (thread_info_flags & _TIF_SIGPENDING) 314 314 do_signal(regs); 315 315 316 - if (thread_info_flags & _TIF_NOTIFY_RESUME) { 317 - clear_thread_flag(TIF_NOTIFY_RESUME); 316 + if (thread_info_flags & _TIF_NOTIFY_RESUME) 318 317 tracehook_notify_resume(regs); 319 - } 320 318 }
-1
arch/s390/kernel/signal.c
··· 535 535 536 536 void do_notify_resume(struct pt_regs *regs) 537 537 { 538 - clear_thread_flag(TIF_NOTIFY_RESUME); 539 538 tracehook_notify_resume(regs); 540 539 rseq_handle_notify_resume(NULL, regs); 541 540 }
+1 -3
arch/sh/kernel/signal_32.c
··· 502 502 if (thread_info_flags & _TIF_SIGPENDING) 503 503 do_signal(regs, save_r0); 504 504 505 - if (thread_info_flags & _TIF_NOTIFY_RESUME) { 506 - clear_thread_flag(TIF_NOTIFY_RESUME); 505 + if (thread_info_flags & _TIF_NOTIFY_RESUME) 507 506 tracehook_notify_resume(regs); 508 - } 509 507 }
+1 -3
arch/sparc/kernel/signal_32.c
··· 523 523 { 524 524 if (thread_info_flags & _TIF_SIGPENDING) 525 525 do_signal(regs, orig_i0); 526 - if (thread_info_flags & _TIF_NOTIFY_RESUME) { 527 - clear_thread_flag(TIF_NOTIFY_RESUME); 526 + if (thread_info_flags & _TIF_NOTIFY_RESUME) 528 527 tracehook_notify_resume(regs); 529 - } 530 528 } 531 529 532 530 asmlinkage int do_sys_sigstack(struct sigstack __user *ssptr,
+1 -3
arch/sparc/kernel/signal_64.c
··· 551 551 uprobe_notify_resume(regs); 552 552 if (thread_info_flags & _TIF_SIGPENDING) 553 553 do_signal(regs, orig_i0); 554 - if (thread_info_flags & _TIF_NOTIFY_RESUME) { 555 - clear_thread_flag(TIF_NOTIFY_RESUME); 554 + if (thread_info_flags & _TIF_NOTIFY_RESUME) 556 555 tracehook_notify_resume(regs); 557 - } 558 556 user_enter(); 559 557 } 560 558
+1 -1
arch/um/kernel/process.c
··· 101 101 schedule(); 102 102 if (test_thread_flag(TIF_SIGPENDING)) 103 103 do_signal(regs); 104 - if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME)) 104 + if (test_thread_flag(TIF_NOTIFY_RESUME)) 105 105 tracehook_notify_resume(regs); 106 106 } 107 107
+1 -1
arch/x86/kernel/cpu/mce/core.c
··· 1277 1277 else 1278 1278 current->mce_kill_me.func = kill_me_maybe; 1279 1279 1280 - task_work_add(current, &current->mce_kill_me, true); 1280 + task_work_add(current, &current->mce_kill_me, TWA_RESUME); 1281 1281 } 1282 1282 1283 1283 /*
+1 -1
arch/x86/kernel/cpu/resctrl/rdtgroup.c
··· 561 561 * callback has been invoked. 562 562 */ 563 563 atomic_inc(&rdtgrp->waitcount); 564 - ret = task_work_add(tsk, &callback->work, true); 564 + ret = task_work_add(tsk, &callback->work, TWA_RESUME); 565 565 if (ret) { 566 566 /* 567 567 * Task is exiting. Drop the refcount and free the callback.
+1 -1
arch/xtensa/kernel/signal.c
··· 501 501 if (test_thread_flag(TIF_SIGPENDING)) 502 502 do_signal(regs); 503 503 504 - if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME)) 504 + if (test_thread_flag(TIF_NOTIFY_RESUME)) 505 505 tracehook_notify_resume(regs); 506 506 }
+1 -1
drivers/acpi/apei/ghes.c
··· 942 942 estatus_node->task_work.func = ghes_kick_task_work; 943 943 estatus_node->task_work_cpu = smp_processor_id(); 944 944 ret = task_work_add(current, &estatus_node->task_work, 945 - true); 945 + TWA_RESUME); 946 946 if (ret) 947 947 estatus_node->task_work.func = NULL; 948 948 }
+1 -1
drivers/android/binder.c
··· 2229 2229 __close_fd_get_file(fd, &twcb->file); 2230 2230 if (twcb->file) { 2231 2231 filp_close(twcb->file, current->files); 2232 - task_work_add(current, &twcb->twork, true); 2232 + task_work_add(current, &twcb->twork, TWA_RESUME); 2233 2233 } else { 2234 2234 kfree(twcb); 2235 2235 }
+1 -1
fs/file_table.c
··· 339 339 340 340 if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) { 341 341 init_task_work(&file->f_u.fu_rcuhead, ____fput); 342 - if (!task_work_add(task, &file->f_u.fu_rcuhead, true)) 342 + if (!task_work_add(task, &file->f_u.fu_rcuhead, TWA_RESUME)) 343 343 return; 344 344 /* 345 345 * After this task has run exit_task_work(),
+7 -6
fs/io_uring.c
··· 1976 1976 { 1977 1977 struct task_struct *tsk = req->task; 1978 1978 struct io_ring_ctx *ctx = req->ctx; 1979 - int ret, notify; 1979 + enum task_work_notify_mode notify; 1980 + int ret; 1980 1981 1981 1982 if (tsk->flags & PF_EXITING) 1982 1983 return -ESRCH; ··· 1988 1987 * processing task_work. There's no reliable way to tell if TWA_RESUME 1989 1988 * will do the job. 1990 1989 */ 1991 - notify = 0; 1990 + notify = TWA_NONE; 1992 1991 if (!(ctx->flags & IORING_SETUP_SQPOLL) && twa_signal_ok) 1993 1992 notify = TWA_SIGNAL; 1994 1993 ··· 2057 2056 2058 2057 init_task_work(&req->task_work, io_req_task_cancel); 2059 2058 tsk = io_wq_get_task(req->ctx->io_wq); 2060 - task_work_add(tsk, &req->task_work, 0); 2059 + task_work_add(tsk, &req->task_work, TWA_NONE); 2061 2060 wake_up_process(tsk); 2062 2061 } 2063 2062 } ··· 2178 2177 struct task_struct *tsk; 2179 2178 2180 2179 tsk = io_wq_get_task(req->ctx->io_wq); 2181 - task_work_add(tsk, &req->task_work, 0); 2180 + task_work_add(tsk, &req->task_work, TWA_NONE); 2182 2181 wake_up_process(tsk); 2183 2182 } 2184 2183 } ··· 3292 3291 /* queue just for cancelation */ 3293 3292 init_task_work(&req->task_work, io_req_task_cancel); 3294 3293 tsk = io_wq_get_task(req->ctx->io_wq); 3295 - task_work_add(tsk, &req->task_work, 0); 3294 + task_work_add(tsk, &req->task_work, TWA_NONE); 3296 3295 wake_up_process(tsk); 3297 3296 } 3298 3297 return 1; ··· 4858 4857 4859 4858 WRITE_ONCE(poll->canceled, true); 4860 4859 tsk = io_wq_get_task(req->ctx->io_wq); 4861 - task_work_add(tsk, &req->task_work, 0); 4860 + task_work_add(tsk, &req->task_work, TWA_NONE); 4862 4861 wake_up_process(tsk); 4863 4862 } 4864 4863 return 1;
+1 -1
fs/namespace.c
··· 1191 1191 struct task_struct *task = current; 1192 1192 if (likely(!(task->flags & PF_KTHREAD))) { 1193 1193 init_task_work(&mnt->mnt_rcu, __cleanup_mnt); 1194 - if (!task_work_add(task, &mnt->mnt_rcu, true)) 1194 + if (!task_work_add(task, &mnt->mnt_rcu, TWA_RESUME)) 1195 1195 return; 1196 1196 } 1197 1197 if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
+8 -3
include/linux/task_work.h
··· 13 13 twork->func = func; 14 14 } 15 15 16 - #define TWA_RESUME 1 17 - #define TWA_SIGNAL 2 18 - int task_work_add(struct task_struct *task, struct callback_head *twork, int); 16 + enum task_work_notify_mode { 17 + TWA_NONE, 18 + TWA_RESUME, 19 + TWA_SIGNAL, 20 + }; 21 + 22 + int task_work_add(struct task_struct *task, struct callback_head *twork, 23 + enum task_work_notify_mode mode); 19 24 20 25 struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t); 21 26 void task_work_run(void);
+2 -2
include/linux/tracehook.h
··· 178 178 */ 179 179 static inline void tracehook_notify_resume(struct pt_regs *regs) 180 180 { 181 + clear_thread_flag(TIF_NOTIFY_RESUME); 181 182 /* 182 - * The caller just cleared TIF_NOTIFY_RESUME. This barrier 183 - * pairs with task_work_add()->set_notify_resume() after 183 + * This barrier pairs with task_work_add()->set_notify_resume() after 184 184 * hlist_add_head(task->task_works); 185 185 */ 186 186 smp_mb__after_atomic();
-1
kernel/entry/common.c
··· 161 161 arch_do_signal(regs); 162 162 163 163 if (ti_work & _TIF_NOTIFY_RESUME) { 164 - clear_thread_flag(TIF_NOTIFY_RESUME); 165 164 tracehook_notify_resume(regs); 166 165 rseq_handle_notify_resume(NULL, regs); 167 166 }
+1 -3
kernel/entry/kvm.c
··· 16 16 if (ti_work & _TIF_NEED_RESCHED) 17 17 schedule(); 18 18 19 - if (ti_work & _TIF_NOTIFY_RESUME) { 20 - clear_thread_flag(TIF_NOTIFY_RESUME); 19 + if (ti_work & _TIF_NOTIFY_RESUME) 21 20 tracehook_notify_resume(NULL); 22 - } 23 21 24 22 ret = arch_xfer_to_guest_mode_handle_work(vcpu, ti_work); 25 23 if (ret)
+1 -1
kernel/events/uprobes.c
··· 1823 1823 1824 1824 t->utask->dup_xol_addr = area->vaddr; 1825 1825 init_task_work(&t->utask->dup_xol_work, dup_xol_work); 1826 - task_work_add(t, &t->utask->dup_xol_work, true); 1826 + task_work_add(t, &t->utask->dup_xol_work, TWA_RESUME); 1827 1827 } 1828 1828 1829 1829 /*
+1 -1
kernel/irq/manage.c
··· 1162 1162 handler_fn = irq_thread_fn; 1163 1163 1164 1164 init_task_work(&on_exit_work, irq_thread_dtor); 1165 - task_work_add(current, &on_exit_work, false); 1165 + task_work_add(current, &on_exit_work, TWA_NONE); 1166 1166 1167 1167 irq_thread_check_affinity(desc, action); 1168 1168
+1 -1
kernel/sched/fair.c
··· 2928 2928 curr->node_stamp += period; 2929 2929 2930 2930 if (!time_before(jiffies, curr->mm->numa_next_scan)) 2931 - task_work_add(curr, work, true); 2931 + task_work_add(curr, work, TWA_RESUME); 2932 2932 } 2933 2933 } 2934 2934
+20 -10
kernel/task_work.c
··· 9 9 * task_work_add - ask the @task to execute @work->func() 10 10 * @task: the task which should run the callback 11 11 * @work: the callback to run 12 - * @notify: send the notification if true 12 + * @notify: how to notify the targeted task 13 13 * 14 - * Queue @work for task_work_run() below and notify the @task if @notify. 15 - * Fails if the @task is exiting/exited and thus it can't process this @work. 16 - * Otherwise @work->func() will be called when the @task returns from kernel 17 - * mode or exits. 14 + * Queue @work for task_work_run() below and notify the @task if @notify 15 + * is @TWA_RESUME or @TWA_SIGNAL. @TWA_SIGNAL works like signals, in that 16 + * it will interrupt the targeted task and run the task_work. @TWA_RESUME 17 + * work is run only when the task exits the kernel and returns to user mode, 18 + * or before entering guest mode. Fails if the @task is exiting/exited and thus 19 + * it can't process this @work. Otherwise @work->func() will be called when the 20 + * @task goes through one of the aforementioned transitions, or exits. 18 * 19 - * This is like the signal handler which runs in kernel mode, but it doesn't 20 - * try to wake up the @task. 22 + * If the targeted task is exiting, then an error is returned and the work item 23 + * is not queued. It's up to the caller to arrange for an alternative mechanism 24 + * in that case. 21 25 * 22 - * Note: there is no ordering guarantee on works queued here. 26 + * Note: there is no ordering guarantee on works queued here. The task_work 27 + * list is LIFO. 23 28 * 24 29 * RETURNS: 25 30 * 0 if succeeds or -ESRCH. 
26 31 */ 27 - int 28 - task_work_add(struct task_struct *task, struct callback_head *work, int notify) 32 + int task_work_add(struct task_struct *task, struct callback_head *work, 33 + enum task_work_notify_mode notify) 29 34 { 30 35 struct callback_head *head; 31 36 unsigned long flags; ··· 43 38 } while (cmpxchg(&task->task_works, head, work) != head); 44 39 45 40 switch (notify) { 41 + case TWA_NONE: 42 + break; 46 43 case TWA_RESUME: 47 44 set_notify_resume(task); 48 45 break; ··· 60 53 signal_wake_up(task, 0); 61 54 unlock_task_sighand(task, &flags); 62 55 } 56 + break; 57 + default: 58 + WARN_ON_ONCE(1); 63 59 break; 64 60 } 65 61
+1 -1
security/keys/keyctl.c
··· 1693 1693 1694 1694 /* the replacement session keyring is applied just prior to userspace 1695 1695 * restarting */ 1696 - ret = task_work_add(parent, newwork, true); 1696 + ret = task_work_add(parent, newwork, TWA_RESUME); 1697 1697 if (!ret) 1698 1698 newwork = NULL; 1699 1699 unlock:
+1 -1
security/yama/yama_lsm.c
··· 99 99 info->access = access; 100 100 info->target = target; 101 101 info->agent = agent; 102 - if (task_work_add(current, &info->work, true) == 0) 102 + if (task_work_add(current, &info->work, TWA_RESUME) == 0) 103 103 return; /* success */ 104 104 105 105 WARN(1, "report_access called from exiting task");