Merge tag 'io_uring-5.12-2021-03-21' of git://git.kernel.dk/linux-block

Pull io_uring followup fixes from Jens Axboe:

- The SIGSTOP change from Eric, so we properly ignore that for
PF_IO_WORKER threads.

- Disallow sending signals to PF_IO_WORKER threads in general, we're
not interested in having them funnel back to the io_uring owning
task.

- Stable fix from Stefan, ensuring we properly break links for short
send/sendmsg and recv/recvmsg if MSG_WAITALL is set.

- Catch and loop when needing to run task_work before a PF_IO_WORKER
thread goes to sleep.

* tag 'io_uring-5.12-2021-03-21' of git://git.kernel.dk/linux-block:
io_uring: call req_set_fail_links() on short send[msg]()/recv[msg]() with MSG_WAITALL
io-wq: ensure task is running before processing task_work
signal: don't allow STOP on PF_IO_WORKER threads
signal: don't allow sending any signals to PF_IO_WORKER threads

Changed files (+31 -7):

 fs/io-wq.c      |  +6 -2
 fs/io_uring.c   | +20 -4
 kernel/signal.c |  +5 -1

fs/io-wq.c:

···
 	return NULL;
 }
 
-static void io_flush_signals(void)
+static bool io_flush_signals(void)
 {
 	if (unlikely(test_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL))) {
+		__set_current_state(TASK_RUNNING);
 		if (current->task_works)
 			task_work_run();
 		clear_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL);
+		return true;
 	}
+	return false;
 }
 
 static void io_assign_current_work(struct io_worker *worker,
···
 		}
 		__io_worker_idle(wqe, worker);
 		raw_spin_unlock_irq(&wqe->lock);
-		io_flush_signals();
+		if (io_flush_signals())
+			continue;
 		ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
 		if (try_to_freeze() || ret)
 			continue;

fs/io_uring.c:

···
 	struct io_async_msghdr iomsg, *kmsg;
 	struct socket *sock;
 	unsigned flags;
+	int min_ret = 0;
 	int ret;
 
 	sock = sock_from_file(req->file);
···
 	else if (issue_flags & IO_URING_F_NONBLOCK)
 		flags |= MSG_DONTWAIT;
 
+	if (flags & MSG_WAITALL)
+		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
+
 	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
 	if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
 		return io_setup_async_msg(req, kmsg);
···
 	if (kmsg->free_iov)
 		kfree(kmsg->free_iov);
 	req->flags &= ~REQ_F_NEED_CLEANUP;
-	if (ret < 0)
+	if (ret < min_ret)
 		req_set_fail_links(req);
 	__io_req_complete(req, issue_flags, ret, 0);
 	return 0;
···
 	struct iovec iov;
 	struct socket *sock;
 	unsigned flags;
+	int min_ret = 0;
 	int ret;
 
 	sock = sock_from_file(req->file);
···
 	else if (issue_flags & IO_URING_F_NONBLOCK)
 		flags |= MSG_DONTWAIT;
 
+	if (flags & MSG_WAITALL)
+		min_ret = iov_iter_count(&msg.msg_iter);
+
 	msg.msg_flags = flags;
 	ret = sock_sendmsg(sock, &msg);
 	if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
···
 	if (ret == -ERESTARTSYS)
 		ret = -EINTR;
 
-	if (ret < 0)
+	if (ret < min_ret)
 		req_set_fail_links(req);
 	__io_req_complete(req, issue_flags, ret, 0);
 	return 0;
···
 	struct socket *sock;
 	struct io_buffer *kbuf;
 	unsigned flags;
+	int min_ret = 0;
 	int ret, cflags = 0;
 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
···
 	else if (force_nonblock)
 		flags |= MSG_DONTWAIT;
 
+	if (flags & MSG_WAITALL)
+		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
+
 	ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
 					kmsg->uaddr, flags);
 	if (force_nonblock && ret == -EAGAIN)
···
 	if (kmsg->free_iov)
 		kfree(kmsg->free_iov);
 	req->flags &= ~REQ_F_NEED_CLEANUP;
-	if (ret < 0)
+	if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
 		req_set_fail_links(req);
 	__io_req_complete(req, issue_flags, ret, cflags);
 	return 0;
···
 	struct socket *sock;
 	struct iovec iov;
 	unsigned flags;
+	int min_ret = 0;
 	int ret, cflags = 0;
 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
···
 	else if (force_nonblock)
 		flags |= MSG_DONTWAIT;
 
+	if (flags & MSG_WAITALL)
+		min_ret = iov_iter_count(&msg.msg_iter);
+
 	ret = sock_recvmsg(sock, &msg, flags);
 	if (force_nonblock && ret == -EAGAIN)
 		return -EAGAIN;
···
 out_free:
 	if (req->flags & REQ_F_BUFFER_SELECTED)
 		cflags = io_put_recv_kbuf(req);
-	if (ret < 0)
+	if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
 		req_set_fail_links(req);
 	__io_req_complete(req, issue_flags, ret, cflags);
 	return 0;
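
The user-visible side of the min_ret change can be sketched with a small
liburing program (illustrative only; it assumes liburing is installed and a
kernel with this pull applied, and the file name, buffer sizes and user_data
values are arbitrary): a MSG_WAITALL recv that comes up short still posts its
short byte count, but any request linked behind it is now canceled instead of
running.

/* waitall_link.c - observe the MSG_WAITALL link-break behaviour.
 * Build: gcc waitall_link.c -o waitall_link -luring
 */
#include <liburing.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	char buf[64];
	int sv[2];

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv))
		return 1;
	/* Only 16 of the 64 bytes the recv asks for, then EOF. */
	if (write(sv[1], "0123456789abcdef", 16) != 16)
		return 1;
	close(sv[1]);

	if (io_uring_queue_init(8, &ring, 0))
		return 1;

	/* recv(MSG_WAITALL) for 64 bytes, linked to a nop. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_recv(sqe, sv[0], buf, sizeof(buf), MSG_WAITALL);
	sqe->flags |= IOSQE_IO_LINK;
	sqe->user_data = 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);
	sqe->user_data = 2;

	io_uring_submit(&ring);

	/* With this pull: user_data=1 res=16 (short recv) and the linked
	 * nop (user_data=2) completes with -ECANCELED, because the short
	 * MSG_WAITALL transfer now fails the link.  Before the fix the
	 * nop ran normally despite the short receive. */
	for (int i = 0; i < 2; i++) {
		if (io_uring_wait_cqe(&ring, &cqe))
			break;
		printf("user_data=%llu res=%d\n",
		       (unsigned long long) cqe->user_data, cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}

The send/sendmsg/recvmsg paths behave the same way: a request only keeps its
link intact if it transferred at least iov_iter_count() bytes, and the recv
side additionally breaks the link on MSG_TRUNC/MSG_CTRUNC.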

kernel/signal.c:

···
 		       JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
 	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
 
-	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
+	if (unlikely(fatal_signal_pending(task) ||
+		     (task->flags & (PF_EXITING | PF_IO_WORKER))))
 		return false;
 
 	if (mask & JOBCTL_STOP_SIGMASK)
···
 
 	if (!valid_signal(sig))
 		return -EINVAL;
+	/* PF_IO_WORKER threads don't take any signals */
+	if (t->flags & PF_IO_WORKER)
+		return -ESRCH;
 
 	if (!si_fromuser(info))
 		return 0;
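
The signal.c changes can likewise be observed from userspace. The sketch
below is illustrative only; it assumes liburing, that IOSQE_ASYNC punts the
read to an io-wq worker, and that worker threads show up in /proc with an
"iou-wrk" comm prefix. It grabs a worker tid and tries to signal it directly:

/* sig_iowrk.c - try to signal an io_uring worker thread directly.
 * Build: gcc sig_iowrk.c -o sig_iowrk -luring
 */
#define _GNU_SOURCE
#include <liburing.h>
#include <dirent.h>
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Find a thread of this process whose comm starts with "iou-wrk". */
static pid_t find_io_worker(void)
{
	DIR *dir = opendir("/proc/self/task");
	struct dirent *de;
	pid_t tid = -1;

	while (dir && (de = readdir(dir))) {
		char path[280], comm[32] = "";
		FILE *f;

		if (de->d_name[0] == '.')
			continue;
		snprintf(path, sizeof(path), "/proc/self/task/%s/comm",
			 de->d_name);
		f = fopen(path, "r");
		if (!f)
			continue;
		if (fgets(comm, sizeof(comm), f) &&
		    !strncmp(comm, "iou-wrk", 7))
			tid = atoi(de->d_name);
		fclose(f);
		if (tid > 0)
			break;
	}
	if (dir)
		closedir(dir);
	return tid;
}

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	static char buf[16];
	int fds[2], ret;
	pid_t tid;

	if (pipe(fds) || io_uring_queue_init(4, &ring, 0))
		return 1;

	/* IOSQE_ASYNC punts the read to io-wq; the worker then blocks on
	 * the empty pipe, so a PF_IO_WORKER thread stays around. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fds[0], buf, sizeof(buf), 0);
	sqe->flags |= IOSQE_ASYNC;
	io_uring_submit(&ring);
	usleep(100000);		/* give the worker time to spawn */

	tid = find_io_worker();
	if (tid < 0) {
		fprintf(stderr, "no iou-wrk thread found\n");
		return 1;
	}

	errno = 0;
	ret = syscall(SYS_tgkill, getpid(), tid, SIGSTOP);
	/* Expect -1 with errno == ESRCH on a kernel with this pull; before
	 * it, the signal was delivered to the worker thread instead. */
	printf("tgkill(%d, SIGSTOP) = %d (%s)\n", tid, ret, strerror(errno));

	io_uring_queue_exit(&ring);
	return 0;
}

With the new PF_IO_WORKER check in the kill permission path the signal is
refused with ESRCH rather than funneling back to the io_uring owning task,
and the jobctl change in the first hunk keeps job control STOP from being
applied to the workers.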