Merge tag 'io_uring-5.15-2021-10-01' of git://git.kernel.dk/linux-block

Pull io_uring fixes from Jens Axboe:
"Two fixes in here:

 - The signal issue that was discussed at the start of this week (me).

 - Kill dead fasync support in io_uring. It looks like it has been
   broken since io_uring was initially merged, and given that nobody
   has ever complained about it, let's just kill it (Pavel). (A sketch
   of the userspace pattern it would have served follows the quote.)"
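For reference, "fasync" is the kernel hook behind signal-driven I/O:
userspace asks for SIGIO on readiness via fcntl(F_SETOWN) plus O_ASYNC.
Below is a minimal sketch of the consumer pattern the removed hooks were
meant to serve (ring_fd is a hypothetical io_uring fd; because the
io_uring side never actually delivered SIGIO, this pattern never worked
on a ring fd, which is what makes the removal safe):

    #include <fcntl.h>
    #include <signal.h>
    #include <unistd.h>

    static volatile sig_atomic_t got_sigio;

    static void on_sigio(int sig)
    {
        (void)sig;
        got_sigio = 1;    /* async-signal-safe: set a flag only */
    }

    static void arm_sigio(int ring_fd)
    {
        struct sigaction sa = { .sa_handler = on_sigio };

        sigaction(SIGIO, &sa, NULL);
        fcntl(ring_fd, F_SETOWN, getpid());    /* route SIGIO to us */
        fcntl(ring_fd, F_SETFL,
              fcntl(ring_fd, F_GETFL) | O_ASYNC);    /* enable fasync */
    }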

* tag 'io_uring-5.15-2021-10-01' of git://git.kernel.dk/linux-block:
io_uring: kill fasync
io-wq: exclusively gate signal based exit on get_signal() return

 fs/io-wq.c    |  5 +----
 fs/io_uring.c | 17 ++---------------
 2 files changed, 3 insertions(+), 19 deletions(-)

diff --git a/fs/io-wq.c b/fs/io-wq.c
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -584,10 +584,7 @@ static int io_wqe_worker(void *data)
 
 			if (!get_signal(&ksig))
 				continue;
-			if (fatal_signal_pending(current) ||
-			    signal_group_exit(current->signal))
-				break;
-			continue;
+			break;
 		}
 		last_timeout = !ret;
 	}
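The rationale: for an io-wq worker, get_signal() both dequeues the
signal and decides the outcome; in this context a true return means the
worker must exit, and the dequeue may already have cleared the very
state the deleted fatal_signal_pending()/signal_group_exit() re-checks
were testing. A userspace analogue of the same principle -- act on the
dequeue's return value, don't re-check pending state it has consumed --
is sketched below (SIGTERM stands in for the fatal signal; this is an
illustration, not the kernel code):

    #include <signal.h>
    #include <stdio.h>

    int main(void)
    {
        sigset_t set;
        siginfo_t si;

        sigemptyset(&set);
        sigaddset(&set, SIGTERM);
        /* Block SIGTERM so it is consumed only by the dequeue. */
        sigprocmask(SIG_BLOCK, &set, NULL);

        for (;;) {
            /* Blocking dequeue; its result is authoritative. */
            if (sigwaitinfo(&set, &si) < 0)
                continue;    /* EINTR: retry */
            /* Re-checking sigpending() here would see nothing:
             * the signal was consumed above. */
            if (si.si_signo == SIGTERM)
                break;
        }
        puts("worker exiting");
        return 0;
    }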
diff --git a/fs/io_uring.c b/fs/io_uring.c
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -403,7 +403,6 @@ struct io_ring_ctx {
 		struct wait_queue_head	cq_wait;
 		unsigned		cq_extra;
 		atomic_t		cq_timeouts;
-		struct fasync_struct	*cq_fasync;
 		unsigned		cq_last_tm_flush;
 	} ____cacheline_aligned_in_smp;
 
@@ -1613,10 +1612,8 @@ static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
 		wake_up(&ctx->sq_data->wait);
 	if (io_should_trigger_evfd(ctx))
 		eventfd_signal(ctx->cq_ev_fd, 1);
-	if (waitqueue_active(&ctx->poll_wait)) {
+	if (waitqueue_active(&ctx->poll_wait))
 		wake_up_interruptible(&ctx->poll_wait);
-		kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
-	}
 }
 
 static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
@@ -1628,10 +1625,8 @@ static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
 	}
 	if (io_should_trigger_evfd(ctx))
 		eventfd_signal(ctx->cq_ev_fd, 1);
-	if (waitqueue_active(&ctx->poll_wait)) {
+	if (waitqueue_active(&ctx->poll_wait))
 		wake_up_interruptible(&ctx->poll_wait);
-		kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
-	}
 }
 
 /* Returns true if there are no backlogged entries after the flush */
@@ -9340,13 +9335,6 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
 	return mask;
 }
 
-static int io_uring_fasync(int fd, struct file *file, int on)
-{
-	struct io_ring_ctx *ctx = file->private_data;
-
-	return fasync_helper(fd, file, on, &ctx->cq_fasync);
-}
-
 static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
 {
 	const struct cred *creds;
@@ -10133,7 +10121,6 @@ static const struct file_operations io_uring_fops = {
 	.mmap_capabilities = io_uring_nommu_mmap_capabilities,
 #endif
 	.poll			= io_uring_poll,
-	.fasync			= io_uring_fasync,
 #ifdef CONFIG_PROC_FS
 	.show_fdinfo		= io_uring_show_fdinfo,
 #endif
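With fasync gone, readiness notification on a ring fd still works
through the retained .poll hook (and through a registered eventfd, as
the eventfd_signal() calls above show). A minimal sketch using the raw
io_uring_setup(2) syscall rather than liburing -- it assumes a kernel
and libc recent enough to define __NR_io_uring_setup, and error
handling is mostly elided:

    #include <linux/io_uring.h>
    #include <poll.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        struct io_uring_params p;
        struct pollfd pfd;
        int fd, ret;

        memset(&p, 0, sizeof(p));
        /* liburing's io_uring_queue_init() wraps this syscall. */
        fd = (int) syscall(__NR_io_uring_setup, 8, &p);
        if (fd < 0) {
            perror("io_uring_setup");
            return 1;
        }

        /* POLLIN on the ring fd reports pending CQ completions;
         * with nothing submitted this simply times out. */
        pfd.fd = fd;
        pfd.events = POLLIN;
        pfd.revents = 0;
        ret = poll(&pfd, 1, 100);
        printf("poll: %d (revents=0x%x)\n", ret, pfd.revents);

        close(fd);
        return 0;
    }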