Merge tag 'io_uring-5.9-2020-10-02' of git://git.kernel.dk/linux-block

Pull io_uring fixes from Jens Axboe:

- fix for async buffered reads if read-ahead is fully disabled (Hao)

- double poll match fix

- ->show_fdinfo() potential ABBA deadlock complaint fix

* tag 'io_uring-5.9-2020-10-02' of git://git.kernel.dk/linux-block:
io_uring: fix async buffered reads when readahead is disabled
io_uring: fix potential ABBA deadlock in ->show_fdinfo()
io_uring: always delete double poll wait entry on match

Diffstat: 2 files changed, 23 insertions(+), 6 deletions(-)

fs/io_uring.c (+18 -5)
@@ -3049,6 +3050,7 @@
 	if (!wake_page_match(wpq, key))
 		return 0;
 
+	req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
 	list_del_init(&wait->entry);
 
 	init_task_work(&req->task_work, io_req_task_submit);
@@ -3107,6 +3106,7 @@
 	wait->wait.flags = 0;
 	INIT_LIST_HEAD(&wait->wait.entry);
 	kiocb->ki_flags |= IOCB_WAITQ;
+	kiocb->ki_flags &= ~IOCB_NOWAIT;
 	kiocb->ki_waitq = wait;
 
 	io_get_req_task(req);
@@ -4744,6 +4742,8 @@
 	/* for instances that support it check for an event match first: */
 	if (mask && !(mask & poll->events))
 		return 0;
+
+	list_del_init(&wait->entry);
 
 	if (poll && poll->head) {
 		bool done;
@@ -8416,20 +8412,29 @@
 
 static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
 {
+	bool has_lock;
 	int i;
 
-	mutex_lock(&ctx->uring_lock);
+	/*
+	 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
+	 * since fdinfo case grabs it in the opposite direction of normal use
+	 * cases. If we fail to get the lock, we just don't iterate any
+	 * structures that could be going away outside the io_uring mutex.
+	 */
+	has_lock = mutex_trylock(&ctx->uring_lock);
+
 	seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
-	for (i = 0; i < ctx->nr_user_files; i++) {
+	for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
 		struct fixed_file_table *table;
 		struct file *f;
 
@@ -8440,13 +8428,13 @@
 			seq_printf(m, "%5u: <none>\n", i);
 	}
 	seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
-	for (i = 0; i < ctx->nr_user_bufs; i++) {
+	for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
 		struct io_mapped_ubuf *buf = &ctx->user_bufs[i];
 
 		seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
 			   (unsigned int) buf->len);
 	}
-	if (!idr_is_empty(&ctx->personality_idr)) {
+	if (has_lock && !idr_is_empty(&ctx->personality_idr)) {
 		seq_printf(m, "Personalities:\n");
 		idr_for_each(&ctx->personality_idr, io_uring_show_cred, m);
 	}
@@ -8461,7 +8449,8 @@
 			   req->task->task_works != NULL);
 	}
 	spin_unlock_irq(&ctx->completion_lock);
-	mutex_unlock(&ctx->uring_lock);
+	if (has_lock)
+		mutex_unlock(&ctx->uring_lock);
 }
 
 static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
mm/filemap.c (+5 -1)
@@ -2365,7 +2365,11 @@
 	}
 
 	if (!PageUptodate(page)) {
-		error = lock_page_killable(page);
+		if (iocb->ki_flags & IOCB_WAITQ)
+			error = lock_page_async(page, iocb->ki_waitq);
+		else
+			error = lock_page_killable(page);
+
 		if (unlikely(error))
 			goto readpage_error;
 		if (!PageUptodate(page)) {