Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge tag 'io_uring-5.11-2021-01-01' of git://git.kernel.dk/linux-block

Pull io_uring fixes from Jens Axboe:
"A few fixes that should go into 5.11, all marked for stable as well:

- Fix issue around identity COW'ing and users that share a ring
across processes

- Fix a hang associated with unregistering fixed files (Pavel)

- Move the 'process is exiting' cancelation a bit earlier, so
task_works aren't affected by it (Pavel)"

* tag 'io_uring-5.11-2021-01-01' of git://git.kernel.dk/linux-block:
kernel/io_uring: cancel io_uring before task works
io_uring: fix io_sqe_files_unregister() hangs
io_uring: add a helper for setting a ref node
io_uring: don't assume mm is constant across submits

+43 -21
-2
fs/file.c
··· 21 #include <linux/rcupdate.h> 22 #include <linux/close_range.h> 23 #include <net/sock.h> 24 - #include <linux/io_uring.h> 25 26 unsigned int sysctl_nr_open __read_mostly = 1024*1024; 27 unsigned int sysctl_nr_open_min = BITS_PER_LONG; ··· 427 struct files_struct * files = tsk->files; 428 429 if (files) { 430 - io_uring_files_cancel(files); 431 task_lock(tsk); 432 tsk->files = NULL; 433 task_unlock(tsk);
··· 21 #include <linux/rcupdate.h> 22 #include <linux/close_range.h> 23 #include <net/sock.h> 24 25 unsigned int sysctl_nr_open __read_mostly = 1024*1024; 26 unsigned int sysctl_nr_open_min = BITS_PER_LONG; ··· 428 struct files_struct * files = tsk->files; 429 430 if (files) { 431 task_lock(tsk); 432 tsk->files = NULL; 433 task_unlock(tsk);
+41 -19
fs/io_uring.c
··· 992 ACCT_PINNED, 993 }; 994 995 static void __io_complete_rw(struct io_kiocb *req, long res, long res2, 996 struct io_comp_state *cs); 997 static void io_cqring_fill_event(struct io_kiocb *req, long res); ··· 1505 spin_unlock_irq(&ctx->inflight_lock); 1506 req->work.flags |= IO_WQ_WORK_FILES; 1507 } 1508 1509 return true; 1510 } ··· 1534 } else { 1535 if (def->unbound_nonreg_file) 1536 req->work.flags |= IO_WQ_WORK_UNBOUND; 1537 - } 1538 - 1539 - /* ->mm can never change on us */ 1540 - if (!(req->work.flags & IO_WQ_WORK_MM) && 1541 - (def->work_flags & IO_WQ_WORK_MM)) { 1542 - mmgrab(id->mm); 1543 - req->work.flags |= IO_WQ_WORK_MM; 1544 } 1545 1546 /* if we fail grabbing identity, we must COW, regrab, and retry */ ··· 7235 complete(&data->done); 7236 } 7237 7238 static int io_sqe_files_unregister(struct io_ring_ctx *ctx) 7239 { 7240 struct fixed_file_data *data = ctx->file_data; 7241 - struct fixed_file_ref_node *ref_node = NULL; 7242 unsigned nr_tables, i; 7243 7244 if (!data) 7245 return -ENXIO; 7246 7247 spin_lock_bh(&data->lock); 7248 ref_node = data->node; ··· 7268 7269 /* wait for all refs nodes to complete */ 7270 flush_delayed_work(&ctx->file_put_work); 7271 - wait_for_completion(&data->done); 7272 7273 __io_sqe_files_unregister(ctx); 7274 nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE); ··· 7290 kfree(data); 7291 ctx->file_data = NULL; 7292 ctx->nr_user_files = 0; 7293 return 0; 7294 } 7295 ··· 7788 return PTR_ERR(ref_node); 7789 } 7790 7791 - file_data->node = ref_node; 7792 - spin_lock_bh(&file_data->lock); 7793 - list_add_tail(&ref_node->node, &file_data->ref_list); 7794 - spin_unlock_bh(&file_data->lock); 7795 - percpu_ref_get(&file_data->refs); 7796 return ret; 7797 out_fput: 7798 for (i = 0; i < ctx->nr_user_files; i++) { ··· 7944 7945 if (needs_switch) { 7946 percpu_ref_kill(&data->node->refs); 7947 - spin_lock_bh(&data->lock); 7948 - list_add_tail(&ref_node->node, &data->ref_list); 7949 - data->node = ref_node; 7950 - spin_unlock_bh(&data->lock); 7951 - percpu_ref_get(&ctx->file_data->refs); 7952 } else 7953 destroy_fixed_file_ref_node(ref_node); 7954
··· 992 ACCT_PINNED, 993 }; 994 995 + static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node); 996 + static struct fixed_file_ref_node *alloc_fixed_file_ref_node( 997 + struct io_ring_ctx *ctx); 998 + 999 static void __io_complete_rw(struct io_kiocb *req, long res, long res2, 1000 struct io_comp_state *cs); 1001 static void io_cqring_fill_event(struct io_kiocb *req, long res); ··· 1501 spin_unlock_irq(&ctx->inflight_lock); 1502 req->work.flags |= IO_WQ_WORK_FILES; 1503 } 1504 + if (!(req->work.flags & IO_WQ_WORK_MM) && 1505 + (def->work_flags & IO_WQ_WORK_MM)) { 1506 + if (id->mm != current->mm) 1507 + return false; 1508 + mmgrab(id->mm); 1509 + req->work.flags |= IO_WQ_WORK_MM; 1510 + } 1511 1512 return true; 1513 } ··· 1523 } else { 1524 if (def->unbound_nonreg_file) 1525 req->work.flags |= IO_WQ_WORK_UNBOUND; 1526 } 1527 1528 /* if we fail grabbing identity, we must COW, regrab, and retry */ ··· 7231 complete(&data->done); 7232 } 7233 7234 + static void io_sqe_files_set_node(struct fixed_file_data *file_data, 7235 + struct fixed_file_ref_node *ref_node) 7236 + { 7237 + spin_lock_bh(&file_data->lock); 7238 + file_data->node = ref_node; 7239 + list_add_tail(&ref_node->node, &file_data->ref_list); 7240 + spin_unlock_bh(&file_data->lock); 7241 + percpu_ref_get(&file_data->refs); 7242 + } 7243 + 7244 static int io_sqe_files_unregister(struct io_ring_ctx *ctx) 7245 { 7246 struct fixed_file_data *data = ctx->file_data; 7247 + struct fixed_file_ref_node *backup_node, *ref_node = NULL; 7248 unsigned nr_tables, i; 7249 + int ret; 7250 7251 if (!data) 7252 return -ENXIO; 7253 + backup_node = alloc_fixed_file_ref_node(ctx); 7254 + if (!backup_node) 7255 + return -ENOMEM; 7256 7257 spin_lock_bh(&data->lock); 7258 ref_node = data->node; ··· 7250 7251 /* wait for all refs nodes to complete */ 7252 flush_delayed_work(&ctx->file_put_work); 7253 + do { 7254 + ret = wait_for_completion_interruptible(&data->done); 7255 + if (!ret) 7256 + break; 7257 + ret = io_run_task_work_sig(); 7258 + if (ret < 0) { 7259 + percpu_ref_resurrect(&data->refs); 7260 + reinit_completion(&data->done); 7261 + io_sqe_files_set_node(data, backup_node); 7262 + return ret; 7263 + } 7264 + } while (1); 7265 7266 __io_sqe_files_unregister(ctx); 7267 nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE); ··· 7261 kfree(data); 7262 ctx->file_data = NULL; 7263 ctx->nr_user_files = 0; 7264 + destroy_fixed_file_ref_node(backup_node); 7265 return 0; 7266 } 7267 ··· 7758 return PTR_ERR(ref_node); 7759 } 7760 7761 + io_sqe_files_set_node(file_data, ref_node); 7762 return ret; 7763 out_fput: 7764 for (i = 0; i < ctx->nr_user_files; i++) { ··· 7918 7919 if (needs_switch) { 7920 percpu_ref_kill(&data->node->refs); 7921 + io_sqe_files_set_node(data, ref_node); 7922 } else 7923 destroy_fixed_file_ref_node(ref_node); 7924
+2
kernel/exit.c
··· 63 #include <linux/random.h> 64 #include <linux/rcuwait.h> 65 #include <linux/compat.h> 66 67 #include <linux/uaccess.h> 68 #include <asm/unistd.h> ··· 777 schedule(); 778 } 779 780 exit_signals(tsk); /* sets PF_EXITING */ 781 782 /* sync mm's RSS info before statistics gathering */
··· 63 #include <linux/random.h> 64 #include <linux/rcuwait.h> 65 #include <linux/compat.h> 66 + #include <linux/io_uring.h> 67 68 #include <linux/uaccess.h> 69 #include <asm/unistd.h> ··· 776 schedule(); 777 } 778 779 + io_uring_files_cancel(tsk->files); 780 exit_signals(tsk); /* sets PF_EXITING */ 781 782 /* sync mm's RSS info before statistics gathering */