Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

cfq-iosched: read_lock() does not always imply rcu_read_lock()

For some configurations of CONFIG_PREEMPT that is not true. So
get rid of __call_for_each_cic() and always use the explicitly
rcu_read_lock() protected call_for_each_cic() instead.

This fixes a potential bug related to IO scheduler removal or
online switching.

Thanks to Paul McKenney for clarifying this.

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>

1 file changed, 8 insertions(+), 16 deletions(-)
block/cfq-iosched.c
@@ -2582,28 +2582,14 @@
 }
 
 /*
- * Must always be called with the rcu_read_lock() held
- */
-static void
-__call_for_each_cic(struct io_context *ioc,
-		    void (*func)(struct io_context *, struct cfq_io_context *))
-{
-	struct cfq_io_context *cic;
-	struct hlist_node *n;
-
-	hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
-		func(ioc, cic);
-}
-
-/*
  * Call func for each cic attached to this ioc.
  */
 static void
 call_for_each_cic(struct io_context *ioc,
 		  void (*func)(struct io_context *, struct cfq_io_context *))
 {
+	struct cfq_io_context *cic;
+	struct hlist_node *n;
+
 	rcu_read_lock();
-	__call_for_each_cic(ioc, func);
+
+	hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
+		func(ioc, cic);
+
 	rcu_read_unlock();
 }
@@ -2656,7 +2664,7 @@
 	 * should be ok to iterate over the known list, we will see all cic's
 	 * since no new ones are added.
 	 */
-	__call_for_each_cic(ioc, cic_free_func);
+	call_for_each_cic(ioc, cic_free_func);
 }
 
 static void cfq_put_cooperator(struct cfq_queue *cfqq)