Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: get rid of kblock_schedule_delayed_work()

It was briefly introduced to allow CFQ to do delayed scheduling,
but we ended up removing that feature again. So let's kill the
function and export, and just switch CFQ back to the normal work
schedule since it is now passing in a '0' delay from all call
sites.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>

+11 -25
-8
block/blk-core.c
··· 2492 2492 } 2493 2493 EXPORT_SYMBOL(kblockd_schedule_work); 2494 2494 2495 - int kblockd_schedule_delayed_work(struct request_queue *q, 2496 - struct delayed_work *work, 2497 - unsigned long delay) 2498 - { 2499 - return queue_delayed_work(kblockd_workqueue, work, delay); 2500 - } 2501 - EXPORT_SYMBOL(kblockd_schedule_delayed_work); 2502 - 2503 2495 int __init blk_dev_init(void) 2504 2496 { 2505 2497 BUILD_BUG_ON(__REQ_NR_BITS > 8 *
+11 -13
block/cfq-iosched.c
··· 150 150 * idle window management 151 151 */ 152 152 struct timer_list idle_slice_timer; 153 - struct delayed_work unplug_work; 153 + struct work_struct unplug_work; 154 154 155 155 struct cfq_queue *active_queue; 156 156 struct cfq_io_context *active_cic; ··· 268 268 * scheduler run of queue, if there are requests pending and no one in the 269 269 * driver that will restart queueing 270 270 */ 271 - static inline void cfq_schedule_dispatch(struct cfq_data *cfqd, 272 - unsigned long delay) 271 + static inline void cfq_schedule_dispatch(struct cfq_data *cfqd) 273 272 { 274 273 if (cfqd->busy_queues) { 275 274 cfq_log(cfqd, "schedule dispatch"); 276 - kblockd_schedule_delayed_work(cfqd->queue, &cfqd->unplug_work, 277 - delay); 275 + kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work); 278 276 } 279 277 } 280 278 ··· 1398 1400 1399 1401 if (unlikely(cfqd->active_queue == cfqq)) { 1400 1402 __cfq_slice_expired(cfqd, cfqq, 0); 1401 - cfq_schedule_dispatch(cfqd, 0); 1403 + cfq_schedule_dispatch(cfqd); 1402 1404 } 1403 1405 1404 1406 kmem_cache_free(cfq_pool, cfqq); ··· 1493 1495 { 1494 1496 if (unlikely(cfqq == cfqd->active_queue)) { 1495 1497 __cfq_slice_expired(cfqd, cfqq, 0); 1496 - cfq_schedule_dispatch(cfqd, 0); 1498 + cfq_schedule_dispatch(cfqd); 1497 1499 } 1498 1500 1499 1501 cfq_put_queue(cfqq); ··· 2211 2213 } 2212 2214 2213 2215 if (!rq_in_driver(cfqd)) 2214 - cfq_schedule_dispatch(cfqd, 0); 2216 + cfq_schedule_dispatch(cfqd); 2215 2217 } 2216 2218 2217 2219 /* ··· 2341 2343 if (cic) 2342 2344 put_io_context(cic->ioc); 2343 2345 2344 - cfq_schedule_dispatch(cfqd, 0); 2346 + cfq_schedule_dispatch(cfqd); 2345 2347 spin_unlock_irqrestore(q->queue_lock, flags); 2346 2348 cfq_log(cfqd, "set_request fail"); 2347 2349 return 1; ··· 2350 2352 static void cfq_kick_queue(struct work_struct *work) 2351 2353 { 2352 2354 struct cfq_data *cfqd = 2353 - container_of(work, struct cfq_data, unplug_work.work); 2355 + container_of(work, struct cfq_data, unplug_work); 2354 
2356 struct request_queue *q = cfqd->queue; 2355 2357 2356 2358 spin_lock_irq(q->queue_lock); ··· 2404 2406 expire: 2405 2407 cfq_slice_expired(cfqd, timed_out); 2406 2408 out_kick: 2407 - cfq_schedule_dispatch(cfqd, 0); 2409 + cfq_schedule_dispatch(cfqd); 2408 2410 out_cont: 2409 2411 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); 2410 2412 } ··· 2412 2414 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd) 2413 2415 { 2414 2416 del_timer_sync(&cfqd->idle_slice_timer); 2415 - cancel_delayed_work_sync(&cfqd->unplug_work); 2417 + cancel_work_sync(&cfqd->unplug_work); 2416 2418 } 2417 2419 2418 2420 static void cfq_put_async_queues(struct cfq_data *cfqd) ··· 2494 2496 cfqd->idle_slice_timer.function = cfq_idle_slice_timer; 2495 2497 cfqd->idle_slice_timer.data = (unsigned long) cfqd; 2496 2498 2497 - INIT_DELAYED_WORK(&cfqd->unplug_work, cfq_kick_queue); 2499 + INIT_WORK(&cfqd->unplug_work, cfq_kick_queue); 2498 2500 2499 2501 cfqd->cfq_quantum = cfq_quantum; 2500 2502 cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
-4
include/linux/blkdev.h
··· 1172 1172 } 1173 1173 1174 1174 struct work_struct; 1175 - struct delayed_work; 1176 1175 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); 1177 - int kblockd_schedule_delayed_work(struct request_queue *q, 1178 - struct delayed_work *work, 1179 - unsigned long delay); 1180 1176 1181 1177 #define MODULE_ALIAS_BLOCKDEV(major,minor) \ 1182 1178 MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))