Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
block: make unplug timer trace event correspond to the schedule() unplug
block: let io_schedule() flush the plug inline

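The two commits split the plug flush into an explicit path and a schedule() path. Below is a small, self-contained userspace sketch of that split (illustrative only; struct queue, run_queue() and flush_plug() are made-up stand-ins, not the kernel API): an explicit flush runs the queues inline, while a flush requested from schedule() passes from_schedule=true so the dispatch is deferred to kblockd, and the unplug trace event records which case it was.

/* Illustrative userspace model of the from_schedule split; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct queue { const char *name; };

/* Stands in for __blk_run_queue(q, from_schedule). */
static void run_queue(struct queue *q, bool from_schedule)
{
        if (from_schedule)
                printf("%s: dispatch deferred to kblockd\n", q->name);  /* keeps the scheduler stack shallow */
        else
                printf("%s: dispatch run inline\n", q->name);           /* lower latency for explicit flushes */
}

/* Stands in for blk_flush_plug_list(plug, from_schedule), for a single queue. */
static void flush_plug(struct queue *q, unsigned int depth, bool from_schedule)
{
        /* The real code emits trace_block_unplug(q, depth, !from_schedule) here. */
        printf("unplug %s, depth=%u, explicit=%d\n", q->name, depth, !from_schedule);
        run_queue(q, from_schedule);
}

int main(void)
{
        struct queue q = { "sda" };

        flush_plug(&q, 4, false);       /* like blk_flush_plug(): explicit, inline */
        flush_plug(&q, 4, true);        /* like blk_schedule_flush_plug(): from schedule() */
        return 0;
}

In the diffs that follow, blk_flush_plug() now passes false (flush inline) and the new blk_schedule_flush_plug() passes true; kernel/sched.c switches to the latter, and blktrace maps the trace event's 'explicit' flag to BLK_TA_UNPLUG_IO versus BLK_TA_UNPLUG_TIMER.
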
+45 -19
+12 -6
block/blk-core.c
···
         return !(rqa->q <= rqb->q);
 }
 
+/*
+ * If 'from_schedule' is true, then postpone the dispatch of requests
+ * until a safe kblockd context. We do this to avoid accidental big
+ * additional stack usage in driver dispatch, in places where the original
+ * plugger did not intend it.
+ */
 static void queue_unplugged(struct request_queue *q, unsigned int depth,
-                            bool force_kblockd)
+                            bool from_schedule)
 {
-        trace_block_unplug_io(q, depth);
-        __blk_run_queue(q, force_kblockd);
+        trace_block_unplug(q, depth, !from_schedule);
+        __blk_run_queue(q, from_schedule);
 
         if (q->unplugged_fn)
                 q->unplugged_fn(q);
 }
 
-void blk_flush_plug_list(struct blk_plug *plug, bool force_kblockd)
+void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
         struct request_queue *q;
         unsigned long flags;
···
                 BUG_ON(!rq->q);
                 if (rq->q != q) {
                         if (q) {
-                                queue_unplugged(q, depth, force_kblockd);
+                                queue_unplugged(q, depth, from_schedule);
                                 spin_unlock(q->queue_lock);
                         }
                         q = rq->q;
···
         }
 
         if (q) {
-                queue_unplugged(q, depth, force_kblockd);
+                queue_unplugged(q, depth, from_schedule);
                 spin_unlock(q->queue_lock);
         }
 
+13
include/linux/blkdev.h
···
         struct blk_plug *plug = tsk->plug;
 
         if (plug)
+                blk_flush_plug_list(plug, false);
+}
+
+static inline void blk_schedule_flush_plug(struct task_struct *tsk)
+{
+        struct blk_plug *plug = tsk->plug;
+
+        if (plug)
                 blk_flush_plug_list(plug, true);
 }
 
···
 static inline void blk_flush_plug(struct task_struct *task)
 {
 }
+
+static inline void blk_schedule_flush_plug(struct task_struct *task)
+{
+}
+
 
 static inline bool blk_needs_flush_plug(struct task_struct *tsk)
 {
+7 -6
include/trace/events/block.h
···
 
 DECLARE_EVENT_CLASS(block_unplug,
 
-        TP_PROTO(struct request_queue *q, unsigned int depth),
+        TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
 
-        TP_ARGS(q, depth),
+        TP_ARGS(q, depth, explicit),
 
         TP_STRUCT__entry(
                 __field( int, nr_rq )
···
 );
 
 /**
- * block_unplug_io - release of operations requests in request queue
+ * block_unplug - release of operations requests in request queue
  * @q: request queue to unplug
  * @depth: number of requests just added to the queue
+ * @explicit: whether this was an explicit unplug, or one from schedule()
  *
  * Unplug request queue @q because device driver is scheduled to work
  * on elements in the request queue.
  */
-DEFINE_EVENT(block_unplug, block_unplug_io,
+DEFINE_EVENT(block_unplug, block_unplug,
 
-        TP_PROTO(struct request_queue *q, unsigned int depth),
+        TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
 
-        TP_ARGS(q, depth)
+        TP_ARGS(q, depth, explicit)
 );
 
 /**
+1 -1
kernel/sched.c
···
                          */
                         if (blk_needs_flush_plug(prev)) {
                                 raw_spin_unlock(&rq->lock);
-                                blk_flush_plug(prev);
+                                blk_schedule_flush_plug(prev);
                                 raw_spin_lock(&rq->lock);
                         }
 
+12 -6
kernel/trace/blktrace.c
···
                 __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
 }
 
-static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q,
-                                    unsigned int depth)
+static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
+                                 unsigned int depth, bool explicit)
 {
         struct blk_trace *bt = q->blk_trace;
 
         if (bt) {
                 __be64 rpdu = cpu_to_be64(depth);
+                u32 what;
 
-                __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
-                                sizeof(rpdu), &rpdu);
+                if (explicit)
+                        what = BLK_TA_UNPLUG_IO;
+                else
+                        what = BLK_TA_UNPLUG_TIMER;
+
+                __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
         }
 }
 
···
         WARN_ON(ret);
         ret = register_trace_block_plug(blk_add_trace_plug, NULL);
         WARN_ON(ret);
-        ret = register_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
+        ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
         WARN_ON(ret);
         ret = register_trace_block_split(blk_add_trace_split, NULL);
         WARN_ON(ret);
···
         unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
         unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
         unregister_trace_block_split(blk_add_trace_split, NULL);
-        unregister_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
+        unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
         unregister_trace_block_plug(blk_add_trace_plug, NULL);
         unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
         unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
···
         [__BLK_TA_COMPLETE]     = {{ "C", "complete" },    blk_log_with_error },
         [__BLK_TA_PLUG]         = {{ "P", "plug" },        blk_log_plug },
         [__BLK_TA_UNPLUG_IO]    = {{ "U", "unplug_io" },   blk_log_unplug },
+        [__BLK_TA_UNPLUG_TIMER] = {{ "UT", "unplug_timer" }, blk_log_unplug },
         [__BLK_TA_INSERT]       = {{ "I", "insert" },      blk_log_generic },
         [__BLK_TA_SPLIT]        = {{ "X", "split" },       blk_log_split },
         [__BLK_TA_BOUNCE]       = {{ "B", "bounce" },      blk_log_generic },