Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  block: make unplug timer trace event correspond to the schedule() unplug
  block: let io_schedule() flush the plug inline
block/blk-core.c

···
 	return !(rqa->q <= rqb->q);
 }
 
+/*
+ * If 'from_schedule' is true, then postpone the dispatch of requests
+ * until a safe kblockd context. We do this to avoid accidental large
+ * extra stack usage in driver dispatch, in places where the original
+ * plugger did not intend it.
+ */
 static void queue_unplugged(struct request_queue *q, unsigned int depth,
-			    bool force_kblockd)
+			    bool from_schedule)
 {
-	trace_block_unplug_io(q, depth);
-	__blk_run_queue(q, force_kblockd);
+	trace_block_unplug(q, depth, !from_schedule);
+	__blk_run_queue(q, from_schedule);
 
 	if (q->unplugged_fn)
 		q->unplugged_fn(q);
 }
 
-void blk_flush_plug_list(struct blk_plug *plug, bool force_kblockd)
+void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
 	struct request_queue *q;
 	unsigned long flags;
···
 		BUG_ON(!rq->q);
 		if (rq->q != q) {
 			if (q) {
-				queue_unplugged(q, depth, force_kblockd);
+				queue_unplugged(q, depth, from_schedule);
 				spin_unlock(q->queue_lock);
 			}
 			q = rq->q;
···
 	}
 
 	if (q) {
-		queue_unplugged(q, depth, force_kblockd);
+		queue_unplugged(q, depth, from_schedule);
 		spin_unlock(q->queue_lock);
 	}
···
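The from_schedule flag is what decides whether dispatch runs on the caller's stack or gets punted to kblockd. The excerpt does not include __blk_run_queue() itself; a rough sketch of how the 2.6.39-era implementation honors the flag (reconstructed for illustration, not verbatim from this merge) looks like:

void __blk_run_queue(struct request_queue *q, bool force_kblockd)
{
	if (unlikely(blk_queue_stopped(q)))
		return;

	/*
	 * Run the request_fn directly only when we may use the caller's
	 * stack and we are not already recursing; otherwise punt the work
	 * to the kblockd workqueue, which runs with a fresh, bounded stack.
	 */
	if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
		q->request_fn(q);
		queue_flag_clear(QUEUE_FLAG_REENTER, q);
	} else
		queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}

Flushing from schedule() therefore trades a small dispatch latency for a guaranteed shallow stack, which matches the intent of a task that scheduled away with plugged I/O pending.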
include/trace/events/block.h

···
 DECLARE_EVENT_CLASS(block_unplug,
 
-	TP_PROTO(struct request_queue *q, unsigned int depth),
+	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
 
-	TP_ARGS(q, depth),
+	TP_ARGS(q, depth, explicit),
 
 	TP_STRUCT__entry(
 		__field( int,	nr_rq	)
···
 );
 
 /**
- * block_unplug_io - release of queued requests in request queue
+ * block_unplug - release of queued requests in request queue
  * @q: request queue to unplug
  * @depth: number of requests just added to the queue
+ * @explicit: whether this was an explicit unplug, or one from schedule()
  *
  * Unplug request queue @q because device driver is scheduled to work
  * on elements in the request queue.
  */
-DEFINE_EVENT(block_unplug, block_unplug_io,
+DEFINE_EVENT(block_unplug, block_unplug,
 
-	TP_PROTO(struct request_queue *q, unsigned int depth),
+	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
 
-	TP_ARGS(q, depth)
+	TP_ARGS(q, depth, explicit)
 );
 
 /**
···
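The new explicit flag is what lets blktrace keep emitting the two classic unplug actions even though the separate timer event is gone. The companion probe change in kernel/trace/blktrace.c is not part of this excerpt; sketched here for illustration, it maps the flag back onto the old IO/timer actions roughly like so:

static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
				 unsigned int depth, bool explicit)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		__be64 rpdu = cpu_to_be64(depth);
		u32 what;

		/*
		 * An explicit unplug maps to the old "unplug IO" action,
		 * a schedule()-time unplug to the old "unplug timer" one.
		 */
		if (explicit)
			what = BLK_TA_UNPLUG_IO;
		else
			what = BLK_TA_UNPLUG_TIMER;

		__blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
	}
}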
kernel/sched.c  +1 -1

···
 	 */
 	if (blk_needs_flush_plug(prev)) {
 		raw_spin_unlock(&rq->lock);
-		blk_flush_plug(prev);
+		blk_schedule_flush_plug(prev);
 		raw_spin_lock(&rq->lock);
 	}
···
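With this change, schedule() flushes with from_schedule == true, while io_schedule() (the other half of this merge, not shown in the excerpt) flushes inline on its own stack. Both helpers are thin wrappers around blk_flush_plug_list(); a sketch of their include/linux/blkdev.h definitions from this series (illustrative, not verbatim):

static inline void blk_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	/*
	 * Flush on the caller's stack. This is fine for io_schedule(),
	 * which is always entered from a well-defined, shallow context.
	 */
	if (plug)
		blk_flush_plug_list(plug, false);
}

static inline void blk_schedule_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	/*
	 * Punt dispatch to kblockd. schedule() can be reached from almost
	 * anywhere, so an arbitrary amount of stack may already be in use.
	 */
	if (plug)
		blk_flush_plug_list(plug, true);
}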