block: make unplug timer trace event correspond to the schedule() unplug
It's a pretty close match to what we had before: the timer triggering meant that nobody unplugged the plug in due time, and in the new scheme that corresponds closely to the schedule() unplug. It's essentially the difference between an explicit unplug (IO unplug) and an implicit unplug (timer unplug, i.e. we scheduled with pending IO queued).

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
parent a237c1c5bc
commit 49cac01e1f
3 changed files with 31 additions and 18 deletions
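For context, a minimal hypothetical sketch (not part of the commit) of the two unplug paths the message distinguishes, assuming the 2.6.39-era plugging API; the submit_batch() helper and its bio array are illustrative only:

#include <linux/blkdev.h>

/* Illustrative caller: shows where the explicit and implicit unplugs happen. */
static void submit_batch(struct bio *bios[], int nr)
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);          /* requests collect on the per-task plug list */

        for (i = 0; i < nr; i++)
                submit_bio(READ, bios[i]);

        /*
         * Explicit unplug: blk_finish_plug() calls blk_flush_plug_list(&plug, false),
         * which now emits trace_block_unplug(q, depth, true) -> BLK_TA_UNPLUG_IO ("U").
         *
         * Implicit unplug: if the task blocks in schedule() while requests are still
         * plugged, the list is flushed with from_schedule == true instead, emitting
         * trace_block_unplug(q, depth, false) -> BLK_TA_UNPLUG_TIMER ("UT").
         */
        blk_finish_plug(&plug);
}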
block/blk-core.c

@@ -2662,17 +2662,23 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
 	return !(rqa->q <= rqb->q);
 }
 
+/*
+ * If 'from_schedule' is true, then postpone the dispatch of requests
+ * until a safe kblockd context. We due this to avoid accidental big
+ * additional stack usage in driver dispatch, in places where the originally
+ * plugger did not intend it.
+ */
 static void queue_unplugged(struct request_queue *q, unsigned int depth,
-			    bool force_kblockd)
+			    bool from_schedule)
 {
-	trace_block_unplug_io(q, depth);
-	__blk_run_queue(q, force_kblockd);
+	trace_block_unplug(q, depth, !from_schedule);
+	__blk_run_queue(q, from_schedule);
 
 	if (q->unplugged_fn)
 		q->unplugged_fn(q);
 }
 
-void blk_flush_plug_list(struct blk_plug *plug, bool force_kblockd)
+void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
 	struct request_queue *q;
 	unsigned long flags;
@@ -2707,7 +2713,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool force_kblockd)
 		BUG_ON(!rq->q);
 		if (rq->q != q) {
 			if (q) {
-				queue_unplugged(q, depth, force_kblockd);
+				queue_unplugged(q, depth, from_schedule);
 				spin_unlock(q->queue_lock);
 			}
 			q = rq->q;
@@ -2728,7 +2734,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool force_kblockd)
 	}
 
 	if (q) {
-		queue_unplugged(q, depth, force_kblockd);
+		queue_unplugged(q, depth, from_schedule);
 		spin_unlock(q->queue_lock);
 	}
 
include/trace/events/block.h

@@ -401,9 +401,9 @@ TRACE_EVENT(block_plug,
 
 DECLARE_EVENT_CLASS(block_unplug,
 
-	TP_PROTO(struct request_queue *q, unsigned int depth),
+	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
 
-	TP_ARGS(q, depth),
+	TP_ARGS(q, depth, explicit),
 
 	TP_STRUCT__entry(
 		__field( int, nr_rq )
@@ -419,18 +419,19 @@ DECLARE_EVENT_CLASS(block_unplug,
 );
 
 /**
- * block_unplug_io - release of operations requests in request queue
+ * block_unplug - release of operations requests in request queue
  * @q: request queue to unplug
  * @depth: number of requests just added to the queue
+ * @explicit: whether this was an explicit unplug, or one from schedule()
  *
  * Unplug request queue @q because device driver is scheduled to work
  * on elements in the request queue.
  */
-DEFINE_EVENT(block_unplug, block_unplug_io,
+DEFINE_EVENT(block_unplug, block_unplug,
 
-	TP_PROTO(struct request_queue *q, unsigned int depth),
+	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
 
-	TP_ARGS(q, depth)
+	TP_ARGS(q, depth, explicit)
 );
 
 /**
kernel/trace/blktrace.c

@@ -850,16 +850,21 @@ static void blk_add_trace_plug(void *ignore, struct request_queue *q)
 		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
 }
 
-static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q,
-				    unsigned int depth)
+static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
+				    unsigned int depth, bool explicit)
 {
 	struct blk_trace *bt = q->blk_trace;
 
 	if (bt) {
 		__be64 rpdu = cpu_to_be64(depth);
+		u32 what;
 
-		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
-				sizeof(rpdu), &rpdu);
+		if (explicit)
+			what = BLK_TA_UNPLUG_IO;
+		else
+			what = BLK_TA_UNPLUG_TIMER;
+
+		__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
 	}
 }
 
@@ -1002,7 +1007,7 @@ static void blk_register_tracepoints(void)
 	WARN_ON(ret);
 	ret = register_trace_block_plug(blk_add_trace_plug, NULL);
 	WARN_ON(ret);
-	ret = register_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
+	ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
 	WARN_ON(ret);
 	ret = register_trace_block_split(blk_add_trace_split, NULL);
 	WARN_ON(ret);
@@ -1017,7 +1022,7 @@ static void blk_unregister_tracepoints(void)
 	unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
 	unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
 	unregister_trace_block_split(blk_add_trace_split, NULL);
-	unregister_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
+	unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
 	unregister_trace_block_plug(blk_add_trace_plug, NULL);
 	unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
 	unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
@@ -1332,6 +1337,7 @@ static const struct {
 	[__BLK_TA_COMPLETE]	= {{ "C", "complete" }, blk_log_with_error },
 	[__BLK_TA_PLUG]		= {{ "P", "plug" }, blk_log_plug },
 	[__BLK_TA_UNPLUG_IO]	= {{ "U", "unplug_io" }, blk_log_unplug },
+	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
 	[__BLK_TA_INSERT]	= {{ "I", "insert" }, blk_log_generic },
 	[__BLK_TA_SPLIT]	= {{ "X", "split" }, blk_log_split },
 	[__BLK_TA_BOUNCE]	= {{ "B", "bounce" }, blk_log_generic },