[PATCH] block: implement elv_insert and use it (fix ordcolor flipping bug)

q->ordcolor must only be flipped on initial queueing of a hardbarrier
request.  Constructing an ordered sequence and requeueing used to pass
through __elv_add_request(), which flips q->ordcolor whenever it sees a
barrier request.

This patch separates elv_insert() out from __elv_add_request() and uses
elv_insert() when constructing an ordered sequence and when requeueing.
elv_insert() inserts the given request at the specified position and
does nothing else.

Signed-off-by: Tejun Heo <htejun@gmail.com>
Acked-by: Jens Axboe <axboe@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
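The failure mode is easiest to see in isolation: because a requeue
re-entered __elv_add_request(), a hardbarrier request that was requeued
toggled q->ordcolor a second time, so later requests were tagged with
the wrong ordered color. Below is a minimal standalone sketch of that
double flip. This is illustrative user-space C only; the queue/request
structs and the REQ_ORDERED_COLOR flag are stand-ins that mirror the
kernel names, not the real definitions.

#include <stdio.h>

struct queue   { unsigned int ordcolor; };
struct request { int barrier; unsigned int flags; };

#define REQ_ORDERED_COLOR 0x01u

/* Old path: every insertion, including a requeue, toggles the color
 * when it sees a barrier request. */
static void old_add_request(struct queue *q, struct request *rq)
{
	if (q->ordcolor)
		rq->flags |= REQ_ORDERED_COLOR;
	if (rq->barrier)
		q->ordcolor ^= 1;
}

/* Fixed path for requeue/ordered-sequence insertion: position-only,
 * no ordcolor side effects (the role elv_insert() takes over). */
static void insert_only(struct queue *q, struct request *rq)
{
	(void)q;
	(void)rq;
}

int main(void)
{
	struct queue q = { 0 };
	struct request barrier = { 1, 0 };

	old_add_request(&q, &barrier);	/* initial queueing: 0 -> 1 */
	old_add_request(&q, &barrier);	/* buggy requeue:    1 -> 0 */
	printf("buggy requeue: ordcolor=%u (expected 1)\n", q.ordcolor);

	q.ordcolor = 0;
	old_add_request(&q, &barrier);	/* initial queueing: 0 -> 1 */
	insert_only(&q, &barrier);	/* fixed requeue: unchanged  */
	printf("fixed requeue: ordcolor=%u\n", q.ordcolor);
	return 0;
}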
parent e5ea0a9fca
commit 30e9656cc3

3 changed files with 41 additions and 34 deletions
diff --git a/block/elevator.c b/block/elevator.c
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -293,7 +293,7 @@ void elv_requeue_request(request_queue_t *q, struct request *rq)
 
 	rq->flags &= ~REQ_STARTED;
 
-	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE, 0);
+	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
 }
 
 static void elv_drain_elevator(request_queue_t *q)
@@ -310,41 +310,11 @@ static void elv_drain_elevator(request_queue_t *q)
 	}
 }
 
-void __elv_add_request(request_queue_t *q, struct request *rq, int where,
-		       int plug)
+void elv_insert(request_queue_t *q, struct request *rq, int where)
 {
 	struct list_head *pos;
 	unsigned ordseq;
 
-	if (q->ordcolor)
-		rq->flags |= REQ_ORDERED_COLOR;
-
-	if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
-		/*
-		 * toggle ordered color
-		 */
-		if (blk_barrier_rq(rq))
-			q->ordcolor ^= 1;
-
-		/*
-		 * barriers implicitly indicate back insertion
-		 */
-		if (where == ELEVATOR_INSERT_SORT)
-			where = ELEVATOR_INSERT_BACK;
-
-		/*
-		 * this request is scheduling boundary, update end_sector
-		 */
-		if (blk_fs_request(rq)) {
-			q->end_sector = rq_end_sector(rq);
-			q->boundary_rq = rq;
-		}
-	} else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
-		where = ELEVATOR_INSERT_BACK;
-
-	if (plug)
-		blk_plug_device(q);
-
 	rq->q = q;
 
 	switch (where) {
@@ -425,6 +395,42 @@ void __elv_add_request(request_queue_t *q, struct request *rq, int where,
 	}
 }
 
+void __elv_add_request(request_queue_t *q, struct request *rq, int where,
+		       int plug)
+{
+	if (q->ordcolor)
+		rq->flags |= REQ_ORDERED_COLOR;
+
+	if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+		/*
+		 * toggle ordered color
+		 */
+		if (blk_barrier_rq(rq))
+			q->ordcolor ^= 1;
+
+		/*
+		 * barriers implicitly indicate back insertion
+		 */
+		if (where == ELEVATOR_INSERT_SORT)
+			where = ELEVATOR_INSERT_BACK;
+
+		/*
+		 * this request is scheduling boundary, update
+		 * end_sector
+		 */
+		if (blk_fs_request(rq)) {
+			q->end_sector = rq_end_sector(rq);
+			q->boundary_rq = rq;
+		}
+	} else if (!(rq->flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT)
+		where = ELEVATOR_INSERT_BACK;
+
+	if (plug)
+		blk_plug_device(q);
+
+	elv_insert(q, rq, where);
+}
+
 void elv_add_request(request_queue_t *q, struct request *rq, int where,
 		     int plug)
 {
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -454,7 +454,7 @@ static void queue_flush(request_queue_t *q, unsigned which)
 	rq->end_io = end_io;
 	q->prepare_flush_fn(q, rq);
 
-	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 }
 
 static inline struct request *start_ordered(request_queue_t *q,
@@ -490,7 +490,7 @@ static inline struct request *start_ordered(request_queue_t *q,
 	else
 		q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
 
-	__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 
 	if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
 		queue_flush(q, QUEUE_ORDERED_PREFLUSH);
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -82,6 +82,7 @@ struct elevator_queue
 extern void elv_dispatch_sort(request_queue_t *, struct request *);
 extern void elv_add_request(request_queue_t *, struct request *, int, int);
 extern void __elv_add_request(request_queue_t *, struct request *, int, int);
+extern void elv_insert(request_queue_t *, struct request *, int);
 extern int elv_merge(request_queue_t *, struct request **, struct bio *);
 extern void elv_merge_requests(request_queue_t *, struct request *,
 			       struct request *);