[BLOCK] ll_rw_blk: fastpath get_request()
Originally from: Nick Piggin <nickpiggin@yahoo.com.au> Move current_io_context out of the get_request fastpath. Also try to streamline a few other things in this area. Signed-off-by: Jens Axboe <axboe@suse.de>
This commit is contained in:
parent
ef9be1d336
commit
88ee5ef157
1 changed file with 37 additions and 33 deletions
|
@ -1908,40 +1908,40 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
|
||||||
{
|
{
|
||||||
struct request *rq = NULL;
|
struct request *rq = NULL;
|
||||||
struct request_list *rl = &q->rq;
|
struct request_list *rl = &q->rq;
|
||||||
struct io_context *ioc = current_io_context(GFP_ATOMIC);
|
struct io_context *ioc = NULL;
|
||||||
int priv;
|
int may_queue, priv;
|
||||||
|
|
||||||
if (rl->count[rw]+1 >= q->nr_requests) {
|
may_queue = elv_may_queue(q, rw, bio);
|
||||||
/*
|
if (may_queue == ELV_MQUEUE_NO)
|
||||||
* The queue will fill after this allocation, so set it as
|
goto rq_starved;
|
||||||
* full, and mark this process as "batching". This process
|
|
||||||
* will be allowed to complete a batch of requests, others
|
if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
|
||||||
* will be blocked.
|
if (rl->count[rw]+1 >= q->nr_requests) {
|
||||||
*/
|
ioc = current_io_context(GFP_ATOMIC);
|
||||||
if (!blk_queue_full(q, rw)) {
|
/*
|
||||||
ioc_set_batching(q, ioc);
|
* The queue will fill after this allocation, so set
|
||||||
blk_set_queue_full(q, rw);
|
* it as full, and mark this process as "batching".
|
||||||
|
* This process will be allowed to complete a batch of
|
||||||
|
* requests, others will be blocked.
|
||||||
|
*/
|
||||||
|
if (!blk_queue_full(q, rw)) {
|
||||||
|
ioc_set_batching(q, ioc);
|
||||||
|
blk_set_queue_full(q, rw);
|
||||||
|
} else {
|
||||||
|
if (may_queue != ELV_MQUEUE_MUST
|
||||||
|
&& !ioc_batching(q, ioc)) {
|
||||||
|
/*
|
||||||
|
* The queue is full and the allocating
|
||||||
|
* process is not a "batcher", and not
|
||||||
|
* exempted by the IO scheduler
|
||||||
|
*/
|
||||||
|
goto out;
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
set_queue_congested(q, rw);
|
||||||
}
|
}
|
||||||
|
|
||||||
switch (elv_may_queue(q, rw, bio)) {
|
|
||||||
case ELV_MQUEUE_NO:
|
|
||||||
goto rq_starved;
|
|
||||||
case ELV_MQUEUE_MAY:
|
|
||||||
break;
|
|
||||||
case ELV_MQUEUE_MUST:
|
|
||||||
goto get_rq;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (blk_queue_full(q, rw) && !ioc_batching(q, ioc)) {
|
|
||||||
/*
|
|
||||||
* The queue is full and the allocating process is not a
|
|
||||||
* "batcher", and not exempted by the IO scheduler
|
|
||||||
*/
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
|
|
||||||
get_rq:
|
|
||||||
/*
|
/*
|
||||||
* Only allow batching queuers to allocate up to 50% over the defined
|
* Only allow batching queuers to allocate up to 50% over the defined
|
||||||
* limit of requests, otherwise we could have thousands of requests
|
* limit of requests, otherwise we could have thousands of requests
|
||||||
|
@ -1952,8 +1952,6 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
|
||||||
|
|
||||||
rl->count[rw]++;
|
rl->count[rw]++;
|
||||||
rl->starved[rw] = 0;
|
rl->starved[rw] = 0;
|
||||||
if (rl->count[rw] >= queue_congestion_on_threshold(q))
|
|
||||||
set_queue_congested(q, rw);
|
|
||||||
|
|
||||||
priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
|
priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
|
||||||
if (priv)
|
if (priv)
|
||||||
|
@ -1962,7 +1960,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
|
||||||
spin_unlock_irq(q->queue_lock);
|
spin_unlock_irq(q->queue_lock);
|
||||||
|
|
||||||
rq = blk_alloc_request(q, rw, bio, priv, gfp_mask);
|
rq = blk_alloc_request(q, rw, bio, priv, gfp_mask);
|
||||||
if (!rq) {
|
if (unlikely(!rq)) {
|
||||||
/*
|
/*
|
||||||
* Allocation failed presumably due to memory. Undo anything
|
* Allocation failed presumably due to memory. Undo anything
|
||||||
* we might have messed up.
|
* we might have messed up.
|
||||||
|
@ -1987,6 +1985,12 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* ioc may be NULL here, and ioc_batching will be false. That's
|
||||||
|
* OK, if the queue is under the request limit then requests need
|
||||||
|
* not count toward the nr_batch_requests limit. There will always
|
||||||
|
* be some limit enforced by BLK_BATCH_TIME.
|
||||||
|
*/
|
||||||
if (ioc_batching(q, ioc))
|
if (ioc_batching(q, ioc))
|
||||||
ioc->nr_batch_requests--;
|
ioc->nr_batch_requests--;
|
||||||
|
|
||||||
|
|
Loading…
Add table
Reference in a new issue