Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  [BLOCK] Don't allow empty barriers to be passed down to queues that don't grok them
  dm: bounce_pfn limit added
  Deadline iosched: Fix batching fairness
  Deadline iosched: Reset batch for ordered requests
  Deadline iosched: Factor out finding latter request
commit b4f555081f
4 changed files with 40 additions and 28 deletions
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -55,6 +55,20 @@ static void deadline_move_request(struct deadline_data *, struct request *);
 
 #define RQ_RB_ROOT(dd, rq)	(&(dd)->sort_list[rq_data_dir((rq))])
 
+/*
+ * get the request after `rq' in sector-sorted order
+ */
+static inline struct request *
+deadline_latter_request(struct request *rq)
+{
+	struct rb_node *node = rb_next(&rq->rb_node);
+
+	if (node)
+		return rb_entry_rq(node);
+
+	return NULL;
+}
+
 static void
 deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
 {
@@ -74,13 +88,8 @@ deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
 {
 	const int data_dir = rq_data_dir(rq);
 
-	if (dd->next_rq[data_dir] == rq) {
-		struct rb_node *rbnext = rb_next(&rq->rb_node);
-
-		dd->next_rq[data_dir] = NULL;
-		if (rbnext)
-			dd->next_rq[data_dir] = rb_entry_rq(rbnext);
-	}
+	if (dd->next_rq[data_dir] == rq)
+		dd->next_rq[data_dir] = deadline_latter_request(rq);
 
 	elv_rb_del(RQ_RB_ROOT(dd, rq), rq);
 }
@@ -198,13 +207,10 @@ static void
 deadline_move_request(struct deadline_data *dd, struct request *rq)
 {
 	const int data_dir = rq_data_dir(rq);
-	struct rb_node *rbnext = rb_next(&rq->rb_node);
 
 	dd->next_rq[READ] = NULL;
 	dd->next_rq[WRITE] = NULL;
-
-	if (rbnext)
-		dd->next_rq[data_dir] = rb_entry_rq(rbnext);
+	dd->next_rq[data_dir] = deadline_latter_request(rq);
 
 	dd->last_sector = rq->sector + rq->nr_sectors;
@@ -301,30 +307,23 @@ static int deadline_dispatch_requests(struct request_queue *q, int force)
 	/*
 	 * we are not running a batch, find best request for selected data_dir
 	 */
-	if (deadline_check_fifo(dd, data_dir)) {
-		/* An expired request exists - satisfy it */
-		dd->batching = 0;
+	if (deadline_check_fifo(dd, data_dir) || !dd->next_rq[data_dir]) {
+		/*
+		 * A deadline has expired, the last request was in the other
+		 * direction, or we have run out of higher-sectored requests.
+		 * Start again from the request with the earliest expiry time.
+		 */
 		rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
-
-	} else if (dd->next_rq[data_dir]) {
+	} else {
 		/*
 		 * The last req was the same dir and we have a next request in
 		 * sort order. No expired requests so continue on from here.
 		 */
 		rq = dd->next_rq[data_dir];
-	} else {
-		struct rb_node *node;
-		/*
-		 * The last req was the other direction or we have run out of
-		 * higher-sectored requests. Go back to the lowest sectored
-		 * request (1 way elevator) and start a new batch.
-		 */
-		dd->batching = 0;
-		node = rb_first(&dd->sort_list[data_dir]);
-		if (node)
-			rq = rb_entry_rq(node);
 	}
 
+	dd->batching = 0;
+
 dispatch_request:
 	/*
 	 * rq is the selected appropriate request.
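For context (not part of this commit): the deadline_check_fifo() test that the new combined condition relies on lives earlier in the same file. The sketch below is reproduced from memory of kernels of this era and may differ in minor details; it simply checks whether the oldest request on the given direction's FIFO has passed its deadline.

static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
	/* the head of the FIFO is the oldest request for this direction */
	struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);

	if (time_after(jiffies, rq_fifo_time(rq)))
		return 1;	/* its deadline has expired */

	return 0;
}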
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -3221,6 +3221,7 @@ static inline void __generic_make_request(struct bio *bio)
 	sector_t old_sector;
 	int ret, nr_sectors = bio_sectors(bio);
 	dev_t old_dev;
+	int err = -EIO;
 
 	might_sleep();
 
@@ -3248,7 +3249,7 @@ static inline void __generic_make_request(struct bio *bio)
 			bdevname(bio->bi_bdev, b),
 			(long long) bio->bi_sector);
 end_io:
-		bio_endio(bio, -EIO);
+		bio_endio(bio, err);
 		break;
 	}
 
@@ -3283,6 +3284,10 @@ static inline void __generic_make_request(struct bio *bio)
 
 		if (bio_check_eod(bio, nr_sectors))
 			goto end_io;
+		if (bio_empty_barrier(bio) && !q->prepare_flush_fn) {
+			err = -EOPNOTSUPP;
+			goto end_io;
+		}
 
 		ret = q->make_request_fn(q, bio);
 	} while (ret);
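A note on the new check above (again, not part of the diff): an "empty barrier" is a barrier bio that carries no data and exists only to force ordering/flushing. If memory serves, the helpers are defined in include/linux/bio.h along these lines, but treat the exact definitions as an assumption rather than a quotation:

/* assumed shape of the bio barrier helpers in this era */
#define bio_barrier(bio)	((bio)->bi_rw & (1 << BIO_RW_BARRIER))
#define bio_empty_barrier(bio)	(bio_barrier(bio) && !(bio)->bi_size)

A queue that never registered a prepare_flush_fn has no way to honour such a bio, so it is now failed early with -EOPNOTSUPP instead of being passed down.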
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -102,6 +102,8 @@ static void combine_restrictions_low(struct io_restrictions *lhs,
 	lhs->seg_boundary_mask =
 		min_not_zero(lhs->seg_boundary_mask, rhs->seg_boundary_mask);
 
+	lhs->bounce_pfn = min_not_zero(lhs->bounce_pfn, rhs->bounce_pfn);
+
 	lhs->no_cluster |= rhs->no_cluster;
 }
 
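For reference (not part of this commit): the new bounce_pfn limit is stacked with the same min_not_zero() rule already used for the other restrictions, i.e. take the smaller of the two values while ignoring a zero, which means "unset". The macro sits near the top of dm-table.c, roughly as:

/*
 * Returns the minimum that is _not_ zero, unless both are zero.
 */
#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))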
@@ -566,6 +568,8 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev)
 		min_not_zero(rs->seg_boundary_mask,
 			     q->seg_boundary_mask);
 
+	rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn);
+
 	rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 }
 EXPORT_SYMBOL_GPL(dm_set_device_limits);
@@ -707,6 +711,8 @@ static void check_for_valid_limits(struct io_restrictions *rs)
 		rs->max_segment_size = MAX_SEGMENT_SIZE;
 	if (!rs->seg_boundary_mask)
 		rs->seg_boundary_mask = -1;
+	if (!rs->bounce_pfn)
+		rs->bounce_pfn = -1;
 }
 
 int dm_table_add_target(struct dm_table *t, const char *type,
@@ -891,6 +897,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
 	q->hardsect_size = t->limits.hardsect_size;
 	q->max_segment_size = t->limits.max_segment_size;
 	q->seg_boundary_mask = t->limits.seg_boundary_mask;
+	q->bounce_pfn = t->limits.bounce_pfn;
 	if (t->limits.no_cluster)
 		q->queue_flags &= ~(1 << QUEUE_FLAG_CLUSTER);
 	else
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -116,6 +116,7 @@ struct io_restrictions {
 	unsigned short		hardsect_size;
 	unsigned int		max_segment_size;
 	unsigned long		seg_boundary_mask;
+	unsigned long		bounce_pfn;
 	unsigned char		no_cluster; /* inverted so that 0 is default */
 };
 