Merge branch 'for-3.16/blk-mq-tagging' into for-3.16/core
commit acb12e0a9c
2 changed files with 41 additions and 25 deletions
block/blk-mq-tag.c:

@@ -44,7 +44,7 @@ static int __bt_get_word(struct blk_mq_bitmap *bm, unsigned int last_tag)
 {
        int tag, org_last_tag, end;

-       org_last_tag = last_tag = TAG_TO_BIT(last_tag);
+       org_last_tag = last_tag;
        end = bm->depth;
        do {
 restart:
@@ -84,12 +84,12 @@ static int __bt_get(struct blk_mq_bitmap_tags *bt, unsigned int *tag_cache)
        int index, i, tag;

        last_tag = org_last_tag = *tag_cache;
-       index = TAG_TO_INDEX(last_tag);
+       index = TAG_TO_INDEX(bt, last_tag);

        for (i = 0; i < bt->map_nr; i++) {
-               tag = __bt_get_word(&bt->map[index], last_tag);
+               tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag));
                if (tag != -1) {
-                       tag += index * BITS_PER_LONG;
+                       tag += (index << bt->bits_per_word);
                        goto done;
                }

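With a variable bits_per_word, a tag now splits into a word index (its high bits) and a bit offset (its low bits), which is what the reworked TAG_TO_INDEX()/TAG_TO_BIT() macros compute. A minimal userspace sketch of the same decomposition, with hypothetical helper names standing in for the macros:

#include <stdio.h>

/* Hypothetical stand-ins for the reworked TAG_TO_INDEX()/TAG_TO_BIT():
 * the word index is the tag's high bits, the bit offset its low bits. */
static unsigned int tag_to_index(unsigned int tag, unsigned int bits_per_word)
{
        return tag >> bits_per_word;
}

static unsigned int tag_to_bit(unsigned int tag, unsigned int bits_per_word)
{
        return tag & ((1U << bits_per_word) - 1);
}

int main(void)
{
        /* bits_per_word = 5 means 32 tags per word: tag 37 -> word 1, bit 5 */
        printf("index=%u bit=%u\n", tag_to_index(37, 5), tag_to_bit(37, 5));
        return 0;
}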
@@ -233,14 +233,17 @@ static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)

 static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
 {
-       const int index = TAG_TO_INDEX(tag);
+       const int index = TAG_TO_INDEX(bt, tag);
        struct bt_wait_state *bs;

-       clear_bit(TAG_TO_BIT(tag), &bt->map[index].word);
+       /*
+        * The unlock memory barrier need to order access to req in free
+        * path and clearing tag bit
+        */
+       clear_bit_unlock(TAG_TO_BIT(bt, tag), &bt->map[index].word);

        bs = bt_wake_ptr(bt);
        if (bs && atomic_dec_and_test(&bs->wait_cnt)) {
-               smp_mb__after_clear_bit();
                atomic_set(&bs->wait_cnt, bt->wake_cnt);
                bt_index_inc(&bt->wake_index);
                wake_up(&bs->wait);
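Replacing clear_bit() + smp_mb__after_clear_bit() with clear_bit_unlock() turns the tag release into a release-ordered operation: everything the free path wrote to the request is ordered before the tag bit clears, so the next owner of the tag cannot observe stale request state. A rough userspace analog of the same ordering with C11 atomics (a sketch of the pattern with made-up names, not the kernel API):

#include <stdatomic.h>

struct request_state { int in_flight; };

struct tag_word { _Atomic unsigned long word; };

static void free_tag(struct tag_word *w, unsigned int bit,
                     struct request_state *rq)
{
        rq->in_flight = 0;      /* free-path stores to the request ... */

        /* ... must be visible before the tag bit reads as free: clearing
         * with memory_order_release orders them before the tag can be
         * re-acquired (the kernel uses clear_bit_unlock() for this). */
        atomic_fetch_and_explicit(&w->word, ~(1UL << bit),
                                  memory_order_release);
}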
@@ -292,7 +295,7 @@ static void bt_for_each_free(struct blk_mq_bitmap_tags *bt,
                        bit++;
                } while (1);

-               off += BITS_PER_LONG;
+               off += (1 << bt->bits_per_word);
        }
 }

@@ -333,14 +336,31 @@ static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
 {
        int i;

+       bt->bits_per_word = ilog2(BITS_PER_LONG);
+
        /*
         * Depth can be zero for reserved tags, that's not a failure
         * condition.
         */
        if (depth) {
-               int nr, i, map_depth;
+               unsigned int nr, i, map_depth, tags_per_word;
+
+               tags_per_word = (1 << bt->bits_per_word);

-               nr = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
+               /*
+                * If the tag space is small, shrink the number of tags
+                * per word so we spread over a few cachelines, at least.
+                * If less than 4 tags, just forget about it, it's not
+                * going to work optimally anyway.
+                */
+               if (depth >= 4) {
+                       while (tags_per_word * 4 > depth) {
+                               bt->bits_per_word--;
+                               tags_per_word = (1 << bt->bits_per_word);
+                       }
+               }
+
+               nr = ALIGN(depth, tags_per_word) / tags_per_word;
                bt->map = kzalloc_node(nr * sizeof(struct blk_mq_bitmap),
                                        GFP_KERNEL, node);
                if (!bt->map)
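The new sizing logic in bt_alloc() starts from BITS_PER_LONG tags per word and halves that until one word covers at most a quarter of the depth, so small tag spaces are spread over several cachelines instead of packed into one word. A standalone sketch mirroring the loop above (assuming 64-bit longs, hence the starting value ilog2(64) = 6):

#include <stdio.h>

/* Mirror of the bt_alloc() sizing loop: shrink the per-word tag count
 * until a word holds at most a quarter of the total depth. */
static unsigned int pick_bits_per_word(unsigned int depth)
{
        unsigned int bits_per_word = 6;         /* ilog2(BITS_PER_LONG) */
        unsigned int tags_per_word = 1U << bits_per_word;

        if (depth >= 4) {
                while (tags_per_word * 4 > depth) {
                        bits_per_word--;
                        tags_per_word = 1U << bits_per_word;
                }
        }
        return bits_per_word;
}

int main(void)
{
        /* e.g. depth=32 shrinks 64 -> 32 -> 16 -> 8 tags/word, i.e. 4 words */
        unsigned int depths[] = { 2, 32, 64, 256 };

        for (unsigned int i = 0; i < 4; i++) {
                unsigned int bpw = pick_bits_per_word(depths[i]);
                printf("depth=%u bits_per_word=%u tags_per_word=%u\n",
                       depths[i], bpw, 1U << bpw);
        }
        return 0;
}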
@@ -349,8 +369,8 @@ static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
                bt->map_nr = nr;
                map_depth = depth;
                for (i = 0; i < nr; i++) {
-                       bt->map[i].depth = min(map_depth, BITS_PER_LONG);
-                       map_depth -= BITS_PER_LONG;
+                       bt->map[i].depth = min(map_depth, tags_per_word);
+                       map_depth -= tags_per_word;
                }
        }

@@ -397,7 +417,6 @@ static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
 struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
                                     unsigned int reserved_tags, int node)
 {
-       unsigned int nr_tags, nr_cache;
        struct blk_mq_tags *tags;

        if (total_tags > BLK_MQ_TAG_MAX) {
@@ -409,9 +428,6 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
        if (!tags)
                return NULL;

-       nr_tags = total_tags - reserved_tags;
-       nr_cache = nr_tags / num_online_cpus();
-
        tags->nr_tags = total_tags;
        tags->nr_reserved_tags = reserved_tags;

@@ -429,10 +445,7 @@ void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
 {
        unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

-       if (depth > 1)
-               *tag = prandom_u32() % (depth - 1);
-       else
-               *tag = 0;
+       *tag = prandom_u32() % depth;
 }

 ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
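Besides dropping a branch, the simplification in blk_mq_tag_init_last_tag() widens the random starting tag to the full range: `prandom_u32() % (depth - 1)` could never produce the last tag, and for depth == 1 the modulo-depth form already yields 0, making the special case unnecessary. A trivial userspace illustration (using rand() in place of prandom_u32()):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        unsigned int depth = 4;
        unsigned int r = rand();

        /* old form never yields the last tag; new form covers 0..depth-1 */
        printf("old: %u (range 0..%u)\n", r % (depth - 1), depth - 2);
        printf("new: %u (range 0..%u)\n", r % depth, depth - 1);
        return 0;
}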
@@ -443,8 +456,10 @@ ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
        if (!tags)
                return 0;

-       page += sprintf(page, "nr_tags=%u, reserved_tags=%u\n",
-                       tags->nr_tags, tags->nr_reserved_tags);
+       page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
+                       "bits_per_word=%u\n",
+                       tags->nr_tags, tags->nr_reserved_tags,
+                       tags->bitmap_tags.bits_per_word);

        free = bt_unused_tags(&tags->bitmap_tags);
        res = bt_unused_tags(&tags->breserved_tags);
block/blk-mq-tag.h:

@@ -11,8 +11,8 @@ struct bt_wait_state {
        wait_queue_head_t wait;
 } ____cacheline_aligned_in_smp;

-#define TAG_TO_INDEX(tag)      ((tag) / BITS_PER_LONG)
-#define TAG_TO_BIT(tag)                ((tag) & (BITS_PER_LONG - 1))
+#define TAG_TO_INDEX(bt, tag)  ((tag) >> (bt)->bits_per_word)
+#define TAG_TO_BIT(bt, tag)    ((tag) & ((1 << (bt)->bits_per_word) - 1))

 struct blk_mq_bitmap {
        unsigned long word;
@@ -22,9 +22,10 @@ struct blk_mq_bitmap {
 struct blk_mq_bitmap_tags {
        unsigned int depth;
        unsigned int wake_cnt;
+       unsigned int bits_per_word;

+       struct blk_mq_bitmap *map;
        unsigned int map_nr;
-       struct blk_mq_bitmap *map;

        unsigned int wake_index;
        struct bt_wait_state *bs;