bcache: Convert btree_iter to struct btree_keys
More work to disentangle bset.c from struct btree

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
parent f67342dd34
commit c052dd9a26

6 changed files with 41 additions and 38 deletions
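The change itself is mechanical: iterator and search helpers that previously took a struct btree * now take a struct btree_keys *, so callers holding a full btree node pass &b->keys, and the for_each_key()/for_each_key_filter() macros move from btree.h into bset.h with the new argument type. Going by the hunk context, the six files touched are bcache's bset.c, bset.h, btree.c, btree.h, debug.c and sysfs.c. Below is a minimal, self-contained sketch of that calling convention; the struct layouts and the iter_init() helper are simplified stand-ins for illustration, not the kernel definitions.

/*
 * Sketch only: shows the calling convention this commit moves to.
 * Helpers that only need key-handling state take struct btree_keys *,
 * and callers that hold a struct btree pass &b->keys instead of b.
 */
#include <stdio.h>

struct btree_keys {
        unsigned nsets;                 /* simplified stand-in field */
};

struct btree {
        int level;
        struct btree_keys keys;         /* bset code now only sees this */
};

/* Before: the helper took struct btree *b and reached into b->keys. */
/* After:  it takes struct btree_keys * directly.                    */
static void iter_init(struct btree_keys *b)
{
        printf("iterating over %u sets\n", b->nsets);
}

int main(void)
{
        struct btree b = { .level = 0, .keys = { .nsets = 2 } };

        /* kernel call sites change from passing b to passing &b->keys */
        iter_init(&b.keys);
        return 0;
}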
@@ -764,7 +764,7 @@ static struct bset_search_iter bset_search_tree(struct bset_tree *t,
 	return (struct bset_search_iter) {l, r};
 }
 
-struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
+struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
 			       const struct bkey *search)
 {
 	struct bset_search_iter i;
@@ -787,7 +787,7 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
 	if (unlikely(!t->size)) {
 		i.l = t->data->start;
 		i.r = bset_bkey_last(t->data);
-	} else if (bset_written(&b->keys, t)) {
+	} else if (bset_written(b, t)) {
 		/*
 		 * Each node in the auxiliary search tree covers a certain range
 		 * of bits, and keys above and below the set it covers might
@@ -803,14 +803,14 @@ struct bkey *__bch_bset_search(struct btree *b, struct bset_tree *t,
 
 		i = bset_search_tree(t, search);
 	} else {
-		BUG_ON(!b->keys.nsets &&
+		BUG_ON(!b->nsets &&
 		       t->size < bkey_to_cacheline(t, bset_bkey_last(t->data)));
 
 		i = bset_search_write_set(t, search);
 	}
 
-	if (expensive_debug_checks(b->c)) {
-		BUG_ON(bset_written(&b->keys, t) &&
+	if (btree_keys_expensive_checks(b)) {
+		BUG_ON(bset_written(b, t) &&
 		       i.l != t->data->start &&
 		       bkey_cmp(tree_to_prev_bkey(t,
 			  inorder_to_tree(bkey_to_cacheline(t, i.l), t)),
@@ -853,7 +853,7 @@ void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
 				 btree_iter_cmp));
 }
 
-static struct bkey *__bch_btree_iter_init(struct btree *b,
+static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
 					  struct btree_iter *iter,
 					  struct bkey *search,
 					  struct bset_tree *start)
@@ -866,7 +866,7 @@ static struct bkey *__bch_btree_iter_init(struct btree *b,
 	iter->b = b;
 #endif
 
-	for (; start <= bset_tree_last(&b->keys); start++) {
+	for (; start <= bset_tree_last(b); start++) {
 		ret = bch_bset_search(b, start, search);
 		bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
 	}
@@ -874,11 +874,11 @@ static struct bkey *__bch_btree_iter_init(struct btree *b,
 	return ret;
 }
 
-struct bkey *bch_btree_iter_init(struct btree *b,
+struct bkey *bch_btree_iter_init(struct btree_keys *b,
 				 struct btree_iter *iter,
 				 struct bkey *search)
 {
-	return __bch_btree_iter_init(b, iter, search, b->keys.set);
+	return __bch_btree_iter_init(b, iter, search, b->set);
 }
 EXPORT_SYMBOL(bch_btree_iter_init);
 
@@ -1047,7 +1047,7 @@ void bch_btree_sort_partial(struct btree *b, unsigned start,
 	struct btree_iter iter;
 	int oldsize = bch_count_data(b);
 
-	__bch_btree_iter_init(b, &iter, NULL, &b->keys.set[start]);
+	__bch_btree_iter_init(&b->keys, &iter, NULL, &b->keys.set[start]);
 
 	if (start) {
 		unsigned i;
@@ -1080,7 +1080,7 @@ void bch_btree_sort_into(struct btree *b, struct btree *new,
 	uint64_t start_time = local_clock();
 
 	struct btree_iter iter;
-	bch_btree_iter_init(b, &iter, NULL);
+	bch_btree_iter_init(&b->keys, &iter, NULL);
 
 	btree_mergesort(&b->keys, new->keys.set->data, &iter, false, true);
 
@@ -309,7 +309,7 @@ static inline bool bch_bkey_try_merge(struct btree_keys *b,
 struct btree_iter {
 	size_t size, used;
 #ifdef CONFIG_BCACHE_DEBUG
-	struct btree *b;
+	struct btree_keys *b;
 #endif
 	struct btree_iter_set {
 		struct bkey *k, *end;
@@ -323,21 +323,30 @@ struct bkey *bch_btree_iter_next_filter(struct btree_iter *,
 					struct btree_keys *, ptr_filter_fn);
 
 void bch_btree_iter_push(struct btree_iter *, struct bkey *, struct bkey *);
-struct bkey *bch_btree_iter_init(struct btree *, struct btree_iter *,
+struct bkey *bch_btree_iter_init(struct btree_keys *, struct btree_iter *,
 				 struct bkey *);
 
-struct bkey *__bch_bset_search(struct btree *, struct bset_tree *,
+struct bkey *__bch_bset_search(struct btree_keys *, struct bset_tree *,
 			       const struct bkey *);
 
 /*
  * Returns the first key that is strictly greater than search
 */
-static inline struct bkey *bch_bset_search(struct btree *b, struct bset_tree *t,
+static inline struct bkey *bch_bset_search(struct btree_keys *b,
+					   struct bset_tree *t,
 					   const struct bkey *search)
 {
 	return search ? __bch_bset_search(b, t, search) : t->data->start;
 }
 
+#define for_each_key_filter(b, k, iter, filter)				\
+	for (bch_btree_iter_init((b), (iter), NULL);			\
+	     ((k) = bch_btree_iter_next_filter((iter), (b), filter));)
+
+#define for_each_key(b, k, iter)					\
+	for (bch_btree_iter_init((b), (iter), NULL);			\
+	     ((k) = bch_btree_iter_next(iter));)
+
 /* Sorting */
 
 struct bset_sort_state {
@@ -212,7 +212,7 @@ void bch_btree_node_read_done(struct btree *b)
 	iter->used = 0;
 
 #ifdef CONFIG_BCACHE_DEBUG
-	iter->b = b;
+	iter->b = &b->keys;
 #endif
 
 	if (!i->seq)
@@ -1195,7 +1195,7 @@ static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
 
 	gc->nodes++;
 
-	for_each_key_filter(b, k, &iter, bch_ptr_invalid) {
+	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
 		stale = max(stale, btree_mark_key(b, k));
 		keys++;
 
@@ -1386,7 +1386,7 @@ static unsigned btree_gc_count_keys(struct btree *b)
 	struct btree_iter iter;
 	unsigned ret = 0;
 
-	for_each_key_filter(b, k, &iter, bch_ptr_bad)
+	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
 		ret += bkey_u64s(k);
 
 	return ret;
@@ -1406,7 +1406,7 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
 	struct gc_merge_info *last = r + GC_MERGE_NODES - 1;
 
 	bch_keylist_init(&keys);
-	bch_btree_iter_init(b, &iter, &b->c->gc_done);
+	bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
 
 	for (i = 0; i < GC_MERGE_NODES; i++)
 		r[i].b = ERR_PTR(-EINTR);
@@ -1722,7 +1722,7 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op,
 	struct bucket *g;
 	struct btree_iter iter;
 
-	for_each_key_filter(b, k, &iter, bch_ptr_invalid) {
+	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
 		for (i = 0; i < KEY_PTRS(k); i++) {
 			if (!ptr_available(b->c, k, i))
 				continue;
@@ -1745,7 +1745,7 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op,
 	}
 
 	if (b->level) {
-		bch_btree_iter_init(b, &iter, NULL);
+		bch_btree_iter_init(&b->keys, &iter, NULL);
 
 		do {
 			k = bch_btree_iter_next_filter(&iter, &b->keys,
@@ -1892,7 +1892,7 @@ static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
 			 * depends on us inserting a new key for the top
 			 * here.
 			 */
-			top = bch_bset_search(b,
+			top = bch_bset_search(&b->keys,
 					      bset_tree_last(&b->keys),
 					      insert);
 			bch_bset_insert(&b->keys, top, k);
@@ -1965,7 +1965,7 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op,
 		 * the previous key.
 		 */
 		prev = NULL;
-		m = bch_btree_iter_init(b, &iter,
+		m = bch_btree_iter_init(&b->keys, &iter,
 					PRECEDING_KEY(&START_KEY(k)));
 
 		if (fix_overlapping_extents(b, k, &iter, replace_key)) {
@@ -2001,7 +2001,7 @@ static bool btree_insert_key(struct btree *b, struct btree_op *op,
 			goto copy;
 	} else {
 		BUG_ON(replace_key);
-		m = bch_bset_search(b, bset_tree_last(&b->keys), k);
+		m = bch_bset_search(&b->keys, bset_tree_last(&b->keys), k);
 	}
 
 insert:	bch_bset_insert(&b->keys, m, k);
@@ -2357,7 +2357,7 @@ static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
 		struct bkey *k;
 		struct btree_iter iter;
 
-		bch_btree_iter_init(b, &iter, from);
+		bch_btree_iter_init(&b->keys, &iter, from);
 
 		while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
						       bch_ptr_bad))) {
@@ -2390,7 +2390,7 @@ static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
 	struct bkey *k;
 	struct btree_iter iter;
 
-	bch_btree_iter_init(b, &iter, from);
+	bch_btree_iter_init(&b->keys, &iter, from);
 
 	while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
 		ret = !b->level
@@ -201,14 +201,6 @@ void bkey_put(struct cache_set *c, struct bkey *k);
 	     iter++)							\
 		hlist_for_each_entry_rcu((b), (c)->bucket_hash + iter, hash)
 
-#define for_each_key_filter(b, k, iter, filter)				\
-	for (bch_btree_iter_init((b), (iter), NULL);			\
-	     ((k) = bch_btree_iter_next_filter((iter), &(b)->keys, filter));)
-
-#define for_each_key(b, k, iter)					\
-	for (bch_btree_iter_init((b), (iter), NULL);			\
-	     ((k) = bch_btree_iter_next(iter));)
-
 /* Recursing down the btree */
 
 struct btree_op {
@@ -246,7 +246,7 @@ int __bch_count_data(struct btree *b)
 	struct bkey *k;
 
 	if (!b->level)
-		for_each_key(b, k, &iter)
+		for_each_key(&b->keys, k, &iter)
 			ret += KEY_SIZE(k);
 	return ret;
 }
@@ -258,7 +258,7 @@ void __bch_check_keys(struct btree *b, const char *fmt, ...)
 	struct btree_iter iter;
 	const char *err;
 
-	for_each_key(b, k, &iter) {
+	for_each_key(&b->keys, k, &iter) {
 		if (!b->level) {
 			err = "Keys out of order";
 			if (p && bkey_cmp(&START_KEY(p), &START_KEY(k)) > 0)
@@ -298,6 +298,7 @@ void __bch_check_keys(struct btree *b, const char *fmt, ...)
 
 void bch_btree_iter_next_check(struct btree_iter *iter)
 {
+#if 0
 	struct bkey *k = iter->data->k, *next = bkey_next(k);
 
 	if (next < iter->data->end &&
@@ -305,6 +306,7 @@ void bch_btree_iter_next_check(struct btree_iter *iter)
 		bch_dump_bucket(iter->b);
 		panic("Key skipped backwards\n");
 	}
+#endif
 }
 
 #endif
@@ -460,7 +460,7 @@ SHOW(__bch_cache_set)
 		rw_lock(false, b, b->level);
 	} while (b != c->root);
 
-	for_each_key_filter(b, k, &iter, bch_ptr_bad)
+	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
 		bytes += bkey_bytes(k);
 
 	rw_unlock(false, b);