bcache: Fix moving_gc deadlocking with a foreground write

The deadlock happened because a foreground write slept waiting for a
bucket to be allocated. Normally the GC would mark buckets available
for invalidation, but moving_gc was stuck waiting for its outstanding
writes to complete, and those writes used bcache_wq, the same
workqueue the foreground writes were on.
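
The circular wait, as a sketch reconstructed from the description
above (not taken verbatim from the original report):

	foreground write -> sleeps waiting for a bucket to be allocated
	free buckets     -> only appear once moving_gc finishes moving data
	moving_gc        -> waits for its outstanding writes to complete
	those writes     -> run on bcache_wq, the same queue the stuck
	                    foreground writes are using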

This fix gives moving_gc its own workqueue, so it can still finish
moving data even if foreground writes are stuck waiting for
allocation. It also makes the workqueue a parameter of the
data_insert path, so moving_gc can use its own queue for its writes.
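
The shape of the change, condensed from the hunks below (all
identifiers are from this patch; surrounding code is elided):

	struct data_insert_op {
		...
		struct workqueue_struct	*wq;	/* queue for this op's continuations */
		...
	};

	/* foreground path (search_alloc) keeps the old queue */
	s->iop.wq = bcache_wq;

	/* moving GC path (read_moving) uses the new dedicated queue */
	io->op.wq = c->moving_gc_wq;	/* create_workqueue("bcache_gc") */

	/* continuations in the insert path now pick the per-op queue */
	continue_at(cl, bch_data_insert_keys, op->wq);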

Signed-off-by: Nicholas Swenson <nks@daterainc.com>
Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Nicholas Swenson 2014-01-09 16:03:04 -08:00 committed by Kent Overstreet
parent 90db6919f5
commit da415a096f
5 changed files with 16 additions and 8 deletions

drivers/md/bcache/bcache.h

@@ -628,6 +628,8 @@ struct cache_set {
 	/* Number of moving GC bios in flight */
 	struct semaphore	moving_in_flight;
 
+	struct workqueue_struct	*moving_gc_wq;
+
 	struct btree		*root;
 
 #ifdef CONFIG_BCACHE_DEBUG

drivers/md/bcache/movinggc.c

@@ -115,7 +115,7 @@ static void write_moving(struct closure *cl)
 		closure_call(&op->cl, bch_data_insert, NULL, cl);
 	}
 
-	continue_at(cl, write_moving_finish, system_wq);
+	continue_at(cl, write_moving_finish, op->wq);
 }
 
 static void read_moving_submit(struct closure *cl)
@@ -125,7 +125,7 @@ static void read_moving_submit(struct closure *cl)
 
 	bch_submit_bbio(bio, io->op.c, &io->w->key, 0);
 
-	continue_at(cl, write_moving, system_wq);
+	continue_at(cl, write_moving, io->op.wq);
 }
 
 static void read_moving(struct cache_set *c)
@@ -160,6 +160,7 @@ static void read_moving(struct cache_set *c)
 		io->w		= w;
 		io->op.inode	= KEY_INODE(&w->key);
 		io->op.c	= c;
+		io->op.wq	= c->moving_gc_wq;
 
 		moving_init(io);
 		bio = &io->bio.bio;

drivers/md/bcache/request.c

@@ -248,7 +248,7 @@ static void bch_data_insert_keys(struct closure *cl)
 		atomic_dec_bug(journal_ref);
 
 	if (!op->insert_data_done)
-		continue_at(cl, bch_data_insert_start, bcache_wq);
+		continue_at(cl, bch_data_insert_start, op->wq);
 
 	bch_keylist_free(&op->insert_keys);
 	closure_return(cl);
@@ -297,7 +297,7 @@ static void bch_data_invalidate(struct closure *cl)
 	op->insert_data_done = true;
 	bio_put(bio);
 out:
-	continue_at(cl, bch_data_insert_keys, bcache_wq);
+	continue_at(cl, bch_data_insert_keys, op->wq);
 }
 
 static void bch_data_insert_error(struct closure *cl)
@@ -340,7 +340,7 @@ static void bch_data_insert_endio(struct bio *bio, int error)
 		if (op->writeback)
 			op->error = error;
 		else if (!op->replace)
-			set_closure_fn(cl, bch_data_insert_error, bcache_wq);
+			set_closure_fn(cl, bch_data_insert_error, op->wq);
 		else
 			set_closure_fn(cl, NULL, NULL);
 	}
@@ -376,7 +376,7 @@ static void bch_data_insert_start(struct closure *cl)
 		if (bch_keylist_realloc(&op->insert_keys,
 					3 + (op->csum ? 1 : 0),
 					op->c))
-			continue_at(cl, bch_data_insert_keys, bcache_wq);
+			continue_at(cl, bch_data_insert_keys, op->wq);
 
 		k = op->insert_keys.top;
 		bkey_init(k);
@@ -413,7 +413,7 @@ static void bch_data_insert_start(struct closure *cl)
 	} while (n != bio);
 
 	op->insert_data_done = true;
-	continue_at(cl, bch_data_insert_keys, bcache_wq);
+	continue_at(cl, bch_data_insert_keys, op->wq);
 err:
 	/* bch_alloc_sectors() blocks if s->writeback = true */
 	BUG_ON(op->writeback);
@@ -442,7 +442,7 @@ static void bch_data_insert_start(struct closure *cl)
 	bio_put(bio);
 
 	if (!bch_keylist_empty(&op->insert_keys))
-		continue_at(cl, bch_data_insert_keys, bcache_wq);
+		continue_at(cl, bch_data_insert_keys, op->wq);
 	else
 		closure_return(cl);
 }
@@ -824,6 +824,7 @@ static inline struct search *search_alloc(struct bio *bio,
 	s->iop.error		= 0;
 	s->iop.flags		= 0;
 	s->iop.flush_journal	= (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
+	s->iop.wq		= bcache_wq;
 
 	return s;
 }

drivers/md/bcache/request.h

@@ -7,6 +7,7 @@ struct data_insert_op {
 	struct closure		cl;
 	struct cache_set	*c;
 	struct bio		*bio;
+	struct workqueue_struct	*wq;
 
 	unsigned		inode;
 	uint16_t		write_point;

drivers/md/bcache/super.c

@@ -1356,6 +1356,8 @@ static void cache_set_free(struct closure *cl)
 	bch_bset_sort_state_free(&c->sort);
 	free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));
 
+	if (c->moving_gc_wq)
+		destroy_workqueue(c->moving_gc_wq);
 	if (c->bio_split)
 		bioset_free(c->bio_split);
 	if (c->fill_iter)
@@ -1522,6 +1524,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
 	    !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) ||
 	    !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
 	    !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
+	    !(c->moving_gc_wq = create_workqueue("bcache_gc")) ||
 	    bch_journal_alloc(c) ||
 	    bch_btree_cache_alloc(c) ||
 	    bch_open_buckets_alloc(c) ||