dm cache: prevent corruption caused by discard_block_size > cache_block_size
If the discard block size is larger than the cache block size we will not
properly quiesce IO to a region that is about to be discarded.  This results
in a race between a cache migration where no copy is needed, and a write to
an adjacent cache block that's within the same large discard block.

Work around this by limiting the discard_block_size to cache_block_size.
Also limit the max_discard_sectors to cache_block_size.

A more comprehensive fix that introduces range locking support in the
bio_prison and proper quiescing of a discard range that spans multiple cache
blocks is already in development.

Reported-by: Morgan Mears <Morgan.Mears@netapp.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Acked-by: Joe Thornber <ejt@redhat.com>
Acked-by: Heinz Mauelshagen <heinzm@redhat.com>
Cc: stable@vger.kernel.org
This commit is contained in:
parent 428e469864
commit d132cc6d9e
1 changed file with 3 additions and 34 deletions
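For illustration only, a minimal user-space C sketch (not dm-cache code; the block sizes are made-up examples) of the mismatch the commit message describes: when a discard block is larger than a cache block, one discard spans several cache blocks, so quiescing only the migrating cache block leaves its neighbours in the same discard block exposed to a racing write. The workaround in the diff below simply forces a 1:1 mapping.

/*
 * Minimal sketch, user-space C, not the kernel code.  The sizes are
 * illustrative assumptions, not values taken from dm-cache.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t cache_block_size   = 128;                   /* sectors per cache block (example) */
	uint32_t discard_block_size = 8 * cache_block_size;  /* the problematic case */

	/*
	 * One discard block covers several cache blocks, so a discard that
	 * quiesces a single cache block does not protect the adjacent cache
	 * blocks sharing the same discard block.
	 */
	printf("cache blocks per discard block (before): %u\n",
	       discard_block_size / cache_block_size);

	/* The workaround in this commit: force a 1:1 mapping. */
	discard_block_size = cache_block_size;
	printf("cache blocks per discard block (after):  %u\n",
	       discard_block_size / cache_block_size);
	return 0;
}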
@@ -239,7 +239,7 @@ struct cache {
 	 */
 	dm_dblock_t discard_nr_blocks;
 	unsigned long *discard_bitset;
-	uint32_t discard_block_size; /* a power of 2 times sectors per block */
+	uint32_t discard_block_size;
 
 	/*
 	 * Rather than reconstructing the table line for the status we just
@@ -2171,35 +2171,6 @@ static int create_cache_policy(struct cache *cache, struct cache_args *ca,
 	return 0;
 }
 
-/*
- * We want the discard block size to be a power of two, at least the size
- * of the cache block size, and have no more than 2^14 discard blocks
- * across the origin.
- */
-#define MAX_DISCARD_BLOCKS (1 << 14)
-
-static bool too_many_discard_blocks(sector_t discard_block_size,
-				    sector_t origin_size)
-{
-	(void) sector_div(origin_size, discard_block_size);
-
-	return origin_size > MAX_DISCARD_BLOCKS;
-}
-
-static sector_t calculate_discard_block_size(sector_t cache_block_size,
-					     sector_t origin_size)
-{
-	sector_t discard_block_size;
-
-	discard_block_size = roundup_pow_of_two(cache_block_size);
-
-	if (origin_size)
-		while (too_many_discard_blocks(discard_block_size, origin_size))
-			discard_block_size *= 2;
-
-	return discard_block_size;
-}
-
 #define DEFAULT_MIGRATION_THRESHOLD 2048
 
 static int cache_create(struct cache_args *ca, struct cache **result)
@@ -2321,9 +2292,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 	}
 	clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
 
-	cache->discard_block_size =
-		calculate_discard_block_size(cache->sectors_per_block,
-					     cache->origin_sectors);
+	cache->discard_block_size = cache->sectors_per_block;
 	cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks);
 	cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
 	if (!cache->discard_bitset) {
@@ -3120,7 +3089,7 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
 	/*
 	 * FIXME: these limits may be incompatible with the cache device
 	 */
-	limits->max_discard_sectors = cache->discard_block_size * 1024;
+	limits->max_discard_sectors = cache->discard_block_size;
 	limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
 }
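As a worked example of the limits set in the last hunk, a sketch under assumed values (a 128-sector cache block and 512-byte sectors, i.e. SECTOR_SHIFT of 9); this is not part of the commit:

#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT 9	/* 512-byte sectors */

int main(void)
{
	/* Assumed example: discard_block_size now equals the cache block size. */
	uint32_t discard_block_size = 128;	/* sectors */

	/* max_discard_sectors drops from 1024 discard blocks to a single one. */
	uint32_t max_discard_sectors = discard_block_size;

	/* discard_granularity is expressed in bytes. */
	uint64_t discard_granularity = (uint64_t)discard_block_size << SECTOR_SHIFT;

	printf("max_discard_sectors:  %u sectors\n", max_discard_sectors);
	printf("discard_granularity:  %llu bytes (%llu KiB)\n",
	       (unsigned long long)discard_granularity,
	       (unsigned long long)(discard_granularity >> 10));
	return 0;
}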