Merge tag 'for-4.12/dm-fixes-5' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:

 - dm thinp fix for crash that will occur when metadata device failure
   races with discard passdown to the underlying data device.

 - dm raid fix to not access the superblock's >= 1.9.0 'sectors' member
   unconditionally.

* tag 'for-4.12/dm-fixes-5' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm thin: do not queue freed thin mapping for next stage processing
  dm raid: fix oops on upgrading to extended superblock format
Linus Torvalds, 2017-06-29 14:23:02 -07:00
commit 27bc344014
2 changed files with 27 additions and 16 deletions

drivers/md/dm-raid.c

@@ -1927,7 +1927,7 @@ struct dm_raid_superblock {
 	/********************************************************************
 	 * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!!
 	 *
-	 * FEATURE_FLAG_SUPPORTS_V190 in the features member indicates that those exist
+	 * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that those exist
 	 */

 	__le32 flags; /* Flags defining array states for reshaping */
@@ -2092,6 +2092,11 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
 	sb->layout = cpu_to_le32(mddev->layout);
 	sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);

+	/********************************************************************
+	 * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!!
+	 *
+	 * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that those exist
+	 */
 	sb->new_level = cpu_to_le32(mddev->new_level);
 	sb->new_layout = cpu_to_le32(mddev->new_layout);
 	sb->new_stripe_sectors = cpu_to_le32(mddev->new_chunk_sectors);
@@ -2438,8 +2443,14 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
 	mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;

 	if (!test_and_clear_bit(FirstUse, &rdev->flags)) {
-		/* Retrieve device size stored in superblock to be prepared for shrink */
-		rdev->sectors = le64_to_cpu(sb->sectors);
+		/*
+		 * Retrieve rdev size stored in superblock to be prepared for shrink.
+		 * Check extended superblock members are present otherwise the size
+		 * will not be set!
+		 */
+		if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190)
+			rdev->sectors = le64_to_cpu(sb->sectors);
+
 		rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
 		if (rdev->recovery_offset == MaxSector)
 			set_bit(In_sync, &rdev->flags);
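
The pattern behind this fix generalizes: on-disk extension fields may only be read after checking the feature bit that says they were ever written. Below is a minimal standalone sketch of that guard, assuming a simplified superblock; the struct layout, flag value, and helper names are illustrative, not the kernel's (which uses __le32/__le64 members and the le32_to_cpu()/le64_to_cpu() accessors shown in the diff above).

#include <stdint.h>
#include <stdio.h>

/* Hypothetical flag value; the kernel defines its own FEATURE_FLAG_SUPPORTS_V190. */
#define FEATURE_FLAG_SUPPORTS_V190 0x1

struct raid_sb {
	/* Pristine (pre-1.9.0) members. */
	uint32_t compat_features;
	uint64_t disk_recovery_offset;
	/* v1.9.0 extension member: only valid when the flag above is set. */
	uint64_t sectors;
};

static void validate_sb(const struct raid_sb *sb, uint64_t *rdev_sectors)
{
	/*
	 * A superblock written by an older target never initialized the
	 * extension members, so an unconditional read of sb->sectors is
	 * exactly the upgrade bug the commit fixes.  Gate it on the flag.
	 */
	if (sb->compat_features & FEATURE_FLAG_SUPPORTS_V190)
		*rdev_sectors = sb->sectors;
}

int main(void)
{
	/* Pre-1.9.0 superblock: 'sectors' holds garbage and must be ignored. */
	struct raid_sb old_sb = { .compat_features = 0, .sectors = 0xdeadbeef };
	uint64_t rdev_sectors = 0;

	validate_sb(&old_sb, &rdev_sectors);
	printf("rdev sectors: %llu\n", (unsigned long long)rdev_sectors);
	return 0;
}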

drivers/md/dm-thin.c

@@ -1094,6 +1094,19 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
 		return;
 	}

+	/*
+	 * Increment the unmapped blocks.  This prevents a race between the
+	 * passdown io and reallocation of freed blocks.
+	 */
+	r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end);
+	if (r) {
+		metadata_operation_failed(pool, "dm_pool_inc_data_range", r);
+		bio_io_error(m->bio);
+		cell_defer_no_holder(tc, m->cell);
+		mempool_free(m, pool->mapping_pool);
+		return;
+	}
+
 	discard_parent = bio_alloc(GFP_NOIO, 1);
 	if (!discard_parent) {
 		DMWARN("%s: unable to allocate top level discard bio for passdown. Skipping passdown.",
@@ -1114,19 +1127,6 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
 			end_discard(&op, r);
 		}
 	}
-
-	/*
-	 * Increment the unmapped blocks.  This prevents a race between the
-	 * passdown io and reallocation of freed blocks.
-	 */
-	r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end);
-	if (r) {
-		metadata_operation_failed(pool, "dm_pool_inc_data_range", r);
-		bio_io_error(m->bio);
-		cell_defer_no_holder(tc, m->cell);
-		mempool_free(m, pool->mapping_pool);
-		return;
-	}
 }

 static void process_prepared_discard_passdown_pt2(struct dm_thin_new_mapping *m)
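
The whole fix is an ordering change: dm_pool_inc_data_range(), together with its error path, now runs before any passdown bio is created, so a metadata device failure frees the mapping before anything has been queued for the next stage (matching the commit title). A minimal sketch of why the ordering matters, with hypothetical stand-ins for the pool metadata and block-layer calls:

#include <stdio.h>

/* Stand-in for dm_pool_inc_data_range(); pretend the metadata device failed. */
static int inc_data_range(unsigned begin, unsigned end)
{
	return -1;
}

/* Stand-in for building and submitting the passdown discard bio. */
static void issue_passdown_discard(unsigned begin, unsigned end)
{
	printf("discard %u..%u issued\n", begin, end);
}

/* Stand-in for the error path: bio_io_error() + cell release + mempool_free(). */
static void free_mapping(void)
{
	printf("mapping freed; nothing queued for pt2\n");
}

/*
 * Mirrors the fixed ordering of process_prepared_discard_passdown_pt1():
 * take the extra reference on the data blocks, and handle any metadata
 * failure, BEFORE the discard is in flight.  With the old ordering the
 * error path freed the mapping after the discard had already been
 * queued, so the freed mapping could still reach the next stage, and
 * freed blocks could be reallocated while the passdown io was running.
 */
static void passdown_pt1(unsigned data_block, unsigned data_end)
{
	if (inc_data_range(data_block, data_end) != 0) {
		free_mapping();	/* no I/O was started, so bailing out is safe */
		return;
	}
	issue_passdown_discard(data_block, data_end);
}

int main(void)
{
	passdown_pt1(0, 8);
	return 0;
}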