raid5: check faulty flag for array status during recovery.
When we have more than one drive failure, it's possible we start rebuilding one drive while leaving another faulty drive in the array. To determine whether the array will be optimal after rebuilding, the current code only checks whether a drive is missing, which could potentially lead to data corruption. This patch adds a check of the Faulty flag. Signed-off-by: NeilBrown <neilb@suse.de>
This commit is contained in:
parent
d1901ef099
commit
16d9cfab93
1 changed file with 9 additions and 4 deletions
|
@ -5121,12 +5121,17 @@ static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int
|
|||
schedule_timeout_uninterruptible(1);
|
||||
}
|
||||
/* Need to check if array will still be degraded after recovery/resync
|
||||
* We don't need to check the 'failed' flag as when that gets set,
|
||||
* recovery aborts.
|
||||
* Note in case of > 1 drive failures it's possible we're rebuilding
|
||||
* one drive while leaving another faulty drive in array.
|
||||
*/
|
||||
for (i = 0; i < conf->raid_disks; i++)
|
||||
if (conf->disks[i].rdev == NULL)
|
||||
rcu_read_lock();
|
||||
for (i = 0; i < conf->raid_disks; i++) {
|
||||
struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev);
|
||||
|
||||
if (rdev == NULL || test_bit(Faulty, &rdev->flags))
|
||||
still_degraded = 1;
|
||||
}
|
||||
rcu_read_unlock();
|
||||
|
||||
bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
|
||||
|
||||
|
|
Loading…
Reference in a new issue