md/raid5: close recently introduced race in stripe_head management.
As release_stripe and __release_stripe decrement ->count and then
manipulate ->lru both under ->device_lock, it is important that
get_active_stripe() increments ->count and clears ->lru also under
->device_lock.
However we currently list_del_init ->lru under the lock, but increment
the ->count outside the lock. This can lead to races and list
corruption.
So move the atomic_inc(&sh->count) up inside the ->device_lock
protected region.
Note that we still increment ->count without device lock in the case
where get_free_stripe() was called, and in fact don't take
->device_lock at all in that path.
This is safe because if the stripe_head can be found by
get_free_stripe, then the hash lock assures us that no-one else could
possibly be calling release_stripe() at the same time.
Fixes: 566c09c534
Cc: stable@vger.kernel.org (3.13)
Reported-and-tested-by: Ian Kumlien <ian.kumlien@gmail.com>
Signed-off-by: NeilBrown <neilb@suse.de>
This commit is contained in:
parent
9f97e4b128
commit
7da9d450ab
1 changed file with 4 additions and 4 deletions
|
@ -675,8 +675,10 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
|
||||||
|| !conf->inactive_blocked),
|
|| !conf->inactive_blocked),
|
||||||
*(conf->hash_locks + hash));
|
*(conf->hash_locks + hash));
|
||||||
conf->inactive_blocked = 0;
|
conf->inactive_blocked = 0;
|
||||||
} else
|
} else {
|
||||||
init_stripe(sh, sector, previous);
|
init_stripe(sh, sector, previous);
|
||||||
|
atomic_inc(&sh->count);
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
spin_lock(&conf->device_lock);
|
spin_lock(&conf->device_lock);
|
||||||
if (atomic_read(&sh->count)) {
|
if (atomic_read(&sh->count)) {
|
||||||
|
@ -695,13 +697,11 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
|
||||||
sh->group = NULL;
|
sh->group = NULL;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
atomic_inc(&sh->count);
|
||||||
spin_unlock(&conf->device_lock);
|
spin_unlock(&conf->device_lock);
|
||||||
}
|
}
|
||||||
} while (sh == NULL);
|
} while (sh == NULL);
|
||||||
|
|
||||||
if (sh)
|
|
||||||
atomic_inc(&sh->count);
|
|
||||||
|
|
||||||
spin_unlock_irq(conf->hash_locks + hash);
|
spin_unlock_irq(conf->hash_locks + hash);
|
||||||
return sh;
|
return sh;
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in a new issue