NFS: Fix races nfs_page_group_destroy() vs nfs_destroy_unlinked_subrequests()
[ Upstream commit 08ca8b21f760c0ed5034a5c122092eec22ccf8f4 ]
When a subrequest is being detached from the subgroup, we want to
ensure that it is not holding the group lock, or in the process
of waiting for the group lock.
Fixes: 5b2b5187fa ("NFS: Fix nfs_page_group_destroy() and nfs_lock_and_join_requests() race cases")
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
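In effect, the patch establishes a single rule: req->wb_head may only be re-pointed while that request's own PG_HEADLOCK is held, and nfs_page_group_lock() must take the subrequest's PG_HEADLOCK before it dereferences and locks the head. A minimal sketch of the detach-side discipline, using the helpers the patch introduces below (detach_subrequest_sketch is a hypothetical name, not upstream code):

/* Sketch: detach a subrequest from its old group under its own headlock. */
static void detach_subrequest_sketch(struct nfs_page *subreq)
{
	nfs_page_set_headlock(subreq);	/* excludes nfs_page_group_lock(subreq) */
	subreq->wb_this_page = subreq;	/* make sure the old group is not used */
	subreq->wb_head = subreq;	/* subreq is now its own group head */
	nfs_page_clear_headlock(subreq);
}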
commit 1f39a7cc5d (parent a8cc52270f)
3 changed files with 55 additions and 24 deletions
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -131,9 +131,44 @@ nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx)
 }
 EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);
 
+/*
+ * nfs_page_set_headlock - set the request PG_HEADLOCK
+ * @req: request that is to be locked
+ *
+ * this lock must be held when modifying req->wb_head
+ *
+ * return 0 on success, < 0 on error
+ */
+int
+nfs_page_set_headlock(struct nfs_page *req)
+{
+	if (!test_and_set_bit(PG_HEADLOCK, &req->wb_flags))
+		return 0;
+
+	set_bit(PG_CONTENDED1, &req->wb_flags);
+	smp_mb__after_atomic();
+	return wait_on_bit_lock(&req->wb_flags, PG_HEADLOCK,
+				TASK_UNINTERRUPTIBLE);
+}
+
+/*
+ * nfs_page_clear_headlock - clear the request PG_HEADLOCK
+ * @req: request that is to be locked
+ */
+void
+nfs_page_clear_headlock(struct nfs_page *req)
+{
+	smp_mb__before_atomic();
+	clear_bit(PG_HEADLOCK, &req->wb_flags);
+	smp_mb__after_atomic();
+	if (!test_bit(PG_CONTENDED1, &req->wb_flags))
+		return;
+	wake_up_bit(&req->wb_flags, PG_HEADLOCK);
+}
+
 /*
  * nfs_page_group_lock - lock the head of the page group
- * @req - request in group that is to be locked
+ * @req: request in group that is to be locked
  *
  * this lock must be held when traversing or modifying the page
  * group list
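The two helpers added above follow the kernel's usual bit-lock idiom: test_and_set_bit() is the uncontended fast path; a contended waiter advertises itself via PG_CONTENDED1 so the unlock side only issues wake_up_bit() when someone may actually be sleeping, and the explicit barriers order the flag updates against the wait/wake machinery. A generic, self-contained sketch of that idiom (the bitlock_* names are hypothetical, not part of the patch):

#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/wait_bit.h>

/* Acquire: fast path via test_and_set_bit(), slow path sleeps on the bit. */
static int bitlock_acquire(unsigned long *flags, int lock_bit, int contended_bit)
{
	if (!test_and_set_bit(lock_bit, flags))
		return 0;			/* uncontended */
	set_bit(contended_bit, flags);		/* record that a waiter exists */
	smp_mb__after_atomic();			/* pairs with the release side */
	return wait_on_bit_lock(flags, lock_bit, TASK_UNINTERRUPTIBLE);
}

/* Release: drop the lock bit, then wake only if a waiter announced itself. */
static void bitlock_release(unsigned long *flags, int lock_bit, int contended_bit)
{
	smp_mb__before_atomic();
	clear_bit(lock_bit, flags);
	smp_mb__after_atomic();
	if (test_bit(contended_bit, flags))
		wake_up_bit(flags, lock_bit);
}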
@@ -143,36 +178,24 @@ EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);
 int
 nfs_page_group_lock(struct nfs_page *req)
 {
-	struct nfs_page *head = req->wb_head;
-
-	WARN_ON_ONCE(head != head->wb_head);
+	int ret;
 
-	if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags))
-		return 0;
-
-	set_bit(PG_CONTENDED1, &head->wb_flags);
-	smp_mb__after_atomic();
-	return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
-				TASK_UNINTERRUPTIBLE);
+	ret = nfs_page_set_headlock(req);
+	if (ret || req->wb_head == req)
+		return ret;
+	return nfs_page_set_headlock(req->wb_head);
 }
 
 /*
  * nfs_page_group_unlock - unlock the head of the page group
- * @req - request in group that is to be unlocked
+ * @req: request in group that is to be unlocked
  */
 void
 nfs_page_group_unlock(struct nfs_page *req)
 {
-	struct nfs_page *head = req->wb_head;
-
-	WARN_ON_ONCE(head != head->wb_head);
-
-	smp_mb__before_atomic();
-	clear_bit(PG_HEADLOCK, &head->wb_flags);
-	smp_mb__after_atomic();
-	if (!test_bit(PG_CONTENDED1, &head->wb_flags))
-		return;
-	wake_up_bit(&head->wb_flags, PG_HEADLOCK);
+	if (req != req->wb_head)
+		nfs_page_clear_headlock(req->wb_head);
+	nfs_page_clear_headlock(req);
 }
 
 /*
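After this rewrite the group lock is simply two headlocks taken in a fixed order: the subrequest's own PG_HEADLOCK first, then the head's (skipped when the request is its own head), released in the opposite order by nfs_page_group_unlock(). A hedged usage sketch of a caller traversing the group (walk_group_sketch is hypothetical, illustrative only):

/* Sketch: traverse a page group while holding the group lock. */
static int walk_group_sketch(struct nfs_page *req)
{
	struct nfs_page *tmp;
	int ret;

	ret = nfs_page_group_lock(req);	/* locks req, then req->wb_head */
	if (ret < 0)
		return ret;
	/* req->wb_head cannot change here: the detach path also needs
	 * req's PG_HEADLOCK before re-pointing it. */
	tmp = req->wb_head;
	do {
		/* ... inspect each request in the circular group list ... */
		tmp = tmp->wb_this_page;
	} while (tmp != req->wb_head);
	nfs_page_group_unlock(req);	/* unlocks the head, then req */
	return 0;
}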
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -416,22 +416,28 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
 		destroy_list = (subreq->wb_this_page == old_head) ?
 				NULL : subreq->wb_this_page;
 
+		/* Note: lock subreq in order to change subreq->wb_head */
+		nfs_page_set_headlock(subreq);
 		WARN_ON_ONCE(old_head != subreq->wb_head);
 
 		/* make sure old group is not used */
 		subreq->wb_this_page = subreq;
+		subreq->wb_head = subreq;
 
 		clear_bit(PG_REMOVE, &subreq->wb_flags);
 
 		/* Note: races with nfs_page_group_destroy() */
 		if (!kref_read(&subreq->wb_kref)) {
 			/* Check if we raced with nfs_page_group_destroy() */
-			if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags))
+			if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags)) {
+				nfs_page_clear_headlock(subreq);
 				nfs_free_request(subreq);
+			} else
+				nfs_page_clear_headlock(subreq);
 			continue;
 		}
+		nfs_page_clear_headlock(subreq);
 
-		subreq->wb_head = subreq;
 		nfs_release_request(old_head);
 
 		if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) {
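In the reworked branch above, a zero kref means the last reference to the subrequest was already dropped while it was still linked into the old group; the PG_TEARDOWN test then decides whether this detach path owns the final free. Either way, the subrequest's headlock is now released on every exit. A condensed, hedged sketch of that control flow (finish_detach_sketch is a hypothetical name, not upstream code):

/* Sketch: condensed form of the reworked zero-refcount branch. */
static void finish_detach_sketch(struct nfs_page *subreq)
{
	if (!kref_read(&subreq->wb_kref)) {
		/* Did nfs_page_group_destroy() leave the teardown to us? */
		bool teardown = test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags);

		nfs_page_clear_headlock(subreq);	/* drop the lock before freeing */
		if (teardown)
			nfs_free_request(subreq);
		return;
	}
	nfs_page_clear_headlock(subreq);
	/* caller continues: releases old_head, then per-inode accounting */
}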
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -142,6 +142,8 @@ extern void nfs_unlock_and_release_request(struct nfs_page *);
 extern int nfs_page_group_lock(struct nfs_page *);
 extern void nfs_page_group_unlock(struct nfs_page *);
 extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
+extern int nfs_page_set_headlock(struct nfs_page *req);
+extern void nfs_page_clear_headlock(struct nfs_page *req);
 extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *);
 
 /*