nvme: change namespaces_mutex to namespaces_rwsem
namespaces_mutex is used to synchronize operations on the ctrl namespaces list. Most of the time these are read operations. On the other hand, many interfaces in the nvme core need this lock, such as nvme_wait_freeze, and even more will be added. If we use a mutex here, a circular dependency is easily introduced. For example:

context A                 context B
nvme_xxx                  nvme_xxx
hold namespaces_mutex     require namespaces_mutex
sync context B

So it is better to change it from a mutex to an rwsem.

Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 6f8e0d787e
commit 765cc031cd

3 changed files with 35 additions and 35 deletions
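To make the read-side benefit concrete, here is a minimal user-space sketch (illustration only, not part of this patch; the names walk_namespaces/update_namespace and the fixed-size array are made up) of the same read-mostly pattern using POSIX rwlocks. Several readers may traverse the list concurrently, while a writer gets exclusive access, mirroring the down_read()/down_write() split in the diff below:

/* Build with: cc -pthread example.c */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t namespaces_rwsem = PTHREAD_RWLOCK_INITIALIZER;
static int namespaces[4] = { 1, 2, 3, 4 };

/* Reader side: analogous to nvme_stop_queues() et al. after this patch;
 * many threads may run this at once. */
static void walk_namespaces(void)
{
	pthread_rwlock_rdlock(&namespaces_rwsem);
	for (int i = 0; i < 4; i++)
		printf("ns %d\n", namespaces[i]);
	pthread_rwlock_unlock(&namespaces_rwsem);
}

/* Writer side: analogous to nvme_alloc_ns()/nvme_ns_remove();
 * excludes readers and other writers. */
static void update_namespace(int idx, int val)
{
	pthread_rwlock_wrlock(&namespaces_rwsem);
	namespaces[idx] = val;
	pthread_rwlock_unlock(&namespaces_rwsem);
}

int main(void)
{
	update_namespace(0, 9);
	walk_namespaces();
	return 0;
}

With a mutex, every walker serializes against every other; with the rwsem, only list mutation excludes the walkers, which is what breaks the circular wait described above.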
drivers/nvme/host/core.c

@@ -1125,13 +1125,13 @@ static void nvme_update_formats(struct nvme_ctrl *ctrl)
 	struct nvme_ns *ns, *next;
 	LIST_HEAD(rm_list);
 
-	mutex_lock(&ctrl->namespaces_mutex);
+	down_write(&ctrl->namespaces_rwsem);
 	list_for_each_entry(ns, &ctrl->namespaces, list) {
 		if (ns->disk && nvme_revalidate_disk(ns->disk)) {
 			list_move_tail(&ns->list, &rm_list);
 		}
 	}
-	mutex_unlock(&ctrl->namespaces_mutex);
+	up_write(&ctrl->namespaces_rwsem);
 
 	list_for_each_entry_safe(ns, next, &rm_list, list)
 		nvme_ns_remove(ns);
@@ -2441,7 +2441,7 @@ static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
 	struct nvme_ns *ns;
 	int ret;
 
-	mutex_lock(&ctrl->namespaces_mutex);
+	down_read(&ctrl->namespaces_rwsem);
 	if (list_empty(&ctrl->namespaces)) {
 		ret = -ENOTTY;
 		goto out_unlock;
@@ -2458,14 +2458,14 @@ static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
 	dev_warn(ctrl->device,
 		"using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
 	kref_get(&ns->kref);
-	mutex_unlock(&ctrl->namespaces_mutex);
+	up_read(&ctrl->namespaces_rwsem);
 
 	ret = nvme_user_cmd(ctrl, ns, argp);
 	nvme_put_ns(ns);
 	return ret;
 
 out_unlock:
-	mutex_unlock(&ctrl->namespaces_mutex);
+	up_read(&ctrl->namespaces_rwsem);
 	return ret;
 }
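The hunk above also shows why a read lock suffices on the ioctl path: the namespace is pinned with kref_get() before the lock is dropped, so a concurrent nvme_ns_remove() cannot free it while the command runs. A rough stand-alone analogue (illustration only; struct obj and its helpers are hypothetical, not the kernel's kref API):

#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_int refs;	/* stands in for ns->kref */
};

static void obj_get(struct obj *o)
{
	atomic_fetch_add(&o->refs, 1);	/* like kref_get(&ns->kref) */
}

static void obj_put(struct obj *o)
{
	/* Free on the last reference, like nvme_put_ns(). */
	if (atomic_fetch_sub(&o->refs, 1) == 1)
		free(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));
	atomic_init(&o->refs, 1);	/* creation reference */
	obj_get(o);	/* pin under the lock, then drop the lock */
	obj_put(o);	/* command finished */
	obj_put(o);	/* final put frees the object */
	return 0;
}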
@@ -2894,7 +2894,7 @@ static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 {
 	struct nvme_ns *ns, *ret = NULL;
 
-	mutex_lock(&ctrl->namespaces_mutex);
+	down_read(&ctrl->namespaces_rwsem);
 	list_for_each_entry(ns, &ctrl->namespaces, list) {
 		if (ns->head->ns_id == nsid) {
 			if (!kref_get_unless_zero(&ns->kref))
@@ -2905,7 +2905,7 @@ static struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 		if (ns->head->ns_id > nsid)
 			break;
 	}
-	mutex_unlock(&ctrl->namespaces_mutex);
+	up_read(&ctrl->namespaces_rwsem);
 	return ret;
 }
@@ -3016,9 +3016,9 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 
 	__nvme_revalidate_disk(disk, id);
 
-	mutex_lock(&ctrl->namespaces_mutex);
+	down_write(&ctrl->namespaces_rwsem);
 	list_add_tail(&ns->list, &ctrl->namespaces);
-	mutex_unlock(&ctrl->namespaces_mutex);
+	up_write(&ctrl->namespaces_rwsem);
 
 	nvme_get_ctrl(ctrl);
@@ -3072,9 +3072,9 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 	list_del_rcu(&ns->siblings);
 	mutex_unlock(&ns->ctrl->subsys->lock);
 
-	mutex_lock(&ns->ctrl->namespaces_mutex);
+	down_write(&ns->ctrl->namespaces_rwsem);
 	list_del_init(&ns->list);
-	mutex_unlock(&ns->ctrl->namespaces_mutex);
+	up_write(&ns->ctrl->namespaces_rwsem);
 
 	synchronize_srcu(&ns->head->srcu);
 	nvme_mpath_check_last_path(ns);
@@ -3100,12 +3100,12 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
 	struct nvme_ns *ns, *next;
 	LIST_HEAD(rm_list);
 
-	mutex_lock(&ctrl->namespaces_mutex);
+	down_write(&ctrl->namespaces_rwsem);
 	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
 		if (ns->head->ns_id > nsid)
 			list_move_tail(&ns->list, &rm_list);
 	}
-	mutex_unlock(&ctrl->namespaces_mutex);
+	up_write(&ctrl->namespaces_rwsem);
 
 	list_for_each_entry_safe(ns, next, &rm_list, list)
 		nvme_ns_remove(ns);
@@ -3185,9 +3185,9 @@ static void nvme_scan_work(struct work_struct *work)
 	}
 	nvme_scan_ns_sequential(ctrl, nn);
 done:
-	mutex_lock(&ctrl->namespaces_mutex);
+	down_write(&ctrl->namespaces_rwsem);
 	list_sort(NULL, &ctrl->namespaces, ns_cmp);
-	mutex_unlock(&ctrl->namespaces_mutex);
+	up_write(&ctrl->namespaces_rwsem);
 	kfree(id);
 }
@@ -3220,9 +3220,9 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
 	if (ctrl->state == NVME_CTRL_DEAD)
 		nvme_kill_queues(ctrl);
 
-	mutex_lock(&ctrl->namespaces_mutex);
+	down_write(&ctrl->namespaces_rwsem);
 	list_splice_init(&ctrl->namespaces, &ns_list);
-	mutex_unlock(&ctrl->namespaces_mutex);
+	up_write(&ctrl->namespaces_rwsem);
 
 	list_for_each_entry_safe(ns, next, &ns_list, list)
 		nvme_ns_remove(ns);
@@ -3411,7 +3411,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
 	ctrl->state = NVME_CTRL_NEW;
 	spin_lock_init(&ctrl->lock);
 	INIT_LIST_HEAD(&ctrl->namespaces);
-	mutex_init(&ctrl->namespaces_mutex);
+	init_rwsem(&ctrl->namespaces_rwsem);
 	ctrl->dev = dev;
 	ctrl->ops = ops;
 	ctrl->quirks = quirks;
@@ -3472,7 +3472,7 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
 {
 	struct nvme_ns *ns;
 
-	mutex_lock(&ctrl->namespaces_mutex);
+	down_read(&ctrl->namespaces_rwsem);
 
 	/* Forcibly unquiesce queues to avoid blocking dispatch */
 	if (ctrl->admin_q)
@@ -3491,7 +3491,7 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
 		/* Forcibly unquiesce queues to avoid blocking dispatch */
 		blk_mq_unquiesce_queue(ns->queue);
 	}
-	mutex_unlock(&ctrl->namespaces_mutex);
+	up_read(&ctrl->namespaces_rwsem);
 }
 EXPORT_SYMBOL_GPL(nvme_kill_queues);
@@ -3499,10 +3499,10 @@ void nvme_unfreeze(struct nvme_ctrl *ctrl)
 {
 	struct nvme_ns *ns;
 
-	mutex_lock(&ctrl->namespaces_mutex);
+	down_read(&ctrl->namespaces_rwsem);
 	list_for_each_entry(ns, &ctrl->namespaces, list)
 		blk_mq_unfreeze_queue(ns->queue);
-	mutex_unlock(&ctrl->namespaces_mutex);
+	up_read(&ctrl->namespaces_rwsem);
 }
 EXPORT_SYMBOL_GPL(nvme_unfreeze);
@@ -3510,13 +3510,13 @@ void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
 {
 	struct nvme_ns *ns;
 
-	mutex_lock(&ctrl->namespaces_mutex);
+	down_read(&ctrl->namespaces_rwsem);
 	list_for_each_entry(ns, &ctrl->namespaces, list) {
 		timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
 		if (timeout <= 0)
 			break;
 	}
-	mutex_unlock(&ctrl->namespaces_mutex);
+	up_read(&ctrl->namespaces_rwsem);
 }
 EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
@@ -3524,10 +3524,10 @@ void nvme_wait_freeze(struct nvme_ctrl *ctrl)
 {
 	struct nvme_ns *ns;
 
-	mutex_lock(&ctrl->namespaces_mutex);
+	down_read(&ctrl->namespaces_rwsem);
 	list_for_each_entry(ns, &ctrl->namespaces, list)
 		blk_mq_freeze_queue_wait(ns->queue);
-	mutex_unlock(&ctrl->namespaces_mutex);
+	up_read(&ctrl->namespaces_rwsem);
 }
 EXPORT_SYMBOL_GPL(nvme_wait_freeze);
@@ -3535,10 +3535,10 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl)
 {
 	struct nvme_ns *ns;
 
-	mutex_lock(&ctrl->namespaces_mutex);
+	down_read(&ctrl->namespaces_rwsem);
 	list_for_each_entry(ns, &ctrl->namespaces, list)
 		blk_freeze_queue_start(ns->queue);
-	mutex_unlock(&ctrl->namespaces_mutex);
+	up_read(&ctrl->namespaces_rwsem);
 }
 EXPORT_SYMBOL_GPL(nvme_start_freeze);
@@ -3546,10 +3546,10 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl)
 {
 	struct nvme_ns *ns;
 
-	mutex_lock(&ctrl->namespaces_mutex);
+	down_read(&ctrl->namespaces_rwsem);
 	list_for_each_entry(ns, &ctrl->namespaces, list)
 		blk_mq_quiesce_queue(ns->queue);
-	mutex_unlock(&ctrl->namespaces_mutex);
+	up_read(&ctrl->namespaces_rwsem);
 }
 EXPORT_SYMBOL_GPL(nvme_stop_queues);
@@ -3557,10 +3557,10 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
 {
 	struct nvme_ns *ns;
 
-	mutex_lock(&ctrl->namespaces_mutex);
+	down_read(&ctrl->namespaces_rwsem);
 	list_for_each_entry(ns, &ctrl->namespaces, list)
 		blk_mq_unquiesce_queue(ns->queue);
-	mutex_unlock(&ctrl->namespaces_mutex);
+	up_read(&ctrl->namespaces_rwsem);
 }
 EXPORT_SYMBOL_GPL(nvme_start_queues);
drivers/nvme/host/multipath.c

@@ -44,12 +44,12 @@ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
 {
 	struct nvme_ns *ns;
 
-	mutex_lock(&ctrl->namespaces_mutex);
+	down_read(&ctrl->namespaces_rwsem);
 	list_for_each_entry(ns, &ctrl->namespaces, list) {
 		if (ns->head->disk)
 			kblockd_schedule_work(&ns->head->requeue_work);
 	}
-	mutex_unlock(&ctrl->namespaces_mutex);
+	up_read(&ctrl->namespaces_rwsem);
 }
 
 static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head)
drivers/nvme/host/nvme.h

@@ -141,7 +141,7 @@ struct nvme_ctrl {
 	struct blk_mq_tag_set *tagset;
 	struct blk_mq_tag_set *admin_tagset;
 	struct list_head namespaces;
-	struct mutex namespaces_mutex;
+	struct rw_semaphore namespaces_rwsem;
 	struct device ctrl_device;
 	struct device *device;	/* char device */
 	struct cdev cdev;