nvme-rdma: fix timeout handler
[ Upstream commit 4c174e6366746ae8d49f9cc409f728eebb7a9ac9 ] Currently, we have several problems with the timeout handler: 1. If we timeout on the controller establishment flow, we will hang because we don't execute the error recovery (and we shouldn't because the create_ctrl flow needs to fail and cleanup on its own) 2. We might also hang if we get a disconnect on a queue while the controller is already deleting. This racy flow can cause the controller disable/shutdown admin command to hang. We cannot complete a timed out request from the timeout handler without mutual exclusion from the teardown flow (e.g. nvme_rdma_error_recovery_work). So we serialize it in the timeout handler and teardown io and admin queues to guarantee that no one races with us from completing the request. Reported-by: Jaesoo Lee <jalee@purestorage.com> Reviewed-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Sagi Grimberg <sagi@grimberg.me> Signed-off-by: Jens Axboe <axboe@kernel.dk> Signed-off-by: Sasha Levin <sashal@kernel.org>
This commit is contained in:
parent
bbbb9874a9
commit
550e0ea7e7
1 changed file with 18 additions and 8 deletions
|
@ -1672,18 +1672,28 @@ static enum blk_eh_timer_return
|
||||||
nvme_rdma_timeout(struct request *rq, bool reserved)
|
nvme_rdma_timeout(struct request *rq, bool reserved)
|
||||||
{
|
{
|
||||||
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
|
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
|
||||||
|
struct nvme_rdma_queue *queue = req->queue;
|
||||||
|
struct nvme_rdma_ctrl *ctrl = queue->ctrl;
|
||||||
|
|
||||||
dev_warn(req->queue->ctrl->ctrl.device,
|
dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
|
||||||
"I/O %d QID %d timeout, reset controller\n",
|
rq->tag, nvme_rdma_queue_idx(queue));
|
||||||
rq->tag, nvme_rdma_queue_idx(req->queue));
|
|
||||||
|
|
||||||
/* queue error recovery */
|
if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
|
||||||
nvme_rdma_error_recovery(req->queue->ctrl);
|
/*
|
||||||
|
* Teardown immediately if controller times out while starting
|
||||||
|
* or we are already started error recovery. all outstanding
|
||||||
|
* requests are completed on shutdown, so we return BLK_EH_DONE.
|
||||||
|
*/
|
||||||
|
flush_work(&ctrl->err_work);
|
||||||
|
nvme_rdma_teardown_io_queues(ctrl, false);
|
||||||
|
nvme_rdma_teardown_admin_queue(ctrl, false);
|
||||||
|
return BLK_EH_DONE;
|
||||||
|
}
|
||||||
|
|
||||||
/* fail with DNR on cmd timeout */
|
dev_warn(ctrl->ctrl.device, "starting error recovery\n");
|
||||||
nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
|
nvme_rdma_error_recovery(ctrl);
|
||||||
|
|
||||||
return BLK_EH_DONE;
|
return BLK_EH_RESET_TIMER;
|
||||||
}
|
}
|
||||||
|
|
||||||
static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
|
static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
|
||||||
|
|
Loading…
Reference in a new issue