Merge tag 'nvme-5.10-2020-11-05' of git://git.infradead.org/nvme into block-5.10
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 541b0cba6d80191be7d6d0a5857c2274ce0aac90..65e3d0ef36e1a33dffad84405aaa07d819385380 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -122,7 +122,6 @@ struct nvme_rdma_ctrl {
        struct sockaddr_storage src_addr;
 
        struct nvme_ctrl        ctrl;
-       struct mutex            teardown_lock;
        bool                    use_inline_data;
        u32                     io_queues[HCTX_MAX_TYPES];
 };
@@ -1010,8 +1009,8 @@ out_free_io_queues:
 static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
                bool remove)
 {
-       mutex_lock(&ctrl->teardown_lock);
        blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+       blk_sync_queue(ctrl->ctrl.admin_q);
        nvme_rdma_stop_queue(&ctrl->queues[0]);
        if (ctrl->ctrl.admin_tagset) {
                blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset,
@@ -1021,16 +1020,15 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
        if (remove)
                blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
        nvme_rdma_destroy_admin_queue(ctrl, remove);
-       mutex_unlock(&ctrl->teardown_lock);
 }
 
 static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
                bool remove)
 {
-       mutex_lock(&ctrl->teardown_lock);
        if (ctrl->ctrl.queue_count > 1) {
                nvme_start_freeze(&ctrl->ctrl);
                nvme_stop_queues(&ctrl->ctrl);
+               nvme_sync_io_queues(&ctrl->ctrl);
                nvme_rdma_stop_io_queues(ctrl);
                if (ctrl->ctrl.tagset) {
                        blk_mq_tagset_busy_iter(ctrl->ctrl.tagset,
@@ -1041,7 +1039,6 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
                        nvme_start_queues(&ctrl->ctrl);
                nvme_rdma_destroy_io_queues(ctrl, remove);
        }
-       mutex_unlock(&ctrl->teardown_lock);
 }
 
 static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
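[Note: the per-controller teardown_lock can be dropped above because quiescing plus syncing the block queues already fences the timeout path: blk_sync_queue() waits for any running timeout work on a queue, and the nvme_sync_io_queues() helper introduced in this same pull does that for every namespace queue. Roughly, the 5.10-era helpers look like the following; this is an illustrative sketch, not a verbatim copy of block/blk-core.c or drivers/nvme/host/core.c.]

/* Sketch of the fencing helpers relied on above (5.10-era, abbreviated). */
void blk_sync_queue(struct request_queue *q)
{
	del_timer_sync(&q->timeout);		/* stop the per-queue timeout timer */
	cancel_work_sync(&q->timeout_work);	/* wait out a running timeout handler */
}

void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_sync_queue(ns->queue);	/* same fence, applied per namespace queue */
	up_read(&ctrl->namespaces_rwsem);
}
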
@@ -1976,16 +1973,12 @@ static void nvme_rdma_complete_timed_out(struct request *rq)
 {
        struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
        struct nvme_rdma_queue *queue = req->queue;
-       struct nvme_rdma_ctrl *ctrl = queue->ctrl;
 
-       /* fence other contexts that may complete the command */
-       mutex_lock(&ctrl->teardown_lock);
        nvme_rdma_stop_queue(queue);
-       if (!blk_mq_request_completed(rq)) {
+       if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) {
                nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
                blk_mq_complete_request(rq);
        }
-       mutex_unlock(&ctrl->teardown_lock);
 }
 
 static enum blk_eh_timer_return
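[Note: with the teardown_lock gone, the extra blk_mq_request_started() check guards against completing a command that error recovery has already cancelled and requeued (back to IDLE) while the timeout handler was running, which would otherwise be a double completion. The blk-mq state helpers it relies on are roughly the following 5.10-era sketch, not a verbatim copy of include/linux/blk-mq.h.]

/* Sketch of the request-state checks used above (5.10-era, abbreviated). */
static inline int blk_mq_request_started(struct request *rq)
{
	/* becomes false again once the request is cancelled and requeued */
	return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
}

static inline int blk_mq_request_completed(struct request *rq)
{
	/* true if another context already completed the command */
	return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
}
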
@@ -2320,7 +2313,6 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
                return ERR_PTR(-ENOMEM);
        ctrl->ctrl.opts = opts;
        INIT_LIST_HEAD(&ctrl->list);
-       mutex_init(&ctrl->teardown_lock);
 
        if (!(opts->mask & NVMF_OPT_TRSVCID)) {
                opts->trsvcid =