s390/dasd: simplify locking in dasd_times_out
authorSebastian Ott <sebott@linux.ibm.com>
Wed, 16 May 2018 09:25:21 +0000 (11:25 +0200)
committerMartin Schwidefsky <schwidefsky@de.ibm.com>
Fri, 25 May 2018 06:12:50 +0000 (08:12 +0200)
Provide __dasd_cancel_req that is called with the ccw device lock
held to simplify the locking in dasd_times_out. Also this removes
the following sparse warning:
context imbalance in 'dasd_times_out' - different lock contexts for basic block

Note: with this change dasd_schedule_device_bh is now called (via
dasd_cancel_req) with the ccw device lock held. But it is already
the case for other code paths.

Signed-off-by: Sebastian Ott <sebott@linux.ibm.com>
Reviewed-by: Stefan Haberland <sth@linux.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
drivers/s390/block/dasd.c

index 04143c08bd6e8fe13f16b9536053f3c3b0c2d771..f401b4c38cfce3e892fe8f704288e9e3cb92b5c4 100644 (file)
@@ -2569,14 +2569,11 @@ EXPORT_SYMBOL(dasd_sleep_on_immediatly);
  * Cancellation of a request is an asynchronous operation! The calling
  * function has to wait until the request is properly returned via callback.
  */
-int dasd_cancel_req(struct dasd_ccw_req *cqr)
+static int __dasd_cancel_req(struct dasd_ccw_req *cqr)
 {
        struct dasd_device *device = cqr->startdev;
-       unsigned long flags;
-       int rc;
+       int rc = 0;
 
-       rc = 0;
-       spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        switch (cqr->status) {
        case DASD_CQR_QUEUED:
                /* request was not started - just set to cleared */
@@ -2596,11 +2593,21 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr)
        default: /* already finished or clear pending - do nothing */
                break;
        }
-       spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
        dasd_schedule_device_bh(device);
        return rc;
 }
-EXPORT_SYMBOL(dasd_cancel_req);
+
+int dasd_cancel_req(struct dasd_ccw_req *cqr)
+{
+       struct dasd_device *device = cqr->startdev;
+       unsigned long flags;
+       int rc;
+
+       spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+       rc = __dasd_cancel_req(cqr);
+       spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
+       return rc;
+}
 
 /*
  * SECTION: Operations of the dasd_block layer.
@@ -3082,12 +3089,10 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
        cqr->retries = -1;
        cqr->intrc = -ETIMEDOUT;
        if (cqr->status >= DASD_CQR_QUEUED) {
-               spin_unlock(get_ccwdev_lock(device->cdev));
-               rc = dasd_cancel_req(cqr);
+               rc = __dasd_cancel_req(cqr);
        } else if (cqr->status == DASD_CQR_FILLED ||
                   cqr->status == DASD_CQR_NEED_ERP) {
                cqr->status = DASD_CQR_TERMINATED;
-               spin_unlock(get_ccwdev_lock(device->cdev));
        } else if (cqr->status == DASD_CQR_IN_ERP) {
                struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr;
 
@@ -3102,9 +3107,7 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
                        searchcqr->retries = -1;
                        searchcqr->intrc = -ETIMEDOUT;
                        if (searchcqr->status >= DASD_CQR_QUEUED) {
-                               spin_unlock(get_ccwdev_lock(device->cdev));
-                               rc = dasd_cancel_req(searchcqr);
-                               spin_lock(get_ccwdev_lock(device->cdev));
+                               rc = __dasd_cancel_req(searchcqr);
                        } else if ((searchcqr->status == DASD_CQR_FILLED) ||
                                   (searchcqr->status == DASD_CQR_NEED_ERP)) {
                                searchcqr->status = DASD_CQR_TERMINATED;
@@ -3118,8 +3121,8 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
                        }
                        break;
                }
-               spin_unlock(get_ccwdev_lock(device->cdev));
        }
+       spin_unlock(get_ccwdev_lock(device->cdev));
        dasd_schedule_block_bh(block);
        spin_unlock(&block->queue_lock);
        spin_unlock_irqrestore(&cqr->dq->lock, flags);