s390/dasd: Implement block timeout handling
author Hannes Reinecke <hare@suse.de>
Wed, 30 Jan 2013 09:26:14 +0000 (09:26 +0000)
committer Martin Schwidefsky <schwidefsky@de.ibm.com>
Mon, 1 Jul 2013 15:31:20 +0000 (17:31 +0200)
This patch implements a generic block layer timeout handling
callback for DASDs. When the timeout expires, the respective
cqr (CCW request) is aborted.

With this timeout handler, a time-critical request abort is
guaranteed, as the abort does not depend on the internal state
of the various DASD driver queues.

Signed-off-by: Hannes Reinecke <hare@suse.de>
Acked-by: Stefan Weinhuber <wein@de.ibm.com>
Signed-off-by: Stefan Weinhuber <wein@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
drivers/s390/block/dasd.c
drivers/s390/block/dasd_diag.c
drivers/s390/block/dasd_eckd.c
drivers/s390/block/dasd_fba.c

diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 000e5140bda4d54051e9669e7a2a9dc88769d107..87478becedb0d4c0393d80d4ff05e6e0aa0a166e 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2573,8 +2573,10 @@ static void __dasd_process_request_queue(struct dasd_block *block)
                 */
                cqr->callback_data = (void *) req;
                cqr->status = DASD_CQR_FILLED;
+               req->completion_data = cqr;
                blk_start_request(req);
                list_add_tail(&cqr->blocklist, &block->ccw_queue);
+               INIT_LIST_HEAD(&cqr->devlist);
                dasd_profile_start(block, cqr, req);
        }
 }
@@ -2861,6 +2863,80 @@ static void do_dasd_request(struct request_queue *queue)
        spin_unlock(&block->queue_lock);
 }
 
+/*
+ * Block timeout callback, called from the block layer
+ *
+ * request_queue lock is held on entry.
+ *
+ * Return values:
+ * BLK_EH_RESET_TIMER if the request should be left running
+ * BLK_EH_NOT_HANDLED if the request is handled or terminated
+ *                   by the driver.
+ */
+enum blk_eh_timer_return dasd_times_out(struct request *req)
+{
+       struct dasd_ccw_req *cqr = req->completion_data;
+       struct dasd_block *block = req->q->queuedata;
+       struct dasd_device *device;
+       int rc = 0;
+
+       if (!cqr)
+               return BLK_EH_NOT_HANDLED;
+
+       device = cqr->startdev ? cqr->startdev : block->base;
+       DBF_DEV_EVENT(DBF_WARNING, device,
+                     " dasd_times_out cqr %p status %x",
+                     cqr, cqr->status);
+
+       spin_lock(&block->queue_lock);
+       spin_lock(get_ccwdev_lock(device->cdev));
+       cqr->retries = -1;
+       cqr->intrc = -ETIMEDOUT;
+       if (cqr->status >= DASD_CQR_QUEUED) {
+               spin_unlock(get_ccwdev_lock(device->cdev));
+               rc = dasd_cancel_req(cqr);
+       } else if (cqr->status == DASD_CQR_FILLED ||
+                  cqr->status == DASD_CQR_NEED_ERP) {
+               cqr->status = DASD_CQR_TERMINATED;
+               spin_unlock(get_ccwdev_lock(device->cdev));
+       } else if (cqr->status == DASD_CQR_IN_ERP) {
+               struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr;
+
+               list_for_each_entry_safe(searchcqr, nextcqr,
+                                        &block->ccw_queue, blocklist) {
+                       tmpcqr = searchcqr;
+                       while (tmpcqr->refers)
+                               tmpcqr = tmpcqr->refers;
+                       if (tmpcqr != cqr)
+                               continue;
+                       /* searchcqr is an ERP request for cqr */
+                       searchcqr->retries = -1;
+                       searchcqr->intrc = -ETIMEDOUT;
+                       if (searchcqr->status >= DASD_CQR_QUEUED) {
+                               spin_unlock(get_ccwdev_lock(device->cdev));
+                               rc = dasd_cancel_req(searchcqr);
+                               spin_lock(get_ccwdev_lock(device->cdev));
+                       } else if ((searchcqr->status == DASD_CQR_FILLED) ||
+                                  (searchcqr->status == DASD_CQR_NEED_ERP)) {
+                               searchcqr->status = DASD_CQR_TERMINATED;
+                               rc = 0;
+                       } else if (searchcqr->status == DASD_CQR_IN_ERP) {
+                               /*
+                                * Shouldn't happen; most recent ERP
+                                * request is at the front of queue
+                                */
+                               continue;
+                       }
+                       break;
+               }
+               spin_unlock(get_ccwdev_lock(device->cdev));
+       }
+       dasd_schedule_block_bh(block);
+       spin_unlock(&block->queue_lock);
+
+       return rc ? BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED;
+}
+
 /*
  * Allocate and initialize request queue and default I/O scheduler.
  */
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 154842242c3d01158c7934dd91c3ae47e1a93577..feca317b33debfb78409540fab75b1841c0019a0 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -583,7 +583,10 @@ dasd_diag_free_cp(struct dasd_ccw_req *cqr, struct request *req)
 
 static void dasd_diag_handle_terminated_request(struct dasd_ccw_req *cqr)
 {
-       cqr->status = DASD_CQR_FILLED;
+       if (cqr->retries < 0)
+               cqr->status = DASD_CQR_FAILED;
+       else
+               cqr->status = DASD_CQR_FILLED;
 };
 
 /* Fill in IOCTL data for device. */
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 05f5694c9c49c3b666bd0caab0072acfe560e974..e61a6deea3c0fc6bc406c7ac2555b6eda1015dc3 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2381,6 +2381,10 @@ sleep:
 
 static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
 {
+       if (cqr->retries < 0) {
+               cqr->status = DASD_CQR_FAILED;
+               return;
+       }
        cqr->status = DASD_CQR_FILLED;
        if (cqr->block && (cqr->startdev != cqr->block->base)) {
                dasd_eckd_reset_ccw_to_base_io(cqr);
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index d232c83ea95a529284de1a4d95ac1b5291d74161..9cbc8c32ba595739cdff63da752a8f080b51e0de 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -428,7 +428,10 @@ out:
 
 static void dasd_fba_handle_terminated_request(struct dasd_ccw_req *cqr)
 {
-       cqr->status = DASD_CQR_FILLED;
+       if (cqr->retries < 0)
+               cqr->status = DASD_CQR_FAILED;
+       else
+               cqr->status = DASD_CQR_FILLED;
 };
 
 static int