diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 9e809345f71abd718cecc27f6fd4ea068ed4775c..a4809de6fea656a4ac6091f6c61c38f536a4f766 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -160,6 +160,7 @@ struct cfq_queue {
 
        unsigned long slice_end;
        long slice_resid;
+       unsigned int slice_dispatch;
 
        /* pending metadata requests */
        int meta_pending;
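
The new slice_dispatch field counts how many requests the queue has dispatched within its current slice; later hunks reset it in __cfq_set_active_queue() and weigh it against the per-priority budget when deciding to expire the slice. A minimal userspace sketch of that accounting, assuming a budget of 8 where the real code uses cfq_prio_to_maxrq():

#include <stdio.h>

/* Sketch of the per-slice dispatch accounting introduced above.
 * prio_to_maxrq() stands in for cfq_prio_to_maxrq(); the value 8
 * is an assumption for illustration. */
struct slice {
	unsigned int slice_dispatch;	/* requests dispatched this slice */
};

static unsigned int prio_to_maxrq(void)
{
	return 8;			/* assumed per-slice budget */
}

static void queue_activated(struct slice *s)
{
	s->slice_dispatch = 0;		/* reset in __cfq_set_active_queue() */
}

static int dispatch_one(struct slice *s)
{
	s->slice_dispatch++;		/* one increment per dispatched rq */
	return s->slice_dispatch >= prio_to_maxrq();
}

int main(void)
{
	struct slice s;

	queue_activated(&s);
	while (!dispatch_one(&s))
		;
	printf("slice budget exhausted after %u dispatches\n",
	       s.slice_dispatch);
	return 0;
}
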
@@ -176,13 +177,12 @@ struct cfq_queue {
 enum cfqq_state_flags {
        CFQ_CFQQ_FLAG_on_rr = 0,        /* on round-robin busy list */
        CFQ_CFQQ_FLAG_wait_request,     /* waiting for a request */
+       CFQ_CFQQ_FLAG_must_dispatch,    /* must be allowed a dispatch */
        CFQ_CFQQ_FLAG_must_alloc,       /* must be allowed rq alloc */
        CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
-       CFQ_CFQQ_FLAG_must_dispatch,    /* must dispatch, even if expired */
        CFQ_CFQQ_FLAG_fifo_expire,      /* FIFO checked in this slice */
        CFQ_CFQQ_FLAG_idle_window,      /* slice idling enabled */
        CFQ_CFQQ_FLAG_prio_changed,     /* task priority has changed */
-       CFQ_CFQQ_FLAG_queue_new,        /* queue never been serviced */
        CFQ_CFQQ_FLAG_slice_new,        /* no requests dispatched in slice */
        CFQ_CFQQ_FLAG_sync,             /* synchronous queue */
 };
@@ -203,13 +203,12 @@ static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)           \
 
 CFQ_CFQQ_FNS(on_rr);
 CFQ_CFQQ_FNS(wait_request);
+CFQ_CFQQ_FNS(must_dispatch);
 CFQ_CFQQ_FNS(must_alloc);
 CFQ_CFQQ_FNS(must_alloc_slice);
-CFQ_CFQQ_FNS(must_dispatch);
 CFQ_CFQQ_FNS(fifo_expire);
 CFQ_CFQQ_FNS(idle_window);
 CFQ_CFQQ_FNS(prio_changed);
-CFQ_CFQQ_FNS(queue_new);
 CFQ_CFQQ_FNS(slice_new);
 CFQ_CFQQ_FNS(sync);
 #undef CFQ_CFQQ_FNS
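
Each CFQ_CFQQ_FNS(name) invocation above generates a mark/clear/test helper triple for the matching CFQ_CFQQ_FLAG_ bit. Roughly what the reordered must_dispatch instance expands to, with just enough context to compile standalone (the flag value 2 follows from the new enum order; the exact macro body in the tree may differ):

/* Minimal context so the sketch is self-contained (illustrative). */
enum { CFQ_CFQQ_FLAG_must_dispatch = 2 };	/* per the reordered enum */
struct cfq_queue { unsigned int flags; };

/* Approximate expansion of CFQ_CFQQ_FNS(must_dispatch). */
static inline void cfq_mark_cfqq_must_dispatch(struct cfq_queue *cfqq)
{
	cfqq->flags |= (1 << CFQ_CFQQ_FLAG_must_dispatch);
}

static inline void cfq_clear_cfqq_must_dispatch(struct cfq_queue *cfqq)
{
	cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_must_dispatch);
}

static inline int cfq_cfqq_must_dispatch(const struct cfq_queue *cfqq)
{
	return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_must_dispatch)) != 0;
}
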
@@ -774,10 +773,15 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
        if (cfqq) {
                cfq_log_cfqq(cfqd, cfqq, "set_active");
                cfqq->slice_end = 0;
+               cfqq->slice_dispatch = 0;
+
+               cfq_clear_cfqq_wait_request(cfqq);
+               cfq_clear_cfqq_must_dispatch(cfqq);
                cfq_clear_cfqq_must_alloc_slice(cfqq);
                cfq_clear_cfqq_fifo_expire(cfqq);
                cfq_mark_cfqq_slice_new(cfqq);
-               cfq_clear_cfqq_queue_new(cfqq);
+
+               del_timer(&cfqd->idle_slice_timer);
        }
 
        cfqd->active_queue = cfqq;
@@ -795,7 +799,6 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
        if (cfq_cfqq_wait_request(cfqq))
                del_timer(&cfqd->idle_slice_timer);
 
-       cfq_clear_cfqq_must_dispatch(cfqq);
        cfq_clear_cfqq_wait_request(cfqq);
 
        /*
@@ -924,7 +927,6 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
            (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2))
                return;
 
-       cfq_mark_cfqq_must_dispatch(cfqq);
        cfq_mark_cfqq_wait_request(cfqq);
 
        /*
@@ -1010,7 +1012,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
        /*
         * The active queue has run out of time, expire it and select new.
         */
-       if (cfq_slice_used(cfqq))
+       if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
                goto expire;
 
        /*
@@ -1053,66 +1055,6 @@ keep_queue:
        return cfqq;
 }
 
-/*
- * Dispatch some requests from cfqq, moving them to the request queue
- * dispatch list.
- */
-static int
-__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-                       int max_dispatch)
-{
-       int dispatched = 0;
-
-       BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
-
-       do {
-               struct request *rq;
-
-               /*
-                * follow expired path, else get first next available
-                */
-               rq = cfq_check_fifo(cfqq);
-               if (rq == NULL)
-                       rq = cfqq->next_rq;
-
-               /*
-                * finally, insert request into driver dispatch list
-                */
-               cfq_dispatch_insert(cfqd->queue, rq);
-
-               dispatched++;
-
-               if (!cfqd->active_cic) {
-                       atomic_inc(&RQ_CIC(rq)->ioc->refcount);
-                       cfqd->active_cic = RQ_CIC(rq);
-               }
-
-               if (RB_EMPTY_ROOT(&cfqq->sort_list))
-                       break;
-
-               /*
-                * If there is a non-empty RT cfqq waiting for current
-                * cfqq's timeslice to complete, pre-empt this cfqq
-                */
-               if (!cfq_class_rt(cfqq) && cfqd->busy_rt_queues)
-                       break;
-
-       } while (dispatched < max_dispatch);
-
-       /*
-        * expire an async queue immediately if it has used up its slice. idle
-        * queue always expire after 1 dispatch round.
-        */
-       if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
-           dispatched >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
-           cfq_class_idle(cfqq))) {
-               cfqq->slice_end = jiffies + 1;
-               cfq_slice_expired(cfqd, 0);
-       }
-
-       return dispatched;
-}
-
 static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
 {
        int dispatched = 0;
@@ -1146,11 +1088,45 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
        return dispatched;
 }
 
+/*
+ * Dispatch a request from cfqq, moving it to the request queue
+ * dispatch list.
+ */
+static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+       struct request *rq;
+
+       BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
+
+       /*
+        * follow expired path, else get first next available
+        */
+        * follow the expired path, else get the next available request
+       if (!rq)
+               rq = cfqq->next_rq;
+
+       /*
+        * insert request into driver dispatch list
+        */
+       cfq_dispatch_insert(cfqd->queue, rq);
+
+       if (!cfqd->active_cic) {
+               struct cfq_io_context *cic = RQ_CIC(rq);
+
+               atomic_inc(&cic->ioc->refcount);
+               cfqd->active_cic = cic;
+       }
+}
+
+/*
+ * Find the cfqq that we need to service and move a request from it to the
+ * dispatch list.
+ */
 static int cfq_dispatch_requests(struct request_queue *q, int force)
 {
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct cfq_queue *cfqq;
-       int dispatched;
+       unsigned int max_dispatch;
 
        if (!cfqd->busy_queues)
                return 0;
@@ -1158,29 +1134,63 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
        if (unlikely(force))
                return cfq_forced_dispatch(cfqd);
 
-       dispatched = 0;
-       while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
-               int max_dispatch;
+       cfqq = cfq_select_queue(cfqd);
+       if (!cfqq)
+               return 0;
+
+       /*
+        * If this is an async queue and we have sync IO in flight, let it wait
+        */
+       if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
+               return 0;
+
+       max_dispatch = cfqd->cfq_quantum;
+       if (cfq_class_idle(cfqq))
+               max_dispatch = 1;
 
-               max_dispatch = cfqd->cfq_quantum;
+       /*
+        * Does this cfqq already have too much IO in flight?
+        */
+       if (cfqq->dispatched >= max_dispatch) {
+               /*
+                * an idle queue is only ever allowed a single IO in flight
+                */
                if (cfq_class_idle(cfqq))
-                       max_dispatch = 1;
+                       return 0;
 
-               if (cfqq->dispatched >= max_dispatch && cfqd->busy_queues > 1)
-                       break;
+               /*
+                * We have other queues, don't allow more IO from this one
+                */
+               if (cfqd->busy_queues > 1)
+                       return 0;
 
-               if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
-                       break;
+               /*
+                * we are the only busy queue, allow up to 4 times the quantum
+                */
+               if (cfqq->dispatched >= 4 * max_dispatch)
+                       return 0;
+       }
 
-               cfq_clear_cfqq_must_dispatch(cfqq);
-               cfq_clear_cfqq_wait_request(cfqq);
-               del_timer(&cfqd->idle_slice_timer);
+       /*
+        * Dispatch a request from this cfqq
+        */
+       cfq_dispatch_request(cfqd, cfqq);
+       cfqq->slice_dispatch++;
+       cfq_clear_cfqq_must_dispatch(cfqq);
 
-               dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
+       /*
+        * Expire an async queue immediately if it has used up its slice. An
+        * idle queue always expires after one dispatch round.
+        */
+       if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
+           cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
+           cfq_class_idle(cfqq))) {
+               cfqq->slice_end = jiffies + 1;
+               cfq_slice_expired(cfqd, 0);
        }
 
-       cfq_log(cfqd, "dispatched=%d", dispatched);
-       return dispatched;
+       cfq_log(cfqd, "dispatched a request");
+       return 1;
 }
 
 /*
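
Taken together, this hunk replaces the old batched dispatch loop with a single request per elevator call, gated by an in-flight budget: cfq_quantum while other queues are busy, four times that when the queue is alone, and exactly one request for the idle class. A self-contained userspace sketch of that decision, assuming cfq_quantum's default of 4:

#include <stdio.h>

/* Userspace sketch of the in-flight budget in cfq_dispatch_requests().
 * Names and the quantum default are assumptions for illustration. */
struct queue {
	unsigned int dispatched;	/* requests currently in flight */
	int is_idle;			/* idle scheduling class? */
};

static int may_dispatch(const struct queue *q, int busy_queues,
			unsigned int quantum)
{
	unsigned int max_dispatch = q->is_idle ? 1 : quantum;

	if (q->dispatched < max_dispatch)
		return 1;
	if (q->is_idle)			/* idle class: never more than one */
		return 0;
	if (busy_queues > 1)		/* competition: stop at the quantum */
		return 0;
	return q->dispatched < 4 * max_dispatch; /* alone: 4x the quantum */
}

int main(void)
{
	struct queue q = { .dispatched = 0, .is_idle = 0 };

	/* sole busy queue: keeps dispatching until the 4x cap bites */
	while (may_dispatch(&q, 1, 4))
		q.dispatched++;
	printf("sole queue capped at %u in flight\n", q.dispatched); /* 16 */
	return 0;
}
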
@@ -1506,7 +1516,6 @@ retry:
                cfqq->cfqd = cfqd;
 
                cfq_mark_cfqq_prio_changed(cfqq);
-               cfq_mark_cfqq_queue_new(cfqq);
 
                cfq_init_prio_data(cfqq, ioc);
 
@@ -1893,15 +1902,13 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 
        if (cfqq == cfqd->active_queue) {
                /*
-                * if we are waiting for a request for this queue, let it rip
-                * immediately and flag that we must not expire this queue
-                * just now
+                * Remember that we saw a request from this process, but
+                * don't start queuing just yet. Otherwise we risk seeing lots
+                * of tiny requests, because we disrupt the normal plugging
+                * and merging.
                 */
-               if (cfq_cfqq_wait_request(cfqq)) {
+               if (cfq_cfqq_wait_request(cfqq))
                        cfq_mark_cfqq_must_dispatch(cfqq);
-                       del_timer(&cfqd->idle_slice_timer);
-                       blk_start_queueing(cfqd->queue);
-               }
        } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
                /*
                 * not the active queue - expire current slice if it is
@@ -1910,7 +1917,6 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                 * this new queue is RT and the current one is BE
                 */
                cfq_preempt_queue(cfqd, cfqq);
-               cfq_mark_cfqq_must_dispatch(cfqq);
                blk_start_queueing(cfqd->queue);
        }
 }
@@ -2171,6 +2177,12 @@ static void cfq_idle_slice_timer(unsigned long data)
        if (cfqq) {
                timed_out = 0;
 
+               /*
+                * We saw a request before the queue expired, let it through
+                */
+               if (cfq_cfqq_must_dispatch(cfqq))
+                       goto out_kick;
+
                /*
                 * expired
                 */
@@ -2187,10 +2199,8 @@ static void cfq_idle_slice_timer(unsigned long data)
                /*
                 * not expired and it has a request pending, let it dispatch
                 */
-               if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
-                       cfq_mark_cfqq_must_dispatch(cfqq);
+               if (!RB_EMPTY_ROOT(&cfqq->sort_list))
                        goto out_kick;
-               }
        }
 expire:
        cfq_slice_expired(cfqd, timed_out);
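
The final hunks change how an idling queue reacts to a newly arriving request: cfq_rq_enqueued() no longer deletes the timer and kicks the queue directly, it only marks must_dispatch, and the idle slice timer honors that flag when it fires, preserving the plugging and merging window the comment describes. A userspace sketch of that handshake (all names illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the idle-window handshake after this patch: enqueue only
 * sets a flag; the idle timer makes the actual dispatch decision. */
struct qstate {
	bool wait_request;	/* queue is idling, waiting for IO */
	bool must_dispatch;	/* a request arrived while idling */
};

static void rq_enqueued(struct qstate *q)
{
	/* was: del_timer() + blk_start_queueing() right here */
	if (q->wait_request)
		q->must_dispatch = true;
}

static const char *idle_timer_fired(const struct qstate *q)
{
	if (q->must_dispatch)
		return "kick the queue and dispatch the pending request";
	return "slice timed out, expire and select another queue";
}

int main(void)
{
	struct qstate q = { .wait_request = true, .must_dispatch = false };

	rq_enqueued(&q);
	printf("timer: %s\n", idle_timer_fired(&q));
	return 0;
}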