diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 64df3fa303b0929eeac9e9609b3e71e7f88b6d2c..e0aa4dad67424ae45bfa93150f14c1856f75140e 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -92,6 +92,11 @@ struct cfq_data {
        struct cfq_queue *active_queue;
        struct cfq_io_context *active_cic;
 
+       /*
+        * async queue for each best-effort priority level
+        */
+       struct cfq_queue *async_cfqq[IOPRIO_BE_NR];
+
        struct timer_list idle_class_timer;
 
        sector_t last_position;
@@ -1351,8 +1356,8 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc)
 }
 
 static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
-             gfp_t gfp_mask)
+cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
+                    struct task_struct *tsk, gfp_t gfp_mask)
 {
        struct cfq_queue *cfqq, *new_cfqq = NULL;
        struct cfq_io_context *cic;
@@ -1405,12 +1410,38 @@ retry:
        if (new_cfqq)
                kmem_cache_free(cfq_pool, new_cfqq);
 
-       atomic_inc(&cfqq->ref);
 out:
        WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
        return cfqq;
 }
 
+static struct cfq_queue *
+cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk,
+             gfp_t gfp_mask)
+{
+       const int ioprio = task_ioprio(tsk);
+       struct cfq_queue *cfqq = NULL;
+
+       if (!is_sync)
+               cfqq = cfqd->async_cfqq[ioprio];
+       if (!cfqq) {
+               cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask);
+               if (!cfqq)
+                       return NULL;
+       }
+
+       /*
+        * pin the queue now that it's allocated, scheduler exit will prune it
+        */
+       if (!is_sync && !cfqd->async_cfqq[ioprio]) {
+               atomic_inc(&cfqq->ref);
+               cfqd->async_cfqq[ioprio] = cfqq;
+       }
+
+       atomic_inc(&cfqq->ref);
+       return cfqq;
+}
+
 /*
  * We drop cfq io contexts lazily, so we may find a dead one.
  */
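
The two-step scheme above is the heart of the change: the per-priority slot
acts as a cache that pins the queue with a reference of its own, while every
caller still takes a reference for itself. A minimal user-space sketch of the
same pattern, using hypothetical names and C11 atomics in place of the
kernel's atomic_t (a sketch of the idea, not the kernel code):

    #include <stdatomic.h>
    #include <stdlib.h>

    #define PRIO_LEVELS 8           /* stands in for IOPRIO_BE_NR */

    struct queue {
            atomic_int ref;
            int prio;
    };

    static struct queue *async_cache[PRIO_LEVELS];

    /* allocate a fresh queue with no references yet */
    static struct queue *find_alloc_queue(int prio)
    {
            struct queue *q = malloc(sizeof(*q));

            if (q) {
                    atomic_init(&q->ref, 0);
                    q->prio = prio;
            }
            return q;
    }

    static struct queue *get_queue(int prio, int is_sync)
    {
            struct queue *q = NULL;

            /* async requests at the same priority share one queue */
            if (!is_sync)
                    q = async_cache[prio];
            if (!q) {
                    q = find_alloc_queue(prio);
                    if (!q)
                            return NULL;
            }

            /* pin: the cache slot owns one reference of its own */
            if (!is_sync && !async_cache[prio]) {
                    atomic_fetch_add(&q->ref, 1);
                    async_cache[prio] = q;
            }

            /* and one reference for this caller */
            atomic_fetch_add(&q->ref, 1);
            return q;
    }

Without the pin, the last request completing against an async queue would drop
its final reference and free it, only for the next request at that priority to
allocate it all over again.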
@@ -2019,6 +2050,7 @@ static void cfq_exit_queue(elevator_t *e)
 {
        struct cfq_data *cfqd = e->elevator_data;
        request_queue_t *q = cfqd->queue;
+       int i;
 
        cfq_shutdown_timer_wq(cfqd);
 
@@ -2035,6 +2067,13 @@ static void cfq_exit_queue(elevator_t *e)
                __cfq_exit_single_io_context(cfqd, cic);
        }
 
+       /*
+        * Put the async queues
+        */
+       for (i = 0; i < IOPRIO_BE_NR; i++)
+               if (cfqd->async_cfqq[i])
+                       cfq_put_queue(cfqd->async_cfqq[i]);
+
        spin_unlock_irq(q->queue_lock);
 
        cfq_shutdown_timer_wq(cfqd);
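
The exit loop above releases exactly the references the pin took, so the
cached queues are freed once no in-flight request still holds them. The
matching teardown in the sketch, continuing with the same hypothetical names:

    /* drop one reference; free the queue on the last put */
    static void put_queue(struct queue *q)
    {
            if (atomic_fetch_sub(&q->ref, 1) == 1)
                    free(q);
    }

    /* scheduler exit: prune the cached async queues */
    static void exit_queues(void)
    {
            int i;

            for (i = 0; i < PRIO_LEVELS; i++) {
                    if (async_cache[i]) {
                            put_queue(async_cache[i]);
                            async_cache[i] = NULL;
                    }
            }
    }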
@@ -2090,13 +2129,11 @@ static void cfq_slab_kill(void)
 
 static int __init cfq_slab_setup(void)
 {
-       cfq_pool = kmem_cache_create("cfq_pool", sizeof(struct cfq_queue), 0, 0,
-                                       NULL, NULL);
+       cfq_pool = KMEM_CACHE(cfq_queue, 0);
        if (!cfq_pool)
                goto fail;
 
-       cfq_ioc_pool = kmem_cache_create("cfq_ioc_pool",
-                       sizeof(struct cfq_io_context), 0, 0, NULL, NULL);
+       cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
        if (!cfq_ioc_pool)
                goto fail;
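
The KMEM_CACHE() conversion is mechanical: the macro derives the cache name,
object size, and alignment from the struct tag, so none of them has to be kept
in sync by hand. In kernels of this vintage it expanded to roughly the
following (per include/linux/slab.h of the time; the trailing
constructor/destructor arguments disappeared in later releases):

    #define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct, \
                    sizeof(struct __struct), __alignof__(struct __struct), \
                    (__flags), NULL, NULL)

One visible side effect: the caches are now registered as "cfq_queue" and
"cfq_io_context" (the struct tags) rather than "cfq_pool" and "cfq_ioc_pool",
which is the name that shows up in /proc/slabinfo.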