diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index f28d1fb30608cd5667b6b6f9c67e545bdb4802de..0f962ecae91fb68345fe1d916791a78e06a2f2e8 100644
 /*
  * tunables
  */
-static const int cfq_quantum = 4;              /* max queue in one round of service */
+/* max queue in one round of service */
+static const int cfq_quantum = 4;
 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
-static const int cfq_back_max = 16 * 1024;     /* maximum backwards seek, in KiB */
-static const int cfq_back_penalty = 2;         /* penalty of a backwards seek */
-
+/* maximum backwards seek, in KiB */
+static const int cfq_back_max = 16 * 1024;
+/* penalty of a backwards seek */
+static const int cfq_back_penalty = 2;
 static const int cfq_slice_sync = HZ / 10;
 static int cfq_slice_async = HZ / 25;
 static const int cfq_slice_async_rq = 2;
@@ -37,7 +39,8 @@ static int cfq_slice_idle = HZ / 125;
 
 #define CFQ_SLICE_SCALE                (5)
 
-#define RQ_CIC(rq)             ((struct cfq_io_context*)(rq)->elevator_private)
+#define RQ_CIC(rq)             \
+       ((struct cfq_io_context *) (rq)->elevator_private)
 #define RQ_CFQQ(rq)            ((rq)->elevator_private2)
 
 static struct kmem_cache *cfq_pool;
@@ -171,15 +174,15 @@ enum cfqq_state_flags {
 #define CFQ_CFQQ_FNS(name)                                             \
 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)                \
 {                                                                      \
-       cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name);                     \
+       (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);                   \
 }                                                                      \
 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)       \
 {                                                                      \
-       cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                    \
+       (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);                  \
 }                                                                      \
 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)                \
 {                                                                      \
-       return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;        \
+       return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;      \
 }
 
 CFQ_CFQQ_FNS(on_rr);
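
For reference, with the added parentheses CFQ_CFQQ_FNS(on_rr) expands to roughly the helpers below (a sketch of the expansion; parenthesizing the macro parameter keeps it safe if it is ever passed something other than a plain pointer variable):

/* approximate expansion of CFQ_CFQQ_FNS(on_rr) */
static inline void cfq_mark_cfqq_on_rr(struct cfq_queue *cfqq)
{
        (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_on_rr);
}
static inline void cfq_clear_cfqq_on_rr(struct cfq_queue *cfqq)
{
        (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_on_rr);
}
static inline int cfq_cfqq_on_rr(const struct cfq_queue *cfqq)
{
        return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_on_rr)) != 0;
}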
@@ -1005,7 +1008,8 @@ __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                /*
                 * follow expired path, else get first next available
                 */
-               if ((rq = cfq_check_fifo(cfqq)) == NULL)
+               rq = cfq_check_fifo(cfqq);
+               if (rq == NULL)
                        rq = cfqq->next_rq;
 
                /*
@@ -1141,38 +1145,19 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 /*
  * Call func for each cic attached to this ioc. Returns number of cic's seen.
  */
-#define CIC_GANG_NR    16
 static unsigned int
 call_for_each_cic(struct io_context *ioc,
                  void (*func)(struct io_context *, struct cfq_io_context *))
 {
-       struct cfq_io_context *cics[CIC_GANG_NR];
-       unsigned long index = 0;
-       unsigned int called = 0;
-       int nr;
+       struct cfq_io_context *cic;
+       struct hlist_node *n;
+       int called = 0;
 
        rcu_read_lock();
-
-       do {
-               int i;
-
-               /*
-                * Perhaps there's a better way - this just gang lookups from
-                * 0 to the end, restarting after each CIC_GANG_NR from the
-                * last key + 1.
-                */
-               nr = radix_tree_gang_lookup(&ioc->radix_root, (void **) cics,
-                                               index, CIC_GANG_NR);
-               if (!nr)
-                       break;
-
-               called += nr;
-               index = 1 + (unsigned long) cics[nr - 1]->key;
-
-               for (i = 0; i < nr; i++)
-                       func(ioc, cics[i]);
-       } while (nr == CIC_GANG_NR);
-
+       hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list) {
+               func(ioc, cic);
+               called++;
+       }
        rcu_read_unlock();
 
        return called;
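
The gang radix-tree lookup is replaced above by a walk of an RCU-protected hlist kept alongside the radix tree. As a generic, self-contained sketch of that reader/writer pattern (the names item, item_list and item_lock are illustrative, not from the kernel; the four-argument hlist_for_each_entry_rcu form matches the API used in this diff, and on newer kernels the RCU list helpers live in <linux/rculist.h>):

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct item {
        struct hlist_node link;
        int val;
};

static HLIST_HEAD(item_list);
static DEFINE_SPINLOCK(item_lock);

/* writer side: modify the list under a lock, using the RCU list helpers */
static void add_item(struct item *it)
{
        spin_lock(&item_lock);
        hlist_add_head_rcu(&it->link, &item_list);
        spin_unlock(&item_lock);
}

/* reader side: traverse lock-free inside an RCU read-side section */
static int sum_items(void)
{
        struct item *it;
        struct hlist_node *n;
        int sum = 0;

        rcu_read_lock();
        hlist_for_each_entry_rcu(it, n, &item_list, link)
                sum += it->val;
        rcu_read_unlock();

        return sum;
}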
@@ -1186,6 +1171,7 @@ static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
 
        spin_lock_irqsave(&ioc->lock, flags);
        radix_tree_delete(&ioc->radix_root, cic->dead_key);
+       hlist_del_rcu(&cic->cic_list);
        spin_unlock_irqrestore(&ioc->lock, flags);
 
        kmem_cache_free(cfq_ioc_pool, cic);
@@ -1276,6 +1262,7 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
        if (cic) {
                cic->last_end_request = jiffies;
                INIT_LIST_HEAD(&cic->queue_list);
+               INIT_HLIST_NODE(&cic->cic_list);
                cic->dtor = cfq_free_io_context;
                cic->exit = cfq_exit_io_context;
                elv_ioc_count_inc(ioc_count);
@@ -1294,28 +1281,28 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
 
        ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
        switch (ioprio_class) {
-               default:
-                       printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
-               case IOPRIO_CLASS_NONE:
-                       /*
-                        * no prio set, place us in the middle of the BE classes
-                        */
-                       cfqq->ioprio = task_nice_ioprio(tsk);
-                       cfqq->ioprio_class = IOPRIO_CLASS_BE;
-                       break;
-               case IOPRIO_CLASS_RT:
-                       cfqq->ioprio = task_ioprio(ioc);
-                       cfqq->ioprio_class = IOPRIO_CLASS_RT;
-                       break;
-               case IOPRIO_CLASS_BE:
-                       cfqq->ioprio = task_ioprio(ioc);
-                       cfqq->ioprio_class = IOPRIO_CLASS_BE;
-                       break;
-               case IOPRIO_CLASS_IDLE:
-                       cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
-                       cfqq->ioprio = 7;
-                       cfq_clear_cfqq_idle_window(cfqq);
-                       break;
+       default:
+               printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
+       case IOPRIO_CLASS_NONE:
+               /*
+                * no prio set, place us in the middle of the BE classes
+                */
+               cfqq->ioprio = task_nice_ioprio(tsk);
+               cfqq->ioprio_class = IOPRIO_CLASS_BE;
+               break;
+       case IOPRIO_CLASS_RT:
+               cfqq->ioprio = task_ioprio(ioc);
+               cfqq->ioprio_class = IOPRIO_CLASS_RT;
+               break;
+       case IOPRIO_CLASS_BE:
+               cfqq->ioprio = task_ioprio(ioc);
+               cfqq->ioprio_class = IOPRIO_CLASS_BE;
+               break;
+       case IOPRIO_CLASS_IDLE:
+               cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
+               cfqq->ioprio = 7;
+               cfq_clear_cfqq_idle_window(cfqq);
+               break;
        }
 
        /*
@@ -1427,7 +1414,7 @@ out:
 static struct cfq_queue **
 cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
 {
-       switch(ioprio_class) {
+       switch (ioprio_class) {
        case IOPRIO_CLASS_RT:
                return &cfqd->async_cfqq[0][ioprio];
        case IOPRIO_CLASS_BE:
@@ -1497,6 +1484,7 @@ cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
                rcu_assign_pointer(ioc->ioc_data, NULL);
 
        radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd);
+       hlist_del_rcu(&cic->cic_list);
        spin_unlock_irqrestore(&ioc->lock, flags);
 
        cfq_cic_free(cic);
@@ -1557,6 +1545,8 @@ static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
                spin_lock_irqsave(&ioc->lock, flags);
                ret = radix_tree_insert(&ioc->radix_root,
                                                (unsigned long) cfqd, cic);
+               if (!ret)
+                       hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
                spin_unlock_irqrestore(&ioc->lock, flags);
 
                radix_tree_preload_end();
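
For context, the new hlist_add_head_rcu() call sits inside the usual radix_tree_preload() bracket of cfq_cic_link(), keeping the hlist in step with the radix tree under ioc->lock. The surrounding logic looks roughly like this (a condensed sketch, not the complete function body):

        ret = radix_tree_preload(gfp_mask);
        if (!ret) {
                spin_lock_irqsave(&ioc->lock, flags);
                ret = radix_tree_insert(&ioc->radix_root,
                                                (unsigned long) cfqd, cic);
                /* mirror the new radix tree entry on the RCU hlist */
                if (!ret)
                        hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
                spin_unlock_irqrestore(&ioc->lock, flags);

                radix_tree_preload_end();
        }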
@@ -2018,7 +2008,8 @@ static void cfq_idle_slice_timer(unsigned long data)
 
        spin_lock_irqsave(cfqd->queue->queue_lock, flags);
 
-       if ((cfqq = cfqd->active_queue) != NULL) {
+       cfqq = cfqd->active_queue;
+       if (cfqq) {
                timed_out = 0;
 
                /*
@@ -2212,14 +2203,18 @@ static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)    \
        return ret;                                                     \
 }
 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
-STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1);
-STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1);
+STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
+               UINT_MAX, 1);
+STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
+               UINT_MAX, 1);
 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
-STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
+STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
+               UINT_MAX, 0);
 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
-STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0);
+STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
+               UINT_MAX, 0);
 #undef STORE_FUNCTION
 
 #define CFQ_ATTR(name) \