blkcg: remove unused __blkg_release_rcu()
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 992da5592c6ed14208116b794975c75c2b3986a1..2bed5725aa035e28c03225ffa4b8c4f8aeb9a85a 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -76,9 +76,6 @@ static void blkg_free(struct blkcg_gq *blkg)
                if (blkg->pd[i])
                        blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
 
-       if (blkg->blkcg != &blkcg_root)
-               blk_exit_rl(blkg->q, &blkg->rl);
-
        blkg_rwstat_exit(&blkg->stat_ios);
        blkg_rwstat_exit(&blkg->stat_bytes);
        kfree(blkg);
@@ -142,13 +139,6 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
        INIT_LIST_HEAD(&blkg->q_node);
        blkg->blkcg = blkcg;
 
-       /* root blkg uses @q->root_rl, init rl only for !root blkgs */
-       if (blkcg != &blkcg_root) {
-               if (blk_init_rl(&blkg->rl, q, gfp_mask))
-                       goto err_free;
-               blkg->rl.blkg = blkg;
-       }
-
        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
                struct blkg_policy_data *pd;
@@ -187,7 +177,7 @@ struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
        blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
        if (blkg && blkg->q == q) {
                if (update_hint) {
-                       lockdep_assert_held(q->queue_lock);
+                       lockdep_assert_held(&q->queue_lock);
                        rcu_assign_pointer(blkcg->blkg_hint, blkg);
                }
                return blkg;
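
The lockdep line above is the first instance of a pattern repeated throughout this patch: struct request_queue no longer carries a queue_lock pointer (historically the legacy request path let drivers point it at their own lock); the spinlock is embedded in the queue itself, so every user now takes its address. A minimal sketch of the calling convention, for illustration only and not part of the patch:

	spin_lock_irq(q->queue_lock);		/* old: q->queue_lock was a spinlock_t pointer */
	/* ... critical section ... */
	spin_unlock_irq(q->queue_lock);

	spin_lock_irq(&q->queue_lock);		/* new: queue_lock is a spinlock_t member */
	/* ... critical section ... */
	spin_unlock_irq(&q->queue_lock);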
@@ -210,7 +200,13 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
        int i, ret;
 
        WARN_ON_ONCE(!rcu_read_lock_held());
-       lockdep_assert_held(q->queue_lock);
+       lockdep_assert_held(&q->queue_lock);
+
+       /* request_queue is dying, do not create/recreate a blkg */
+       if (blk_queue_dying(q)) {
+               ret = -ENODEV;
+               goto err_free_blkg;
+       }
 
        /* blkg holds a reference to blkcg */
        if (!css_tryget_online(&blkcg->css)) {
@@ -305,7 +301,7 @@ err_free_blkg:
  * that all non-root blkg's have access to the parent blkg.  This function
  * should be called under RCU read lock and @q->queue_lock.
  *
- * Returns the blkg or the closest blkg if blkg_create fails as it walks
+ * Returns the blkg or the closest blkg if blkg_create() fails as it walks
  * down from root.
  */
 struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
@@ -314,14 +310,7 @@ struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
        struct blkcg_gq *blkg;
 
        WARN_ON_ONCE(!rcu_read_lock_held());
-       lockdep_assert_held(q->queue_lock);
-
-       /*
-        * This could be the first entry point of blkcg implementation and
-        * we shouldn't allow anything to go through for a bypassing queue.
-        */
-       if (unlikely(blk_queue_bypass(q)))
-               return q->root_blkg;
+       lockdep_assert_held(&q->queue_lock);
 
        blkg = __blkg_lookup(blkcg, q, true);
        if (blkg)
@@ -368,14 +357,13 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
                                    struct request_queue *q)
 {
        struct blkcg_gq *blkg = blkg_lookup(blkcg, q);
-       unsigned long flags;
 
        if (unlikely(!blkg)) {
-               spin_lock_irqsave(q->queue_lock, flags);
+               unsigned long flags;
 
+               spin_lock_irqsave(&q->queue_lock, flags);
                blkg = __blkg_lookup_create(blkcg, q);
-
-               spin_unlock_irqrestore(q->queue_lock, flags);
+               spin_unlock_irqrestore(&q->queue_lock, flags);
        }
 
        return blkg;
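
After this relocking, blkg_lookup_create() requires only that the caller hold the RCU read lock; the queue lock is taken internally on the slow (creation) path. A hedged sketch of the resulting caller pattern follows; blkcg and q are placeholders for whatever cgroup and queue the caller has resolved, and the comment reflects the behaviour documented above __blkg_lookup_create():

	struct blkcg_gq *blkg;

	rcu_read_lock();
	/* returns the blkg, or the closest ancestor blkg if creation fails */
	blkg = blkg_lookup_create(blkcg, q);
	/* ... charge/associate against blkg ... */
	rcu_read_unlock();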
@@ -387,7 +375,7 @@ static void blkg_destroy(struct blkcg_gq *blkg)
        struct blkcg_gq *parent = blkg->parent;
        int i;
 
-       lockdep_assert_held(blkg->q->queue_lock);
+       lockdep_assert_held(&blkg->q->queue_lock);
        lockdep_assert_held(&blkcg->lock);
 
        /* Something wrong if we are trying to remove same group twice */
@@ -437,8 +425,7 @@ static void blkg_destroy_all(struct request_queue *q)
 {
        struct blkcg_gq *blkg, *n;
 
-       lockdep_assert_held(q->queue_lock);
-
+       spin_lock_irq(&q->queue_lock);
        list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
                struct blkcg *blkcg = blkg->blkcg;
 
@@ -448,42 +435,7 @@ static void blkg_destroy_all(struct request_queue *q)
        }
 
        q->root_blkg = NULL;
-       q->root_rl.blkg = NULL;
-}
-
-/*
- * The next function used by blk_queue_for_each_rl().  It's a bit tricky
- * because the root blkg uses @q->root_rl instead of its own rl.
- */
-struct request_list *__blk_queue_next_rl(struct request_list *rl,
-                                        struct request_queue *q)
-{
-       struct list_head *ent;
-       struct blkcg_gq *blkg;
-
-       /*
-        * Determine the current blkg list_head.  The first entry is
-        * root_rl which is off @q->blkg_list and mapped to the head.
-        */
-       if (rl == &q->root_rl) {
-               ent = &q->blkg_list;
-               /* There are no more block groups, hence no request lists */
-               if (list_empty(ent))
-                       return NULL;
-       } else {
-               blkg = container_of(rl, struct blkcg_gq, rl);
-               ent = &blkg->q_node;
-       }
-
-       /* walk to the next list_head, skip root blkcg */
-       ent = ent->next;
-       if (ent == &q->root_blkg->q_node)
-               ent = ent->next;
-       if (ent == &q->blkg_list)
-               return NULL;
-
-       blkg = container_of(ent, struct blkcg_gq, q_node);
-       return &blkg->rl;
+       spin_unlock_irq(&q->queue_lock);
 }
 
 static int blkcg_reset_stats(struct cgroup_subsys_state *css,
@@ -525,7 +477,6 @@ const char *blkg_dev_name(struct blkcg_gq *blkg)
                return dev_name(blkg->q->backing_dev_info->dev);
        return NULL;
 }
-EXPORT_SYMBOL_GPL(blkg_dev_name);
 
 /**
  * blkcg_print_blkgs - helper for printing per-blkg data
@@ -556,10 +507,10 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
 
        rcu_read_lock();
        hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
-               spin_lock_irq(blkg->q->queue_lock);
+               spin_lock_irq(&blkg->q->queue_lock);
                if (blkcg_policy_enabled(blkg->q, pol))
                        total += prfill(sf, blkg->pd[pol->plid], data);
-               spin_unlock_irq(blkg->q->queue_lock);
+               spin_unlock_irq(&blkg->q->queue_lock);
        }
        rcu_read_unlock();
 
@@ -757,7 +708,7 @@ u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
        struct cgroup_subsys_state *pos_css;
        u64 sum = 0;
 
-       lockdep_assert_held(blkg->q->queue_lock);
+       lockdep_assert_held(&blkg->q->queue_lock);
 
        rcu_read_lock();
        blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
@@ -800,7 +751,7 @@ struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
        struct blkg_rwstat sum = { };
        int i;
 
-       lockdep_assert_held(blkg->q->queue_lock);
+       lockdep_assert_held(&blkg->q->queue_lock);
 
        rcu_read_lock();
        blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
@@ -831,18 +782,10 @@ static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
                                          struct request_queue *q)
 {
        WARN_ON_ONCE(!rcu_read_lock_held());
-       lockdep_assert_held(q->queue_lock);
+       lockdep_assert_held(&q->queue_lock);
 
        if (!blkcg_policy_enabled(q, pol))
                return ERR_PTR(-EOPNOTSUPP);
-
-       /*
-        * This could be the first entry point of blkcg implementation and
-        * we shouldn't allow anything to go through for a bypassing queue.
-        */
-       if (unlikely(blk_queue_bypass(q)))
-               return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);
-
        return __blkg_lookup(blkcg, q, true /* update_hint */);
 }
 
@@ -860,7 +803,7 @@ static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
  */
 int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                   char *input, struct blkg_conf_ctx *ctx)
-       __acquires(rcu) __acquires(disk->queue->queue_lock)
+       __acquires(rcu) __acquires(&disk->queue->queue_lock)
 {
        struct gendisk *disk;
        struct request_queue *q;
@@ -888,7 +831,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
        q = disk->queue;
 
        rcu_read_lock();
-       spin_lock_irq(q->queue_lock);
+       spin_lock_irq(&q->queue_lock);
 
        blkg = blkg_lookup_check(blkcg, pol, q);
        if (IS_ERR(blkg)) {
@@ -915,7 +858,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                }
 
                /* Drop locks to do new blkg allocation with GFP_KERNEL. */
-               spin_unlock_irq(q->queue_lock);
+               spin_unlock_irq(&q->queue_lock);
                rcu_read_unlock();
 
                new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
@@ -925,7 +868,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                }
 
                rcu_read_lock();
-               spin_lock_irq(q->queue_lock);
+               spin_lock_irq(&q->queue_lock);
 
                blkg = blkg_lookup_check(pos, pol, q);
                if (IS_ERR(blkg)) {
@@ -953,7 +896,7 @@ success:
        return 0;
 
 fail_unlock:
-       spin_unlock_irq(q->queue_lock);
+       spin_unlock_irq(&q->queue_lock);
        rcu_read_unlock();
 fail:
        put_disk_and_module(disk);
@@ -969,7 +912,6 @@ fail:
        }
        return ret;
 }
-EXPORT_SYMBOL_GPL(blkg_conf_prep);
 
 /**
  * blkg_conf_finish - finish up per-blkg config update
@@ -979,13 +921,12 @@ EXPORT_SYMBOL_GPL(blkg_conf_prep);
  * with blkg_conf_prep().
  */
 void blkg_conf_finish(struct blkg_conf_ctx *ctx)
-       __releases(ctx->disk->queue->queue_lock) __releases(rcu)
+       __releases(&ctx->disk->queue->queue_lock) __releases(rcu)
 {
-       spin_unlock_irq(ctx->disk->queue->queue_lock);
+       spin_unlock_irq(&ctx->disk->queue->queue_lock);
        rcu_read_unlock();
        put_disk_and_module(ctx->disk);
 }
-EXPORT_SYMBOL_GPL(blkg_conf_finish);
 
 static int blkcg_print_stat(struct seq_file *sf, void *v)
 {
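
blkg_conf_prep()/blkg_conf_finish() keep their usual shape for policy ->write handlers even though they now acquire and release &disk->queue->queue_lock. A minimal hedged sketch of that pattern, where blkcg, buf and blkcg_policy_foo are placeholders (the last standing in for whichever registered blkcg_policy is being configured):

	struct blkg_conf_ctx ctx;
	int ret;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
	if (ret)
		return ret;
	/* rcu and ctx.disk->queue->queue_lock are held; ctx.blkg and ctx.body are valid */
	/* ... parse ctx.body and update ctx.blkg->pd[plid] ... */
	blkg_conf_finish(&ctx);	/* drops the lock and rcu, puts the disk reference */
	return 0;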
@@ -1015,7 +956,7 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
                 */
                off += scnprintf(buf+off, size-off, "%s ", dname);
 
-               spin_lock_irq(blkg->q->queue_lock);
+               spin_lock_irq(&blkg->q->queue_lock);
 
                rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
                                        offsetof(struct blkcg_gq, stat_bytes));
@@ -1029,7 +970,7 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
                wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);
                dios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_DISCARD]);
 
-               spin_unlock_irq(blkg->q->queue_lock);
+               spin_unlock_irq(&blkg->q->queue_lock);
 
                if (rbytes || wbytes || rios || wios) {
                        has_stats = true;
@@ -1150,9 +1091,9 @@ void blkcg_destroy_blkgs(struct blkcg *blkcg)
                                                struct blkcg_gq, blkcg_node);
                struct request_queue *q = blkg->q;
 
-               if (spin_trylock(q->queue_lock)) {
+               if (spin_trylock(&q->queue_lock)) {
                        blkg_destroy(blkg);
-                       spin_unlock(q->queue_lock);
+                       spin_unlock(&q->queue_lock);
                } else {
                        spin_unlock_irq(&blkcg->lock);
                        cpu_relax();
@@ -1273,36 +1214,31 @@ int blkcg_init_queue(struct request_queue *q)
 
        /* Make sure the root blkg exists. */
        rcu_read_lock();
-       spin_lock_irq(q->queue_lock);
+       spin_lock_irq(&q->queue_lock);
        blkg = blkg_create(&blkcg_root, q, new_blkg);
        if (IS_ERR(blkg))
                goto err_unlock;
        q->root_blkg = blkg;
-       q->root_rl.blkg = blkg;
-       spin_unlock_irq(q->queue_lock);
+       spin_unlock_irq(&q->queue_lock);
        rcu_read_unlock();
 
        if (preloaded)
                radix_tree_preload_end();
 
        ret = blk_iolatency_init(q);
-       if (ret) {
-               spin_lock_irq(q->queue_lock);
-               blkg_destroy_all(q);
-               spin_unlock_irq(q->queue_lock);
-               return ret;
-       }
+       if (ret)
+               goto err_destroy_all;
 
        ret = blk_throtl_init(q);
-       if (ret) {
-               spin_lock_irq(q->queue_lock);
-               blkg_destroy_all(q);
-               spin_unlock_irq(q->queue_lock);
-       }
-       return ret;
+       if (ret)
+               goto err_destroy_all;
+       return 0;
 
+err_destroy_all:
+       blkg_destroy_all(q);
+       return ret;
 err_unlock:
-       spin_unlock_irq(q->queue_lock);
+       spin_unlock_irq(&q->queue_lock);
        rcu_read_unlock();
        if (preloaded)
                radix_tree_preload_end();
@@ -1317,7 +1253,7 @@ err_unlock:
  */
 void blkcg_drain_queue(struct request_queue *q)
 {
-       lockdep_assert_held(q->queue_lock);
+       lockdep_assert_held(&q->queue_lock);
 
        /*
         * @q could be exiting and already have destroyed all blkgs as
@@ -1337,10 +1273,7 @@ void blkcg_drain_queue(struct request_queue *q)
  */
 void blkcg_exit_queue(struct request_queue *q)
 {
-       spin_lock_irq(q->queue_lock);
        blkg_destroy_all(q);
-       spin_unlock_irq(q->queue_lock);
-
        blk_throtl_exit(q);
 }
 
@@ -1444,10 +1377,8 @@ int blkcg_activate_policy(struct request_queue *q,
        if (blkcg_policy_enabled(q, pol))
                return 0;
 
-       if (q->mq_ops)
+       if (queue_is_mq(q))
                blk_mq_freeze_queue(q);
-       else
-               blk_queue_bypass_start(q);
 pd_prealloc:
        if (!pd_prealloc) {
                pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
@@ -1457,7 +1388,7 @@ pd_prealloc:
                }
        }
 
-       spin_lock_irq(q->queue_lock);
+       spin_lock_irq(&q->queue_lock);
 
        list_for_each_entry(blkg, &q->blkg_list, q_node) {
                struct blkg_policy_data *pd;
@@ -1469,7 +1400,7 @@ pd_prealloc:
                if (!pd)
                        swap(pd, pd_prealloc);
                if (!pd) {
-                       spin_unlock_irq(q->queue_lock);
+                       spin_unlock_irq(&q->queue_lock);
                        goto pd_prealloc;
                }
 
@@ -1483,12 +1414,10 @@ pd_prealloc:
        __set_bit(pol->plid, q->blkcg_pols);
        ret = 0;
 
-       spin_unlock_irq(q->queue_lock);
+       spin_unlock_irq(&q->queue_lock);
 out_bypass_end:
-       if (q->mq_ops)
+       if (queue_is_mq(q))
                blk_mq_unfreeze_queue(q);
-       else
-               blk_queue_bypass_end(q);
        if (pd_prealloc)
                pol->pd_free_fn(pd_prealloc);
        return ret;
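
With the legacy request path gone there is no bypass mode to enter or leave; the only queues whose dispatch needs quiescing while per-blkg policy data is installed or torn down are blk-mq queues, hence the queue_is_mq() checks here and in blkcg_deactivate_policy() below. The resulting bracket, sketched schematically rather than copied from the patch:

	if (queue_is_mq(q))
		blk_mq_freeze_queue(q);
	spin_lock_irq(&q->queue_lock);
	/* ... install or remove blkg->pd[pol->plid] for each blkg on the queue ... */
	spin_unlock_irq(&q->queue_lock);
	if (queue_is_mq(q))
		blk_mq_unfreeze_queue(q);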
@@ -1511,12 +1440,10 @@ void blkcg_deactivate_policy(struct request_queue *q,
        if (!blkcg_policy_enabled(q, pol))
                return;
 
-       if (q->mq_ops)
+       if (queue_is_mq(q))
                blk_mq_freeze_queue(q);
-       else
-               blk_queue_bypass_start(q);
 
-       spin_lock_irq(q->queue_lock);
+       spin_lock_irq(&q->queue_lock);
 
        __clear_bit(pol->plid, q->blkcg_pols);
 
@@ -1529,12 +1456,10 @@ void blkcg_deactivate_policy(struct request_queue *q,
                }
        }
 
-       spin_unlock_irq(q->queue_lock);
+       spin_unlock_irq(&q->queue_lock);
 
-       if (q->mq_ops)
+       if (queue_is_mq(q))
                blk_mq_unfreeze_queue(q);
-       else
-               blk_queue_bypass_end(q);
 }
 EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
 
@@ -1808,7 +1733,6 @@ out:
        rcu_read_unlock();
        blk_put_queue(q);
 }
-EXPORT_SYMBOL_GPL(blkcg_maybe_throttle_current);
 
 /**
  * blkcg_schedule_throttle - this task needs to check for throttling
@@ -1842,7 +1766,6 @@ void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
                current->use_memdelay = use_memdelay;
        set_notify_resume(current);
 }
-EXPORT_SYMBOL_GPL(blkcg_schedule_throttle);
 
 /**
  * blkcg_add_delay - add delay to this blkg
@@ -1857,7 +1780,6 @@ void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta)
        blkcg_scale_delay(blkg, now);
        atomic64_add(delta, &blkg->delay_nsec);
 }
-EXPORT_SYMBOL_GPL(blkcg_add_delay);
 
 module_param(blkcg_debug_stats, bool, 0644);
 MODULE_PARM_DESC(blkcg_debug_stats, "True if you want debug stats, false if not");