bdi: Mark congested->bdi as internal
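The point of the rename below is that congested->bdi is only valid while the owning bdi is registered: the bdi clears the back-pointer (under cgwb_lock) during its own teardown, so code outside mm/backing-dev.c must not rely on it, and the double-underscore prefix flags it as internal. Note that this blobdiff page appears to fold in neighbouring commits from the same series (the cgwb_bdi_destroy()/cgwb_bdi_exit() split and the bdi_register_owner() WARN), not just the rename named in the subject. The companion hunk in include/linux/backing-dev-defs.h is not part of this page; presumably it reads roughly like this sketch:

	struct bdi_writeback_congested {
		unsigned long state;		/* WB_[a]sync_congested flags */
		atomic_t refcnt;		/* nr of attached wb's and blkg */

	#ifdef CONFIG_CGROUP_WRITEBACK
		struct backing_dev_info *__bdi;	/* the associated bdi, set to
						 * NULL on bdi unregistration;
						 * internal use only */
		int blkcg_id;			/* ID of the associated blkcg */
		struct rb_node rb_node;		/* on bdi->cgwb_congested_tree */
	#endif
	};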
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 6d861d090e9fc79d39e2b48f57b1d9f4bc91463f..12408f86783ccd40370c5b653f501924f735b65a 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -438,7 +438,7 @@ retry:
                return NULL;
 
        atomic_set(&new_congested->refcnt, 0);
-       new_congested->bdi = bdi;
+       new_congested->__bdi = bdi;
        new_congested->blkcg_id = blkcg_id;
        goto retry;
 
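This first hunk sits in the allocation path of wb_congested_get_create(): the rbtree lookup under cgwb_lock came up empty, so the lock was dropped, a new node was allocated and initialized (now pointing its __bdi back-pointer at the owning bdi), and the code jumps back to retry the lookup, inserting the node only if the slot is still free. Below is a generic userspace model of that lookup-or-allocate retry idiom; the names (table, find, get_create) are illustrative stand-ins, not kernel API:

	#include <pthread.h>
	#include <stdlib.h>

	struct node { int key; struct node *next; };

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static struct node *table;

	static struct node *find(int key)
	{
		struct node *n;

		for (n = table; n; n = n->next)
			if (n->key == key)
				return n;
		return NULL;
	}

	static struct node *get_create(int key)
	{
		struct node *new = NULL, *n;
	retry:
		pthread_mutex_lock(&lock);
		n = find(key);
		if (!n && new) {
			new->next = table;	/* insert preallocated node */
			table = new;
			n = new;
			new = NULL;
		}
		pthread_mutex_unlock(&lock);
		if (!n) {
			new = malloc(sizeof(*new));	/* allocate outside the lock */
			if (!new)
				return NULL;
			new->key = key;			/* initialize, then retry */
			goto retry;
		}
		free(new);	/* NULL unless we lost the insertion race */
		return n;
	}

	int main(void)
	{
		return get_create(42) ? 0 : 1;
	}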
@@ -466,10 +466,10 @@ void wb_congested_put(struct bdi_writeback_congested *congested)
        }
 
        /* bdi might already have been destroyed leaving @congested unlinked */
-       if (congested->bdi) {
+       if (congested->__bdi) {
                rb_erase(&congested->rb_node,
-                        &congested->bdi->cgwb_congested_tree);
-               congested->bdi = NULL;
+                        &congested->__bdi->cgwb_congested_tree);
+               congested->__bdi = NULL;
        }
 
        spin_unlock_irqrestore(&cgwb_lock, flags);
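wb_congested_put() is the release side of the same contract: every access to __bdi, read or write, happens under cgwb_lock, so a non-NULL __bdi here means the node is still on the bdi's cgwb_congested_tree and must be erased now, while NULL means the bdi side already unlinked it during its own teardown (see the comment above the check). That lock-protected NULLing is exactly why no code outside these paths may dereference the field, hence the double-underscore name.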
@@ -683,33 +683,26 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
 static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
 {
        struct radix_tree_iter iter;
-       struct rb_node *rbn;
        void **slot;
 
        WARN_ON(test_bit(WB_registered, &bdi->wb.state));
 
        spin_lock_irq(&cgwb_lock);
-
        radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
                cgwb_kill(*slot);
-
-       while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
-               struct bdi_writeback_congested *congested =
-                       rb_entry(rbn, struct bdi_writeback_congested, rb_node);
-
-               rb_erase(rbn, &bdi->cgwb_congested_tree);
-               congested->bdi = NULL;  /* mark @congested unlinked */
-       }
-
        spin_unlock_irq(&cgwb_lock);
 
        /*
-        * All cgwb's and their congested states must be shutdown and
-        * released before returning.  Drain the usage counter to wait for
-        * all cgwb's and cgwb_congested's ever created on @bdi.
+        * All cgwb's must be shut down and released before returning.  Drain
+        * the usage counter to wait for all cgwb's ever created on @bdi.
         */
        atomic_dec(&bdi->usage_cnt);
        wait_event(cgwb_release_wait, !atomic_read(&bdi->usage_cnt));
+       /*
+        * Grab back our reference so that we hold it when @bdi gets
+        * re-registered.
+        */
+       atomic_inc(&bdi->usage_cnt);
 }
 
 /**
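Here cgwb_bdi_destroy() loses the congested-tree walk (it resurfaces below as cgwb_bdi_exit()) and gains a re-grab of the usage count after the drain, so the bdi again holds its own reference in case it is re-registered later. A minimal userspace model of that drain-then-regrab step, using C11 atomics and a busy-wait as stand-ins for the kernel's atomic_t and wait_event():

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int usage_cnt = 1;	/* the bdi's own reference */

	static void drain_and_regrab(void)
	{
		/* drop our reference, then wait until every other holder is gone */
		atomic_fetch_sub(&usage_cnt, 1);
		while (atomic_load(&usage_cnt) != 0)
			;	/* busy-wait stand-in for wait_event() */
		/* take the reference back so re-registration still holds it */
		atomic_fetch_add(&usage_cnt, 1);
	}

	int main(void)
	{
		drain_and_regrab();
		printf("usage_cnt = %d\n", atomic_load(&usage_cnt));
		return 0;
	}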
@@ -749,6 +742,21 @@ void wb_blkcg_offline(struct blkcg *blkcg)
        spin_unlock_irq(&cgwb_lock);
 }
 
+static void cgwb_bdi_exit(struct backing_dev_info *bdi)
+{
+       struct rb_node *rbn;
+
+       spin_lock_irq(&cgwb_lock);
+       while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
+               struct bdi_writeback_congested *congested =
+                       rb_entry(rbn, struct bdi_writeback_congested, rb_node);
+
+               rb_erase(rbn, &bdi->cgwb_congested_tree);
+               congested->__bdi = NULL;        /* mark @congested unlinked */
+       }
+       spin_unlock_irq(&cgwb_lock);
+}
+
 #else  /* CONFIG_CGROUP_WRITEBACK */
 
 static int cgwb_bdi_init(struct backing_dev_info *bdi)
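The new cgwb_bdi_exit() pops nodes with rb_first() and maps each embedded rb_node back to its bdi_writeback_congested with rb_entry(), which is simply container_of(). A self-contained demonstration of that back-mapping, with generic names and a simplified container_of (the kernel's version adds type checking):

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct list_node { struct list_node *next; };

	struct item {
		int value;
		struct list_node node;	/* embedded, like rb_node above */
	};

	int main(void)
	{
		struct item it = { .value = 7 };
		struct list_node *n = &it.node;

		/* map the embedded node back to its container */
		struct item *back = container_of(n, struct item, node);
		printf("%d\n", back->value);	/* prints 7 */
		return 0;
	}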
@@ -769,7 +777,9 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
        return 0;
 }
 
-static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
+static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { }
+
+static void cgwb_bdi_exit(struct backing_dev_info *bdi)
 {
        wb_congested_put(bdi->wb_congested);
 }
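With CONFIG_CGROUP_WRITEBACK disabled there is no congested tree, only the single bdi->wb_congested reference, so cgwb_bdi_destroy() collapses to a no-op and the final wb_congested_put() moves into cgwb_bdi_exit(), mirroring the cgroup case: nothing congestion-related is released until the bdi itself is torn down for good.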
@@ -857,6 +867,8 @@ int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner)
                        MINOR(owner->devt));
        if (rc)
                return rc;
+       /* An already-set owner would be overwritten here, leaking its reference */
+       WARN_ON(bdi->owner);
        bdi->owner = owner;
        get_device(owner);
        return 0;
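bdi_register_owner() pins the owning device with get_device(); the matching put_device() is presumably the one in bdi_unregister(). The new WARN_ON() makes the failure mode loud: registering a second owner without unregistering first would overwrite bdi->owner and leak the first owner's device reference.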
@@ -898,6 +910,7 @@ static void bdi_exit(struct backing_dev_info *bdi)
 {
        WARN_ON_ONCE(bdi->dev);
        wb_exit(&bdi->wb);
+       cgwb_bdi_exit(bdi);
 }
 
 static void release_bdi(struct kref *ref)
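The final hunk hooks the new helper into bdi_exit(), which release_bdi() runs when the last reference to the bdi is dropped. Sketching the resulting split (callers not visible on this page, such as bdi_unregister() invoking cgwb_bdi_destroy(), are assumed from context):

	bdi_unregister()
	    -> cgwb_bdi_destroy()	/* kill cgwbs, drain and re-grab usage_cnt */

	bdi_put() -> release_bdi()
	    -> bdi_exit()
	        -> cgwb_bdi_exit()	/* unlink congested nodes, clear __bdi */

So unregistration no longer forgets the congested nodes; they stay linked until the bdi is finally released, and only that last step clears the now-internal __bdi back-pointers.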