Merge branch 'for-3.14-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj...
[sfrench/cifs-2.6.git] / kernel/cgroup.c
index 52719ce55dd3dfefd24e8084e181de3d0ed95ea2..0c753ddd223bf9d543dfbb2d89915a08ab537f46 100644
@@ -2905,9 +2905,14 @@ static void cgroup_enable_task_cg_lists(void)
                 * We should check if the process is exiting, otherwise
                 * it will race with cgroup_exit() in that the list
                 * entry won't be deleted though the process has exited.
+                * Do it while holding siglock so that we don't end up
+                * racing against cgroup_exit().
                 */
+               spin_lock_irq(&p->sighand->siglock);
                if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list))
                        list_add(&p->cg_list, &task_css_set(p)->tasks);
+               spin_unlock_irq(&p->sighand->siglock);
+
                task_unlock(p);
        } while_each_thread(g, p);
        read_unlock(&tasklist_lock);
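The hunk above widens the critical section so that the PF_EXITING test and the list_add() happen atomically with respect to the exit path. Below is a minimal user-space sketch of that pattern, not kernel code: the names task, task_exit() and enable_lists() are illustrative stand-ins for task_struct, cgroup_exit() and cgroup_enable_task_cg_lists(), and a pthread mutex stands in for sighand->siglock.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct task {
	pthread_mutex_t lock;   /* stands in for p->sighand->siglock      */
	bool exiting;           /* stands in for PF_EXITING               */
	bool linked;            /* stands in for !list_empty(&p->cg_list) */
};

/* exit path: mark the task exiting and unlink it, all under the lock */
static void task_exit(struct task *t)
{
	pthread_mutex_lock(&t->lock);
	t->exiting = true;
	t->linked = false;      /* the exit path's one and only unlink    */
	pthread_mutex_unlock(&t->lock);
}

/* enable path: link the task only if it has not started exiting */
static void enable_lists(struct task *t)
{
	pthread_mutex_lock(&t->lock);
	if (!t->exiting && !t->linked)
		t->linked = true;   /* exit path cannot run in between    */
	pthread_mutex_unlock(&t->lock);
}

int main(void)
{
	static struct task t = { .lock = PTHREAD_MUTEX_INITIALIZER };

	enable_lists(&t);
	task_exit(&t);
	printf("linked=%d exiting=%d\n", t.linked, t.exiting);
	return 0;
}

Because both paths take the same lock, either the exit path runs first (and the enable path sees exiting set and skips the link) or the enable path links first (and the exit path unlinks it); a stale entry can no longer be left behind.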
@@ -4107,17 +4112,17 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
 
        err = percpu_ref_init(&css->refcnt, css_release);
        if (err)
-               goto err_free;
+               goto err_free_css;
 
        init_css(css, ss, cgrp);
 
        err = cgroup_populate_dir(cgrp, 1 << ss->subsys_id);
        if (err)
-               goto err_free;
+               goto err_free_percpu_ref;
 
        err = online_css(css);
        if (err)
-               goto err_free;
+               goto err_clear_dir;
 
        dget(cgrp->dentry);
        css_get(css->parent);
@@ -4133,8 +4138,11 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
 
        return 0;
 
-err_free:
+err_clear_dir:
+       cgroup_clear_dir(css->cgroup, 1 << css->ss->subsys_id);
+err_free_percpu_ref:
        percpu_ref_cancel_init(&css->refcnt);
+err_free_css:
        ss->css_free(css);
        return err;
 }
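The relabelled gotos above give create_css() the standard layered-unwind shape: each failure point jumps to a label that undoes exactly the steps that have already succeeded, in reverse order. Here is a self-contained sketch of the idiom with purely illustrative resources (three malloc() steps standing in for percpu_ref_init(), cgroup_populate_dir() and online_css()):

#include <stdio.h>
#include <stdlib.h>

/* Each err_* label cleans up one layer and falls through to the next,
 * so a failure at step N releases exactly the N-1 earlier resources. */
static int setup(void)
{
	void *a, *b, *c;
	int err = -1;

	a = malloc(16);
	if (!a)
		goto err_out;           /* nothing acquired yet */

	b = malloc(16);
	if (!b)
		goto err_free_a;        /* undo step 1 only */

	c = malloc(16);
	if (!c)
		goto err_free_b;        /* undo steps 2 and 1 */

	printf("all three steps succeeded\n");
	free(c);
	free(b);
	free(a);
	return 0;

err_free_b:
	free(b);
err_free_a:
	free(a);
err_out:
	return err;
}

int main(void)
{
	return setup() ? EXIT_FAILURE : EXIT_SUCCESS;
}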
@@ -4844,16 +4852,12 @@ static int __init cgroup_wq_init(void)
        /*
         * There isn't much point in executing destruction path in
         * parallel.  Good chunk is serialized with cgroup_mutex anyway.
-        *
-        * XXX: Must be ordered to make sure parent is offlined after
-        * children.  The ordering requirement is for memcg where a
-        * parent's offline may wait for a child's leading to deadlock.  In
-        * the long term, this should be fixed from memcg side.
+        * Use 1 for @max_active.
         *
         * We would prefer to do this in cgroup_init() above, but that
         * is called before init_workqueues(): so leave this until after.
         */
-       cgroup_destroy_wq = alloc_ordered_workqueue("cgroup_destroy", 0);
+       cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
        BUG_ON(!cgroup_destroy_wq);
 
        /*
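The last hunk swaps the ordered workqueue for a plain one with @max_active limited to 1. A rough sketch of the two allocation flavours follows (kernel-module context only, not user-space runnable; the names wq_plain and wq_ordered are illustrative): alloc_workqueue(..., 0, 1) bounds concurrency to one in-flight item per CPU but makes no ordering promise, while alloc_ordered_workqueue() runs items strictly one at a time in queueing order, which is what the deleted XXX comment relied on for the memcg parent/child offline dependency.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *wq_plain, *wq_ordered;

static int __init wq_demo_init(void)
{
	/* Per-CPU bound workqueue, at most one item in flight per CPU;
	 * items queued on different CPUs may still run concurrently and
	 * complete out of order. */
	wq_plain = alloc_workqueue("demo_plain", 0, 1);

	/* Ordered workqueue: exactly one item at a time, executed in the
	 * order the items were queued. */
	wq_ordered = alloc_ordered_workqueue("demo_ordered", 0);

	if (!wq_plain || !wq_ordered) {
		if (wq_plain)
			destroy_workqueue(wq_plain);
		if (wq_ordered)
			destroy_workqueue(wq_ordered);
		return -ENOMEM;
	}
	return 0;
}

static void __exit wq_demo_exit(void)
{
	destroy_workqueue(wq_ordered);
	destroy_workqueue(wq_plain);
}

module_init(wq_demo_init);
module_exit(wq_demo_exit);
MODULE_LICENSE("GPL");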