memcg: don't trigger oom at page migration
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 8ce4e9e4795960e5258f3c77424fb209094b9d7a..3ba72e6556ccac9f3fdf034c8a4b5f6fd2036d50 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -103,6 +103,8 @@ struct mem_cgroup_per_zone {
         */
        struct list_head        lists[NR_LRU_LISTS];
        unsigned long           count[NR_LRU_LISTS];
+
+       struct zone_reclaim_stat reclaim_stat;
 };
 /* Macro for accessing counter */
 #define MEM_CGROUP_ZSTAT(mz, idx)      ((mz)->count[(idx)])
@@ -142,9 +144,28 @@ struct mem_cgroup {
         */
        struct mem_cgroup_lru_info info;
 
+       /*
+        * Protects reclaim-related members (e.g. prev_priority,
+        * swappiness).
+        */
+       spinlock_t reclaim_param_lock;
+
        int     prev_priority;  /* for recording reclaim priority */
+
+       /*
+        * While reclaiming in a hierarchy, we cache the last child we
+        * reclaimed from. Protected by cgroup_lock()
+        */
+       struct mem_cgroup *last_scanned_child;
+       /*
+        * Should the accounting and control be hierarchical, per subtree?
+        */
+       bool use_hierarchy;
+       unsigned long   last_oom_jiffies;
        int             obsolete;
        atomic_t        refcnt;
+
+       unsigned int    swappiness;
+
        /*
         * statistics. This must be placed at the end of memcg.
         */
@@ -172,7 +193,6 @@ pcg_default_flags[NR_CHARGE_TYPE] = {
        0, /* FORCE */
 };
 
-
 /* for encoding cft->private value on file */
 #define _MEM                   (0)
 #define _MEMSWAP               (1)
@@ -220,6 +240,9 @@ page_cgroup_zoneinfo(struct page_cgroup *pc)
        int nid = page_cgroup_nid(pc);
        int zid = page_cgroup_zid(pc);
 
+       if (!mem)
+               return NULL;
+
        return mem_cgroup_zoneinfo(mem, nid, zid);
 }
 
@@ -279,7 +302,7 @@ void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
        struct mem_cgroup *mem;
        struct mem_cgroup_per_zone *mz;
 
-       if (mem_cgroup_subsys.disabled)
+       if (mem_cgroup_disabled())
                return;
        pc = lookup_page_cgroup(page);
        /* can happen while we handle swapcache. */
@@ -302,7 +325,7 @@ void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
        struct mem_cgroup_per_zone *mz;
        struct page_cgroup *pc;
 
-       if (mem_cgroup_subsys.disabled)
+       if (mem_cgroup_disabled())
                return;
 
        pc = lookup_page_cgroup(page);
@@ -319,7 +342,7 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
        struct page_cgroup *pc;
        struct mem_cgroup_per_zone *mz;
 
-       if (mem_cgroup_subsys.disabled)
+       if (mem_cgroup_disabled())
                return;
        pc = lookup_page_cgroup(page);
        /* barrier to sync with "charge" */
@@ -344,7 +367,7 @@ static void mem_cgroup_lru_fixup(struct page *page)
 void mem_cgroup_move_lists(struct page *page,
                           enum lru_list from, enum lru_list to)
 {
-       if (mem_cgroup_subsys.disabled)
+       if (mem_cgroup_disabled())
                return;
        mem_cgroup_del_lru_list(page, from);
        mem_cgroup_add_lru_list(page, to);
@@ -382,39 +405,108 @@ int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
  */
 int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
 {
-       return mem->prev_priority;
+       int prev_priority;
+
+       spin_lock(&mem->reclaim_param_lock);
+       prev_priority = mem->prev_priority;
+       spin_unlock(&mem->reclaim_param_lock);
+
+       return prev_priority;
 }
 
 void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
 {
+       spin_lock(&mem->reclaim_param_lock);
        if (priority < mem->prev_priority)
                mem->prev_priority = priority;
+       spin_unlock(&mem->reclaim_param_lock);
 }
 
 void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
 {
+       spin_lock(&mem->reclaim_param_lock);
        mem->prev_priority = priority;
+       spin_unlock(&mem->reclaim_param_lock);
 }
 
-/*
- * Calculate # of pages to be scanned in this priority/zone.
- * See also vmscan.c
- *
- * priority starts from "DEF_PRIORITY" and decremented in each loop.
- * (see include/linux/mmzone.h)
- */
+static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
+{
+       unsigned long active;
+       unsigned long inactive;
+       unsigned long gb;
+       unsigned long inactive_ratio;
+
+       inactive = mem_cgroup_get_all_zonestat(memcg, LRU_INACTIVE_ANON);
+       active = mem_cgroup_get_all_zonestat(memcg, LRU_ACTIVE_ANON);
 
-long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
-                                       int priority, enum lru_list lru)
+       gb = (inactive + active) >> (30 - PAGE_SHIFT);
+       if (gb)
+               inactive_ratio = int_sqrt(10 * gb);
+       else
+               inactive_ratio = 1;
+
+       if (present_pages) {
+               present_pages[0] = inactive;
+               present_pages[1] = active;
+       }
+
+       return inactive_ratio;
+}
+
+int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
+{
+       unsigned long active;
+       unsigned long inactive;
+       unsigned long present_pages[2];
+       unsigned long inactive_ratio;
+
+       inactive_ratio = calc_inactive_ratio(memcg, present_pages);
+
+       inactive = present_pages[0];
+       active = present_pages[1];
+
+       if (inactive * inactive_ratio < active)
+               return 1;
+
+       return 0;
+}
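
The inactive_ratio heuristic above mirrors the global per-zone one: for N
gigabytes of anon pages, the active list may outweigh the inactive list by
roughly sqrt(10 * N) before the inactive list counts as "low". A minimal
userspace sketch of the same arithmetic (int_sqrt() replaced by sqrt(),
4 KiB pages assumed; illustrative only, not part of the patch):

    #include <math.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumes 4 KiB pages */

    static unsigned long inactive_ratio(unsigned long inactive,
                                        unsigned long active)
    {
            unsigned long gb = (inactive + active) >> (30 - PAGE_SHIFT);

            return gb ? (unsigned long)sqrt(10.0 * gb) : 1;
    }

    int main(void)
    {
            /* 1 GiB inactive + 3 GiB active anon -> gb = 4, ratio = 6;
             * 6 * inactive >= active, so the list is not yet "low". */
            unsigned long inactive = 1UL << 18, active = 3UL << 18;
            unsigned long ratio = inactive_ratio(inactive, active);

            printf("ratio=%lu low=%d\n", ratio,
                   inactive * ratio < active);
            return 0;
    }
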
+
+unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
+                                      struct zone *zone,
+                                      enum lru_list lru)
 {
-       long nr_pages;
        int nid = zone->zone_pgdat->node_id;
        int zid = zone_idx(zone);
-       struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
+       struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
 
-       nr_pages = MEM_CGROUP_ZSTAT(mz, lru);
+       return MEM_CGROUP_ZSTAT(mz, lru);
+}
 
-       return (nr_pages >> priority);
+struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
+                                                     struct zone *zone)
+{
+       int nid = zone->zone_pgdat->node_id;
+       int zid = zone_idx(zone);
+       struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
+
+       return &mz->reclaim_stat;
+}
+
+struct zone_reclaim_stat *
+mem_cgroup_get_reclaim_stat_from_page(struct page *page)
+{
+       struct page_cgroup *pc;
+       struct mem_cgroup_per_zone *mz;
+
+       if (mem_cgroup_disabled())
+               return NULL;
+
+       pc = lookup_page_cgroup(page);
+       mz = page_cgroup_zoneinfo(pc);
+       if (!mz)
+               return NULL;
+
+       return &mz->reclaim_stat;
 }
 
 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
@@ -461,6 +553,197 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
        return nr_taken;
 }
 
+#define mem_cgroup_from_res_counter(counter, member)   \
+       container_of(counter, struct mem_cgroup, member)
+
+/*
+ * Find the DFS walk successor of curr within root_mem's subtree. Must
+ * be called with cgroup_mutex held.
+ */
+static struct mem_cgroup *
+mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem)
+{
+       struct cgroup *cgroup, *curr_cgroup, *root_cgroup;
+
+       curr_cgroup = curr->css.cgroup;
+       root_cgroup = root_mem->css.cgroup;
+
+       if (!list_empty(&curr_cgroup->children)) {
+               /*
+                * Walk down to children
+                */
+               mem_cgroup_put(curr);
+               cgroup = list_entry(curr_cgroup->children.next,
+                                               struct cgroup, sibling);
+               curr = mem_cgroup_from_cont(cgroup);
+               mem_cgroup_get(curr);
+               goto done;
+       }
+
+visit_parent:
+       if (curr_cgroup == root_cgroup) {
+               mem_cgroup_put(curr);
+               curr = root_mem;
+               mem_cgroup_get(curr);
+               goto done;
+       }
+
+       /*
+        * Goto next sibling
+        */
+       if (curr_cgroup->sibling.next != &curr_cgroup->parent->children) {
+               mem_cgroup_put(curr);
+               cgroup = list_entry(curr_cgroup->sibling.next, struct cgroup,
+                                               sibling);
+               curr = mem_cgroup_from_cont(cgroup);
+               mem_cgroup_get(curr);
+               goto done;
+       }
+
+       /*
+        * Go up to next parent and next parent's sibling if need be
+        */
+       curr_cgroup = curr_cgroup->parent;
+       goto visit_parent;
+
+done:
+       root_mem->last_scanned_child = curr;
+       return curr;
+}
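
mem_cgroup_get_next_node() is an iterative preorder (DFS) successor:
descend to the first child, else step to the next sibling, else climb
until some ancestor has a next sibling, wrapping back to the root once
the subtree is exhausted. A sketch of the bare traversal on a
hypothetical sibling-linked tree, with the refcounting stripped out:

    struct node {
            struct node *parent;
            struct node *first_child;   /* NULL for a leaf */
            struct node *next_sibling;  /* NULL for the last child */
    };

    /* Preorder successor of curr within the subtree rooted at root. */
    static struct node *preorder_next(struct node *curr, struct node *root)
    {
            if (curr->first_child)
                    return curr->first_child;

            while (curr != root) {
                    if (curr->next_sibling)
                            return curr->next_sibling;
                    curr = curr->parent;
            }
            return root;    /* wrapped, like returning root_mem above */
    }
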
+
+/*
+ * Visit the first child of @root_mem (not necessarily the first child in
+ * the cgroup list, since we track last_scanned_child) and reclaim pages
+ * from it.
+ */
+static struct mem_cgroup *
+mem_cgroup_get_first_node(struct mem_cgroup *root_mem)
+{
+       struct cgroup *cgroup;
+       struct mem_cgroup *ret;
+       bool obsolete = (root_mem->last_scanned_child &&
+                               root_mem->last_scanned_child->obsolete);
+
+       /*
+        * Scan all children under root_mem.
+        */
+       cgroup_lock();
+       if (list_empty(&root_mem->css.cgroup->children)) {
+               ret = root_mem;
+               goto done;
+       }
+
+       if (!root_mem->last_scanned_child || obsolete) {
+               if (obsolete)
+                       mem_cgroup_put(root_mem->last_scanned_child);
+
+               cgroup = list_first_entry(&root_mem->css.cgroup->children,
+                               struct cgroup, sibling);
+               ret = mem_cgroup_from_cont(cgroup);
+               mem_cgroup_get(ret);
+       } else
+               ret = mem_cgroup_get_next_node(root_mem->last_scanned_child,
+                                               root_mem);
+
+done:
+       root_mem->last_scanned_child = ret;
+       cgroup_unlock();
+       return ret;
+}
+
+static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
+{
+       if (do_swap_account) {
+               if (res_counter_check_under_limit(&mem->res) &&
+                       res_counter_check_under_limit(&mem->memsw))
+                       return true;
+       } else
+               if (res_counter_check_under_limit(&mem->res))
+                       return true;
+       return false;
+}
+
+static unsigned int get_swappiness(struct mem_cgroup *memcg)
+{
+       struct cgroup *cgrp = memcg->css.cgroup;
+       unsigned int swappiness;
+
+       /* root ? */
+       if (cgrp->parent == NULL)
+               return vm_swappiness;
+
+       spin_lock(&memcg->reclaim_param_lock);
+       swappiness = memcg->swappiness;
+       spin_unlock(&memcg->reclaim_param_lock);
+
+       return swappiness;
+}
+
+/*
+ * Dance down the hierarchy if needed to reclaim memory. We remember the
+ * last child we reclaimed from, so that we don't end up penalizing
+ * one child extensively based on its position in the children list.
+ *
+ * root_mem is the original ancestor that we've been asked to reclaim from.
+ */
+static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
+                                               gfp_t gfp_mask, bool noswap)
+{
+       struct mem_cgroup *next_mem;
+       int ret = 0;
+
+       /*
+        * Reclaim unconditionally and don't check the return value.
+        * We need to reclaim in the current group and down the tree.
+        * One might think about checking for children before reclaiming,
+        * but there might be leftover accounting even after the children
+        * are gone.
+        */
+       ret = try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap,
+                                          get_swappiness(root_mem));
+       if (mem_cgroup_check_under_limit(root_mem))
+               return 0;
+       if (!root_mem->use_hierarchy)
+               return ret;
+
+       next_mem = mem_cgroup_get_first_node(root_mem);
+
+       while (next_mem != root_mem) {
+               if (next_mem->obsolete) {
+                       mem_cgroup_put(next_mem);
+                       cgroup_lock();
+                       next_mem = mem_cgroup_get_first_node(root_mem);
+                       cgroup_unlock();
+                       continue;
+               }
+               ret = try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap,
+                                                  get_swappiness(next_mem));
+               if (mem_cgroup_check_under_limit(root_mem))
+                       return 0;
+               cgroup_lock();
+               next_mem = mem_cgroup_get_next_node(next_mem, root_mem);
+               cgroup_unlock();
+       }
+       return ret;
+}
+
+bool mem_cgroup_oom_called(struct task_struct *task)
+{
+       bool ret = false;
+       struct mem_cgroup *mem;
+       struct mm_struct *mm;
+
+       rcu_read_lock();
+       mm = task->mm;
+       if (!mm)
+               mm = &init_mm;
+       mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
+       if (mem && time_before(jiffies, mem->last_oom_jiffies + HZ/10))
+               ret = true;
+       rcu_read_unlock();
+       return ret;
+}
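
mem_cgroup_oom_called() throttles rather than forbids: if this task's
memcg invoked the OOM killer within the last HZ/10 ticks (~100ms), the
page-fault OOM path can skip a redundant global OOM kill. time_before()
stays correct across jiffies wraparound by comparing signed differences;
a small sketch of that pattern (names here are illustrative, not kernel
API):

    #define TICK_HZ 100     /* stand-in for HZ */

    /* wrap-safe "a is before b", the trick behind time_before() */
    static int before(unsigned long a, unsigned long b)
    {
            return (long)(a - b) < 0;
    }

    static int recently_oomed(unsigned long now, unsigned long last_oom)
    {
            /* true within ~100ms (TICK_HZ / 10 ticks) of the last OOM */
            return before(now, last_oom + TICK_HZ / 10);
    }
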
 /*
  * Unlike the exported interface, an "oom" parameter is added; if oom==true,
  * the oom-killer can be invoked.
@@ -469,8 +752,16 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                        gfp_t gfp_mask, struct mem_cgroup **memcg,
                        bool oom)
 {
-       struct mem_cgroup *mem;
+       struct mem_cgroup *mem, *mem_over_limit;
        int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
+       struct res_counter *fail_res;
+
+       if (unlikely(test_thread_flag(TIF_MEMDIE))) {
+               /* Don't account this! */
+               *memcg = NULL;
+               return 0;
+       }
+
        /*
         * We always charge the cgroup the mm_struct belongs to.
         * The mm_struct's mem_cgroup changes on task migration if the
@@ -499,22 +790,29 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                int ret;
                bool noswap = false;
 
-               ret = res_counter_charge(&mem->res, PAGE_SIZE);
+               ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
                if (likely(!ret)) {
                        if (!do_swap_account)
                                break;
-                       ret = res_counter_charge(&mem->memsw, PAGE_SIZE);
+                       ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
+                                                       &fail_res);
                        if (likely(!ret))
                                break;
                        /* mem+swap counter fails */
                        res_counter_uncharge(&mem->res, PAGE_SIZE);
                        noswap = true;
-               }
+                       mem_over_limit = mem_cgroup_from_res_counter(fail_res,
+                                                                       memsw);
+               } else
+                       /* mem counter fails */
+                       mem_over_limit = mem_cgroup_from_res_counter(fail_res,
+                                                                       res);
+
                if (!(gfp_mask & __GFP_WAIT))
                        goto nomem;
 
-               if (try_to_free_mem_cgroup_pages(mem, gfp_mask, noswap))
-                       continue;
+               ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
+                                                       noswap);
 
                /*
                 * try_to_free_mem_cgroup_pages() might not give us a full
@@ -524,16 +822,14 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                 * current usage of the cgroup before giving up
                 *
                 */
-               if (!do_swap_account &&
-                       res_counter_check_under_limit(&mem->res))
-                       continue;
-               if (do_swap_account &&
-                       res_counter_check_under_limit(&mem->memsw))
+               if (mem_cgroup_check_under_limit(mem_over_limit))
                        continue;
 
                if (!nr_retries--) {
-                       if (oom)
-                               mem_cgroup_out_of_memory(mem, gfp_mask);
+                       if (oom) {
+                               mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
+                               mem_over_limit->last_oom_jiffies = jiffies;
+                       }
                        goto nomem;
                }
        }
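
The retry loop above relies on the reworked res_counter_charge(): a
charge propagates up the parent chain, and on failure fail_res names the
ancestor whose limit was actually hit, so hierarchical reclaim can target
mem_over_limit instead of the leaf group. A simplified sketch of that
propagation (hypothetical struct, no locking):

    #include <errno.h>

    struct counter {
            unsigned long usage, limit;
            struct counter *parent;
    };

    /* Charge val into c and every ancestor; on failure, undo the
     * partial charges and report the counter that was over limit. */
    static int counter_charge(struct counter *c, unsigned long val,
                              struct counter **fail)
    {
            struct counter *i, *undo;

            for (i = c; i; i = i->parent) {
                    if (i->usage + val > i->limit) {
                            for (undo = c; undo != i; undo = undo->parent)
                                    undo->usage -= val;
                            *fail = i;      /* -> mem_over_limit above */
                            return -ENOMEM;
                    }
                    i->usage += val;
            }
            return 0;
    }
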
@@ -670,7 +966,7 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc,
 
 
        ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
-       if (ret)
+       if (ret || !parent)
                return ret;
 
        if (!get_page_unless_zero(page))
@@ -721,7 +1017,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 
        mem = memcg;
        ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
-       if (ret)
+       if (ret || !mem)
                return ret;
 
        __mem_cgroup_commit_charge(mem, pc, ctype);
@@ -731,7 +1027,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 int mem_cgroup_newpage_charge(struct page *page,
                              struct mm_struct *mm, gfp_t gfp_mask)
 {
-       if (mem_cgroup_subsys.disabled)
+       if (mem_cgroup_disabled())
                return 0;
        if (PageCompound(page))
                return 0;
@@ -753,7 +1049,7 @@ int mem_cgroup_newpage_charge(struct page *page,
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask)
 {
-       if (mem_cgroup_subsys.disabled)
+       if (mem_cgroup_disabled())
                return 0;
        if (PageCompound(page))
                return 0;
@@ -799,7 +1095,7 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
        struct mem_cgroup *mem;
        swp_entry_t     ent;
 
-       if (mem_cgroup_subsys.disabled)
+       if (mem_cgroup_disabled())
                return 0;
 
        if (!do_swap_account)
@@ -833,7 +1129,7 @@ int mem_cgroup_cache_charge_swapin(struct page *page,
 {
        int ret = 0;
 
-       if (mem_cgroup_subsys.disabled)
+       if (mem_cgroup_disabled())
                return 0;
        if (unlikely(!mm))
                mm = &init_mm;
@@ -880,7 +1176,7 @@ void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
 {
        struct page_cgroup *pc;
 
-       if (mem_cgroup_subsys.disabled)
+       if (mem_cgroup_disabled())
                return;
        if (!ptr)
                return;
@@ -909,7 +1205,7 @@ void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
 
 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
 {
-       if (mem_cgroup_subsys.disabled)
+       if (mem_cgroup_disabled())
                return;
        if (!mem)
                return;
@@ -930,7 +1226,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
        struct mem_cgroup *mem = NULL;
        struct mem_cgroup_per_zone *mz;
 
-       if (mem_cgroup_subsys.disabled)
+       if (mem_cgroup_disabled())
                return NULL;
 
        if (PageSwapCache(page))
@@ -976,7 +1272,9 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
        mz = page_cgroup_zoneinfo(pc);
        unlock_page_cgroup(pc);
 
-       css_put(&mem->css);
+       /* at swapout, this memcg is still needed to record the swap entry */
+       if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
+               css_put(&mem->css);
 
        return mem;
 
@@ -1017,6 +1315,8 @@ void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
                swap_cgroup_record(ent, memcg);
                mem_cgroup_get(memcg);
        }
+       if (memcg)
+               css_put(&memcg->css);
 }
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
@@ -1049,7 +1349,7 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
        struct mem_cgroup *mem = NULL;
        int ret = 0;
 
-       if (mem_cgroup_subsys.disabled)
+       if (mem_cgroup_disabled())
                return 0;
 
        pc = lookup_page_cgroup(page);
@@ -1061,7 +1361,7 @@ int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
        unlock_page_cgroup(pc);
 
        if (mem) {
-               ret = mem_cgroup_try_charge(NULL, GFP_HIGHUSER_MOVABLE, &mem);
+               ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
                css_put(&mem->css);
        }
        *ptr = mem;
@@ -1131,7 +1431,7 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
        int progress = 0;
        int retry = MEM_CGROUP_RECLAIM_RETRIES;
 
-       if (mem_cgroup_subsys.disabled)
+       if (mem_cgroup_disabled())
                return 0;
        if (!mm)
                return 0;
@@ -1146,8 +1446,9 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
        rcu_read_unlock();
 
        do {
-               progress = try_to_free_mem_cgroup_pages(mem, gfp_mask, true);
-               progress += res_counter_check_under_limit(&mem->res);
+               progress = try_to_free_mem_cgroup_pages(mem, gfp_mask, true,
+                                                       get_swappiness(mem));
+               progress += mem_cgroup_check_under_limit(mem);
        } while (!progress && --retry);
 
        css_put(&mem->css);
@@ -1191,9 +1492,12 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
                        break;
 
                progress = try_to_free_mem_cgroup_pages(memcg,
-                               GFP_HIGHUSER_MOVABLE, false);
+                                                       GFP_KERNEL,
+                                                       false,
+                                                       get_swappiness(memcg));
                if (!progress)
                        retry_count--;
        }
+
        return ret;
 }
 
@@ -1231,7 +1535,8 @@ int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
                        break;
 
                oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
-               try_to_free_mem_cgroup_pages(memcg, GFP_HIGHUSER_MOVABLE, true);
+               try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL, true,
+                                            get_swappiness(memcg));
                curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
                if (curusage >= oldusage)
                        retry_count--;
@@ -1277,7 +1582,7 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
                }
                spin_unlock_irqrestore(&zone->lru_lock, flags);
 
-               ret = mem_cgroup_move_parent(pc, mem, GFP_HIGHUSER_MOVABLE);
+               ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
                if (ret == -ENOMEM)
                        break;
 
@@ -1362,8 +1667,8 @@ try_to_free:
                        ret = -EINTR;
                        goto out;
                }
-               progress = try_to_free_mem_cgroup_pages(mem,
-                                                 GFP_HIGHUSER_MOVABLE, false);
+               progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
+                                               false, get_swappiness(mem));
                if (!progress) {
                        nr_retries--;
                        /* maybe some writeback is necessary */
@@ -1385,6 +1690,44 @@ int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
 }
 
 
+static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
+{
+       return mem_cgroup_from_cont(cont)->use_hierarchy;
+}
+
+static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
+                                       u64 val)
+{
+       int retval = 0;
+       struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
+       struct cgroup *parent = cont->parent;
+       struct mem_cgroup *parent_mem = NULL;
+
+       if (parent)
+               parent_mem = mem_cgroup_from_cont(parent);
+
+       cgroup_lock();
+       /*
+        * If the parent's use_hierarchy is set, we can't make any modifications
+        * in the child subtrees. If it is unset, then the change can
+        * occur, provided the current cgroup has no children.
+        *
+        * For the root cgroup, parent_mem is NULL, so we allow the value
+        * to be set if there are no children.
+        */
+       if ((!parent_mem || !parent_mem->use_hierarchy) &&
+                               (val == 1 || val == 0)) {
+               if (list_empty(&cont->children))
+                       mem->use_hierarchy = val;
+               else
+                       retval = -EBUSY;
+       } else
+               retval = -EINVAL;
+       cgroup_unlock();
+
+       return retval;
+}
+
 static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
 {
        struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
@@ -1439,6 +1782,34 @@ static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
        return ret;
 }
 
+static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
+               unsigned long long *mem_limit, unsigned long long *memsw_limit)
+{
+       struct cgroup *cgroup;
+       unsigned long long min_limit, min_memsw_limit, tmp;
+
+       min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
+       min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
+       cgroup = memcg->css.cgroup;
+       if (!memcg->use_hierarchy)
+               goto out;
+
+       while (cgroup->parent) {
+               cgroup = cgroup->parent;
+               memcg = mem_cgroup_from_cont(cgroup);
+               if (!memcg->use_hierarchy)
+                       break;
+               tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
+               min_limit = min(min_limit, tmp);
+               tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
+               min_memsw_limit = min(min_memsw_limit, tmp);
+       }
+out:
+       *mem_limit = min_limit;
+       *memsw_limit = min_memsw_limit;
+       return;
+}
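
memcg_get_hierarchical_limit() reports the effective limit shown in
memory.stat: the tightest limit on the path toward the root, stopping as
soon as hierarchical accounting is switched off. The walk in isolation,
on a hypothetical group type:

    struct group {
            unsigned long long limit;
            struct group *parent;
            int use_hierarchy;
    };

    static unsigned long long effective_limit(struct group *g)
    {
            unsigned long long min_limit = g->limit;

            if (!g->use_hierarchy)
                    return min_limit;

            while (g->parent) {
                    g = g->parent;
                    /* an ancestor outside the hierarchy doesn't count */
                    if (!g->use_hierarchy)
                            break;
                    if (g->limit < min_limit)
                            min_limit = g->limit;
            }
            return min_limit;
    }
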
+
 static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
 {
        struct mem_cgroup *mem;
@@ -1512,6 +1883,74 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
                cb->fill(cb, "unevictable", unevictable * PAGE_SIZE);
 
        }
+       {
+               unsigned long long limit, memsw_limit;
+               memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
+               cb->fill(cb, "hierarchical_memory_limit", limit);
+               if (do_swap_account)
+                       cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
+       }
+
+#ifdef CONFIG_DEBUG_VM
+       cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
+
+       {
+               int nid, zid;
+               struct mem_cgroup_per_zone *mz;
+               unsigned long recent_rotated[2] = {0, 0};
+               unsigned long recent_scanned[2] = {0, 0};
+
+               for_each_online_node(nid)
+                       for (zid = 0; zid < MAX_NR_ZONES; zid++) {
+                               mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
+
+                               recent_rotated[0] +=
+                                       mz->reclaim_stat.recent_rotated[0];
+                               recent_rotated[1] +=
+                                       mz->reclaim_stat.recent_rotated[1];
+                               recent_scanned[0] +=
+                                       mz->reclaim_stat.recent_scanned[0];
+                               recent_scanned[1] +=
+                                       mz->reclaim_stat.recent_scanned[1];
+                       }
+               cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
+               cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
+               cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
+               cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
+       }
+#endif
+
+       return 0;
+}
+
+static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
+{
+       struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+
+       return get_swappiness(memcg);
+}
+
+static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
+                                      u64 val)
+{
+       struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+       struct mem_cgroup *parent;
+
+       if (val > 100)
+               return -EINVAL;
+
+       if (cgrp->parent == NULL)
+               return -EINVAL;
+
+       parent = mem_cgroup_from_cont(cgrp->parent);
+       /* If under hierarchy, only empty-root can set this value */
+       if ((parent->use_hierarchy) ||
+           (memcg->use_hierarchy && !list_empty(&cgrp->children)))
+               return -EINVAL;
+
+       spin_lock(&memcg->reclaim_param_lock);
+       memcg->swappiness = val;
+       spin_unlock(&memcg->reclaim_param_lock);
+
        return 0;
 }
 
@@ -1548,6 +1987,16 @@ static struct cftype mem_cgroup_files[] = {
                .name = "force_empty",
                .trigger = mem_cgroup_force_empty_write,
        },
+       {
+               .name = "use_hierarchy",
+               .write_u64 = mem_cgroup_hierarchy_write,
+               .read_u64 = mem_cgroup_hierarchy_read,
+       },
+       {
+               .name = "swappiness",
+               .read_u64 = mem_cgroup_swappiness_read,
+               .write_u64 = mem_cgroup_swappiness_write,
+       },
 };
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
@@ -1697,7 +2146,7 @@ static void mem_cgroup_put(struct mem_cgroup *mem)
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 static void __init enable_swap_cgroup(void)
 {
-       if (!mem_cgroup_subsys.disabled && really_do_swap_account)
+       if (!mem_cgroup_disabled() && really_do_swap_account)
                do_swap_account = 1;
 }
 #else
@@ -1709,22 +2158,37 @@ static void __init enable_swap_cgroup(void)
 static struct cgroup_subsys_state *
 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 {
-       struct mem_cgroup *mem;
+       struct mem_cgroup *mem, *parent;
        int node;
 
        mem = mem_cgroup_alloc();
        if (!mem)
                return ERR_PTR(-ENOMEM);
 
-       res_counter_init(&mem->res);
-       res_counter_init(&mem->memsw);
-
        for_each_node_state(node, N_POSSIBLE)
                if (alloc_mem_cgroup_per_zone_info(mem, node))
                        goto free_out;
        /* root ? */
-       if (cont->parent == NULL)
+       if (cont->parent == NULL) {
                enable_swap_cgroup();
+               parent = NULL;
+       } else {
+               parent = mem_cgroup_from_cont(cont->parent);
+               mem->use_hierarchy = parent->use_hierarchy;
+       }
+
+       if (parent && parent->use_hierarchy) {
+               res_counter_init(&mem->res, &parent->res);
+               res_counter_init(&mem->memsw, &parent->memsw);
+       } else {
+               res_counter_init(&mem->res, NULL);
+               res_counter_init(&mem->memsw, NULL);
+       }
+       mem->last_scanned_child = NULL;
+       spin_lock_init(&mem->reclaim_param_lock);
+
+       if (parent)
+               mem->swappiness = get_swappiness(parent);
 
        return &mem->css;
 free_out:
@@ -1766,25 +2230,10 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
                                struct cgroup *old_cont,
                                struct task_struct *p)
 {
-       struct mm_struct *mm;
-       struct mem_cgroup *mem, *old_mem;
-
-       mm = get_task_mm(p);
-       if (mm == NULL)
-               return;
-
-       mem = mem_cgroup_from_cont(cont);
-       old_mem = mem_cgroup_from_cont(old_cont);
-
        /*
-        * Only thread group leaders are allowed to migrate, the mm_struct is
-        * in effect owned by the leader
+        * FIXME: It's better to move charges of this process from old
+        * memcg to new memcg. But it's just on TODO-List now.
         */
-       if (!thread_group_leader(p))
-               goto out;
-
-out:
-       mmput(mm);
 }
 
 struct cgroup_subsys mem_cgroup_subsys = {