1 /* memcontrol.c - Memory Controller
2  *
3  * Copyright IBM Corporation, 2007
4  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5  *
6  * Copyright 2007 OpenVZ SWsoft Inc
7  * Author: Pavel Emelianov <xemul@openvz.org>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  */
19
20 #include <linux/res_counter.h>
21 #include <linux/memcontrol.h>
22 #include <linux/cgroup.h>
23 #include <linux/mm.h>
24 #include <linux/pagemap.h>
25 #include <linux/smp.h>
26 #include <linux/page-flags.h>
27 #include <linux/backing-dev.h>
28 #include <linux/bit_spinlock.h>
29 #include <linux/rcupdate.h>
30 #include <linux/mutex.h>
31 #include <linux/slab.h>
32 #include <linux/swap.h>
33 #include <linux/spinlock.h>
34 #include <linux/fs.h>
35 #include <linux/seq_file.h>
36 #include <linux/vmalloc.h>
37 #include <linux/mm_inline.h>
38 #include <linux/page_cgroup.h>
39 #include "internal.h"
40
41 #include <asm/uaccess.h>
42
43 struct cgroup_subsys mem_cgroup_subsys __read_mostly;
44 #define MEM_CGROUP_RECLAIM_RETRIES      5
45
46 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
47 /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
48 int do_swap_account __read_mostly;
49 static int really_do_swap_account __initdata = 1; /* for remembering the boot option */
50 #else
51 #define do_swap_account         (0)
52 #endif
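
/*
 * Sketch of how really_do_swap_account is meant to be driven. Hedged: the
 * boot-option hook itself is outside this excerpt, so the handler below is
 * an illustrative assumption, not a verbatim quote of this file:
 *
 *	static int __init disable_swap_account(char *s)
 *	{
 *		really_do_swap_account = 0;	// "noswapaccount" on the boot line
 *		return 1;
 *	}
 *	__setup("noswapaccount", disable_swap_account);
 */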
53
54 static DEFINE_MUTEX(memcg_tasklist);    /* can be held under cgroup_mutex */
55
56 /*
57  * Statistics for memory cgroup.
58  */
59 enum mem_cgroup_stat_index {
60         /*
61          * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
62          */
63         MEM_CGROUP_STAT_CACHE,     /* # of pages charged as cache */
64         MEM_CGROUP_STAT_RSS,       /* # of pages charged as rss */
65         MEM_CGROUP_STAT_PGPGIN_COUNT,   /* # of pages paged in */
66         MEM_CGROUP_STAT_PGPGOUT_COUNT,  /* # of pages paged out */
67
68         MEM_CGROUP_STAT_NSTATS,
69 };
70
71 struct mem_cgroup_stat_cpu {
72         s64 count[MEM_CGROUP_STAT_NSTATS];
73 } ____cacheline_aligned_in_smp;
74
75 struct mem_cgroup_stat {
76         struct mem_cgroup_stat_cpu cpustat[0];
77 };
78
79 /*
80  * For accounting with irqs disabled; there is no need to increment the preempt count.
81  */
82 static inline void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat_cpu *stat,
83                 enum mem_cgroup_stat_index idx, int val)
84 {
85         stat->count[idx] += val;
86 }
87
88 static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
89                 enum mem_cgroup_stat_index idx)
90 {
91         int cpu;
92         s64 ret = 0;
93         for_each_possible_cpu(cpu)
94                 ret += stat->cpustat[cpu].count[idx];
95         return ret;
96 }
97
98 /*
99  * per-zone information in memory controller.
100  */
101 struct mem_cgroup_per_zone {
102         /*
103          * spin_lock to protect the per cgroup LRU
104          */
105         struct list_head        lists[NR_LRU_LISTS];
106         unsigned long           count[NR_LRU_LISTS];
107
108         struct zone_reclaim_stat reclaim_stat;
109 };
110 /* Macro for accessing counter */
111 #define MEM_CGROUP_ZSTAT(mz, idx)       ((mz)->count[(idx)])
112
113 struct mem_cgroup_per_node {
114         struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
115 };
116
117 struct mem_cgroup_lru_info {
118         struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
119 };
120
121 /*
122  * The memory controller data structure. The memory controller controls both
123  * page cache and RSS per cgroup. We would eventually like to provide
124  * statistics based on the statistics developed by Rik van Riel for clock-pro,
125  * to help the administrator determine what knobs to tune.
126  *
127  * TODO: Add a water mark for the memory controller. Reclaim will begin when
128  * we hit the water mark. Maybe even add a low water mark, such that
129  * no reclaim occurs from a cgroup at its low water mark; this is
130  * a feature that will be implemented much later.
131  */
132 struct mem_cgroup {
133         struct cgroup_subsys_state css;
134         /*
135          * the counter to account for memory usage
136          */
137         struct res_counter res;
138         /*
139          * the counter to account for mem+swap usage.
140          */
141         struct res_counter memsw;
142         /*
143          * Per cgroup active and inactive list, similar to the
144          * per zone LRU lists.
145          */
146         struct mem_cgroup_lru_info info;
147
148         /*
149          * protects the reclaim-related members below.
150          */
151         spinlock_t reclaim_param_lock;
152
153         int     prev_priority;  /* for recording reclaim priority */
154
155         /*
156  * While reclaiming in a hierarchy, we cache the last child we
157          * reclaimed from. Protected by hierarchy_mutex
158          */
159         struct mem_cgroup *last_scanned_child;
160         /*
161          * Should the accounting and control be hierarchical, per subtree?
162          */
163         bool use_hierarchy;
164         unsigned long   last_oom_jiffies;
165         atomic_t        refcnt;
166
167         unsigned int    swappiness;
168
169         /*
170  * statistics. This must be placed at the end of memcg; its cpustat[] is a zero-length array.
171          */
172         struct mem_cgroup_stat stat;
173 };
174
175 enum charge_type {
176         MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
177         MEM_CGROUP_CHARGE_TYPE_MAPPED,
178         MEM_CGROUP_CHARGE_TYPE_SHMEM,   /* used by page migration of shmem */
179         MEM_CGROUP_CHARGE_TYPE_FORCE,   /* used by force_empty */
180         MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
181         NR_CHARGE_TYPE,
182 };
183
184 /* used only in this file (for easier reading) */
185 #define PCGF_CACHE      (1UL << PCG_CACHE)
186 #define PCGF_USED       (1UL << PCG_USED)
187 #define PCGF_LOCK       (1UL << PCG_LOCK)
188 static const unsigned long
189 pcg_default_flags[NR_CHARGE_TYPE] = {
190         PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* File Cache */
191         PCGF_USED | PCGF_LOCK, /* Anon */
192         PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */
193         0, /* FORCE */
194 };
195
196 /* for encoding cft->private value on file */
197 #define _MEM                    (0)
198 #define _MEMSWAP                (1)
199 #define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
200 #define MEMFILE_TYPE(val)       (((val) >> 16) & 0xffff)
201 #define MEMFILE_ATTR(val)       ((val) & 0xffff)
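
/*
 * Example (illustrative only; RES_LIMIT comes from res_counter.h): a
 * control file for the mem+swap limit would set
 *
 *	cft->private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT);
 *
 * and its handlers recover the pieces with
 *
 *	MEMFILE_TYPE(cft->private);	// == _MEMSWAP (high 16 bits)
 *	MEMFILE_ATTR(cft->private);	// == RES_LIMIT (low 16 bits)
 */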
202
203 static void mem_cgroup_get(struct mem_cgroup *mem);
204 static void mem_cgroup_put(struct mem_cgroup *mem);
205
206 static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
207                                          struct page_cgroup *pc,
208                                          bool charge)
209 {
210         int val = charge ? 1 : -1;
211         struct mem_cgroup_stat *stat = &mem->stat;
212         struct mem_cgroup_stat_cpu *cpustat;
213         int cpu = get_cpu();
214
215         cpustat = &stat->cpustat[cpu];
216         if (PageCgroupCache(pc))
217                 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val);
218         else
219                 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, val);
220
221         if (charge)
222                 __mem_cgroup_stat_add_safe(cpustat,
223                                 MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
224         else
225                 __mem_cgroup_stat_add_safe(cpustat,
226                                 MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
227         put_cpu();
228 }
229
230 static struct mem_cgroup_per_zone *
231 mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
232 {
233         return &mem->info.nodeinfo[nid]->zoneinfo[zid];
234 }
235
236 static struct mem_cgroup_per_zone *
237 page_cgroup_zoneinfo(struct page_cgroup *pc)
238 {
239         struct mem_cgroup *mem = pc->mem_cgroup;
240         int nid = page_cgroup_nid(pc);
241         int zid = page_cgroup_zid(pc);
242
243         if (!mem)
244                 return NULL;
245
246         return mem_cgroup_zoneinfo(mem, nid, zid);
247 }
248
249 static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
250                                         enum lru_list idx)
251 {
252         int nid, zid;
253         struct mem_cgroup_per_zone *mz;
254         u64 total = 0;
255
256         for_each_online_node(nid)
257                 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
258                         mz = mem_cgroup_zoneinfo(mem, nid, zid);
259                         total += MEM_CGROUP_ZSTAT(mz, idx);
260                 }
261         return total;
262 }
263
264 static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
265 {
266         return container_of(cgroup_subsys_state(cont,
267                                 mem_cgroup_subsys_id), struct mem_cgroup,
268                                 css);
269 }
270
271 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
272 {
273         /*
274          * mm_update_next_owner() may clear mm->owner to NULL
275          * if it races with swapoff, page migration, etc.
276          * So this can be called with p == NULL.
277          */
278         if (unlikely(!p))
279                 return NULL;
280
281         return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
282                                 struct mem_cgroup, css);
283 }
284
285 static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
286 {
287         struct mem_cgroup *mem = NULL;
288         /*
289          * Because we take no locks, mm->owner may be being moved to another
290          * cgroup. We use css_tryget() here, even if this looks
291          * pessimistic, rather than adding locks.
292          */
293         rcu_read_lock();
294         do {
295                 mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
296                 if (unlikely(!mem))
297                         break;
298         } while (!css_tryget(&mem->css));
299         rcu_read_unlock();
300         return mem;
301 }
302
303 static bool mem_cgroup_is_obsolete(struct mem_cgroup *mem)
304 {
305         if (!mem)
306                 return true;
307         return css_is_removed(&mem->css);
308 }
309
310 /*
311  * The following LRU functions may be used without holding PCG_LOCK.
312  * They are called by the global LRU code, independently of memcg.
313  * What we have to take care of here is the validity of pc->mem_cgroup.
314  *
315  * Changes to pc->mem_cgroup happen when
316  * 1. charge
317  * 2. moving account
318  * In the typical case, "charge" is done before add-to-lru. The exception is
319  * SwapCache, which is added to the LRU before being charged.
320  * If the PCG_USED bit is not set, the page_cgroup is not added to this private
321  * LRU. When moving an account, the page is not on the LRU; it's isolated.
322  */
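
/*
 * Illustrative SwapCache timeline (a sketch of the rules above, not code
 * taken from this file):
 *
 *	swap readahead		page is put on the global LRU while its
 *				page_cgroup is not yet PCG_USED
 *	mem_cgroup_lru_del_before_commit_swapcache()
 *				unlinks the stale page_cgroup from the
 *				private LRU (see below)
 *	__mem_cgroup_commit_charge()
 *				sets pc->mem_cgroup and PCG_USED
 *	mem_cgroup_lru_add_after_commit_swapcache()
 *				links the page_cgroup to the memcg's LRU
 */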
323
324 void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
325 {
326         struct page_cgroup *pc;
327         struct mem_cgroup *mem;
328         struct mem_cgroup_per_zone *mz;
329
330         if (mem_cgroup_disabled())
331                 return;
332         pc = lookup_page_cgroup(page);
333         /* can happen while we handle swapcache. */
334         if (list_empty(&pc->lru) || !pc->mem_cgroup)
335                 return;
336         /*
337          * We don't check PCG_USED bit. It's cleared when the "page" is finally
338          * removed from global LRU.
339          */
340         mz = page_cgroup_zoneinfo(pc);
341         mem = pc->mem_cgroup;
342         MEM_CGROUP_ZSTAT(mz, lru) -= 1;
343         list_del_init(&pc->lru);
344         return;
345 }
346
347 void mem_cgroup_del_lru(struct page *page)
348 {
349         mem_cgroup_del_lru_list(page, page_lru(page));
350 }
351
352 void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
353 {
354         struct mem_cgroup_per_zone *mz;
355         struct page_cgroup *pc;
356
357         if (mem_cgroup_disabled())
358                 return;
359
360         pc = lookup_page_cgroup(page);
361         smp_rmb();
362         /* unused page is not rotated. */
363         if (!PageCgroupUsed(pc))
364                 return;
365         mz = page_cgroup_zoneinfo(pc);
366         list_move(&pc->lru, &mz->lists[lru]);
367 }
368
369 void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
370 {
371         struct page_cgroup *pc;
372         struct mem_cgroup_per_zone *mz;
373
374         if (mem_cgroup_disabled())
375                 return;
376         pc = lookup_page_cgroup(page);
377         /* barrier to sync with "charge" */
378         smp_rmb();
379         if (!PageCgroupUsed(pc))
380                 return;
381
382         mz = page_cgroup_zoneinfo(pc);
383         MEM_CGROUP_ZSTAT(mz, lru) += 1;
384         list_add(&pc->lru, &mz->lists[lru]);
385 }
386
387 /*
388  * When handling SwapCache, pc->mem_cgroup may be changed while it's linked to
389  * the LRU, because the page may be reused after it's fully uncharged (due to
390  * SwapCache behavior). To handle that, unlink the page_cgroup from the LRU
391  * when charging it again. This function is only used to charge SwapCache. It's
392  * done under lock_page() and expects that zone->lru_lock is never held here.
393  */
394 static void mem_cgroup_lru_del_before_commit_swapcache(struct page *page)
395 {
396         unsigned long flags;
397         struct zone *zone = page_zone(page);
398         struct page_cgroup *pc = lookup_page_cgroup(page);
399
400         spin_lock_irqsave(&zone->lru_lock, flags);
401         /*
402          * Forget old LRU when this page_cgroup is *not* used. This Used bit
403          * is guarded by lock_page() because the page is SwapCache.
404          */
405         if (!PageCgroupUsed(pc))
406                 mem_cgroup_del_lru_list(page, page_lru(page));
407         spin_unlock_irqrestore(&zone->lru_lock, flags);
408 }
409
410 static void mem_cgroup_lru_add_after_commit_swapcache(struct page *page)
411 {
412         unsigned long flags;
413         struct zone *zone = page_zone(page);
414         struct page_cgroup *pc = lookup_page_cgroup(page);
415
416         spin_lock_irqsave(&zone->lru_lock, flags);
417         /* link when the page is linked to LRU but page_cgroup isn't */
418         if (PageLRU(page) && list_empty(&pc->lru))
419                 mem_cgroup_add_lru_list(page, page_lru(page));
420         spin_unlock_irqrestore(&zone->lru_lock, flags);
421 }
422
423
424 void mem_cgroup_move_lists(struct page *page,
425                            enum lru_list from, enum lru_list to)
426 {
427         if (mem_cgroup_disabled())
428                 return;
429         mem_cgroup_del_lru_list(page, from);
430         mem_cgroup_add_lru_list(page, to);
431 }
432
433 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
434 {
435         int ret;
436
437         task_lock(task);
438         ret = task->mm && mm_match_cgroup(task->mm, mem);
439         task_unlock(task);
440         return ret;
441 }
442
443 /*
444  * Calculate mapped_ratio under the memory controller. This will be used in
445  * vmscan.c for determining whether we have to reclaim mapped pages.
446  */
447 int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
448 {
449         long total, rss;
450
451         /*
452          * usage is recorded in bytes. But, here, we assume the number of
453          * physical pages can be represented by "long" on any arch.
454          */
455         total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
456         rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
457         return (int)((rss * 100L) / total);
458 }
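
/*
 * Worked example (illustrative numbers, not from the source): with 4 KiB
 * pages, a cgroup whose res.usage is 200 MiB has
 *
 *	total = (209715200 >> PAGE_SHIFT) + 1 = 51201
 *
 * and if 25600 of those pages are rss, the result is
 *
 *	(25600 * 100) / 51201 = 49	(integer division, ~49% mapped)
 *
 * The "+ 1" keeps the divisor non-zero for an empty cgroup.
 */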
459
460 /*
461  * prev_priority control; this will be used in the memory reclaim path.
462  */
463 int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
464 {
465         int prev_priority;
466
467         spin_lock(&mem->reclaim_param_lock);
468         prev_priority = mem->prev_priority;
469         spin_unlock(&mem->reclaim_param_lock);
470
471         return prev_priority;
472 }
473
474 void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
475 {
476         spin_lock(&mem->reclaim_param_lock);
477         if (priority < mem->prev_priority)
478                 mem->prev_priority = priority;
479         spin_unlock(&mem->reclaim_param_lock);
480 }
481
482 void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
483 {
484         spin_lock(&mem->reclaim_param_lock);
485         mem->prev_priority = priority;
486         spin_unlock(&mem->reclaim_param_lock);
487 }
488
489 static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
490 {
491         unsigned long active;
492         unsigned long inactive;
493         unsigned long gb;
494         unsigned long inactive_ratio;
495
496         inactive = mem_cgroup_get_all_zonestat(memcg, LRU_INACTIVE_ANON);
497         active = mem_cgroup_get_all_zonestat(memcg, LRU_ACTIVE_ANON);
498
499         gb = (inactive + active) >> (30 - PAGE_SHIFT);
500         if (gb)
501                 inactive_ratio = int_sqrt(10 * gb);
502         else
503                 inactive_ratio = 1;
504
505         if (present_pages) {
506                 present_pages[0] = inactive;
507                 present_pages[1] = active;
508         }
509
510         return inactive_ratio;
511 }
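
/*
 * Worked example (illustrative): with 4 GiB of anon pages on the LRUs,
 * gb = 4 and inactive_ratio = int_sqrt(10 * 4) = 6, so (per the check
 * below) inactive anon is considered low once inactive * 6 < active.
 * Under 1 GiB the ratio is simply 1.
 */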
512
513 int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
514 {
515         unsigned long active;
516         unsigned long inactive;
517         unsigned long present_pages[2];
518         unsigned long inactive_ratio;
519
520         inactive_ratio = calc_inactive_ratio(memcg, present_pages);
521
522         inactive = present_pages[0];
523         active = present_pages[1];
524
525         if (inactive * inactive_ratio < active)
526                 return 1;
527
528         return 0;
529 }
530
531 unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
532                                        struct zone *zone,
533                                        enum lru_list lru)
534 {
535         int nid = zone->zone_pgdat->node_id;
536         int zid = zone_idx(zone);
537         struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
538
539         return MEM_CGROUP_ZSTAT(mz, lru);
540 }
541
542 struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
543                                                       struct zone *zone)
544 {
545         int nid = zone->zone_pgdat->node_id;
546         int zid = zone_idx(zone);
547         struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
548
549         return &mz->reclaim_stat;
550 }
551
552 struct zone_reclaim_stat *
553 mem_cgroup_get_reclaim_stat_from_page(struct page *page)
554 {
555         struct page_cgroup *pc;
556         struct mem_cgroup_per_zone *mz;
557
558         if (mem_cgroup_disabled())
559                 return NULL;
560
561         pc = lookup_page_cgroup(page);
562         mz = page_cgroup_zoneinfo(pc);
563         if (!mz)
564                 return NULL;
565
566         return &mz->reclaim_stat;
567 }
568
569 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
570                                         struct list_head *dst,
571                                         unsigned long *scanned, int order,
572                                         int mode, struct zone *z,
573                                         struct mem_cgroup *mem_cont,
574                                         int active, int file)
575 {
576         unsigned long nr_taken = 0;
577         struct page *page;
578         unsigned long scan;
579         LIST_HEAD(pc_list);
580         struct list_head *src;
581         struct page_cgroup *pc, *tmp;
582         int nid = z->zone_pgdat->node_id;
583         int zid = zone_idx(z);
584         struct mem_cgroup_per_zone *mz;
585         int lru = LRU_FILE * !!file + !!active;
586
587         BUG_ON(!mem_cont);
588         mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
589         src = &mz->lists[lru];
590
591         scan = 0;
592         list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
593                 if (scan >= nr_to_scan)
594                         break;
595
596                 page = pc->page;
597                 if (unlikely(!PageCgroupUsed(pc)))
598                         continue;
599                 if (unlikely(!PageLRU(page)))
600                         continue;
601
602                 scan++;
603                 if (__isolate_lru_page(page, mode, file) == 0) {
604                         list_move(&page->lru, dst);
605                         nr_taken++;
606                 }
607         }
608
609         *scanned = scan;
610         return nr_taken;
611 }
612
613 #define mem_cgroup_from_res_counter(counter, member)    \
614         container_of(counter, struct mem_cgroup, member)
615
616 /*
617  * This routine finds the successor of a DFS walk over the hierarchy. It
618  * should be called with hierarchy_mutex held.
619  */
620 static struct mem_cgroup *
621 mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem)
622 {
623         struct cgroup *cgroup, *curr_cgroup, *root_cgroup;
624
625         curr_cgroup = curr->css.cgroup;
626         root_cgroup = root_mem->css.cgroup;
627
628         if (!list_empty(&curr_cgroup->children)) {
629                 /*
630                  * Walk down to children
631                  */
632                 mem_cgroup_put(curr);
633                 cgroup = list_entry(curr_cgroup->children.next,
634                                                 struct cgroup, sibling);
635                 curr = mem_cgroup_from_cont(cgroup);
636                 mem_cgroup_get(curr);
637                 goto done;
638         }
639
640 visit_parent:
641         if (curr_cgroup == root_cgroup) {
642                 mem_cgroup_put(curr);
643                 curr = root_mem;
644                 mem_cgroup_get(curr);
645                 goto done;
646         }
647
648         /*
649          * Goto next sibling
650          */
651         if (curr_cgroup->sibling.next != &curr_cgroup->parent->children) {
652                 mem_cgroup_put(curr);
653                 cgroup = list_entry(curr_cgroup->sibling.next, struct cgroup,
654                                                 sibling);
655                 curr = mem_cgroup_from_cont(cgroup);
656                 mem_cgroup_get(curr);
657                 goto done;
658         }
659
660         /*
661          * Go up to next parent and next parent's sibling if need be
662          */
663         curr_cgroup = curr_cgroup->parent;
664         goto visit_parent;
665
666 done:
667         root_mem->last_scanned_child = curr;
668         return curr;
669 }
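
/*
 * Illustrative walk order (hypothetical hierarchy, not from the source):
 * for root_mem R with children A and B, where A has children A1 and A2,
 * successive calls yield
 *
 *	A -> A1 -> A2 -> B -> R
 *
 * i.e. children first, then siblings, then back up toward (and finally
 * returning) the root itself.
 */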
670
671 /*
672  * Visit the first child (need not be the first child as per the ordering
673  * of the cgroup list, since we track last_scanned_child) of @root_mem and
674  * use that to reclaim free pages from.
675  */
676 static struct mem_cgroup *
677 mem_cgroup_get_first_node(struct mem_cgroup *root_mem)
678 {
679         struct cgroup *cgroup;
680         struct mem_cgroup *ret;
681         bool obsolete;
682
683         obsolete = mem_cgroup_is_obsolete(root_mem->last_scanned_child);
684
685         /*
686          * Scan all children under the mem_cgroup mem
687          */
688         mutex_lock(&mem_cgroup_subsys.hierarchy_mutex);
689         if (list_empty(&root_mem->css.cgroup->children)) {
690                 ret = root_mem;
691                 goto done;
692         }
693
694         if (!root_mem->last_scanned_child || obsolete) {
695
696                 if (obsolete && root_mem->last_scanned_child)
697                         mem_cgroup_put(root_mem->last_scanned_child);
698
699                 cgroup = list_first_entry(&root_mem->css.cgroup->children,
700                                 struct cgroup, sibling);
701                 ret = mem_cgroup_from_cont(cgroup);
702                 mem_cgroup_get(ret);
703         } else
704                 ret = mem_cgroup_get_next_node(root_mem->last_scanned_child,
705                                                 root_mem);
706
707 done:
708         root_mem->last_scanned_child = ret;
709         mutex_unlock(&mem_cgroup_subsys.hierarchy_mutex);
710         return ret;
711 }
712
713 static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
714 {
715         if (do_swap_account) {
716                 if (res_counter_check_under_limit(&mem->res) &&
717                         res_counter_check_under_limit(&mem->memsw))
718                         return true;
719         } else
720                 if (res_counter_check_under_limit(&mem->res))
721                         return true;
722         return false;
723 }
724
725 static unsigned int get_swappiness(struct mem_cgroup *memcg)
726 {
727         struct cgroup *cgrp = memcg->css.cgroup;
728         unsigned int swappiness;
729
730         /* root ? */
731         if (cgrp->parent == NULL)
732                 return vm_swappiness;
733
734         spin_lock(&memcg->reclaim_param_lock);
735         swappiness = memcg->swappiness;
736         spin_unlock(&memcg->reclaim_param_lock);
737
738         return swappiness;
739 }
740
741 /*
742  * Dance down the hierarchy if needed to reclaim memory. We remember the
743  * last child we reclaimed from, so that we don't end up penalizing
744  * one child extensively based on its position in the children list.
745  *
746  * root_mem is the original ancestor that we've been reclaiming from.
747  */
748 static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
749                                                 gfp_t gfp_mask, bool noswap)
750 {
751         struct mem_cgroup *next_mem;
752         int ret = 0;
753
754         /*
755          * Reclaim unconditionally and don't check for return value.
756          * We need to reclaim in the current group and down the tree.
757          * One might think about checking for children before reclaiming,
758          * but there might be left over accounting, even after children
759          * have left.
760          */
761         ret = try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap,
762                                            get_swappiness(root_mem));
763         if (mem_cgroup_check_under_limit(root_mem))
764                 return 0;
765         if (!root_mem->use_hierarchy)
766                 return ret;
767
768         next_mem = mem_cgroup_get_first_node(root_mem);
769
770         while (next_mem != root_mem) {
771                 if (mem_cgroup_is_obsolete(next_mem)) {
772                         mem_cgroup_put(next_mem);
773                         next_mem = mem_cgroup_get_first_node(root_mem);
774                         continue;
775                 }
776                 ret = try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap,
777                                                    get_swappiness(next_mem));
778                 if (mem_cgroup_check_under_limit(root_mem))
779                         return 0;
780                 mutex_lock(&mem_cgroup_subsys.hierarchy_mutex);
781                 next_mem = mem_cgroup_get_next_node(next_mem, root_mem);
782                 mutex_unlock(&mem_cgroup_subsys.hierarchy_mutex);
783         }
784         return ret;
785 }
786
787 bool mem_cgroup_oom_called(struct task_struct *task)
788 {
789         bool ret = false;
790         struct mem_cgroup *mem;
791         struct mm_struct *mm;
792
793         rcu_read_lock();
794         mm = task->mm;
795         if (!mm)
796                 mm = &init_mm;
797         mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
798         if (mem && time_before(jiffies, mem->last_oom_jiffies + HZ/10))
799                 ret = true;
800         rcu_read_unlock();
801         return ret;
802 }
803 /*
804  * Unlike the exported interface, an "oom" parameter is added. If oom == true,
805  * the oom-killer can be invoked.
806  */
807 static int __mem_cgroup_try_charge(struct mm_struct *mm,
808                         gfp_t gfp_mask, struct mem_cgroup **memcg,
809                         bool oom)
810 {
811         struct mem_cgroup *mem, *mem_over_limit;
812         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
813         struct res_counter *fail_res;
814
815         if (unlikely(test_thread_flag(TIF_MEMDIE))) {
816                 /* Don't account this! */
817                 *memcg = NULL;
818                 return 0;
819         }
820
821         /*
822          * We always charge the cgroup the mm_struct belongs to.
823          * The mm_struct's mem_cgroup changes on task migration if the
824          * thread group leader migrates. It's possible that mm is not
825          * set; if so, charge the init_mm (happens for pagecache usage).
826          */
827         mem = *memcg;
828         if (likely(!mem)) {
829                 mem = try_get_mem_cgroup_from_mm(mm);
830                 *memcg = mem;
831         } else {
832                 css_get(&mem->css);
833         }
834         if (unlikely(!mem))
835                 return 0;
836
837         VM_BUG_ON(mem_cgroup_is_obsolete(mem));
838
839         while (1) {
840                 int ret;
841                 bool noswap = false;
842
843                 ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
844                 if (likely(!ret)) {
845                         if (!do_swap_account)
846                                 break;
847                         ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
848                                                         &fail_res);
849                         if (likely(!ret))
850                                 break;
851                         /* mem+swap counter fails */
852                         res_counter_uncharge(&mem->res, PAGE_SIZE);
853                         noswap = true;
854                         mem_over_limit = mem_cgroup_from_res_counter(fail_res,
855                                                                         memsw);
856                 } else
857                         /* mem counter fails */
858                         mem_over_limit = mem_cgroup_from_res_counter(fail_res,
859                                                                         res);
860
861                 if (!(gfp_mask & __GFP_WAIT))
862                         goto nomem;
863
864                 ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
865                                                         noswap);
866
867                 /*
868                  * try_to_free_mem_cgroup_pages() might not give us a full
869                  * picture of reclaim. Some pages are reclaimed and might be
870                  * moved to swap cache or just unmapped from the cgroup.
871                  * Check the limit again to see if the reclaim reduced the
872                  * current usage of the cgroup before giving up.
873                  *
874                  */
875                 if (mem_cgroup_check_under_limit(mem_over_limit))
876                         continue;
877
878                 if (!nr_retries--) {
879                         if (oom) {
880                                 mutex_lock(&memcg_tasklist);
881                                 mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
882                                 mutex_unlock(&memcg_tasklist);
883                                 mem_over_limit->last_oom_jiffies = jiffies;
884                         }
885                         goto nomem;
886                 }
887         }
888         return 0;
889 nomem:
890         css_put(&mem->css);
891         return -ENOMEM;
892 }
893
894 static struct mem_cgroup *try_get_mem_cgroup_from_swapcache(struct page *page)
895 {
896         struct mem_cgroup *mem;
897         swp_entry_t ent;
898
899         if (!PageSwapCache(page))
900                 return NULL;
901
902         ent.val = page_private(page);
903         mem = lookup_swap_cgroup(ent);
904         if (!mem)
905                 return NULL;
906         if (!css_tryget(&mem->css))
907                 return NULL;
908         return mem;
909 }
910
911 /*
912  * Commit a charge obtained by __mem_cgroup_try_charge() and make the
913  * page_cgroup USED. If it is already USED, uncharge and return.
914  */
915
916 static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
917                                      struct page_cgroup *pc,
918                                      enum charge_type ctype)
919 {
920         /* try_charge() can return NULL in *memcg; handle that case here. */
921         if (!mem)
922                 return;
923
924         lock_page_cgroup(pc);
925         if (unlikely(PageCgroupUsed(pc))) {
926                 unlock_page_cgroup(pc);
927                 res_counter_uncharge(&mem->res, PAGE_SIZE);
928                 if (do_swap_account)
929                         res_counter_uncharge(&mem->memsw, PAGE_SIZE);
930                 css_put(&mem->css);
931                 return;
932         }
933         pc->mem_cgroup = mem;
934         smp_wmb();
935         pc->flags = pcg_default_flags[ctype];
936
937         mem_cgroup_charge_statistics(mem, pc, true);
938
939         unlock_page_cgroup(pc);
940 }
941
942 /**
943  * mem_cgroup_move_account - move account of the page
944  * @pc: page_cgroup of the page.
945  * @from: mem_cgroup which the page is moved from.
946  * @to: mem_cgroup which the page is moved to. @from != @to.
947  *
948  * The caller must confirm the following:
949  * - the page is not on the LRU (isolate_page() is useful.)
950  *
951  * Returns 0 on success, or
952  * -EBUSY when the lock is busy or "pc" is unstable.
953  *
954  * This function does "uncharge" from old cgroup but doesn't do "charge" to
955  * new cgroup. It should be done by a caller.
956  */
957
958 static int mem_cgroup_move_account(struct page_cgroup *pc,
959         struct mem_cgroup *from, struct mem_cgroup *to)
960 {
961         struct mem_cgroup_per_zone *from_mz, *to_mz;
962         int nid, zid;
963         int ret = -EBUSY;
964
965         VM_BUG_ON(from == to);
966         VM_BUG_ON(PageLRU(pc->page));
967
968         nid = page_cgroup_nid(pc);
969         zid = page_cgroup_zid(pc);
970         from_mz =  mem_cgroup_zoneinfo(from, nid, zid);
971         to_mz =  mem_cgroup_zoneinfo(to, nid, zid);
972
973         if (!trylock_page_cgroup(pc))
974                 return ret;
975
976         if (!PageCgroupUsed(pc))
977                 goto out;
978
979         if (pc->mem_cgroup != from)
980                 goto out;
981
982         css_put(&from->css);
983         res_counter_uncharge(&from->res, PAGE_SIZE);
984         mem_cgroup_charge_statistics(from, pc, false);
985         if (do_swap_account)
986                 res_counter_uncharge(&from->memsw, PAGE_SIZE);
987         pc->mem_cgroup = to;
988         mem_cgroup_charge_statistics(to, pc, true);
989         css_get(&to->css);
990         ret = 0;
991 out:
992         unlock_page_cgroup(pc);
993         return ret;
994 }
995
996 /*
997  * move charges to its parent.
998  */
999
1000 static int mem_cgroup_move_parent(struct page_cgroup *pc,
1001                                   struct mem_cgroup *child,
1002                                   gfp_t gfp_mask)
1003 {
1004         struct page *page = pc->page;
1005         struct cgroup *cg = child->css.cgroup;
1006         struct cgroup *pcg = cg->parent;
1007         struct mem_cgroup *parent;
1008         int ret;
1009
1010         /* Is ROOT ? */
1011         if (!pcg)
1012                 return -EINVAL;
1013
1014
1015         parent = mem_cgroup_from_cont(pcg);
1016
1017
1018         ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
1019         if (ret || !parent)
1020                 return ret;
1021
1022         if (!get_page_unless_zero(page))
1023                 return -EBUSY;
1024
1025         ret = isolate_lru_page(page);
1026
1027         if (ret)
1028                 goto cancel;
1029
1030         ret = mem_cgroup_move_account(pc, child, parent);
1031
1032         /* drop the extra refcnt taken by try_charge() (move_account incremented one) */
1033         css_put(&parent->css);
1034         putback_lru_page(page);
1035         if (!ret) {
1036                 put_page(page);
1037                 return 0;
1038         }
1039         /* uncharge if move fails */
1040 cancel:
1041         res_counter_uncharge(&parent->res, PAGE_SIZE);
1042         if (do_swap_account)
1043                 res_counter_uncharge(&parent->memsw, PAGE_SIZE);
1044         put_page(page);
1045         return ret;
1046 }
1047
1048 /*
1049  * Charge the memory controller for page usage.
1050  * Return
1051  * 0 if the charge was successful
1052  * < 0 if the cgroup is over its limit
1053  */
1054 static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
1055                                 gfp_t gfp_mask, enum charge_type ctype,
1056                                 struct mem_cgroup *memcg)
1057 {
1058         struct mem_cgroup *mem;
1059         struct page_cgroup *pc;
1060         int ret;
1061
1062         pc = lookup_page_cgroup(page);
1063         /* can happen at boot */
1064         if (unlikely(!pc))
1065                 return 0;
1066         prefetchw(pc);
1067
1068         mem = memcg;
1069         ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
1070         if (ret || !mem)
1071                 return ret;
1072
1073         __mem_cgroup_commit_charge(mem, pc, ctype);
1074         return 0;
1075 }
1076
1077 int mem_cgroup_newpage_charge(struct page *page,
1078                               struct mm_struct *mm, gfp_t gfp_mask)
1079 {
1080         if (mem_cgroup_disabled())
1081                 return 0;
1082         if (PageCompound(page))
1083                 return 0;
1084         /*
1085          * If already mapped, we don't have to account.
1086          * If page cache, page->mapping has an address_space.
1087          * But page->mapping may hold an out-of-use anon_vma pointer;
1088          * detect that with a PageAnon() check. A newly-mapped anon page's
1089          * page->mapping is NULL.
1090          */
1091         if (page_mapped(page) || (page->mapping && !PageAnon(page)))
1092                 return 0;
1093         if (unlikely(!mm))
1094                 mm = &init_mm;
1095         return mem_cgroup_charge_common(page, mm, gfp_mask,
1096                                 MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
1097 }
1098
1099 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
1100                                 gfp_t gfp_mask)
1101 {
1102         struct mem_cgroup *mem = NULL;
1103         int ret;
1104
1105         if (mem_cgroup_disabled())
1106                 return 0;
1107         if (PageCompound(page))
1108                 return 0;
1109         /*
1110          * Corner case handling. This is usually called from
1111          * add_to_page_cache(). But some filesystems (shmem) precharge the
1112          * page before calling it and call add_to_page_cache() with GFP_NOWAIT.
1113          *
1114          * In the GFP_NOWAIT case, the page may be pre-charged before calling
1115          * add_to_page_cache(). (See shmem.c.) Check for that here and avoid
1116          * charging twice. (It works, but at a somewhat larger cost.)
1117          * And when the page is SwapCache, swap information should be taken
1118          * into account. This is under lock_page() now.
1119          */
1120         if (!(gfp_mask & __GFP_WAIT)) {
1121                 struct page_cgroup *pc;
1122
1123
1124                 pc = lookup_page_cgroup(page);
1125                 if (!pc)
1126                         return 0;
1127                 lock_page_cgroup(pc);
1128                 if (PageCgroupUsed(pc)) {
1129                         unlock_page_cgroup(pc);
1130                         return 0;
1131                 }
1132                 unlock_page_cgroup(pc);
1133         }
1134
1135         if (do_swap_account && PageSwapCache(page)) {
1136                 mem = try_get_mem_cgroup_from_swapcache(page);
1137                 if (mem)
1138                         mm = NULL;
1139                 else
1140                         mem = NULL;
1141                 /* SwapCache may be still linked to LRU now. */
1142                 mem_cgroup_lru_del_before_commit_swapcache(page);
1143         }
1144
1145         if (unlikely(!mm && !mem))
1146                 mm = &init_mm;
1147
1148         if (page_is_file_cache(page))
1149                 return mem_cgroup_charge_common(page, mm, gfp_mask,
1150                                 MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
1151
1152         ret = mem_cgroup_charge_common(page, mm, gfp_mask,
1153                                 MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
1154         if (mem)
1155                 css_put(&mem->css);
1156         if (PageSwapCache(page))
1157                 mem_cgroup_lru_add_after_commit_swapcache(page);
1158
1159         if (do_swap_account && !ret && PageSwapCache(page)) {
1160                 swp_entry_t ent = {.val = page_private(page)};
1161                 /* avoid double counting */
1162                 mem = swap_cgroup_record(ent, NULL);
1163                 if (mem) {
1164                         res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1165                         mem_cgroup_put(mem);
1166                 }
1167         }
1168         return ret;
1169 }
1170
1171 /*
1172  * During swap-in (try_charge -> commit or cancel), the page is locked.
1173  * When try_charge() returns successfully, one refcnt on the memcg, not
1174  * yet tied to a struct page_cgroup, is acquired. This refcnt will be
1175  * consumed by "commit()" or released by "cancel()".
1176  */
1177 int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
1178                                  struct page *page,
1179                                  gfp_t mask, struct mem_cgroup **ptr)
1180 {
1181         struct mem_cgroup *mem;
1182         int ret;
1183
1184         if (mem_cgroup_disabled())
1185                 return 0;
1186
1187         if (!do_swap_account)
1188                 goto charge_cur_mm;
1189         /*
1190          * A racing thread's fault, or swapoff, may have already updated
1191          * the pte, and even removed page from swap cache: return success
1192          * to go on to do_swap_page()'s pte_same() test, which should fail.
1193          */
1194         if (!PageSwapCache(page))
1195                 return 0;
1196         mem = try_get_mem_cgroup_from_swapcache(page);
1197         if (!mem)
1198                 goto charge_cur_mm;
1199         *ptr = mem;
1200         ret = __mem_cgroup_try_charge(NULL, mask, ptr, true);
1201         /* drop extra refcnt from tryget */
1202         css_put(&mem->css);
1203         return ret;
1204 charge_cur_mm:
1205         if (unlikely(!mm))
1206                 mm = &init_mm;
1207         return __mem_cgroup_try_charge(mm, mask, ptr, true);
1208 }
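
/*
 * Caller-side sketch of the protocol above (hedged: modeled on the
 * do_swap_page() path rather than quoted from it; error handling elided):
 *
 *	struct mem_cgroup *ptr = NULL;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
 *		goto oom;	(charge failed)
 *	...			(map the page, set the pte)
 *	mem_cgroup_commit_charge_swapin(page, ptr);
 *
 * On a failure after a successful try_charge, the caller must instead call
 * mem_cgroup_cancel_charge_swapin(ptr) to release the charge and refcnt.
 */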
1209
1210 void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
1211 {
1212         struct page_cgroup *pc;
1213
1214         if (mem_cgroup_disabled())
1215                 return;
1216         if (!ptr)
1217                 return;
1218         pc = lookup_page_cgroup(page);
1219         mem_cgroup_lru_del_before_commit_swapcache(page);
1220         __mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
1221         mem_cgroup_lru_add_after_commit_swapcache(page);
1222         /*
1223          * Now the swap entry is in memory. This means the page may be
1224          * counted both as mem and as swap (a double count).
1225          * Fix it by uncharging from memsw. Basically, this SwapCache is stable
1226          * under lock_page(). But in do_swap_page()::memory.c, reuse_swap_page()
1227          * may call delete_from_swap_cache() before we reach here.
1228          */
1229         if (do_swap_account && PageSwapCache(page)) {
1230                 swp_entry_t ent = {.val = page_private(page)};
1231                 struct mem_cgroup *memcg;
1232                 memcg = swap_cgroup_record(ent, NULL);
1233                 if (memcg) {
1234                         res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1235                         mem_cgroup_put(memcg);
1236                 }
1237
1238         }
1239         /* add this page (page_cgroup) to the LRU we want. */
1240
1241 }
1242
1243 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
1244 {
1245         if (mem_cgroup_disabled())
1246                 return;
1247         if (!mem)
1248                 return;
1249         res_counter_uncharge(&mem->res, PAGE_SIZE);
1250         if (do_swap_account)
1251                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1252         css_put(&mem->css);
1253 }
1254
1255
1256 /*
1257  * uncharge if !page_mapped(page)
1258  */
1259 static struct mem_cgroup *
1260 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
1261 {
1262         struct page_cgroup *pc;
1263         struct mem_cgroup *mem = NULL;
1264         struct mem_cgroup_per_zone *mz;
1265
1266         if (mem_cgroup_disabled())
1267                 return NULL;
1268
1269         if (PageSwapCache(page))
1270                 return NULL;
1271
1272         /*
1273          * Check if our page_cgroup is valid
1274          */
1275         pc = lookup_page_cgroup(page);
1276         if (unlikely(!pc || !PageCgroupUsed(pc)))
1277                 return NULL;
1278
1279         lock_page_cgroup(pc);
1280
1281         mem = pc->mem_cgroup;
1282
1283         if (!PageCgroupUsed(pc))
1284                 goto unlock_out;
1285
1286         switch (ctype) {
1287         case MEM_CGROUP_CHARGE_TYPE_MAPPED:
1288                 if (page_mapped(page))
1289                         goto unlock_out;
1290                 break;
1291         case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
1292                 if (!PageAnon(page)) {  /* Shared memory */
1293                         if (page->mapping && !page_is_file_cache(page))
1294                                 goto unlock_out;
1295                 } else if (page_mapped(page)) /* Anon */
1296                                 goto unlock_out;
1297                 break;
1298         default:
1299                 break;
1300         }
1301
1302         res_counter_uncharge(&mem->res, PAGE_SIZE);
1303         if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
1304                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1305
1306         mem_cgroup_charge_statistics(mem, pc, false);
1307         ClearPageCgroupUsed(pc);
1308         /*
1309          * pc->mem_cgroup is not cleared here. It will be accessed when it's
1310          * freed from the LRU. This is safe because an uncharged page is expected
1311          * not to be reused (freed soon). The exception is SwapCache, handled by
1312          * special functions.
1313          */
1314
1315         mz = page_cgroup_zoneinfo(pc);
1316         unlock_page_cgroup(pc);
1317
1318         /* at swapout, this memcg will be accessed to record to swap */
1319         if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
1320                 css_put(&mem->css);
1321
1322         return mem;
1323
1324 unlock_out:
1325         unlock_page_cgroup(pc);
1326         return NULL;
1327 }
1328
1329 void mem_cgroup_uncharge_page(struct page *page)
1330 {
1331         /* early check. */
1332         if (page_mapped(page))
1333                 return;
1334         if (page->mapping && !PageAnon(page))
1335                 return;
1336         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
1337 }
1338
1339 void mem_cgroup_uncharge_cache_page(struct page *page)
1340 {
1341         VM_BUG_ON(page_mapped(page));
1342         VM_BUG_ON(page->mapping);
1343         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
1344 }
1345
1346 /*
1347  * Called from __delete_from_swap_cache() to drop the "page" account.
1348  * The memcg information is recorded in the swap_cgroup of "ent".
1349  */
1350 void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
1351 {
1352         struct mem_cgroup *memcg;
1353
1354         memcg = __mem_cgroup_uncharge_common(page,
1355                                         MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
1356         /* record memcg information */
1357         if (do_swap_account && memcg) {
1358                 swap_cgroup_record(ent, memcg);
1359                 mem_cgroup_get(memcg);
1360         }
1361         if (memcg)
1362                 css_put(&memcg->css);
1363 }
1364
1365 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
1366 /*
1367  * Called from swap_entry_free(). Remove the record in swap_cgroup and
1368  * uncharge the "memsw" account.
1369  */
1370 void mem_cgroup_uncharge_swap(swp_entry_t ent)
1371 {
1372         struct mem_cgroup *memcg;
1373
1374         if (!do_swap_account)
1375                 return;
1376
1377         memcg = swap_cgroup_record(ent, NULL);
1378         if (memcg) {
1379                 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1380                 mem_cgroup_put(memcg);
1381         }
1382 }
1383 #endif
1384
1385 /*
1386  * Before starting migration, account PAGE_SIZE to the mem_cgroup that the
1387  * old page belongs to.
1388  */
1389 int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
1390 {
1391         struct page_cgroup *pc;
1392         struct mem_cgroup *mem = NULL;
1393         int ret = 0;
1394
1395         if (mem_cgroup_disabled())
1396                 return 0;
1397
1398         pc = lookup_page_cgroup(page);
1399         lock_page_cgroup(pc);
1400         if (PageCgroupUsed(pc)) {
1401                 mem = pc->mem_cgroup;
1402                 css_get(&mem->css);
1403         }
1404         unlock_page_cgroup(pc);
1405
1406         if (mem) {
1407                 ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem, false);
1408                 css_put(&mem->css);
1409         }
1410         *ptr = mem;
1411         return ret;
1412 }
1413
1414 /* remove the redundant charge if migration failed */
1415 void mem_cgroup_end_migration(struct mem_cgroup *mem,
1416                 struct page *oldpage, struct page *newpage)
1417 {
1418         struct page *target, *unused;
1419         struct page_cgroup *pc;
1420         enum charge_type ctype;
1421
1422         if (!mem)
1423                 return;
1424
1425         /* at migration success, oldpage->mapping is NULL. */
1426         if (oldpage->mapping) {
1427                 target = oldpage;
1428                 unused = NULL;
1429         } else {
1430                 target = newpage;
1431                 unused = oldpage;
1432         }
1433
1434         if (PageAnon(target))
1435                 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
1436         else if (page_is_file_cache(target))
1437                 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
1438         else
1439                 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
1440
1441         /* unused page is not on radix-tree now. */
1442         if (unused)
1443                 __mem_cgroup_uncharge_common(unused, ctype);
1444
1445         pc = lookup_page_cgroup(target);
1446         /*
1447          * __mem_cgroup_commit_charge() checks the PCG_USED bit of the page_cgroup.
1448          * So, double-counting is effectively avoided.
1449          */
1450         __mem_cgroup_commit_charge(mem, pc, ctype);
1451
1452         /*
1453          * Both of oldpage and newpage are still under lock_page().
1454          * Then, we don't have to care about races in the radix-tree.
1455          * But we have to be careful about whether this page is mapped or not.
1456          *
1457          * There is a case for !page_mapped(). At the start of
1458          * migration, oldpage was mapped. But now, it's zapped.
1459          * But we know *target* page is not freed/reused under us.
1460          * mem_cgroup_uncharge_page() does all necessary checks.
1461          */
1462         if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
1463                 mem_cgroup_uncharge_page(target);
1464 }
1465
1466 /*
1467  * A call to try to shrink memory usage under the specified resource controller.
1468  * This is typically used for page reclaim of shmem, to reduce the side
1469  * effect of page allocation from shmem, which is used by some mem_cgroups.
1470  */
1471 int mem_cgroup_shrink_usage(struct page *page,
1472                             struct mm_struct *mm,
1473                             gfp_t gfp_mask)
1474 {
1475         struct mem_cgroup *mem = NULL;
1476         int progress = 0;
1477         int retry = MEM_CGROUP_RECLAIM_RETRIES;
1478
1479         if (mem_cgroup_disabled())
1480                 return 0;
1481         if (page)
1482                 mem = try_get_mem_cgroup_from_swapcache(page);
1483         if (!mem && mm)
1484                 mem = try_get_mem_cgroup_from_mm(mm);
1485         if (unlikely(!mem))
1486                 return 0;
1487
1488         do {
1489                 progress = mem_cgroup_hierarchical_reclaim(mem, gfp_mask, true);
1490                 progress += mem_cgroup_check_under_limit(mem);
1491         } while (!progress && --retry);
1492
1493         css_put(&mem->css);
1494         if (!retry)
1495                 return -ENOMEM;
1496         return 0;
1497 }
1498
1499 static DEFINE_MUTEX(set_limit_mutex);
1500
1501 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
1502                                 unsigned long long val)
1503 {
1504
1505         int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
1506         int progress;
1507         u64 memswlimit;
1508         int ret = 0;
1509
1510         while (retry_count) {
1511                 if (signal_pending(current)) {
1512                         ret = -EINTR;
1513                         break;
1514                 }
1515                 /*
1516                  * Rather than hide all of this in some function, I do it in an
1517                  * open-coded manner, so you can see what this really does.
1518                  * We have to guarantee mem->res.limit <= mem->memsw.limit.
1519                  */
1520                 mutex_lock(&set_limit_mutex);
1521                 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1522                 if (memswlimit < val) {
1523                         ret = -EINVAL;
1524                         mutex_unlock(&set_limit_mutex);
1525                         break;
1526                 }
1527                 ret = res_counter_set_limit(&memcg->res, val);
1528                 mutex_unlock(&set_limit_mutex);
1529
1530                 if (!ret)
1531                         break;
1532
1533                 progress = mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL,
1534                                                            false);
1535                 if (!progress) retry_count--;
1536         }
1537
1538         return ret;
1539 }
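
/*
 * Usage sketch (illustrative; the cgroup mount point and group name are
 * made up): the loop above runs when userspace lowers the limit, e.g.
 *
 *	# mount -t cgroup -o memory none /cgroups
 *	# echo 64M > /cgroups/grp0/memory.limit_in_bytes
 *
 * If current usage still exceeds the new limit, hierarchical reclaim is
 * retried up to MEM_CGROUP_RECLAIM_RETRIES times before giving up.
 */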
1540
1541 int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
1542                                 unsigned long long val)
1543 {
1544         int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
1545         u64 memlimit, oldusage, curusage;
1546         int ret;
1547
1548         if (!do_swap_account)
1549                 return -EINVAL;
1550
1551         while (retry_count) {
1552                 if (signal_pending(current)) {
1553                         ret = -EINTR;
1554                         break;
1555                 }
1556                 /*
1557                  * Rather than hide all of this in some function, I do it in an
1558                  * open-coded manner, so you can see what this really does.
1559                  * We have to guarantee mem->res.limit <= mem->memsw.limit.
1560                  */
1561                 mutex_lock(&set_limit_mutex);
1562                 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1563                 if (memlimit > val) {
1564                         ret = -EINVAL;
1565                         mutex_unlock(&set_limit_mutex);
1566                         break;
1567                 }
1568                 ret = res_counter_set_limit(&memcg->memsw, val);
1569                 mutex_unlock(&set_limit_mutex);
1570
1571                 if (!ret)
1572                         break;
1573
1574                 oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
1575                 mem_cgroup_hierarchical_reclaim(memcg, GFP_KERNEL, true);
1576                 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
1577                 if (curusage >= oldusage)
1578                         retry_count--;
1579         }
1580         return ret;
1581 }
1582
1583 /*
 * This routine traverses the page_cgroups on the given LRU list and
 * drops them all.  It does not reclaim the pages themselves; it only
 * moves their charges to the parent cgroup.
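 *
 * A margin of extra loop iterations guards against entries that keep
 * returning -EBUSY; a busy page_cgroup is rotated to the other end of
 * the list and retried later.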
1586  */
1587 static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
1588                                 int node, int zid, enum lru_list lru)
1589 {
1590         struct zone *zone;
1591         struct mem_cgroup_per_zone *mz;
1592         struct page_cgroup *pc, *busy;
1593         unsigned long flags, loop;
1594         struct list_head *list;
1595         int ret = 0;
1596
1597         zone = &NODE_DATA(node)->node_zones[zid];
1598         mz = mem_cgroup_zoneinfo(mem, node, zid);
1599         list = &mz->lists[lru];
1600
1601         loop = MEM_CGROUP_ZSTAT(mz, lru);
        /* give some margin against -EBUSY etc... */
1603         loop += 256;
1604         busy = NULL;
1605         while (loop--) {
1606                 ret = 0;
1607                 spin_lock_irqsave(&zone->lru_lock, flags);
1608                 if (list_empty(list)) {
1609                         spin_unlock_irqrestore(&zone->lru_lock, flags);
1610                         break;
1611                 }
1612                 pc = list_entry(list->prev, struct page_cgroup, lru);
1613                 if (busy == pc) {
1614                         list_move(&pc->lru, list);
                        busy = NULL;
1616                         spin_unlock_irqrestore(&zone->lru_lock, flags);
1617                         continue;
1618                 }
1619                 spin_unlock_irqrestore(&zone->lru_lock, flags);
1620
1621                 ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
1622                 if (ret == -ENOMEM)
1623                         break;
1624
1625                 if (ret == -EBUSY || ret == -EINVAL) {
1626                         /* found lock contention or "pc" is obsolete. */
1627                         busy = pc;
1628                         cond_resched();
                } else {
                        busy = NULL;
                }
1631         }
1632
1633         if (!ret && !list_empty(list))
1634                 return -EBUSY;
1635         return ret;
1636 }
1637
1638 /*
 * Make the mem_cgroup's charge 0 if it contains no tasks.  This
 * enables the mem_cgroup to be deleted (rmdir'ed).
1641  */
1642 static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
1643 {
1644         int ret;
1645         int node, zid, shrink;
1646         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1647         struct cgroup *cgrp = mem->css.cgroup;
1648
1649         css_get(&mem->css);
1650
1651         shrink = 0;
1652         /* should free all ? */
1653         if (free_all)
1654                 goto try_to_free;
1655 move_account:
1656         while (mem->res.usage > 0) {
1657                 ret = -EBUSY;
1658                 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
1659                         goto out;
1660                 ret = -EINTR;
1661                 if (signal_pending(current))
1662                         goto out;
                /* This drains pagevecs so all *used* pages are on an LRU list. */
1664                 lru_add_drain_all();
1665                 ret = 0;
1666                 for_each_node_state(node, N_POSSIBLE) {
1667                         for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
1668                                 enum lru_list l;
1669                                 for_each_lru(l) {
1670                                         ret = mem_cgroup_force_empty_list(mem,
1671                                                         node, zid, l);
1672                                         if (ret)
1673                                                 break;
1674                                 }
1675                         }
1676                         if (ret)
1677                                 break;
1678                 }
                /* the parent cgroup apparently doesn't have enough memory */
1680                 if (ret == -ENOMEM)
1681                         goto try_to_free;
1682                 cond_resched();
1683         }
1684         ret = 0;
1685 out:
1686         css_put(&mem->css);
1687         return ret;
1688
1689 try_to_free:
        /* returns -EBUSY if there is a task or if we come here twice. */
1691         if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
1692                 ret = -EBUSY;
1693                 goto out;
1694         }
        /* try to free pages in order to make this cgroup empty */
1696         lru_add_drain_all();
1697         /* try to free all pages in this cgroup */
1698         shrink = 1;
1699         while (nr_retries && mem->res.usage > 0) {
1700                 int progress;
1701
1702                 if (signal_pending(current)) {
1703                         ret = -EINTR;
1704                         goto out;
1705                 }
1706                 progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
1707                                                 false, get_swappiness(mem));
1708                 if (!progress) {
1709                         nr_retries--;
1710                         /* maybe some writeback is necessary */
1711                         congestion_wait(WRITE, HZ/10);
1712                 }
1714         }
1715         lru_add_drain();
1716         /* try move_account...there may be some *locked* pages. */
1717         if (mem->res.usage)
1718                 goto move_account;
1719         ret = 0;
1720         goto out;
1721 }
1722
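/*
 * Trigger for the "memory.force_empty" control file.  Writing any
 * value (e.g. "echo 0 > memory.force_empty") tries to drop all
 * charges: pages are freed first, and charges of pages that cannot
 * be freed are moved to the parent.
 */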
1723 int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
1724 {
1725         return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
1726 }
1727
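/*
 * "memory.use_hierarchy": when set to 1, charges, limits and reclaim
 * apply to the whole subtree below this cgroup.  The flag can only
 * be changed while this cgroup has no children and no hierarchy is
 * imposed by the parent.
 */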
1729 static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
1730 {
1731         return mem_cgroup_from_cont(cont)->use_hierarchy;
1732 }
1733
1734 static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
1735                                         u64 val)
1736 {
1737         int retval = 0;
1738         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1739         struct cgroup *parent = cont->parent;
1740         struct mem_cgroup *parent_mem = NULL;
1741
1742         if (parent)
1743                 parent_mem = mem_cgroup_from_cont(parent);
1744
1745         cgroup_lock();
1746         /*
         * If the parent's use_hierarchy is set, we can't make any
         * modifications in the child subtrees. If it is unset, then
         * the change can occur, provided the current cgroup has no
         * children.
         *
         * For the root cgroup, parent_mem is NULL; we allow the value
         * to be set if there are no children.
1753          */
1754         if ((!parent_mem || !parent_mem->use_hierarchy) &&
1755                                 (val == 1 || val == 0)) {
1756                 if (list_empty(&cont->children))
1757                         mem->use_hierarchy = val;
1758                 else
1759                         retval = -EBUSY;
        } else {
                retval = -EINVAL;
        }
1762         cgroup_unlock();
1763
1764         return retval;
1765 }
1766
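/*
 * Read handler shared by all memory.* and memory.memsw.* counter
 * files.  cft->private packs a counter type (_MEM or _MEMSWAP) and a
 * res_counter member (usage, limit, max_usage, failcnt), unpacked
 * here via MEMFILE_TYPE()/MEMFILE_ATTR().
 */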
1767 static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
1768 {
1769         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1770         u64 val = 0;
1771         int type, name;
1772
1773         type = MEMFILE_TYPE(cft->private);
1774         name = MEMFILE_ATTR(cft->private);
1775         switch (type) {
1776         case _MEM:
1777                 val = res_counter_read_u64(&mem->res, name);
1778                 break;
1779         case _MEMSWAP:
1780                 if (do_swap_account)
1781                         val = res_counter_read_u64(&mem->memsw, name);
1782                 break;
1783         default:
1784                 BUG();
1785                 break;
1786         }
1787         return val;
1788 }
1789 /*
 * The only user of this function is the RES_LIMIT file.
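 *
 * The buffer is parsed with memparse() semantics, so suffixed values
 * are accepted, e.g. "echo 64M > memory.limit_in_bytes".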
1792  */
1793 static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
1794                             const char *buffer)
1795 {
1796         struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
1797         int type, name;
1798         unsigned long long val;
1799         int ret;
1800
1801         type = MEMFILE_TYPE(cft->private);
1802         name = MEMFILE_ATTR(cft->private);
1803         switch (name) {
1804         case RES_LIMIT:
                /* this helper does all the necessary parsing; reuse it */
1806                 ret = res_counter_memparse_write_strategy(buffer, &val);
1807                 if (ret)
1808                         break;
1809                 if (type == _MEM)
1810                         ret = mem_cgroup_resize_limit(memcg, val);
1811                 else
1812                         ret = mem_cgroup_resize_memsw_limit(memcg, val);
1813                 break;
1814         default:
1815                 ret = -EINVAL; /* should be BUG() ? */
1816                 break;
1817         }
1818         return ret;
1819 }
1820
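/*
 * Compute the effective limits seen by a cgroup by walking up the
 * hierarchy and taking the minimum limit found at each level.  Used
 * by memory.stat to report "hierarchical_memory_limit" and
 * "hierarchical_memsw_limit".
 */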
1821 static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
1822                 unsigned long long *mem_limit, unsigned long long *memsw_limit)
1823 {
1824         struct cgroup *cgroup;
1825         unsigned long long min_limit, min_memsw_limit, tmp;
1826
1827         min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1828         min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1829         cgroup = memcg->css.cgroup;
1830         if (!memcg->use_hierarchy)
1831                 goto out;
1832
1833         while (cgroup->parent) {
1834                 cgroup = cgroup->parent;
1835                 memcg = mem_cgroup_from_cont(cgroup);
1836                 if (!memcg->use_hierarchy)
1837                         break;
1838                 tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
1839                 min_limit = min(min_limit, tmp);
1840                 tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1841                 min_memsw_limit = min(min_memsw_limit, tmp);
1842         }
1843 out:
1844         *mem_limit = min_limit;
1845         *memsw_limit = min_memsw_limit;
1847 }
1848
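/*
 * Trigger for the "memory.max_usage_in_bytes" and "memory.failcnt"
 * files and their memsw counterparts: writing to one of them (e.g.
 * "echo 0 > memory.failcnt") resets the recorded watermark or the
 * failure counter.
 */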
1849 static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
1850 {
1851         struct mem_cgroup *mem;
1852         int type, name;
1853
1854         mem = mem_cgroup_from_cont(cont);
1855         type = MEMFILE_TYPE(event);
1856         name = MEMFILE_ATTR(event);
1857         switch (name) {
1858         case RES_MAX_USAGE:
1859                 if (type == _MEM)
1860                         res_counter_reset_max(&mem->res);
1861                 else
1862                         res_counter_reset_max(&mem->memsw);
1863                 break;
1864         case RES_FAILCNT:
1865                 if (type == _MEM)
1866                         res_counter_reset_failcnt(&mem->res);
1867                 else
1868                         res_counter_reset_failcnt(&mem->memsw);
1869                 break;
1870         }
1871         return 0;
1872 }
1873
1874 static const struct mem_cgroup_stat_desc {
1875         const char *msg;
1876         u64 unit;
1877 } mem_cgroup_stat_desc[] = {
1878         [MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
1879         [MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
1880         [MEM_CGROUP_STAT_PGPGIN_COUNT] = {"pgpgin", 1, },
1881         [MEM_CGROUP_STAT_PGPGOUT_COUNT] = {"pgpgout", 1, },
1882 };
1883
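/*
 * Produce the key/value pairs shown by "memory.stat": the event
 * counters above, per-LRU page totals in bytes, the effective
 * hierarchical limits and, under CONFIG_DEBUG_VM, reclaim statistics.
 */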
1884 static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
1885                                  struct cgroup_map_cb *cb)
1886 {
1887         struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
1888         struct mem_cgroup_stat *stat = &mem_cont->stat;
1889         int i;
1890
1891         for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
1892                 s64 val;
1893
1894                 val = mem_cgroup_read_stat(stat, i);
1895                 val *= mem_cgroup_stat_desc[i].unit;
1896                 cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);
1897         }
1898         /* showing # of active pages */
1899         {
1900                 unsigned long active_anon, inactive_anon;
1901                 unsigned long active_file, inactive_file;
1902                 unsigned long unevictable;
1903
1904                 inactive_anon = mem_cgroup_get_all_zonestat(mem_cont,
1905                                                 LRU_INACTIVE_ANON);
1906                 active_anon = mem_cgroup_get_all_zonestat(mem_cont,
1907                                                 LRU_ACTIVE_ANON);
1908                 inactive_file = mem_cgroup_get_all_zonestat(mem_cont,
1909                                                 LRU_INACTIVE_FILE);
1910                 active_file = mem_cgroup_get_all_zonestat(mem_cont,
1911                                                 LRU_ACTIVE_FILE);
1912                 unevictable = mem_cgroup_get_all_zonestat(mem_cont,
1913                                                         LRU_UNEVICTABLE);
1914
1915                 cb->fill(cb, "active_anon", (active_anon) * PAGE_SIZE);
1916                 cb->fill(cb, "inactive_anon", (inactive_anon) * PAGE_SIZE);
1917                 cb->fill(cb, "active_file", (active_file) * PAGE_SIZE);
1918                 cb->fill(cb, "inactive_file", (inactive_file) * PAGE_SIZE);
1919                 cb->fill(cb, "unevictable", unevictable * PAGE_SIZE);
1920
1921         }
1922         {
1923                 unsigned long long limit, memsw_limit;
1924                 memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
1925                 cb->fill(cb, "hierarchical_memory_limit", limit);
1926                 if (do_swap_account)
1927                         cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
1928         }
1929
1930 #ifdef CONFIG_DEBUG_VM
1931         cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));
1932
1933         {
1934                 int nid, zid;
1935                 struct mem_cgroup_per_zone *mz;
1936                 unsigned long recent_rotated[2] = {0, 0};
1937                 unsigned long recent_scanned[2] = {0, 0};
1938
1939                 for_each_online_node(nid)
1940                         for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1941                                 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
1942
1943                                 recent_rotated[0] +=
1944                                         mz->reclaim_stat.recent_rotated[0];
1945                                 recent_rotated[1] +=
1946                                         mz->reclaim_stat.recent_rotated[1];
1947                                 recent_scanned[0] +=
1948                                         mz->reclaim_stat.recent_scanned[0];
1949                                 recent_scanned[1] +=
1950                                         mz->reclaim_stat.recent_scanned[1];
1951                         }
1952                 cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
1953                 cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
1954                 cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
1955                 cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
1956         }
1957 #endif
1958
1959         return 0;
1960 }
1961
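/*
 * "memory.swappiness" overrides the global vm.swappiness (0..100)
 * for reclaim inside this cgroup.  It cannot be set on the root
 * cgroup, on a cgroup inside a parent-imposed hierarchy, or on a
 * hierarchy root that already has children.
 */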
1962 static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
1963 {
1964         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
1965
1966         return get_swappiness(memcg);
1967 }
1968
1969 static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
1970                                        u64 val)
1971 {
1972         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
        struct mem_cgroup *parent;

        if (val > 100)
1975                 return -EINVAL;
1976
1977         if (cgrp->parent == NULL)
1978                 return -EINVAL;
1979
1980         parent = mem_cgroup_from_cont(cgrp->parent);
        /* If under hierarchy, only an empty (childless) root can set this value */
1982         if ((parent->use_hierarchy) ||
1983             (memcg->use_hierarchy && !list_empty(&cgrp->children)))
1984                 return -EINVAL;
1985
1986         spin_lock(&memcg->reclaim_param_lock);
1987         memcg->swappiness = val;
1988         spin_unlock(&memcg->reclaim_param_lock);
1989
1990         return 0;
1991 }
1992
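/*
 * Control files created in every memory cgroup directory; the cgroup
 * core prefixes each name with "memory.".
 */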
1994 static struct cftype mem_cgroup_files[] = {
1995         {
1996                 .name = "usage_in_bytes",
1997                 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
1998                 .read_u64 = mem_cgroup_read,
1999         },
2000         {
2001                 .name = "max_usage_in_bytes",
2002                 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
2003                 .trigger = mem_cgroup_reset,
2004                 .read_u64 = mem_cgroup_read,
2005         },
2006         {
2007                 .name = "limit_in_bytes",
2008                 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
2009                 .write_string = mem_cgroup_write,
2010                 .read_u64 = mem_cgroup_read,
2011         },
2012         {
2013                 .name = "failcnt",
2014                 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
2015                 .trigger = mem_cgroup_reset,
2016                 .read_u64 = mem_cgroup_read,
2017         },
2018         {
2019                 .name = "stat",
2020                 .read_map = mem_control_stat_show,
2021         },
2022         {
2023                 .name = "force_empty",
2024                 .trigger = mem_cgroup_force_empty_write,
2025         },
2026         {
2027                 .name = "use_hierarchy",
2028                 .write_u64 = mem_cgroup_hierarchy_write,
2029                 .read_u64 = mem_cgroup_hierarchy_read,
2030         },
2031         {
2032                 .name = "swappiness",
2033                 .read_u64 = mem_cgroup_swappiness_read,
2034                 .write_u64 = mem_cgroup_swappiness_write,
2035         },
2036 };
2037
2038 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2039 static struct cftype memsw_cgroup_files[] = {
2040         {
2041                 .name = "memsw.usage_in_bytes",
2042                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
2043                 .read_u64 = mem_cgroup_read,
2044         },
2045         {
2046                 .name = "memsw.max_usage_in_bytes",
2047                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
2048                 .trigger = mem_cgroup_reset,
2049                 .read_u64 = mem_cgroup_read,
2050         },
2051         {
2052                 .name = "memsw.limit_in_bytes",
2053                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
2054                 .write_string = mem_cgroup_write,
2055                 .read_u64 = mem_cgroup_read,
2056         },
2057         {
2058                 .name = "memsw.failcnt",
2059                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
2060                 .trigger = mem_cgroup_reset,
2061                 .read_u64 = mem_cgroup_read,
2062         },
2063 };
2064
2065 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
2066 {
2067         if (!do_swap_account)
2068                 return 0;
2069         return cgroup_add_files(cont, ss, memsw_cgroup_files,
2070                                 ARRAY_SIZE(memsw_cgroup_files));
}
2072 #else
2073 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
2074 {
2075         return 0;
2076 }
2077 #endif
2078
2079 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
2080 {
2081         struct mem_cgroup_per_node *pn;
2082         struct mem_cgroup_per_zone *mz;
2083         enum lru_list l;
2084         int zone, tmp = node;
2085         /*
         * This routine is called for each possible node, but it is a
         * BUG to call kmalloc() for an offline node.
         *
         * TODO: this routine can waste a lot of memory on nodes that
         *       will never be onlined.  It would be better to use a
         *       memory hotplug callback instead.
2092          */
2093         if (!node_state(node, N_NORMAL_MEMORY))
2094                 tmp = -1;
2095         pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
2096         if (!pn)
2097                 return 1;
2098
2099         mem->info.nodeinfo[node] = pn;
2100         memset(pn, 0, sizeof(*pn));
2101
2102         for (zone = 0; zone < MAX_NR_ZONES; zone++) {
2103                 mz = &pn->zoneinfo[zone];
2104                 for_each_lru(l)
2105                         INIT_LIST_HEAD(&mz->lists[l]);
2106         }
2107         return 0;
2108 }
2109
2110 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
2111 {
2112         kfree(mem->info.nodeinfo[node]);
2113 }
2114
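/*
 * The mem_cgroup is allocated together with its per-cpu statistics
 * array, so its size depends on nr_cpu_ids; allocations of a page or
 * more fall back from kmalloc() to vmalloc().
 */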
2115 static int mem_cgroup_size(void)
2116 {
2117         int cpustat_size = nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu);
2118         return sizeof(struct mem_cgroup) + cpustat_size;
2119 }
2120
2121 static struct mem_cgroup *mem_cgroup_alloc(void)
2122 {
2123         struct mem_cgroup *mem;
2124         int size = mem_cgroup_size();
2125
2126         if (size < PAGE_SIZE)
2127                 mem = kmalloc(size, GFP_KERNEL);
2128         else
2129                 mem = vmalloc(size);
2130
2131         if (mem)
2132                 memset(mem, 0, size);
2133         return mem;
2134 }
2135
2136 /*
 * When a mem_cgroup is destroyed, references to it from swap_cgroup
 * entries can remain (scanning them all at force_empty would be too
 * costly...).
 *
 * Instead of clearing all references at force_empty, we remember the
 * number of references from swap_cgroup and free the mem_cgroup when
 * that count drops to 0.
 *
 * Removal of the cgroup itself succeeds regardless of refs from swap.
2145  */
2146
2147 static void __mem_cgroup_free(struct mem_cgroup *mem)
2148 {
2149         int node;
2150
2151         for_each_node_state(node, N_POSSIBLE)
2152                 free_mem_cgroup_per_zone_info(mem, node);
2153
2154         if (mem_cgroup_size() < PAGE_SIZE)
2155                 kfree(mem);
2156         else
2157                 vfree(mem);
2158 }
2159
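/*
 * Each recorded swap entry takes one reference (mem_cgroup_get());
 * together with the initial reference from mem_cgroup_create(), the
 * final mem_cgroup_put() frees the structure.
 */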
2160 static void mem_cgroup_get(struct mem_cgroup *mem)
2161 {
2162         atomic_inc(&mem->refcnt);
2163 }
2164
2165 static void mem_cgroup_put(struct mem_cgroup *mem)
2166 {
2167         if (atomic_dec_and_test(&mem->refcnt))
2168                 __mem_cgroup_free(mem);
2169 }
2170
2172 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2173 static void __init enable_swap_cgroup(void)
2174 {
2175         if (!mem_cgroup_disabled() && really_do_swap_account)
2176                 do_swap_account = 1;
2177 }
2178 #else
2179 static void __init enable_swap_cgroup(void)
2180 {
2181 }
2182 #endif
2183
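/*
 * Called by the cgroup core when a new memory cgroup directory is
 * created.  The root cgroup additionally enables swap accounting;
 * children inherit use_hierarchy and swappiness from their parent,
 * and under use_hierarchy the res_counters are chained to the
 * parent's so that charges propagate upwards.
 */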
2184 static struct cgroup_subsys_state *
2185 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
2186 {
2187         struct mem_cgroup *mem, *parent;
2188         int node;
2189
2190         mem = mem_cgroup_alloc();
2191         if (!mem)
2192                 return ERR_PTR(-ENOMEM);
2193
2194         for_each_node_state(node, N_POSSIBLE)
2195                 if (alloc_mem_cgroup_per_zone_info(mem, node))
2196                         goto free_out;
2197         /* root ? */
2198         if (cont->parent == NULL) {
2199                 enable_swap_cgroup();
2200                 parent = NULL;
2201         } else {
2202                 parent = mem_cgroup_from_cont(cont->parent);
2203                 mem->use_hierarchy = parent->use_hierarchy;
2204         }
2205
2206         if (parent && parent->use_hierarchy) {
2207                 res_counter_init(&mem->res, &parent->res);
2208                 res_counter_init(&mem->memsw, &parent->memsw);
2209         } else {
2210                 res_counter_init(&mem->res, NULL);
2211                 res_counter_init(&mem->memsw, NULL);
2212         }
2213         mem->last_scanned_child = NULL;
2214         spin_lock_init(&mem->reclaim_param_lock);
2215
2216         if (parent)
2217                 mem->swappiness = get_swappiness(parent);
2218         atomic_set(&mem->refcnt, 1);
2219         return &mem->css;
2220 free_out:
2221         __mem_cgroup_free(mem);
2222         return ERR_PTR(-ENOMEM);
2223 }
2224
2225 static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
2226                                         struct cgroup *cont)
2227 {
2228         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
2229         mem_cgroup_force_empty(mem, false);
2230 }
2231
2232 static void mem_cgroup_destroy(struct cgroup_subsys *ss,
2233                                 struct cgroup *cont)
2234 {
2235         mem_cgroup_put(mem_cgroup_from_cont(cont));
2236 }
2237
2238 static int mem_cgroup_populate(struct cgroup_subsys *ss,
2239                                 struct cgroup *cont)
2240 {
2241         int ret;
2242
2243         ret = cgroup_add_files(cont, ss, mem_cgroup_files,
2244                                 ARRAY_SIZE(mem_cgroup_files));
2245
2246         if (!ret)
2247                 ret = register_memsw_files(cont, ss);
2248         return ret;
2249 }
2250
2251 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
2252                                 struct cgroup *cont,
2253                                 struct cgroup *old_cont,
2254                                 struct task_struct *p)
2255 {
2256         mutex_lock(&memcg_tasklist);
2257         /*
2258          * FIXME: It's better to move charges of this process from old
2259          * memcg to new memcg. But it's just on TODO-List now.
2260          */
2261         mutex_unlock(&memcg_tasklist);
2262 }
2263
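/*
 * Subsystem registration.  Userspace typically mounts the controller
 * with something like "mount -t cgroup -o memory none /cgroups" and
 * then manages groups as subdirectories.
 */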
2264 struct cgroup_subsys mem_cgroup_subsys = {
2265         .name = "memory",
2266         .subsys_id = mem_cgroup_subsys_id,
2267         .create = mem_cgroup_create,
2268         .pre_destroy = mem_cgroup_pre_destroy,
2269         .destroy = mem_cgroup_destroy,
2270         .populate = mem_cgroup_populate,
2271         .attach = mem_cgroup_move_task,
2272         .early_init = 0,
2273 };
2274
2275 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2276
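/*
 * Passing "noswapaccount" on the kernel command line disables swap
 * accounting even when CONFIG_CGROUP_MEM_RES_CTLR_SWAP is built in.
 */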
2277 static int __init disable_swap_account(char *s)
2278 {
2279         really_do_swap_account = 0;
2280         return 1;
2281 }
2282 __setup("noswapaccount", disable_swap_account);
2283 #endif