memcg: swappiness
mm/memcontrol.c
1 /* memcontrol.c - Memory Controller
2  *
3  * Copyright IBM Corporation, 2007
4  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5  *
6  * Copyright 2007 OpenVZ SWsoft Inc
7  * Author: Pavel Emelianov <xemul@openvz.org>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  */
19
20 #include <linux/res_counter.h>
21 #include <linux/memcontrol.h>
22 #include <linux/cgroup.h>
23 #include <linux/mm.h>
24 #include <linux/pagemap.h>
25 #include <linux/smp.h>
26 #include <linux/page-flags.h>
27 #include <linux/backing-dev.h>
28 #include <linux/bit_spinlock.h>
29 #include <linux/rcupdate.h>
30 #include <linux/mutex.h>
31 #include <linux/slab.h>
32 #include <linux/swap.h>
33 #include <linux/spinlock.h>
34 #include <linux/fs.h>
35 #include <linux/seq_file.h>
36 #include <linux/vmalloc.h>
37 #include <linux/mm_inline.h>
38 #include <linux/page_cgroup.h>
39 #include "internal.h"
40
41 #include <asm/uaccess.h>
42
43 struct cgroup_subsys mem_cgroup_subsys __read_mostly;
44 #define MEM_CGROUP_RECLAIM_RETRIES      5
45
46 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
47 /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
48 int do_swap_account __read_mostly;
49 static int really_do_swap_account __initdata = 1; /* to remember the boot option */
50 #else
51 #define do_swap_account         (0)
52 #endif
53
54
55 /*
56  * Statistics for memory cgroup.
57  */
58 enum mem_cgroup_stat_index {
59         /*
60          * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
61          */
62         MEM_CGROUP_STAT_CACHE,     /* # of pages charged as cache */
63         MEM_CGROUP_STAT_RSS,       /* # of pages charged as rss */
64         MEM_CGROUP_STAT_PGPGIN_COUNT,   /* # of pages paged in */
65         MEM_CGROUP_STAT_PGPGOUT_COUNT,  /* # of pages paged out */
66
67         MEM_CGROUP_STAT_NSTATS,
68 };
69
70 struct mem_cgroup_stat_cpu {
71         s64 count[MEM_CGROUP_STAT_NSTATS];
72 } ____cacheline_aligned_in_smp;
73
74 struct mem_cgroup_stat {
75         struct mem_cgroup_stat_cpu cpustat[0];
76 };
77
78 /*
79  * For accounting in irq-disabled context; no need to increment the preempt count.
80  */
81 static inline void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat_cpu *stat,
82                 enum mem_cgroup_stat_index idx, int val)
83 {
84         stat->count[idx] += val;
85 }
86
87 static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
88                 enum mem_cgroup_stat_index idx)
89 {
90         int cpu;
91         s64 ret = 0;
92         for_each_possible_cpu(cpu)
93                 ret += stat->cpustat[cpu].count[idx];
94         return ret;
95 }
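/*
 * Illustrative reader sketch (not part of the original file): the per-cpu
 * counters are summed the same way for any index; e.g. the number of pages
 * currently charged as rss is read as
 *
 *	s64 rss = mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
 *
 * The loop runs over all possible CPUs, so CPUs that have been hotplugged
 * out still contribute whatever they accumulated.
 */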
96
97 /*
98  * per-zone information in memory controller.
99  */
100 struct mem_cgroup_per_zone {
101         /*
102          * spin_lock to protect the per cgroup LRU
103          */
104         struct list_head        lists[NR_LRU_LISTS];
105         unsigned long           count[NR_LRU_LISTS];
106
107         struct zone_reclaim_stat reclaim_stat;
108 };
109 /* Macro for accessing counter */
110 #define MEM_CGROUP_ZSTAT(mz, idx)       ((mz)->count[(idx)])
111
112 struct mem_cgroup_per_node {
113         struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
114 };
115
116 struct mem_cgroup_lru_info {
117         struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
118 };
119
120 /*
121  * The memory controller data structure. The memory controller controls both
122  * page cache and RSS per cgroup. We would eventually like to provide
123  * statistics based on the statistics developed by Rik Van Riel for clock-pro,
124  * to help the administrator determine what knobs to tune.
125  *
126  * TODO: Add a watermark for the memory controller. Reclaim will begin when
127  * we hit the watermark. Maybe even add a low watermark, such that
128  * no reclaim occurs from a cgroup at its low watermark; this is
129  * a feature that will be implemented much later in the future.
130  */
131 struct mem_cgroup {
132         struct cgroup_subsys_state css;
133         /*
134          * the counter to account for memory usage
135          */
136         struct res_counter res;
137         /*
138          * the counter to account for mem+swap usage.
139          */
140         struct res_counter memsw;
141         /*
142          * Per cgroup active and inactive list, similar to the
143          * per zone LRU lists.
144          */
145         struct mem_cgroup_lru_info info;
146
147         /*
148          * protects the reclaim-related members below.
149          */
150         spinlock_t reclaim_param_lock;
151
152         int     prev_priority;  /* for recording reclaim priority */
153
154         /*
155          * While reclaiming in a hierarchy, we cache the last child we
156          * reclaimed from. Protected by cgroup_lock()
157          */
158         struct mem_cgroup *last_scanned_child;
159         /*
160          * Should the accounting and control be hierarchical, per subtree?
161          */
162         bool use_hierarchy;
163         unsigned long   last_oom_jiffies;
164         int             obsolete;
165         atomic_t        refcnt;
166
167         unsigned int    swappiness;
168
169
170         unsigned int inactive_ratio;
171
172         /*
173          * statistics. This must be placed at the end of memcg.
174          */
175         struct mem_cgroup_stat stat;
176 };
177
178 enum charge_type {
179         MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
180         MEM_CGROUP_CHARGE_TYPE_MAPPED,
181         MEM_CGROUP_CHARGE_TYPE_SHMEM,   /* used by page migration of shmem */
182         MEM_CGROUP_CHARGE_TYPE_FORCE,   /* used by force_empty */
183         MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
184         NR_CHARGE_TYPE,
185 };
186
187 /* used only in this file (for readability) */
188 #define PCGF_CACHE      (1UL << PCG_CACHE)
189 #define PCGF_USED       (1UL << PCG_USED)
190 #define PCGF_LOCK       (1UL << PCG_LOCK)
191 static const unsigned long
192 pcg_default_flags[NR_CHARGE_TYPE] = {
193         PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* File Cache */
194         PCGF_USED | PCGF_LOCK, /* Anon */
195         PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */
196         0, /* FORCE */
197 };
198
199 /* for encoding cft->private value on file */
200 #define _MEM                    (0)
201 #define _MEMSWAP                (1)
202 #define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
203 #define MEMFILE_TYPE(val)       (((val) >> 16) & 0xffff)
204 #define MEMFILE_ATTR(val)       ((val) & 0xffff)
205
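/*
 * Illustrative expansion (not part of the original file): a memsw limit
 * file would use cft->private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
 * i.e. (1 << 16) | RES_LIMIT; MEMFILE_TYPE() then recovers _MEMSWAP and
 * MEMFILE_ATTR() recovers RES_LIMIT.
 */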
206 static void mem_cgroup_get(struct mem_cgroup *mem);
207 static void mem_cgroup_put(struct mem_cgroup *mem);
208
209 static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
210                                          struct page_cgroup *pc,
211                                          bool charge)
212 {
213         int val = (charge)? 1 : -1;
214         struct mem_cgroup_stat *stat = &mem->stat;
215         struct mem_cgroup_stat_cpu *cpustat;
216         int cpu = get_cpu();
217
218         cpustat = &stat->cpustat[cpu];
219         if (PageCgroupCache(pc))
220                 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val);
221         else
222                 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, val);
223
224         if (charge)
225                 __mem_cgroup_stat_add_safe(cpustat,
226                                 MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
227         else
228                 __mem_cgroup_stat_add_safe(cpustat,
229                                 MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
230         put_cpu();
231 }
232
233 static struct mem_cgroup_per_zone *
234 mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
235 {
236         return &mem->info.nodeinfo[nid]->zoneinfo[zid];
237 }
238
239 static struct mem_cgroup_per_zone *
240 page_cgroup_zoneinfo(struct page_cgroup *pc)
241 {
242         struct mem_cgroup *mem = pc->mem_cgroup;
243         int nid = page_cgroup_nid(pc);
244         int zid = page_cgroup_zid(pc);
245
246         if (!mem)
247                 return NULL;
248
249         return mem_cgroup_zoneinfo(mem, nid, zid);
250 }
251
252 static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
253                                         enum lru_list idx)
254 {
255         int nid, zid;
256         struct mem_cgroup_per_zone *mz;
257         u64 total = 0;
258
259         for_each_online_node(nid)
260                 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
261                         mz = mem_cgroup_zoneinfo(mem, nid, zid);
262                         total += MEM_CGROUP_ZSTAT(mz, idx);
263                 }
264         return total;
265 }
266
267 static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
268 {
269         return container_of(cgroup_subsys_state(cont,
270                                 mem_cgroup_subsys_id), struct mem_cgroup,
271                                 css);
272 }
273
274 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
275 {
276         /*
277          * mm_update_next_owner() may clear mm->owner to NULL
278          * if it races with swapoff, page migration, etc.
279          * So this can be called with p == NULL.
280          */
281         if (unlikely(!p))
282                 return NULL;
283
284         return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
285                                 struct mem_cgroup, css);
286 }
287
288 /*
289  * The following LRU functions may be used without holding PCG_LOCK.
290  * They are called by the global LRU routines, independently of memcg.
291  * What we have to take care of here is the validity of pc->mem_cgroup.
292  *
293  * pc->mem_cgroup changes on
294  * 1. charge
295  * 2. moving account
296  * In the typical case, "charge" is done before add-to-lru. The exception
297  * is SwapCache, which is added to the LRU before being charged.
298  * If the PCG_USED bit is not set, the page_cgroup is not added to this
299  * private LRU. When moving an account, the page is not on the LRU; it is isolated.
300  */
301
302 void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
303 {
304         struct page_cgroup *pc;
305         struct mem_cgroup *mem;
306         struct mem_cgroup_per_zone *mz;
307
308         if (mem_cgroup_disabled())
309                 return;
310         pc = lookup_page_cgroup(page);
311         /* can happen while we handle swapcache. */
312         if (list_empty(&pc->lru))
313                 return;
314         mz = page_cgroup_zoneinfo(pc);
315         mem = pc->mem_cgroup;
316         MEM_CGROUP_ZSTAT(mz, lru) -= 1;
317         list_del_init(&pc->lru);
318         return;
319 }
320
321 void mem_cgroup_del_lru(struct page *page)
322 {
323         mem_cgroup_del_lru_list(page, page_lru(page));
324 }
325
326 void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
327 {
328         struct mem_cgroup_per_zone *mz;
329         struct page_cgroup *pc;
330
331         if (mem_cgroup_disabled())
332                 return;
333
334         pc = lookup_page_cgroup(page);
335         smp_rmb();
336         /* unused page is not rotated. */
337         if (!PageCgroupUsed(pc))
338                 return;
339         mz = page_cgroup_zoneinfo(pc);
340         list_move(&pc->lru, &mz->lists[lru]);
341 }
342
343 void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
344 {
345         struct page_cgroup *pc;
346         struct mem_cgroup_per_zone *mz;
347
348         if (mem_cgroup_disabled())
349                 return;
350         pc = lookup_page_cgroup(page);
351         /* barrier to sync with "charge" */
352         smp_rmb();
353         if (!PageCgroupUsed(pc))
354                 return;
355
356         mz = page_cgroup_zoneinfo(pc);
357         MEM_CGROUP_ZSTAT(mz, lru) += 1;
358         list_add(&pc->lru, &mz->lists[lru]);
359 }
360 /*
361  * Used to add swapcache to the LRU. Be careful when calling this function:
362  * zone->lru_lock must not be held and irqs must not be disabled.
363  */
364 static void mem_cgroup_lru_fixup(struct page *page)
365 {
366         if (!isolate_lru_page(page))
367                 putback_lru_page(page);
368 }
369
370 void mem_cgroup_move_lists(struct page *page,
371                            enum lru_list from, enum lru_list to)
372 {
373         if (mem_cgroup_disabled())
374                 return;
375         mem_cgroup_del_lru_list(page, from);
376         mem_cgroup_add_lru_list(page, to);
377 }
378
379 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
380 {
381         int ret;
382
383         task_lock(task);
384         ret = task->mm && mm_match_cgroup(task->mm, mem);
385         task_unlock(task);
386         return ret;
387 }
388
389 /*
390  * Calculate the mapped_ratio under the memory controller. This is used in
391  * vmscan.c to determine whether we have to reclaim mapped pages.
392  */
393 int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
394 {
395         long total, rss;
396
397         /*
398          * usage is recorded in bytes. But, here, we assume the number of
399          * physical pages can be represented by "long" on any arch.
400          */
401         total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
402         rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
403         return (int)((rss * 100L) / total);
404 }
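/*
 * Worked example (illustrative, not part of the original file): with
 * res.usage = 400MB (102400 pages of 4KB) and an RSS count of 25600 pages,
 * total = 102401, and the function returns (25600 * 100) / 102401 = 24,
 * i.e. roughly a quarter of the charged pages are rss.
 */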
405
406 /*
407  * prev_priority control: this will be used in the memory reclaim path.
408  */
409 int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
410 {
411         int prev_priority;
412
413         spin_lock(&mem->reclaim_param_lock);
414         prev_priority = mem->prev_priority;
415         spin_unlock(&mem->reclaim_param_lock);
416
417         return prev_priority;
418 }
419
420 void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
421 {
422         spin_lock(&mem->reclaim_param_lock);
423         if (priority < mem->prev_priority)
424                 mem->prev_priority = priority;
425         spin_unlock(&mem->reclaim_param_lock);
426 }
427
428 void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
429 {
430         spin_lock(&mem->reclaim_param_lock);
431         mem->prev_priority = priority;
432         spin_unlock(&mem->reclaim_param_lock);
433 }
434
435 int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
436 {
437         unsigned long active;
438         unsigned long inactive;
439
440         inactive = mem_cgroup_get_all_zonestat(memcg, LRU_INACTIVE_ANON);
441         active = mem_cgroup_get_all_zonestat(memcg, LRU_ACTIVE_ANON);
442
443         if (inactive * memcg->inactive_ratio < active)
444                 return 1;
445
446         return 0;
447 }
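/*
 * Illustrative example (not part of the original file): with
 * inactive_ratio = 3 (a 1GB limit, see mem_cgroup_set_inactive_ratio()
 * below), 100 inactive and 400 active anon pages give 100 * 3 < 400,
 * so the inactive anon list is considered low and reclaim should
 * deactivate some active anon pages.
 */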
448
449 unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
450                                        struct zone *zone,
451                                        enum lru_list lru)
452 {
453         int nid = zone->zone_pgdat->node_id;
454         int zid = zone_idx(zone);
455         struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
456
457         return MEM_CGROUP_ZSTAT(mz, lru);
458 }
459
460 struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
461                                                       struct zone *zone)
462 {
463         int nid = zone->zone_pgdat->node_id;
464         int zid = zone_idx(zone);
465         struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
466
467         return &mz->reclaim_stat;
468 }
469
470 struct zone_reclaim_stat *
471 mem_cgroup_get_reclaim_stat_from_page(struct page *page)
472 {
473         struct page_cgroup *pc;
474         struct mem_cgroup_per_zone *mz;
475
476         if (mem_cgroup_disabled())
477                 return NULL;
478
479         pc = lookup_page_cgroup(page);
480         mz = page_cgroup_zoneinfo(pc);
481         if (!mz)
482                 return NULL;
483
484         return &mz->reclaim_stat;
485 }
486
487 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
488                                         struct list_head *dst,
489                                         unsigned long *scanned, int order,
490                                         int mode, struct zone *z,
491                                         struct mem_cgroup *mem_cont,
492                                         int active, int file)
493 {
494         unsigned long nr_taken = 0;
495         struct page *page;
496         unsigned long scan;
497         LIST_HEAD(pc_list);
498         struct list_head *src;
499         struct page_cgroup *pc, *tmp;
500         int nid = z->zone_pgdat->node_id;
501         int zid = zone_idx(z);
502         struct mem_cgroup_per_zone *mz;
503         int lru = LRU_FILE * !!file + !!active;
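        /* illustrative note: e.g. file=1, active=0 selects LRU_INACTIVE_FILE */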
504
505         BUG_ON(!mem_cont);
506         mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
507         src = &mz->lists[lru];
508
509         scan = 0;
510         list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
511                 if (scan >= nr_to_scan)
512                         break;
513
514                 page = pc->page;
515                 if (unlikely(!PageCgroupUsed(pc)))
516                         continue;
517                 if (unlikely(!PageLRU(page)))
518                         continue;
519
520                 scan++;
521                 if (__isolate_lru_page(page, mode, file) == 0) {
522                         list_move(&page->lru, dst);
523                         nr_taken++;
524                 }
525         }
526
527         *scanned = scan;
528         return nr_taken;
529 }
530
531 #define mem_cgroup_from_res_counter(counter, member)    \
532         container_of(counter, struct mem_cgroup, member)
533
534 /*
535  * This routine finds the DFS walk successor. It must be
536  * called with cgroup_mutex held.
537  */
538 static struct mem_cgroup *
539 mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem)
540 {
541         struct cgroup *cgroup, *curr_cgroup, *root_cgroup;
542
543         curr_cgroup = curr->css.cgroup;
544         root_cgroup = root_mem->css.cgroup;
545
546         if (!list_empty(&curr_cgroup->children)) {
547                 /*
548                  * Walk down to children
549                  */
550                 mem_cgroup_put(curr);
551                 cgroup = list_entry(curr_cgroup->children.next,
552                                                 struct cgroup, sibling);
553                 curr = mem_cgroup_from_cont(cgroup);
554                 mem_cgroup_get(curr);
555                 goto done;
556         }
557
558 visit_parent:
559         if (curr_cgroup == root_cgroup) {
560                 mem_cgroup_put(curr);
561                 curr = root_mem;
562                 mem_cgroup_get(curr);
563                 goto done;
564         }
565
566         /*
567          * Goto next sibling
568          */
569         if (curr_cgroup->sibling.next != &curr_cgroup->parent->children) {
570                 mem_cgroup_put(curr);
571                 cgroup = list_entry(curr_cgroup->sibling.next, struct cgroup,
572                                                 sibling);
573                 curr = mem_cgroup_from_cont(cgroup);
574                 mem_cgroup_get(curr);
575                 goto done;
576         }
577
578         /*
579          * Go up to next parent and next parent's sibling if need be
580          */
581         curr_cgroup = curr_cgroup->parent;
582         goto visit_parent;
583
584 done:
585         root_mem->last_scanned_child = curr;
586         return curr;
587 }
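/*
 * Illustrative walk (not part of the original file): for a hierarchy
 *	root -> { A -> { A1, A2 }, B }
 * successive calls starting at A return A1, A2, B and then root itself,
 * after which the scan wraps around to A again.
 */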
588
589 /*
590  * Visit the first child (need not be the first child as per the ordering
591  * of the cgroup list, since we track last_scanned_child) of @mem and use
592  * that to reclaim pages from.
593  */
594 static struct mem_cgroup *
595 mem_cgroup_get_first_node(struct mem_cgroup *root_mem)
596 {
597         struct cgroup *cgroup;
598         struct mem_cgroup *ret;
599         bool obsolete = (root_mem->last_scanned_child &&
600                                 root_mem->last_scanned_child->obsolete);
601
602         /*
603          * Scan all children under the mem_cgroup mem
604          */
605         cgroup_lock();
606         if (list_empty(&root_mem->css.cgroup->children)) {
607                 ret = root_mem;
608                 goto done;
609         }
610
611         if (!root_mem->last_scanned_child || obsolete) {
612
613                 if (obsolete)
614                         mem_cgroup_put(root_mem->last_scanned_child);
615
616                 cgroup = list_first_entry(&root_mem->css.cgroup->children,
617                                 struct cgroup, sibling);
618                 ret = mem_cgroup_from_cont(cgroup);
619                 mem_cgroup_get(ret);
620         } else
621                 ret = mem_cgroup_get_next_node(root_mem->last_scanned_child,
622                                                 root_mem);
623
624 done:
625         root_mem->last_scanned_child = ret;
626         cgroup_unlock();
627         return ret;
628 }
629
630 static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
631 {
632         if (do_swap_account) {
633                 if (res_counter_check_under_limit(&mem->res) &&
634                         res_counter_check_under_limit(&mem->memsw))
635                         return true;
636         } else
637                 if (res_counter_check_under_limit(&mem->res))
638                         return true;
639         return false;
640 }
641
642 static unsigned int get_swappiness(struct mem_cgroup *memcg)
643 {
644         struct cgroup *cgrp = memcg->css.cgroup;
645         unsigned int swappiness;
646
647         /* root ? */
648         if (cgrp->parent == NULL)
649                 return vm_swappiness;
650
651         spin_lock(&memcg->reclaim_param_lock);
652         swappiness = memcg->swappiness;
653         spin_unlock(&memcg->reclaim_param_lock);
654
655         return swappiness;
656 }
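/*
 * Note (illustrative, not part of the original file): the value has the
 * usual 0..100 swappiness meaning and is fed to
 * try_to_free_mem_cgroup_pages(); a memcg with a low swappiness tends to
 * avoid swapping its anonymous pages, while a high value swaps them more
 * aggressively.
 */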
657
658 /*
659  * Dance down the hierarchy if needed to reclaim memory. We remember the
660  * last child we reclaimed from, so that we don't end up penalizing
661  * one child extensively based on its position in the children list.
662  *
663  * root_mem is the original ancestor that we've been reclaiming from.
664  */
665 static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
666                                                 gfp_t gfp_mask, bool noswap)
667 {
668         struct mem_cgroup *next_mem;
669         int ret = 0;
670
671         /*
672          * Reclaim unconditionally and don't check for return value.
673          * We need to reclaim in the current group and down the tree.
674          * One might think about checking for children before reclaiming,
675          * but there might be left over accounting, even after children
676          * have left.
677          */
678         ret = try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap,
679                                            get_swappiness(root_mem));
680         if (mem_cgroup_check_under_limit(root_mem))
681                 return 0;
682         if (!root_mem->use_hierarchy)
683                 return ret;
684
685         next_mem = mem_cgroup_get_first_node(root_mem);
686
687         while (next_mem != root_mem) {
688                 if (next_mem->obsolete) {
689                         mem_cgroup_put(next_mem);
690                         cgroup_lock();
691                         next_mem = mem_cgroup_get_first_node(root_mem);
692                         cgroup_unlock();
693                         continue;
694                 }
695                 ret = try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap,
696                                                    get_swappiness(next_mem));
697                 if (mem_cgroup_check_under_limit(root_mem))
698                         return 0;
699                 cgroup_lock();
700                 next_mem = mem_cgroup_get_next_node(next_mem, root_mem);
701                 cgroup_unlock();
702         }
703         return ret;
704 }
705
706 bool mem_cgroup_oom_called(struct task_struct *task)
707 {
708         bool ret = false;
709         struct mem_cgroup *mem;
710         struct mm_struct *mm;
711
712         rcu_read_lock();
713         mm = task->mm;
714         if (!mm)
715                 mm = &init_mm;
716         mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
717         if (mem && time_before(jiffies, mem->last_oom_jiffies + HZ/10))
718                 ret = true;
719         rcu_read_unlock();
720         return ret;
721 }
722 /*
723  * Unlike the exported interface, an "oom" parameter is added. If oom == true,
724  * the OOM killer can be invoked.
725  */
726 static int __mem_cgroup_try_charge(struct mm_struct *mm,
727                         gfp_t gfp_mask, struct mem_cgroup **memcg,
728                         bool oom)
729 {
730         struct mem_cgroup *mem, *mem_over_limit;
731         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
732         struct res_counter *fail_res;
733
734         if (unlikely(test_thread_flag(TIF_MEMDIE))) {
735                 /* Don't account this! */
736                 *memcg = NULL;
737                 return 0;
738         }
739
740         /*
741          * We always charge the cgroup the mm_struct belongs to.
742          * The mm_struct's mem_cgroup changes on task migration if the
743          * thread group leader migrates. It's possible that mm is not
744          * set, if so charge the init_mm (happens for pagecache usage).
745          */
746         if (likely(!*memcg)) {
747                 rcu_read_lock();
748                 mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
749                 if (unlikely(!mem)) {
750                         rcu_read_unlock();
751                         return 0;
752                 }
753                 /*
754                  * For every charge from the cgroup, increment reference count
755                  */
756                 css_get(&mem->css);
757                 *memcg = mem;
758                 rcu_read_unlock();
759         } else {
760                 mem = *memcg;
761                 css_get(&mem->css);
762         }
763
764         while (1) {
765                 int ret;
766                 bool noswap = false;
767
768                 ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
769                 if (likely(!ret)) {
770                         if (!do_swap_account)
771                                 break;
772                         ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
773                                                         &fail_res);
774                         if (likely(!ret))
775                                 break;
776                         /* mem+swap counter fails */
777                         res_counter_uncharge(&mem->res, PAGE_SIZE);
778                         noswap = true;
779                         mem_over_limit = mem_cgroup_from_res_counter(fail_res,
780                                                                         memsw);
781                 } else
782                         /* mem counter fails */
783                         mem_over_limit = mem_cgroup_from_res_counter(fail_res,
784                                                                         res);
785
786                 if (!(gfp_mask & __GFP_WAIT))
787                         goto nomem;
788
789                 ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
790                                                         noswap);
791
792                 /*
793                  * try_to_free_mem_cgroup_pages() might not give us a full
794                  * picture of reclaim. Some pages are reclaimed and might be
795                  * moved to swap cache or just unmapped from the cgroup.
796                  * Check the limit again to see if the reclaim reduced the
797                  * current usage of the cgroup before giving up
798                  *
799                  */
800                 if (mem_cgroup_check_under_limit(mem_over_limit))
801                         continue;
802
803                 if (!nr_retries--) {
804                         if (oom) {
805                                 mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
806                                 mem_over_limit->last_oom_jiffies = jiffies;
807                         }
808                         goto nomem;
809                 }
810         }
811         return 0;
812 nomem:
813         css_put(&mem->css);
814         return -ENOMEM;
815 }
816
817 /**
818  * mem_cgroup_try_charge - get a charge of PAGE_SIZE.
819  * @mm: the mm_struct which is charged against (when *memcg is NULL).
820  * @gfp_mask: gfp_mask for reclaim.
821  * @memcg: a pointer to the memory cgroup which is charged against.
822  *
823  * Charge against the memory cgroup pointed to by *memcg. If *memcg == NULL,
824  * the memory cgroup is derived from @mm and stored in *memcg.
825  *
826  * Returns 0 on success, -ENOMEM on failure.
827  * This call can invoke OOM-Killer.
828  */
829
830 int mem_cgroup_try_charge(struct mm_struct *mm,
831                           gfp_t mask, struct mem_cgroup **memcg)
832 {
833         return __mem_cgroup_try_charge(mm, mask, memcg, true);
834 }
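/*
 * Hedged usage sketch (illustrative, not part of the original file): the
 * charge API is a three-step protocol. The swap-in path, for example, does
 * roughly
 *
 *	struct mem_cgroup *ptr = NULL;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
 *		goto oom;
 *	... map the page ...
 *	mem_cgroup_commit_charge_swapin(page, ptr);
 *
 * and calls mem_cgroup_cancel_charge_swapin(ptr) instead of the commit
 * when mapping the page fails.
 */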
835
836 /*
837  * Commit a charge obtained by mem_cgroup_try_charge() and set the page_cgroup
838  * to the USED state. If it is already USED, uncharge and return.
839  */
840
841 static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
842                                      struct page_cgroup *pc,
843                                      enum charge_type ctype)
844 {
845         /* try_charge() can store NULL in *memcg; take care of that case. */
846         if (!mem)
847                 return;
848
849         lock_page_cgroup(pc);
850         if (unlikely(PageCgroupUsed(pc))) {
851                 unlock_page_cgroup(pc);
852                 res_counter_uncharge(&mem->res, PAGE_SIZE);
853                 if (do_swap_account)
854                         res_counter_uncharge(&mem->memsw, PAGE_SIZE);
855                 css_put(&mem->css);
856                 return;
857         }
858         pc->mem_cgroup = mem;
859         smp_wmb();
860         pc->flags = pcg_default_flags[ctype];
861
862         mem_cgroup_charge_statistics(mem, pc, true);
863
864         unlock_page_cgroup(pc);
865 }
866
867 /**
868  * mem_cgroup_move_account - move account of the page
869  * @pc: page_cgroup of the page.
870  * @from: mem_cgroup which the page is moved from.
871  * @to: mem_cgroup which the page is moved to. @from != @to.
872  *
873  * The caller must ensure the following:
874  * - the page is not on the LRU (isolate_lru_page() is useful.)
875  *
876  * Returns 0 on success,
877  * returns -EBUSY when the lock is busy or "pc" is unstable.
878  *
879  * This function does "uncharge" from old cgroup but doesn't do "charge" to
880  * new cgroup. It should be done by a caller.
881  */
882
883 static int mem_cgroup_move_account(struct page_cgroup *pc,
884         struct mem_cgroup *from, struct mem_cgroup *to)
885 {
886         struct mem_cgroup_per_zone *from_mz, *to_mz;
887         int nid, zid;
888         int ret = -EBUSY;
889
890         VM_BUG_ON(from == to);
891         VM_BUG_ON(PageLRU(pc->page));
892
893         nid = page_cgroup_nid(pc);
894         zid = page_cgroup_zid(pc);
895         from_mz =  mem_cgroup_zoneinfo(from, nid, zid);
896         to_mz =  mem_cgroup_zoneinfo(to, nid, zid);
897
898         if (!trylock_page_cgroup(pc))
899                 return ret;
900
901         if (!PageCgroupUsed(pc))
902                 goto out;
903
904         if (pc->mem_cgroup != from)
905                 goto out;
906
907         css_put(&from->css);
908         res_counter_uncharge(&from->res, PAGE_SIZE);
909         mem_cgroup_charge_statistics(from, pc, false);
910         if (do_swap_account)
911                 res_counter_uncharge(&from->memsw, PAGE_SIZE);
912         pc->mem_cgroup = to;
913         mem_cgroup_charge_statistics(to, pc, true);
914         css_get(&to->css);
915         ret = 0;
916 out:
917         unlock_page_cgroup(pc);
918         return ret;
919 }
920
921 /*
922  * Move charges to the parent cgroup.
923  */
924
925 static int mem_cgroup_move_parent(struct page_cgroup *pc,
926                                   struct mem_cgroup *child,
927                                   gfp_t gfp_mask)
928 {
929         struct page *page = pc->page;
930         struct cgroup *cg = child->css.cgroup;
931         struct cgroup *pcg = cg->parent;
932         struct mem_cgroup *parent;
933         int ret;
934
935         /* Is ROOT ? */
936         if (!pcg)
937                 return -EINVAL;
938
939
940         parent = mem_cgroup_from_cont(pcg);
941
942
943         ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
944         if (ret || !parent)
945                 return ret;
946
947         if (!get_page_unless_zero(page))
948                 return -EBUSY;
949
950         ret = isolate_lru_page(page);
951
952         if (ret)
953                 goto cancel;
954
955         ret = mem_cgroup_move_account(pc, child, parent);
956
957         /* drop the extra refcount taken by try_charge() (move_account takes one itself) */
958         css_put(&parent->css);
959         putback_lru_page(page);
960         if (!ret) {
961                 put_page(page);
962                 return 0;
963         }
964         /* uncharge if move fails */
965 cancel:
966         res_counter_uncharge(&parent->res, PAGE_SIZE);
967         if (do_swap_account)
968                 res_counter_uncharge(&parent->memsw, PAGE_SIZE);
969         put_page(page);
970         return ret;
971 }
972
973 /*
974  * Charge the memory controller for page usage.
975  * Return
976  * 0 if the charge was successful
977  * < 0 if the cgroup is over its limit
978  */
979 static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
980                                 gfp_t gfp_mask, enum charge_type ctype,
981                                 struct mem_cgroup *memcg)
982 {
983         struct mem_cgroup *mem;
984         struct page_cgroup *pc;
985         int ret;
986
987         pc = lookup_page_cgroup(page);
988         /* can happen at boot */
989         if (unlikely(!pc))
990                 return 0;
991         prefetchw(pc);
992
993         mem = memcg;
994         ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
995         if (ret || !mem)
996                 return ret;
997
998         __mem_cgroup_commit_charge(mem, pc, ctype);
999         return 0;
1000 }
1001
1002 int mem_cgroup_newpage_charge(struct page *page,
1003                               struct mm_struct *mm, gfp_t gfp_mask)
1004 {
1005         if (mem_cgroup_disabled())
1006                 return 0;
1007         if (PageCompound(page))
1008                 return 0;
1009         /*
1010          * If already mapped, we don't have to account.
1011          * For page cache, page->mapping points to an address_space.
1012          * But page->mapping may hold a stale anon_vma pointer;
1013          * detect that with a PageAnon() check. A newly-mapped anon
1014          * page's page->mapping is NULL.
1015          */
1016         if (page_mapped(page) || (page->mapping && !PageAnon(page)))
1017                 return 0;
1018         if (unlikely(!mm))
1019                 mm = &init_mm;
1020         return mem_cgroup_charge_common(page, mm, gfp_mask,
1021                                 MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
1022 }
1023
1024 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
1025                                 gfp_t gfp_mask)
1026 {
1027         if (mem_cgroup_disabled())
1028                 return 0;
1029         if (PageCompound(page))
1030                 return 0;
1031         /*
1032          * Corner case handling. This is usually called from
1033          * add_to_page_cache(). But some filesystems (shmem) precharge the page
1034          * before calling it, and call add_to_page_cache() with GFP_NOWAIT.
1035          *
1036          * In the GFP_NOWAIT case, the page may be pre-charged before
1037          * add_to_page_cache() is called (see shmem.c). Check for that here to
1038          * avoid charging twice. (It works, but at a slightly larger cost.)
1039          */
1040         if (!(gfp_mask & __GFP_WAIT)) {
1041                 struct page_cgroup *pc;
1042
1043
1044                 pc = lookup_page_cgroup(page);
1045                 if (!pc)
1046                         return 0;
1047                 lock_page_cgroup(pc);
1048                 if (PageCgroupUsed(pc)) {
1049                         unlock_page_cgroup(pc);
1050                         return 0;
1051                 }
1052                 unlock_page_cgroup(pc);
1053         }
1054
1055         if (unlikely(!mm))
1056                 mm = &init_mm;
1057
1058         if (page_is_file_cache(page))
1059                 return mem_cgroup_charge_common(page, mm, gfp_mask,
1060                                 MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
1061         else
1062                 return mem_cgroup_charge_common(page, mm, gfp_mask,
1063                                 MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL);
1064 }
1065
1066 int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
1067                                  struct page *page,
1068                                  gfp_t mask, struct mem_cgroup **ptr)
1069 {
1070         struct mem_cgroup *mem;
1071         swp_entry_t     ent;
1072
1073         if (mem_cgroup_disabled())
1074                 return 0;
1075
1076         if (!do_swap_account)
1077                 goto charge_cur_mm;
1078
1079         /*
1080          * A racing thread's fault, or swapoff, may have already updated
1081          * the pte, and even removed page from swap cache: return success
1082          * to go on to do_swap_page()'s pte_same() test, which should fail.
1083          */
1084         if (!PageSwapCache(page))
1085                 return 0;
1086
1087         ent.val = page_private(page);
1088
1089         mem = lookup_swap_cgroup(ent);
1090         if (!mem || mem->obsolete)
1091                 goto charge_cur_mm;
1092         *ptr = mem;
1093         return __mem_cgroup_try_charge(NULL, mask, ptr, true);
1094 charge_cur_mm:
1095         if (unlikely(!mm))
1096                 mm = &init_mm;
1097         return __mem_cgroup_try_charge(mm, mask, ptr, true);
1098 }
1099
1100 #ifdef CONFIG_SWAP
1101
1102 int mem_cgroup_cache_charge_swapin(struct page *page,
1103                         struct mm_struct *mm, gfp_t mask, bool locked)
1104 {
1105         int ret = 0;
1106
1107         if (mem_cgroup_disabled())
1108                 return 0;
1109         if (unlikely(!mm))
1110                 mm = &init_mm;
1111         if (!locked)
1112                 lock_page(page);
1113         /*
1114          * If not locked, the page can be dropped from SwapCache before
1115          * we reach here.
1116          */
1117         if (PageSwapCache(page)) {
1118                 struct mem_cgroup *mem = NULL;
1119                 swp_entry_t ent;
1120
1121                 ent.val = page_private(page);
1122                 if (do_swap_account) {
1123                         mem = lookup_swap_cgroup(ent);
1124                         if (mem && mem->obsolete)
1125                                 mem = NULL;
1126                         if (mem)
1127                                 mm = NULL;
1128                 }
1129                 ret = mem_cgroup_charge_common(page, mm, mask,
1130                                 MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
1131
1132                 if (!ret && do_swap_account) {
1133                         /* avoid double counting */
1134                         mem = swap_cgroup_record(ent, NULL);
1135                         if (mem) {
1136                                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1137                                 mem_cgroup_put(mem);
1138                         }
1139                 }
1140         }
1141         if (!locked)
1142                 unlock_page(page);
1143         /* add this page(page_cgroup) to the LRU we want. */
1144         mem_cgroup_lru_fixup(page);
1145
1146         return ret;
1147 }
1148 #endif
1149
1150 void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
1151 {
1152         struct page_cgroup *pc;
1153
1154         if (mem_cgroup_disabled())
1155                 return;
1156         if (!ptr)
1157                 return;
1158         pc = lookup_page_cgroup(page);
1159         __mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
1160         /*
1161          * Now the swap content is in memory. This means this page may be
1162          * counted both as mem and as swap: a double count.
1163          * Fix it by uncharging from memsw. This SwapCache is stable
1164          * because we're still under lock_page().
1165          */
1166         if (do_swap_account) {
1167                 swp_entry_t ent = {.val = page_private(page)};
1168                 struct mem_cgroup *memcg;
1169                 memcg = swap_cgroup_record(ent, NULL);
1170                 if (memcg) {
1171                         /* If memcg is obsolete, memcg can be != ptr */
1172                         res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1173                         mem_cgroup_put(memcg);
1174                 }
1175
1176         }
1177         /* add this page(page_cgroup) to the LRU we want. */
1178         mem_cgroup_lru_fixup(page);
1179 }
1180
1181 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
1182 {
1183         if (mem_cgroup_disabled())
1184                 return;
1185         if (!mem)
1186                 return;
1187         res_counter_uncharge(&mem->res, PAGE_SIZE);
1188         if (do_swap_account)
1189                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1190         css_put(&mem->css);
1191 }
1192
1193
1194 /*
1195  * uncharge if !page_mapped(page)
1196  */
1197 static struct mem_cgroup *
1198 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
1199 {
1200         struct page_cgroup *pc;
1201         struct mem_cgroup *mem = NULL;
1202         struct mem_cgroup_per_zone *mz;
1203
1204         if (mem_cgroup_disabled())
1205                 return NULL;
1206
1207         if (PageSwapCache(page))
1208                 return NULL;
1209
1210         /*
1211          * Check if our page_cgroup is valid
1212          */
1213         pc = lookup_page_cgroup(page);
1214         if (unlikely(!pc || !PageCgroupUsed(pc)))
1215                 return NULL;
1216
1217         lock_page_cgroup(pc);
1218
1219         mem = pc->mem_cgroup;
1220
1221         if (!PageCgroupUsed(pc))
1222                 goto unlock_out;
1223
1224         switch (ctype) {
1225         case MEM_CGROUP_CHARGE_TYPE_MAPPED:
1226                 if (page_mapped(page))
1227                         goto unlock_out;
1228                 break;
1229         case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
1230                 if (!PageAnon(page)) {  /* Shared memory */
1231                         if (page->mapping && !page_is_file_cache(page))
1232                                 goto unlock_out;
1233                 } else if (page_mapped(page)) /* Anon */
1234                                 goto unlock_out;
1235                 break;
1236         default:
1237                 break;
1238         }
1239
1240         res_counter_uncharge(&mem->res, PAGE_SIZE);
1241         if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
1242                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1243
1244         mem_cgroup_charge_statistics(mem, pc, false);
1245         ClearPageCgroupUsed(pc);
1246
1247         mz = page_cgroup_zoneinfo(pc);
1248         unlock_page_cgroup(pc);
1249
1250         /* at swapout, this memcg will be accessed again to record the swap entry */
1251         if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
1252                 css_put(&mem->css);
1253
1254         return mem;
1255
1256 unlock_out:
1257         unlock_page_cgroup(pc);
1258         return NULL;
1259 }
1260
1261 void mem_cgroup_uncharge_page(struct page *page)
1262 {
1263         /* early check. */
1264         if (page_mapped(page))
1265                 return;
1266         if (page->mapping && !PageAnon(page))
1267                 return;
1268         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
1269 }
1270
1271 void mem_cgroup_uncharge_cache_page(struct page *page)
1272 {
1273         VM_BUG_ON(page_mapped(page));
1274         VM_BUG_ON(page->mapping);
1275         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
1276 }
1277
1278 /*
1279  * Called from __delete_from_swap_cache(); drops the "page" account.
1280  * The memcg information is recorded in the swap_cgroup of "ent".
1281  */
1282 void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
1283 {
1284         struct mem_cgroup *memcg;
1285
1286         memcg = __mem_cgroup_uncharge_common(page,
1287                                         MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
1288         /* record memcg information */
1289         if (do_swap_account && memcg) {
1290                 swap_cgroup_record(ent, memcg);
1291                 mem_cgroup_get(memcg);
1292         }
1293         if (memcg)
1294                 css_put(&memcg->css);
1295 }
1296
1297 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
1298 /*
1299  * Called from swap_entry_free(). Removes the record in swap_cgroup and
1300  * uncharges the "memsw" account.
1301  */
1302 void mem_cgroup_uncharge_swap(swp_entry_t ent)
1303 {
1304         struct mem_cgroup *memcg;
1305
1306         if (!do_swap_account)
1307                 return;
1308
1309         memcg = swap_cgroup_record(ent, NULL);
1310         if (memcg) {
1311                 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1312                 mem_cgroup_put(memcg);
1313         }
1314 }
1315 #endif
1316
1317 /*
1318  * Before starting migration, account PAGE_SIZE to mem_cgroup that the old
1319  * page belongs to.
1320  */
1321 int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
1322 {
1323         struct page_cgroup *pc;
1324         struct mem_cgroup *mem = NULL;
1325         int ret = 0;
1326
1327         if (mem_cgroup_disabled())
1328                 return 0;
1329
1330         pc = lookup_page_cgroup(page);
1331         lock_page_cgroup(pc);
1332         if (PageCgroupUsed(pc)) {
1333                 mem = pc->mem_cgroup;
1334                 css_get(&mem->css);
1335         }
1336         unlock_page_cgroup(pc);
1337
1338         if (mem) {
1339                 ret = mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem);
1340                 css_put(&mem->css);
1341         }
1342         *ptr = mem;
1343         return ret;
1344 }
1345
1346 /* remove the redundant charge if migration failed */
1347 void mem_cgroup_end_migration(struct mem_cgroup *mem,
1348                 struct page *oldpage, struct page *newpage)
1349 {
1350         struct page *target, *unused;
1351         struct page_cgroup *pc;
1352         enum charge_type ctype;
1353
1354         if (!mem)
1355                 return;
1356
1357         /* at migration success, oldpage->mapping is NULL. */
1358         if (oldpage->mapping) {
1359                 target = oldpage;
1360                 unused = NULL;
1361         } else {
1362                 target = newpage;
1363                 unused = oldpage;
1364         }
1365
1366         if (PageAnon(target))
1367                 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
1368         else if (page_is_file_cache(target))
1369                 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
1370         else
1371                 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
1372
1373         /* unused page is not on radix-tree now. */
1374         if (unused)
1375                 __mem_cgroup_uncharge_common(unused, ctype);
1376
1377         pc = lookup_page_cgroup(target);
1378         /*
1379  * __mem_cgroup_commit_charge() checks the PCG_USED bit of page_cgroup,
1380  * so double-counting is effectively avoided.
1381          */
1382         __mem_cgroup_commit_charge(mem, pc, ctype);
1383
1384         /*
1385          * Both oldpage and newpage are still under lock_page(),
1386          * so we don't have to worry about races in the radix-tree.
1387          * But we do have to check whether the page is unmapped.
1388          *
1389          * There is a case for !page_mapped(). At the start of
1390          * migration, oldpage was mapped. But now, it's zapped.
1391          * But we know *target* page is not freed/reused under us.
1392          * mem_cgroup_uncharge_page() does all necessary checks.
1393          */
1394         if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
1395                 mem_cgroup_uncharge_page(target);
1396 }
1397
1398 /*
1399  * A call to try to shrink memory usage under the specified resource controller.
1400  * This is typically used to reclaim shmem pages, reducing the side
1401  * effects of shmem page allocation on the mem_cgroups that use it.
1402  */
1403 int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
1404 {
1405         struct mem_cgroup *mem;
1406         int progress = 0;
1407         int retry = MEM_CGROUP_RECLAIM_RETRIES;
1408
1409         if (mem_cgroup_disabled())
1410                 return 0;
1411         if (!mm)
1412                 return 0;
1413
1414         rcu_read_lock();
1415         mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
1416         if (unlikely(!mem)) {
1417                 rcu_read_unlock();
1418                 return 0;
1419         }
1420         css_get(&mem->css);
1421         rcu_read_unlock();
1422
1423         do {
1424                 progress = try_to_free_mem_cgroup_pages(mem, gfp_mask, true,
1425                                                         get_swappiness(mem));
1426                 progress += mem_cgroup_check_under_limit(mem);
1427         } while (!progress && --retry);
1428
1429         css_put(&mem->css);
1430         if (!retry)
1431                 return -ENOMEM;
1432         return 0;
1433 }
1434
1435 /*
1436  * The inactive anon list should be small enough that the VM never has to
1437  * do too much work, but large enough that each inactive page has a chance
1438  * to be referenced again before it is swapped out.
1439  *
1440  * This calculation is a straightforward port of
1441  * page_alloc.c::setup_per_zone_inactive_ratio(),
1442  * which describes it in more detail.
1443  */
1444 static void mem_cgroup_set_inactive_ratio(struct mem_cgroup *memcg)
1445 {
1446         unsigned int gb, ratio;
1447
1448         gb = res_counter_read_u64(&memcg->res, RES_LIMIT) >> 30;
1449         if (gb)
1450                 ratio = int_sqrt(10 * gb);
1451         else
1452                 ratio = 1;
1453
1454         memcg->inactive_ratio = ratio;
1455
1456 }
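/*
 * Worked example (illustrative, not part of the original file): a 10GB
 * limit gives gb = 10 and ratio = int_sqrt(100) = 10; a 1GB limit gives
 * ratio = int_sqrt(10) = 3; anything under 1GB falls through to ratio = 1.
 */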
1457
1458 static DEFINE_MUTEX(set_limit_mutex);
1459
1460 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
1461                                 unsigned long long val)
1462 {
1463
1464         int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
1465         int progress;
1466         u64 memswlimit;
1467         int ret = 0;
1468
1469         while (retry_count) {
1470                 if (signal_pending(current)) {
1471                         ret = -EINTR;
1472                         break;
1473                 }
1474                 /*
1475                  * Rather than hiding all this in some function, do it
1476                  * open-coded so that what really happens is visible.
1477                  * We have to guarantee mem->res.limit < mem->memsw.limit.
1478                  */
1479                 mutex_lock(&set_limit_mutex);
1480                 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1481                 if (memswlimit < val) {
1482                         ret = -EINVAL;
1483                         mutex_unlock(&set_limit_mutex);
1484                         break;
1485                 }
1486                 ret = res_counter_set_limit(&memcg->res, val);
1487                 mutex_unlock(&set_limit_mutex);
1488
1489                 if (!ret)
1490                         break;
1491
1492                 progress = try_to_free_mem_cgroup_pages(memcg,
1493                                                         GFP_KERNEL,
1494                                                         false,
1495                                                         get_swappiness(memcg));
1496                 if (!progress) retry_count--;
1497         }
1498
1499         if (!ret)
1500                 mem_cgroup_set_inactive_ratio(memcg);
1501
1502         return ret;
1503 }
1504
1505 int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
1506                                 unsigned long long val)
1507 {
1508         int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
1509         u64 memlimit, oldusage, curusage;
1510         int ret;
1511
1512         if (!do_swap_account)
1513                 return -EINVAL;
1514
1515         while (retry_count) {
1516                 if (signal_pending(current)) {
1517                         ret = -EINTR;
1518                         break;
1519                 }
1520                 /*
1521                  * Rather than hiding all this in some function, do it
1522                  * open-coded so that what really happens is visible.
1523                  * We have to guarantee mem->res.limit < mem->memsw.limit.
1524                  */
1525                 mutex_lock(&set_limit_mutex);
1526                 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1527                 if (memlimit > val) {
1528                         ret = -EINVAL;
1529                         mutex_unlock(&set_limit_mutex);
1530                         break;
1531                 }
1532                 ret = res_counter_set_limit(&memcg->memsw, val);
1533                 mutex_unlock(&set_limit_mutex);
1534
1535                 if (!ret)
1536                         break;
1537
1538                 oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
1539                 try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL, true,
1540                                              get_swappiness(memcg));
1541                 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
1542                 if (curusage >= oldusage)
1543                         retry_count--;
1544         }
1545         return ret;
1546 }
1547
1548 /*
1549  * This routine traverses the page_cgroups on the given list and drops them
1550  * all. It does not reclaim the pages themselves, only their page_cgroups.
1551  */
1552 static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
1553                                 int node, int zid, enum lru_list lru)
1554 {
1555         struct zone *zone;
1556         struct mem_cgroup_per_zone *mz;
1557         struct page_cgroup *pc, *busy;
1558         unsigned long flags, loop;
1559         struct list_head *list;
1560         int ret = 0;
1561
1562         zone = &NODE_DATA(node)->node_zones[zid];
1563         mz = mem_cgroup_zoneinfo(mem, node, zid);
1564         list = &mz->lists[lru];
1565
1566         loop = MEM_CGROUP_ZSTAT(mz, lru);
1567         /* give some margin against -EBUSY etc... */
1568         loop += 256;
1569         busy = NULL;
1570         while (loop--) {
1571                 ret = 0;
1572                 spin_lock_irqsave(&zone->lru_lock, flags);
1573                 if (list_empty(list)) {
1574                         spin_unlock_irqrestore(&zone->lru_lock, flags);
1575                         break;
1576                 }
1577                 pc = list_entry(list->prev, struct page_cgroup, lru);
1578                 if (busy == pc) {
1579                         list_move(&pc->lru, list);
1580                         busy = NULL;
1581                         spin_unlock_irqrestore(&zone->lru_lock, flags);
1582                         continue;
1583                 }
1584                 spin_unlock_irqrestore(&zone->lru_lock, flags);
1585
1586                 ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
1587                 if (ret == -ENOMEM)
1588                         break;
1589
1590                 if (ret == -EBUSY || ret == -EINVAL) {
1591                         /* found lock contention or "pc" is obsolete. */
1592                         busy = pc;
1593                         cond_resched();
1594                 } else
1595                         busy = NULL;
1596         }
1597
1598         if (!ret && !list_empty(list))
1599                 return -EBUSY;
1600         return ret;
1601 }
1602
1603 /*
1604  * Make the mem_cgroup's charge 0 if there is no task in it.
1605  * This enables the mem_cgroup to be deleted.
1606  */
1607 static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
1608 {
1609         int ret;
1610         int node, zid, shrink;
1611         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1612         struct cgroup *cgrp = mem->css.cgroup;
1613
1614         css_get(&mem->css);
1615
1616         shrink = 0;
1617         /* should we free everything? */
1618         if (free_all)
1619                 goto try_to_free;
1620 move_account:
1621         while (mem->res.usage > 0) {
1622                 ret = -EBUSY;
1623                 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
1624                         goto out;
1625                 ret = -EINTR;
1626                 if (signal_pending(current))
1627                         goto out;
1628                 /* Make sure all *used* pages are on an LRU list. */
1629                 lru_add_drain_all();
1630                 ret = 0;
1631                 for_each_node_state(node, N_POSSIBLE) {
1632                         for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
1633                                 enum lru_list l;
1634                                 for_each_lru(l) {
1635                                         ret = mem_cgroup_force_empty_list(mem,
1636                                                         node, zid, l);
1637                                         if (ret)
1638                                                 break;
1639                                 }
1640                         }
1641                         if (ret)
1642                                 break;
1643                 }
1644                 /* the parent cgroup doesn't seem to have enough memory */
1645                 if (ret == -ENOMEM)
1646                         goto try_to_free;
1647                 cond_resched();
1648         }
1649         ret = 0;
1650 out:
1651         css_put(&mem->css);
1652         return ret;
1653
1654 try_to_free:
1655         /* return -EBUSY if there is a task or if we come here twice. */
1656         if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
1657                 ret = -EBUSY;
1658                 goto out;
1659         }
1660         /* call try_to_free_mem_cgroup_pages() to make this cgroup empty */
1661         lru_add_drain_all();
1662         /* try to free all pages in this cgroup */
1663         shrink = 1;
1664         while (nr_retries && mem->res.usage > 0) {
1665                 int progress;
1666
1667                 if (signal_pending(current)) {
1668                         ret = -EINTR;
1669                         goto out;
1670                 }
1671                 progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
1672                                                 false, get_swappiness(mem));
1673                 if (!progress) {
1674                         nr_retries--;
1675                         /* maybe some writeback is necessary */
1676                         congestion_wait(WRITE, HZ/10);
1677                 }
1678
1679         }
1680         lru_add_drain();
1681         /* try move_account...there may be some *locked* pages. */
1682         if (mem->res.usage)
1683                 goto move_account;
1684         ret = 0;
1685         goto out;
1686 }
1687
1688 int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
1689 {
1690         return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
1691 }
1692
1693
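/*
 * "use_hierarchy" selects whether charges are also accounted to (and
 * limited by) this cgroup's ancestors. It may only be changed when the
 * setting is not already imposed by the parent and the cgroup has no
 * children; see mem_cgroup_hierarchy_write() below. Typical use
 * (illustrative mount point):
 *   echo 1 > /cgroups/memory/parent/memory.use_hierarchy
 */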
1694 static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
1695 {
1696         return mem_cgroup_from_cont(cont)->use_hierarchy;
1697 }
1698
1699 static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
1700                                         u64 val)
1701 {
1702         int retval = 0;
1703         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1704         struct cgroup *parent = cont->parent;
1705         struct mem_cgroup *parent_mem = NULL;
1706
1707         if (parent)
1708                 parent_mem = mem_cgroup_from_cont(parent);
1709
1710         cgroup_lock();
1711         /*
1712          * If the parent's use_hierarchy is set, we can't make any
1713          * modifications in the child subtrees. If it is unset, the change
1714          * can occur, provided the current cgroup has no children.
1715          *
1716          * For the root cgroup, parent_mem is NULL; we allow the value to
1717          * be set if there are no children.
1718          */
1719         if ((!parent_mem || !parent_mem->use_hierarchy) &&
1720                                 (val == 1 || val == 0)) {
1721                 if (list_empty(&cont->children))
1722                         mem->use_hierarchy = val;
1723                 else
1724                         retval = -EBUSY;
1725         } else
1726                 retval = -EINVAL;
1727         cgroup_unlock();
1728
1729         return retval;
1730 }
1731
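/*
 * Common read handler for the memory.* control files. cft->private
 * packs both the counter to read (_MEM or _MEMSWAP) and the
 * res_counter member (usage, limit, max_usage, failcnt), encoded with
 * MEMFILE_PRIVATE() in the cftype tables below.
 */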
1732 static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
1733 {
1734         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1735         u64 val = 0;
1736         int type, name;
1737
1738         type = MEMFILE_TYPE(cft->private);
1739         name = MEMFILE_ATTR(cft->private);
1740         switch (type) {
1741         case _MEM:
1742                 val = res_counter_read_u64(&mem->res, name);
1743                 break;
1744         case _MEMSWAP:
1745                 if (do_swap_account)
1746                         val = res_counter_read_u64(&mem->memsw, name);
1747                 break;
1748         default:
1749                 BUG();
1750                 break;
1751         }
1752         return val;
1753 }
1754 /*
1755  * So far the only user of this function is
1756  * RES_LIMIT.
1757  */
1758 static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
1759                             const char *buffer)
1760 {
1761         struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
1762         int type, name;
1763         unsigned long long val;
1764         int ret;
1765
1766         type = MEMFILE_TYPE(cft->private);
1767         name = MEMFILE_ATTR(cft->private);
1768         switch (name) {
1769         case RES_LIMIT:
1770                 /* this function does all the necessary parsing; reuse it */
1771                 ret = res_counter_memparse_write_strategy(buffer, &val);
1772                 if (ret)
1773                         break;
1774                 if (type == _MEM)
1775                         ret = mem_cgroup_resize_limit(memcg, val);
1776                 else
1777                         ret = mem_cgroup_resize_memsw_limit(memcg, val);
1778                 break;
1779         default:
1780                 ret = -EINVAL; /* should be BUG() ? */
1781                 break;
1782         }
1783         return ret;
1784 }
1785
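/*
 * Trigger handler for the *.max_usage_in_bytes and *.failcnt files:
 * any write resets the corresponding watermark or failure counter,
 * e.g. (illustrative path):
 *   echo 0 > /cgroups/memory/group0/memory.failcnt
 */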
1786 static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
1787 {
1788         struct mem_cgroup *mem;
1789         int type, name;
1790
1791         mem = mem_cgroup_from_cont(cont);
1792         type = MEMFILE_TYPE(event);
1793         name = MEMFILE_ATTR(event);
1794         switch (name) {
1795         case RES_MAX_USAGE:
1796                 if (type == _MEM)
1797                         res_counter_reset_max(&mem->res);
1798                 else
1799                         res_counter_reset_max(&mem->memsw);
1800                 break;
1801         case RES_FAILCNT:
1802                 if (type == _MEM)
1803                         res_counter_reset_failcnt(&mem->res);
1804                 else
1805                         res_counter_reset_failcnt(&mem->memsw);
1806                 break;
1807         }
1808         return 0;
1809 }
1810
1811 static const struct mem_cgroup_stat_desc {
1812         const char *msg;
1813         u64 unit;
1814 } mem_cgroup_stat_desc[] = {
1815         [MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
1816         [MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
1817         [MEM_CGROUP_STAT_PGPGIN_COUNT] = {"pgpgin", 1, },
1818         [MEM_CGROUP_STAT_PGPGOUT_COUNT] = {"pgpgout", 1, },
1819 };
1820
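/*
 * Implements memory.stat: the accumulated counters (scaled by each
 * entry's unit; PAGE_SIZE for cache and rss), the per-LRU page totals,
 * and, under CONFIG_DEBUG_VM, inactive_ratio plus the recent reclaim
 * statistics.
 */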
1821 static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
1822                                  struct cgroup_map_cb *cb)
1823 {
1824         struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
1825         struct mem_cgroup_stat *stat = &mem_cont->stat;
1826         int i;
1827
1828         for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
1829                 s64 val;
1830
1831                 val = mem_cgroup_read_stat(stat, i);
1832                 val *= mem_cgroup_stat_desc[i].unit;
1833                 cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);
1834         }
1835         /* showing # of active pages */
1836         {
1837                 unsigned long active_anon, inactive_anon;
1838                 unsigned long active_file, inactive_file;
1839                 unsigned long unevictable;
1840
1841                 inactive_anon = mem_cgroup_get_all_zonestat(mem_cont,
1842                                                 LRU_INACTIVE_ANON);
1843                 active_anon = mem_cgroup_get_all_zonestat(mem_cont,
1844                                                 LRU_ACTIVE_ANON);
1845                 inactive_file = mem_cgroup_get_all_zonestat(mem_cont,
1846                                                 LRU_INACTIVE_FILE);
1847                 active_file = mem_cgroup_get_all_zonestat(mem_cont,
1848                                                 LRU_ACTIVE_FILE);
1849                 unevictable = mem_cgroup_get_all_zonestat(mem_cont,
1850                                                         LRU_UNEVICTABLE);
1851
1852                 cb->fill(cb, "active_anon", (active_anon) * PAGE_SIZE);
1853                 cb->fill(cb, "inactive_anon", (inactive_anon) * PAGE_SIZE);
1854                 cb->fill(cb, "active_file", (active_file) * PAGE_SIZE);
1855                 cb->fill(cb, "inactive_file", (inactive_file) * PAGE_SIZE);
1856                 cb->fill(cb, "unevictable", unevictable * PAGE_SIZE);
1857
1858         }
1859
1860 #ifdef CONFIG_DEBUG_VM
1861         cb->fill(cb, "inactive_ratio", mem_cont->inactive_ratio);
1862
1863         {
1864                 int nid, zid;
1865                 struct mem_cgroup_per_zone *mz;
1866                 unsigned long recent_rotated[2] = {0, 0};
1867                 unsigned long recent_scanned[2] = {0, 0};
1868
1869                 for_each_online_node(nid)
1870                         for (zid = 0; zid < MAX_NR_ZONES; zid++) {
1871                                 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
1872
1873                                 recent_rotated[0] +=
1874                                         mz->reclaim_stat.recent_rotated[0];
1875                                 recent_rotated[1] +=
1876                                         mz->reclaim_stat.recent_rotated[1];
1877                                 recent_scanned[0] +=
1878                                         mz->reclaim_stat.recent_scanned[0];
1879                                 recent_scanned[1] +=
1880                                         mz->reclaim_stat.recent_scanned[1];
1881                         }
1882                 cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
1883                 cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
1884                 cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
1885                 cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
1886         }
1887 #endif
1888
1889         return 0;
1890 }
1891
1892 static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
1893 {
1894         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
1895
1896         return get_swappiness(memcg);
1897 }
1898
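/*
 * memory.swappiness takes the same 0..100 range as the global
 * vm.swappiness sysctl and feeds per-cgroup reclaim through
 * get_swappiness(). It cannot be set on the root cgroup, and under
 * hierarchy only a childless hierarchy root may change it. Example
 * (illustrative path):
 *   echo 60 > /cgroups/memory/group0/memory.swappiness
 */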
1899 static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
1900                                        u64 val)
1901 {
1902         struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
1903         struct mem_cgroup *parent;
1904         if (val > 100)
1905                 return -EINVAL;
1906
1907         if (cgrp->parent == NULL)
1908                 return -EINVAL;
1909
1910         parent = mem_cgroup_from_cont(cgrp->parent);
1911         /* Under hierarchy, only a hierarchy root with no children may set this. */
1912         if ((parent->use_hierarchy) ||
1913             (memcg->use_hierarchy && !list_empty(&cgrp->children)))
1914                 return -EINVAL;
1915
1916         spin_lock(&memcg->reclaim_param_lock);
1917         memcg->swappiness = val;
1918         spin_unlock(&memcg->reclaim_param_lock);
1919
1920         return 0;
1921 }
1922
1923
1924 static struct cftype mem_cgroup_files[] = {
1925         {
1926                 .name = "usage_in_bytes",
1927                 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
1928                 .read_u64 = mem_cgroup_read,
1929         },
1930         {
1931                 .name = "max_usage_in_bytes",
1932                 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
1933                 .trigger = mem_cgroup_reset,
1934                 .read_u64 = mem_cgroup_read,
1935         },
1936         {
1937                 .name = "limit_in_bytes",
1938                 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
1939                 .write_string = mem_cgroup_write,
1940                 .read_u64 = mem_cgroup_read,
1941         },
1942         {
1943                 .name = "failcnt",
1944                 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
1945                 .trigger = mem_cgroup_reset,
1946                 .read_u64 = mem_cgroup_read,
1947         },
1948         {
1949                 .name = "stat",
1950                 .read_map = mem_control_stat_show,
1951         },
1952         {
1953                 .name = "force_empty",
1954                 .trigger = mem_cgroup_force_empty_write,
1955         },
1956         {
1957                 .name = "use_hierarchy",
1958                 .write_u64 = mem_cgroup_hierarchy_write,
1959                 .read_u64 = mem_cgroup_hierarchy_read,
1960         },
1961         {
1962                 .name = "swappiness",
1963                 .read_u64 = mem_cgroup_swappiness_read,
1964                 .write_u64 = mem_cgroup_swappiness_write,
1965         },
1966 };
1967
1968 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
1969 static struct cftype memsw_cgroup_files[] = {
1970         {
1971                 .name = "memsw.usage_in_bytes",
1972                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
1973                 .read_u64 = mem_cgroup_read,
1974         },
1975         {
1976                 .name = "memsw.max_usage_in_bytes",
1977                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
1978                 .trigger = mem_cgroup_reset,
1979                 .read_u64 = mem_cgroup_read,
1980         },
1981         {
1982                 .name = "memsw.limit_in_bytes",
1983                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
1984                 .write_string = mem_cgroup_write,
1985                 .read_u64 = mem_cgroup_read,
1986         },
1987         {
1988                 .name = "memsw.failcnt",
1989                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
1990                 .trigger = mem_cgroup_reset,
1991                 .read_u64 = mem_cgroup_read,
1992         },
1993 };
1994
1995 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
1996 {
1997         if (!do_swap_account)
1998                 return 0;
1999         return cgroup_add_files(cont, ss, memsw_cgroup_files,
2000                                 ARRAY_SIZE(memsw_cgroup_files));
2001 };
2002 #else
2003 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
2004 {
2005         return 0;
2006 }
2007 #endif
2008
2009 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
2010 {
2011         struct mem_cgroup_per_node *pn;
2012         struct mem_cgroup_per_zone *mz;
2013         enum lru_list l;
2014         int zone, tmp = node;
2015         /*
2016          * This routine is called against possible nodes, but it is a
2017          * BUG to call kmalloc() against an offline node.
2018          *
2019          * TODO: this routine can waste a lot of memory on nodes which
2020          *       will never be onlined. It would be better to use a
2021          *       memory hotplug callback instead.
2022          */
2023         if (!node_state(node, N_NORMAL_MEMORY))
2024                 tmp = -1;
2025         pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
2026         if (!pn)
2027                 return 1;
2028
2029         mem->info.nodeinfo[node] = pn;
2030         memset(pn, 0, sizeof(*pn));
2031
2032         for (zone = 0; zone < MAX_NR_ZONES; zone++) {
2033                 mz = &pn->zoneinfo[zone];
2034                 for_each_lru(l)
2035                         INIT_LIST_HEAD(&mz->lists[l]);
2036         }
2037         return 0;
2038 }
2039
2040 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
2041 {
2042         kfree(mem->info.nodeinfo[node]);
2043 }
2044
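/*
 * The per-cpu statistics array hangs off the end of struct mem_cgroup,
 * so the real object size depends on nr_cpu_ids; mem_cgroup_alloc()
 * below falls back to vmalloc() once that total exceeds a page.
 */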
2045 static int mem_cgroup_size(void)
2046 {
2047         int cpustat_size = nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu);
2048         return sizeof(struct mem_cgroup) + cpustat_size;
2049 }
2050
2051 static struct mem_cgroup *mem_cgroup_alloc(void)
2052 {
2053         struct mem_cgroup *mem;
2054         int size = mem_cgroup_size();
2055
2056         if (size < PAGE_SIZE)
2057                 mem = kmalloc(size, GFP_KERNEL);
2058         else
2059                 mem = vmalloc(size);
2060
2061         if (mem)
2062                 memset(mem, 0, size);
2063         return mem;
2064 }
2065
2066 /*
2067  * When a mem_cgroup is destroyed, references to it from swap_cgroup
2068  * entries can remain (scanning them all at force_empty would be too
2069  * costly...).
2070  *
2071  * Instead of clearing all references at force_empty, we remember the
2072  * number of references from swap_cgroup and free the mem_cgroup only
2073  * when that count drops to 0.
2074  *
2075  * When the mem_cgroup is destroyed, mem->obsolete is set to 1 and any
2076  * swap entry which points to this memcg is ignored at swapin.
2077  * Removal of the cgroup itself succeeds regardless of refs from swap.
2078  */
2079
2080 static void mem_cgroup_free(struct mem_cgroup *mem)
2081 {
2082         int node;
2083
2084         if (atomic_read(&mem->refcnt) > 0)
2085                 return;
2086
2087
2088         for_each_node_state(node, N_POSSIBLE)
2089                 free_mem_cgroup_per_zone_info(mem, node);
2090
2091         if (mem_cgroup_size() < PAGE_SIZE)
2092                 kfree(mem);
2093         else
2094                 vfree(mem);
2095 }
2096
2097 static void mem_cgroup_get(struct mem_cgroup *mem)
2098 {
2099         atomic_inc(&mem->refcnt);
2100 }
2101
2102 static void mem_cgroup_put(struct mem_cgroup *mem)
2103 {
2104         if (atomic_dec_and_test(&mem->refcnt)) {
2105                 if (!mem->obsolete)
2106                         return;
2107                 mem_cgroup_free(mem);
2108         }
2109 }
2110
2111
2112 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2113 static void __init enable_swap_cgroup(void)
2114 {
2115         if (!mem_cgroup_disabled() && really_do_swap_account)
2116                 do_swap_account = 1;
2117 }
2118 #else
2119 static void __init enable_swap_cgroup(void)
2120 {
2121 }
2122 #endif
2123
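/*
 * Create a new mem_cgroup. Creating the root cgroup also decides, once
 * per boot, whether swap accounting is enabled. Children inherit
 * use_hierarchy and swappiness from their parent, and when the parent
 * uses hierarchy the new res_counters are chained to the parent's so
 * that charges propagate upward.
 */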
2124 static struct cgroup_subsys_state *
2125 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
2126 {
2127         struct mem_cgroup *mem, *parent;
2128         int node;
2129
2130         mem = mem_cgroup_alloc();
2131         if (!mem)
2132                 return ERR_PTR(-ENOMEM);
2133
2134         for_each_node_state(node, N_POSSIBLE)
2135                 if (alloc_mem_cgroup_per_zone_info(mem, node))
2136                         goto free_out;
2137         /* root ? */
2138         if (cont->parent == NULL) {
2139                 enable_swap_cgroup();
2140                 parent = NULL;
2141         } else {
2142                 parent = mem_cgroup_from_cont(cont->parent);
2143                 mem->use_hierarchy = parent->use_hierarchy;
2144         }
2145
2146         if (parent && parent->use_hierarchy) {
2147                 res_counter_init(&mem->res, &parent->res);
2148                 res_counter_init(&mem->memsw, &parent->memsw);
2149         } else {
2150                 res_counter_init(&mem->res, NULL);
2151                 res_counter_init(&mem->memsw, NULL);
2152         }
2153         mem_cgroup_set_inactive_ratio(mem);
2154         mem->last_scanned_child = NULL;
2155         spin_lock_init(&mem->reclaim_param_lock);
2156
2157         if (parent)
2158                 mem->swappiness = get_swappiness(parent);
2159
2160         return &mem->css;
2161 free_out:
2162         for_each_node_state(node, N_POSSIBLE)
2163                 free_mem_cgroup_per_zone_info(mem, node);
2164         mem_cgroup_free(mem);
2165         return ERR_PTR(-ENOMEM);
2166 }
2167
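/*
 * Called before the cgroup is torn down: mark the memcg obsolete, so
 * swap entries pointing at it are ignored from now on, and try to move
 * the remaining charges to the parent via force_empty.
 */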
2168 static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
2169                                         struct cgroup *cont)
2170 {
2171         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
2172         mem->obsolete = 1;
2173         mem_cgroup_force_empty(mem, false);
2174 }
2175
2176 static void mem_cgroup_destroy(struct cgroup_subsys *ss,
2177                                 struct cgroup *cont)
2178 {
2179         mem_cgroup_free(mem_cgroup_from_cont(cont));
2180 }
2181
2182 static int mem_cgroup_populate(struct cgroup_subsys *ss,
2183                                 struct cgroup *cont)
2184 {
2185         int ret;
2186
2187         ret = cgroup_add_files(cont, ss, mem_cgroup_files,
2188                                 ARRAY_SIZE(mem_cgroup_files));
2189
2190         if (!ret)
2191                 ret = register_memsw_files(cont, ss);
2192         return ret;
2193 }
2194
2195 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
2196                                 struct cgroup *cont,
2197                                 struct cgroup *old_cont,
2198                                 struct task_struct *p)
2199 {
2200         /*
2201          * FIXME: it would be better to move this process's charges from
2202          * the old memcg to the new one, but that is still on the TODO list.
2203          */
2204 }
2205
2206 struct cgroup_subsys mem_cgroup_subsys = {
2207         .name = "memory",
2208         .subsys_id = mem_cgroup_subsys_id,
2209         .create = mem_cgroup_create,
2210         .pre_destroy = mem_cgroup_pre_destroy,
2211         .destroy = mem_cgroup_destroy,
2212         .populate = mem_cgroup_populate,
2213         .attach = mem_cgroup_move_task,
2214         .early_init = 0,
2215 };
2216
2217 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2218
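/*
 * Swap accounting defaults to on when CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 * is built in; booting with "noswapaccount" on the kernel command line
 * turns it off (consumed here, checked in enable_swap_cgroup()).
 */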
2219 static int __init disable_swap_account(char *s)
2220 {
2221         really_do_swap_account = 0;
2222         return 1;
2223 }
2224 __setup("noswapaccount", disable_swap_account);
2225 #endif