memcg: check group leader fix
mm/memcontrol.c (from sfrench/cifs-2.6.git)
1 /* memcontrol.c - Memory Controller
2  *
3  * Copyright IBM Corporation, 2007
4  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5  *
6  * Copyright 2007 OpenVZ SWsoft Inc
7  * Author: Pavel Emelianov <xemul@openvz.org>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  */
19
20 #include <linux/res_counter.h>
21 #include <linux/memcontrol.h>
22 #include <linux/cgroup.h>
23 #include <linux/mm.h>
24 #include <linux/pagemap.h>
25 #include <linux/smp.h>
26 #include <linux/page-flags.h>
27 #include <linux/backing-dev.h>
28 #include <linux/bit_spinlock.h>
29 #include <linux/rcupdate.h>
30 #include <linux/mutex.h>
31 #include <linux/slab.h>
32 #include <linux/swap.h>
33 #include <linux/spinlock.h>
34 #include <linux/fs.h>
35 #include <linux/seq_file.h>
36 #include <linux/vmalloc.h>
37 #include <linux/mm_inline.h>
38 #include <linux/page_cgroup.h>
39 #include "internal.h"
40
41 #include <asm/uaccess.h>
42
43 struct cgroup_subsys mem_cgroup_subsys __read_mostly;
44 #define MEM_CGROUP_RECLAIM_RETRIES      5
45
46 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
47 /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
48 int do_swap_account __read_mostly;
49 static int really_do_swap_account __initdata = 1; /* remembers the boot option */
50 #else
51 #define do_swap_account         (0)
52 #endif
53
54
55 /*
56  * Statistics for memory cgroup.
57  */
58 enum mem_cgroup_stat_index {
59         /*
60          * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
61          */
62         MEM_CGROUP_STAT_CACHE,     /* # of pages charged as cache */
63         MEM_CGROUP_STAT_RSS,       /* # of pages charged as rss */
64         MEM_CGROUP_STAT_PGPGIN_COUNT,   /* # of pages paged in */
65         MEM_CGROUP_STAT_PGPGOUT_COUNT,  /* # of pages paged out */
66
67         MEM_CGROUP_STAT_NSTATS,
68 };
69
70 struct mem_cgroup_stat_cpu {
71         s64 count[MEM_CGROUP_STAT_NSTATS];
72 } ____cacheline_aligned_in_smp;
73
74 struct mem_cgroup_stat {
75         struct mem_cgroup_stat_cpu cpustat[0];
76 };
77
78 /*
79  * For accounting with irqs disabled; no need to increment the preempt count.
80  */
81 static inline void __mem_cgroup_stat_add_safe(struct mem_cgroup_stat_cpu *stat,
82                 enum mem_cgroup_stat_index idx, int val)
83 {
84         stat->count[idx] += val;
85 }
86
87 static s64 mem_cgroup_read_stat(struct mem_cgroup_stat *stat,
88                 enum mem_cgroup_stat_index idx)
89 {
90         int cpu;
91         s64 ret = 0;
92         for_each_possible_cpu(cpu)
93                 ret += stat->cpustat[cpu].count[idx];
94         return ret;
95 }
96
97 /*
98  * per-zone information in memory controller.
99  */
100 struct mem_cgroup_per_zone {
101         /*
102          * the per-cgroup LRU lists (protected by zone->lru_lock)
103          */
104         struct list_head        lists[NR_LRU_LISTS];
105         unsigned long           count[NR_LRU_LISTS];
106 };
107 /* Macro for accessing counter */
108 #define MEM_CGROUP_ZSTAT(mz, idx)       ((mz)->count[(idx)])
109
110 struct mem_cgroup_per_node {
111         struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
112 };
113
114 struct mem_cgroup_lru_info {
115         struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
116 };
117
118 /*
119  * The memory controller data structure. The memory controller controls both
120  * page cache and RSS per cgroup. We would eventually like to provide
121  * statistics based on the statistics developed by Rik Van Riel for clock-pro,
122  * to help the administrator determine what knobs to tune.
123  *
124  * TODO: Add a water mark for the memory controller. Reclaim will begin when
125  * we hit the water mark. Maybe even add a low water mark, such that
126  * no reclaim occurs from a cgroup at its low water mark; this is
127  * a feature that will be implemented much later in the future.
128  */
129 struct mem_cgroup {
130         struct cgroup_subsys_state css;
131         /*
132          * the counter to account for memory usage
133          */
134         struct res_counter res;
135         /*
136          * the counter to account for mem+swap usage.
137          */
138         struct res_counter memsw;
139         /*
140          * Per cgroup active and inactive list, similar to the
141          * per zone LRU lists.
142          */
143         struct mem_cgroup_lru_info info;
144
145         int     prev_priority;  /* for recording reclaim priority */
146
147         /*
148          * While reclaiming in a hierarchy, we cache the last child we
149          * reclaimed from. Protected by cgroup_lock()
150          */
151         struct mem_cgroup *last_scanned_child;
152         /*
153          * Should the accounting and control be hierarchical, per subtree?
154          */
155         bool use_hierarchy;
156         unsigned long   last_oom_jiffies;
157         int             obsolete;
158         atomic_t        refcnt;
159         /*
160          * statistics. This must be placed at the end of memcg.
161          */
162         struct mem_cgroup_stat stat;
163 };
164
165 enum charge_type {
166         MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
167         MEM_CGROUP_CHARGE_TYPE_MAPPED,
168         MEM_CGROUP_CHARGE_TYPE_SHMEM,   /* used by page migration of shmem */
169         MEM_CGROUP_CHARGE_TYPE_FORCE,   /* used by force_empty */
170         MEM_CGROUP_CHARGE_TYPE_SWAPOUT, /* for accounting swapcache */
171         NR_CHARGE_TYPE,
172 };
173
174 /* only used here (for easier reading) */
175 #define PCGF_CACHE      (1UL << PCG_CACHE)
176 #define PCGF_USED       (1UL << PCG_USED)
177 #define PCGF_LOCK       (1UL << PCG_LOCK)
178 static const unsigned long
179 pcg_default_flags[NR_CHARGE_TYPE] = {
180         PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* File Cache */
181         PCGF_USED | PCGF_LOCK, /* Anon */
182         PCGF_CACHE | PCGF_USED | PCGF_LOCK, /* Shmem */
183         0, /* FORCE */
184 };
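
/*
 * Note (inferred from __mem_cgroup_commit_charge() below): pc->flags is
 * assigned from this table wholesale while the page_cgroup bit-spinlock
 * is held, so the Cache/Anon/Shmem templates include PCGF_LOCK to keep
 * that lock bit set across the assignment; unlock_page_cgroup() clears
 * it afterwards.
 */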
185
186
187 /* for encoding cft->private value on file */
188 #define _MEM                    (0)
189 #define _MEMSWAP                (1)
190 #define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val))
191 #define MEMFILE_TYPE(val)       (((val) >> 16) & 0xffff)
192 #define MEMFILE_ATTR(val)       ((val) & 0xffff)
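
/*
 * A worked example of the encoding above: a control file for the
 * mem+swap limit carries MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT) in
 * cft->private, i.e. (1 << 16) | RES_LIMIT; the read/write handlers
 * below then recover _MEMSWAP via MEMFILE_TYPE() and RES_LIMIT via
 * MEMFILE_ATTR().
 */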
193
194 static void mem_cgroup_get(struct mem_cgroup *mem);
195 static void mem_cgroup_put(struct mem_cgroup *mem);
196
197 static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
198                                          struct page_cgroup *pc,
199                                          bool charge)
200 {
201         int val = (charge)? 1 : -1;
202         struct mem_cgroup_stat *stat = &mem->stat;
203         struct mem_cgroup_stat_cpu *cpustat;
204         int cpu = get_cpu();
205
206         cpustat = &stat->cpustat[cpu];
207         if (PageCgroupCache(pc))
208                 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_CACHE, val);
209         else
210                 __mem_cgroup_stat_add_safe(cpustat, MEM_CGROUP_STAT_RSS, val);
211
212         if (charge)
213                 __mem_cgroup_stat_add_safe(cpustat,
214                                 MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
215         else
216                 __mem_cgroup_stat_add_safe(cpustat,
217                                 MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
218         put_cpu();
219 }
220
221 static struct mem_cgroup_per_zone *
222 mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
223 {
224         return &mem->info.nodeinfo[nid]->zoneinfo[zid];
225 }
226
227 static struct mem_cgroup_per_zone *
228 page_cgroup_zoneinfo(struct page_cgroup *pc)
229 {
230         struct mem_cgroup *mem = pc->mem_cgroup;
231         int nid = page_cgroup_nid(pc);
232         int zid = page_cgroup_zid(pc);
233
234         return mem_cgroup_zoneinfo(mem, nid, zid);
235 }
236
237 static unsigned long mem_cgroup_get_all_zonestat(struct mem_cgroup *mem,
238                                         enum lru_list idx)
239 {
240         int nid, zid;
241         struct mem_cgroup_per_zone *mz;
242         u64 total = 0;
243
244         for_each_online_node(nid)
245                 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
246                         mz = mem_cgroup_zoneinfo(mem, nid, zid);
247                         total += MEM_CGROUP_ZSTAT(mz, idx);
248                 }
249         return total;
250 }
251
252 static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
253 {
254         return container_of(cgroup_subsys_state(cont,
255                                 mem_cgroup_subsys_id), struct mem_cgroup,
256                                 css);
257 }
258
259 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
260 {
261         /*
262          * mm_update_next_owner() may clear mm->owner to NULL
263          * if it races with swapoff, page migration, etc.
264          * So this can be called with p == NULL.
265          */
266         if (unlikely(!p))
267                 return NULL;
268
269         return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
270                                 struct mem_cgroup, css);
271 }
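
/*
 * A minimal usage sketch (a hypothetical helper, mirroring callers
 * further down such as mem_cgroup_shrink_usage()): the task->memcg
 * lookup must run under rcu_read_lock() because mm->owner can change
 * or become NULL concurrently, and a css reference must be taken
 * before leaving the RCU section if the memcg is used afterwards.
 */
#if 0
static struct mem_cgroup *example_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
        struct mem_cgroup *mem;

        rcu_read_lock();
        mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
        if (mem)
                css_get(&mem->css);
        rcu_read_unlock();
        return mem;     /* caller does css_put(&mem->css) when done */
}
#endif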
272
273 /*
274  * Following LRU functions are allowed to be used without PCG_LOCK.
275  * Operations are called by routine of global LRU independently from memcg.
276  * What we have to take care of here is validness of pc->mem_cgroup.
277  *
278  * Changes to pc->mem_cgroup happens when
279  * 1. charge
280  * 2. moving account
281  * In the typical case, "charge" is done before add-to-LRU. The exception is SwapCache.
282  * It is added to LRU before charge.
283  * If PCG_USED bit is not set, page_cgroup is not added to this private LRU.
284  * When moving account, the page is not on LRU. It's isolated.
285  */
286
287 void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
288 {
289         struct page_cgroup *pc;
290         struct mem_cgroup *mem;
291         struct mem_cgroup_per_zone *mz;
292
293         if (mem_cgroup_disabled())
294                 return;
295         pc = lookup_page_cgroup(page);
296         /* can happen while we handle swapcache. */
297         if (list_empty(&pc->lru))
298                 return;
299         mz = page_cgroup_zoneinfo(pc);
300         mem = pc->mem_cgroup;
301         MEM_CGROUP_ZSTAT(mz, lru) -= 1;
302         list_del_init(&pc->lru);
303         return;
304 }
305
306 void mem_cgroup_del_lru(struct page *page)
307 {
308         mem_cgroup_del_lru_list(page, page_lru(page));
309 }
310
311 void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
312 {
313         struct mem_cgroup_per_zone *mz;
314         struct page_cgroup *pc;
315
316         if (mem_cgroup_disabled())
317                 return;
318
319         pc = lookup_page_cgroup(page);
320         smp_rmb();
321         /* unused page is not rotated. */
322         if (!PageCgroupUsed(pc))
323                 return;
324         mz = page_cgroup_zoneinfo(pc);
325         list_move(&pc->lru, &mz->lists[lru]);
326 }
327
328 void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
329 {
330         struct page_cgroup *pc;
331         struct mem_cgroup_per_zone *mz;
332
333         if (mem_cgroup_disabled())
334                 return;
335         pc = lookup_page_cgroup(page);
336         /* barrier to sync with "charge" */
337         smp_rmb();
338         if (!PageCgroupUsed(pc))
339                 return;
340
341         mz = page_cgroup_zoneinfo(pc);
342         MEM_CGROUP_ZSTAT(mz, lru) += 1;
343         list_add(&pc->lru, &mz->lists[lru]);
344 }
345 /*
346  * Used to add swapcache to the LRU. Be careful when calling this function:
347  * zone->lru_lock must not be held and irqs must not be disabled.
348  */
349 static void mem_cgroup_lru_fixup(struct page *page)
350 {
351         if (!isolate_lru_page(page))
352                 putback_lru_page(page);
353 }
354
355 void mem_cgroup_move_lists(struct page *page,
356                            enum lru_list from, enum lru_list to)
357 {
358         if (mem_cgroup_disabled())
359                 return;
360         mem_cgroup_del_lru_list(page, from);
361         mem_cgroup_add_lru_list(page, to);
362 }
363
364 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
365 {
366         int ret;
367
368         task_lock(task);
369         ret = task->mm && mm_match_cgroup(task->mm, mem);
370         task_unlock(task);
371         return ret;
372 }
373
374 /*
375  * Calculate mapped_ratio under the memory controller. This will be used in
376  * vmscan.c for determining whether we have to reclaim mapped pages.
377  */
378 int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
379 {
380         long total, rss;
381
382         /*
383          * usage is recorded in bytes. But, here, we assume the number of
384          * physical pages can be represented by "long" on any arch.
385          */
386         total = (long) (mem->res.usage >> PAGE_SHIFT) + 1L;
387         rss = (long)mem_cgroup_read_stat(&mem->stat, MEM_CGROUP_STAT_RSS);
388         return (int)((rss * 100L) / total);
389 }
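
/*
 * For example (illustrative numbers): with res.usage covering 1024
 * pages and an RSS stat of 256 pages, total = 1025 and the function
 * returns (256 * 100) / 1025 = 24, i.e. roughly a quarter of the
 * charged pages are mapped.
 */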
390
391 /*
392  * prev_priority control... this will be used in the memory reclaim path.
393  */
394 int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
395 {
396         return mem->prev_priority;
397 }
398
399 void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem, int priority)
400 {
401         if (priority < mem->prev_priority)
402                 mem->prev_priority = priority;
403 }
404
405 void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem, int priority)
406 {
407         mem->prev_priority = priority;
408 }
409
410 /*
411  * Calculate # of pages to be scanned in this priority/zone.
412  * See also vmscan.c
413  *
414  * priority starts from "DEF_PRIORITY" and is decremented in each loop.
415  * (see include/linux/mmzone.h)
416  */
417
418 long mem_cgroup_calc_reclaim(struct mem_cgroup *mem, struct zone *zone,
419                                         int priority, enum lru_list lru)
420 {
421         long nr_pages;
422         int nid = zone->zone_pgdat->node_id;
423         int zid = zone_idx(zone);
424         struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(mem, nid, zid);
425
426         nr_pages = MEM_CGROUP_ZSTAT(mz, lru);
427
428         return (nr_pages >> priority);
429 }
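
/*
 * For example, with 4096 pages on the LRU in question this returns
 * 4096 >> 12 = 1 page at DEF_PRIORITY (12) and all 4096 pages at
 * priority 0, mirroring how the global reclaim scan count scales.
 */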
430
431 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
432                                         struct list_head *dst,
433                                         unsigned long *scanned, int order,
434                                         int mode, struct zone *z,
435                                         struct mem_cgroup *mem_cont,
436                                         int active, int file)
437 {
438         unsigned long nr_taken = 0;
439         struct page *page;
440         unsigned long scan;
441         LIST_HEAD(pc_list);
442         struct list_head *src;
443         struct page_cgroup *pc, *tmp;
444         int nid = z->zone_pgdat->node_id;
445         int zid = zone_idx(z);
446         struct mem_cgroup_per_zone *mz;
447         int lru = LRU_FILE * !!file + !!active;
448
449         BUG_ON(!mem_cont);
450         mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
451         src = &mz->lists[lru];
452
453         scan = 0;
454         list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
455                 if (scan >= nr_to_scan)
456                         break;
457
458                 page = pc->page;
459                 if (unlikely(!PageCgroupUsed(pc)))
460                         continue;
461                 if (unlikely(!PageLRU(page)))
462                         continue;
463
464                 scan++;
465                 if (__isolate_lru_page(page, mode, file) == 0) {
466                         list_move(&page->lru, dst);
467                         nr_taken++;
468                 }
469         }
470
471         *scanned = scan;
472         return nr_taken;
473 }
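
/*
 * The "lru = LRU_FILE * !!file + !!active" index used above maps
 * directly onto enum lru_list: (file=0, active=0) -> LRU_INACTIVE_ANON,
 * (0, 1) -> LRU_ACTIVE_ANON, (1, 0) -> LRU_INACTIVE_FILE and
 * (1, 1) -> LRU_ACTIVE_FILE.
 */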
474
475 #define mem_cgroup_from_res_counter(counter, member)    \
476         container_of(counter, struct mem_cgroup, member)
477
478 /*
479  * This routine finds the DFS walk successor. This routine should be
480  * called with cgroup_mutex held
481  */
482 static struct mem_cgroup *
483 mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem)
484 {
485         struct cgroup *cgroup, *curr_cgroup, *root_cgroup;
486
487         curr_cgroup = curr->css.cgroup;
488         root_cgroup = root_mem->css.cgroup;
489
490         if (!list_empty(&curr_cgroup->children)) {
491                 /*
492                  * Walk down to children
493                  */
494                 mem_cgroup_put(curr);
495                 cgroup = list_entry(curr_cgroup->children.next,
496                                                 struct cgroup, sibling);
497                 curr = mem_cgroup_from_cont(cgroup);
498                 mem_cgroup_get(curr);
499                 goto done;
500         }
501
502 visit_parent:
503         if (curr_cgroup == root_cgroup) {
504                 mem_cgroup_put(curr);
505                 curr = root_mem;
506                 mem_cgroup_get(curr);
507                 goto done;
508         }
509
510         /*
511          * Goto next sibling
512          */
513         if (curr_cgroup->sibling.next != &curr_cgroup->parent->children) {
514                 mem_cgroup_put(curr);
515                 cgroup = list_entry(curr_cgroup->sibling.next, struct cgroup,
516                                                 sibling);
517                 curr = mem_cgroup_from_cont(cgroup);
518                 mem_cgroup_get(curr);
519                 goto done;
520         }
521
522         /*
523          * Go up to next parent and next parent's sibling if need be
524          */
525         curr_cgroup = curr_cgroup->parent;
526         goto visit_parent;
527
528 done:
529         root_mem->last_scanned_child = curr;
530         return curr;
531 }
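
/*
 * For example, with a hierarchy root -> {A, B} and A -> {A1},
 * successive calls return A1 after A, B after A1, and finally root_mem
 * itself after B, at which point the hierarchical reclaim loop below
 * stops walking.
 */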
532
533 /*
534  * Visit the first child (need not be the first child as per the ordering
535  * of the cgroup list, since we track last_scanned_child) of @root_mem and
536  * use that cgroup to reclaim pages from.
537  */
538 static struct mem_cgroup *
539 mem_cgroup_get_first_node(struct mem_cgroup *root_mem)
540 {
541         struct cgroup *cgroup;
542         struct mem_cgroup *ret;
543         bool obsolete = (root_mem->last_scanned_child &&
544                                 root_mem->last_scanned_child->obsolete);
545
546         /*
547          * Scan all children under the mem_cgroup mem
548          */
549         cgroup_lock();
550         if (list_empty(&root_mem->css.cgroup->children)) {
551                 ret = root_mem;
552                 goto done;
553         }
554
555         if (!root_mem->last_scanned_child || obsolete) {
556
557                 if (obsolete)
558                         mem_cgroup_put(root_mem->last_scanned_child);
559
560                 cgroup = list_first_entry(&root_mem->css.cgroup->children,
561                                 struct cgroup, sibling);
562                 ret = mem_cgroup_from_cont(cgroup);
563                 mem_cgroup_get(ret);
564         } else
565                 ret = mem_cgroup_get_next_node(root_mem->last_scanned_child,
566                                                 root_mem);
567
568 done:
569         root_mem->last_scanned_child = ret;
570         cgroup_unlock();
571         return ret;
572 }
573
574 /*
575  * Dance down the hierarchy if needed to reclaim memory. We remember the
576  * last child we reclaimed from, so that we don't end up penalizing
577  * one child extensively based on its position in the children list.
578  *
579  * root_mem is the original ancestor that we've been reclaiming from.
580  */
581 static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
582                                                 gfp_t gfp_mask, bool noswap)
583 {
584         struct mem_cgroup *next_mem;
585         int ret = 0;
586
587         /*
588          * Reclaim unconditionally and don't check for return value.
589          * We need to reclaim in the current group and down the tree.
590          * One might think about checking for children before reclaiming,
591          * but there might be left over accounting, even after children
592          * have left.
593          */
594         ret = try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap);
595         if (res_counter_check_under_limit(&root_mem->res))
596                 return 0;
597
598         next_mem = mem_cgroup_get_first_node(root_mem);
599
600         while (next_mem != root_mem) {
601                 if (next_mem->obsolete) {
602                         mem_cgroup_put(next_mem);
603                         cgroup_lock();
604                         next_mem = mem_cgroup_get_first_node(root_mem);
605                         cgroup_unlock();
606                         continue;
607                 }
608                 ret = try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap);
609                 if (res_counter_check_under_limit(&root_mem->res))
610                         return 0;
611                 cgroup_lock();
612                 next_mem = mem_cgroup_get_next_node(next_mem, root_mem);
613                 cgroup_unlock();
614         }
615         return ret;
616 }
617
618 bool mem_cgroup_oom_called(struct task_struct *task)
619 {
620         bool ret = false;
621         struct mem_cgroup *mem;
622         struct mm_struct *mm;
623
624         rcu_read_lock();
625         mm = task->mm;
626         if (!mm)
627                 mm = &init_mm;
628         mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
629         if (mem && time_before(jiffies, mem->last_oom_jiffies + HZ/10))
630                 ret = true;
631         rcu_read_unlock();
632         return ret;
633 }
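
/*
 * In other words, this reports true for roughly HZ/10 jiffies (about
 * 100ms) after last_oom_jiffies was stamped, which happens when the
 * memcg OOM killer is invoked from __mem_cgroup_try_charge() below.
 */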
634 /*
635  * Unlike the exported interface, an "oom" parameter is added; if oom == true,
636  * the OOM killer can be invoked.
637  */
638 static int __mem_cgroup_try_charge(struct mm_struct *mm,
639                         gfp_t gfp_mask, struct mem_cgroup **memcg,
640                         bool oom)
641 {
642         struct mem_cgroup *mem, *mem_over_limit;
643         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
644         struct res_counter *fail_res;
645
646         if (unlikely(test_thread_flag(TIF_MEMDIE))) {
647                 /* Don't account this! */
648                 *memcg = NULL;
649                 return 0;
650         }
651
652         /*
653          * We always charge the cgroup the mm_struct belongs to.
654          * The mm_struct's mem_cgroup changes on task migration if the
655          * thread group leader migrates. It's possible that mm is not
656          * set, if so charge the init_mm (happens for pagecache usage).
657          */
658         if (likely(!*memcg)) {
659                 rcu_read_lock();
660                 mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
661                 if (unlikely(!mem)) {
662                         rcu_read_unlock();
663                         return 0;
664                 }
665                 /*
666                  * For every charge from the cgroup, increment reference count
667                  */
668                 css_get(&mem->css);
669                 *memcg = mem;
670                 rcu_read_unlock();
671         } else {
672                 mem = *memcg;
673                 css_get(&mem->css);
674         }
675
676         while (1) {
677                 int ret;
678                 bool noswap = false;
679
680                 ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
681                 if (likely(!ret)) {
682                         if (!do_swap_account)
683                                 break;
684                         ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
685                                                         &fail_res);
686                         if (likely(!ret))
687                                 break;
688                         /* mem+swap counter fails */
689                         res_counter_uncharge(&mem->res, PAGE_SIZE);
690                         noswap = true;
691                         mem_over_limit = mem_cgroup_from_res_counter(fail_res,
692                                                                         memsw);
693                 } else
694                         /* mem counter fails */
695                         mem_over_limit = mem_cgroup_from_res_counter(fail_res,
696                                                                         res);
697
698                 if (!(gfp_mask & __GFP_WAIT))
699                         goto nomem;
700
701                 ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
702                                                         noswap);
703
704                 /*
705                  * try_to_free_mem_cgroup_pages() might not give us a full
706                  * picture of reclaim. Some pages are reclaimed and might be
707                  * moved to swap cache or just unmapped from the cgroup.
708                  * Check the limit again to see if the reclaim reduced the
709                  * current usage of the cgroup before giving up
710                  *
711                  */
712                 if (do_swap_account) {
713                         if (res_counter_check_under_limit(&mem_over_limit->res) &&
714                             res_counter_check_under_limit(&mem_over_limit->memsw))
715                                 continue;
716                 } else if (res_counter_check_under_limit(&mem_over_limit->res))
717                                 continue;
718
719                 if (!nr_retries--) {
720                         if (oom) {
721                                 mem_cgroup_out_of_memory(mem_over_limit, gfp_mask);
722                                 mem_over_limit->last_oom_jiffies = jiffies;
723                         }
724                         goto nomem;
725                 }
726         }
727         return 0;
728 nomem:
729         css_put(&mem->css);
730         return -ENOMEM;
731 }
732
733 /**
734  * mem_cgroup_try_charge - get charge of PAGE_SIZE.
735  * @mm: an mm_struct which is charged against. (when *memcg is NULL)
736  * @gfp_mask: gfp_mask for reclaim.
737  * @memcg: a pointer to memory cgroup which is charged against.
738  *
739  * Charge against the memory cgroup pointed to by *memcg. If *memcg == NULL,
740  * the memory cgroup is looked up from @mm and stored in *memcg.
741  *
742  * Returns 0 on success, -ENOMEM on failure.
743  * This call can invoke the OOM killer.
744  */
745
746 int mem_cgroup_try_charge(struct mm_struct *mm,
747                           gfp_t mask, struct mem_cgroup **memcg)
748 {
749         return __mem_cgroup_try_charge(mm, mask, memcg, true);
750 }
751
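/*
 * A successful try_charge only reserves the res/memsw counters and
 * takes a css reference; it must be paired with a later commit that
 * marks the page_cgroup USED (__mem_cgroup_commit_charge() below) or
 * with a cancel that returns the reservation, as the swap-in path does
 * via mem_cgroup_commit_charge_swapin() /
 * mem_cgroup_cancel_charge_swapin().
 */
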
752 /*
753  * Commit a charge obtained by mem_cgroup_try_charge() and move the page_cgroup
754  * to the USED state. If it is already USED, uncharge and return.
755  */
756
757 static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
758                                      struct page_cgroup *pc,
759                                      enum charge_type ctype)
760 {
761         /* try_charge() can store NULL in *memcg; handle that case here. */
762         if (!mem)
763                 return;
764
765         lock_page_cgroup(pc);
766         if (unlikely(PageCgroupUsed(pc))) {
767                 unlock_page_cgroup(pc);
768                 res_counter_uncharge(&mem->res, PAGE_SIZE);
769                 if (do_swap_account)
770                         res_counter_uncharge(&mem->memsw, PAGE_SIZE);
771                 css_put(&mem->css);
772                 return;
773         }
774         pc->mem_cgroup = mem;
775         smp_wmb();
776         pc->flags = pcg_default_flags[ctype];
777
778         mem_cgroup_charge_statistics(mem, pc, true);
779
780         unlock_page_cgroup(pc);
781 }
782
783 /**
784  * mem_cgroup_move_account - move account of the page
785  * @pc: page_cgroup of the page.
786  * @from: mem_cgroup which the page is moved from.
787  * @to: mem_cgroup which the page is moved to. @from != @to.
788  *
789  * The caller must confirm the following:
790  * - the page is not on the LRU (isolate_lru_page() is useful.)
791  *
792  * returns 0 at success,
793  * returns -EBUSY when lock is busy or "pc" is unstable.
794  *
795  * This function does "uncharge" from old cgroup but doesn't do "charge" to
796  * new cgroup. It should be done by a caller.
797  */
798
799 static int mem_cgroup_move_account(struct page_cgroup *pc,
800         struct mem_cgroup *from, struct mem_cgroup *to)
801 {
802         struct mem_cgroup_per_zone *from_mz, *to_mz;
803         int nid, zid;
804         int ret = -EBUSY;
805
806         VM_BUG_ON(from == to);
807         VM_BUG_ON(PageLRU(pc->page));
808
809         nid = page_cgroup_nid(pc);
810         zid = page_cgroup_zid(pc);
811         from_mz =  mem_cgroup_zoneinfo(from, nid, zid);
812         to_mz =  mem_cgroup_zoneinfo(to, nid, zid);
813
814         if (!trylock_page_cgroup(pc))
815                 return ret;
816
817         if (!PageCgroupUsed(pc))
818                 goto out;
819
820         if (pc->mem_cgroup != from)
821                 goto out;
822
823         css_put(&from->css);
824         res_counter_uncharge(&from->res, PAGE_SIZE);
825         mem_cgroup_charge_statistics(from, pc, false);
826         if (do_swap_account)
827                 res_counter_uncharge(&from->memsw, PAGE_SIZE);
828         pc->mem_cgroup = to;
829         mem_cgroup_charge_statistics(to, pc, true);
830         css_get(&to->css);
831         ret = 0;
832 out:
833         unlock_page_cgroup(pc);
834         return ret;
835 }
836
837 /*
838  * move charges to its parent.
839  */
840
841 static int mem_cgroup_move_parent(struct page_cgroup *pc,
842                                   struct mem_cgroup *child,
843                                   gfp_t gfp_mask)
844 {
845         struct page *page = pc->page;
846         struct cgroup *cg = child->css.cgroup;
847         struct cgroup *pcg = cg->parent;
848         struct mem_cgroup *parent;
849         int ret;
850
851         /* Is ROOT ? */
852         if (!pcg)
853                 return -EINVAL;
854
855
856         parent = mem_cgroup_from_cont(pcg);
857
858
859         ret = __mem_cgroup_try_charge(NULL, gfp_mask, &parent, false);
860         if (ret || !parent)
861                 return ret;
862
863         if (!get_page_unless_zero(page))
864                 return -EBUSY;
865
866         ret = isolate_lru_page(page);
867
868         if (ret)
869                 goto cancel;
870
871         ret = mem_cgroup_move_account(pc, child, parent);
872
873         /* drop the extra refcount from try_charge() (move_account increments one) */
874         css_put(&parent->css);
875         putback_lru_page(page);
876         if (!ret) {
877                 put_page(page);
878                 return 0;
879         }
880         /* uncharge if move fails */
881 cancel:
882         res_counter_uncharge(&parent->res, PAGE_SIZE);
883         if (do_swap_account)
884                 res_counter_uncharge(&parent->memsw, PAGE_SIZE);
885         put_page(page);
886         return ret;
887 }
888
889 /*
890  * Charge the memory controller for page usage.
891  * Return
892  * 0 if the charge was successful
893  * < 0 if the cgroup is over its limit
894  */
895 static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
896                                 gfp_t gfp_mask, enum charge_type ctype,
897                                 struct mem_cgroup *memcg)
898 {
899         struct mem_cgroup *mem;
900         struct page_cgroup *pc;
901         int ret;
902
903         pc = lookup_page_cgroup(page);
904         /* can happen at boot */
905         if (unlikely(!pc))
906                 return 0;
907         prefetchw(pc);
908
909         mem = memcg;
910         ret = __mem_cgroup_try_charge(mm, gfp_mask, &mem, true);
911         if (ret || !mem)
912                 return ret;
913
914         __mem_cgroup_commit_charge(mem, pc, ctype);
915         return 0;
916 }
917
918 int mem_cgroup_newpage_charge(struct page *page,
919                               struct mm_struct *mm, gfp_t gfp_mask)
920 {
921         if (mem_cgroup_disabled())
922                 return 0;
923         if (PageCompound(page))
924                 return 0;
925         /*
926          * If already mapped, we don't have to account.
927          * If page cache, page->mapping has address_space.
928          * But page->mapping may hold a stale anon_vma pointer;
929          * detect that with a PageAnon() check. A newly-mapped anon page's page->mapping
930          * is NULL.
931          */
932         if (page_mapped(page) || (page->mapping && !PageAnon(page)))
933                 return 0;
934         if (unlikely(!mm))
935                 mm = &init_mm;
936         return mem_cgroup_charge_common(page, mm, gfp_mask,
937                                 MEM_CGROUP_CHARGE_TYPE_MAPPED, NULL);
938 }
939
940 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
941                                 gfp_t gfp_mask)
942 {
943         if (mem_cgroup_disabled())
944                 return 0;
945         if (PageCompound(page))
946                 return 0;
947         /*
948          * Corner case handling. This is usually called from add_to_page_cache().
949          * But some filesystems (shmem) precharge the page before calling it
950          * and then call add_to_page_cache() with GFP_NOWAIT.
951          *
952          * In the GFP_NOWAIT case, the page may have been pre-charged before
953          * add_to_page_cache() was called. (See shmem.c.) Check for that here to
954          * avoid charging twice. (It works, but costs a bit more.)
955          */
956         if (!(gfp_mask & __GFP_WAIT)) {
957                 struct page_cgroup *pc;
958
959
960                 pc = lookup_page_cgroup(page);
961                 if (!pc)
962                         return 0;
963                 lock_page_cgroup(pc);
964                 if (PageCgroupUsed(pc)) {
965                         unlock_page_cgroup(pc);
966                         return 0;
967                 }
968                 unlock_page_cgroup(pc);
969         }
970
971         if (unlikely(!mm))
972                 mm = &init_mm;
973
974         if (page_is_file_cache(page))
975                 return mem_cgroup_charge_common(page, mm, gfp_mask,
976                                 MEM_CGROUP_CHARGE_TYPE_CACHE, NULL);
977         else
978                 return mem_cgroup_charge_common(page, mm, gfp_mask,
979                                 MEM_CGROUP_CHARGE_TYPE_SHMEM, NULL);
980 }
981
982 int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
983                                  struct page *page,
984                                  gfp_t mask, struct mem_cgroup **ptr)
985 {
986         struct mem_cgroup *mem;
987         swp_entry_t     ent;
988
989         if (mem_cgroup_disabled())
990                 return 0;
991
992         if (!do_swap_account)
993                 goto charge_cur_mm;
994
995         /*
996          * A racing thread's fault, or swapoff, may have already updated
997          * the pte, and even removed page from swap cache: return success
998          * to go on to do_swap_page()'s pte_same() test, which should fail.
999          */
1000         if (!PageSwapCache(page))
1001                 return 0;
1002
1003         ent.val = page_private(page);
1004
1005         mem = lookup_swap_cgroup(ent);
1006         if (!mem || mem->obsolete)
1007                 goto charge_cur_mm;
1008         *ptr = mem;
1009         return __mem_cgroup_try_charge(NULL, mask, ptr, true);
1010 charge_cur_mm:
1011         if (unlikely(!mm))
1012                 mm = &init_mm;
1013         return __mem_cgroup_try_charge(mm, mask, ptr, true);
1014 }
1015
1016 #ifdef CONFIG_SWAP
1017
1018 int mem_cgroup_cache_charge_swapin(struct page *page,
1019                         struct mm_struct *mm, gfp_t mask, bool locked)
1020 {
1021         int ret = 0;
1022
1023         if (mem_cgroup_disabled())
1024                 return 0;
1025         if (unlikely(!mm))
1026                 mm = &init_mm;
1027         if (!locked)
1028                 lock_page(page);
1029         /*
1030          * If not locked, the page can be dropped from SwapCache before
1031          * we reach here.
1032          */
1033         if (PageSwapCache(page)) {
1034                 struct mem_cgroup *mem = NULL;
1035                 swp_entry_t ent;
1036
1037                 ent.val = page_private(page);
1038                 if (do_swap_account) {
1039                         mem = lookup_swap_cgroup(ent);
1040                         if (mem && mem->obsolete)
1041                                 mem = NULL;
1042                         if (mem)
1043                                 mm = NULL;
1044                 }
1045                 ret = mem_cgroup_charge_common(page, mm, mask,
1046                                 MEM_CGROUP_CHARGE_TYPE_SHMEM, mem);
1047
1048                 if (!ret && do_swap_account) {
1049                         /* avoid double counting */
1050                         mem = swap_cgroup_record(ent, NULL);
1051                         if (mem) {
1052                                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1053                                 mem_cgroup_put(mem);
1054                         }
1055                 }
1056         }
1057         if (!locked)
1058                 unlock_page(page);
1059         /* add this page(page_cgroup) to the LRU we want. */
1060         mem_cgroup_lru_fixup(page);
1061
1062         return ret;
1063 }
1064 #endif
1065
1066 void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
1067 {
1068         struct page_cgroup *pc;
1069
1070         if (mem_cgroup_disabled())
1071                 return;
1072         if (!ptr)
1073                 return;
1074         pc = lookup_page_cgroup(page);
1075         __mem_cgroup_commit_charge(ptr, pc, MEM_CGROUP_CHARGE_TYPE_MAPPED);
1076         /*
1077          * Now the swap page is in memory. This means the page may be
1078          * counted both as mem and swap... a double count.
1079          * Fix it by uncharging from memsw. This SwapCache is stable
1080          * because we're still under lock_page().
1081          */
1082         if (do_swap_account) {
1083                 swp_entry_t ent = {.val = page_private(page)};
1084                 struct mem_cgroup *memcg;
1085                 memcg = swap_cgroup_record(ent, NULL);
1086                 if (memcg) {
1087                         /* If memcg is obsolete, memcg can be != ptr */
1088                         res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1089                         mem_cgroup_put(memcg);
1090                 }
1091
1092         }
1093         /* add this page(page_cgroup) to the LRU we want. */
1094         mem_cgroup_lru_fixup(page);
1095 }
1096
1097 void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
1098 {
1099         if (mem_cgroup_disabled())
1100                 return;
1101         if (!mem)
1102                 return;
1103         res_counter_uncharge(&mem->res, PAGE_SIZE);
1104         if (do_swap_account)
1105                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1106         css_put(&mem->css);
1107 }
1108
1109
1110 /*
1111  * uncharge if !page_mapped(page)
1112  */
1113 static struct mem_cgroup *
1114 __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
1115 {
1116         struct page_cgroup *pc;
1117         struct mem_cgroup *mem = NULL;
1118         struct mem_cgroup_per_zone *mz;
1119
1120         if (mem_cgroup_disabled())
1121                 return NULL;
1122
1123         if (PageSwapCache(page))
1124                 return NULL;
1125
1126         /*
1127          * Check if our page_cgroup is valid
1128          */
1129         pc = lookup_page_cgroup(page);
1130         if (unlikely(!pc || !PageCgroupUsed(pc)))
1131                 return NULL;
1132
1133         lock_page_cgroup(pc);
1134
1135         mem = pc->mem_cgroup;
1136
1137         if (!PageCgroupUsed(pc))
1138                 goto unlock_out;
1139
1140         switch (ctype) {
1141         case MEM_CGROUP_CHARGE_TYPE_MAPPED:
1142                 if (page_mapped(page))
1143                         goto unlock_out;
1144                 break;
1145         case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
1146                 if (!PageAnon(page)) {  /* Shared memory */
1147                         if (page->mapping && !page_is_file_cache(page))
1148                                 goto unlock_out;
1149                 } else if (page_mapped(page)) /* Anon */
1150                                 goto unlock_out;
1151                 break;
1152         default:
1153                 break;
1154         }
1155
1156         res_counter_uncharge(&mem->res, PAGE_SIZE);
1157         if (do_swap_account && (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
1158                 res_counter_uncharge(&mem->memsw, PAGE_SIZE);
1159
1160         mem_cgroup_charge_statistics(mem, pc, false);
1161         ClearPageCgroupUsed(pc);
1162
1163         mz = page_cgroup_zoneinfo(pc);
1164         unlock_page_cgroup(pc);
1165
1166         css_put(&mem->css);
1167
1168         return mem;
1169
1170 unlock_out:
1171         unlock_page_cgroup(pc);
1172         return NULL;
1173 }
1174
1175 void mem_cgroup_uncharge_page(struct page *page)
1176 {
1177         /* early check. */
1178         if (page_mapped(page))
1179                 return;
1180         if (page->mapping && !PageAnon(page))
1181                 return;
1182         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
1183 }
1184
1185 void mem_cgroup_uncharge_cache_page(struct page *page)
1186 {
1187         VM_BUG_ON(page_mapped(page));
1188         VM_BUG_ON(page->mapping);
1189         __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
1190 }
1191
1192 /*
1193  * Called from __delete_from_swap_cache(); drops the "page" account.
1194  * The memcg information is recorded in the swap_cgroup of "ent".
1195  */
1196 void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
1197 {
1198         struct mem_cgroup *memcg;
1199
1200         memcg = __mem_cgroup_uncharge_common(page,
1201                                         MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
1202         /* record memcg information */
1203         if (do_swap_account && memcg) {
1204                 swap_cgroup_record(ent, memcg);
1205                 mem_cgroup_get(memcg);
1206         }
1207 }
1208
1209 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
1210 /*
1211  * Called from swap_entry_free(). Removes the record in swap_cgroup and
1212  * uncharges the "memsw" account.
1213  */
1214 void mem_cgroup_uncharge_swap(swp_entry_t ent)
1215 {
1216         struct mem_cgroup *memcg;
1217
1218         if (!do_swap_account)
1219                 return;
1220
1221         memcg = swap_cgroup_record(ent, NULL);
1222         if (memcg) {
1223                 res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
1224                 mem_cgroup_put(memcg);
1225         }
1226 }
1227 #endif
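
/*
 * Putting the two halves together: at swap-out time
 * mem_cgroup_uncharge_swapcache() drops the "mem" charge but keeps the
 * "memsw" charge, recording the owning memcg in the swap_cgroup of the
 * entry and pinning it with mem_cgroup_get(). That memsw charge and
 * reference are released either here, from swap_entry_free(), or at
 * swap-in time by the commit/cache-charge paths above.
 */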
1228
1229 /*
1230  * Before starting migration, account PAGE_SIZE to the mem_cgroup that the old
1231  * page belongs to.
1232  */
1233 int mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
1234 {
1235         struct page_cgroup *pc;
1236         struct mem_cgroup *mem = NULL;
1237         int ret = 0;
1238
1239         if (mem_cgroup_disabled())
1240                 return 0;
1241
1242         pc = lookup_page_cgroup(page);
1243         lock_page_cgroup(pc);
1244         if (PageCgroupUsed(pc)) {
1245                 mem = pc->mem_cgroup;
1246                 css_get(&mem->css);
1247         }
1248         unlock_page_cgroup(pc);
1249
1250         if (mem) {
1251                 ret = mem_cgroup_try_charge(NULL, GFP_KERNEL, &mem);
1252                 css_put(&mem->css);
1253         }
1254         *ptr = mem;
1255         return ret;
1256 }
1257
1258 /* remove the redundant charge if migration failed */
1259 void mem_cgroup_end_migration(struct mem_cgroup *mem,
1260                 struct page *oldpage, struct page *newpage)
1261 {
1262         struct page *target, *unused;
1263         struct page_cgroup *pc;
1264         enum charge_type ctype;
1265
1266         if (!mem)
1267                 return;
1268
1269         /* at migration success, oldpage->mapping is NULL. */
1270         if (oldpage->mapping) {
1271                 target = oldpage;
1272                 unused = NULL;
1273         } else {
1274                 target = newpage;
1275                 unused = oldpage;
1276         }
1277
1278         if (PageAnon(target))
1279                 ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
1280         else if (page_is_file_cache(target))
1281                 ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
1282         else
1283                 ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
1284
1285         /* unused page is not on radix-tree now. */
1286         if (unused)
1287                 __mem_cgroup_uncharge_common(unused, ctype);
1288
1289         pc = lookup_page_cgroup(target);
1290         /*
1291          * __mem_cgroup_commit_charge() checks the PCG_USED bit of the page_cgroup.
1292          * So, double-counting is effectively avoided.
1293          */
1294         __mem_cgroup_commit_charge(mem, pc, ctype);
1295
1296         /*
1297          * Both oldpage and newpage are still under lock_page(),
1298          * so we don't have to worry about races in the radix-tree.
1299          * But we do have to be careful about whether this page is mapped or not.
1300          *
1301          * There is a case for !page_mapped(). At the start of
1302          * migration, oldpage was mapped. But now, it's zapped.
1303          * But we know *target* page is not freed/reused under us.
1304          * mem_cgroup_uncharge_page() does all necessary checks.
1305          */
1306         if (ctype == MEM_CGROUP_CHARGE_TYPE_MAPPED)
1307                 mem_cgroup_uncharge_page(target);
1308 }
1309
1310 /*
1311  * A call to try to shrink memory usage under the specified resource controller.
1312  * This is typically used for page reclaim on behalf of shmem, to reduce the
1313  * side effect of page allocation from shmem, which is used by some mem_cgroup.
1314  */
1315 int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
1316 {
1317         struct mem_cgroup *mem;
1318         int progress = 0;
1319         int retry = MEM_CGROUP_RECLAIM_RETRIES;
1320
1321         if (mem_cgroup_disabled())
1322                 return 0;
1323         if (!mm)
1324                 return 0;
1325
1326         rcu_read_lock();
1327         mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
1328         if (unlikely(!mem)) {
1329                 rcu_read_unlock();
1330                 return 0;
1331         }
1332         css_get(&mem->css);
1333         rcu_read_unlock();
1334
1335         do {
1336                 progress = try_to_free_mem_cgroup_pages(mem, gfp_mask, true);
1337                 progress += res_counter_check_under_limit(&mem->res);
1338         } while (!progress && --retry);
1339
1340         css_put(&mem->css);
1341         if (!retry)
1342                 return -ENOMEM;
1343         return 0;
1344 }
1345
1346 static DEFINE_MUTEX(set_limit_mutex);
1347
1348 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
1349                                 unsigned long long val)
1350 {
1351
1352         int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
1353         int progress;
1354         u64 memswlimit;
1355         int ret = 0;
1356
1357         while (retry_count) {
1358                 if (signal_pending(current)) {
1359                         ret = -EINTR;
1360                         break;
1361                 }
1362                 /*
1363                  * Rather than hide all this in some function, do it in an
1364                  * open-coded manner so you can see what it really does.
1365                  * We have to guarantee mem->res.limit <= mem->memsw.limit.
1366                  */
1367                 mutex_lock(&set_limit_mutex);
1368                 memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1369                 if (memswlimit < val) {
1370                         ret = -EINVAL;
1371                         mutex_unlock(&set_limit_mutex);
1372                         break;
1373                 }
1374                 ret = res_counter_set_limit(&memcg->res, val);
1375                 mutex_unlock(&set_limit_mutex);
1376
1377                 if (!ret)
1378                         break;
1379
1380                 progress = try_to_free_mem_cgroup_pages(memcg,
1381                                 GFP_KERNEL, false);
1382                 if (!progress)
                                retry_count--;
1383         }
1384         return ret;
1385 }
1386
1387 int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
1388                                 unsigned long long val)
1389 {
1390         int retry_count = MEM_CGROUP_RECLAIM_RETRIES;
1391         u64 memlimit, oldusage, curusage;
1392         int ret;
1393
1394         if (!do_swap_account)
1395                 return -EINVAL;
1396
1397         while (retry_count) {
1398                 if (signal_pending(current)) {
1399                         ret = -EINTR;
1400                         break;
1401                 }
1402                 /*
1403                  * Rather than hide all this in some function, do it in an
1404                  * open-coded manner so you can see what it really does.
1405                  * We have to guarantee mem->res.limit <= mem->memsw.limit.
1406                  */
1407                 mutex_lock(&set_limit_mutex);
1408                 memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1409                 if (memlimit > val) {
1410                         ret = -EINVAL;
1411                         mutex_unlock(&set_limit_mutex);
1412                         break;
1413                 }
1414                 ret = res_counter_set_limit(&memcg->memsw, val);
1415                 mutex_unlock(&set_limit_mutex);
1416
1417                 if (!ret)
1418                         break;
1419
1420                 oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
1421                 try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL, true);
1422                 curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
1423                 if (curusage >= oldusage)
1424                         retry_count--;
1425         }
1426         return ret;
1427 }
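
/*
 * The two limit setters above together enforce
 * mem->res.limit <= mem->memsw.limit whenever swap accounting is
 * enabled. For example, to raise both limits the mem+swap limit must be
 * raised first (otherwise mem_cgroup_resize_limit() returns -EINVAL),
 * and to lower both the plain memory limit must be lowered first.
 */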
1428
1429 /*
1430  * This routine traverses the page_cgroups in the given list and drops them all.
1431  * *And* this routine doesn't reclaim the pages themselves, it just removes the page_cgroups.
1432  */
1433 static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
1434                                 int node, int zid, enum lru_list lru)
1435 {
1436         struct zone *zone;
1437         struct mem_cgroup_per_zone *mz;
1438         struct page_cgroup *pc, *busy;
1439         unsigned long flags, loop;
1440         struct list_head *list;
1441         int ret = 0;
1442
1443         zone = &NODE_DATA(node)->node_zones[zid];
1444         mz = mem_cgroup_zoneinfo(mem, node, zid);
1445         list = &mz->lists[lru];
1446
1447         loop = MEM_CGROUP_ZSTAT(mz, lru);
1448         /* give some margin against EBUSY etc...*/
1449         loop += 256;
1450         busy = NULL;
1451         while (loop--) {
1452                 ret = 0;
1453                 spin_lock_irqsave(&zone->lru_lock, flags);
1454                 if (list_empty(list)) {
1455                         spin_unlock_irqrestore(&zone->lru_lock, flags);
1456                         break;
1457                 }
1458                 pc = list_entry(list->prev, struct page_cgroup, lru);
1459                 if (busy == pc) {
1460                         list_move(&pc->lru, list);
1461                         busy = NULL;
1462                         spin_unlock_irqrestore(&zone->lru_lock, flags);
1463                         continue;
1464                 }
1465                 spin_unlock_irqrestore(&zone->lru_lock, flags);
1466
1467                 ret = mem_cgroup_move_parent(pc, mem, GFP_KERNEL);
1468                 if (ret == -ENOMEM)
1469                         break;
1470
1471                 if (ret == -EBUSY || ret == -EINVAL) {
1472                         /* found lock contention or "pc" is obsolete. */
1473                         busy = pc;
1474                         cond_resched();
1475                 } else
1476                         busy = NULL;
1477         }
1478
1479         if (!ret && !list_empty(list))
1480                 return -EBUSY;
1481         return ret;
1482 }
1483
1484 /*
1485  * Make the mem_cgroup's charge 0 if there are no tasks.
1486  * This enables deletion of this mem_cgroup.
1487  */
1488 static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
1489 {
1490         int ret;
1491         int node, zid, shrink;
1492         int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
1493         struct cgroup *cgrp = mem->css.cgroup;
1494
1495         css_get(&mem->css);
1496
1497         shrink = 0;
1498         /* should free all ? */
1499         if (free_all)
1500                 goto try_to_free;
1501 move_account:
1502         while (mem->res.usage > 0) {
1503                 ret = -EBUSY;
1504                 if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
1505                         goto out;
1506                 ret = -EINTR;
1507                 if (signal_pending(current))
1508                         goto out;
1509                 /* This is for making all *used* pages be on the LRU. */
1510                 lru_add_drain_all();
1511                 ret = 0;
1512                 for_each_node_state(node, N_POSSIBLE) {
1513                         for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
1514                                 enum lru_list l;
1515                                 for_each_lru(l) {
1516                                         ret = mem_cgroup_force_empty_list(mem,
1517                                                         node, zid, l);
1518                                         if (ret)
1519                                                 break;
1520                                 }
1521                         }
1522                         if (ret)
1523                                 break;
1524                 }
1525                 /* it seems parent cgroup doesn't have enough mem */
1526                 if (ret == -ENOMEM)
1527                         goto try_to_free;
1528                 cond_resched();
1529         }
1530         ret = 0;
1531 out:
1532         css_put(&mem->css);
1533         return ret;
1534
1535 try_to_free:
1536         /* returns EBUSY if there is a task or if we come here twice. */
1537         if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
1538                 ret = -EBUSY;
1539                 goto out;
1540         }
1541         /* we call try-to-free pages to make this cgroup empty */
1542         lru_add_drain_all();
1543         /* try to free all pages in this cgroup */
1544         shrink = 1;
1545         while (nr_retries && mem->res.usage > 0) {
1546                 int progress;
1547
1548                 if (signal_pending(current)) {
1549                         ret = -EINTR;
1550                         goto out;
1551                 }
1552                 progress = try_to_free_mem_cgroup_pages(mem,
1553                                                   GFP_KERNEL, false);
1554                 if (!progress) {
1555                         nr_retries--;
1556                         /* maybe some writeback is necessary */
1557                         congestion_wait(WRITE, HZ/10);
1558                 }
1559
1560         }
1561         lru_add_drain();
1562         /* try move_account...there may be some *locked* pages. */
1563         if (mem->res.usage)
1564                 goto move_account;
1565         ret = 0;
1566         goto out;
1567 }
1568
1569 int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
1570 {
1571         return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
1572 }
1573
1574
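/*
 * Read/write handlers for the "use_hierarchy" control file (0 or 1).
 */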
1575 static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
1576 {
1577         return mem_cgroup_from_cont(cont)->use_hierarchy;
1578 }
1579
1580 static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
1581                                         u64 val)
1582 {
1583         int retval = 0;
1584         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1585         struct cgroup *parent = cont->parent;
1586         struct mem_cgroup *parent_mem = NULL;
1587
1588         if (parent)
1589                 parent_mem = mem_cgroup_from_cont(parent);
1590
1591         cgroup_lock();
1592         /*
1593          * If the parent's use_hierarchy is set, we can't make any modifications
1594          * in the child subtrees. If it is unset, then the change can
1595          * occur, provided the current cgroup has no children.
1596          *
1597          * For the root cgroup, parent_mem is NULL; we allow the value to be
1598          * set if there are no children.
1599          */
1600         if ((!parent_mem || !parent_mem->use_hierarchy) &&
1601                                 (val == 1 || val == 0)) {
1602                 if (list_empty(&cont->children))
1603                         mem->use_hierarchy = val;
1604                 else
1605                         retval = -EBUSY;
1606         } else
1607                 retval = -EINVAL;
1608         cgroup_unlock();
1609
1610         return retval;
1611 }
1612
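/*
 * Common read handler for the res_counter backed files (usage, max_usage,
 * limit, failcnt) of both the memory and the memory+swap counters.
 */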
1613 static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
1614 {
1615         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1616         u64 val = 0;
1617         int type, name;
1618
1619         type = MEMFILE_TYPE(cft->private);
1620         name = MEMFILE_ATTR(cft->private);
1621         switch (type) {
1622         case _MEM:
1623                 val = res_counter_read_u64(&mem->res, name);
1624                 break;
1625         case _MEMSWAP:
1626                 if (do_swap_account)
1627                         val = res_counter_read_u64(&mem->memsw, name);
1628                 break;
1629         default:
1630                 BUG();
1631                 break;
1632         }
1633         return val;
1634 }
1635 /*
1636  * The only user of this function is
1637  * RES_LIMIT.
1638  */
1639 static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
1640                             const char *buffer)
1641 {
1642         struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
1643         int type, name;
1644         unsigned long long val;
1645         int ret;
1646
1647         type = MEMFILE_TYPE(cft->private);
1648         name = MEMFILE_ATTR(cft->private);
1649         switch (name) {
1650         case RES_LIMIT:
1651                 /* This function does all the necessary parsing; reuse it */
1652                 ret = res_counter_memparse_write_strategy(buffer, &val);
1653                 if (ret)
1654                         break;
1655                 if (type == _MEM)
1656                         ret = mem_cgroup_resize_limit(memcg, val);
1657                 else
1658                         ret = mem_cgroup_resize_memsw_limit(memcg, val);
1659                 break;
1660         default:
1661                 ret = -EINVAL; /* should be BUG() ? */
1662                 break;
1663         }
1664         return ret;
1665 }
1666
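/*
 * Trigger handler for the max_usage_in_bytes and failcnt files: reset the
 * watermark or the failure counter of the selected res_counter.
 */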
1667 static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
1668 {
1669         struct mem_cgroup *mem;
1670         int type, name;
1671
1672         mem = mem_cgroup_from_cont(cont);
1673         type = MEMFILE_TYPE(event);
1674         name = MEMFILE_ATTR(event);
1675         switch (name) {
1676         case RES_MAX_USAGE:
1677                 if (type == _MEM)
1678                         res_counter_reset_max(&mem->res);
1679                 else
1680                         res_counter_reset_max(&mem->memsw);
1681                 break;
1682         case RES_FAILCNT:
1683                 if (type == _MEM)
1684                         res_counter_reset_failcnt(&mem->res);
1685                 else
1686                         res_counter_reset_failcnt(&mem->memsw);
1687                 break;
1688         }
1689         return 0;
1690 }
1691
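/*
 * Descriptors for the entries shown in memory.stat: cache and rss are
 * reported in bytes, pgpgin/pgpgout as raw event counts.
 */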
1692 static const struct mem_cgroup_stat_desc {
1693         const char *msg;
1694         u64 unit;
1695 } mem_cgroup_stat_desc[] = {
1696         [MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
1697         [MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
1698         [MEM_CGROUP_STAT_PGPGIN_COUNT] = {"pgpgin", 1, },
1699         [MEM_CGROUP_STAT_PGPGOUT_COUNT] = {"pgpgout", 1, },
1700 };
1701
1702 static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
1703                                  struct cgroup_map_cb *cb)
1704 {
1705         struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
1706         struct mem_cgroup_stat *stat = &mem_cont->stat;
1707         int i;
1708
1709         for (i = 0; i < ARRAY_SIZE(stat->cpustat[0].count); i++) {
1710                 s64 val;
1711
1712                 val = mem_cgroup_read_stat(stat, i);
1713                 val *= mem_cgroup_stat_desc[i].unit;
1714                 cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);
1715         }
1716         /* showing # of active pages */
1717         {
1718                 unsigned long active_anon, inactive_anon;
1719                 unsigned long active_file, inactive_file;
1720                 unsigned long unevictable;
1721
1722                 inactive_anon = mem_cgroup_get_all_zonestat(mem_cont,
1723                                                 LRU_INACTIVE_ANON);
1724                 active_anon = mem_cgroup_get_all_zonestat(mem_cont,
1725                                                 LRU_ACTIVE_ANON);
1726                 inactive_file = mem_cgroup_get_all_zonestat(mem_cont,
1727                                                 LRU_INACTIVE_FILE);
1728                 active_file = mem_cgroup_get_all_zonestat(mem_cont,
1729                                                 LRU_ACTIVE_FILE);
1730                 unevictable = mem_cgroup_get_all_zonestat(mem_cont,
1731                                                         LRU_UNEVICTABLE);
1732
1733                 cb->fill(cb, "active_anon", (active_anon) * PAGE_SIZE);
1734                 cb->fill(cb, "inactive_anon", (inactive_anon) * PAGE_SIZE);
1735                 cb->fill(cb, "active_file", (active_file) * PAGE_SIZE);
1736                 cb->fill(cb, "inactive_file", (inactive_file) * PAGE_SIZE);
1737                 cb->fill(cb, "unevictable", unevictable * PAGE_SIZE);
1738
1739         }
1740         return 0;
1741 }
1742
1743
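/*
 * Control files created for every memory cgroup. The memsw.* files below
 * are registered separately, only when swap accounting is enabled.
 */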
1744 static struct cftype mem_cgroup_files[] = {
1745         {
1746                 .name = "usage_in_bytes",
1747                 .private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
1748                 .read_u64 = mem_cgroup_read,
1749         },
1750         {
1751                 .name = "max_usage_in_bytes",
1752                 .private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
1753                 .trigger = mem_cgroup_reset,
1754                 .read_u64 = mem_cgroup_read,
1755         },
1756         {
1757                 .name = "limit_in_bytes",
1758                 .private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
1759                 .write_string = mem_cgroup_write,
1760                 .read_u64 = mem_cgroup_read,
1761         },
1762         {
1763                 .name = "failcnt",
1764                 .private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
1765                 .trigger = mem_cgroup_reset,
1766                 .read_u64 = mem_cgroup_read,
1767         },
1768         {
1769                 .name = "stat",
1770                 .read_map = mem_control_stat_show,
1771         },
1772         {
1773                 .name = "force_empty",
1774                 .trigger = mem_cgroup_force_empty_write,
1775         },
1776         {
1777                 .name = "use_hierarchy",
1778                 .write_u64 = mem_cgroup_hierarchy_write,
1779                 .read_u64 = mem_cgroup_hierarchy_read,
1780         },
1781 };
1782
1783 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
1784 static struct cftype memsw_cgroup_files[] = {
1785         {
1786                 .name = "memsw.usage_in_bytes",
1787                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
1788                 .read_u64 = mem_cgroup_read,
1789         },
1790         {
1791                 .name = "memsw.max_usage_in_bytes",
1792                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
1793                 .trigger = mem_cgroup_reset,
1794                 .read_u64 = mem_cgroup_read,
1795         },
1796         {
1797                 .name = "memsw.limit_in_bytes",
1798                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
1799                 .write_string = mem_cgroup_write,
1800                 .read_u64 = mem_cgroup_read,
1801         },
1802         {
1803                 .name = "memsw.failcnt",
1804                 .private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
1805                 .trigger = mem_cgroup_reset,
1806                 .read_u64 = mem_cgroup_read,
1807         },
1808 };
1809
1810 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
1811 {
1812         if (!do_swap_account)
1813                 return 0;
1814         return cgroup_add_files(cont, ss, memsw_cgroup_files,
1815                                 ARRAY_SIZE(memsw_cgroup_files));
1816 }
1817 #else
1818 static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
1819 {
1820         return 0;
1821 }
1822 #endif
1823
1824 static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
1825 {
1826         struct mem_cgroup_per_node *pn;
1827         struct mem_cgroup_per_zone *mz;
1828         enum lru_list l;
1829         int zone, tmp = node;
1830         /*
1831          * This routine is called for every possible node.
1832          * But it is a BUG to call kmalloc() against an offline node.
1833          *
1834          * TODO: this routine can waste a lot of memory for nodes which will
1835          *       never be onlined. It would be better to use a memory hotplug
1836          *       callback function.
1837          */
1838         if (!node_state(node, N_NORMAL_MEMORY))
1839                 tmp = -1;
1840         pn = kmalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
1841         if (!pn)
1842                 return 1;
1843
1844         mem->info.nodeinfo[node] = pn;
1845         memset(pn, 0, sizeof(*pn));
1846
1847         for (zone = 0; zone < MAX_NR_ZONES; zone++) {
1848                 mz = &pn->zoneinfo[zone];
1849                 for_each_lru(l)
1850                         INIT_LIST_HEAD(&mz->lists[l]);
1851         }
1852         return 0;
1853 }
1854
1855 static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
1856 {
1857         kfree(mem->info.nodeinfo[node]);
1858 }
1859
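/*
 * struct mem_cgroup is allocated together with its per-cpu statistics
 * array, so the allocation size depends on nr_cpu_ids.
 */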
1860 static int mem_cgroup_size(void)
1861 {
1862         int cpustat_size = nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu);
1863         return sizeof(struct mem_cgroup) + cpustat_size;
1864 }
1865
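/* Use kmalloc() while the size fits in a page, fall back to vmalloc(). */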
1866 static struct mem_cgroup *mem_cgroup_alloc(void)
1867 {
1868         struct mem_cgroup *mem;
1869         int size = mem_cgroup_size();
1870
1871         if (size < PAGE_SIZE)
1872                 mem = kmalloc(size, GFP_KERNEL);
1873         else
1874                 mem = vmalloc(size);
1875
1876         if (mem)
1877                 memset(mem, 0, size);
1878         return mem;
1879 }
1880
1881 /*
1882  * When a mem_cgroup is destroyed, references from swap_cgroup can remain.
1883  * (scanning them all at force_empty would be too costly...)
1884  *
1885  * Instead of clearing all references at force_empty, we remember
1886  * the number of references from swap_cgroup and free the mem_cgroup when
1887  * it goes down to 0.
1888  *
1889  * When the mem_cgroup is destroyed, mem->obsolete is set to 1 and
1890  * entries which point to this memcg are ignored at swapin.
1891  *
1892  * Removal of the cgroup itself succeeds regardless of refs from swap.
1893  */
1894
1895 static void mem_cgroup_free(struct mem_cgroup *mem)
1896 {
1897         int node;
1898
1899         if (atomic_read(&mem->refcnt) > 0)
1900                 return;
1901
1902
1903         for_each_node_state(node, N_POSSIBLE)
1904                 free_mem_cgroup_per_zone_info(mem, node);
1905
1906         if (mem_cgroup_size() < PAGE_SIZE)
1907                 kfree(mem);
1908         else
1909                 vfree(mem);
1910 }
1911
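/* Take/drop a reference held on behalf of swap_cgroup entries. */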
1912 static void mem_cgroup_get(struct mem_cgroup *mem)
1913 {
1914         atomic_inc(&mem->refcnt);
1915 }
1916
1917 static void mem_cgroup_put(struct mem_cgroup *mem)
1918 {
1919         if (atomic_dec_and_test(&mem->refcnt)) {
1920                 if (!mem->obsolete)
1921                         return;
1922                 mem_cgroup_free(mem);
1923         }
1924 }
1925
1926
1927 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
1928 static void __init enable_swap_cgroup(void)
1929 {
1930         if (!mem_cgroup_disabled() && really_do_swap_account)
1931                 do_swap_account = 1;
1932 }
1933 #else
1934 static void __init enable_swap_cgroup(void)
1935 {
1936 }
1937 #endif
1938
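/*
 * Create a new memory cgroup: allocate the per-node/zone LRU info, inherit
 * use_hierarchy from the parent and, for hierarchical groups, make the
 * res_counters children of the parent's counters.
 */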
1939 static struct cgroup_subsys_state *
1940 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
1941 {
1942         struct mem_cgroup *mem, *parent;
1943         int node;
1944
1945         mem = mem_cgroup_alloc();
1946         if (!mem)
1947                 return ERR_PTR(-ENOMEM);
1948
1949         for_each_node_state(node, N_POSSIBLE)
1950                 if (alloc_mem_cgroup_per_zone_info(mem, node))
1951                         goto free_out;
1952         /* root ? */
1953         if (cont->parent == NULL) {
1954                 enable_swap_cgroup();
1955                 parent = NULL;
1956         } else {
1957                 parent = mem_cgroup_from_cont(cont->parent);
1958                 mem->use_hierarchy = parent->use_hierarchy;
1959         }
1960
1961         if (parent && parent->use_hierarchy) {
1962                 res_counter_init(&mem->res, &parent->res);
1963                 res_counter_init(&mem->memsw, &parent->memsw);
1964         } else {
1965                 res_counter_init(&mem->res, NULL);
1966                 res_counter_init(&mem->memsw, NULL);
1967         }
1968
1969         mem->last_scanned_child = NULL;
1970
1971         return &mem->css;
1972 free_out:
1973         for_each_node_state(node, N_POSSIBLE)
1974                 free_mem_cgroup_per_zone_info(mem, node);
1975         mem_cgroup_free(mem);
1976         return ERR_PTR(-ENOMEM);
1977 }
1978
1979 static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
1980                                         struct cgroup *cont)
1981 {
1982         struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
1983         mem->obsolete = 1;
1984         mem_cgroup_force_empty(mem, false);
1985 }
1986
1987 static void mem_cgroup_destroy(struct cgroup_subsys *ss,
1988                                 struct cgroup *cont)
1989 {
1990         mem_cgroup_free(mem_cgroup_from_cont(cont));
1991 }
1992
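/* Create the control files; memsw files are added only with swap accounting. */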
1993 static int mem_cgroup_populate(struct cgroup_subsys *ss,
1994                                 struct cgroup *cont)
1995 {
1996         int ret;
1997
1998         ret = cgroup_add_files(cont, ss, mem_cgroup_files,
1999                                 ARRAY_SIZE(mem_cgroup_files));
2000
2001         if (!ret)
2002                 ret = register_memsw_files(cont, ss);
2003         return ret;
2004 }
2005
2006 static void mem_cgroup_move_task(struct cgroup_subsys *ss,
2007                                 struct cgroup *cont,
2008                                 struct cgroup *old_cont,
2009                                 struct task_struct *p)
2010 {
2011         /*
2012          * FIXME: It would be better to move this process's charges from the
2013          * old memcg to the new one. But this is still only on the TODO list.
2014          */
2015 }
2016
2017 struct cgroup_subsys mem_cgroup_subsys = {
2018         .name = "memory",
2019         .subsys_id = mem_cgroup_subsys_id,
2020         .create = mem_cgroup_create,
2021         .pre_destroy = mem_cgroup_pre_destroy,
2022         .destroy = mem_cgroup_destroy,
2023         .populate = mem_cgroup_populate,
2024         .attach = mem_cgroup_move_task,
2025         .early_init = 0,
2026 };
2027
2028 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
2029
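/* "noswapaccount" boot option: disable memory+swap accounting at boot. */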
2030 static int __init disable_swap_account(char *s)
2031 {
2032         really_do_swap_account = 0;
2033         return 1;
2034 }
2035 __setup("noswapaccount", disable_swap_account);
2036 #endif