Memory controller: make page_referenced() cgroup aware
author	Balbir Singh <balbir@linux.vnet.ibm.com>
	Thu, 7 Feb 2008 08:14:01 +0000 (00:14 -0800)
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>
	Thu, 7 Feb 2008 16:42:19 +0000 (08:42 -0800)
Make page_referenced() cgroup aware.  Without this patch, references from
tasks in other cgroups can keep a page looking active, causing it to be
skipped while a cgroup is reclaiming pages; in effect, other cgroups can
hold pages in a particular cgroup hostage.  With this patch, reclaim on
behalf of a cgroup counts only references from that cgroup, so shared
pages are freed from a cgroup once they are no longer actively referenced
from the cgroup that brought them in.
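
The mechanism, in brief: both rmap walkers (page_referenced_anon() and
page_referenced_file()) now take the reclaim target, and during targeted
reclaim they ignore references coming from mms that belong to a different
cgroup.  Below is a minimal, self-contained userspace model of that filter;
the structures and the count_referenced() helper are illustrative stand-ins
for the kernel code in the hunks that follow, not the kernel's own types:

    #include <stddef.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the kernel structures involved. */
    struct mem_cgroup { const char *name; };
    struct mm_struct  { struct mem_cgroup *mem_cgroup; };
    struct vma        { struct mm_struct *vm_mm; int referenced; };

    /* Model of mm_cgroup(): the cgroup this mm is charged to. */
    static struct mem_cgroup *mm_cgroup(struct mm_struct *mm)
    {
            return mm->mem_cgroup;
    }

    /*
     * Model of the loop in page_referenced_anon()/page_referenced_file():
     * with mem_cont == NULL (global reclaim) every reference counts;
     * otherwise only references from vmas in the target cgroup count.
     */
    static int count_referenced(struct vma *vmas, size_t n,
                                struct mem_cgroup *mem_cont)
    {
            int referenced = 0;
            size_t i;

            for (i = 0; i < n; i++) {
                    if (mem_cont && mm_cgroup(vmas[i].vm_mm) != mem_cont)
                            continue;  /* reference from another cgroup */
                    referenced += vmas[i].referenced;
            }
            return referenced;
    }

    int main(void)
    {
            struct mem_cgroup a = { "A" }, b = { "B" };
            struct mm_struct mm_a = { &a }, mm_b = { &b };
            struct vma vmas[] = { { &mm_a, 1 }, { &mm_b, 1 } };

            /* Global reclaim sees 2 references; cgroup-A reclaim sees 1. */
            printf("%d %d\n", count_referenced(vmas, 2, NULL),
                              count_referenced(vmas, 2, &a));
            return 0;
    }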

Signed-off-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Pavel Emelianov <xemul@openvz.org>
Cc: Paul Menage <menage@google.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Kirill Korotaev <dev@sw.ru>
Cc: Herbert Poetzl <herbert@13thfloor.at>
Cc: David Rientjes <rientjes@google.com>
Cc: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/memcontrol.h
include/linux/rmap.h
mm/memcontrol.c
mm/rmap.c
mm/vmscan.c

index bb6f5105401baceedd7faea5684cd4d4467aa0e2..9d0a830423b625f7ab6a8315dedc429621323b32 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -43,6 +43,7 @@ extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
                                        int active);
 extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
 extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm);
+extern struct mem_cgroup *mm_cgroup(struct mm_struct *mm);
 
 static inline void mem_cgroup_uncharge_page(struct page *page)
 {
@@ -93,6 +94,11 @@ static inline int mem_cgroup_cache_charge(struct page *page,
        return 0;
 }
 
+static inline struct mem_cgroup *mm_cgroup(struct mm_struct *mm)
+{
+       return NULL;
+}
+
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #endif /* _LINUX_MEMCONTROL_H */
index 97347f22fc207ba25892536a4443c5c49bc5c90e..1383692ac5bd8c8dc5c06498a38795a256f56c93 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -8,6 +8,7 @@
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/spinlock.h>
+#include <linux/memcontrol.h>
 
 /*
  * The anon_vma heads a list of private "related" vmas, to scan if
@@ -86,7 +87,7 @@ static inline void page_dup_rmap(struct page *page, struct vm_area_struct *vma,
 /*
  * Called from mm/vmscan.c to handle paging out
  */
-int page_referenced(struct page *, int is_locked);
+int page_referenced(struct page *, int is_locked, struct mem_cgroup *cnt);
 int try_to_unmap(struct page *, int ignore_refs);
 
 /*
@@ -114,7 +115,7 @@ int page_mkclean(struct page *);
 #define anon_vma_prepare(vma)  (0)
 #define anon_vma_link(vma)     do {} while (0)
 
-#define page_referenced(page,l) TestClearPageReferenced(page)
+#define page_referenced(page,l,cnt) TestClearPageReferenced(page)
 #define try_to_unmap(page, refs) SWAP_FAIL
 
 static inline int page_mkclean(struct page *page)
index 10833d969e3fa0c5c276b65e7d86bdfd6bc6f042..ff7cac602984b0ffa26159e7228376bcf19a702e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -110,6 +110,11 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
                                struct mem_cgroup, css);
 }
 
+inline struct mem_cgroup *mm_cgroup(struct mm_struct *mm)
+{
+       return rcu_dereference(mm->mem_cgroup);
+}
+
 void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p)
 {
        struct mem_cgroup *mem;
index 4a3487921effd04b4ab3c3cdbddf216259366d10..a0e92a263d12eb734e8a8171e9d575e35dc0d768 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -302,7 +302,8 @@ out:
        return referenced;
 }
 
-static int page_referenced_anon(struct page *page)
+static int page_referenced_anon(struct page *page,
+                               struct mem_cgroup *mem_cont)
 {
        unsigned int mapcount;
        struct anon_vma *anon_vma;
@@ -315,6 +316,13 @@ static int page_referenced_anon(struct page *page)
 
        mapcount = page_mapcount(page);
        list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+               /*
+                * If we are reclaiming on behalf of a cgroup, skip
+                * counting references that come from vmas belonging
+                * to other cgroups.
+                */
+               if (mem_cont && (mm_cgroup(vma->vm_mm) != mem_cont))
+                       continue;
                referenced += page_referenced_one(page, vma, &mapcount);
                if (!mapcount)
                        break;
@@ -335,7 +343,8 @@ static int page_referenced_anon(struct page *page)
  *
  * This function is only called from page_referenced for object-based pages.
  */
-static int page_referenced_file(struct page *page)
+static int page_referenced_file(struct page *page,
+                               struct mem_cgroup *mem_cont)
 {
        unsigned int mapcount;
        struct address_space *mapping = page->mapping;
@@ -368,6 +377,13 @@ static int page_referenced_file(struct page *page)
        mapcount = page_mapcount(page);
 
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
+               /*
+                * If we are reclaiming on behalf of a cgroup, skip
+                * counting references that come from vmas belonging
+                * to other cgroups.
+                */
+               if (mem_cont && (mm_cgroup(vma->vm_mm) != mem_cont))
+                       continue;
                if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
                                  == (VM_LOCKED|VM_MAYSHARE)) {
                        referenced++;
@@ -390,7 +406,8 @@ static int page_referenced_file(struct page *page)
  * Quick test_and_clear_referenced for all mappings to a page,
  * returns the number of ptes which referenced the page.
  */
-int page_referenced(struct page *page, int is_locked)
+int page_referenced(struct page *page, int is_locked,
+                       struct mem_cgroup *mem_cont)
 {
        int referenced = 0;
 
@@ -402,14 +419,15 @@ int page_referenced(struct page *page, int is_locked)
 
        if (page_mapped(page) && page->mapping) {
                if (PageAnon(page))
-                       referenced += page_referenced_anon(page);
+                       referenced += page_referenced_anon(page, mem_cont);
                else if (is_locked)
-                       referenced += page_referenced_file(page);
+                       referenced += page_referenced_file(page, mem_cont);
                else if (TestSetPageLocked(page))
                        referenced++;
                else {
                        if (page->mapping)
-                               referenced += page_referenced_file(page);
+                               referenced +=
+                                       page_referenced_file(page, mem_cont);
                        unlock_page(page);
                }
        }
index 7408a8a7d882d4b333d6b6d370d9d48e1c8076d4..215f6a726b2fa53a5878580fafda2818c2757326 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -503,7 +503,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                                goto keep_locked;
                }
 
-               referenced = page_referenced(page, 1);
+               referenced = page_referenced(page, 1, sc->mem_cgroup);
                /* In active use or really unfreeable?  Activate it. */
                if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
                                        referenced && page_mapping_inuse(page))
@@ -1057,7 +1057,7 @@ force_reclaim_mapped:
                if (page_mapped(page)) {
                        if (!reclaim_mapped ||
                            (total_swap_pages == 0 && PageAnon(page)) ||
-                           page_referenced(page, 0)) {
+                           page_referenced(page, 0, sc->mem_cgroup)) {
                                list_add(&page->lru, &l_active);
                                continue;
                        }
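
A note on the vmscan.c call sites above: the filter in the rmap walkers only
engages when the mem_cgroup argument is non-NULL, so global reclaim (where
sc->mem_cgroup is presumably NULL) keeps the pre-patch count-everything
behaviour, while reclaim on behalf of a cgroup counts only references from
that cgroup's own mms.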