thp: memcg huge memory
author     Andrea Arcangeli <aarcange@redhat.com>
           Thu, 13 Jan 2011 23:46:57 +0000 (15:46 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 14 Jan 2011 01:32:43 +0000 (17:32 -0800)
Add memcg charge/uncharge to hugepage faults in huge_memory.c.
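
The pattern applied at each fault path is: charge the freshly allocated huge
page to the faulting mm's memcg right after allocation, and on every error
path that drops the page before it is mapped, undo the charge by hand
(batched with mem_cgroup_uncharge_start()/mem_cgroup_uncharge_end() when a
loop of pages is involved).  A minimal sketch of that pattern follows; the
fault logic is elided and do_map_huge_page() is a hypothetical stand-in for
the rest of the handler, not a function in this patch:

	static int huge_fault_sketch(struct mm_struct *mm,
				     struct vm_area_struct *vma,
				     unsigned long haddr, pmd_t *pmd)
	{
		struct page *page;

		page = alloc_hugepage(transparent_hugepage_defrag(vma));
		if (unlikely(!page))
			return VM_FAULT_OOM;

		/* charge the compound page before it becomes visible */
		if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
			put_page(page);
			return VM_FAULT_OOM;
		}

		if (do_map_huge_page(mm, vma, haddr, pmd, page)) {
			/*
			 * The page was never mapped, so unmap-time
			 * accounting will not run; undo the charge here.
			 */
			mem_cgroup_uncharge_page(page);
			put_page(page);
			return VM_FAULT_OOM;
		}
		return 0;
	}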

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/huge_memory.c

index 620891f4e54f4ee6543b1cc82bcf3d810804ee7c..a313403b3c5ecb589bcd78183ecb64c3bfbd6c54 100644
@@ -233,6 +233,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
        VM_BUG_ON(!PageCompound(page));
        pgtable = pte_alloc_one(mm, haddr);
        if (unlikely(!pgtable)) {
+               mem_cgroup_uncharge_page(page);
                put_page(page);
                return VM_FAULT_OOM;
        }
@@ -243,6 +244,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
        spin_lock(&mm->page_table_lock);
        if (unlikely(!pmd_none(*pmd))) {
                spin_unlock(&mm->page_table_lock);
+               mem_cgroup_uncharge_page(page);
                put_page(page);
                pte_free(mm, pgtable);
        } else {
@@ -286,6 +288,10 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                page = alloc_hugepage(transparent_hugepage_defrag(vma));
                if (unlikely(!page))
                        goto out;
+               if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
+                       put_page(page);
+                       goto out;
+               }
 
                return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page);
        }
@@ -402,9 +408,17 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
        for (i = 0; i < HPAGE_PMD_NR; i++) {
                pages[i] = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
                                          vma, address);
-               if (unlikely(!pages[i])) {
-                       while (--i >= 0)
+               if (unlikely(!pages[i] ||
+                            mem_cgroup_newpage_charge(pages[i], mm,
+                                                      GFP_KERNEL))) {
+                       if (pages[i])
                                put_page(pages[i]);
+                       mem_cgroup_uncharge_start();
+                       while (--i >= 0) {
+                               mem_cgroup_uncharge_page(pages[i]);
+                               put_page(pages[i]);
+                       }
+                       mem_cgroup_uncharge_end();
                        kfree(pages);
                        ret |= VM_FAULT_OOM;
                        goto out;
@@ -455,8 +469,12 @@ out:
 
 out_free_pages:
        spin_unlock(&mm->page_table_lock);
-       for (i = 0; i < HPAGE_PMD_NR; i++)
+       mem_cgroup_uncharge_start();
+       for (i = 0; i < HPAGE_PMD_NR; i++) {
+               mem_cgroup_uncharge_page(pages[i]);
                put_page(pages[i]);
+       }
+       mem_cgroup_uncharge_end();
        kfree(pages);
        goto out;
 }
@@ -501,14 +519,22 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                goto out;
        }
 
+       if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
+               put_page(new_page);
+               put_page(page);
+               ret |= VM_FAULT_OOM;
+               goto out;
+       }
+
        copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
        __SetPageUptodate(new_page);
 
        spin_lock(&mm->page_table_lock);
        put_page(page);
-       if (unlikely(!pmd_same(*pmd, orig_pmd)))
+       if (unlikely(!pmd_same(*pmd, orig_pmd))) {
+               mem_cgroup_uncharge_page(new_page);
                put_page(new_page);
-       else {
+       } else {
                pmd_t entry;
                VM_BUG_ON(!PageHead(page));
                entry = mk_pmd(new_page, vma->vm_page_prot);