X-Git-Url: http://git.samba.org/samba.git/?a=blobdiff_plain;f=mm%2Fhugetlb.c;h=1a5642074e342532f4a844ed0d2fdc86bd9b5de9;hb=b370b08274a25cf1e2015fb7ce65c43173c8156f;hp=abe1e9f2a942281df9707530610dc0b3c349ac04;hpb=8cde045c7ee97573be6ce495b8f7c918182a2c7a;p=sfrench%2Fcifs-2.6.git

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index abe1e9f2a942..1a5642074e34 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -31,7 +31,7 @@ static unsigned int free_huge_pages_node[MAX_NUMNODES];
 static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
 static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
 unsigned long hugepages_treat_as_movable;
-int hugetlb_dynamic_pool;
+unsigned long nr_overcommit_huge_pages;
 static int hugetlb_next_nid;
 
 /*
@@ -227,22 +227,58 @@ static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
 						unsigned long address)
 {
 	struct page *page;
+	unsigned int nid;
 
-	/* Check if the dynamic pool is enabled */
-	if (!hugetlb_dynamic_pool)
+	/*
+	 * Assume we will successfully allocate the surplus page to
+	 * prevent racing processes from causing the surplus to exceed
+	 * overcommit
+	 *
+	 * This however introduces a different race, where a process B
+	 * tries to grow the static hugepage pool while alloc_pages() is
+	 * called by process A. B will only examine the per-node
+	 * counters in determining if surplus huge pages can be
+	 * converted to normal huge pages in adjust_pool_surplus(). A
+	 * won't be able to increment the per-node counter, until the
+	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
+	 * no more huge pages can be converted from surplus to normal
+	 * state (and doesn't try to convert again). Thus, we have a
+	 * case where a surplus huge page exists, the pool is grown, and
+	 * the surplus huge page still exists after, even though it
+	 * should just have been converted to a normal huge page. This
+	 * does not leak memory, though, as the hugepage will be freed
+	 * once it is out of use. It also does not allow the counters to
+	 * go out of whack in adjust_pool_surplus() as we don't modify
+	 * the node values until we've gotten the hugepage and only the
+	 * per-node value is checked there.
+	 */
+	spin_lock(&hugetlb_lock);
+	if (surplus_huge_pages >= nr_overcommit_huge_pages) {
+		spin_unlock(&hugetlb_lock);
 		return NULL;
+	} else {
+		nr_huge_pages++;
+		surplus_huge_pages++;
+	}
+	spin_unlock(&hugetlb_lock);
 
 	page = alloc_pages(htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
 					HUGETLB_PAGE_ORDER);
+
+	spin_lock(&hugetlb_lock);
 	if (page) {
+		nid = page_to_nid(page);
 		set_compound_page_dtor(page, free_huge_page);
-		spin_lock(&hugetlb_lock);
-		nr_huge_pages++;
-		nr_huge_pages_node[page_to_nid(page)]++;
-		surplus_huge_pages++;
-		surplus_huge_pages_node[page_to_nid(page)]++;
-		spin_unlock(&hugetlb_lock);
+		/*
+		 * We incremented the global counters already
+		 */
+		nr_huge_pages_node[nid]++;
+		surplus_huge_pages_node[nid]++;
+	} else {
+		nr_huge_pages--;
+		surplus_huge_pages--;
 	}
+	spin_unlock(&hugetlb_lock);
 
 	return page;
 }
@@ -382,9 +418,14 @@ static struct page *alloc_huge_page_private(struct vm_area_struct *vma,
 	if (free_huge_pages > resv_huge_pages)
 		page = dequeue_huge_page(vma, addr);
 	spin_unlock(&hugetlb_lock);
-	if (!page)
+	if (!page) {
 		page = alloc_buddy_huge_page(vma, addr);
-	return page ? page : ERR_PTR(-VM_FAULT_OOM);
+		if (!page) {
+			hugetlb_put_quota(vma->vm_file->f_mapping, 1);
+			return ERR_PTR(-VM_FAULT_OOM);
+		}
+	}
+	return page;
 }
 
 static struct page *alloc_huge_page(struct vm_area_struct *vma,
@@ -481,6 +522,12 @@ static unsigned long set_max_huge_pages(unsigned long count)
	 * Increase the pool size
	 * First take pages out of surplus state. Then make up the
	 * remaining difference by allocating fresh huge pages.
+	 *
+	 * We might race with alloc_buddy_huge_page() here and be unable
+	 * to convert a surplus huge page to a normal huge page. That is
+	 * not critical, though, it just means the overall size of the
+	 * pool might be one hugepage larger than it needs to be, but
+	 * within all the constraints specified by the sysctls.
	 */
 	spin_lock(&hugetlb_lock);
 	while (surplus_huge_pages && count > persistent_huge_pages) {
@@ -509,6 +556,14 @@ static unsigned long set_max_huge_pages(unsigned long count)
	 * to keep enough around to satisfy reservations). Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
+	 *
+	 * By placing pages into the surplus state independent of the
+	 * overcommit value, we are allowing the surplus pool size to
+	 * exceed overcommit. There are few sane options here. Since
+	 * alloc_buddy_huge_page() is checking the global counter,
+	 * though, we'll note that we're not allowed to exceed surplus
+	 * and won't grow the pool anywhere else. Not until one of the
+	 * sysctls are changed, or the surplus pages go out of use.
	 */
 	min_count = resv_huge_pages + nr_huge_pages - free_huge_pages;
 	min_count = max(count, min_count);
@@ -644,6 +699,11 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 		dst_pte = huge_pte_alloc(dst, addr);
 		if (!dst_pte)
 			goto nomem;
+
+		/* If the pagetables are shared don't copy or take references */
+		if (dst_pte == src_pte)
+			continue;
+
 		spin_lock(&dst->page_table_lock);
 		spin_lock(&src->page_table_lock);
 		if (!pte_none(*src_pte)) {
@@ -753,6 +813,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	spin_unlock(&mm->page_table_lock);
 	copy_huge_page(new_page, old_page, address, vma);
+	__SetPageUptodate(new_page);
 
 	spin_lock(&mm->page_table_lock);
 	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
@@ -798,9 +859,11 @@ retry:
 			goto out;
 		}
 		clear_huge_page(page, address);
+		__SetPageUptodate(page);
 
 		if (vma->vm_flags & VM_SHARED) {
 			int err;
+			struct inode *inode = mapping->host;
 
 			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
 			if (err) {
@@ -809,6 +872,10 @@ retry:
 					goto retry;
 				goto out;
 			}
+
+			spin_lock(&inode->i_lock);
+			inode->i_blocks += BLOCKS_PER_HUGEPAGE;
+			spin_unlock(&inode->i_lock);
 		} else
 			lock_page(page);
 	}
@@ -902,7 +969,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
		 */
 		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);
 
-		if (!pte || pte_none(*pte)) {
+		if (!pte || pte_none(*pte) || (write && !pte_write(*pte))) {
 			int ret;
 
 			spin_unlock(&mm->page_table_lock);
@@ -1151,8 +1218,10 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to)
 	if (hugetlb_get_quota(inode->i_mapping, chg))
 		return -ENOSPC;
 	ret = hugetlb_acct_memory(chg);
-	if (ret < 0)
+	if (ret < 0) {
+		hugetlb_put_quota(inode->i_mapping, chg);
 		return ret;
+	}
 	region_add(&inode->i_mapping->private_list, from, to);
 	return 0;
 }
@@ -1160,6 +1229,11 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to)
 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
 {
 	long chg = region_truncate(&inode->i_mapping->private_list, offset);
+
+	spin_lock(&inode->i_lock);
+	inode->i_blocks -= BLOCKS_PER_HUGEPAGE * freed;
+	spin_unlock(&inode->i_lock);
+
 	hugetlb_put_quota(inode->i_mapping, (chg - freed));
 	hugetlb_acct_memory(-(chg - freed));
 }
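
The core of the change in alloc_buddy_huge_page() above is optimistic accounting: the global counters are charged against nr_overcommit_huge_pages under hugetlb_lock before the potentially slow alloc_pages() call, and the charge is rolled back only if that allocation fails, so racing callers can never collectively exceed the overcommit limit. Below is a minimal user-space sketch of that pattern, included purely as illustration; it is not kernel code, and every identifier in it (pool_lock, nr_overcommit_pages, alloc_surplus_page, SURPLUS_PAGE_SIZE) is invented for the example.

/*
 * Illustrative user-space sketch only -- not kernel code.  It mirrors the
 * pattern alloc_buddy_huge_page() uses in the hunk above: charge the
 * counters against the overcommit limit before the expensive allocation,
 * then roll the charge back if the allocation fails.
 *
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define SURPLUS_PAGE_SIZE	(2UL * 1024 * 1024)	/* stand-in for a huge page */

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long nr_pages;			/* total pages in the pool     */
static unsigned long surplus_pages;		/* dynamically allocated pages */
static unsigned long nr_overcommit_pages = 4;	/* analogue of the new counter */

static void *alloc_surplus_page(void)
{
	void *page;

	/*
	 * Charge the surplus up front so that racing callers cannot
	 * collectively push surplus_pages past nr_overcommit_pages.
	 */
	pthread_mutex_lock(&pool_lock);
	if (surplus_pages >= nr_overcommit_pages) {
		pthread_mutex_unlock(&pool_lock);
		return NULL;
	}
	nr_pages++;
	surplus_pages++;
	pthread_mutex_unlock(&pool_lock);

	/* The slow allocation happens outside the lock. */
	page = malloc(SURPLUS_PAGE_SIZE);

	/* On failure, undo the optimistic charge. */
	if (!page) {
		pthread_mutex_lock(&pool_lock);
		nr_pages--;
		surplus_pages--;
		pthread_mutex_unlock(&pool_lock);
	}
	return page;
}

int main(void)
{
	void *page = alloc_surplus_page();

	printf("page=%p nr_pages=%lu surplus_pages=%lu\n",
	       page, nr_pages, surplus_pages);
	free(page);
	return 0;
}

Note that this hunk only introduces the nr_overcommit_huge_pages counter itself; the administrator-facing knob is wired up outside mm/hugetlb.c. In the mainline series it appears, as far as I can tell, as /proc/sys/vm/nr_overcommit_hugepages, so something like "echo 16 > /proc/sys/vm/nr_overcommit_hugepages" would raise the surplus ceiling to 16 huge pages.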