flush icache before set_pte() on ia64: flush icache at set_pte
1 /*
2  * Generic hugetlb support.
3  * (C) William Irwin, April 2004
4  */
5 #include <linux/gfp.h>
6 #include <linux/list.h>
7 #include <linux/init.h>
8 #include <linux/module.h>
9 #include <linux/mm.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/nodemask.h>
13 #include <linux/pagemap.h>
14 #include <linux/mempolicy.h>
15 #include <linux/cpuset.h>
16 #include <linux/mutex.h>
17
18 #include <asm/page.h>
19 #include <asm/pgtable.h>
20
21 #include <linux/hugetlb.h>
22 #include "internal.h"
23
24 const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
25 static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
26 unsigned long max_huge_pages;
27 static struct list_head hugepage_freelists[MAX_NUMNODES];
28 static unsigned int nr_huge_pages_node[MAX_NUMNODES];
29 static unsigned int free_huge_pages_node[MAX_NUMNODES];
30 static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
31 unsigned long hugepages_treat_as_movable;
32
33 /*
34  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
35  */
36 static DEFINE_SPINLOCK(hugetlb_lock);
37
38 static void clear_huge_page(struct page *page, unsigned long addr)
39 {
40         int i;
41
42         might_sleep();
43         for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
44                 cond_resched();
45                 clear_user_highpage(page + i, addr + i * PAGE_SIZE);
46         }
47 }
48
49 static void copy_huge_page(struct page *dst, struct page *src,
50                            unsigned long addr, struct vm_area_struct *vma)
51 {
52         int i;
53
54         might_sleep();
55         for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
56                 cond_resched();
57                 copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
58         }
59 }
60
61 static void enqueue_huge_page(struct page *page)
62 {
63         int nid = page_to_nid(page);
64         list_add(&page->lru, &hugepage_freelists[nid]);
65         free_huge_pages++;
66         free_huge_pages_node[nid]++;
67 }
68
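/*
 * Take a free huge page from the first node in the zonelist that is
 * allowed by the current cpuset and still has pages queued.  The zonelist
 * is built from the VMA's memory policy, so NUMA placement is honoured.
 * Caller must hold hugetlb_lock.
 */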
69 static struct page *dequeue_huge_page(struct vm_area_struct *vma,
70                                 unsigned long address)
71 {
72         int nid;
73         struct page *page = NULL;
74         struct mempolicy *mpol;
75         struct zonelist *zonelist = huge_zonelist(vma, address,
76                                         htlb_alloc_mask, &mpol);
77         struct zone **z;
78
79         for (z = zonelist->zones; *z; z++) {
80                 nid = zone_to_nid(*z);
81                 if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
82                     !list_empty(&hugepage_freelists[nid])) {
83                         page = list_entry(hugepage_freelists[nid].next,
84                                           struct page, lru);
85                         list_del(&page->lru);
86                         free_huge_pages--;
87                         free_huge_pages_node[nid]--;
88                         break;
89                 }
90         }
91         mpol_free(mpol);        /* unref if mpol !NULL */
92         return page;
93 }
94
95 static void free_huge_page(struct page *page)
96 {
97         BUG_ON(page_count(page));
98
99         INIT_LIST_HEAD(&page->lru);
100
101         spin_lock(&hugetlb_lock);
102         enqueue_huge_page(page);
103         spin_unlock(&hugetlb_lock);
104 }
105
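/*
 * Allocate a new huge page from the buddy allocator, spreading allocations
 * round-robin across the online nodes, and release it into the hugetlb
 * free pool via its compound destructor (free_huge_page).
 * Returns 1 on success, 0 on failure.
 */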
106 static int alloc_fresh_huge_page(void)
107 {
108         static int prev_nid;
109         struct page *page;
110         int nid;
111
112         /*
113          * Copy static prev_nid to local nid, work on that, then copy it
114          * back to prev_nid afterwards: otherwise there's a window in which
115          * a racer might pass invalid nid MAX_NUMNODES to alloc_pages_node.
116          * But we don't need to use a spin_lock here: it really doesn't
117          * matter if occasionally a racer chooses the same nid as we do.
118          */
119         nid = next_node(prev_nid, node_online_map);
120         if (nid == MAX_NUMNODES)
121                 nid = first_node(node_online_map);
122         prev_nid = nid;
123
124         page = alloc_pages_node(nid, htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
125                                         HUGETLB_PAGE_ORDER);
126         if (page) {
127                 set_compound_page_dtor(page, free_huge_page);
128                 spin_lock(&hugetlb_lock);
129                 nr_huge_pages++;
130                 nr_huge_pages_node[page_to_nid(page)]++;
131                 spin_unlock(&hugetlb_lock);
132                 put_page(page); /* free it into the hugepage allocator */
133                 return 1;
134         }
135         return 0;
136 }
137
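/*
 * Hand out a free huge page for the given mapping.  A shared (VM_MAYSHARE)
 * mapping consumes one of the pages it reserved earlier; a private mapping
 * may only dip into the pool while enough free pages remain to honour the
 * existing reservations.
 */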
138 static struct page *alloc_huge_page(struct vm_area_struct *vma,
139                                     unsigned long addr)
140 {
141         struct page *page;
142
143         spin_lock(&hugetlb_lock);
144         if (vma->vm_flags & VM_MAYSHARE)
145                 resv_huge_pages--;
146         else if (free_huge_pages <= resv_huge_pages)
147                 goto fail;
148
149         page = dequeue_huge_page(vma, addr);
150         if (!page)
151                 goto fail;
152
153         spin_unlock(&hugetlb_lock);
154         set_page_refcounted(page);
155         return page;
156
157 fail:
158         if (vma->vm_flags & VM_MAYSHARE)
159                 resv_huge_pages++;
160         spin_unlock(&hugetlb_lock);
161         return NULL;
162 }
163
164 static int __init hugetlb_init(void)
165 {
166         unsigned long i;
167
168         if (HPAGE_SHIFT == 0)
169                 return 0;
170
171         for (i = 0; i < MAX_NUMNODES; ++i)
172                 INIT_LIST_HEAD(&hugepage_freelists[i]);
173
174         for (i = 0; i < max_huge_pages; ++i) {
175                 if (!alloc_fresh_huge_page())
176                         break;
177         }
178         max_huge_pages = free_huge_pages = nr_huge_pages = i;
179         printk(KERN_INFO "Total HugeTLB memory allocated, %lu\n", free_huge_pages);
180         return 0;
181 }
182 module_init(hugetlb_init);
183
184 static int __init hugetlb_setup(char *s)
185 {
186         if (sscanf(s, "%lu", &max_huge_pages) <= 0)
187                 max_huge_pages = 0;
188         return 1;
189 }
190 __setup("hugepages=", hugetlb_setup);
191
192 static unsigned int cpuset_mems_nr(unsigned int *array)
193 {
194         int node;
195         unsigned int nr = 0;
196
197         for_each_node_mask(node, cpuset_current_mems_allowed)
198                 nr += array[node];
199
200         return nr;
201 }
202
203 #ifdef CONFIG_SYSCTL
204 static void update_and_free_page(struct page *page)
205 {
206         int i;
207         nr_huge_pages--;
208         nr_huge_pages_node[page_to_nid(page)]--;
209         for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
210                 page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
211                                 1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
212                                 1 << PG_private | 1 << PG_writeback);
213         }
214         set_compound_page_dtor(page, NULL);
215         set_page_refcounted(page);
216         __free_pages(page, HUGETLB_PAGE_ORDER);
217 }
218
219 #ifdef CONFIG_HIGHMEM
220 static void try_to_free_low(unsigned long count)
221 {
222         int i;
223
224         for (i = 0; i < MAX_NUMNODES; ++i) {
225                 struct page *page, *next;
226                 list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
227                         if (PageHighMem(page))
228                                 continue;
229                         list_del(&page->lru);
230                         update_and_free_page(page);
231                         free_huge_pages--;
232                         free_huge_pages_node[page_to_nid(page)]--;
233                         if (count >= nr_huge_pages)
234                                 return;
235                 }
236         }
237 }
238 #else
239 static inline void try_to_free_low(unsigned long count)
240 {
241 }
242 #endif
243
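/*
 * Resize the huge page pool: grow it by allocating fresh pages, or shrink
 * it by returning free pages to the buddy allocator, never going below the
 * number of reserved pages.  Returns the resulting pool size.
 */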
244 static unsigned long set_max_huge_pages(unsigned long count)
245 {
246         while (count > nr_huge_pages) {
247                 if (!alloc_fresh_huge_page())
248                         return nr_huge_pages;
249         }
250         if (count >= nr_huge_pages)
251                 return nr_huge_pages;
252
253         spin_lock(&hugetlb_lock);
254         count = max(count, resv_huge_pages);
255         try_to_free_low(count);
256         while (count < nr_huge_pages) {
257                 struct page *page = dequeue_huge_page(NULL, 0);
258                 if (!page)
259                         break;
260                 update_and_free_page(page);
261         }
262         spin_unlock(&hugetlb_lock);
263         return nr_huge_pages;
264 }
265
266 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
267                            struct file *file, void __user *buffer,
268                            size_t *length, loff_t *ppos)
269 {
270         proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
271         max_huge_pages = set_max_huge_pages(max_huge_pages);
272         return 0;
273 }
274
275 int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
276                         struct file *file, void __user *buffer,
277                         size_t *length, loff_t *ppos)
278 {
279         proc_dointvec(table, write, file, buffer, length, ppos);
280         if (hugepages_treat_as_movable)
281                 htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
282         else
283                 htlb_alloc_mask = GFP_HIGHUSER;
284         return 0;
285 }
286
287 #endif /* CONFIG_SYSCTL */
288
289 int hugetlb_report_meminfo(char *buf)
290 {
291         return sprintf(buf,
292                         "HugePages_Total: %5lu\n"
293                         "HugePages_Free:  %5lu\n"
294                         "HugePages_Rsvd:  %5lu\n"
295                         "Hugepagesize:    %5lu kB\n",
296                         nr_huge_pages,
297                         free_huge_pages,
298                         resv_huge_pages,
299                         HPAGE_SIZE/1024);
300 }
301
302 int hugetlb_report_node_meminfo(int nid, char *buf)
303 {
304         return sprintf(buf,
305                 "Node %d HugePages_Total: %5u\n"
306                 "Node %d HugePages_Free:  %5u\n",
307                 nid, nr_huge_pages_node[nid],
308                 nid, free_huge_pages_node[nid]);
309 }
310
311 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
312 unsigned long hugetlb_total_pages(void)
313 {
314         return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
315 }
316
317 /*
318  * We cannot handle pagefaults against hugetlb pages at all.  They cause
319  * handle_mm_fault() to try to instantiate regular-sized pages in the
320  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
321  * this far.
322  */
323 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
324 {
325         BUG();
326         return 0;
327 }
328
329 struct vm_operations_struct hugetlb_vm_ops = {
330         .fault = hugetlb_vm_op_fault,
331 };
332
333 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
334                                 int writable)
335 {
336         pte_t entry;
337
338         if (writable) {
339                 entry =
340                     pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
341         } else {
342                 entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
343         }
344         entry = pte_mkyoung(entry);
345         entry = pte_mkhuge(entry);
346
347         return entry;
348 }
349
350 static void set_huge_ptep_writable(struct vm_area_struct *vma,
351                                    unsigned long address, pte_t *ptep)
352 {
353         pte_t entry;
354
355         entry = pte_mkwrite(pte_mkdirty(*ptep));
356         if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
357                 update_mmu_cache(vma, address, entry);
358         }
359 }
360
361
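/*
 * Duplicate the huge page table entries of a VMA from the parent mm into
 * the child at fork time.  For private mappings that may be written (COW),
 * the parent's entries are write-protected as well, so that a later write
 * by either side goes through hugetlb_cow().
 */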
362 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
363                             struct vm_area_struct *vma)
364 {
365         pte_t *src_pte, *dst_pte, entry;
366         struct page *ptepage;
367         unsigned long addr;
368         int cow;
369
370         cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
371
372         for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
373                 src_pte = huge_pte_offset(src, addr);
374                 if (!src_pte)
375                         continue;
376                 dst_pte = huge_pte_alloc(dst, addr);
377                 if (!dst_pte)
378                         goto nomem;
379                 spin_lock(&dst->page_table_lock);
380                 spin_lock(&src->page_table_lock);
381                 if (!pte_none(*src_pte)) {
382                         if (cow)
383                                 ptep_set_wrprotect(src, addr, src_pte);
384                         entry = *src_pte;
385                         ptepage = pte_page(entry);
386                         get_page(ptepage);
387                         set_huge_pte_at(dst, addr, dst_pte, entry);
388                 }
389                 spin_unlock(&src->page_table_lock);
390                 spin_unlock(&dst->page_table_lock);
391         }
392         return 0;
393
394 nomem:
395         return -ENOMEM;
396 }
397
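/*
 * Clear the huge page table entries in [start, end), flush the TLB, and
 * then drop the page references.  The pages are gathered on a local list
 * first so that put_page() is only called after the TLB flush.
 */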
398 void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
399                             unsigned long end)
400 {
401         struct mm_struct *mm = vma->vm_mm;
402         unsigned long address;
403         pte_t *ptep;
404         pte_t pte;
405         struct page *page;
406         struct page *tmp;
407         /*
408          * A page gathering list, protected by the per-file i_mmap_lock. The
409          * lock is used to avoid list corruption from multiple unmapping
410          * of the same page since we are using page->lru.
411          */
412         LIST_HEAD(page_list);
413
414         WARN_ON(!is_vm_hugetlb_page(vma));
415         BUG_ON(start & ~HPAGE_MASK);
416         BUG_ON(end & ~HPAGE_MASK);
417
418         spin_lock(&mm->page_table_lock);
419         for (address = start; address < end; address += HPAGE_SIZE) {
420                 ptep = huge_pte_offset(mm, address);
421                 if (!ptep)
422                         continue;
423
424                 if (huge_pmd_unshare(mm, &address, ptep))
425                         continue;
426
427                 pte = huge_ptep_get_and_clear(mm, address, ptep);
428                 if (pte_none(pte))
429                         continue;
430
431                 page = pte_page(pte);
432                 if (pte_dirty(pte))
433                         set_page_dirty(page);
434                 list_add(&page->lru, &page_list);
435         }
436         spin_unlock(&mm->page_table_lock);
437         flush_tlb_range(vma, start, end);
438         list_for_each_entry_safe(page, tmp, &page_list, lru) {
439                 list_del(&page->lru);
440                 put_page(page);
441         }
442 }
443
444 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
445                           unsigned long end)
446 {
447         /*
448          * It is undesirable to test vma->vm_file as it should be non-null
449          * for a valid hugetlb area. However, vm_file will be NULL in the error
450          * cleanup path of do_mmap_pgoff. When the hugetlbfs ->mmap method fails,
451          * do_mmap_pgoff() nullifies vma->vm_file before calling this function
452          * to clean up. Since no pte has actually been set up, it is safe to
453          * do nothing in this case.
454          */
455         if (vma->vm_file) {
456                 spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
457                 __unmap_hugepage_range(vma, start, end);
458                 spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
459         }
460 }
461
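/*
 * Break copy-on-write for a huge page: if we are the only user, simply make
 * the existing mapping writable; otherwise allocate a new huge page, copy
 * the contents, and install the new page in place of the old one.
 * Called with mm->page_table_lock held; the lock is dropped around the copy.
 */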
462 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
463                         unsigned long address, pte_t *ptep, pte_t pte)
464 {
465         struct page *old_page, *new_page;
466         int avoidcopy;
467
468         old_page = pte_page(pte);
469
470         /* If no-one else is actually using this page, avoid the copy
471          * and just make the page writable */
472         avoidcopy = (page_count(old_page) == 1);
473         if (avoidcopy) {
474                 set_huge_ptep_writable(vma, address, ptep);
475                 return 0;
476         }
477
478         page_cache_get(old_page);
479         new_page = alloc_huge_page(vma, address);
480
481         if (!new_page) {
482                 page_cache_release(old_page);
483                 return VM_FAULT_OOM;
484         }
485
486         spin_unlock(&mm->page_table_lock);
487         copy_huge_page(new_page, old_page, address, vma);
488         spin_lock(&mm->page_table_lock);
489
490         ptep = huge_pte_offset(mm, address & HPAGE_MASK);
491         if (likely(pte_same(*ptep, pte))) {
492                 /* Break COW */
493                 set_huge_pte_at(mm, address, ptep,
494                                 make_huge_pte(vma, new_page, 1));
495                 /* Make the old page be freed below */
496                 new_page = old_page;
497         }
498         page_cache_release(new_page);
499         page_cache_release(old_page);
500         return 0;
501 }
502
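/*
 * Handle a fault on a huge page that is not yet present: look the page up
 * in (or add it to) the page cache for shared mappings, or allocate and
 * clear a fresh page for private ones, then install the new pte under
 * mm->page_table_lock, rechecking i_size to guard against truncation.
 */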
503 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
504                         unsigned long address, pte_t *ptep, int write_access)
505 {
506         int ret = VM_FAULT_SIGBUS;
507         unsigned long idx;
508         unsigned long size;
509         struct page *page;
510         struct address_space *mapping;
511         pte_t new_pte;
512
513         mapping = vma->vm_file->f_mapping;
514         idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
515                 + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
516
517         /*
518          * Use page lock to guard against racing truncation
519          * before we get page_table_lock.
520          */
521 retry:
522         page = find_lock_page(mapping, idx);
523         if (!page) {
524                 size = i_size_read(mapping->host) >> HPAGE_SHIFT;
525                 if (idx >= size)
526                         goto out;
527                 if (hugetlb_get_quota(mapping))
528                         goto out;
529                 page = alloc_huge_page(vma, address);
530                 if (!page) {
531                         hugetlb_put_quota(mapping);
532                         ret = VM_FAULT_OOM;
533                         goto out;
534                 }
535                 clear_huge_page(page, address);
536
537                 if (vma->vm_flags & VM_SHARED) {
538                         int err;
539
540                         err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
541                         if (err) {
542                                 put_page(page);
543                                 hugetlb_put_quota(mapping);
544                                 if (err == -EEXIST)
545                                         goto retry;
546                                 goto out;
547                         }
548                 } else
549                         lock_page(page);
550         }
551
552         spin_lock(&mm->page_table_lock);
553         size = i_size_read(mapping->host) >> HPAGE_SHIFT;
554         if (idx >= size)
555                 goto backout;
556
557         ret = 0;
558         if (!pte_none(*ptep))
559                 goto backout;
560
561         new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
562                                 && (vma->vm_flags & VM_SHARED)));
563         set_huge_pte_at(mm, address, ptep, new_pte);
564
565         if (write_access && !(vma->vm_flags & VM_SHARED)) {
566                 /* Optimization, do the COW without a second fault */
567                 ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
568         }
569
570         spin_unlock(&mm->page_table_lock);
571         unlock_page(page);
572 out:
573         return ret;
574
575 backout:
576         spin_unlock(&mm->page_table_lock);
577         hugetlb_put_quota(mapping);
578         unlock_page(page);
579         put_page(page);
580         goto out;
581 }
582
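/*
 * Top-level fault handler for hugetlb VMAs.  Missing pages are handed to
 * hugetlb_no_page(); write faults on read-only entries go through
 * hugetlb_cow().
 */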
583 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
584                         unsigned long address, int write_access)
585 {
586         pte_t *ptep;
587         pte_t entry;
588         int ret;
589         static DEFINE_MUTEX(hugetlb_instantiation_mutex);
590
591         ptep = huge_pte_alloc(mm, address);
592         if (!ptep)
593                 return VM_FAULT_OOM;
594
595         /*
596          * Serialize hugepage allocation and instantiation, so that we don't
597          * get spurious allocation failures if two CPUs race to instantiate
598          * the same page in the page cache.
599          */
600         mutex_lock(&hugetlb_instantiation_mutex);
601         entry = *ptep;
602         if (pte_none(entry)) {
603                 ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
604                 mutex_unlock(&hugetlb_instantiation_mutex);
605                 return ret;
606         }
607
608         ret = 0;
609
610         spin_lock(&mm->page_table_lock);
611         /* Check for a racing update before calling hugetlb_cow */
612         if (likely(pte_same(entry, *ptep)))
613                 if (write_access && !pte_write(entry))
614                         ret = hugetlb_cow(mm, vma, address, ptep, entry);
615         spin_unlock(&mm->page_table_lock);
616         mutex_unlock(&hugetlb_instantiation_mutex);
617
618         return ret;
619 }
620
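/*
 * get_user_pages() support for hugetlb VMAs: walk the huge ptes starting
 * at *position, faulting pages in as needed, and fill in the pages[] and
 * vmas[] arrays one PAGE_SIZE subpage at a time.
 */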
621 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
622                         struct page **pages, struct vm_area_struct **vmas,
623                         unsigned long *position, int *length, int i)
624 {
625         unsigned long pfn_offset;
626         unsigned long vaddr = *position;
627         int remainder = *length;
628
629         spin_lock(&mm->page_table_lock);
630         while (vaddr < vma->vm_end && remainder) {
631                 pte_t *pte;
632                 struct page *page;
633
634                 /*
635                  * Some archs (sparc64, sh*) have multiple pte_ts for
636                  * each hugepage.  We have to make sure we get the
637                  * first, for the page indexing below to work.
638                  */
639                 pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);
640
641                 if (!pte || pte_none(*pte)) {
642                         int ret;
643
644                         spin_unlock(&mm->page_table_lock);
645                         ret = hugetlb_fault(mm, vma, vaddr, 0);
646                         spin_lock(&mm->page_table_lock);
647                         if (!(ret & VM_FAULT_ERROR))
648                                 continue;
649
650                         remainder = 0;
651                         if (!i)
652                                 i = -EFAULT;
653                         break;
654                 }
655
656                 pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
657                 page = pte_page(*pte);
658 same_page:
659                 if (pages) {
660                         get_page(page);
661                         pages[i] = page + pfn_offset;
662                 }
663
664                 if (vmas)
665                         vmas[i] = vma;
666
667                 vaddr += PAGE_SIZE;
668                 ++pfn_offset;
669                 --remainder;
670                 ++i;
671                 if (vaddr < vma->vm_end && remainder &&
672                                 pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
673                         /*
674                          * We use pfn_offset to avoid touching the pageframes
675                          * of this compound page.
676                          */
677                         goto same_page;
678                 }
679         }
680         spin_unlock(&mm->page_table_lock);
681         *length = remainder;
682         *position = vaddr;
683
684         return i;
685 }
686
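/*
 * mprotect() support for hugetlb VMAs: rewrite every present huge pte in
 * the range with the new protection bits and flush the TLB.
 */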
687 void hugetlb_change_protection(struct vm_area_struct *vma,
688                 unsigned long address, unsigned long end, pgprot_t newprot)
689 {
690         struct mm_struct *mm = vma->vm_mm;
691         unsigned long start = address;
692         pte_t *ptep;
693         pte_t pte;
694
695         BUG_ON(address >= end);
696         flush_cache_range(vma, address, end);
697
698         spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
699         spin_lock(&mm->page_table_lock);
700         for (; address < end; address += HPAGE_SIZE) {
701                 ptep = huge_pte_offset(mm, address);
702                 if (!ptep)
703                         continue;
704                 if (huge_pmd_unshare(mm, &address, ptep))
705                         continue;
706                 if (!pte_none(*ptep)) {
707                         pte = huge_ptep_get_and_clear(mm, address, ptep);
708                         pte = pte_mkhuge(pte_modify(pte, newprot));
709                         set_huge_pte_at(mm, address, ptep, pte);
710                 }
711         }
712         spin_unlock(&mm->page_table_lock);
713         spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
714
715         flush_tlb_range(vma, start, end);
716 }
717
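/*
 * Huge page reservations on a hugetlbfs inode are tracked as a list of
 * [from, to) file_region entries hung off inode->i_mapping->private_list.
 * region_chg() reports how many pages a new reservation would add,
 * region_add() commits it, and region_truncate() drops everything past a
 * given offset, returning how many reserved pages were released.
 */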
718 struct file_region {
719         struct list_head link;
720         long from;
721         long to;
722 };
723
724 static long region_add(struct list_head *head, long f, long t)
725 {
726         struct file_region *rg, *nrg, *trg;
727
728         /* Locate the region we are either in or before. */
729         list_for_each_entry(rg, head, link)
730                 if (f <= rg->to)
731                         break;
732
733         /* Round our left edge to the current segment if it encloses us. */
734         if (f > rg->from)
735                 f = rg->from;
736
737         /* Check for and consume any regions we now overlap with. */
738         nrg = rg;
739         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
740                 if (&rg->link == head)
741                         break;
742                 if (rg->from > t)
743                         break;
744
745                 /* If this area reaches higher, extend our area to
746                  * include it completely.  If this is not the first area
747                  * which we intend to reuse, free it. */
748                 if (rg->to > t)
749                         t = rg->to;
750                 if (rg != nrg) {
751                         list_del(&rg->link);
752                         kfree(rg);
753                 }
754         }
755         nrg->from = f;
756         nrg->to = t;
757         return 0;
758 }
759
760 static long region_chg(struct list_head *head, long f, long t)
761 {
762         struct file_region *rg, *nrg;
763         long chg = 0;
764
765         /* Locate the region we are before or in. */
766         list_for_each_entry(rg, head, link)
767                 if (f <= rg->to)
768                         break;
769
770         /* If we are below the current region then a new region is required.
771          * Subtle: allocate a new region at the position but make it zero
772          * size, so that we can guarantee to record the reservation. */
773         if (&rg->link == head || t < rg->from) {
774                 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
775                 if (!nrg)
776                         return -ENOMEM;
777                 nrg->from = f;
778                 nrg->to   = f;
779                 INIT_LIST_HEAD(&nrg->link);
780                 list_add(&nrg->link, rg->link.prev);
781
782                 return t - f;
783         }
784
785         /* Round our left edge to the current segment if it encloses us. */
786         if (f > rg->from)
787                 f = rg->from;
788         chg = t - f;
789
790         /* Check for and consume any regions we now overlap with. */
791         list_for_each_entry(rg, rg->link.prev, link) {
792                 if (&rg->link == head)
793                         break;
794                 if (rg->from > t)
795                         return chg;
796
797                 /* We overlap with this area; if it extends further than
798                  * us then we must extend ourselves.  Account for its
799                  * existing reservation. */
800                 if (rg->to > t) {
801                         chg += rg->to - t;
802                         t = rg->to;
803                 }
804                 chg -= rg->to - rg->from;
805         }
806         return chg;
807 }
808
809 static long region_truncate(struct list_head *head, long end)
810 {
811         struct file_region *rg, *trg;
812         long chg = 0;
813
814         /* Locate the region we are either in or before. */
815         list_for_each_entry(rg, head, link)
816                 if (end <= rg->to)
817                         break;
818         if (&rg->link == head)
819                 return 0;
820
821         /* If we are in the middle of a region then adjust it. */
822         if (end > rg->from) {
823                 chg = rg->to - end;
824                 rg->to = end;
825                 rg = list_entry(rg->link.next, typeof(*rg), link);
826         }
827
828         /* Drop any remaining regions. */
829         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
830                 if (&rg->link == head)
831                         break;
832                 chg += rg->to - rg->from;
833                 list_del(&rg->link);
834                 kfree(rg);
835         }
836         return chg;
837 }
838
839 static int hugetlb_acct_memory(long delta)
840 {
841         int ret = -ENOMEM;
842
843         spin_lock(&hugetlb_lock);
844         if ((delta + resv_huge_pages) <= free_huge_pages) {
845                 resv_huge_pages += delta;
846                 ret = 0;
847         }
848         spin_unlock(&hugetlb_lock);
849         return ret;
850 }
851
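/*
 * Reserve enough huge pages to cover the range [from, to) of a hugetlbfs
 * file, so that later faults in that range cannot fail for lack of huge
 * pages.  The region list remembers which parts of the file are already
 * reserved, and only the additional pages are charged against the pool.
 */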
852 int hugetlb_reserve_pages(struct inode *inode, long from, long to)
853 {
854         long ret, chg;
855
856         chg = region_chg(&inode->i_mapping->private_list, from, to);
857         if (chg < 0)
858                 return chg;
859         /*
860          * When cpusets are configured, strict hugetlb page reservation is
861          * broken because the accounting is done on a global variable. Such
862          * a reservation is essentially meaningless with cpusets, because it
863          * is never checked against page availability in the current cpuset:
864          * an application can still be OOM-killed by the kernel if the cpuset
865          * the task runs in has no free huge pages left.
866          * Enforcing strict accounting with cpusets is almost impossible (or
867          * too ugly) because cpusets are too fluid; tasks and memory nodes
868          * can be moved between cpusets at any time.
869          *
870          * This change of semantics for shared hugetlb mappings with cpusets
871          * is undesirable. However, to preserve some of the semantics, we
872          * fall back to checking against the current free page count, as a
873          * best-effort attempt to minimize the impact of the semantic change
874          * that cpusets introduce.
875          */
876         if (chg > cpuset_mems_nr(free_huge_pages_node))
877                 return -ENOMEM;
878
879         ret = hugetlb_acct_memory(chg);
880         if (ret < 0)
881                 return ret;
882         region_add(&inode->i_mapping->private_list, from, to);
883         return 0;
884 }
885
886 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
887 {
888         long chg = region_truncate(&inode->i_mapping->private_list, offset);
889         hugetlb_acct_memory(freed - chg);
890 }