1 /*
2  * Generic hugetlb support.
3  * (C) Nadia Yvette Chambers, April 2004
4  */
5 #include <linux/list.h>
6 #include <linux/init.h>
7 #include <linux/mm.h>
8 #include <linux/seq_file.h>
9 #include <linux/sysctl.h>
10 #include <linux/highmem.h>
11 #include <linux/mmu_notifier.h>
12 #include <linux/nodemask.h>
13 #include <linux/pagemap.h>
14 #include <linux/mempolicy.h>
15 #include <linux/compiler.h>
16 #include <linux/cpuset.h>
17 #include <linux/mutex.h>
18 #include <linux/bootmem.h>
19 #include <linux/sysfs.h>
20 #include <linux/slab.h>
21 #include <linux/sched/signal.h>
22 #include <linux/rmap.h>
23 #include <linux/string_helpers.h>
24 #include <linux/swap.h>
25 #include <linux/swapops.h>
26 #include <linux/jhash.h>
27
28 #include <asm/page.h>
29 #include <asm/pgtable.h>
30 #include <asm/tlb.h>
31
32 #include <linux/io.h>
33 #include <linux/hugetlb.h>
34 #include <linux/hugetlb_cgroup.h>
35 #include <linux/node.h>
36 #include <linux/userfaultfd_k.h>
37 #include "internal.h"
38
39 int hugepages_treat_as_movable;
40
41 int hugetlb_max_hstate __read_mostly;
42 unsigned int default_hstate_idx;
43 struct hstate hstates[HUGE_MAX_HSTATE];
44 /*
45  * Minimum page order among possible hugepage sizes, set to a proper value
46  * at boot time.
47  */
48 static unsigned int minimum_order __read_mostly = UINT_MAX;
49
50 __initdata LIST_HEAD(huge_boot_pages);
51
52 /* for command line parsing */
53 static struct hstate * __initdata parsed_hstate;
54 static unsigned long __initdata default_hstate_max_huge_pages;
55 static unsigned long __initdata default_hstate_size;
56 static bool __initdata parsed_valid_hugepagesz = true;
57
58 /*
59  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
60  * free_huge_pages, and surplus_huge_pages.
61  */
62 DEFINE_SPINLOCK(hugetlb_lock);
63
64 /*
65  * Serializes faults on the same logical page.  This is used to
66  * prevent spurious OOMs when the hugepage pool is fully utilized.
67  */
68 static int num_fault_mutexes;
69 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
70
71 /* Forward declaration */
72 static int hugetlb_acct_memory(struct hstate *h, long delta);
73
74 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
75 {
76         bool free = (spool->count == 0) && (spool->used_hpages == 0);
77
78         spin_unlock(&spool->lock);
79
80         /* If no pages are used, and no other handles to the subpool
81          * remain, give up any reservations based on minimum size and
82          * free the subpool */
83         if (free) {
84                 if (spool->min_hpages != -1)
85                         hugetlb_acct_memory(spool->hstate,
86                                                 -spool->min_hpages);
87                 kfree(spool);
88         }
89 }
90
91 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
92                                                 long min_hpages)
93 {
94         struct hugepage_subpool *spool;
95
96         spool = kzalloc(sizeof(*spool), GFP_KERNEL);
97         if (!spool)
98                 return NULL;
99
100         spin_lock_init(&spool->lock);
101         spool->count = 1;
102         spool->max_hpages = max_hpages;
103         spool->hstate = h;
104         spool->min_hpages = min_hpages;
105
106         if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
107                 kfree(spool);
108                 return NULL;
109         }
110         spool->rsv_hpages = min_hpages;
111
112         return spool;
113 }
114
115 void hugepage_put_subpool(struct hugepage_subpool *spool)
116 {
117         spin_lock(&spool->lock);
118         BUG_ON(!spool->count);
119         spool->count--;
120         unlock_or_release_subpool(spool);
121 }
122
123 /*
124  * Subpool accounting for allocating and reserving pages.
125  * Return -ENOMEM if there are not enough resources to satisfy
126  * the request.  Otherwise, return the number of pages by which the
127  * global pools must be adjusted (upward).  The returned value may
128  * only be different than the passed value (delta) in the case where
129  * a subpool minimum size must be maintained.
130  */
131 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
132                                       long delta)
133 {
134         long ret = delta;
135
136         if (!spool)
137                 return ret;
138
139         spin_lock(&spool->lock);
140
141         if (spool->max_hpages != -1) {          /* maximum size accounting */
142                 if ((spool->used_hpages + delta) <= spool->max_hpages)
143                         spool->used_hpages += delta;
144                 else {
145                         ret = -ENOMEM;
146                         goto unlock_ret;
147                 }
148         }
149
150         /* minimum size accounting */
151         if (spool->min_hpages != -1 && spool->rsv_hpages) {
152                 if (delta > spool->rsv_hpages) {
153                         /*
154                          * Asking for more reserves than those already taken on
155                          * behalf of subpool.  Return difference.
156                          */
157                         ret = delta - spool->rsv_hpages;
158                         spool->rsv_hpages = 0;
159                 } else {
160                         ret = 0;        /* reserves already accounted for */
161                         spool->rsv_hpages -= delta;
162                 }
163         }
164
165 unlock_ret:
166         spin_unlock(&spool->lock);
167         return ret;
168 }
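
/*
 * Illustrative example (editor's sketch of the accounting above): a
 * subpool created with min_hpages = 10 starts with rsv_hpages = 10.
 * hugepage_subpool_get_pages(spool, 3) then returns 0 (the request is
 * fully covered by the minimum reserve, which drops to 7), while a
 * subsequent request for 12 pages returns 5, the portion the remaining
 * reserve cannot cover and which must come from the global pool.
 */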
169
170 /*
171  * Subpool accounting for freeing and unreserving pages.
172  * Return the number of global page reservations that must be dropped.
173  * The return value may only be different than the passed value (delta)
174  * in the case where a subpool minimum size must be maintained.
175  */
176 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
177                                        long delta)
178 {
179         long ret = delta;
180
181         if (!spool)
182                 return delta;
183
184         spin_lock(&spool->lock);
185
186         if (spool->max_hpages != -1)            /* maximum size accounting */
187                 spool->used_hpages -= delta;
188
189          /* minimum size accounting */
190         if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
191                 if (spool->rsv_hpages + delta <= spool->min_hpages)
192                         ret = 0;
193                 else
194                         ret = spool->rsv_hpages + delta - spool->min_hpages;
195
196                 spool->rsv_hpages += delta;
197                 if (spool->rsv_hpages > spool->min_hpages)
198                         spool->rsv_hpages = spool->min_hpages;
199         }
200
201         /*
202          * If hugetlbfs_put_super couldn't free spool due to an outstanding
203          * quota reference, free it now.
204          */
205         unlock_or_release_subpool(spool);
206
207         return ret;
208 }
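
/*
 * Illustrative example (editor's sketch): with min_hpages = 10,
 * used_hpages = 8 after the decrement above and rsv_hpages = 7,
 * hugepage_subpool_put_pages(spool, 1) returns 0 and bumps rsv_hpages
 * to 8 -- the freed page refills the subpool reserve, so no global
 * reservation is dropped.
 */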
209
210 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
211 {
212         return HUGETLBFS_SB(inode->i_sb)->spool;
213 }
214
215 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
216 {
217         return subpool_inode(file_inode(vma->vm_file));
218 }
219
220 /*
221  * Region tracking -- allows tracking of reservations and instantiated pages
222  *                    across the pages in a mapping.
223  *
224  * The region data structures are embedded into a resv_map and protected
225  * by a resv_map's lock.  The set of regions within the resv_map represent
226  * reservations for huge pages, or huge pages that have already been
227  * instantiated within the map.  The from and to elements are huge page
228  * indices into the associated mapping.  from indicates the starting index
229  * of the region.  to represents the first index past the end of the region.
230  *
231  * For example, a file region structure with from == 0 and to == 4 represents
232  * four huge pages in a mapping.  It is important to note that the to element
233  * represents the first element past the end of the region. This is used in
234  * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
235  *
236  * Interval notation of the form [from, to) will be used to indicate that
237  * the endpoint from is inclusive and to is exclusive.
238  */
239 struct file_region {
240         struct list_head link;
241         long from;
242         long to;
243 };
244
245 /*
246  * Add the huge page range represented by [f, t) to the reserve
247  * map.  In the normal case, existing regions will be expanded
248  * to accommodate the specified range.  Sufficient regions should
249  * exist for expansion due to the previous call to region_chg
250  * with the same range.  However, it is possible that region_del
251  * could have been called after region_chg and modified the map
252  * in such a way that no region exists to be expanded.  In this
253  * case, pull a region descriptor from the cache associated with
254  * the map and use that for the new range.
255  *
256  * Return the number of new huge pages added to the map.  This
257  * number is greater than or equal to zero.
258  */
259 static long region_add(struct resv_map *resv, long f, long t)
260 {
261         struct list_head *head = &resv->regions;
262         struct file_region *rg, *nrg, *trg;
263         long add = 0;
264
265         spin_lock(&resv->lock);
266         /* Locate the region we are either in or before. */
267         list_for_each_entry(rg, head, link)
268                 if (f <= rg->to)
269                         break;
270
271         /*
272          * If no region exists which can be expanded to include the
273          * specified range, the list must have been modified by an
274  * interleaving call to region_del().  Pull a region descriptor
275          * from the cache and use it for this range.
276          */
277         if (&rg->link == head || t < rg->from) {
278                 VM_BUG_ON(resv->region_cache_count <= 0);
279
280                 resv->region_cache_count--;
281                 nrg = list_first_entry(&resv->region_cache, struct file_region,
282                                         link);
283                 list_del(&nrg->link);
284
285                 nrg->from = f;
286                 nrg->to = t;
287                 list_add(&nrg->link, rg->link.prev);
288
289                 add += t - f;
290                 goto out_locked;
291         }
292
293         /* Round our left edge to the current segment if it encloses us. */
294         if (f > rg->from)
295                 f = rg->from;
296
297         /* Check for and consume any regions we now overlap with. */
298         nrg = rg;
299         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
300                 if (&rg->link == head)
301                         break;
302                 if (rg->from > t)
303                         break;
304
305                 /* If this area reaches higher, then extend our area to
306                  * include it completely.  If this is not the first area
307                  * which we intend to reuse, free it. */
308                 if (rg->to > t)
309                         t = rg->to;
310                 if (rg != nrg) {
311                         /* Decrement return value by the deleted range.
312                          * Another range will span this area so that by
313                          * the end of the routine, add will be >= zero
314                          */
315                         add -= (rg->to - rg->from);
316                         list_del(&rg->link);
317                         kfree(rg);
318                 }
319         }
320
321         add += (nrg->from - f);         /* Added to beginning of region */
322         nrg->from = f;
323         add += t - nrg->to;             /* Added to end of region */
324         nrg->to = t;
325
326 out_locked:
327         resv->adds_in_progress--;
328         spin_unlock(&resv->lock);
329         VM_BUG_ON(add < 0);
330         return add;
331 }
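
/*
 * Illustrative example (editor's sketch): if the map holds [0, 2) and
 * [4, 6), then region_add(resv, 1, 5) coalesces both entries into a
 * single [0, 6) region and returns 2, because only huge page indices
 * 2 and 3 were not already represented.
 */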
332
333 /*
334  * Examine the existing reserve map and determine how many
335  * huge pages in the specified range [f, t) are NOT currently
336  * represented.  This routine is called before a subsequent
337  * call to region_add that will actually modify the reserve
338  * map to add the specified range [f, t).  region_chg does
339  * not change the number of huge pages represented by the
340  * map.  However, if the existing regions in the map can not
341  * be expanded to represent the new range, a new file_region
342  * structure is added to the map as a placeholder.  This is
343  * so that the subsequent region_add call will have all the
344  * regions it needs and will not fail.
345  *
346  * Upon entry, region_chg will also examine the cache of region descriptors
347  * associated with the map.  If there are not enough descriptors cached, one
348  * will be allocated for the in progress add operation.
349  *
350  * Returns the number of huge pages that need to be added to the existing
351  * reservation map for the range [f, t).  This number is greater or equal to
352  * zero.  -ENOMEM is returned if a new file_region structure or cache entry
353  * is needed and can not be allocated.
354  */
355 static long region_chg(struct resv_map *resv, long f, long t)
356 {
357         struct list_head *head = &resv->regions;
358         struct file_region *rg, *nrg = NULL;
359         long chg = 0;
360
361 retry:
362         spin_lock(&resv->lock);
363 retry_locked:
364         resv->adds_in_progress++;
365
366         /*
367          * Check for sufficient descriptors in the cache to accommodate
368          * the number of in progress add operations.
369          */
370         if (resv->adds_in_progress > resv->region_cache_count) {
371                 struct file_region *trg;
372
373                 VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
374                 /* Must drop lock to allocate a new descriptor. */
375                 resv->adds_in_progress--;
376                 spin_unlock(&resv->lock);
377
378                 trg = kmalloc(sizeof(*trg), GFP_KERNEL);
379                 if (!trg) {
380                         kfree(nrg);
381                         return -ENOMEM;
382                 }
383
384                 spin_lock(&resv->lock);
385                 list_add(&trg->link, &resv->region_cache);
386                 resv->region_cache_count++;
387                 goto retry_locked;
388         }
389
390         /* Locate the region we are before or in. */
391         list_for_each_entry(rg, head, link)
392                 if (f <= rg->to)
393                         break;
394
395         /* If we are below the current region, then a new region is required.
396          * Subtle: allocate a new region at the position but make it zero
397          * size such that we can guarantee to record the reservation. */
398         if (&rg->link == head || t < rg->from) {
399                 if (!nrg) {
400                         resv->adds_in_progress--;
401                         spin_unlock(&resv->lock);
402                         nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
403                         if (!nrg)
404                                 return -ENOMEM;
405
406                         nrg->from = f;
407                         nrg->to   = f;
408                         INIT_LIST_HEAD(&nrg->link);
409                         goto retry;
410                 }
411
412                 list_add(&nrg->link, rg->link.prev);
413                 chg = t - f;
414                 goto out_nrg;
415         }
416
417         /* Round our left edge to the current segment if it encloses us. */
418         if (f > rg->from)
419                 f = rg->from;
420         chg = t - f;
421
422         /* Check for and consume any regions we now overlap with. */
423         list_for_each_entry(rg, rg->link.prev, link) {
424                 if (&rg->link == head)
425                         break;
426                 if (rg->from > t)
427                         goto out;
428
429                 /* We overlap with this area, if it extends further than
430                  * us then we must extend ourselves.  Account for its
431                  * existing reservation. */
432                 if (rg->to > t) {
433                         chg += rg->to - t;
434                         t = rg->to;
435                 }
436                 chg -= rg->to - rg->from;
437         }
438
439 out:
440         spin_unlock(&resv->lock);
441         /*  We already know we raced and no longer need the new region */
442         kfree(nrg);
443         return chg;
444 out_nrg:
445         spin_unlock(&resv->lock);
446         return chg;
447 }
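
/*
 * Illustrative example (editor's sketch): with the map holding [0, 2)
 * and [4, 6), region_chg(resv, 1, 5) returns 2, matching what the
 * later region_add(resv, 1, 5) call will actually add, but leaves the
 * map itself unchanged.
 */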
448
449 /*
450  * Abort the in progress add operation.  The adds_in_progress field
451  * of the resv_map keeps track of the operations in progress between
452  * calls to region_chg and region_add.  Operations are sometimes
453  * aborted after the call to region_chg.  In such cases, region_abort
454  * is called to decrement the adds_in_progress counter.
455  *
456  * NOTE: The range arguments [f, t) are not needed or used in this
457  * routine.  They are kept to make reading the calling code easier as
458  * arguments will match the associated region_chg call.
459  */
460 static void region_abort(struct resv_map *resv, long f, long t)
461 {
462         spin_lock(&resv->lock);
463         VM_BUG_ON(!resv->region_cache_count);
464         resv->adds_in_progress--;
465         spin_unlock(&resv->lock);
466 }
467
468 /*
469  * Delete the specified range [f, t) from the reserve map.  If the
470  * t parameter is LONG_MAX, this indicates that ALL regions after f
471  * should be deleted.  Locate the regions which intersect [f, t)
472  * and either trim, delete or split the existing regions.
473  *
474  * Returns the number of huge pages deleted from the reserve map.
475  * In the normal case, the return value is zero or more.  In the
476  * case where a region must be split, a new region descriptor must
477  * be allocated.  If the allocation fails, -ENOMEM will be returned.
478  * NOTE: If the parameter t == LONG_MAX, then we will never split
479  * a region and possibly return -ENOMEM.  Callers specifying
480  * t == LONG_MAX do not need to check for -ENOMEM error.
481  */
482 static long region_del(struct resv_map *resv, long f, long t)
483 {
484         struct list_head *head = &resv->regions;
485         struct file_region *rg, *trg;
486         struct file_region *nrg = NULL;
487         long del = 0;
488
489 retry:
490         spin_lock(&resv->lock);
491         list_for_each_entry_safe(rg, trg, head, link) {
492                 /*
493                  * Skip regions before the range to be deleted.  file_region
494                  * ranges are normally of the form [from, to).  However, there
495                  * may be a "placeholder" entry in the map which is of the form
496                  * (from, to) with from == to.  Check for placeholder entries
497                  * at the beginning of the range to be deleted.
498                  */
499                 if (rg->to <= f && (rg->to != rg->from || rg->to != f))
500                         continue;
501
502                 if (rg->from >= t)
503                         break;
504
505                 if (f > rg->from && t < rg->to) { /* Must split region */
506                         /*
507                          * Check for an entry in the cache before dropping
508                          * lock and attempting allocation.
509                          */
510                         if (!nrg &&
511                             resv->region_cache_count > resv->adds_in_progress) {
512                                 nrg = list_first_entry(&resv->region_cache,
513                                                         struct file_region,
514                                                         link);
515                                 list_del(&nrg->link);
516                                 resv->region_cache_count--;
517                         }
518
519                         if (!nrg) {
520                                 spin_unlock(&resv->lock);
521                                 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
522                                 if (!nrg)
523                                         return -ENOMEM;
524                                 goto retry;
525                         }
526
527                         del += t - f;
528
529                         /* New entry for end of split region */
530                         nrg->from = t;
531                         nrg->to = rg->to;
532                         INIT_LIST_HEAD(&nrg->link);
533
534                         /* Original entry is trimmed */
535                         rg->to = f;
536
537                         list_add(&nrg->link, &rg->link);
538                         nrg = NULL;
539                         break;
540                 }
541
542                 if (f <= rg->from && t >= rg->to) { /* Remove entire region */
543                         del += rg->to - rg->from;
544                         list_del(&rg->link);
545                         kfree(rg);
546                         continue;
547                 }
548
549                 if (f <= rg->from) {    /* Trim beginning of region */
550                         del += t - rg->from;
551                         rg->from = t;
552                 } else {                /* Trim end of region */
553                         del += rg->to - f;
554                         rg->to = f;
555                 }
556         }
557
558         spin_unlock(&resv->lock);
559         kfree(nrg);
560         return del;
561 }
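
/*
 * Illustrative example (editor's sketch): deleting [1, 3) from a map
 * holding the single region [0, 6) trims that entry to [0, 1), inserts
 * a new [3, 6) entry (from the cache, or a fresh allocation) and
 * returns 2.
 */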
562
563 /*
564  * A rare out of memory error was encountered which prevented removal of
565  * the reserve map region for a page.  The huge page itself was freed
566  * and removed from the page cache.  This routine will adjust the subpool
567  * usage count, and the global reserve count if needed.  By incrementing
568  * these counts, the reserve map entry which could not be deleted will
569  * appear as a "reserved" entry instead of simply dangling with incorrect
570  * counts.
571  */
572 void hugetlb_fix_reserve_counts(struct inode *inode)
573 {
574         struct hugepage_subpool *spool = subpool_inode(inode);
575         long rsv_adjust;
576
577         rsv_adjust = hugepage_subpool_get_pages(spool, 1);
578         if (rsv_adjust) {
579                 struct hstate *h = hstate_inode(inode);
580
581                 hugetlb_acct_memory(h, 1);
582         }
583 }
584
585 /*
586  * Count and return the number of huge pages in the reserve map
587  * that intersect with the range [f, t).
588  */
589 static long region_count(struct resv_map *resv, long f, long t)
590 {
591         struct list_head *head = &resv->regions;
592         struct file_region *rg;
593         long chg = 0;
594
595         spin_lock(&resv->lock);
596         /* Locate each segment we overlap with, and count that overlap. */
597         list_for_each_entry(rg, head, link) {
598                 long seg_from;
599                 long seg_to;
600
601                 if (rg->to <= f)
602                         continue;
603                 if (rg->from >= t)
604                         break;
605
606                 seg_from = max(rg->from, f);
607                 seg_to = min(rg->to, t);
608
609                 chg += seg_to - seg_from;
610         }
611         spin_unlock(&resv->lock);
612
613         return chg;
614 }
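
/*
 * Illustrative example (editor's sketch): with regions [0, 2) and
 * [4, 6) in the map, region_count(resv, 1, 5) sums the overlaps
 * [1, 2) and [4, 5) and returns 2.
 */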
615
616 /*
617  * Convert the address within this vma to the page offset within
618  * the mapping, in pagecache page units; huge pages here.
619  */
620 static pgoff_t vma_hugecache_offset(struct hstate *h,
621                         struct vm_area_struct *vma, unsigned long address)
622 {
623         return ((address - vma->vm_start) >> huge_page_shift(h)) +
624                         (vma->vm_pgoff >> huge_page_order(h));
625 }
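
/*
 * Illustrative example (editor's sketch): with 2MB huge pages
 * (huge_page_shift == 21) and vm_pgoff == 0, an address 4MB past
 * vma->vm_start yields huge page index (4MB >> 21) + 0 == 2 within
 * the backing file.
 */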
626
627 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
628                                      unsigned long address)
629 {
630         return vma_hugecache_offset(hstate_vma(vma), vma, address);
631 }
632 EXPORT_SYMBOL_GPL(linear_hugepage_index);
633
634 /*
635  * Return the size of the pages allocated when backing a VMA. In the majority
636  * of cases this will be the same size as that used by the page table entries.
637  */
638 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
639 {
640         struct hstate *hstate;
641
642         if (!is_vm_hugetlb_page(vma))
643                 return PAGE_SIZE;
644
645         hstate = hstate_vma(vma);
646
647         return 1UL << huge_page_shift(hstate);
648 }
649 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
650
651 /*
652  * Return the page size being used by the MMU to back a VMA. In the majority
653  * of cases, the page size used by the kernel matches the MMU size. On
654  * architectures where it differs, an architecture-specific version of this
655  * function is required.
656  */
657 #ifndef vma_mmu_pagesize
658 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
659 {
660         return vma_kernel_pagesize(vma);
661 }
662 #endif
663
664 /*
665  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
666  * bits of the reservation map pointer, which are always clear due to
667  * alignment.
668  */
669 #define HPAGE_RESV_OWNER    (1UL << 0)
670 #define HPAGE_RESV_UNMAPPED (1UL << 1)
671 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
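
/*
 * Illustrative example (editor's sketch): a private mapping that owns
 * its reserves stores (unsigned long)resv_map | HPAGE_RESV_OWNER in
 * vm_private_data; vma_resv_map() below recovers the pointer by
 * masking off HPAGE_RESV_MASK.
 */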
672
673 /*
674  * These helpers are used to track how many pages are reserved for
675  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
676  * is guaranteed to have their future faults succeed.
677  *
678  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
679  * the reserve counters are updated with the hugetlb_lock held. It is safe
680  * to reset the VMA at fork() time as it is not in use yet and there is no
681  * chance of the global counters getting corrupted as a result of the values.
682  *
683  * The private mapping reservation is represented in a subtly different
684  * manner to a shared mapping.  A shared mapping has a region map associated
685  * with the underlying file, this region map represents the backing file
686  * pages which have ever had a reservation assigned; this persists even
687  * after the page is instantiated.  A private mapping has a region map
688  * associated with the original mmap which is attached to all VMAs which
689  * reference it, this region map represents those offsets which have consumed
690  * reservation, i.e. where pages have been instantiated.
691  */
692 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
693 {
694         return (unsigned long)vma->vm_private_data;
695 }
696
697 static void set_vma_private_data(struct vm_area_struct *vma,
698                                                         unsigned long value)
699 {
700         vma->vm_private_data = (void *)value;
701 }
702
703 struct resv_map *resv_map_alloc(void)
704 {
705         struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
706         struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
707
708         if (!resv_map || !rg) {
709                 kfree(resv_map);
710                 kfree(rg);
711                 return NULL;
712         }
713
714         kref_init(&resv_map->refs);
715         spin_lock_init(&resv_map->lock);
716         INIT_LIST_HEAD(&resv_map->regions);
717
718         resv_map->adds_in_progress = 0;
719
720         INIT_LIST_HEAD(&resv_map->region_cache);
721         list_add(&rg->link, &resv_map->region_cache);
722         resv_map->region_cache_count = 1;
723
724         return resv_map;
725 }
726
727 void resv_map_release(struct kref *ref)
728 {
729         struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
730         struct list_head *head = &resv_map->region_cache;
731         struct file_region *rg, *trg;
732
733         /* Clear out any active regions before we release the map. */
734         region_del(resv_map, 0, LONG_MAX);
735
736         /* ... and any entries left in the cache */
737         list_for_each_entry_safe(rg, trg, head, link) {
738                 list_del(&rg->link);
739                 kfree(rg);
740         }
741
742         VM_BUG_ON(resv_map->adds_in_progress);
743
744         kfree(resv_map);
745 }
746
747 static inline struct resv_map *inode_resv_map(struct inode *inode)
748 {
749         return inode->i_mapping->private_data;
750 }
751
752 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
753 {
754         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
755         if (vma->vm_flags & VM_MAYSHARE) {
756                 struct address_space *mapping = vma->vm_file->f_mapping;
757                 struct inode *inode = mapping->host;
758
759                 return inode_resv_map(inode);
760
761         } else {
762                 return (struct resv_map *)(get_vma_private_data(vma) &
763                                                         ~HPAGE_RESV_MASK);
764         }
765 }
766
767 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
768 {
769         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
770         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
771
772         set_vma_private_data(vma, (get_vma_private_data(vma) &
773                                 HPAGE_RESV_MASK) | (unsigned long)map);
774 }
775
776 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
777 {
778         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
779         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
780
781         set_vma_private_data(vma, get_vma_private_data(vma) | flags);
782 }
783
784 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
785 {
786         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
787
788         return (get_vma_private_data(vma) & flag) != 0;
789 }
790
791 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
792 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
793 {
794         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
795         if (!(vma->vm_flags & VM_MAYSHARE))
796                 vma->vm_private_data = (void *)0;
797 }
798
799 /* Returns true if the VMA has associated reserve pages */
800 static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
801 {
802         if (vma->vm_flags & VM_NORESERVE) {
803                 /*
804                  * This address is already reserved by another process (chg == 0),
805                  * so, we should decrement reserved count. Without decrementing,
806                  * reserve count remains after releasing inode, because this
807                  * allocated page will go into page cache and is regarded as
808                  * coming from reserved pool in releasing step.  Currently, we
809                  * don't have any other solution to deal with this situation
810                  * properly, so add work-around here.
811                  */
812                 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
813                         return true;
814                 else
815                         return false;
816         }
817
818         /* Shared mappings always use reserves */
819         if (vma->vm_flags & VM_MAYSHARE) {
820                 /*
821                  * We know VM_NORESERVE is not set.  Therefore, there SHOULD
822                  * be a region map for all pages.  The only situation where
823                  * there is no region map is if a hole was punched via
824                  * fallocate.  In this case, there really are no reserves to
825                  * use.  This situation is indicated if chg != 0.
826                  */
827                 if (chg)
828                         return false;
829                 else
830                         return true;
831         }
832
833         /*
834          * Only the process that called mmap() has reserves for
835          * private mappings.
836          */
837         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
838                 /*
839                  * Like the shared case above, a hole punch or truncate
840                  * could have been performed on the private mapping.
841                  * Examine the value of chg to determine if reserves
842                  * actually exist or were previously consumed.
843                  * Very Subtle - The value of chg comes from a previous
844                  * call to vma_needs_reserves().  The reserve map for
845                  * private mappings has different (opposite) semantics
846                  * than that of shared mappings.  vma_needs_reserves()
847                  * has already taken this difference in semantics into
848                  * account.  Therefore, the meaning of chg is the same
849                  * as in the shared case above.  Code could easily be
850                  * combined, but keeping it separate draws attention to
851                  * subtle differences.
852                  */
853                 if (chg)
854                         return false;
855                 else
856                         return true;
857         }
858
859         return false;
860 }
861
862 static void enqueue_huge_page(struct hstate *h, struct page *page)
863 {
864         int nid = page_to_nid(page);
865         list_move(&page->lru, &h->hugepage_freelists[nid]);
866         h->free_huge_pages++;
867         h->free_huge_pages_node[nid]++;
868 }
869
870 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
871 {
872         struct page *page;
873
874         list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
875                 if (!PageHWPoison(page))
876                         break;
877         /*
878          * If no usable (non-HWPoisoned) free hugepage was found on
879          * the list, the allocation fails.
880          */
881         if (&h->hugepage_freelists[nid] == &page->lru)
882                 return NULL;
883         list_move(&page->lru, &h->hugepage_activelist);
884         set_page_refcounted(page);
885         h->free_huge_pages--;
886         h->free_huge_pages_node[nid]--;
887         return page;
888 }
889
890 static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
891                 nodemask_t *nmask)
892 {
893         unsigned int cpuset_mems_cookie;
894         struct zonelist *zonelist;
895         struct zone *zone;
896         struct zoneref *z;
897         int node = -1;
898
899         zonelist = node_zonelist(nid, gfp_mask);
900
901 retry_cpuset:
902         cpuset_mems_cookie = read_mems_allowed_begin();
903         for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
904                 struct page *page;
905
906                 if (!cpuset_zone_allowed(zone, gfp_mask))
907                         continue;
908                 /*
909                  * no need to ask again on the same node. Pool is node rather than
910                  * zone aware
911                  */
912                 if (zone_to_nid(zone) == node)
913                         continue;
914                 node = zone_to_nid(zone);
915
916                 page = dequeue_huge_page_node_exact(h, node);
917                 if (page)
918                         return page;
919         }
920         if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
921                 goto retry_cpuset;
922
923         return NULL;
924 }
925
926 /* Movability of hugepages depends on migration support. */
927 static inline gfp_t htlb_alloc_mask(struct hstate *h)
928 {
929         if (hugepages_treat_as_movable || hugepage_migration_supported(h))
930                 return GFP_HIGHUSER_MOVABLE;
931         else
932                 return GFP_HIGHUSER;
933 }
934
935 static struct page *dequeue_huge_page_vma(struct hstate *h,
936                                 struct vm_area_struct *vma,
937                                 unsigned long address, int avoid_reserve,
938                                 long chg)
939 {
940         struct page *page;
941         struct mempolicy *mpol;
942         gfp_t gfp_mask;
943         nodemask_t *nodemask;
944         int nid;
945
946         /*
947          * A child process with MAP_PRIVATE mappings created by its parent
948          * has no page reserves. This check ensures that reservations are
949          * not "stolen". The child may still get SIGKILLed
950          */
951         if (!vma_has_reserves(vma, chg) &&
952                         h->free_huge_pages - h->resv_huge_pages == 0)
953                 goto err;
954
955         /* If reserves cannot be used, ensure enough pages are in the pool */
956         if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
957                 goto err;
958
959         gfp_mask = htlb_alloc_mask(h);
960         nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
961         page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
962         if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
963                 SetPagePrivate(page);
964                 h->resv_huge_pages--;
965         }
966
967         mpol_cond_put(mpol);
968         return page;
969
970 err:
971         return NULL;
972 }
973
974 /*
975  * common helper functions for hstate_next_node_to_{alloc|free}.
976  * We may have allocated or freed a huge page based on a different
977  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
978  * be outside of *nodes_allowed.  Ensure that we use an allowed
979  * node for alloc or free.
980  */
981 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
982 {
983         nid = next_node_in(nid, *nodes_allowed);
984         VM_BUG_ON(nid >= MAX_NUMNODES);
985
986         return nid;
987 }
988
989 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
990 {
991         if (!node_isset(nid, *nodes_allowed))
992                 nid = next_node_allowed(nid, nodes_allowed);
993         return nid;
994 }
995
996 /*
997  * returns the previously saved node ["this node"] from which to
998  * allocate a persistent huge page for the pool and advance the
999  * next node from which to allocate, handling wrap at end of node
1000  * mask.
1001  */
1002 static int hstate_next_node_to_alloc(struct hstate *h,
1003                                         nodemask_t *nodes_allowed)
1004 {
1005         int nid;
1006
1007         VM_BUG_ON(!nodes_allowed);
1008
1009         nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
1010         h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
1011
1012         return nid;
1013 }
1014
1015 /*
1016  * helper for free_pool_huge_page() - return the previously saved
1017  * node ["this node"] from which to free a huge page.  Advance the
1018  * next node id whether or not we find a free huge page to free so
1019  * that the next attempt to free addresses the next node.
1020  */
1021 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
1022 {
1023         int nid;
1024
1025         VM_BUG_ON(!nodes_allowed);
1026
1027         nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
1028         h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
1029
1030         return nid;
1031 }
1032
1033 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)           \
1034         for (nr_nodes = nodes_weight(*mask);                            \
1035                 nr_nodes > 0 &&                                         \
1036                 ((node = hstate_next_node_to_alloc(hs, mask)) || 1);    \
1037                 nr_nodes--)
1038
1039 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask)            \
1040         for (nr_nodes = nodes_weight(*mask);                            \
1041                 nr_nodes > 0 &&                                         \
1042                 ((node = hstate_next_node_to_free(hs, mask)) || 1);     \
1043                 nr_nodes--)
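
/*
 * Illustrative example (editor's sketch): with *mask spanning nodes
 * {0, 2} and h->next_nid_to_alloc currently at 0,
 * for_each_node_mask_to_alloc() runs its body once per node in the
 * mask (node 0, then node 2) and leaves next_nid_to_alloc pointing
 * back at node 0 for the next invocation.
 */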
1044
1045 #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
1046 static void destroy_compound_gigantic_page(struct page *page,
1047                                         unsigned int order)
1048 {
1049         int i;
1050         int nr_pages = 1 << order;
1051         struct page *p = page + 1;
1052
1053         atomic_set(compound_mapcount_ptr(page), 0);
1054         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1055                 clear_compound_head(p);
1056                 set_page_refcounted(p);
1057         }
1058
1059         set_compound_order(page, 0);
1060         __ClearPageHead(page);
1061 }
1062
1063 static void free_gigantic_page(struct page *page, unsigned int order)
1064 {
1065         free_contig_range(page_to_pfn(page), 1 << order);
1066 }
1067
1068 static int __alloc_gigantic_page(unsigned long start_pfn,
1069                                 unsigned long nr_pages, gfp_t gfp_mask)
1070 {
1071         unsigned long end_pfn = start_pfn + nr_pages;
1072         return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
1073                                   gfp_mask);
1074 }
1075
1076 static bool pfn_range_valid_gigantic(struct zone *z,
1077                         unsigned long start_pfn, unsigned long nr_pages)
1078 {
1079         unsigned long i, end_pfn = start_pfn + nr_pages;
1080         struct page *page;
1081
1082         for (i = start_pfn; i < end_pfn; i++) {
1083                 if (!pfn_valid(i))
1084                         return false;
1085
1086                 page = pfn_to_page(i);
1087
1088                 if (page_zone(page) != z)
1089                         return false;
1090
1091                 if (PageReserved(page))
1092                         return false;
1093
1094                 if (page_count(page) > 0)
1095                         return false;
1096
1097                 if (PageHuge(page))
1098                         return false;
1099         }
1100
1101         return true;
1102 }
1103
1104 static bool zone_spans_last_pfn(const struct zone *zone,
1105                         unsigned long start_pfn, unsigned long nr_pages)
1106 {
1107         unsigned long last_pfn = start_pfn + nr_pages - 1;
1108         return zone_spans_pfn(zone, last_pfn);
1109 }
1110
1111 static struct page *alloc_gigantic_page(int nid, struct hstate *h)
1112 {
1113         unsigned int order = huge_page_order(h);
1114         unsigned long nr_pages = 1 << order;
1115         unsigned long ret, pfn, flags;
1116         struct zonelist *zonelist;
1117         struct zone *zone;
1118         struct zoneref *z;
1119         gfp_t gfp_mask;
1120
1121         gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
1122         zonelist = node_zonelist(nid, gfp_mask);
1123         for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), NULL) {
1124                 spin_lock_irqsave(&zone->lock, flags);
1125
1126                 pfn = ALIGN(zone->zone_start_pfn, nr_pages);
1127                 while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
1128                         if (pfn_range_valid_gigantic(zone, pfn, nr_pages)) {
1129                                 /*
1130                                  * We release the zone lock here because
1131                                  * alloc_contig_range() will also lock the zone
1132                                  * at some point. If there's an allocation
1133                                  * spinning on this lock, it may win the race
1134                                  * and cause alloc_contig_range() to fail...
1135                                  */
1136                                 spin_unlock_irqrestore(&zone->lock, flags);
1137                                 ret = __alloc_gigantic_page(pfn, nr_pages, gfp_mask);
1138                                 if (!ret)
1139                                         return pfn_to_page(pfn);
1140                                 spin_lock_irqsave(&zone->lock, flags);
1141                         }
1142                         pfn += nr_pages;
1143                 }
1144
1145                 spin_unlock_irqrestore(&zone->lock, flags);
1146         }
1147
1148         return NULL;
1149 }
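
/*
 * Illustrative example (editor's sketch, assuming 4KB base pages):
 * for a 1GB hstate (order 18), the scan above walks candidate ranges
 * of 262144 pfns, each aligned to a 262144-pfn boundary, until
 * alloc_contig_range() succeeds on one of them.
 */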
1150
1151 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1152 static void prep_compound_gigantic_page(struct page *page, unsigned int order);
1153
1154 static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
1155 {
1156         struct page *page;
1157
1158         page = alloc_gigantic_page(nid, h);
1159         if (page) {
1160                 prep_compound_gigantic_page(page, huge_page_order(h));
1161                 prep_new_huge_page(h, page, nid);
1162         }
1163
1164         return page;
1165 }
1166
1167 static int alloc_fresh_gigantic_page(struct hstate *h,
1168                                 nodemask_t *nodes_allowed)
1169 {
1170         struct page *page = NULL;
1171         int nr_nodes, node;
1172
1173         for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1174                 page = alloc_fresh_gigantic_page_node(h, node);
1175                 if (page)
1176                         return 1;
1177         }
1178
1179         return 0;
1180 }
1181
1182 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
1183 static inline bool gigantic_page_supported(void) { return false; }
1184 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1185 static inline void destroy_compound_gigantic_page(struct page *page,
1186                                                 unsigned int order) { }
1187 static inline int alloc_fresh_gigantic_page(struct hstate *h,
1188                                         nodemask_t *nodes_allowed) { return 0; }
1189 #endif
1190
1191 static void update_and_free_page(struct hstate *h, struct page *page)
1192 {
1193         int i;
1194
1195         if (hstate_is_gigantic(h) && !gigantic_page_supported())
1196                 return;
1197
1198         h->nr_huge_pages--;
1199         h->nr_huge_pages_node[page_to_nid(page)]--;
1200         for (i = 0; i < pages_per_huge_page(h); i++) {
1201                 page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
1202                                 1 << PG_referenced | 1 << PG_dirty |
1203                                 1 << PG_active | 1 << PG_private |
1204                                 1 << PG_writeback);
1205         }
1206         VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1207         set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1208         set_page_refcounted(page);
1209         if (hstate_is_gigantic(h)) {
1210                 destroy_compound_gigantic_page(page, huge_page_order(h));
1211                 free_gigantic_page(page, huge_page_order(h));
1212         } else {
1213                 __free_pages(page, huge_page_order(h));
1214         }
1215 }
1216
1217 struct hstate *size_to_hstate(unsigned long size)
1218 {
1219         struct hstate *h;
1220
1221         for_each_hstate(h) {
1222                 if (huge_page_size(h) == size)
1223                         return h;
1224         }
1225         return NULL;
1226 }
1227
1228 /*
1229  * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
1230  * to hstate->hugepage_activelist.)
1231  *
1232  * This function can be called for tail pages, but never returns true for them.
1233  */
1234 bool page_huge_active(struct page *page)
1235 {
1236         VM_BUG_ON_PAGE(!PageHuge(page), page);
1237         return PageHead(page) && PagePrivate(&page[1]);
1238 }
1239
1240 /* never called for tail page */
1241 static void set_page_huge_active(struct page *page)
1242 {
1243         VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1244         SetPagePrivate(&page[1]);
1245 }
1246
1247 static void clear_page_huge_active(struct page *page)
1248 {
1249         VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1250         ClearPagePrivate(&page[1]);
1251 }
1252
1253 void free_huge_page(struct page *page)
1254 {
1255         /*
1256          * Can't pass hstate in here because it is called from the
1257          * compound page destructor.
1258          */
1259         struct hstate *h = page_hstate(page);
1260         int nid = page_to_nid(page);
1261         struct hugepage_subpool *spool =
1262                 (struct hugepage_subpool *)page_private(page);
1263         bool restore_reserve;
1264
1265         set_page_private(page, 0);
1266         page->mapping = NULL;
1267         VM_BUG_ON_PAGE(page_count(page), page);
1268         VM_BUG_ON_PAGE(page_mapcount(page), page);
1269         restore_reserve = PagePrivate(page);
1270         ClearPagePrivate(page);
1271
1272         /*
1273          * A return code of zero implies that the subpool will be under its
1274          * minimum size if the reservation is not restored after the page is freed.
1275          * Therefore, force restore_reserve operation.
1276          */
1277         if (hugepage_subpool_put_pages(spool, 1) == 0)
1278                 restore_reserve = true;
1279
1280         spin_lock(&hugetlb_lock);
1281         clear_page_huge_active(page);
1282         hugetlb_cgroup_uncharge_page(hstate_index(h),
1283                                      pages_per_huge_page(h), page);
1284         if (restore_reserve)
1285                 h->resv_huge_pages++;
1286
1287         if (h->surplus_huge_pages_node[nid]) {
1288                 /* remove the page from active list */
1289                 list_del(&page->lru);
1290                 update_and_free_page(h, page);
1291                 h->surplus_huge_pages--;
1292                 h->surplus_huge_pages_node[nid]--;
1293         } else {
1294                 arch_clear_hugepage_flags(page);
1295                 enqueue_huge_page(h, page);
1296         }
1297         spin_unlock(&hugetlb_lock);
1298 }
1299
1300 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1301 {
1302         INIT_LIST_HEAD(&page->lru);
1303         set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1304         spin_lock(&hugetlb_lock);
1305         set_hugetlb_cgroup(page, NULL);
1306         h->nr_huge_pages++;
1307         h->nr_huge_pages_node[nid]++;
1308         spin_unlock(&hugetlb_lock);
1309         put_page(page); /* free it into the hugepage allocator */
1310 }
1311
1312 static void prep_compound_gigantic_page(struct page *page, unsigned int order)
1313 {
1314         int i;
1315         int nr_pages = 1 << order;
1316         struct page *p = page + 1;
1317
1318         /* we rely on prep_new_huge_page to set the destructor */
1319         set_compound_order(page, order);
1320         __ClearPageReserved(page);
1321         __SetPageHead(page);
1322         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1323                 /*
1324                  * For gigantic hugepages allocated through bootmem at
1325                  * boot, it's safer to be consistent with the not-gigantic
1326                  * hugepages and clear the PG_reserved bit from all tail pages
1327          * too.  Otherwise drivers using get_user_pages() to access tail
1328                  * pages may get the reference counting wrong if they see
1329                  * PG_reserved set on a tail page (despite the head page not
1330                  * having PG_reserved set).  Enforcing this consistency between
1331                  * head and tail pages allows drivers to optimize away a check
1332          * on the head page when they need to know if put_page() is needed
1333                  * after get_user_pages().
1334                  */
1335                 __ClearPageReserved(p);
1336                 set_page_count(p, 0);
1337                 set_compound_head(p, page);
1338         }
1339         atomic_set(compound_mapcount_ptr(page), -1);
1340 }
1341
1342 /*
1343  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1344  * transparent huge pages.  See the PageTransHuge() documentation for more
1345  * details.
1346  */
1347 int PageHuge(struct page *page)
1348 {
1349         if (!PageCompound(page))
1350                 return 0;
1351
1352         page = compound_head(page);
1353         return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
1354 }
1355 EXPORT_SYMBOL_GPL(PageHuge);
1356
1357 /*
1358  * PageHeadHuge() only returns true for hugetlbfs head page, but not for
1359  * normal or transparent huge pages.
1360  */
1361 int PageHeadHuge(struct page *page_head)
1362 {
1363         if (!PageHead(page_head))
1364                 return 0;
1365
1366         return get_compound_page_dtor(page_head) == free_huge_page;
1367 }
1368
1369 pgoff_t __basepage_index(struct page *page)
1370 {
1371         struct page *page_head = compound_head(page);
1372         pgoff_t index = page_index(page_head);
1373         unsigned long compound_idx;
1374
1375         if (!PageHuge(page_head))
1376                 return page_index(page);
1377
1378         if (compound_order(page_head) >= MAX_ORDER)
1379                 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1380         else
1381                 compound_idx = page - page_head;
1382
1383         return (index << compound_order(page_head)) + compound_idx;
1384 }
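
/*
 * Illustrative example (editor's sketch): for a 2MB huge page
 * (order 9) at huge page index 3, the tail page at offset 5 has base
 * page index (3 << 9) + 5 == 1541.
 */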
1385
1386 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
1387 {
1388         struct page *page;
1389
1390         page = __alloc_pages_node(nid,
1391                 htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
1392                                                 __GFP_RETRY_MAYFAIL|__GFP_NOWARN,
1393                 huge_page_order(h));
1394         if (page) {
1395                 prep_new_huge_page(h, page, nid);
1396         }
1397
1398         return page;
1399 }
1400
1401 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
1402 {
1403         struct page *page;
1404         int nr_nodes, node;
1405         int ret = 0;
1406
1407         for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1408                 page = alloc_fresh_huge_page_node(h, node);
1409                 if (page) {
1410                         ret = 1;
1411                         break;
1412                 }
1413         }
1414
1415         if (ret)
1416                 count_vm_event(HTLB_BUDDY_PGALLOC);
1417         else
1418                 count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1419
1420         return ret;
1421 }
1422
1423 /*
1424  * Free huge page from pool from next node to free.
1425  * Attempt to keep persistent huge pages more or less
1426  * balanced over allowed nodes.
1427  * Called with hugetlb_lock locked.
1428  */
1429 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1430                                                          bool acct_surplus)
1431 {
1432         int nr_nodes, node;
1433         int ret = 0;
1434
1435         for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1436                 /*
1437                  * If we're returning unused surplus pages, only examine
1438                  * nodes with surplus pages.
1439                  */
1440                 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1441                     !list_empty(&h->hugepage_freelists[node])) {
1442                         struct page *page =
1443                                 list_entry(h->hugepage_freelists[node].next,
1444                                           struct page, lru);
1445                         list_del(&page->lru);
1446                         h->free_huge_pages--;
1447                         h->free_huge_pages_node[node]--;
1448                         if (acct_surplus) {
1449                                 h->surplus_huge_pages--;
1450                                 h->surplus_huge_pages_node[node]--;
1451                         }
1452                         update_and_free_page(h, page);
1453                         ret = 1;
1454                         break;
1455                 }
1456         }
1457
1458         return ret;
1459 }
1460
1461 /*
1462  * Dissolve a given free hugepage into free buddy pages. This function does
1463  * nothing for in-use (including surplus) hugepages. Returns -EBUSY if the
1464  * number of free hugepages would be reduced below the number of reserved
1465  * hugepages.
1466  */
1467 int dissolve_free_huge_page(struct page *page)
1468 {
1469         int rc = 0;
1470
1471         spin_lock(&hugetlb_lock);
1472         if (PageHuge(page) && !page_count(page)) {
1473                 struct page *head = compound_head(page);
1474                 struct hstate *h = page_hstate(head);
1475                 int nid = page_to_nid(head);
1476                 if (h->free_huge_pages - h->resv_huge_pages == 0) {
1477                         rc = -EBUSY;
1478                         goto out;
1479                 }
1480                 /*
1481                  * Move PageHWPoison flag from head page to the raw error page,
1482          * which makes the subpages other than the error page reusable.
1483                  */
1484                 if (PageHWPoison(head) && page != head) {
1485                         SetPageHWPoison(page);
1486                         ClearPageHWPoison(head);
1487                 }
1488                 list_del(&head->lru);
1489                 h->free_huge_pages--;
1490                 h->free_huge_pages_node[nid]--;
1491                 h->max_huge_pages--;
1492                 update_and_free_page(h, head);
1493         }
1494 out:
1495         spin_unlock(&hugetlb_lock);
1496         return rc;
1497 }
1498
1499 /*
1500  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1501  * make specified memory blocks removable from the system.
1502  * Note that this will dissolve a free gigantic hugepage completely, if any
1503  * part of it lies within the given range.
1504  * Also note that if dissolve_free_huge_page() returns with an error, all
1505  * free hugepages that were dissolved before that error are lost.
1506  */
1507 int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1508 {
1509         unsigned long pfn;
1510         struct page *page;
1511         int rc = 0;
1512
1513         if (!hugepages_supported())
1514                 return rc;
1515
1516         for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
1517                 page = pfn_to_page(pfn);
1518                 if (PageHuge(page) && !page_count(page)) {
1519                         rc = dissolve_free_huge_page(page);
1520                         if (rc)
1521                                 break;
1522                 }
1523         }
1524
1525         return rc;
1526 }
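
/*
 * Illustrative note: with only 2 MB hstates on x86-64, minimum_order is
 * 9, so the scan above advances 512 pfns at a time and visits one
 * candidate hugepage head per step instead of every base page.
 */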
1527
1528 static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
1529                 gfp_t gfp_mask, int nid, nodemask_t *nmask)
1530 {
1531         int order = huge_page_order(h);
1532
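        /*
         * __GFP_RETRY_MAYFAIL: try hard, but fail rather than trigger
         * the OOM killer; __GFP_NOWARN: a failed high-order allocation
         * is an expected outcome that the callers handle themselves.
         */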
1533         gfp_mask |= __GFP_COMP|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
1534         if (nid == NUMA_NO_NODE)
1535                 nid = numa_mem_id();
1536         return __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
1537 }
1538
1539 static struct page *__alloc_buddy_huge_page(struct hstate *h, gfp_t gfp_mask,
1540                 int nid, nodemask_t *nmask)
1541 {
1542         struct page *page;
1543         unsigned int r_nid;
1544
1545         if (hstate_is_gigantic(h))
1546                 return NULL;
1547
1548         /*
1549          * Assume we will successfully allocate the surplus page to
1550          * prevent racing processes from causing the surplus to exceed
1551          * overcommit.
1552          *
1553          * This however introduces a different race, where a process B
1554          * tries to grow the static hugepage pool while alloc_pages() is
1555          * called by process A. B will only examine the per-node
1556          * counters in determining if surplus huge pages can be
1557          * converted to normal huge pages in adjust_pool_surplus(). A
1558          * won't be able to increment the per-node counter, until the
1559          * lock is dropped by B, but B doesn't drop hugetlb_lock until
1560          * no more huge pages can be converted from surplus to normal
1561          * state (and doesn't try to convert again). Thus, we have a
1562          * case where a surplus huge page exists, the pool is grown, and
1563          * the surplus huge page still exists after, even though it
1564          * should just have been converted to a normal huge page. This
1565          * does not leak memory, though, as the hugepage will be freed
1566          * once it is out of use. It also does not allow the counters to
1567          * go out of whack in adjust_pool_surplus() as we don't modify
1568          * the node values until we've gotten the hugepage and only the
1569          * per-node value is checked there.
1570          */
1571         spin_lock(&hugetlb_lock);
1572         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1573                 spin_unlock(&hugetlb_lock);
1574                 return NULL;
1575         } else {
1576                 h->nr_huge_pages++;
1577                 h->surplus_huge_pages++;
1578         }
1579         spin_unlock(&hugetlb_lock);
1580
1581         page = __hugetlb_alloc_buddy_huge_page(h, gfp_mask, nid, nmask);
1582
1583         spin_lock(&hugetlb_lock);
1584         if (page) {
1585                 INIT_LIST_HEAD(&page->lru);
1586                 r_nid = page_to_nid(page);
1587                 set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1588                 set_hugetlb_cgroup(page, NULL);
1589                 /*
1590                  * We incremented the global counters already
1591                  */
1592                 h->nr_huge_pages_node[r_nid]++;
1593                 h->surplus_huge_pages_node[r_nid]++;
1594                 __count_vm_event(HTLB_BUDDY_PGALLOC);
1595         } else {
1596                 h->nr_huge_pages--;
1597                 h->surplus_huge_pages--;
1598                 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1599         }
1600         spin_unlock(&hugetlb_lock);
1601
1602         return page;
1603 }
1604
1605 /*
1606  * Use the VMA's mpolicy to allocate a huge page from the buddy.
1607  */
1608 static
1609 struct page *__alloc_buddy_huge_page_with_mpol(struct hstate *h,
1610                 struct vm_area_struct *vma, unsigned long addr)
1611 {
1612         struct page *page;
1613         struct mempolicy *mpol;
1614         gfp_t gfp_mask = htlb_alloc_mask(h);
1615         int nid;
1616         nodemask_t *nodemask;
1617
1618         nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
1619         page = __alloc_buddy_huge_page(h, gfp_mask, nid, nodemask);
1620         mpol_cond_put(mpol);
1621
1622         return page;
1623 }
1624
1625 /*
1626  * This allocation function is useful in the context where vma is irrelevant.
1627  * E.g. soft-offlining uses this function because it only cares about
1628  * the physical address of the error page.
1629  */
1630 struct page *alloc_huge_page_node(struct hstate *h, int nid)
1631 {
1632         gfp_t gfp_mask = htlb_alloc_mask(h);
1633         struct page *page = NULL;
1634
1635         if (nid != NUMA_NO_NODE)
1636                 gfp_mask |= __GFP_THISNODE;
1637
1638         spin_lock(&hugetlb_lock);
1639         if (h->free_huge_pages - h->resv_huge_pages > 0)
1640                 page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL);
1641         spin_unlock(&hugetlb_lock);
1642
1643         if (!page)
1644                 page = __alloc_buddy_huge_page(h, gfp_mask, nid, NULL);
1645
1646         return page;
1647 }
1648
1650 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
1651                 nodemask_t *nmask)
1652 {
1653         gfp_t gfp_mask = htlb_alloc_mask(h);
1654
1655         spin_lock(&hugetlb_lock);
1656         if (h->free_huge_pages - h->resv_huge_pages > 0) {
1657                 struct page *page;
1658
1659                 page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
1660                 if (page) {
1661                         spin_unlock(&hugetlb_lock);
1662                         return page;
1663                 }
1664         }
1665         spin_unlock(&hugetlb_lock);
1666
1667         /* No reservations, try to overcommit */
1668
1669         return __alloc_buddy_huge_page(h, gfp_mask, preferred_nid, nmask);
1670 }
1671
1672 /*
1673  * Increase the hugetlb pool such that it can accommodate a reservation
1674  * of size 'delta'.
1675  */
1676 static int gather_surplus_pages(struct hstate *h, int delta)
1677 {
1678         struct list_head surplus_list;
1679         struct page *page, *tmp;
1680         int ret, i;
1681         int needed, allocated;
1682         bool alloc_ok = true;
1683
1684         needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1685         if (needed <= 0) {
1686                 h->resv_huge_pages += delta;
1687                 return 0;
1688         }
1689
1690         allocated = 0;
1691         INIT_LIST_HEAD(&surplus_list);
1692
1693         ret = -ENOMEM;
1694 retry:
1695         spin_unlock(&hugetlb_lock);
1696         for (i = 0; i < needed; i++) {
1697                 page = __alloc_buddy_huge_page(h, htlb_alloc_mask(h),
1698                                 NUMA_NO_NODE, NULL);
1699                 if (!page) {
1700                         alloc_ok = false;
1701                         break;
1702                 }
1703                 list_add(&page->lru, &surplus_list);
1704                 cond_resched();
1705         }
1706         allocated += i;
1707
1708         /*
1709          * After retaking hugetlb_lock, we need to recalculate 'needed'
1710          * because either resv_huge_pages or free_huge_pages may have changed.
1711          */
1712         spin_lock(&hugetlb_lock);
1713         needed = (h->resv_huge_pages + delta) -
1714                         (h->free_huge_pages + allocated);
1715         if (needed > 0) {
1716                 if (alloc_ok)
1717                         goto retry;
1718                 /*
1719                  * We were not able to allocate enough pages to
1720                  * satisfy the entire reservation so we free what
1721                  * we've allocated so far.
1722                  */
1723                 goto free;
1724         }
1725         /*
1726          * The surplus_list now contains _at_least_ the number of extra pages
1727          * needed to accommodate the reservation.  Add the appropriate number
1728          * of pages to the hugetlb pool and free the extras back to the buddy
1729          * allocator.  Commit the entire reservation here to prevent another
1730          * process from stealing the pages as they are added to the pool but
1731          * before they are reserved.
1732          */
1733         needed += allocated;
1734         h->resv_huge_pages += delta;
1735         ret = 0;
1736
1737         /* Free the needed pages to the hugetlb pool */
1738         list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1739                 if ((--needed) < 0)
1740                         break;
1741                 /*
1742                  * This page is now managed by the hugetlb allocator and has
1743                  * no users -- drop the buddy allocator's reference.
1744                  */
1745                 put_page_testzero(page);
1746                 VM_BUG_ON_PAGE(page_count(page), page);
1747                 enqueue_huge_page(h, page);
1748         }
1749 free:
1750         spin_unlock(&hugetlb_lock);
1751
1752         /* Free unnecessary surplus pages to the buddy allocator */
1753         list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1754                 put_page(page);
1755         spin_lock(&hugetlb_lock);
1756
1757         return ret;
1758 }
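
/*
 * Worked example for gather_surplus_pages() (illustrative): with
 * resv_huge_pages = 10, delta = 4 and free_huge_pages = 12, needed is 2
 * and two surplus pages are allocated.  If a racing free raises
 * free_huge_pages to 14 before the lock is retaken, the recomputed
 * needed is -2, so needed += allocated leaves 0 pages to enqueue and
 * both extras are returned to the buddy allocator.
 */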
1759
1760 /*
1761  * This routine has two main purposes:
1762  * 1) Decrement the reservation count (resv_huge_pages) by the value passed
1763  *    in unused_resv_pages.  This corresponds to the prior adjustments made
1764  *    to the associated reservation map.
1765  * 2) Free any unused surplus pages that may have been allocated to satisfy
1766  *    the reservation.  As many as unused_resv_pages may be freed.
1767  *
1768  * Called with hugetlb_lock held.  However, the lock could be dropped (and
1769  * reacquired) during calls to cond_resched_lock.  Whenever dropping the lock,
1770  * we must make sure nobody else can claim pages we are in the process of
1771  * freeing.  Do this by ensuring resv_huge_pages always remains greater than
1772  * number of huge pages we plan to free when dropping the lock.
1773  */
1774 static void return_unused_surplus_pages(struct hstate *h,
1775                                         unsigned long unused_resv_pages)
1776 {
1777         unsigned long nr_pages;
1778
1779         /* Cannot return gigantic pages currently */
1780         if (hstate_is_gigantic(h))
1781                 goto out;
1782
1783         /*
1784          * Part (or even all) of the reservation could have been backed
1785          * by pre-allocated pages. Only free surplus pages.
1786          */
1787         nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1788
1789         /*
1790          * We want to release as many surplus pages as possible, spread
1791          * evenly across all nodes with memory. Iterate across these nodes
1792          * until we can no longer free unreserved surplus pages. This occurs
1793          * when the nodes with surplus pages have no free pages.
1794  * free_pool_huge_page() will balance the freed pages across the
1795          * on-line nodes with memory and will handle the hstate accounting.
1796          *
1797          * Note that we decrement resv_huge_pages as we free the pages.  If
1798          * we drop the lock, resv_huge_pages will still be sufficiently large
1799          * to cover subsequent pages we may free.
1800          */
1801         while (nr_pages--) {
1802                 h->resv_huge_pages--;
1803                 unused_resv_pages--;
1804                 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1805                         goto out;
1806                 cond_resched_lock(&hugetlb_lock);
1807         }
1808
1809 out:
1810         /* Fully uncommit the reservation */
1811         h->resv_huge_pages -= unused_resv_pages;
1812 }
1813
1814
1815 /*
1816  * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
1817  * are used by the huge page allocation routines to manage reservations.
1818  *
1819  * vma_needs_reservation is called to determine if the huge page at addr
1820  * within the vma has an associated reservation.  If a reservation is
1821  * needed, the value 1 is returned.  The caller is then responsible for
1822  * managing the global reservation and subpool usage counts.  After
1823  * the huge page has been allocated, vma_commit_reservation is called
1824  * to add the page to the reservation map.  If the page allocation fails,
1825  * the reservation must be ended instead of committed.  vma_end_reservation
1826  * is called in such cases.
1827  *
1828  * In the normal case, vma_commit_reservation returns the same value
1829  * as the preceding vma_needs_reservation call.  The only time this
1830  * is not the case is if a reserve map was changed between calls.  It
1831  * is the responsibility of the caller to notice the difference and
1832  * take appropriate action.
1833  *
1834  * vma_add_reservation is used in error paths where a reservation needs
1835  * to be restored when a newly allocated huge page is freed.  It is
1836  * to be called after calling vma_needs_reservation to determine if a
1837  * reservation exists.
1838  */
1839 enum vma_resv_mode {
1840         VMA_NEEDS_RESV,
1841         VMA_COMMIT_RESV,
1842         VMA_END_RESV,
1843         VMA_ADD_RESV,
1844 };
1845 static long __vma_reservation_common(struct hstate *h,
1846                                 struct vm_area_struct *vma, unsigned long addr,
1847                                 enum vma_resv_mode mode)
1848 {
1849         struct resv_map *resv;
1850         pgoff_t idx;
1851         long ret;
1852
1853         resv = vma_resv_map(vma);
1854         if (!resv)
1855                 return 1;
1856
1857         idx = vma_hugecache_offset(h, vma, addr);
1858         switch (mode) {
1859         case VMA_NEEDS_RESV:
1860                 ret = region_chg(resv, idx, idx + 1);
1861                 break;
1862         case VMA_COMMIT_RESV:
1863                 ret = region_add(resv, idx, idx + 1);
1864                 break;
1865         case VMA_END_RESV:
1866                 region_abort(resv, idx, idx + 1);
1867                 ret = 0;
1868                 break;
1869         case VMA_ADD_RESV:
1870                 if (vma->vm_flags & VM_MAYSHARE)
1871                         ret = region_add(resv, idx, idx + 1);
1872                 else {
1873                         region_abort(resv, idx, idx + 1);
1874                         ret = region_del(resv, idx, idx + 1);
1875                 }
1876                 break;
1877         default:
1878                 BUG();
1879         }
1880
1881         if (vma->vm_flags & VM_MAYSHARE)
1882                 return ret;
1883         else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
1884                 /*
1885                  * In most cases, reserves always exist for private mappings.
1886          * However, the file associated with the mapping could have been
1887          * hole punched or truncated after reserves were consumed;
1888          * a subsequent fault on such a range will not use reserves.
1889                  * Subtle - The reserve map for private mappings has the
1890                  * opposite meaning than that of shared mappings.  If NO
1891                  * entry is in the reserve map, it means a reservation exists.
1892                  * If an entry exists in the reserve map, it means the
1893                  * reservation has already been consumed.  As a result, the
1894                  * return value of this routine is the opposite of the
1895                  * value returned from reserve map manipulation routines above.
1896                  */
1897                 if (ret)
1898                         return 0;
1899                 else
1900                         return 1;
1901         }
1902         else
1903                 return ret < 0 ? ret : 0;
1904 }
1905
1906 static long vma_needs_reservation(struct hstate *h,
1907                         struct vm_area_struct *vma, unsigned long addr)
1908 {
1909         return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
1910 }
1911
1912 static long vma_commit_reservation(struct hstate *h,
1913                         struct vm_area_struct *vma, unsigned long addr)
1914 {
1915         return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
1916 }
1917
1918 static void vma_end_reservation(struct hstate *h,
1919                         struct vm_area_struct *vma, unsigned long addr)
1920 {
1921         (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
1922 }
1923
1924 static long vma_add_reservation(struct hstate *h,
1925                         struct vm_area_struct *vma, unsigned long addr)
1926 {
1927         return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
1928 }
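
/*
 * Illustrative sketch (not part of the original file): the calling
 * protocol described in the block comment above, condensed into one
 * hypothetical helper.  alloc_huge_page() below is the real consumer;
 * the helper name and its exact shape are assumptions made for
 * illustration only.
 */
static __maybe_unused long reservation_protocol_sketch(struct hstate *h,
                struct vm_area_struct *vma, unsigned long addr,
                struct page *newly_allocated_page)
{
        long chg = vma_needs_reservation(h, vma, addr);

        if (chg < 0)
                return -ENOMEM; /* reserve map manipulation failed */

        if (!newly_allocated_page) {
                /* allocation failed: back out the tentative region_chg */
                vma_end_reservation(h, vma, addr);
                return -ENOSPC;
        }

        /* allocation succeeded: make the reserve map entry permanent */
        return vma_commit_reservation(h, vma, addr);
}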
1929
1930 /*
1931  * This routine is called to restore a reservation on error paths.  In the
1932  * specific error paths, a huge page was allocated (via alloc_huge_page)
1933  * and is about to be freed.  If a reservation for the page existed,
1934  * alloc_huge_page would have consumed the reservation and set PagePrivate
1935  * in the newly allocated page.  When the page is freed via free_huge_page,
1936  * the global reservation count will be incremented if PagePrivate is set.
1937  * However, free_huge_page can not adjust the reserve map.  Adjust the
1938  * reserve map here to be consistent with global reserve count adjustments
1939  * to be made by free_huge_page.
1940  */
1941 static void restore_reserve_on_error(struct hstate *h,
1942                         struct vm_area_struct *vma, unsigned long address,
1943                         struct page *page)
1944 {
1945         if (unlikely(PagePrivate(page))) {
1946                 long rc = vma_needs_reservation(h, vma, address);
1947
1948                 if (unlikely(rc < 0)) {
1949                         /*
1950                          * Rare out of memory condition in reserve map
1951                          * manipulation.  Clear PagePrivate so that
1952                          * global reserve count will not be incremented
1953                          * by free_huge_page.  This will make it appear
1954                          * as though the reservation for this page was
1955                          * consumed.  This may prevent the task from
1956                          * faulting in the page at a later time.  This
1957                          * is better than inconsistent global huge page
1958                          * accounting of reserve counts.
1959                          */
1960                         ClearPagePrivate(page);
1961                 } else if (rc) {
1962                         rc = vma_add_reservation(h, vma, address);
1963                         if (unlikely(rc < 0))
1964                                 /*
1965                                  * See above comment about rare out of
1966                                  * memory condition.
1967                                  */
1968                                 ClearPagePrivate(page);
1969                 } else
1970                         vma_end_reservation(h, vma, address);
1971         }
1972 }
1973
1974 struct page *alloc_huge_page(struct vm_area_struct *vma,
1975                                     unsigned long addr, int avoid_reserve)
1976 {
1977         struct hugepage_subpool *spool = subpool_vma(vma);
1978         struct hstate *h = hstate_vma(vma);
1979         struct page *page;
1980         long map_chg, map_commit;
1981         long gbl_chg;
1982         int ret, idx;
1983         struct hugetlb_cgroup *h_cg;
1984
1985         idx = hstate_index(h);
1986         /*
1987          * Examine the region/reserve map to determine if the process
1988          * has a reservation for the page to be allocated.  A return
1989          * code of zero indicates a reservation exists (no change).
1990          */
1991         map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
1992         if (map_chg < 0)
1993                 return ERR_PTR(-ENOMEM);
1994
1995         /*
1996          * Processes that did not create the mapping will have no
1997          * reserves as indicated by the region/reserve map. Check
1998          * that the allocation will not exceed the subpool limit.
1999          * Allocations for MAP_NORESERVE mappings also need to be
2000          * checked against any subpool limit.
2001          */
2002         if (map_chg || avoid_reserve) {
2003                 gbl_chg = hugepage_subpool_get_pages(spool, 1);
2004                 if (gbl_chg < 0) {
2005                         vma_end_reservation(h, vma, addr);
2006                         return ERR_PTR(-ENOSPC);
2007                 }
2008
2009                 /*
2010                  * Even though there was no reservation in the region/reserve
2011                  * map, there could be reservations associated with the
2012                  * subpool that can be used.  This would be indicated if the
2013                  * return value of hugepage_subpool_get_pages() is zero.
2014                  * However, if avoid_reserve is specified we still avoid even
2015                  * the subpool reservations.
2016                  */
2017                 if (avoid_reserve)
2018                         gbl_chg = 1;
2019         }
2020
2021         ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
2022         if (ret)
2023                 goto out_subpool_put;
2024
2025         spin_lock(&hugetlb_lock);
2026         /*
2027          * gbl_chg is passed to indicate whether or not a page must be taken
2028          * from the global free pool (global change).  gbl_chg == 0 indicates
2029          * a reservation exists for the allocation.
2030          */
2031         page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
2032         if (!page) {
2033                 spin_unlock(&hugetlb_lock);
2034                 page = __alloc_buddy_huge_page_with_mpol(h, vma, addr);
2035                 if (!page)
2036                         goto out_uncharge_cgroup;
2037                 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
2038                         SetPagePrivate(page);
2039                         h->resv_huge_pages--;
2040                 }
2041                 spin_lock(&hugetlb_lock);
2042                 list_move(&page->lru, &h->hugepage_activelist);
2043                 /* Fall through */
2044         }
2045         hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
2046         spin_unlock(&hugetlb_lock);
2047
2048         set_page_private(page, (unsigned long)spool);
2049
2050         map_commit = vma_commit_reservation(h, vma, addr);
2051         if (unlikely(map_chg > map_commit)) {
2052                 /*
2053                  * The page was added to the reservation map between
2054                  * vma_needs_reservation and vma_commit_reservation.
2055                  * This indicates a race with hugetlb_reserve_pages.
2056                  * Adjust for the subpool count incremented above AND
2057                  * in hugetlb_reserve_pages for the same page.  Also,
2058                  * the reservation count added in hugetlb_reserve_pages
2059                  * no longer applies.
2060                  */
2061                 long rsv_adjust;
2062
2063                 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
2064                 hugetlb_acct_memory(h, -rsv_adjust);
2065         }
2066         return page;
2067
2068 out_uncharge_cgroup:
2069         hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
2070 out_subpool_put:
2071         if (map_chg || avoid_reserve)
2072                 hugepage_subpool_put_pages(spool, 1);
2073         vma_end_reservation(h, vma, addr);
2074         return ERR_PTR(-ENOSPC);
2075 }
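
/*
 * Example of the race handled at the end of alloc_huge_page()
 * (illustrative): vma_needs_reservation() returns 1 (map_chg), but a
 * concurrent hugetlb_reserve_pages() adds the page to the reserve map
 * first, so vma_commit_reservation() returns 0 (map_commit).  The
 * subpool page and the reservation taken twice for the same page are
 * then given back via hugepage_subpool_put_pages()/hugetlb_acct_memory().
 */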
2076
2077 /*
2078  * alloc_huge_page()'s wrapper which simply returns the page if allocation
2079  * succeeds, otherwise NULL. This function is called from new_vma_page(),
2080  * where no ERR_PTR() value is expected to be returned.
2081  */
2082 struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
2083                                 unsigned long addr, int avoid_reserve)
2084 {
2085         struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
2086         if (IS_ERR(page))
2087                 page = NULL;
2088         return page;
2089 }
2090
2091 int alloc_bootmem_huge_page(struct hstate *h)
2092         __attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
2093 int __alloc_bootmem_huge_page(struct hstate *h)
2094 {
2095         struct huge_bootmem_page *m;
2096         int nr_nodes, node;
2097
2098         for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
2099                 void *addr;
2100
2101                 addr = memblock_virt_alloc_try_nid_nopanic(
2102                                 huge_page_size(h), huge_page_size(h),
2103                                 0, BOOTMEM_ALLOC_ACCESSIBLE, node);
2104                 if (addr) {
2105                         /*
2106                          * Use the beginning of the huge page to store the
2107                          * huge_bootmem_page struct (until gather_bootmem
2108                          * puts them into the mem_map).
2109                          */
2110                         m = addr;
2111                         goto found;
2112                 }
2113         }
2114         return 0;
2115
2116 found:
2117         BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
2118         /* Put them into a private list first because mem_map is not up yet */
2119         list_add(&m->list, &huge_boot_pages);
2120         m->hstate = h;
2121         return 1;
2122 }
2123
2124 static void __init prep_compound_huge_page(struct page *page,
2125                 unsigned int order)
2126 {
2127         if (unlikely(order > (MAX_ORDER - 1)))
2128                 prep_compound_gigantic_page(page, order);
2129         else
2130                 prep_compound_page(page, order);
2131 }
2132
2133 /* Put bootmem huge pages into the standard lists after mem_map is up */
2134 static void __init gather_bootmem_prealloc(void)
2135 {
2136         struct huge_bootmem_page *m;
2137
2138         list_for_each_entry(m, &huge_boot_pages, list) {
2139                 struct hstate *h = m->hstate;
2140                 struct page *page;
2141
2142 #ifdef CONFIG_HIGHMEM
2143                 page = pfn_to_page(m->phys >> PAGE_SHIFT);
2144                 memblock_free_late(__pa(m),
2145                                    sizeof(struct huge_bootmem_page));
2146 #else
2147                 page = virt_to_page(m);
2148 #endif
2149                 WARN_ON(page_count(page) != 1);
2150                 prep_compound_huge_page(page, h->order);
2151                 WARN_ON(PageReserved(page));
2152                 prep_new_huge_page(h, page, page_to_nid(page));
2153                 /*
2154                  * If we had gigantic hugepages allocated at boot time, we need
2155                  * to restore the 'stolen' pages to totalram_pages in order to
2156                  * fix confusing memory reports from free(1) and another
2157          * fix confusing memory reports from free(1) and other
2158          * side effects, like CommitLimit going negative.
2159                 if (hstate_is_gigantic(h))
2160                         adjust_managed_page_count(page, 1 << h->order);
2161         }
2162 }
2163
2164 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
2165 {
2166         unsigned long i;
2167
2168         for (i = 0; i < h->max_huge_pages; ++i) {
2169                 if (hstate_is_gigantic(h)) {
2170                         if (!alloc_bootmem_huge_page(h))
2171                                 break;
2172                 } else if (!alloc_fresh_huge_page(h,
2173                                          &node_states[N_MEMORY]))
2174                         break;
2175                 cond_resched();
2176         }
2177         if (i < h->max_huge_pages) {
2178                 char buf[32];
2179
2180                 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2181                 pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
2182                         h->max_huge_pages, buf, i);
2183                 h->max_huge_pages = i;
2184         }
2185 }
2186
2187 static void __init hugetlb_init_hstates(void)
2188 {
2189         struct hstate *h;
2190
2191         for_each_hstate(h) {
2192                 if (minimum_order > huge_page_order(h))
2193                         minimum_order = huge_page_order(h);
2194
2195                 /* oversize hugepages were init'ed in early boot */
2196                 if (!hstate_is_gigantic(h))
2197                         hugetlb_hstate_alloc_pages(h);
2198         }
2199         VM_BUG_ON(minimum_order == UINT_MAX);
2200 }
2201
2202 static void __init report_hugepages(void)
2203 {
2204         struct hstate *h;
2205
2206         for_each_hstate(h) {
2207                 char buf[32];
2208
2209                 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2210                 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
2211                         buf, h->free_huge_pages);
2212         }
2213 }
2214
2215 #ifdef CONFIG_HIGHMEM
2216 static void try_to_free_low(struct hstate *h, unsigned long count,
2217                                                 nodemask_t *nodes_allowed)
2218 {
2219         int i;
2220
2221         if (hstate_is_gigantic(h))
2222                 return;
2223
2224         for_each_node_mask(i, *nodes_allowed) {
2225                 struct page *page, *next;
2226                 struct list_head *freel = &h->hugepage_freelists[i];
2227                 list_for_each_entry_safe(page, next, freel, lru) {
2228                         if (count >= h->nr_huge_pages)
2229                                 return;
2230                         if (PageHighMem(page))
2231                                 continue;
2232                         list_del(&page->lru);
2233                         update_and_free_page(h, page);
2234                         h->free_huge_pages--;
2235                         h->free_huge_pages_node[page_to_nid(page)]--;
2236                 }
2237         }
2238 }
2239 #else
2240 static inline void try_to_free_low(struct hstate *h, unsigned long count,
2241                                                 nodemask_t *nodes_allowed)
2242 {
2243 }
2244 #endif
2245
2246 /*
2247  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
2248  * balanced by operating on them in a round-robin fashion.
2249  * Returns 1 if an adjustment was made.
2250  */
2251 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
2252                                 int delta)
2253 {
2254         int nr_nodes, node;
2255
2256         VM_BUG_ON(delta != -1 && delta != 1);
2257
2258         if (delta < 0) {
2259                 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2260                         if (h->surplus_huge_pages_node[node])
2261                                 goto found;
2262                 }
2263         } else {
2264                 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2265                         if (h->surplus_huge_pages_node[node] <
2266                                         h->nr_huge_pages_node[node])
2267                                 goto found;
2268                 }
2269         }
2270         return 0;
2271
2272 found:
2273         h->surplus_huge_pages += delta;
2274         h->surplus_huge_pages_node[node] += delta;
2275         return 1;
2276 }
2277
2278 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
2279 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
2280                                                 nodemask_t *nodes_allowed)
2281 {
2282         unsigned long min_count, ret;
2283
2284         if (hstate_is_gigantic(h) && !gigantic_page_supported())
2285                 return h->max_huge_pages;
2286
2287         /*
2288          * Increase the pool size
2289          * First take pages out of surplus state.  Then make up the
2290          * remaining difference by allocating fresh huge pages.
2291          *
2292          * We might race with __alloc_buddy_huge_page() here and be unable
2293          * to convert a surplus huge page to a normal huge page. That is
2294          * not critical, though, it just means the overall size of the
2295          * pool might be one hugepage larger than it needs to be, but
2296          * within all the constraints specified by the sysctls.
2297          */
2298         spin_lock(&hugetlb_lock);
2299         while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
2300                 if (!adjust_pool_surplus(h, nodes_allowed, -1))
2301                         break;
2302         }
2303
2304         while (count > persistent_huge_pages(h)) {
2305                 /*
2306                  * If this allocation races such that we no longer need the
2307                  * page, free_huge_page will handle it by freeing the page
2308                  * and reducing the surplus.
2309                  */
2310                 spin_unlock(&hugetlb_lock);
2311
2312                 /* yield cpu to avoid soft lockup */
2313                 cond_resched();
2314
2315                 if (hstate_is_gigantic(h))
2316                         ret = alloc_fresh_gigantic_page(h, nodes_allowed);
2317                 else
2318                         ret = alloc_fresh_huge_page(h, nodes_allowed);
2319                 spin_lock(&hugetlb_lock);
2320                 if (!ret)
2321                         goto out;
2322
2323                 /* Bail for signals. Probably ctrl-c from user */
2324                 if (signal_pending(current))
2325                         goto out;
2326         }
2327
2328         /*
2329          * Decrease the pool size
2330          * First return free pages to the buddy allocator (being careful
2331          * to keep enough around to satisfy reservations).  Then place
2332          * pages into surplus state as needed so the pool will shrink
2333          * to the desired size as pages become free.
2334          *
2335          * By placing pages into the surplus state independent of the
2336          * overcommit value, we are allowing the surplus pool size to
2337          * exceed overcommit. There are few sane options here. Since
2338          * __alloc_buddy_huge_page() is checking the global counter,
2339          * though, we'll note that we're not allowed to exceed surplus
2340          * and won't grow the pool anywhere else. Not until one of the
2341          * sysctls are changed, or the surplus pages go out of use.
2342          */
2343         min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
2344         min_count = max(count, min_count);
2345         try_to_free_low(h, min_count, nodes_allowed);
2346         while (min_count < persistent_huge_pages(h)) {
2347                 if (!free_pool_huge_page(h, nodes_allowed, 0))
2348                         break;
2349                 cond_resched_lock(&hugetlb_lock);
2350         }
2351         while (count < persistent_huge_pages(h)) {
2352                 if (!adjust_pool_surplus(h, nodes_allowed, 1))
2353                         break;
2354         }
2355 out:
2356         ret = persistent_huge_pages(h);
2357         spin_unlock(&hugetlb_lock);
2358         return ret;
2359 }
2360
2361 #define HSTATE_ATTR_RO(_name) \
2362         static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2363
2364 #define HSTATE_ATTR(_name) \
2365         static struct kobj_attribute _name##_attr = \
2366                 __ATTR(_name, 0644, _name##_show, _name##_store)
2367
2368 static struct kobject *hugepages_kobj;
2369 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2370
2371 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2372
2373 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2374 {
2375         int i;
2376
2377         for (i = 0; i < HUGE_MAX_HSTATE; i++)
2378                 if (hstate_kobjs[i] == kobj) {
2379                         if (nidp)
2380                                 *nidp = NUMA_NO_NODE;
2381                         return &hstates[i];
2382                 }
2383
2384         return kobj_to_node_hstate(kobj, nidp);
2385 }
2386
2387 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2388                                         struct kobj_attribute *attr, char *buf)
2389 {
2390         struct hstate *h;
2391         unsigned long nr_huge_pages;
2392         int nid;
2393
2394         h = kobj_to_hstate(kobj, &nid);
2395         if (nid == NUMA_NO_NODE)
2396                 nr_huge_pages = h->nr_huge_pages;
2397         else
2398                 nr_huge_pages = h->nr_huge_pages_node[nid];
2399
2400         return sprintf(buf, "%lu\n", nr_huge_pages);
2401 }
2402
2403 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2404                                            struct hstate *h, int nid,
2405                                            unsigned long count, size_t len)
2406 {
2407         int err;
2408         NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
2409
2410         if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
2411                 err = -EINVAL;
2412                 goto out;
2413         }
2414
2415         if (nid == NUMA_NO_NODE) {
2416                 /*
2417                  * global hstate attribute
2418                  */
2419                 if (!(obey_mempolicy &&
2420                                 init_nodemask_of_mempolicy(nodes_allowed))) {
2421                         NODEMASK_FREE(nodes_allowed);
2422                         nodes_allowed = &node_states[N_MEMORY];
2423                 }
2424         } else if (nodes_allowed) {
2425                 /*
2426                  * per node hstate attribute: adjust count to global,
2427                  * but restrict alloc/free to the specified node.
2428                  */
2429                 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2430                 init_nodemask_of_node(nodes_allowed, nid);
2431         } else
2432                 nodes_allowed = &node_states[N_MEMORY];
2433
2434         h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
2435
2436         if (nodes_allowed != &node_states[N_MEMORY])
2437                 NODEMASK_FREE(nodes_allowed);
2438
2439         return len;
2440 out:
2441         NODEMASK_FREE(nodes_allowed);
2442         return err;
2443 }
2444
2445 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2446                                          struct kobject *kobj, const char *buf,
2447                                          size_t len)
2448 {
2449         struct hstate *h;
2450         unsigned long count;
2451         int nid;
2452         int err;
2453
2454         err = kstrtoul(buf, 10, &count);
2455         if (err)
2456                 return err;
2457
2458         h = kobj_to_hstate(kobj, &nid);
2459         return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2460 }
2461
2462 static ssize_t nr_hugepages_show(struct kobject *kobj,
2463                                        struct kobj_attribute *attr, char *buf)
2464 {
2465         return nr_hugepages_show_common(kobj, attr, buf);
2466 }
2467
2468 static ssize_t nr_hugepages_store(struct kobject *kobj,
2469                struct kobj_attribute *attr, const char *buf, size_t len)
2470 {
2471         return nr_hugepages_store_common(false, kobj, buf, len);
2472 }
2473 HSTATE_ATTR(nr_hugepages);
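
/*
 * For reference (illustrative): the HSTATE_ATTR() invocation above
 * expands to roughly
 *
 *      static struct kobj_attribute nr_hugepages_attr =
 *              __ATTR(nr_hugepages, 0644, nr_hugepages_show,
 *                     nr_hugepages_store);
 *
 * pairing the show/store callbacks with a 0644 sysfs file.
 */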
2474
2475 #ifdef CONFIG_NUMA
2476
2477 /*
2478  * hstate attribute for optionally mempolicy-based constraint on persistent
2479  * huge page alloc/free.
2480  */
2481 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
2482                                        struct kobj_attribute *attr, char *buf)
2483 {
2484         return nr_hugepages_show_common(kobj, attr, buf);
2485 }
2486
2487 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2488                struct kobj_attribute *attr, const char *buf, size_t len)
2489 {
2490         return nr_hugepages_store_common(true, kobj, buf, len);
2491 }
2492 HSTATE_ATTR(nr_hugepages_mempolicy);
2493 #endif
2494
2495
2496 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2497                                         struct kobj_attribute *attr, char *buf)
2498 {
2499         struct hstate *h = kobj_to_hstate(kobj, NULL);
2500         return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2501 }
2502
2503 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2504                 struct kobj_attribute *attr, const char *buf, size_t count)
2505 {
2506         int err;
2507         unsigned long input;
2508         struct hstate *h = kobj_to_hstate(kobj, NULL);
2509
2510         if (hstate_is_gigantic(h))
2511                 return -EINVAL;
2512
2513         err = kstrtoul(buf, 10, &input);
2514         if (err)
2515                 return err;
2516
2517         spin_lock(&hugetlb_lock);
2518         h->nr_overcommit_huge_pages = input;
2519         spin_unlock(&hugetlb_lock);
2520
2521         return count;
2522 }
2523 HSTATE_ATTR(nr_overcommit_hugepages);
2524
2525 static ssize_t free_hugepages_show(struct kobject *kobj,
2526                                         struct kobj_attribute *attr, char *buf)
2527 {
2528         struct hstate *h;
2529         unsigned long free_huge_pages;
2530         int nid;
2531
2532         h = kobj_to_hstate(kobj, &nid);
2533         if (nid == NUMA_NO_NODE)
2534                 free_huge_pages = h->free_huge_pages;
2535         else
2536                 free_huge_pages = h->free_huge_pages_node[nid];
2537
2538         return sprintf(buf, "%lu\n", free_huge_pages);
2539 }
2540 HSTATE_ATTR_RO(free_hugepages);
2541
2542 static ssize_t resv_hugepages_show(struct kobject *kobj,
2543                                         struct kobj_attribute *attr, char *buf)
2544 {
2545         struct hstate *h = kobj_to_hstate(kobj, NULL);
2546         return sprintf(buf, "%lu\n", h->resv_huge_pages);
2547 }
2548 HSTATE_ATTR_RO(resv_hugepages);
2549
2550 static ssize_t surplus_hugepages_show(struct kobject *kobj,
2551                                         struct kobj_attribute *attr, char *buf)
2552 {
2553         struct hstate *h;
2554         unsigned long surplus_huge_pages;
2555         int nid;
2556
2557         h = kobj_to_hstate(kobj, &nid);
2558         if (nid == NUMA_NO_NODE)
2559                 surplus_huge_pages = h->surplus_huge_pages;
2560         else
2561                 surplus_huge_pages = h->surplus_huge_pages_node[nid];
2562
2563         return sprintf(buf, "%lu\n", surplus_huge_pages);
2564 }
2565 HSTATE_ATTR_RO(surplus_hugepages);
2566
2567 static struct attribute *hstate_attrs[] = {
2568         &nr_hugepages_attr.attr,
2569         &nr_overcommit_hugepages_attr.attr,
2570         &free_hugepages_attr.attr,
2571         &resv_hugepages_attr.attr,
2572         &surplus_hugepages_attr.attr,
2573 #ifdef CONFIG_NUMA
2574         &nr_hugepages_mempolicy_attr.attr,
2575 #endif
2576         NULL,
2577 };
2578
2579 static const struct attribute_group hstate_attr_group = {
2580         .attrs = hstate_attrs,
2581 };
2582
2583 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2584                                     struct kobject **hstate_kobjs,
2585                                     const struct attribute_group *hstate_attr_group)
2586 {
2587         int retval;
2588         int hi = hstate_index(h);
2589
2590         hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2591         if (!hstate_kobjs[hi])
2592                 return -ENOMEM;
2593
2594         retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2595         if (retval)
2596                 kobject_put(hstate_kobjs[hi]);
2597
2598         return retval;
2599 }
2600
2601 static void __init hugetlb_sysfs_init(void)
2602 {
2603         struct hstate *h;
2604         int err;
2605
2606         hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2607         if (!hugepages_kobj)
2608                 return;
2609
2610         for_each_hstate(h) {
2611                 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2612                                          hstate_kobjs, &hstate_attr_group);
2613                 if (err)
2614                         pr_err("HugeTLB: Unable to add hstate %s\n", h->name);
2615         }
2616 }
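
/*
 * Resulting sysfs layout (illustrative, for a 2 MB hstate):
 *
 *      /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *      /sys/kernel/mm/hugepages/hugepages-2048kB/free_hugepages
 *      ...
 *
 * mm_kobj is /sys/kernel/mm, and h->name ("hugepages-<size>kB") is set
 * in hugetlb_add_hstate().
 */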
2617
2618 #ifdef CONFIG_NUMA
2619
2620 /*
2621  * node_hstate/s - associate per node hstate attributes, via their kobjects,
2622  * with node devices in node_devices[] using a parallel array.  The array
2623  * index of a node device or _hstate == node id.
2624  * This is here to avoid any static dependency of the node device driver, in
2625  * the base kernel, on the hugetlb module.
2626  */
2627 struct node_hstate {
2628         struct kobject          *hugepages_kobj;
2629         struct kobject          *hstate_kobjs[HUGE_MAX_HSTATE];
2630 };
2631 static struct node_hstate node_hstates[MAX_NUMNODES];
2632
2633 /*
2634  * A subset of global hstate attributes for node devices
2635  */
2636 static struct attribute *per_node_hstate_attrs[] = {
2637         &nr_hugepages_attr.attr,
2638         &free_hugepages_attr.attr,
2639         &surplus_hugepages_attr.attr,
2640         NULL,
2641 };
2642
2643 static const struct attribute_group per_node_hstate_attr_group = {
2644         .attrs = per_node_hstate_attrs,
2645 };
2646
2647 /*
2648  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
2649  * Returns node id via non-NULL nidp.
2650  */
2651 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2652 {
2653         int nid;
2654
2655         for (nid = 0; nid < nr_node_ids; nid++) {
2656                 struct node_hstate *nhs = &node_hstates[nid];
2657                 int i;
2658                 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2659                         if (nhs->hstate_kobjs[i] == kobj) {
2660                                 if (nidp)
2661                                         *nidp = nid;
2662                                 return &hstates[i];
2663                         }
2664         }
2665
2666         BUG();
2667         return NULL;
2668 }
2669
2670 /*
2671  * Unregister hstate attributes from a single node device.
2672  * No-op if no hstate attributes attached.
2673  */
2674 static void hugetlb_unregister_node(struct node *node)
2675 {
2676         struct hstate *h;
2677         struct node_hstate *nhs = &node_hstates[node->dev.id];
2678
2679         if (!nhs->hugepages_kobj)
2680                 return;         /* no hstate attributes */
2681
2682         for_each_hstate(h) {
2683                 int idx = hstate_index(h);
2684                 if (nhs->hstate_kobjs[idx]) {
2685                         kobject_put(nhs->hstate_kobjs[idx]);
2686                         nhs->hstate_kobjs[idx] = NULL;
2687                 }
2688         }
2689
2690         kobject_put(nhs->hugepages_kobj);
2691         nhs->hugepages_kobj = NULL;
2692 }
2693
2694
2695 /*
2696  * Register hstate attributes for a single node device.
2697  * No-op if attributes already registered.
2698  */
2699 static void hugetlb_register_node(struct node *node)
2700 {
2701         struct hstate *h;
2702         struct node_hstate *nhs = &node_hstates[node->dev.id];
2703         int err;
2704
2705         if (nhs->hugepages_kobj)
2706                 return;         /* already allocated */
2707
2708         nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2709                                                         &node->dev.kobj);
2710         if (!nhs->hugepages_kobj)
2711                 return;
2712
2713         for_each_hstate(h) {
2714                 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2715                                                 nhs->hstate_kobjs,
2716                                                 &per_node_hstate_attr_group);
2717                 if (err) {
2718                         pr_err("HugeTLB: Unable to add hstate %s for node %d\n",
2719                                 h->name, node->dev.id);
2720                         hugetlb_unregister_node(node);
2721                         break;
2722                 }
2723         }
2724 }
2725
2726 /*
2727  * hugetlb init time:  register hstate attributes for all registered node
2728  * devices of nodes that have memory.  All on-line nodes should have
2729  * registered their associated device by this time.
2730  */
2731 static void __init hugetlb_register_all_nodes(void)
2732 {
2733         int nid;
2734
2735         for_each_node_state(nid, N_MEMORY) {
2736                 struct node *node = node_devices[nid];
2737                 if (node->dev.id == nid)
2738                         hugetlb_register_node(node);
2739         }
2740
2741         /*
2742          * Let the node device driver know we're here so it can
2743          * [un]register hstate attributes on node hotplug.
2744          */
2745         register_hugetlbfs_with_node(hugetlb_register_node,
2746                                      hugetlb_unregister_node);
2747 }
2748 #else   /* !CONFIG_NUMA */
2749
2750 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2751 {
2752         BUG();
2753         if (nidp)
2754                 *nidp = NUMA_NO_NODE;
2755         return NULL;
2756 }
2757
2758 static void hugetlb_register_all_nodes(void) { }
2759
2760 #endif
2761
2762 static int __init hugetlb_init(void)
2763 {
2764         int i;
2765
2766         if (!hugepages_supported())
2767                 return 0;
2768
2769         if (!size_to_hstate(default_hstate_size)) {
2770                 if (default_hstate_size != 0) {
2771                         pr_err("HugeTLB: unsupported default_hugepagesz %lu. Reverting to %lu\n",
2772                                default_hstate_size, HPAGE_SIZE);
2773                 }
2774
2775                 default_hstate_size = HPAGE_SIZE;
2776                 if (!size_to_hstate(default_hstate_size))
2777                         hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2778         }
2779         default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2780         if (default_hstate_max_huge_pages) {
2781                 if (!default_hstate.max_huge_pages)
2782                         default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2783         }
2784
2785         hugetlb_init_hstates();
2786         gather_bootmem_prealloc();
2787         report_hugepages();
2788
2789         hugetlb_sysfs_init();
2790         hugetlb_register_all_nodes();
2791         hugetlb_cgroup_file_init();
2792
2793 #ifdef CONFIG_SMP
2794         num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2795 #else
2796         num_fault_mutexes = 1;
2797 #endif
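        /*
         * e.g. (illustrative) 8 possible CPUs give
         * roundup_pow_of_two(64) = 64 mutexes; a power-of-two count
         * lets the fault path pick a mutex with a cheap hash-and-mask.
         */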
2798         hugetlb_fault_mutex_table =
2799                 kmalloc_array(num_fault_mutexes, sizeof(struct mutex), GFP_KERNEL);
2800         BUG_ON(!hugetlb_fault_mutex_table);
2801
2802         for (i = 0; i < num_fault_mutexes; i++)
2803                 mutex_init(&hugetlb_fault_mutex_table[i]);
2804         return 0;
2805 }
2806 subsys_initcall(hugetlb_init);
2807
2808 /* Should be called on processing a hugepagesz=... option */
2809 void __init hugetlb_bad_size(void)
2810 {
2811         parsed_valid_hugepagesz = false;
2812 }
2813
2814 void __init hugetlb_add_hstate(unsigned int order)
2815 {
2816         struct hstate *h;
2817         unsigned long i;
2818
2819         if (size_to_hstate(PAGE_SIZE << order)) {
2820                 pr_warn("hugepagesz= specified twice, ignoring\n");
2821                 return;
2822         }
2823         BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2824         BUG_ON(order == 0);
2825         h = &hstates[hugetlb_max_hstate++];
2826         h->order = order;
2827         h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2828         h->nr_huge_pages = 0;
2829         h->free_huge_pages = 0;
2830         for (i = 0; i < MAX_NUMNODES; ++i)
2831                 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2832         INIT_LIST_HEAD(&h->hugepage_activelist);
2833         h->next_nid_to_alloc = first_memory_node;
2834         h->next_nid_to_free = first_memory_node;
2835         snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2836                                         huge_page_size(h)/1024);
2837
2838         parsed_hstate = h;
2839 }
2840
2841 static int __init hugetlb_nrpages_setup(char *s)
2842 {
2843         unsigned long *mhp;
2844         static unsigned long *last_mhp;
2845
2846         if (!parsed_valid_hugepagesz) {
2847                 pr_warn("hugepages = %s preceded by an unsupported hugepagesz, ignoring\n", s);
2849                 parsed_valid_hugepagesz = true;
2850                 return 1;
2851         }
2852         /*
2853          * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2854          * so this hugepages= parameter goes to the "default hstate".
2855          */
2856         else if (!hugetlb_max_hstate)
2857                 mhp = &default_hstate_max_huge_pages;
2858         else
2859                 mhp = &parsed_hstate->max_huge_pages;
2860
2861         if (mhp == last_mhp) {
2862                 pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n");
2863                 return 1;
2864         }
2865
2866         if (sscanf(s, "%lu", mhp) <= 0)
2867                 *mhp = 0;
2868
2869         /*
2870          * Global state is always initialized later in hugetlb_init.
2871          * But we need to allocate >= MAX_ORDER hstates here early to still
2872          * use the bootmem allocator.
2873          */
2874         if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2875                 hugetlb_hstate_alloc_pages(parsed_hstate);
2876
2877         last_mhp = mhp;
2878
2879         return 1;
2880 }
2881 __setup("hugepages=", hugetlb_nrpages_setup);
2882
2883 static int __init hugetlb_default_setup(char *s)
2884 {
2885         default_hstate_size = memparse(s, &s);
2886         return 1;
2887 }
2888 __setup("default_hugepagesz=", hugetlb_default_setup);
2889
2890 static unsigned int cpuset_mems_nr(unsigned int *array)
2891 {
2892         int node;
2893         unsigned int nr = 0;
2894
2895         for_each_node_mask(node, cpuset_current_mems_allowed)
2896                 nr += array[node];
2897
2898         return nr;
2899 }
2900
2901 #ifdef CONFIG_SYSCTL
2902 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2903                          struct ctl_table *table, int write,
2904                          void __user *buffer, size_t *length, loff_t *ppos)
2905 {
2906         struct hstate *h = &default_hstate;
2907         unsigned long tmp = h->max_huge_pages;
2908         int ret;
2909
2910         if (!hugepages_supported())
2911                 return -EOPNOTSUPP;
2912
2913         table->data = &tmp;
2914         table->maxlen = sizeof(unsigned long);
2915         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2916         if (ret)
2917                 goto out;
2918
2919         if (write)
2920                 ret = __nr_hugepages_store_common(obey_mempolicy, h,
2921                                                   NUMA_NO_NODE, tmp, *length);
2922 out:
2923         return ret;
2924 }
2925
2926 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2927                           void __user *buffer, size_t *length, loff_t *ppos)
2928 {
2929
2930         return hugetlb_sysctl_handler_common(false, table, write,
2931                                                         buffer, length, ppos);
2932 }
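/*
 * Usage sketch (illustrative): this handler backs /proc/sys/vm/nr_hugepages,
 * so the default hstate's pool can be resized at run time:
 *
 *	echo 128 > /proc/sys/vm/nr_hugepages
 *
 * The NUMA-aware variant below backs nr_hugepages_mempolicy and obeys the
 * writing task's mempolicy when choosing nodes.
 */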
2933
2934 #ifdef CONFIG_NUMA
2935 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2936                           void __user *buffer, size_t *length, loff_t *ppos)
2937 {
2938         return hugetlb_sysctl_handler_common(true, table, write,
2939                                                         buffer, length, ppos);
2940 }
2941 #endif /* CONFIG_NUMA */
2942
2943 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2944                         void __user *buffer,
2945                         size_t *length, loff_t *ppos)
2946 {
2947         struct hstate *h = &default_hstate;
2948         unsigned long tmp;
2949         int ret;
2950
2951         if (!hugepages_supported())
2952                 return -EOPNOTSUPP;
2953
2954         tmp = h->nr_overcommit_huge_pages;
2955
2956         if (write && hstate_is_gigantic(h))
2957                 return -EINVAL;
2958
2959         table->data = &tmp;
2960         table->maxlen = sizeof(unsigned long);
2961         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2962         if (ret)
2963                 goto out;
2964
2965         if (write) {
2966                 spin_lock(&hugetlb_lock);
2967                 h->nr_overcommit_huge_pages = tmp;
2968                 spin_unlock(&hugetlb_lock);
2969         }
2970 out:
2971         return ret;
2972 }
2973
2974 #endif /* CONFIG_SYSCTL */
2975
2976 void hugetlb_report_meminfo(struct seq_file *m)
2977 {
2978         struct hstate *h = &default_hstate;
2979         if (!hugepages_supported())
2980                 return;
2981         seq_printf(m,
2982                         "HugePages_Total:   %5lu\n"
2983                         "HugePages_Free:    %5lu\n"
2984                         "HugePages_Rsvd:    %5lu\n"
2985                         "HugePages_Surp:    %5lu\n"
2986                         "Hugepagesize:   %8lu kB\n",
2987                         h->nr_huge_pages,
2988                         h->free_huge_pages,
2989                         h->resv_huge_pages,
2990                         h->surplus_huge_pages,
2991                         1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2992 }
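/*
 * Sample /proc/meminfo output from the above (values illustrative):
 *
 *	HugePages_Total:     128
 *	HugePages_Free:      100
 *	HugePages_Rsvd:       10
 *	HugePages_Surp:        0
 *	Hugepagesize:       2048 kB
 */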
2993
2994 int hugetlb_report_node_meminfo(int nid, char *buf)
2995 {
2996         struct hstate *h = &default_hstate;
2997         if (!hugepages_supported())
2998                 return 0;
2999         return sprintf(buf,
3000                 "Node %d HugePages_Total: %5u\n"
3001                 "Node %d HugePages_Free:  %5u\n"
3002                 "Node %d HugePages_Surp:  %5u\n",
3003                 nid, h->nr_huge_pages_node[nid],
3004                 nid, h->free_huge_pages_node[nid],
3005                 nid, h->surplus_huge_pages_node[nid]);
3006 }
3007
3008 void hugetlb_show_meminfo(void)
3009 {
3010         struct hstate *h;
3011         int nid;
3012
3013         if (!hugepages_supported())
3014                 return;
3015
3016         for_each_node_state(nid, N_MEMORY)
3017                 for_each_hstate(h)
3018                         pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
3019                                 nid,
3020                                 h->nr_huge_pages_node[nid],
3021                                 h->free_huge_pages_node[nid],
3022                                 h->surplus_huge_pages_node[nid],
3023                                 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
3024 }
3025
3026 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
3027 {
3028         seq_printf(m, "HugetlbPages:\t%8lu kB\n",
3029                    atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
3030 }
3031
3032 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
3033 unsigned long hugetlb_total_pages(void)
3034 {
3035         struct hstate *h;
3036         unsigned long nr_total_pages = 0;
3037
3038         for_each_hstate(h)
3039                 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
3040         return nr_total_pages;
3041 }
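/*
 * Worked example (illustrative): with 128 huge pages of 2MB on a 4kB
 * base-page kernel, pages_per_huge_page() is 512, so this returns
 * 128 * 512 == 65536 PAGE_SIZE units, i.e. 256MB.
 */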
3042
3043 static int hugetlb_acct_memory(struct hstate *h, long delta)
3044 {
3045         int ret = -ENOMEM;
3046
3047         spin_lock(&hugetlb_lock);
3048         /*
3049          * When cpuset is configured, it breaks the strict hugetlb page
3050          * reservation as the accounting is done on a global variable. Such
3051          * a reservation is essentially meaningless in the presence of cpusets
3052          * because the reservation is not checked against page availability
3053          * for the current cpuset. An application can still be OOM-killed by
3054          * the kernel for lack of free hugetlb pages in the cpuset that the
3055          * task is in. Attempting to enforce strict accounting with cpusets
3056          * is almost impossible (or too ugly) because cpusets are so fluid
3057          * that a task or memory node can be dynamically moved between them.
3058          *
3059          * The change of semantics for shared hugetlb mappings with cpusets
3060          * is undesirable. However, to preserve some of the semantics, we
3061          * fall back to checking against current free page availability as
3062          * a best attempt, hopefully minimizing the impact of the changed
3063          * semantics that cpusets introduce.
3064          */
3065         if (delta > 0) {
3066                 if (gather_surplus_pages(h, delta) < 0)
3067                         goto out;
3068
3069                 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
3070                         return_unused_surplus_pages(h, delta);
3071                         goto out;
3072                 }
3073         }
3074
3075         ret = 0;
3076         if (delta < 0)
3077                 return_unused_surplus_pages(h, (unsigned long) -delta);
3078
3079 out:
3080         spin_unlock(&hugetlb_lock);
3081         return ret;
3082 }
3083
3084 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
3085 {
3086         struct resv_map *resv = vma_resv_map(vma);
3087
3088         /*
3089          * This new VMA should share its sibling's reservation map if present.
3090          * The VMA will only ever have a valid reservation map pointer where
3091          * it is being copied for another still existing VMA.  As that VMA
3092          * has a reference to the reservation map it cannot disappear until
3093          * after this open call completes.  It is therefore safe to take a
3094          * new reference here without additional locking.
3095          */
3096         if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3097                 kref_get(&resv->refs);
3098 }
3099
3100 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
3101 {
3102         struct hstate *h = hstate_vma(vma);
3103         struct resv_map *resv = vma_resv_map(vma);
3104         struct hugepage_subpool *spool = subpool_vma(vma);
3105         unsigned long reserve, start, end;
3106         long gbl_reserve;
3107
3108         if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3109                 return;
3110
3111         start = vma_hugecache_offset(h, vma, vma->vm_start);
3112         end = vma_hugecache_offset(h, vma, vma->vm_end);
3113
3114         reserve = (end - start) - region_count(resv, start, end);
3115
3116         kref_put(&resv->refs, resv_map_release);
3117
3118         if (reserve) {
3119                 /*
3120                  * Decrement reserve counts.  The global reserve count may be
3121                  * adjusted if the subpool has a minimum size.
3122                  */
3123                 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
3124                 hugetlb_acct_memory(h, -gbl_reserve);
3125         }
3126 }
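/*
 * Worked example (illustrative) for the close path above: if a private
 * VMA reserved 10 huge pages at mmap() time but faulted in only 4,
 * region_count() reports the 4 consumed entries, so reserve == 6 unused
 * reservations are returned to the subpool, and the (possibly smaller)
 * gbl_reserve is uncharged from the global pool.
 */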
3127
3128 /*
3129  * We cannot handle pagefaults against hugetlb pages at all.  They cause
3130  * handle_mm_fault() to try to instantiate regular-sized pages in the
3131  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
3132  * this far.
3133  */
3134 static int hugetlb_vm_op_fault(struct vm_fault *vmf)
3135 {
3136         BUG();
3137         return 0;
3138 }
3139
3140 const struct vm_operations_struct hugetlb_vm_ops = {
3141         .fault = hugetlb_vm_op_fault,
3142         .open = hugetlb_vm_op_open,
3143         .close = hugetlb_vm_op_close,
3144 };
3145
3146 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
3147                                 int writable)
3148 {
3149         pte_t entry;
3150
3151         if (writable) {
3152                 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3153                                          vma->vm_page_prot)));
3154         } else {
3155                 entry = huge_pte_wrprotect(mk_huge_pte(page,
3156                                            vma->vm_page_prot));
3157         }
3158         entry = pte_mkyoung(entry);
3159         entry = pte_mkhuge(entry);
3160         entry = arch_make_huge_pte(entry, vma, page, writable);
3161
3162         return entry;
3163 }
3164
3165 static void set_huge_ptep_writable(struct vm_area_struct *vma,
3166                                    unsigned long address, pte_t *ptep)
3167 {
3168         pte_t entry;
3169
3170         entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
3171         if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
3172                 update_mmu_cache(vma, address, ptep);
3173 }
3174
3175 bool is_hugetlb_entry_migration(pte_t pte)
3176 {
3177         swp_entry_t swp;
3178
3179         if (huge_pte_none(pte) || pte_present(pte))
3180                 return false;
3181         swp = pte_to_swp_entry(pte);
3182         if (non_swap_entry(swp) && is_migration_entry(swp))
3183                 return true;
3184         else
3185                 return false;
3186 }
3187
3188 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
3189 {
3190         swp_entry_t swp;
3191
3192         if (huge_pte_none(pte) || pte_present(pte))
3193                 return 0;
3194         swp = pte_to_swp_entry(pte);
3195         if (non_swap_entry(swp) && is_hwpoison_entry(swp))
3196                 return 1;
3197         else
3198                 return 0;
3199 }
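/*
 * Decoding sketch (illustrative) for the two helpers above: a hugetlb
 * pte that is neither none nor present carries a swap-style entry:
 *
 *	none      -> not mapped yet; a fault will allocate
 *	present   -> ordinary mapped hugepage
 *	migration -> page is being migrated; fault paths wait on it
 *	hwpoison  -> page had a memory error; faults get VM_FAULT_HWPOISON
 */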
3200
3201 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3202                             struct vm_area_struct *vma)
3203 {
3204         pte_t *src_pte, *dst_pte, entry;
3205         struct page *ptepage;
3206         unsigned long addr;
3207         int cow;
3208         struct hstate *h = hstate_vma(vma);
3209         unsigned long sz = huge_page_size(h);
3210         unsigned long mmun_start;       /* For mmu_notifiers */
3211         unsigned long mmun_end;         /* For mmu_notifiers */
3212         int ret = 0;
3213
3214         cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
3215
3216         mmun_start = vma->vm_start;
3217         mmun_end = vma->vm_end;
3218         if (cow)
3219                 mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
3220
3221         for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3222                 spinlock_t *src_ptl, *dst_ptl;
3223                 src_pte = huge_pte_offset(src, addr, sz);
3224                 if (!src_pte)
3225                         continue;
3226                 dst_pte = huge_pte_alloc(dst, addr, sz);
3227                 if (!dst_pte) {
3228                         ret = -ENOMEM;
3229                         break;
3230                 }
3231
3232                 /* If the pagetables are shared don't copy or take references */
3233                 if (dst_pte == src_pte)
3234                         continue;
3235
3236                 dst_ptl = huge_pte_lock(h, dst, dst_pte);
3237                 src_ptl = huge_pte_lockptr(h, src, src_pte);
3238                 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
3239                 entry = huge_ptep_get(src_pte);
3240                 if (huge_pte_none(entry)) { /* skip none entry */
3241                         ;
3242                 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
3243                                     is_hugetlb_entry_hwpoisoned(entry))) {
3244                         swp_entry_t swp_entry = pte_to_swp_entry(entry);
3245
3246                         if (is_write_migration_entry(swp_entry) && cow) {
3247                                 /*
3248                                  * COW mappings require pages in both
3249                                  * parent and child to be set to read.
3250                                  */
3251                                 make_migration_entry_read(&swp_entry);
3252                                 entry = swp_entry_to_pte(swp_entry);
3253                                 set_huge_swap_pte_at(src, addr, src_pte,
3254                                                      entry, sz);
3255                         }
3256                         set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
3257                 } else {
3258                         if (cow) {
3259                                 /*
3260                                  * No need to notify as we are downgrading page
3261                                  * table protection not changing it to point
3262                                  * to a new page.
3263                                  *
3264                                  * See Documentation/vm/mmu_notifier.txt
3265                                  */
3266                                 huge_ptep_set_wrprotect(src, addr, src_pte);
3267                         }
3268                         entry = huge_ptep_get(src_pte);
3269                         ptepage = pte_page(entry);
3270                         get_page(ptepage);
3271                         page_dup_rmap(ptepage, true);
3272                         set_huge_pte_at(dst, addr, dst_pte, entry);
3273                         hugetlb_count_add(pages_per_huge_page(h), dst);
3274                 }
3275                 spin_unlock(src_ptl);
3276                 spin_unlock(dst_ptl);
3277         }
3278
3279         if (cow)
3280                 mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
3281
3282         return ret;
3283 }
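/*
 * Fork-time sketch (illustrative): for a private writable mapping, the
 * loop above write-protects the source pte and copies it, so parent and
 * child both end up with a read-only pte on the shared hugepage:
 *
 *	huge_ptep_set_wrprotect(src, addr, src_pte);
 *	entry = huge_ptep_get(src_pte);
 *	set_huge_pte_at(dst, addr, dst_pte, entry);
 *
 * The first write from either side then faults into hugetlb_cow().
 */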
3284
3285 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3286                             unsigned long start, unsigned long end,
3287                             struct page *ref_page)
3288 {
3289         struct mm_struct *mm = vma->vm_mm;
3290         unsigned long address;
3291         pte_t *ptep;
3292         pte_t pte;
3293         spinlock_t *ptl;
3294         struct page *page;
3295         struct hstate *h = hstate_vma(vma);
3296         unsigned long sz = huge_page_size(h);
3297         const unsigned long mmun_start = start; /* For mmu_notifiers */
3298         const unsigned long mmun_end   = end;   /* For mmu_notifiers */
3299
3300         WARN_ON(!is_vm_hugetlb_page(vma));
3301         BUG_ON(start & ~huge_page_mask(h));
3302         BUG_ON(end & ~huge_page_mask(h));
3303
3304         /*
3305          * This is a hugetlb vma; all the pte entries should point
3306          * to huge pages.
3307          */
3308         tlb_remove_check_page_size_change(tlb, sz);
3309         tlb_start_vma(tlb, vma);
3310         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3311         address = start;
3312         for (; address < end; address += sz) {
3313                 ptep = huge_pte_offset(mm, address, sz);
3314                 if (!ptep)
3315                         continue;
3316
3317                 ptl = huge_pte_lock(h, mm, ptep);
3318                 if (huge_pmd_unshare(mm, &address, ptep)) {
3319                         spin_unlock(ptl);
3320                         continue;
3321                 }
3322
3323                 pte = huge_ptep_get(ptep);
3324                 if (huge_pte_none(pte)) {
3325                         spin_unlock(ptl);
3326                         continue;
3327                 }
3328
3329                 /*
3330                  * Migrating hugepage or HWPoisoned hugepage is already
3331                  * unmapped and its refcount is dropped, so just clear pte here.
3332                  */
3333                 if (unlikely(!pte_present(pte))) {
3334                         huge_pte_clear(mm, address, ptep, sz);
3335                         spin_unlock(ptl);
3336                         continue;
3337                 }
3338
3339                 page = pte_page(pte);
3340                 /*
3341                  * If a reference page is supplied, it is because a specific
3342                  * page is being unmapped, not a range. Ensure the page we
3343                  * are about to unmap is the actual page of interest.
3344                  */
3345                 if (ref_page) {
3346                         if (page != ref_page) {
3347                                 spin_unlock(ptl);
3348                                 continue;
3349                         }
3350                         /*
3351                          * Mark the VMA as having unmapped its page so that
3352                          * future faults in this VMA will fail rather than
3353                          * looking like data was lost.
3354                          */
3355                         set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
3356                 }
3357
3358                 pte = huge_ptep_get_and_clear(mm, address, ptep);
3359                 tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
3360                 if (huge_pte_dirty(pte))
3361                         set_page_dirty(page);
3362
3363                 hugetlb_count_sub(pages_per_huge_page(h), mm);
3364                 page_remove_rmap(page, true);
3365
3366                 spin_unlock(ptl);
3367                 tlb_remove_page_size(tlb, page, huge_page_size(h));
3368                 /*
3369                  * Bail out after unmapping reference page if supplied
3370                  */
3371                 if (ref_page)
3372                         break;
3373         }
3374         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3375         tlb_end_vma(tlb, vma);
3376 }
3377
3378 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
3379                           struct vm_area_struct *vma, unsigned long start,
3380                           unsigned long end, struct page *ref_page)
3381 {
3382         __unmap_hugepage_range(tlb, vma, start, end, ref_page);
3383
3384         /*
3385          * Clear this flag so that x86's huge_pmd_share page_table_shareable
3386          * test will fail on a vma being torn down, and not grab a page table
3387          * on its way out.  We're lucky that the flag has such an appropriate
3388          * name, and can in fact be safely cleared here. We could clear it
3389          * before the __unmap_hugepage_range above, but all that's necessary
3390          * is to clear it before releasing the i_mmap_rwsem. This works
3391          * because in the context this is called, the VMA is about to be
3392          * destroyed and the i_mmap_rwsem is held.
3393          */
3394         vma->vm_flags &= ~VM_MAYSHARE;
3395 }
3396
3397 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
3398                           unsigned long end, struct page *ref_page)
3399 {
3400         struct mm_struct *mm;
3401         struct mmu_gather tlb;
3402
3403         mm = vma->vm_mm;
3404
3405         tlb_gather_mmu(&tlb, mm, start, end);
3406         __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
3407         tlb_finish_mmu(&tlb, start, end);
3408 }
3409
3410 /*
3411  * This is called when the original mapper fails to COW a MAP_PRIVATE
3412  * mapping it owns the reserve page for. The intention is to unmap the page
3413  * from other VMAs and let the children be SIGKILLed if they are faulting the
3414  * same region.
3415  */
3416 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
3417                               struct page *page, unsigned long address)
3418 {
3419         struct hstate *h = hstate_vma(vma);
3420         struct vm_area_struct *iter_vma;
3421         struct address_space *mapping;
3422         pgoff_t pgoff;
3423
3424         /*
3425          * vm_pgoff is in PAGE_SIZE units, hence the calculation differs
3426          * from the page cache lookup, which is in HPAGE_SIZE units.
3427          */
3428         address = address & huge_page_mask(h);
3429         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
3430                         vma->vm_pgoff;
3431         mapping = vma->vm_file->f_mapping;
3432
3433         /*
3434          * Take the mapping lock for the duration of the table walk. As
3435          * this mapping should be shared between all the VMAs,
3436          * __unmap_hugepage_range() is called directly, with the lock already held.
3437          */
3438         i_mmap_lock_write(mapping);
3439         vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
3440                 /* Do not unmap the current VMA */
3441                 if (iter_vma == vma)
3442                         continue;
3443
3444                 /*
3445                  * Shared VMAs have their own reserves and do not affect
3446                  * MAP_PRIVATE accounting but it is possible that a shared
3447                  * VMA is using the same page so check and skip such VMAs.
3448                  */
3449                 if (iter_vma->vm_flags & VM_MAYSHARE)
3450                         continue;
3451
3452                 /*
3453                  * Unmap the page from other VMAs without their own reserves.
3454                  * They get marked to be SIGKILLed if they fault in these
3455                  * areas. This is because a future no-page fault on this VMA
3456                  * could insert a zeroed page instead of the data existing
3457                  * from the time of fork. This would look like data corruption
3458                  */
3459                 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
3460                         unmap_hugepage_range(iter_vma, address,
3461                                              address + huge_page_size(h), page);
3462         }
3463         i_mmap_unlock_write(mapping);
3464 }
3465
3466 /*
3467  * Hugetlb_cow() should be called with page lock of the original hugepage held.
3468  * Called with hugetlb_instantiation_mutex held and pte_page locked so we
3469  * cannot race with other handlers or page migration.
3470  * Keep the pte_same checks anyway to make transition from the mutex easier.
3471  */
3472 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
3473                        unsigned long address, pte_t *ptep,
3474                        struct page *pagecache_page, spinlock_t *ptl)
3475 {
3476         pte_t pte;
3477         struct hstate *h = hstate_vma(vma);
3478         struct page *old_page, *new_page;
3479         int ret = 0, outside_reserve = 0;
3480         unsigned long mmun_start;       /* For mmu_notifiers */
3481         unsigned long mmun_end;         /* For mmu_notifiers */
3482
3483         pte = huge_ptep_get(ptep);
3484         old_page = pte_page(pte);
3485
3486 retry_avoidcopy:
3487         /* If no-one else is actually using this page, avoid the copy
3488          * and just make the page writable */
3489         if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
3490                 page_move_anon_rmap(old_page, vma);
3491                 set_huge_ptep_writable(vma, address, ptep);
3492                 return 0;
3493         }
3494
3495         /*
3496          * If the process that created a MAP_PRIVATE mapping is about to
3497          * perform a COW due to a shared page count, attempt to satisfy
3498          * the allocation without using the existing reserves. The pagecache
3499          * page is used to determine if the reserve at this address was
3500          * consumed or not. If reserves were used, a partially faulted mapping
3501          * at the time of fork() could consume its reserves on COW instead
3502          * of the full address range.
3503          */
3504         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
3505                         old_page != pagecache_page)
3506                 outside_reserve = 1;
3507
3508         get_page(old_page);
3509
3510         /*
3511          * Drop page table lock as buddy allocator may be called. It will
3512          * be acquired again before returning to the caller, as expected.
3513          */
3514         spin_unlock(ptl);
3515         new_page = alloc_huge_page(vma, address, outside_reserve);
3516
3517         if (IS_ERR(new_page)) {
3518                 /*
3519                  * If a process owning a MAP_PRIVATE mapping fails to COW,
3520                  * it is due to references held by a child and an insufficient
3521                  * huge page pool. To guarantee the original mapper's
3522                  * reliability, unmap the page from child processes. The child
3523                  * may get SIGKILLed if it later faults.
3524                  */
3525                 if (outside_reserve) {
3526                         put_page(old_page);
3527                         BUG_ON(huge_pte_none(pte));
3528                         unmap_ref_private(mm, vma, old_page, address);
3529                         BUG_ON(huge_pte_none(pte));
3530                         spin_lock(ptl);
3531                         ptep = huge_pte_offset(mm, address & huge_page_mask(h),
3532                                                huge_page_size(h));
3533                         if (likely(ptep &&
3534                                    pte_same(huge_ptep_get(ptep), pte)))
3535                                 goto retry_avoidcopy;
3536                         /*
3537                          * A race occurred while re-acquiring the page
3538                          * table lock, and our job is done.
3539                          */
3540                         return 0;
3541                 }
3542
3543                 ret = (PTR_ERR(new_page) == -ENOMEM) ?
3544                         VM_FAULT_OOM : VM_FAULT_SIGBUS;
3545                 goto out_release_old;
3546         }
3547
3548         /*
3549          * When the original hugepage is a shared one, it does not have
3550          * anon_vma prepared.
3551          */
3552         if (unlikely(anon_vma_prepare(vma))) {
3553                 ret = VM_FAULT_OOM;
3554                 goto out_release_all;
3555         }
3556
3557         copy_user_huge_page(new_page, old_page, address, vma,
3558                             pages_per_huge_page(h));
3559         __SetPageUptodate(new_page);
3560         set_page_huge_active(new_page);
3561
3562         mmun_start = address & huge_page_mask(h);
3563         mmun_end = mmun_start + huge_page_size(h);
3564         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3565
3566         /*
3567          * Retake the page table lock to check for racing updates
3568          * before the page tables are altered
3569          */
3570         spin_lock(ptl);
3571         ptep = huge_pte_offset(mm, address & huge_page_mask(h),
3572                                huge_page_size(h));
3573         if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
3574                 ClearPagePrivate(new_page);
3575
3576                 /* Break COW */
3577                 huge_ptep_clear_flush(vma, address, ptep);
3578                 mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
3579                 set_huge_pte_at(mm, address, ptep,
3580                                 make_huge_pte(vma, new_page, 1));
3581                 page_remove_rmap(old_page, true);
3582                 hugepage_add_new_anon_rmap(new_page, vma, address);
3583                 /* Make the old page be freed below */
3584                 new_page = old_page;
3585         }
3586         spin_unlock(ptl);
3587         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3588 out_release_all:
3589         restore_reserve_on_error(h, vma, address, new_page);
3590         put_page(new_page);
3591 out_release_old:
3592         put_page(old_page);
3593
3594         spin_lock(ptl); /* Caller expects lock to be held */
3595         return ret;
3596 }
3597
3598 /* Return the pagecache page at a given address within a VMA */
3599 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3600                         struct vm_area_struct *vma, unsigned long address)
3601 {
3602         struct address_space *mapping;
3603         pgoff_t idx;
3604
3605         mapping = vma->vm_file->f_mapping;
3606         idx = vma_hugecache_offset(h, vma, address);
3607
3608         return find_lock_page(mapping, idx);
3609 }
3610
3611 /*
3612  * Return whether there is a pagecache page to back the given address within the VMA.
3613  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
3614  */
3615 static bool hugetlbfs_pagecache_present(struct hstate *h,
3616                         struct vm_area_struct *vma, unsigned long address)
3617 {
3618         struct address_space *mapping;
3619         pgoff_t idx;
3620         struct page *page;
3621
3622         mapping = vma->vm_file->f_mapping;
3623         idx = vma_hugecache_offset(h, vma, address);
3624
3625         page = find_get_page(mapping, idx);
3626         if (page)
3627                 put_page(page);
3628         return page != NULL;
3629 }
3630
3631 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
3632                            pgoff_t idx)
3633 {
3634         struct inode *inode = mapping->host;
3635         struct hstate *h = hstate_inode(inode);
3636         int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3637
3638         if (err)
3639                 return err;
3640         ClearPagePrivate(page);
3641
3642         spin_lock(&inode->i_lock);
3643         inode->i_blocks += blocks_per_huge_page(h);
3644         spin_unlock(&inode->i_lock);
3645         return 0;
3646 }
3647
3648 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
3649                            struct address_space *mapping, pgoff_t idx,
3650                            unsigned long address, pte_t *ptep, unsigned int flags)
3651 {
3652         struct hstate *h = hstate_vma(vma);
3653         int ret = VM_FAULT_SIGBUS;
3654         int anon_rmap = 0;
3655         unsigned long size;
3656         struct page *page;
3657         pte_t new_pte;
3658         spinlock_t *ptl;
3659
3660         /*
3661          * Currently, we are forced to kill the process in the event the
3662          * original mapper has unmapped pages from the child due to a failed
3663          * COW. Warn that such a situation has occurred, as it may not be obvious.
3664          */
3665         if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3666                 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
3667                            current->pid);
3668                 return ret;
3669         }
3670
3671         /*
3672          * Use page lock to guard against racing truncation
3673          * before we get page_table_lock.
3674          */
3675 retry:
3676         page = find_lock_page(mapping, idx);
3677         if (!page) {
3678                 size = i_size_read(mapping->host) >> huge_page_shift(h);
3679                 if (idx >= size)
3680                         goto out;
3681
3682                 /*
3683                  * Check for page in userfault range
3684                  */
3685                 if (userfaultfd_missing(vma)) {
3686                         u32 hash;
3687                         struct vm_fault vmf = {
3688                                 .vma = vma,
3689                                 .address = address,
3690                                 .flags = flags,
3691                                 /*
3692                                  * Hard to debug if it ends up being
3693                                  * used by a callee that assumes
3694                                  * something about the other
3695                                  * uninitialized fields... same as in
3696                                  * memory.c
3697                                  */
3698                         };
3699
3700                         /*
3701                          * hugetlb_fault_mutex must be dropped before
3702                          * handling userfault.  Reacquire after handling
3703                          * fault to make calling code simpler.
3704                          */
3705                         hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping,
3706                                                         idx, address);
3707                         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3708                         ret = handle_userfault(&vmf, VM_UFFD_MISSING);
3709                         mutex_lock(&hugetlb_fault_mutex_table[hash]);
3710                         goto out;
3711                 }
3712
3713                 page = alloc_huge_page(vma, address, 0);
3714                 if (IS_ERR(page)) {
3715                         ret = PTR_ERR(page);
3716                         if (ret == -ENOMEM)
3717                                 ret = VM_FAULT_OOM;
3718                         else
3719                                 ret = VM_FAULT_SIGBUS;
3720                         goto out;
3721                 }
3722                 clear_huge_page(page, address, pages_per_huge_page(h));
3723                 __SetPageUptodate(page);
3724                 set_page_huge_active(page);
3725
3726                 if (vma->vm_flags & VM_MAYSHARE) {
3727                         int err = huge_add_to_page_cache(page, mapping, idx);
3728                         if (err) {
3729                                 put_page(page);
3730                                 if (err == -EEXIST)
3731                                         goto retry;
3732                                 goto out;
3733                         }
3734                 } else {
3735                         lock_page(page);
3736                         if (unlikely(anon_vma_prepare(vma))) {
3737                                 ret = VM_FAULT_OOM;
3738                                 goto backout_unlocked;
3739                         }
3740                         anon_rmap = 1;
3741                 }
3742         } else {
3743                 /*
3744                  * If a memory error occurs between mmap() and fault, some
3745                  * processes don't have a hwpoisoned swap entry for the errored
3746                  * virtual address, so block the hugepage fault with a PG_hwpoison check.
3747                  */
3748                 if (unlikely(PageHWPoison(page))) {
3749                         ret = VM_FAULT_HWPOISON |
3750                                 VM_FAULT_SET_HINDEX(hstate_index(h));
3751                         goto backout_unlocked;
3752                 }
3753         }
3754
3755         /*
3756          * If we are going to COW a private mapping later, we examine the
3757          * pending reservations for this page now. This will ensure that
3758          * any allocations necessary to record that reservation occur outside
3759          * the spinlock.
3760          */
3761         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3762                 if (vma_needs_reservation(h, vma, address) < 0) {
3763                         ret = VM_FAULT_OOM;
3764                         goto backout_unlocked;
3765                 }
3766                 /* Just decrements count, does not deallocate */
3767                 vma_end_reservation(h, vma, address);
3768         }
3769
3770         ptl = huge_pte_lock(h, mm, ptep);
3771         size = i_size_read(mapping->host) >> huge_page_shift(h);
3772         if (idx >= size)
3773                 goto backout;
3774
3775         ret = 0;
3776         if (!huge_pte_none(huge_ptep_get(ptep)))
3777                 goto backout;
3778
3779         if (anon_rmap) {
3780                 ClearPagePrivate(page);
3781                 hugepage_add_new_anon_rmap(page, vma, address);
3782         } else
3783                 page_dup_rmap(page, true);
3784         new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3785                                 && (vma->vm_flags & VM_SHARED)));
3786         set_huge_pte_at(mm, address, ptep, new_pte);
3787
3788         hugetlb_count_add(pages_per_huge_page(h), mm);
3789         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3790                 /* Optimization, do the COW without a second fault */
3791                 ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
3792         }
3793
3794         spin_unlock(ptl);
3795         unlock_page(page);
3796 out:
3797         return ret;
3798
3799 backout:
3800         spin_unlock(ptl);
3801 backout_unlocked:
3802         unlock_page(page);
3803         restore_reserve_on_error(h, vma, address, page);
3804         put_page(page);
3805         goto out;
3806 }
3807
3808 #ifdef CONFIG_SMP
3809 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3810                             struct vm_area_struct *vma,
3811                             struct address_space *mapping,
3812                             pgoff_t idx, unsigned long address)
3813 {
3814         unsigned long key[2];
3815         u32 hash;
3816
3817         if (vma->vm_flags & VM_SHARED) {
3818                 key[0] = (unsigned long) mapping;
3819                 key[1] = idx;
3820         } else {
3821                 key[0] = (unsigned long) mm;
3822                 key[1] = address >> huge_page_shift(h);
3823         }
3824
3825         hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
3826
3827         return hash & (num_fault_mutexes - 1);
3828 }
3829 #else
3830 /*
3831  * For uniprocessor systems we always use a single mutex, so just
3832  * return 0 and avoid the hashing overhead.
3833  */
3834 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3835                             struct vm_area_struct *vma,
3836                             struct address_space *mapping,
3837                             pgoff_t idx, unsigned long address)
3838 {
3839         return 0;
3840 }
3841 #endif
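/*
 * Usage sketch (illustrative), as in hugetlb_fault() below:
 *
 *	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	... allocate and instantiate the page ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 *
 * Shared mappings hash on (mapping, idx), so all tasks faulting the same
 * file page serialize on one mutex; private mappings hash on (mm, address).
 */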
3842
3843 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3844                         unsigned long address, unsigned int flags)
3845 {
3846         pte_t *ptep, entry;
3847         spinlock_t *ptl;
3848         int ret;
3849         u32 hash;
3850         pgoff_t idx;
3851         struct page *page = NULL;
3852         struct page *pagecache_page = NULL;
3853         struct hstate *h = hstate_vma(vma);
3854         struct address_space *mapping;
3855         int need_wait_lock = 0;
3856
3857         address &= huge_page_mask(h);
3858
3859         ptep = huge_pte_offset(mm, address, huge_page_size(h));
3860         if (ptep) {
3861                 entry = huge_ptep_get(ptep);
3862                 if (unlikely(is_hugetlb_entry_migration(entry))) {
3863                         migration_entry_wait_huge(vma, mm, ptep);
3864                         return 0;
3865                 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3866                         return VM_FAULT_HWPOISON_LARGE |
3867                                 VM_FAULT_SET_HINDEX(hstate_index(h));
3868         } else {
3869                 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
3870                 if (!ptep)
3871                         return VM_FAULT_OOM;
3872         }
3873
3874         mapping = vma->vm_file->f_mapping;
3875         idx = vma_hugecache_offset(h, vma, address);
3876
3877         /*
3878          * Serialize hugepage allocation and instantiation, so that we don't
3879          * get spurious allocation failures if two CPUs race to instantiate
3880          * the same page in the page cache.
3881          */
3882         hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
3883         mutex_lock(&hugetlb_fault_mutex_table[hash]);
3884
3885         entry = huge_ptep_get(ptep);
3886         if (huge_pte_none(entry)) {
3887                 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
3888                 goto out_mutex;
3889         }
3890
3891         ret = 0;
3892
3893         /*
3894          * entry could be a migration/hwpoison entry at this point, so this
3895          * check prevents the kernel from proceeding below on the assumption
3896          * that we have an active hugepage in the pagecache. This goto expects
3897          * the 2nd page fault, where the is_hugetlb_entry_(migration|hwpoisoned)
3898          * check will handle it properly.
3899          */
3900         if (!pte_present(entry))
3901                 goto out_mutex;
3902
3903         /*
3904          * If we are going to COW the mapping later, we examine the pending
3905          * reservations for this page now. This will ensure that any
3906          * allocations necessary to record that reservation occur outside the
3907          * spinlock. For private mappings, we also lookup the pagecache
3908          * page now as it is used to determine if a reservation has been
3909          * consumed.
3910          */
3911         if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
3912                 if (vma_needs_reservation(h, vma, address) < 0) {
3913                         ret = VM_FAULT_OOM;
3914                         goto out_mutex;
3915                 }
3916                 /* Just decrements count, does not deallocate */
3917                 vma_end_reservation(h, vma, address);
3918
3919                 if (!(vma->vm_flags & VM_MAYSHARE))
3920                         pagecache_page = hugetlbfs_pagecache_page(h,
3921                                                                 vma, address);
3922         }
3923
3924         ptl = huge_pte_lock(h, mm, ptep);
3925
3926         /* Check for a racing update before calling hugetlb_cow */
3927         if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
3928                 goto out_ptl;
3929
3930         /*
3931          * hugetlb_cow() requires page locks of pte_page(entry) and
3932          * pagecache_page, so here we need to take the former one
3933          * when page != pagecache_page or !pagecache_page.
3934          */
3935         page = pte_page(entry);
3936         if (page != pagecache_page)
3937                 if (!trylock_page(page)) {
3938                         need_wait_lock = 1;
3939                         goto out_ptl;
3940                 }
3941
3942         get_page(page);
3943
3944         if (flags & FAULT_FLAG_WRITE) {
3945                 if (!huge_pte_write(entry)) {
3946                         ret = hugetlb_cow(mm, vma, address, ptep,
3947                                           pagecache_page, ptl);
3948                         goto out_put_page;
3949                 }
3950                 entry = huge_pte_mkdirty(entry);
3951         }
3952         entry = pte_mkyoung(entry);
3953         if (huge_ptep_set_access_flags(vma, address, ptep, entry,
3954                                                 flags & FAULT_FLAG_WRITE))
3955                 update_mmu_cache(vma, address, ptep);
3956 out_put_page:
3957         if (page != pagecache_page)
3958                 unlock_page(page);
3959         put_page(page);
3960 out_ptl:
3961         spin_unlock(ptl);
3962
3963         if (pagecache_page) {
3964                 unlock_page(pagecache_page);
3965                 put_page(pagecache_page);
3966         }
3967 out_mutex:
3968         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3969         /*
3970          * Generally it's safe to hold a refcount while waiting for the page
3971          * lock. But here we just wait to defer the next page fault and avoid
3972          * a busy loop; the page is not used after being unlocked before the
3973          * current page fault returns. So we are safe from accessing a freed
3974          * page, even if we wait here without taking a refcount.
3975          */
3976         if (need_wait_lock)
3977                 wait_on_page_locked(page);
3978         return ret;
3979 }
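/*
 * Lock-ordering summary (illustrative) for the fault path above:
 *
 *	hugetlb_fault_mutex_table[hash]
 *	  -> page lock (pagecache_page and/or pte_page)
 *	    -> page table lock (huge_pte_lock)
 *
 * hugetlb_cow() is entered with the page table lock held and returns with
 * it held, dropping it internally around the hugepage allocation.
 */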
3980
3981 /*
3982  * Used by userfaultfd UFFDIO_COPY.  Based on mcopy_atomic_pte with
3983  * modifications for huge pages.
3984  */
3985 int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
3986                             pte_t *dst_pte,
3987                             struct vm_area_struct *dst_vma,
3988                             unsigned long dst_addr,
3989                             unsigned long src_addr,
3990                             struct page **pagep)
3991 {
3992         struct address_space *mapping;
3993         pgoff_t idx;
3994         unsigned long size;
3995         int vm_shared = dst_vma->vm_flags & VM_SHARED;
3996         struct hstate *h = hstate_vma(dst_vma);
3997         pte_t _dst_pte;
3998         spinlock_t *ptl;
3999         int ret;
4000         struct page *page;
4001
4002         if (!*pagep) {
4003                 ret = -ENOMEM;
4004                 page = alloc_huge_page(dst_vma, dst_addr, 0);
4005                 if (IS_ERR(page))
4006                         goto out;
4007
4008                 ret = copy_huge_page_from_user(page,
4009                                                 (const void __user *) src_addr,
4010                                                 pages_per_huge_page(h), false);
4011
4012                 /* fallback to copy_from_user outside mmap_sem */
4013                 if (unlikely(ret)) {
4014                         ret = -EFAULT;
4015                         *pagep = page;
4016                         /* don't free the page */
4017                         goto out;
4018                 }
4019         } else {
4020                 page = *pagep;
4021                 *pagep = NULL;
4022         }
4023
4024         /*
4025          * The memory barrier inside __SetPageUptodate makes sure that
4026          * preceding stores to the page contents become visible before
4027          * the set_pte_at() write.
4028          */
4029         __SetPageUptodate(page);
4030         set_page_huge_active(page);
4031
4032         mapping = dst_vma->vm_file->f_mapping;
4033         idx = vma_hugecache_offset(h, dst_vma, dst_addr);
4034
4035         /*
4036          * If shared, add to page cache
4037          */
4038         if (vm_shared) {
4039                 size = i_size_read(mapping->host) >> huge_page_shift(h);
4040                 ret = -EFAULT;
4041                 if (idx >= size)
4042                         goto out_release_nounlock;
4043
4044                 /*
4045                  * Serialization between remove_inode_hugepages() and
4046                  * huge_add_to_page_cache() below happens through the
4047                  * hugetlb_fault_mutex_table, which must be held here by
4048                  * the caller.
4049                  */
4050                 ret = huge_add_to_page_cache(page, mapping, idx);
4051                 if (ret)
4052                         goto out_release_nounlock;
4053         }
4054
4055         ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
4056         spin_lock(ptl);
4057
4058         /*
4059          * Recheck the i_size after holding PT lock to make sure not
4060          * to leave any page mapped (as page_mapped()) beyond the end
4061          * of the i_size (remove_inode_hugepages() is strict about
4062          * enforcing that). If we bail out here, we'll also leave a
4063          * page in the radix tree in the vm_shared case beyond the end
4064          * of the i_size, but remove_inode_hugepages() will take care
4065          * of it as soon as we drop the hugetlb_fault_mutex_table.
4066          */
4067         size = i_size_read(mapping->host) >> huge_page_shift(h);
4068         ret = -EFAULT;
4069         if (idx >= size)
4070                 goto out_release_unlock;
4071
4072         ret = -EEXIST;
4073         if (!huge_pte_none(huge_ptep_get(dst_pte)))
4074                 goto out_release_unlock;
4075
4076         if (vm_shared) {
4077                 page_dup_rmap(page, true);
4078         } else {
4079                 ClearPagePrivate(page);
4080                 hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
4081         }
4082
4083         _dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE);
4084         if (dst_vma->vm_flags & VM_WRITE)
4085                 _dst_pte = huge_pte_mkdirty(_dst_pte);
4086         _dst_pte = pte_mkyoung(_dst_pte);
4087
4088         set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
4089
4090         (void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
4091                                         dst_vma->vm_flags & VM_WRITE);
4092         hugetlb_count_add(pages_per_huge_page(h), dst_mm);
4093
4094         /* No need to invalidate - it was non-present before */
4095         update_mmu_cache(dst_vma, dst_addr, dst_pte);
4096
4097         spin_unlock(ptl);
4098         if (vm_shared)
4099                 unlock_page(page);
4100         ret = 0;
4101 out:
4102         return ret;
4103 out_release_unlock:
4104         spin_unlock(ptl);
4105         if (vm_shared)
4106                 unlock_page(page);
4107 out_release_nounlock:
4108         put_page(page);
4109         goto out;
4110 }
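/*
 * Userspace sketch (illustrative): this is the hugetlb backend for the
 * userfaultfd UFFDIO_COPY ioctl, roughly driven like:
 *
 *	struct uffdio_copy copy = {
 *		.dst  = dst_addr,		(huge-page aligned)
 *		.src  = (unsigned long)src_buf,
 *		.len  = huge_page_size,
 *		.mode = 0,
 *	};
 *	ioctl(uffd, UFFDIO_COPY, &copy);
 *
 * When copy_huge_page_from_user() fails, -EFAULT is returned and the page
 * is handed back via *pagep so the caller can retry the copy outside
 * mmap_sem.
 */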
4111
4112 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
4113                          struct page **pages, struct vm_area_struct **vmas,
4114                          unsigned long *position, unsigned long *nr_pages,
4115                          long i, unsigned int flags, int *nonblocking)
4116 {
4117         unsigned long pfn_offset;
4118         unsigned long vaddr = *position;
4119         unsigned long remainder = *nr_pages;
4120         struct hstate *h = hstate_vma(vma);
4121         int err = -EFAULT;
4122
4123         while (vaddr < vma->vm_end && remainder) {
4124                 pte_t *pte;
4125                 spinlock_t *ptl = NULL;
4126                 int absent;
4127                 struct page *page;
4128
4129                 /*
4130                  * If we have a pending SIGKILL, don't keep faulting pages and
4131                  * potentially allocating memory.
4132                  */
4133                 if (unlikely(fatal_signal_pending(current))) {
4134                         remainder = 0;
4135                         break;
4136                 }
4137
4138                 /*
4139                  * Some archs (sparc64, sh*) have multiple pte_t entries
4140                  * for each hugepage.  We have to make sure we get the
4141                  * first, for the page indexing below to work.
4142                  *
4143                  * Note that page table lock is not held when pte is null.
4144                  */
4145                 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
4146                                       huge_page_size(h));
4147                 if (pte)
4148                         ptl = huge_pte_lock(h, mm, pte);
4149                 absent = !pte || huge_pte_none(huge_ptep_get(pte));
4150
4151                 /*
4152                  * When coredumping, it suits get_dump_page if we just return
4153                  * an error where there's an empty slot with no huge pagecache
4154                  * to back it.  This way, we avoid allocating a hugepage, and
4155                  * the sparse dumpfile avoids allocating disk blocks, but its
4156                  * huge holes still show up with zeroes where they need to be.
4157                  */
4158                 if (absent && (flags & FOLL_DUMP) &&
4159                     !hugetlbfs_pagecache_present(h, vma, vaddr)) {
4160                         if (pte)
4161                                 spin_unlock(ptl);
4162                         remainder = 0;
4163                         break;
4164                 }
4165
4166                 /*
4167                  * We need to call hugetlb_fault for both hugepages under
4168                  * migration (in which case hugetlb_fault waits for the migration)
4169                  * and hwpoisoned hugepages (in which case we need to prevent the
4170                  * caller from accessing them). To do this we use is_swap_pte
4171                  * here instead of is_hugetlb_entry_migration and
4172                  * is_hugetlb_entry_hwpoisoned, because it simply covers both
4173                  * cases, and because we can't follow correct pages directly
4174                  * from any kind of swap entry.
4175                  */
4176                 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
4177                     ((flags & FOLL_WRITE) &&
4178                       !huge_pte_write(huge_ptep_get(pte)))) {
4179                         int ret;
4180                         unsigned int fault_flags = 0;
4181
4182                         if (pte)
4183                                 spin_unlock(ptl);
4184                         if (flags & FOLL_WRITE)
4185                                 fault_flags |= FAULT_FLAG_WRITE;
4186                         if (nonblocking)
4187                                 fault_flags |= FAULT_FLAG_ALLOW_RETRY;
4188                         if (flags & FOLL_NOWAIT)
4189                                 fault_flags |= FAULT_FLAG_ALLOW_RETRY |
4190                                         FAULT_FLAG_RETRY_NOWAIT;
4191                         if (flags & FOLL_TRIED) {
4192                                 VM_WARN_ON_ONCE(fault_flags &
4193                                                 FAULT_FLAG_ALLOW_RETRY);
4194                                 fault_flags |= FAULT_FLAG_TRIED;
4195                         }
4196                         ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
4197                         if (ret & VM_FAULT_ERROR) {
4198                                 err = vm_fault_to_errno(ret, flags);
4199                                 remainder = 0;
4200                                 break;
4201                         }
4202                         if (ret & VM_FAULT_RETRY) {
4203                                 if (nonblocking)
4204                                         *nonblocking = 0;
4205                                 *nr_pages = 0;
4206                                 /*
4207                                  * VM_FAULT_RETRY must not return an
4208                                  * error, it will return zero
4209                                  * instead.
4210                                  *
4211                                  * No need to update "position" as the
4212                                  * caller will not check it after
4213                                  * *nr_pages is set to 0.
4214                                  */
4215                                 return i;
4216                         }
4217                         continue;
4218                 }
4219
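                /*
                 * The huge pte is present and accessible: hand back the
                 * constituent base pages one at a time, reusing this pte
                 * mapping (see the same_page loop below).
                 */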
4220                 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
4221                 page = pte_page(huge_ptep_get(pte));
4222 same_page:
4223                 if (pages) {
4224                         pages[i] = mem_map_offset(page, pfn_offset);
4225                         get_page(pages[i]);
4226                 }
4227
4228                 if (vmas)
4229                         vmas[i] = vma;
4230
4231                 vaddr += PAGE_SIZE;
4232                 ++pfn_offset;
4233                 --remainder;
4234                 ++i;
4235                 if (vaddr < vma->vm_end && remainder &&
4236                                 pfn_offset < pages_per_huge_page(h)) {
4237                         /*
4238                          * We use pfn_offset to avoid touching the pageframes
4239                          * of this compound page.
4240                          */
4241                         goto same_page;
4242                 }
4243                 spin_unlock(ptl);
4244         }
4245         *nr_pages = remainder;
4246         /*
4247          * Setting position is actually required only if remainder is
4248          * not zero, but it's faster not to add an "if (remainder)"
4249          * branch.
4250          */
4251         *position = vaddr;
4252
4253         return i ? i : err;
4254 }
4255
4256 #ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
4257 /*
4258  * Architectures with special requirements for flushing hugetlb-backing TLB
4259  * entries can provide their own version; otherwise flush_tlb_range() is used.
4260  */
4261 #define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
4262 #endif
4263
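/*
 * Change the protection of all hugetlb ptes in the range [address, end).
 * Shared pmds are unshared rather than modified.  Returns the number of
 * base pages whose mapping was effectively changed (huge pages scaled by
 * the hstate order).
 */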
4264 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
4265                 unsigned long address, unsigned long end, pgprot_t newprot)
4266 {
4267         struct mm_struct *mm = vma->vm_mm;
4268         unsigned long start = address;
4269         pte_t *ptep;
4270         pte_t pte;
4271         struct hstate *h = hstate_vma(vma);
4272         unsigned long pages = 0;
4273
4274         BUG_ON(address >= end);
4275         flush_cache_range(vma, address, end);
4276
4277         mmu_notifier_invalidate_range_start(mm, start, end);
4278         i_mmap_lock_write(vma->vm_file->f_mapping);
4279         for (; address < end; address += huge_page_size(h)) {
4280                 spinlock_t *ptl;
4281                 ptep = huge_pte_offset(mm, address, huge_page_size(h));
4282                 if (!ptep)
4283                         continue;
4284                 ptl = huge_pte_lock(h, mm, ptep);
4285                 if (huge_pmd_unshare(mm, &address, ptep)) {
4286                         pages++;
4287                         spin_unlock(ptl);
4288                         continue;
4289                 }
4290                 pte = huge_ptep_get(ptep);
4291                 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
4292                         spin_unlock(ptl);
4293                         continue;
4294                 }
4295                 if (unlikely(is_hugetlb_entry_migration(pte))) {
4296                         swp_entry_t entry = pte_to_swp_entry(pte);
4297
4298                         if (is_write_migration_entry(entry)) {
4299                                 pte_t newpte;
4300
4301                                 make_migration_entry_read(&entry);
4302                                 newpte = swp_entry_to_pte(entry);
4303                                 set_huge_swap_pte_at(mm, address, ptep,
4304                                                      newpte, huge_page_size(h));
4305                                 pages++;
4306                         }
4307                         spin_unlock(ptl);
4308                         continue;
4309                 }
4310                 if (!huge_pte_none(pte)) {
4311                         pte = huge_ptep_get_and_clear(mm, address, ptep);
4312                         pte = pte_mkhuge(huge_pte_modify(pte, newprot));
4313                         pte = arch_make_huge_pte(pte, vma, NULL, 0);
4314                         set_huge_pte_at(mm, address, ptep, pte);
4315                         pages++;
4316                 }
4317                 spin_unlock(ptl);
4318         }
4319         /*
4320          * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
4321          * may have cleared our pud entry and done put_page on the page table:
4322          * once we release i_mmap_rwsem, another task can do the final put_page
4323          * and that page table be reused and filled with junk.
4324          */
4325         flush_hugetlb_tlb_range(vma, start, end);
4326         /*
4327          * No need to call mmu_notifier_invalidate_range(): we are downgrading
4328          * page table protection, not changing it to point to a new page.
4329          *
4330          * See Documentation/vm/mmu_notifier.txt
4331          */
4332         i_mmap_unlock_write(vma->vm_file->f_mapping);
4333         mmu_notifier_invalidate_range_end(mm, start, end);
4334
4335         return pages << h->order;
4336 }
4337
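/*
 * Reserve huge pages for the range [from, to) of huge-page-sized offsets
 * in @inode.  @vma may be NULL for a shm-style mapping, in which case the
 * reservation is tracked in the inode's reserve map.  Returns 0 on
 * success or a negative error code.
 */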
4338 int hugetlb_reserve_pages(struct inode *inode,
4339                                         long from, long to,
4340                                         struct vm_area_struct *vma,
4341                                         vm_flags_t vm_flags)
4342 {
4343         long ret, chg;
4344         struct hstate *h = hstate_inode(inode);
4345         struct hugepage_subpool *spool = subpool_inode(inode);
4346         struct resv_map *resv_map;
4347         long gbl_reserve;
4348
4349         /*
4350          * Only apply hugepage reservation if asked. At fault time, a
4351          * VM_NORESERVE mapping will attempt to allocate a page without
4352          * using reserves.
4353          */
4354         if (vm_flags & VM_NORESERVE)
4355                 return 0;
4356
4357         /*
4358          * Shared mappings base their reservation on the number of pages that
4359          * are already allocated on behalf of the file. Private mappings need
4360          * to reserve the full area even if read-only as mprotect() may be
4361          * called to make the mapping read-write. Assume !vma is a shm mapping.
4362          */
4363         if (!vma || vma->vm_flags & VM_MAYSHARE) {
4364                 resv_map = inode_resv_map(inode);
4365
4366                 chg = region_chg(resv_map, from, to);
4367
4368         } else {
4369                 resv_map = resv_map_alloc();
4370                 if (!resv_map)
4371                         return -ENOMEM;
4372
4373                 chg = to - from;
4374
4375                 set_vma_resv_map(vma, resv_map);
4376                 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
4377         }
4378
4379         if (chg < 0) {
4380                 ret = chg;
4381                 goto out_err;
4382         }
4383
4384         /*
4385          * There must be enough pages in the subpool for the mapping. If
4386          * the subpool has a minimum size, there may be some global
4387          * reservations already in place (gbl_reserve).
4388          */
4389         gbl_reserve = hugepage_subpool_get_pages(spool, chg);
4390         if (gbl_reserve < 0) {
4391                 ret = -ENOSPC;
4392                 goto out_err;
4393         }
4394
4395         /*
4396          * Check that enough hugepages are available for the reservation.
4397          * Hand the pages back to the subpool if there are not.
4398          */
4399         ret = hugetlb_acct_memory(h, gbl_reserve);
4400         if (ret < 0) {
4401                 /* put back original number of pages, chg */
4402                 (void)hugepage_subpool_put_pages(spool, chg);
4403                 goto out_err;
4404         }
4405
4406         /*
4407          * Account for the reservations made. Shared mappings record regions
4408          * that have reservations as they are shared by multiple VMAs.
4409          * When the last VMA disappears, the region map says how much
4410          * the reservation was and the page cache tells how much of
4411          * the reservation was consumed. Private mappings are per-VMA and
4412          * only the consumed reservations are tracked. When the VMA
4413          * disappears, the original reservation is the VMA size and the
4414          * consumed reservations are stored in the map. Hence, nothing
4415          * else has to be done for private mappings here.
4416          */
4417         if (!vma || vma->vm_flags & VM_MAYSHARE) {
4418                 long add = region_add(resv_map, from, to);
4419
4420                 if (unlikely(chg > add)) {
4421                         /*
4422                          * pages in this range were added to the reserve
4423                          * map between region_chg and region_add.  This
4424                          * indicates a race with alloc_huge_page.  Adjust
4425                          * the subpool and reserve counts modified above
4426                          * based on the difference.
4427                          */
4428                         long rsv_adjust;
4429
4430                         rsv_adjust = hugepage_subpool_put_pages(spool,
4431                                                                 chg - add);
4432                         hugetlb_acct_memory(h, -rsv_adjust);
4433                 }
4434         }
4435         return 0;
4436 out_err:
4437         if (!vma || vma->vm_flags & VM_MAYSHARE)
4438                 /* Don't call region_abort if region_chg failed */
4439                 if (chg >= 0)
4440                         region_abort(resv_map, from, to);
4441         if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4442                 kref_put(&resv_map->refs, resv_map_release);
4443         return ret;
4444 }
4445
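/*
 * Release the reservation for the range [start, end) of @inode, of which
 * @freed huge pages have already been freed back to the pool.  Updates
 * the reserve map, the inode block count and the subpool/global reserve
 * counts.  Returns 0 on success or a negative error from region_del().
 */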
4446 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
4447                                                                 long freed)
4448 {
4449         struct hstate *h = hstate_inode(inode);
4450         struct resv_map *resv_map = inode_resv_map(inode);
4451         long chg = 0;
4452         struct hugepage_subpool *spool = subpool_inode(inode);
4453         long gbl_reserve;
4454
4455         if (resv_map) {
4456                 chg = region_del(resv_map, start, end);
4457                 /*
4458                  * region_del() can fail in the rare case where a region
4459                  * must be split and another region descriptor cannot be
4460                  * allocated.  If end == LONG_MAX, it will not fail.
4461                  */
4462                 if (chg < 0)
4463                         return chg;
4464         }
4465
4466         spin_lock(&inode->i_lock);
4467         inode->i_blocks -= (blocks_per_huge_page(h) * freed);
4468         spin_unlock(&inode->i_lock);
4469
4470         /*
4471          * If the subpool has a minimum size, the number of global
4472          * reservations to be released may be adjusted.
4473          */
4474         gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
4475         hugetlb_acct_memory(h, -gbl_reserve);
4476
4477         return 0;
4478 }
4479
4480 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
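/*
 * Return the address in @svma that corresponds to @addr in @vma if both
 * VMAs could share the PUD-sized page table page covering it, or 0 if the
 * flags, alignment or range make sharing impossible.
 */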
4481 static unsigned long page_table_shareable(struct vm_area_struct *svma,
4482                                 struct vm_area_struct *vma,
4483                                 unsigned long addr, pgoff_t idx)
4484 {
4485         unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
4486                                 svma->vm_start;
4487         unsigned long sbase = saddr & PUD_MASK;
4488         unsigned long s_end = sbase + PUD_SIZE;
4489
4490         /* Allow segments to share if only one is marked locked */
4491         unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
4492         unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
4493
4494         /*
4495          * Match the virtual addresses, permissions and the alignment of
4496          * the page table page.
4497          */
4498         if (pmd_index(addr) != pmd_index(saddr) ||
4499             vm_flags != svm_flags ||
4500             sbase < svma->vm_start || svma->vm_end < s_end)
4501                 return 0;
4502
4503         return saddr;
4504 }
4505
4506 static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
4507 {
4508         unsigned long base = addr & PUD_MASK;
4509         unsigned long end = base + PUD_SIZE;
4510
4511         /*
4512          * Check for proper vm_flags and page table alignment.
4513          */
4514         if (vma->vm_flags & VM_MAYSHARE &&
4515             vma->vm_start <= base && end <= vma->vm_end)
4516                 return true;
4517         return false;
4518 }
4519
4520 /*
4521  * Search for a shareable pmd page for hugetlb. In any case it calls pmd_alloc()
4522  * and returns the corresponding pte. While this is not necessary for the
4523  * !shared pmd case because we can allocate the pmd later as well, it makes the
4524  * code much cleaner. pmd allocation is essential for the shared case because
4525  * pud has to be populated inside the same i_mmap_rwsem section - otherwise
4526  * racing tasks could either miss the sharing (see huge_pte_offset) or select a
4527  * bad pmd for sharing.
4528  */
4529 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4530 {
4531         struct vm_area_struct *vma = find_vma(mm, addr);
4532         struct address_space *mapping = vma->vm_file->f_mapping;
4533         pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
4534                         vma->vm_pgoff;
4535         struct vm_area_struct *svma;
4536         unsigned long saddr;
4537         pte_t *spte = NULL;
4538         pte_t *pte;
4539         spinlock_t *ptl;
4540
4541         if (!vma_shareable(vma, addr))
4542                 return (pte_t *)pmd_alloc(mm, pud, addr);
4543
4544         i_mmap_lock_write(mapping);
4545         vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
4546                 if (svma == vma)
4547                         continue;
4548
4549                 saddr = page_table_shareable(svma, vma, addr, idx);
4550                 if (saddr) {
4551                         spte = huge_pte_offset(svma->vm_mm, saddr,
4552                                                vma_mmu_pagesize(svma));
4553                         if (spte) {
4554                                 get_page(virt_to_page(spte));
4555                                 break;
4556                         }
4557                 }
4558         }
4559
4560         if (!spte)
4561                 goto out;
4562
4563         ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
4564         if (pud_none(*pud)) {
4565                 pud_populate(mm, pud,
4566                                 (pmd_t *)((unsigned long)spte & PAGE_MASK));
4567                 mm_inc_nr_pmds(mm);
4568         } else {
4569                 put_page(virt_to_page(spte));
4570         }
4571         spin_unlock(ptl);
4572 out:
4573         pte = (pte_t *)pmd_alloc(mm, pud, addr);
4574         i_mmap_unlock_write(mapping);
4575         return pte;
4576 }
4577
4578 /*
4579  * Unmap a huge page backed by a shared pte.
4580  *
4581  * The hugetlb pte page is refcounted at the time of mapping.  If the pte is
4582  * shared (indicated by page_count > 1), unmapping is achieved by clearing the
4583  * pud and decrementing the refcount. If count == 1, the pte page is not shared.
4584  *
4585  * Called with the page table lock held.
4586  *
4587  * Returns: 1 if a shared pte page was successfully unmapped
4588  *          0 if the underlying pte page is not shared, or it is the last user
4589  */
4590 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4591 {
4592         pgd_t *pgd = pgd_offset(mm, *addr);
4593         p4d_t *p4d = p4d_offset(pgd, *addr);
4594         pud_t *pud = pud_offset(p4d, *addr);
4595
4596         BUG_ON(page_count(virt_to_page(ptep)) == 0);
4597         if (page_count(virt_to_page(ptep)) == 1)
4598                 return 0;
4599
4600         pud_clear(pud);
4601         put_page(virt_to_page(ptep));
4602         mm_dec_nr_pmds(mm);
4603         *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
4604         return 1;
4605 }
4606 #define want_pmd_share()        (1)
4607 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
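/*
 * Without CONFIG_ARCH_WANT_HUGE_PMD_SHARE, pmd sharing is unavailable:
 * huge_pmd_share() is never called (want_pmd_share() is 0) and
 * huge_pmd_unshare() reports every pte page as unshared.
 */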
4608 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4609 {
4610         return NULL;
4611 }
4612
4613 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4614 {
4615         return 0;
4616 }
4617 #define want_pmd_share()        (0)
4618 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4619
4620 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
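/*
 * Allocate the page table entries needed to map a huge page of size @sz at
 * @addr: the pud entry itself for PUD_SIZE pages, or a pmd (shared with
 * other mappings where possible) for PMD_SIZE pages.  Returns NULL if the
 * intermediate tables cannot be allocated.
 */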
4621 pte_t *huge_pte_alloc(struct mm_struct *mm,
4622                         unsigned long addr, unsigned long sz)
4623 {
4624         pgd_t *pgd;
4625         p4d_t *p4d;
4626         pud_t *pud;
4627         pte_t *pte = NULL;
4628
4629         pgd = pgd_offset(mm, addr);
4630         p4d = p4d_offset(pgd, addr);
4631         pud = pud_alloc(mm, p4d, addr);
4632         if (pud) {
4633                 if (sz == PUD_SIZE) {
4634                         pte = (pte_t *)pud;
4635                 } else {
4636                         BUG_ON(sz != PMD_SIZE);
4637                         if (want_pmd_share() && pud_none(*pud))
4638                                 pte = huge_pmd_share(mm, addr, pud);
4639                         else
4640                                 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4641                 }
4642         }
4643         BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
4644
4645         return pte;
4646 }
4647
4648 /*
4649  * huge_pte_offset() - Walk the page table to resolve the hugepage
4650  * entry at address @addr
4651  *
4652  * Return: Pointer to page table or swap entry (PUD or PMD) for
4653  * address @addr, or NULL if a p*d_none() entry is encountered and the
4654  * size @sz doesn't match the hugepage size at this level of the page
4655  * table.
4656  */
4657 pte_t *huge_pte_offset(struct mm_struct *mm,
4658                        unsigned long addr, unsigned long sz)
4659 {
4660         pgd_t *pgd;
4661         p4d_t *p4d;
4662         pud_t *pud;
4663         pmd_t *pmd;
4664
4665         pgd = pgd_offset(mm, addr);
4666         if (!pgd_present(*pgd))
4667                 return NULL;
4668         p4d = p4d_offset(pgd, addr);
4669         if (!p4d_present(*p4d))
4670                 return NULL;
4671
4672         pud = pud_offset(p4d, addr);
4673         if (sz != PUD_SIZE && pud_none(*pud))
4674                 return NULL;
4675         /* hugepage or swap? */
4676         if (pud_huge(*pud) || !pud_present(*pud))
4677                 return (pte_t *)pud;
4678
4679         pmd = pmd_offset(pud, addr);
4680         if (sz != PMD_SIZE && pmd_none(*pmd))
4681                 return NULL;
4682         /* hugepage or swap? */
4683         if (pmd_huge(*pmd) || !pmd_present(*pmd))
4684                 return (pte_t *)pmd;
4685
4686         return NULL;
4687 }
4688
4689 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
4690
4691 /*
4692  * These functions can be overridden if your architecture needs its own
4693  * behavior.
4694  */
4695 struct page * __weak
4696 follow_huge_addr(struct mm_struct *mm, unsigned long address,
4697                               int write)
4698 {
4699         return ERR_PTR(-EINVAL);
4700 }
4701
4702 struct page * __weak
4703 follow_huge_pd(struct vm_area_struct *vma,
4704                unsigned long address, hugepd_t hpd, int flags, int pdshift)
4705 {
4706         WARN(1, "hugepd follow called with no support for hugepage directory format\n");
4707         return NULL;
4708 }
4709
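/*
 * Resolve the page backing a PMD-level huge mapping for follow_page_mask().
 * Waits for a migration entry and retries; returns NULL if the entry is no
 * longer huge or is hwpoisoned.  Takes a page reference when FOLL_GET is set.
 */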
4710 struct page * __weak
4711 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
4712                 pmd_t *pmd, int flags)
4713 {
4714         struct page *page = NULL;
4715         spinlock_t *ptl;
4716         pte_t pte;
4717 retry:
4718         ptl = pmd_lockptr(mm, pmd);
4719         spin_lock(ptl);
4720         /*
4721          * Make sure that the address range covered by this pmd is not
4722          * unmapped by other threads.
4723          */
4724         if (!pmd_huge(*pmd))
4725                 goto out;
4726         pte = huge_ptep_get((pte_t *)pmd);
4727         if (pte_present(pte)) {
4728                 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
4729                 if (flags & FOLL_GET)
4730                         get_page(page);
4731         } else {
4732                 if (is_hugetlb_entry_migration(pte)) {
4733                         spin_unlock(ptl);
4734                         __migration_entry_wait(mm, (pte_t *)pmd, ptl);
4735                         goto retry;
4736                 }
4737                 /*
4738                  * hwpoisoned entry is treated as no_page_table in
4739                  * follow_page_mask().
4740                  */
4741         }
4742 out:
4743         spin_unlock(ptl);
4744         return page;
4745 }
4746
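/*
 * PUD- and PGD-level lookups below do not support FOLL_GET: callers asking
 * for a reference get NULL instead.
 */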
4747 struct page * __weak
4748 follow_huge_pud(struct mm_struct *mm, unsigned long address,
4749                 pud_t *pud, int flags)
4750 {
4751         if (flags & FOLL_GET)
4752                 return NULL;
4753
4754         return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
4755 }
4756
4757 struct page * __weak
4758 follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
4759 {
4760         if (flags & FOLL_GET)
4761                 return NULL;
4762
4763         return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
4764 }
4765
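/*
 * Isolate a huge page for migration: move it from its hstate's active list
 * to @list.  Returns true and holds a page reference on success; returns
 * false if the page is not active or its refcount has already hit zero.
 */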
4766 bool isolate_huge_page(struct page *page, struct list_head *list)
4767 {
4768         bool ret = true;
4769
4770         VM_BUG_ON_PAGE(!PageHead(page), page);
4771         spin_lock(&hugetlb_lock);
4772         if (!page_huge_active(page) || !get_page_unless_zero(page)) {
4773                 ret = false;
4774                 goto unlock;
4775         }
4776         clear_page_huge_active(page);
4777         list_move_tail(&page->lru, list);
4778 unlock:
4779         spin_unlock(&hugetlb_lock);
4780         return ret;
4781 }
4782
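/*
 * Undo isolate_huge_page(): mark the page active again, move it back onto
 * its hstate's active list and drop the reference taken at isolation time.
 */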
4783 void putback_active_hugepage(struct page *page)
4784 {
4785         VM_BUG_ON_PAGE(!PageHead(page), page);
4786         spin_lock(&hugetlb_lock);
4787         set_page_huge_active(page);
4788         list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
4789         spin_unlock(&hugetlb_lock);
4790         put_page(page);
4791 }