mm/hugetlb.c
1 /*
2  * Generic hugetlb support.
3  * (C) Nadia Yvette Chambers, April 2004
4  */
5 #include <linux/list.h>
6 #include <linux/init.h>
7 #include <linux/mm.h>
8 #include <linux/seq_file.h>
9 #include <linux/sysctl.h>
10 #include <linux/highmem.h>
11 #include <linux/mmu_notifier.h>
12 #include <linux/nodemask.h>
13 #include <linux/pagemap.h>
14 #include <linux/mempolicy.h>
15 #include <linux/compiler.h>
16 #include <linux/cpuset.h>
17 #include <linux/mutex.h>
18 #include <linux/bootmem.h>
19 #include <linux/sysfs.h>
20 #include <linux/slab.h>
21 #include <linux/sched/signal.h>
22 #include <linux/rmap.h>
23 #include <linux/string_helpers.h>
24 #include <linux/swap.h>
25 #include <linux/swapops.h>
26 #include <linux/jhash.h>
27
28 #include <asm/page.h>
29 #include <asm/pgtable.h>
30 #include <asm/tlb.h>
31
32 #include <linux/io.h>
33 #include <linux/hugetlb.h>
34 #include <linux/hugetlb_cgroup.h>
35 #include <linux/node.h>
36 #include <linux/userfaultfd_k.h>
37 #include "internal.h"
38
39 int hugetlb_max_hstate __read_mostly;
40 unsigned int default_hstate_idx;
41 struct hstate hstates[HUGE_MAX_HSTATE];
42 /*
43  * Minimum page order among possible hugepage sizes, set to a proper value
44  * at boot time.
45  */
46 static unsigned int minimum_order __read_mostly = UINT_MAX;
47
48 __initdata LIST_HEAD(huge_boot_pages);
49
50 /* for command line parsing */
51 static struct hstate * __initdata parsed_hstate;
52 static unsigned long __initdata default_hstate_max_huge_pages;
53 static unsigned long __initdata default_hstate_size;
54 static bool __initdata parsed_valid_hugepagesz = true;
55
56 /*
57  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
58  * free_huge_pages, and surplus_huge_pages.
59  */
60 DEFINE_SPINLOCK(hugetlb_lock);
61
62 /*
63  * Serializes faults on the same logical page.  This is used to
64  * prevent spurious OOMs when the hugepage pool is fully utilized.
65  */
66 static int num_fault_mutexes;
67 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
68
69 /* Forward declaration */
70 static int hugetlb_acct_memory(struct hstate *h, long delta);
71
72 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
73 {
74         bool free = (spool->count == 0) && (spool->used_hpages == 0);
75
76         spin_unlock(&spool->lock);
77
78         /* If no pages are used, and no other handles to the subpool
79          * remain, give up any reservations based on minimum size and
80          * free the subpool */
81         if (free) {
82                 if (spool->min_hpages != -1)
83                         hugetlb_acct_memory(spool->hstate,
84                                                 -spool->min_hpages);
85                 kfree(spool);
86         }
87 }
88
89 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
90                                                 long min_hpages)
91 {
92         struct hugepage_subpool *spool;
93
94         spool = kzalloc(sizeof(*spool), GFP_KERNEL);
95         if (!spool)
96                 return NULL;
97
98         spin_lock_init(&spool->lock);
99         spool->count = 1;
100         spool->max_hpages = max_hpages;
101         spool->hstate = h;
102         spool->min_hpages = min_hpages;
103
104         if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
105                 kfree(spool);
106                 return NULL;
107         }
108         spool->rsv_hpages = min_hpages;
109
110         return spool;
111 }
112
113 void hugepage_put_subpool(struct hugepage_subpool *spool)
114 {
115         spin_lock(&spool->lock);
116         BUG_ON(!spool->count);
117         spool->count--;
118         unlock_or_release_subpool(spool);
119 }
120
121 /*
122  * Subpool accounting for allocating and reserving pages.
123  * Return -ENOMEM if there are not enough resources to satisfy
124  * the request.  Otherwise, return the number of pages by which the
125  * global pools must be adjusted (upward).  The returned value may
126  * only be different than the passed value (delta) in the case where
127  * a subpool minimum size must be maintained.
128  */
129 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
130                                       long delta)
131 {
132         long ret = delta;
133
134         if (!spool)
135                 return ret;
136
137         spin_lock(&spool->lock);
138
139         if (spool->max_hpages != -1) {          /* maximum size accounting */
140                 if ((spool->used_hpages + delta) <= spool->max_hpages)
141                         spool->used_hpages += delta;
142                 else {
143                         ret = -ENOMEM;
144                         goto unlock_ret;
145                 }
146         }
147
148         /* minimum size accounting */
149         if (spool->min_hpages != -1 && spool->rsv_hpages) {
150                 if (delta > spool->rsv_hpages) {
151                         /*
152                          * Asking for more reserves than those already taken on
153                          * behalf of subpool.  Return difference.
154                          */
155                         ret = delta - spool->rsv_hpages;
156                         spool->rsv_hpages = 0;
157                 } else {
158                         ret = 0;        /* reserves already accounted for */
159                         spool->rsv_hpages -= delta;
160                 }
161         }
162
163 unlock_ret:
164         spin_unlock(&spool->lock);
165         return ret;
166 }
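
/*
 * A rough worked example of the accounting above (illustrative values only,
 * not taken from any particular caller): consider a subpool mounted with
 * min_hpages = 8 and max_hpages = 16, so rsv_hpages starts at 8 and
 * used_hpages at 0.
 *
 *	hugepage_subpool_get_pages(spool, 2)
 *		used_hpages: 0 -> 2 (within max), delta <= rsv_hpages
 *		rsv_hpages:  8 -> 6, return 0  (the minimum reserve covers it,
 *					        so the global pool is untouched)
 *
 *	hugepage_subpool_get_pages(spool, 10)
 *		used_hpages: 2 -> 12 (still within max), rsv_hpages = 6 < 10
 *		rsv_hpages:  6 -> 0, return 4  (only 4 new global reservations
 *					        are needed for the 10 pages)
 */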
167
168 /*
169  * Subpool accounting for freeing and unreserving pages.
170  * Return the number of global page reservations that must be dropped.
171  * The return value may only be different than the passed value (delta)
172  * in the case where a subpool minimum size must be maintained.
173  */
174 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
175                                        long delta)
176 {
177         long ret = delta;
178
179         if (!spool)
180                 return delta;
181
182         spin_lock(&spool->lock);
183
184         if (spool->max_hpages != -1)            /* maximum size accounting */
185                 spool->used_hpages -= delta;
186
187         /* minimum size accounting */
188         if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
189                 if (spool->rsv_hpages + delta <= spool->min_hpages)
190                         ret = 0;
191                 else
192                         ret = spool->rsv_hpages + delta - spool->min_hpages;
193
194                 spool->rsv_hpages += delta;
195                 if (spool->rsv_hpages > spool->min_hpages)
196                         spool->rsv_hpages = spool->min_hpages;
197         }
198
199         /*
200          * If hugetlbfs_put_super couldn't free spool due to an outstanding
201          * quota reference, free it now.
202          */
203         unlock_or_release_subpool(spool);
204
205         return ret;
206 }
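
/*
 * Continuing the illustrative subpool above (min_hpages = 8, max_hpages = 16),
 * suppose used_hpages = 6 and rsv_hpages = 2 when a range is freed:
 *
 *	hugepage_subpool_put_pages(spool, 4)
 *		used_hpages: 6 -> 2, now below min_hpages
 *		rsv_hpages:  2 -> 6 (the freed pages are kept reserved to
 *				     maintain the minimum), return 0 -- no
 *				     global reservations are dropped
 *
 * Only once rsv_hpages would exceed min_hpages does the excess show up in
 * the return value and get released from the global pool.
 */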
207
208 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
209 {
210         return HUGETLBFS_SB(inode->i_sb)->spool;
211 }
212
213 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
214 {
215         return subpool_inode(file_inode(vma->vm_file));
216 }
217
218 /*
219  * Region tracking -- allows tracking of reservations and instantiated pages
220  *                    across the pages in a mapping.
221  *
222  * The region data structures are embedded into a resv_map and protected
223  * by a resv_map's lock.  The set of regions within the resv_map represent
224  * reservations for huge pages, or huge pages that have already been
225  * instantiated within the map.  The from and to elements are huge page
226  * indices into the associated mapping.  from indicates the starting index
227  * of the region.  to represents the first index past the end of the region.
228  *
229  * For example, a file region structure with from == 0 and to == 4 represents
230  * four huge pages in a mapping.  It is important to note that the to element
231  * represents the first element past the end of the region. This is used in
232  * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
233  *
234  * Interval notation of the form [from, to) will be used to indicate that
235  * the endpoint from is inclusive and to is exclusive.
236  */
237 struct file_region {
238         struct list_head link;
239         long from;
240         long to;
241 };
242
243 /*
244  * Add the huge page range represented by [f, t) to the reserve
245  * map.  In the normal case, existing regions will be expanded
246  * to accommodate the specified range.  Sufficient regions should
247  * exist for expansion due to the previous call to region_chg
248  * with the same range.  However, it is possible that region_del
249  * could have been called after region_chg and modified the map
250  * in such a way that no region exists to be expanded.  In this
251  * case, pull a region descriptor from the cache associated with
252  * the map and use that for the new range.
253  *
254  * Return the number of new huge pages added to the map.  This
255  * number is greater than or equal to zero.
256  */
257 static long region_add(struct resv_map *resv, long f, long t)
258 {
259         struct list_head *head = &resv->regions;
260         struct file_region *rg, *nrg, *trg;
261         long add = 0;
262
263         spin_lock(&resv->lock);
264         /* Locate the region we are either in or before. */
265         list_for_each_entry(rg, head, link)
266                 if (f <= rg->to)
267                         break;
268
269         /*
270          * If no region exists which can be expanded to include the
271          * specified range, the list must have been modified by an
272  * interleaving call to region_del().  Pull a region descriptor
273          * from the cache and use it for this range.
274          */
275         if (&rg->link == head || t < rg->from) {
276                 VM_BUG_ON(resv->region_cache_count <= 0);
277
278                 resv->region_cache_count--;
279                 nrg = list_first_entry(&resv->region_cache, struct file_region,
280                                         link);
281                 list_del(&nrg->link);
282
283                 nrg->from = f;
284                 nrg->to = t;
285                 list_add(&nrg->link, rg->link.prev);
286
287                 add += t - f;
288                 goto out_locked;
289         }
290
291         /* Round our left edge to the current segment if it encloses us. */
292         if (f > rg->from)
293                 f = rg->from;
294
295         /* Check for and consume any regions we now overlap with. */
296         nrg = rg;
297         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
298                 if (&rg->link == head)
299                         break;
300                 if (rg->from > t)
301                         break;
302
303                 /* If this area reaches higher, then extend our area to
304                  * include it completely.  If this is not the first area
305                  * which we intend to reuse, free it. */
306                 if (rg->to > t)
307                         t = rg->to;
308                 if (rg != nrg) {
309                         /* Decrement return value by the deleted range.
310                          * Another range will span this area so that by
311                          * the end of the routine, add will be >= zero.
312                          */
313                         add -= (rg->to - rg->from);
314                         list_del(&rg->link);
315                         kfree(rg);
316                 }
317         }
318
319         add += (nrg->from - f);         /* Added to beginning of region */
320         nrg->from = f;
321         add += t - nrg->to;             /* Added to end of region */
322         nrg->to = t;
323
324 out_locked:
325         resv->adds_in_progress--;
326         spin_unlock(&resv->lock);
327         VM_BUG_ON(add < 0);
328         return add;
329 }
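
/*
 * Illustrative example of the merge arithmetic above (ignoring the pairing
 * with region_chg): with resv->regions = { [0, 2), [3, 5) }, a call to
 * region_add(resv, 1, 4) expands the first region to absorb the second,
 * leaving the single region [0, 5).  The map previously covered 4 huge pages
 * and now covers 5, so the function returns 1.  If no existing region had
 * overlapped [1, 4), a descriptor would instead have been pulled from
 * resv->region_cache, as described in the comment above.
 */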
330
331 /*
332  * Examine the existing reserve map and determine how many
333  * huge pages in the specified range [f, t) are NOT currently
334  * represented.  This routine is called before a subsequent
335  * call to region_add that will actually modify the reserve
336  * map to add the specified range [f, t).  region_chg does
337  * not change the number of huge pages represented by the
338  * map.  However, if the existing regions in the map cannot
339  * be expanded to represent the new range, a new file_region
340  * structure is added to the map as a placeholder.  This is
341  * so that the subsequent region_add call will have all the
342  * regions it needs and will not fail.
343  *
344  * Upon entry, region_chg will also examine the cache of region descriptors
345  * associated with the map.  If there are not enough descriptors cached, one
346  * will be allocated for the in progress add operation.
347  *
348  * Returns the number of huge pages that need to be added to the existing
349  * reservation map for the range [f, t).  This number is greater or equal to
350  * zero.  -ENOMEM is returned if a new file_region structure or cache entry
351  * is needed and cannot be allocated.
352  */
353 static long region_chg(struct resv_map *resv, long f, long t)
354 {
355         struct list_head *head = &resv->regions;
356         struct file_region *rg, *nrg = NULL;
357         long chg = 0;
358
359 retry:
360         spin_lock(&resv->lock);
361 retry_locked:
362         resv->adds_in_progress++;
363
364         /*
365          * Check for sufficient descriptors in the cache to accommodate
366          * the number of in progress add operations.
367          */
368         if (resv->adds_in_progress > resv->region_cache_count) {
369                 struct file_region *trg;
370
371                 VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
372                 /* Must drop lock to allocate a new descriptor. */
373                 resv->adds_in_progress--;
374                 spin_unlock(&resv->lock);
375
376                 trg = kmalloc(sizeof(*trg), GFP_KERNEL);
377                 if (!trg) {
378                         kfree(nrg);
379                         return -ENOMEM;
380                 }
381
382                 spin_lock(&resv->lock);
383                 list_add(&trg->link, &resv->region_cache);
384                 resv->region_cache_count++;
385                 goto retry_locked;
386         }
387
388         /* Locate the region we are before or in. */
389         list_for_each_entry(rg, head, link)
390                 if (f <= rg->to)
391                         break;
392
393         /* If we are below the current region then a new region is required.
394          * Subtle: allocate a new region at the position but make it zero
395          * size such that we can guarantee to record the reservation. */
396         if (&rg->link == head || t < rg->from) {
397                 if (!nrg) {
398                         resv->adds_in_progress--;
399                         spin_unlock(&resv->lock);
400                         nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
401                         if (!nrg)
402                                 return -ENOMEM;
403
404                         nrg->from = f;
405                         nrg->to   = f;
406                         INIT_LIST_HEAD(&nrg->link);
407                         goto retry;
408                 }
409
410                 list_add(&nrg->link, rg->link.prev);
411                 chg = t - f;
412                 goto out_nrg;
413         }
414
415         /* Round our left edge to the current segment if it encloses us. */
416         if (f > rg->from)
417                 f = rg->from;
418         chg = t - f;
419
420         /* Check for and consume any regions we now overlap with. */
421         list_for_each_entry(rg, rg->link.prev, link) {
422                 if (&rg->link == head)
423                         break;
424                 if (rg->from > t)
425                         goto out;
426
427                 /* We overlap with this area, if it extends further than
428                  * us then we must extend ourselves.  Account for its
429                  * existing reservation. */
430                 if (rg->to > t) {
431                         chg += rg->to - t;
432                         t = rg->to;
433                 }
434                 chg -= rg->to - rg->from;
435         }
436
437 out:
438         spin_unlock(&resv->lock);
439         /*  We already know we raced and no longer need the new region */
440         kfree(nrg);
441         return chg;
442 out_nrg:
443         spin_unlock(&resv->lock);
444         return chg;
445 }
446
447 /*
448  * Abort the in progress add operation.  The adds_in_progress field
449  * of the resv_map keeps track of the operations in progress between
450  * calls to region_chg and region_add.  Operations are sometimes
451  * aborted after the call to region_chg.  In such cases, region_abort
452  * is called to decrement the adds_in_progress counter.
453  *
454  * NOTE: The range arguments [f, t) are not needed or used in this
455  * routine.  They are kept to make reading the calling code easier as
456  * arguments will match the associated region_chg call.
457  */
458 static void region_abort(struct resv_map *resv, long f, long t)
459 {
460         spin_lock(&resv->lock);
461         VM_BUG_ON(!resv->region_cache_count);
462         resv->adds_in_progress--;
463         spin_unlock(&resv->lock);
464 }
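
/*
 * Taken together, the three routines above implement a two-phase protocol.
 * A sketch of how a caller is expected to use them (hypothetical caller,
 * error handling elided):
 *
 *	chg = region_chg(resv, f, t);	// pages needed; caches a descriptor
 *	if (chg < 0)
 *		return chg;
 *	if (charging chg pages against the subpool/global pool fails) {
 *		region_abort(resv, f, t);	// drop adds_in_progress
 *		return -ENOSPC;
 *	}
 *	region_add(resv, f, t);		// commit the range to the map
 *
 * The descriptor cached by region_chg() guarantees that region_add() cannot
 * fail for lack of memory.
 */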
465
466 /*
467  * Delete the specified range [f, t) from the reserve map.  If the
468  * t parameter is LONG_MAX, this indicates that ALL regions after f
469  * should be deleted.  Locate the regions which intersect [f, t)
470  * and either trim, delete or split the existing regions.
471  *
472  * Returns the number of huge pages deleted from the reserve map.
473  * In the normal case, the return value is zero or more.  In the
474  * case where a region must be split, a new region descriptor must
475  * be allocated.  If the allocation fails, -ENOMEM will be returned.
476  * NOTE: If the parameter t == LONG_MAX, then we will never split
477  * a region and possibly return -ENOMEM.  Callers specifying
478  * t == LONG_MAX do not need to check for -ENOMEM error.
479  */
480 static long region_del(struct resv_map *resv, long f, long t)
481 {
482         struct list_head *head = &resv->regions;
483         struct file_region *rg, *trg;
484         struct file_region *nrg = NULL;
485         long del = 0;
486
487 retry:
488         spin_lock(&resv->lock);
489         list_for_each_entry_safe(rg, trg, head, link) {
490                 /*
491                  * Skip regions before the range to be deleted.  file_region
492                  * ranges are normally of the form [from, to).  However, there
493                  * may be a "placeholder" entry in the map which is of the form
494                  * (from, to) with from == to.  Check for placeholder entries
495                  * at the beginning of the range to be deleted.
496                  */
497                 if (rg->to <= f && (rg->to != rg->from || rg->to != f))
498                         continue;
499
500                 if (rg->from >= t)
501                         break;
502
503                 if (f > rg->from && t < rg->to) { /* Must split region */
504                         /*
505                          * Check for an entry in the cache before dropping
506                          * lock and attempting allocation.
507                          */
508                         if (!nrg &&
509                             resv->region_cache_count > resv->adds_in_progress) {
510                                 nrg = list_first_entry(&resv->region_cache,
511                                                         struct file_region,
512                                                         link);
513                                 list_del(&nrg->link);
514                                 resv->region_cache_count--;
515                         }
516
517                         if (!nrg) {
518                                 spin_unlock(&resv->lock);
519                                 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
520                                 if (!nrg)
521                                         return -ENOMEM;
522                                 goto retry;
523                         }
524
525                         del += t - f;
526
527                         /* New entry for end of split region */
528                         nrg->from = t;
529                         nrg->to = rg->to;
530                         INIT_LIST_HEAD(&nrg->link);
531
532                         /* Original entry is trimmed */
533                         rg->to = f;
534
535                         list_add(&nrg->link, &rg->link);
536                         nrg = NULL;
537                         break;
538                 }
539
540                 if (f <= rg->from && t >= rg->to) { /* Remove entire region */
541                         del += rg->to - rg->from;
542                         list_del(&rg->link);
543                         kfree(rg);
544                         continue;
545                 }
546
547                 if (f <= rg->from) {    /* Trim beginning of region */
548                         del += t - rg->from;
549                         rg->from = t;
550                 } else {                /* Trim end of region */
551                         del += rg->to - f;
552                         rg->to = f;
553                 }
554         }
555
556         spin_unlock(&resv->lock);
557         kfree(nrg);
558         return del;
559 }
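
/*
 * Illustrative examples of the cases above: with resv->regions = { [0, 10) },
 * region_del(resv, 3, 5) must split the region, producing [0, 3) and [5, 10)
 * and returning 2 (the spare descriptor comes from the cache or kmalloc).
 * region_del(resv, 5, LONG_MAX) on the same map only trims, producing [0, 5)
 * and returning 5, which is why callers passing t == LONG_MAX never see
 * -ENOMEM.
 */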
560
561 /*
562  * A rare out of memory error was encountered which prevented removal of
563  * the reserve map region for a page.  The huge page itself was freed
564  * and removed from the page cache.  This routine will adjust the subpool
565  * usage count, and the global reserve count if needed.  By incrementing
566  * these counts, the reserve map entry which could not be deleted will
567  * appear as a "reserved" entry instead of simply dangling with incorrect
568  * counts.
569  */
570 void hugetlb_fix_reserve_counts(struct inode *inode)
571 {
572         struct hugepage_subpool *spool = subpool_inode(inode);
573         long rsv_adjust;
574
575         rsv_adjust = hugepage_subpool_get_pages(spool, 1);
576         if (rsv_adjust) {
577                 struct hstate *h = hstate_inode(inode);
578
579                 hugetlb_acct_memory(h, 1);
580         }
581 }
582
583 /*
584  * Count and return the number of huge pages in the reserve map
585  * that intersect with the range [f, t).
586  */
587 static long region_count(struct resv_map *resv, long f, long t)
588 {
589         struct list_head *head = &resv->regions;
590         struct file_region *rg;
591         long chg = 0;
592
593         spin_lock(&resv->lock);
594         /* Locate each segment we overlap with, and count that overlap. */
595         list_for_each_entry(rg, head, link) {
596                 long seg_from;
597                 long seg_to;
598
599                 if (rg->to <= f)
600                         continue;
601                 if (rg->from >= t)
602                         break;
603
604                 seg_from = max(rg->from, f);
605                 seg_to = min(rg->to, t);
606
607                 chg += seg_to - seg_from;
608         }
609         spin_unlock(&resv->lock);
610
611         return chg;
612 }
613
614 /*
615  * Convert the address within this vma to the page offset within
616  * the mapping, in pagecache page units; huge pages here.
617  */
618 static pgoff_t vma_hugecache_offset(struct hstate *h,
619                         struct vm_area_struct *vma, unsigned long address)
620 {
621         return ((address - vma->vm_start) >> huge_page_shift(h)) +
622                         (vma->vm_pgoff >> huge_page_order(h));
623 }
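
/*
 * A rough numeric example, assuming 4 KB base pages and a 2 MB huge page
 * size (huge_page_shift == 21, huge_page_order == 9): for a VMA with
 * vm_start == 0x40000000 and vm_pgoff == 512 (i.e. mapped 2 MB into the
 * file), an address 4 MB past vm_start yields
 *
 *	((4 MB) >> 21) + (512 >> 9) == 2 + 1 == 3
 *
 * so the fault maps to the fourth huge page of the backing file.
 */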
624
625 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
626                                      unsigned long address)
627 {
628         return vma_hugecache_offset(hstate_vma(vma), vma, address);
629 }
630 EXPORT_SYMBOL_GPL(linear_hugepage_index);
631
632 /*
633  * Return the size of the pages allocated when backing a VMA. In the majority
634  * of cases this will be the same size as used by the page table entries.
635  */
636 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
637 {
638         struct hstate *hstate;
639
640         if (!is_vm_hugetlb_page(vma))
641                 return PAGE_SIZE;
642
643         hstate = hstate_vma(vma);
644
645         return 1UL << huge_page_shift(hstate);
646 }
647 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
648
649 /*
650  * Return the page size being used by the MMU to back a VMA. In the majority
651  * of cases, the page size used by the kernel matches the MMU size. On
652  * architectures where it differs, an architecture-specific version of this
653  * function is required.
654  */
655 #ifndef vma_mmu_pagesize
656 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
657 {
658         return vma_kernel_pagesize(vma);
659 }
660 #endif
661
662 /*
663  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
664  * bits of the reservation map pointer, which are always clear due to
665  * alignment.
666  */
667 #define HPAGE_RESV_OWNER    (1UL << 0)
668 #define HPAGE_RESV_UNMAPPED (1UL << 1)
669 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
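
/*
 * Because the resv_map is allocated with kmalloc(), its address is at least
 * word aligned, so the two low bits of the pointer stored in
 * vma->vm_private_data are always zero and can carry the flags above.  For
 * example (values illustrative), after
 *
 *	set_vma_resv_map(vma, resv_map);
 *	set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
 *
 * vma->vm_private_data holds ((unsigned long)resv_map | HPAGE_RESV_OWNER),
 * and vma_resv_map() recovers the pointer by masking with ~HPAGE_RESV_MASK.
 */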
670
671 /*
672  * These helpers are used to track how many pages are reserved for
673  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
674  * is guaranteed to have its future faults succeed.
675  *
676  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
677  * the reserve counters are updated with the hugetlb_lock held. It is safe
678  * to reset the VMA at fork() time as it is not in use yet and there is no
679  * chance of the global counters getting corrupted as a result of the values.
680  *
681  * The private mapping reservation is represented in a subtly different
682  * manner to a shared mapping.  A shared mapping has a region map associated
683  * with the underlying file; this region map represents the backing file
684  * pages which have ever had a reservation assigned, and this persists even
685  * after the page is instantiated.  A private mapping has a region map
686  * associated with the original mmap which is attached to all VMAs which
687  * reference it; this region map represents those offsets which have consumed
688  * a reservation, i.e. where pages have been instantiated.
689  */
690 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
691 {
692         return (unsigned long)vma->vm_private_data;
693 }
694
695 static void set_vma_private_data(struct vm_area_struct *vma,
696                                                         unsigned long value)
697 {
698         vma->vm_private_data = (void *)value;
699 }
700
701 struct resv_map *resv_map_alloc(void)
702 {
703         struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
704         struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
705
706         if (!resv_map || !rg) {
707                 kfree(resv_map);
708                 kfree(rg);
709                 return NULL;
710         }
711
712         kref_init(&resv_map->refs);
713         spin_lock_init(&resv_map->lock);
714         INIT_LIST_HEAD(&resv_map->regions);
715
716         resv_map->adds_in_progress = 0;
717
718         INIT_LIST_HEAD(&resv_map->region_cache);
719         list_add(&rg->link, &resv_map->region_cache);
720         resv_map->region_cache_count = 1;
721
722         return resv_map;
723 }
724
725 void resv_map_release(struct kref *ref)
726 {
727         struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
728         struct list_head *head = &resv_map->region_cache;
729         struct file_region *rg, *trg;
730
731         /* Clear out any active regions before we release the map. */
732         region_del(resv_map, 0, LONG_MAX);
733
734         /* ... and any entries left in the cache */
735         list_for_each_entry_safe(rg, trg, head, link) {
736                 list_del(&rg->link);
737                 kfree(rg);
738         }
739
740         VM_BUG_ON(resv_map->adds_in_progress);
741
742         kfree(resv_map);
743 }
744
745 static inline struct resv_map *inode_resv_map(struct inode *inode)
746 {
747         return inode->i_mapping->private_data;
748 }
749
750 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
751 {
752         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
753         if (vma->vm_flags & VM_MAYSHARE) {
754                 struct address_space *mapping = vma->vm_file->f_mapping;
755                 struct inode *inode = mapping->host;
756
757                 return inode_resv_map(inode);
758
759         } else {
760                 return (struct resv_map *)(get_vma_private_data(vma) &
761                                                         ~HPAGE_RESV_MASK);
762         }
763 }
764
765 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
766 {
767         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
768         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
769
770         set_vma_private_data(vma, (get_vma_private_data(vma) &
771                                 HPAGE_RESV_MASK) | (unsigned long)map);
772 }
773
774 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
775 {
776         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
777         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
778
779         set_vma_private_data(vma, get_vma_private_data(vma) | flags);
780 }
781
782 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
783 {
784         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
785
786         return (get_vma_private_data(vma) & flag) != 0;
787 }
788
789 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
790 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
791 {
792         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
793         if (!(vma->vm_flags & VM_MAYSHARE))
794                 vma->vm_private_data = (void *)0;
795 }
796
797 /* Returns true if the VMA has associated reserve pages */
798 static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
799 {
800         if (vma->vm_flags & VM_NORESERVE) {
801                 /*
802                  * This address is already reserved by other process(chg == 0),
803                  * so, we should decrement reserved count. Without decrementing,
804                  * reserve count remains after releasing inode, because this
805                  * allocated page will go into page cache and is regarded as
806                  * coming from reserved pool in releasing step.  Currently, we
807                  * don't have any other solution to deal with this situation
808                  * properly, so add work-around here.
809                  */
810                 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
811                         return true;
812                 else
813                         return false;
814         }
815
816         /* Shared mappings always use reserves */
817         if (vma->vm_flags & VM_MAYSHARE) {
818                 /*
819                  * We know VM_NORESERVE is not set.  Therefore, there SHOULD
820                  * be a region map for all pages.  The only situation where
821                  * there is no region map is if a hole was punched via
822                  * fallocate.  In this case, there really are no reserves to
823                  * use.  This situation is indicated if chg != 0.
824                  */
825                 if (chg)
826                         return false;
827                 else
828                         return true;
829         }
830
831         /*
832          * Only the process that called mmap() has reserves for
833          * private mappings.
834          */
835         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
836                 /*
837                  * Like the shared case above, a hole punch or truncate
838                  * could have been performed on the private mapping.
839                  * Examine the value of chg to determine if reserves
840                  * actually exist or were previously consumed.
841                  * Very Subtle - The value of chg comes from a previous
842                  * call to vma_needs_reserves().  The reserve map for
843                  * private mappings has different (opposite) semantics
844                  * than that of shared mappings.  vma_needs_reserves()
845                  * has already taken this difference in semantics into
846                  * account.  Therefore, the meaning of chg is the same
847                  * as in the shared case above.  Code could easily be
848                  * combined, but keeping it separate draws attention to
849                  * subtle differences.
850                  */
851                 if (chg)
852                         return false;
853                 else
854                         return true;
855         }
856
857         return false;
858 }
859
860 static void enqueue_huge_page(struct hstate *h, struct page *page)
861 {
862         int nid = page_to_nid(page);
863         list_move(&page->lru, &h->hugepage_freelists[nid]);
864         h->free_huge_pages++;
865         h->free_huge_pages_node[nid]++;
866 }
867
868 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
869 {
870         struct page *page;
871
872         list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
873                 if (!PageHWPoison(page))
874                         break;
875         /*
876          * if a 'non-isolated free hugepage' is not found on the list,
877          * the allocation fails.
878          */
879         if (&h->hugepage_freelists[nid] == &page->lru)
880                 return NULL;
881         list_move(&page->lru, &h->hugepage_activelist);
882         set_page_refcounted(page);
883         h->free_huge_pages--;
884         h->free_huge_pages_node[nid]--;
885         return page;
886 }
887
888 static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
889                 nodemask_t *nmask)
890 {
891         unsigned int cpuset_mems_cookie;
892         struct zonelist *zonelist;
893         struct zone *zone;
894         struct zoneref *z;
895         int node = -1;
896
897         zonelist = node_zonelist(nid, gfp_mask);
898
899 retry_cpuset:
900         cpuset_mems_cookie = read_mems_allowed_begin();
901         for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
902                 struct page *page;
903
904                 if (!cpuset_zone_allowed(zone, gfp_mask))
905                         continue;
906                 /*
907                  * no need to ask again on the same node. Pool is node rather than
908                  * zone aware
909                  */
910                 if (zone_to_nid(zone) == node)
911                         continue;
912                 node = zone_to_nid(zone);
913
914                 page = dequeue_huge_page_node_exact(h, node);
915                 if (page)
916                         return page;
917         }
918         if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
919                 goto retry_cpuset;
920
921         return NULL;
922 }
923
924 /* Movability of hugepages depends on migration support. */
925 static inline gfp_t htlb_alloc_mask(struct hstate *h)
926 {
927         if (hugepage_migration_supported(h))
928                 return GFP_HIGHUSER_MOVABLE;
929         else
930                 return GFP_HIGHUSER;
931 }
932
933 static struct page *dequeue_huge_page_vma(struct hstate *h,
934                                 struct vm_area_struct *vma,
935                                 unsigned long address, int avoid_reserve,
936                                 long chg)
937 {
938         struct page *page;
939         struct mempolicy *mpol;
940         gfp_t gfp_mask;
941         nodemask_t *nodemask;
942         int nid;
943
944         /*
945          * A child process with MAP_PRIVATE mappings created by its parent
946          * has no page reserves. This check ensures that reservations are
947          * not "stolen". The child may still get SIGKILLed
948          */
949         if (!vma_has_reserves(vma, chg) &&
950                         h->free_huge_pages - h->resv_huge_pages == 0)
951                 goto err;
952
953         /* If reserves cannot be used, ensure enough pages are in the pool */
954         if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
955                 goto err;
956
957         gfp_mask = htlb_alloc_mask(h);
958         nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
959         page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
960         if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
961                 SetPagePrivate(page);
962                 h->resv_huge_pages--;
963         }
964
965         mpol_cond_put(mpol);
966         return page;
967
968 err:
969         return NULL;
970 }
971
972 /*
973  * common helper functions for hstate_next_node_to_{alloc|free}.
974  * We may have allocated or freed a huge page based on a different
975  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
976  * be outside of *nodes_allowed.  Ensure that we use an allowed
977  * node for alloc or free.
978  */
979 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
980 {
981         nid = next_node_in(nid, *nodes_allowed);
982         VM_BUG_ON(nid >= MAX_NUMNODES);
983
984         return nid;
985 }
986
987 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
988 {
989         if (!node_isset(nid, *nodes_allowed))
990                 nid = next_node_allowed(nid, nodes_allowed);
991         return nid;
992 }
993
994 /*
995  * returns the previously saved node ["this node"] from which to
996  * allocate a persistent huge page for the pool and advance the
997  * next node from which to allocate, handling wrap at end of node
998  * mask.
999  */
1000 static int hstate_next_node_to_alloc(struct hstate *h,
1001                                         nodemask_t *nodes_allowed)
1002 {
1003         int nid;
1004
1005         VM_BUG_ON(!nodes_allowed);
1006
1007         nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
1008         h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
1009
1010         return nid;
1011 }
1012
1013 /*
1014  * helper for free_pool_huge_page() - return the previously saved
1015  * node ["this node"] from which to free a huge page.  Advance the
1016  * next node id whether or not we find a free huge page to free so
1017  * that the next attempt to free addresses the next node.
1018  */
1019 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
1020 {
1021         int nid;
1022
1023         VM_BUG_ON(!nodes_allowed);
1024
1025         nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
1026         h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
1027
1028         return nid;
1029 }
1030
1031 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)           \
1032         for (nr_nodes = nodes_weight(*mask);                            \
1033                 nr_nodes > 0 &&                                         \
1034                 ((node = hstate_next_node_to_alloc(hs, mask)) || 1);    \
1035                 nr_nodes--)
1036
1037 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask)            \
1038         for (nr_nodes = nodes_weight(*mask);                            \
1039                 nr_nodes > 0 &&                                         \
1040                 ((node = hstate_next_node_to_free(hs, mask)) || 1);     \
1041                 nr_nodes--)
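
/*
 * A minimal usage sketch (essentially what alloc_fresh_huge_page() below
 * does): nr_nodes bounds the walk to a single pass over the allowed nodes,
 * while the hstate's saved next_nid_to_alloc makes successive calls
 * round-robin across them.
 *
 *	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
 *		page = alloc_fresh_huge_page_node(h, node);
 *		if (page)
 *			break;
 *	}
 */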
1042
1043 #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
1044 static void destroy_compound_gigantic_page(struct page *page,
1045                                         unsigned int order)
1046 {
1047         int i;
1048         int nr_pages = 1 << order;
1049         struct page *p = page + 1;
1050
1051         atomic_set(compound_mapcount_ptr(page), 0);
1052         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1053                 clear_compound_head(p);
1054                 set_page_refcounted(p);
1055         }
1056
1057         set_compound_order(page, 0);
1058         __ClearPageHead(page);
1059 }
1060
1061 static void free_gigantic_page(struct page *page, unsigned int order)
1062 {
1063         free_contig_range(page_to_pfn(page), 1 << order);
1064 }
1065
1066 static int __alloc_gigantic_page(unsigned long start_pfn,
1067                                 unsigned long nr_pages, gfp_t gfp_mask)
1068 {
1069         unsigned long end_pfn = start_pfn + nr_pages;
1070         return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
1071                                   gfp_mask);
1072 }
1073
1074 static bool pfn_range_valid_gigantic(struct zone *z,
1075                         unsigned long start_pfn, unsigned long nr_pages)
1076 {
1077         unsigned long i, end_pfn = start_pfn + nr_pages;
1078         struct page *page;
1079
1080         for (i = start_pfn; i < end_pfn; i++) {
1081                 if (!pfn_valid(i))
1082                         return false;
1083
1084                 page = pfn_to_page(i);
1085
1086                 if (page_zone(page) != z)
1087                         return false;
1088
1089                 if (PageReserved(page))
1090                         return false;
1091
1092                 if (page_count(page) > 0)
1093                         return false;
1094
1095                 if (PageHuge(page))
1096                         return false;
1097         }
1098
1099         return true;
1100 }
1101
1102 static bool zone_spans_last_pfn(const struct zone *zone,
1103                         unsigned long start_pfn, unsigned long nr_pages)
1104 {
1105         unsigned long last_pfn = start_pfn + nr_pages - 1;
1106         return zone_spans_pfn(zone, last_pfn);
1107 }
1108
1109 static struct page *alloc_gigantic_page(int nid, struct hstate *h)
1110 {
1111         unsigned int order = huge_page_order(h);
1112         unsigned long nr_pages = 1 << order;
1113         unsigned long ret, pfn, flags;
1114         struct zonelist *zonelist;
1115         struct zone *zone;
1116         struct zoneref *z;
1117         gfp_t gfp_mask;
1118
1119         gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
1120         zonelist = node_zonelist(nid, gfp_mask);
1121         for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), NULL) {
1122                 spin_lock_irqsave(&zone->lock, flags);
1123
1124                 pfn = ALIGN(zone->zone_start_pfn, nr_pages);
1125                 while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
1126                         if (pfn_range_valid_gigantic(zone, pfn, nr_pages)) {
1127                                 /*
1128                                  * We release the zone lock here because
1129                                  * alloc_contig_range() will also lock the zone
1130                                  * at some point. If there's an allocation
1131                                  * spinning on this lock, it may win the race
1132                                  * and cause alloc_contig_range() to fail...
1133                                  */
1134                                 spin_unlock_irqrestore(&zone->lock, flags);
1135                                 ret = __alloc_gigantic_page(pfn, nr_pages, gfp_mask);
1136                                 if (!ret)
1137                                         return pfn_to_page(pfn);
1138                                 spin_lock_irqsave(&zone->lock, flags);
1139                         }
1140                         pfn += nr_pages;
1141                 }
1142
1143                 spin_unlock_irqrestore(&zone->lock, flags);
1144         }
1145
1146         return NULL;
1147 }
1148
1149 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1150 static void prep_compound_gigantic_page(struct page *page, unsigned int order);
1151
1152 static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
1153 {
1154         struct page *page;
1155
1156         page = alloc_gigantic_page(nid, h);
1157         if (page) {
1158                 prep_compound_gigantic_page(page, huge_page_order(h));
1159                 prep_new_huge_page(h, page, nid);
1160         }
1161
1162         return page;
1163 }
1164
1165 static int alloc_fresh_gigantic_page(struct hstate *h,
1166                                 nodemask_t *nodes_allowed)
1167 {
1168         struct page *page = NULL;
1169         int nr_nodes, node;
1170
1171         for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1172                 page = alloc_fresh_gigantic_page_node(h, node);
1173                 if (page)
1174                         return 1;
1175         }
1176
1177         return 0;
1178 }
1179
1180 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
1181 static inline bool gigantic_page_supported(void) { return false; }
1182 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1183 static inline void destroy_compound_gigantic_page(struct page *page,
1184                                                 unsigned int order) { }
1185 static inline int alloc_fresh_gigantic_page(struct hstate *h,
1186                                         nodemask_t *nodes_allowed) { return 0; }
1187 #endif
1188
1189 static void update_and_free_page(struct hstate *h, struct page *page)
1190 {
1191         int i;
1192
1193         if (hstate_is_gigantic(h) && !gigantic_page_supported())
1194                 return;
1195
1196         h->nr_huge_pages--;
1197         h->nr_huge_pages_node[page_to_nid(page)]--;
1198         for (i = 0; i < pages_per_huge_page(h); i++) {
1199                 page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
1200                                 1 << PG_referenced | 1 << PG_dirty |
1201                                 1 << PG_active | 1 << PG_private |
1202                                 1 << PG_writeback);
1203         }
1204         VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1205         set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1206         set_page_refcounted(page);
1207         if (hstate_is_gigantic(h)) {
1208                 destroy_compound_gigantic_page(page, huge_page_order(h));
1209                 free_gigantic_page(page, huge_page_order(h));
1210         } else {
1211                 __free_pages(page, huge_page_order(h));
1212         }
1213 }
1214
1215 struct hstate *size_to_hstate(unsigned long size)
1216 {
1217         struct hstate *h;
1218
1219         for_each_hstate(h) {
1220                 if (huge_page_size(h) == size)
1221                         return h;
1222         }
1223         return NULL;
1224 }
1225
1226 /*
1227  * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
1228  * to hstate->hugepage_activelist.)
1229  *
1230  * This function can be called for tail pages, but never returns true for them.
1231  */
1232 bool page_huge_active(struct page *page)
1233 {
1234         VM_BUG_ON_PAGE(!PageHuge(page), page);
1235         return PageHead(page) && PagePrivate(&page[1]);
1236 }
1237
1238 /* never called for tail page */
1239 static void set_page_huge_active(struct page *page)
1240 {
1241         VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1242         SetPagePrivate(&page[1]);
1243 }
1244
1245 static void clear_page_huge_active(struct page *page)
1246 {
1247         VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1248         ClearPagePrivate(&page[1]);
1249 }
1250
1251 void free_huge_page(struct page *page)
1252 {
1253         /*
1254          * Can't pass hstate in here because it is called from the
1255          * compound page destructor.
1256          */
1257         struct hstate *h = page_hstate(page);
1258         int nid = page_to_nid(page);
1259         struct hugepage_subpool *spool =
1260                 (struct hugepage_subpool *)page_private(page);
1261         bool restore_reserve;
1262
1263         set_page_private(page, 0);
1264         page->mapping = NULL;
1265         VM_BUG_ON_PAGE(page_count(page), page);
1266         VM_BUG_ON_PAGE(page_mapcount(page), page);
1267         restore_reserve = PagePrivate(page);
1268         ClearPagePrivate(page);
1269
1270         /*
1271          * A return code of zero implies that the subpool will be under its
1272          * minimum size if the reservation is not restored after the page is freed.
1273          * Therefore, force restore_reserve operation.
1274          */
1275         if (hugepage_subpool_put_pages(spool, 1) == 0)
1276                 restore_reserve = true;
1277
1278         spin_lock(&hugetlb_lock);
1279         clear_page_huge_active(page);
1280         hugetlb_cgroup_uncharge_page(hstate_index(h),
1281                                      pages_per_huge_page(h), page);
1282         if (restore_reserve)
1283                 h->resv_huge_pages++;
1284
1285         if (h->surplus_huge_pages_node[nid]) {
1286                 /* remove the page from active list */
1287                 list_del(&page->lru);
1288                 update_and_free_page(h, page);
1289                 h->surplus_huge_pages--;
1290                 h->surplus_huge_pages_node[nid]--;
1291         } else {
1292                 arch_clear_hugepage_flags(page);
1293                 enqueue_huge_page(h, page);
1294         }
1295         spin_unlock(&hugetlb_lock);
1296 }
1297
1298 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1299 {
1300         INIT_LIST_HEAD(&page->lru);
1301         set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1302         spin_lock(&hugetlb_lock);
1303         set_hugetlb_cgroup(page, NULL);
1304         h->nr_huge_pages++;
1305         h->nr_huge_pages_node[nid]++;
1306         spin_unlock(&hugetlb_lock);
1307         put_page(page); /* free it into the hugepage allocator */
1308 }
1309
1310 static void prep_compound_gigantic_page(struct page *page, unsigned int order)
1311 {
1312         int i;
1313         int nr_pages = 1 << order;
1314         struct page *p = page + 1;
1315
1316         /* we rely on prep_new_huge_page to set the destructor */
1317         set_compound_order(page, order);
1318         __ClearPageReserved(page);
1319         __SetPageHead(page);
1320         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1321                 /*
1322                  * For gigantic hugepages allocated through bootmem at
1323                  * boot, it's safer to be consistent with the not-gigantic
1324                  * hugepages and clear the PG_reserved bit from all tail pages
1325                  * too.  Otherwise drivers using get_user_pages() to access tail
1326                  * pages may get the reference counting wrong if they see
1327                  * PG_reserved set on a tail page (despite the head page not
1328                  * having PG_reserved set).  Enforcing this consistency between
1329                  * head and tail pages allows drivers to optimize away a check
1330                  * on the head page when they need to know if put_page() is needed
1331                  * after get_user_pages().
1332                  */
1333                 __ClearPageReserved(p);
1334                 set_page_count(p, 0);
1335                 set_compound_head(p, page);
1336         }
1337         atomic_set(compound_mapcount_ptr(page), -1);
1338 }
1339
1340 /*
1341  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1342  * transparent huge pages.  See the PageTransHuge() documentation for more
1343  * details.
1344  */
1345 int PageHuge(struct page *page)
1346 {
1347         if (!PageCompound(page))
1348                 return 0;
1349
1350         page = compound_head(page);
1351         return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
1352 }
1353 EXPORT_SYMBOL_GPL(PageHuge);
1354
1355 /*
1356  * PageHeadHuge() only returns true for hugetlbfs head page, but not for
1357  * normal or transparent huge pages.
1358  */
1359 int PageHeadHuge(struct page *page_head)
1360 {
1361         if (!PageHead(page_head))
1362                 return 0;
1363
1364         return get_compound_page_dtor(page_head) == free_huge_page;
1365 }
1366
1367 pgoff_t __basepage_index(struct page *page)
1368 {
1369         struct page *page_head = compound_head(page);
1370         pgoff_t index = page_index(page_head);
1371         unsigned long compound_idx;
1372
1373         if (!PageHuge(page_head))
1374                 return page_index(page);
1375
1376         if (compound_order(page_head) >= MAX_ORDER)
1377                 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1378         else
1379                 compound_idx = page - page_head;
1380
1381         return (index << compound_order(page_head)) + compound_idx;
1382 }
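
/*
 * Worked example, assuming 4 KB base pages, a 2 MB huge page
 * (compound_order == 9) and MAX_ORDER == 11: for a tail page ten pages past
 * the head of a huge page at huge-page index 3 in its mapping,
 * compound_idx == 10 and the function returns (3 << 9) + 10 == 1546, the
 * offset of that base page within the mapping.
 */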
1383
1384 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
1385 {
1386         struct page *page;
1387
1388         page = __alloc_pages_node(nid,
1389                 htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
1390                                                 __GFP_RETRY_MAYFAIL|__GFP_NOWARN,
1391                 huge_page_order(h));
1392         if (page) {
1393                 prep_new_huge_page(h, page, nid);
1394         }
1395
1396         return page;
1397 }
1398
1399 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
1400 {
1401         struct page *page;
1402         int nr_nodes, node;
1403         int ret = 0;
1404
1405         for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1406                 page = alloc_fresh_huge_page_node(h, node);
1407                 if (page) {
1408                         ret = 1;
1409                         break;
1410                 }
1411         }
1412
1413         if (ret)
1414                 count_vm_event(HTLB_BUDDY_PGALLOC);
1415         else
1416                 count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1417
1418         return ret;
1419 }
1420
1421 /*
1422  * Free huge page from pool from next node to free.
1423  * Attempt to keep persistent huge pages more or less
1424  * balanced over allowed nodes.
1425  * Called with hugetlb_lock locked.
1426  */
1427 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1428                                                          bool acct_surplus)
1429 {
1430         int nr_nodes, node;
1431         int ret = 0;
1432
1433         for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1434                 /*
1435                  * If we're returning unused surplus pages, only examine
1436                  * nodes with surplus pages.
1437                  */
1438                 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1439                     !list_empty(&h->hugepage_freelists[node])) {
1440                         struct page *page =
1441                                 list_entry(h->hugepage_freelists[node].next,
1442                                           struct page, lru);
1443                         list_del(&page->lru);
1444                         h->free_huge_pages--;
1445                         h->free_huge_pages_node[node]--;
1446                         if (acct_surplus) {
1447                                 h->surplus_huge_pages--;
1448                                 h->surplus_huge_pages_node[node]--;
1449                         }
1450                         update_and_free_page(h, page);
1451                         ret = 1;
1452                         break;
1453                 }
1454         }
1455
1456         return ret;
1457 }
1458
1459 /*
1460  * Dissolve a given free hugepage into free buddy pages. This function does
1461  * nothing for in-use (including surplus) hugepages. Returns -EBUSY if the
1462  * number of free hugepages would be reduced below the number of reserved
1463  * hugepages.
1464  */
1465 int dissolve_free_huge_page(struct page *page)
1466 {
1467         int rc = 0;
1468
1469         spin_lock(&hugetlb_lock);
1470         if (PageHuge(page) && !page_count(page)) {
1471                 struct page *head = compound_head(page);
1472                 struct hstate *h = page_hstate(head);
1473                 int nid = page_to_nid(head);
1474                 if (h->free_huge_pages - h->resv_huge_pages == 0) {
1475                         rc = -EBUSY;
1476                         goto out;
1477                 }
1478                 /*
1479                  * Move PageHWPoison flag from head page to the raw error page,
1480                  * which makes the subpages other than the error page reusable.
1481                  */
1482                 if (PageHWPoison(head) && page != head) {
1483                         SetPageHWPoison(page);
1484                         ClearPageHWPoison(head);
1485                 }
1486                 list_del(&head->lru);
1487                 h->free_huge_pages--;
1488                 h->free_huge_pages_node[nid]--;
1489                 h->max_huge_pages--;
1490                 update_and_free_page(h, head);
1491         }
1492 out:
1493         spin_unlock(&hugetlb_lock);
1494         return rc;
1495 }
1496
1497 /*
1498  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1499  * make specified memory blocks removable from the system.
1500  * Note that this will dissolve a free gigantic hugepage completely, if any
1501  * part of it lies within the given range.
1502  * Also note that if dissolve_free_huge_page() returns with an error, all
1503  * free hugepages that were dissolved before that error are lost.
1504  */
1505 int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1506 {
1507         unsigned long pfn;
1508         struct page *page;
1509         int rc = 0;
1510
1511         if (!hugepages_supported())
1512                 return rc;
1513
1514         for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
1515                 page = pfn_to_page(pfn);
1516                 if (PageHuge(page) && !page_count(page)) {
1517                         rc = dissolve_free_huge_page(page);
1518                         if (rc)
1519                                 break;
1520                 }
1521         }
1522
1523         return rc;
1524 }
1525
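     /*
      * Low-level buddy allocation used for surplus huge pages; the caller is
      * responsible for all hugetlb-specific accounting.
      */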
1526 static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
1527                 gfp_t gfp_mask, int nid, nodemask_t *nmask)
1528 {
1529         int order = huge_page_order(h);
1530
1531         gfp_mask |= __GFP_COMP|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
1532         if (nid == NUMA_NO_NODE)
1533                 nid = numa_mem_id();
1534         return __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
1535 }
1536
1537 static struct page *__alloc_buddy_huge_page(struct hstate *h, gfp_t gfp_mask,
1538                 int nid, nodemask_t *nmask)
1539 {
1540         struct page *page;
1541         unsigned int r_nid;
1542
1543         if (hstate_is_gigantic(h))
1544                 return NULL;
1545
1546         /*
1547          * Assume we will successfully allocate the surplus page to
1548          * prevent racing processes from causing the surplus to exceed
1549          * overcommit.
1550          *
1551          * This however introduces a different race, where a process B
1552          * tries to grow the static hugepage pool while alloc_pages() is
1553          * called by process A. B will only examine the per-node
1554          * counters in determining if surplus huge pages can be
1555          * converted to normal huge pages in adjust_pool_surplus(). A
1556          * won't be able to increment the per-node counter, until the
1557          * lock is dropped by B, but B doesn't drop hugetlb_lock until
1558          * no more huge pages can be converted from surplus to normal
1559          * state (and doesn't try to convert again). Thus, we have a
1560          * case where a surplus huge page exists, the pool is grown, and
1561          * the surplus huge page still exists after, even though it
1562          * should just have been converted to a normal huge page. This
1563          * does not leak memory, though, as the hugepage will be freed
1564          * once it is out of use. It also does not allow the counters to
1565          * go out of whack in adjust_pool_surplus() as we don't modify
1566          * the node values until we've gotten the hugepage and only the
1567          * per-node value is checked there.
1568          */
1569         spin_lock(&hugetlb_lock);
1570         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1571                 spin_unlock(&hugetlb_lock);
1572                 return NULL;
1573         } else {
1574                 h->nr_huge_pages++;
1575                 h->surplus_huge_pages++;
1576         }
1577         spin_unlock(&hugetlb_lock);
1578
1579         page = __hugetlb_alloc_buddy_huge_page(h, gfp_mask, nid, nmask);
1580
1581         spin_lock(&hugetlb_lock);
1582         if (page) {
1583                 INIT_LIST_HEAD(&page->lru);
1584                 r_nid = page_to_nid(page);
1585                 set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1586                 set_hugetlb_cgroup(page, NULL);
1587                 /*
1588                  * We incremented the global counters already
1589                  */
1590                 h->nr_huge_pages_node[r_nid]++;
1591                 h->surplus_huge_pages_node[r_nid]++;
1592                 __count_vm_event(HTLB_BUDDY_PGALLOC);
1593         } else {
1594                 h->nr_huge_pages--;
1595                 h->surplus_huge_pages--;
1596                 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1597         }
1598         spin_unlock(&hugetlb_lock);
1599
1600         return page;
1601 }
1602
1603 /*
1604  * Use the VMA's mpolicy to allocate a huge page from the buddy.
1605  */
1606 static
1607 struct page *__alloc_buddy_huge_page_with_mpol(struct hstate *h,
1608                 struct vm_area_struct *vma, unsigned long addr)
1609 {
1610         struct page *page;
1611         struct mempolicy *mpol;
1612         gfp_t gfp_mask = htlb_alloc_mask(h);
1613         int nid;
1614         nodemask_t *nodemask;
1615
1616         nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
1617         page = __alloc_buddy_huge_page(h, gfp_mask, nid, nodemask);
1618         mpol_cond_put(mpol);
1619
1620         return page;
1621 }
1622
1623 /*
1624  * This allocation function is useful in the context where vma is irrelevant.
1625  * E.g. soft-offlining uses this function because it only cares about the
1626  * physical address of the error page.
1627  */
1628 struct page *alloc_huge_page_node(struct hstate *h, int nid)
1629 {
1630         gfp_t gfp_mask = htlb_alloc_mask(h);
1631         struct page *page = NULL;
1632
1633         if (nid != NUMA_NO_NODE)
1634                 gfp_mask |= __GFP_THISNODE;
1635
1636         spin_lock(&hugetlb_lock);
1637         if (h->free_huge_pages - h->resv_huge_pages > 0)
1638                 page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL);
1639         spin_unlock(&hugetlb_lock);
1640
1641         if (!page)
1642                 page = __alloc_buddy_huge_page(h, gfp_mask, nid, NULL);
1643
1644         return page;
1645 }
1646
1647
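     /*
      * Allocate a huge page constrained to the given nodemask, preferring a
      * free pool page and falling back to surplus (overcommit) allocation.
      */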
1648 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
1649                 nodemask_t *nmask)
1650 {
1651         gfp_t gfp_mask = htlb_alloc_mask(h);
1652
1653         spin_lock(&hugetlb_lock);
1654         if (h->free_huge_pages - h->resv_huge_pages > 0) {
1655                 struct page *page;
1656
1657                 page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
1658                 if (page) {
1659                         spin_unlock(&hugetlb_lock);
1660                         return page;
1661                 }
1662         }
1663         spin_unlock(&hugetlb_lock);
1664
1665         /* No reservations, try to overcommit */
1666
1667         return __alloc_buddy_huge_page(h, gfp_mask, preferred_nid, nmask);
1668 }
1669
1670 /*
1671  * Increase the hugetlb pool such that it can accommodate a reservation
1672  * of size 'delta'.
1673  */
1674 static int gather_surplus_pages(struct hstate *h, int delta)
1675 {
1676         struct list_head surplus_list;
1677         struct page *page, *tmp;
1678         int ret, i;
1679         int needed, allocated;
1680         bool alloc_ok = true;
1681
1682         needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1683         if (needed <= 0) {
1684                 h->resv_huge_pages += delta;
1685                 return 0;
1686         }
1687
1688         allocated = 0;
1689         INIT_LIST_HEAD(&surplus_list);
1690
1691         ret = -ENOMEM;
1692 retry:
1693         spin_unlock(&hugetlb_lock);
1694         for (i = 0; i < needed; i++) {
1695                 page = __alloc_buddy_huge_page(h, htlb_alloc_mask(h),
1696                                 NUMA_NO_NODE, NULL);
1697                 if (!page) {
1698                         alloc_ok = false;
1699                         break;
1700                 }
1701                 list_add(&page->lru, &surplus_list);
1702                 cond_resched();
1703         }
1704         allocated += i;
1705
1706         /*
1707          * After retaking hugetlb_lock, we need to recalculate 'needed'
1708          * because either resv_huge_pages or free_huge_pages may have changed.
1709          */
1710         spin_lock(&hugetlb_lock);
1711         needed = (h->resv_huge_pages + delta) -
1712                         (h->free_huge_pages + allocated);
1713         if (needed > 0) {
1714                 if (alloc_ok)
1715                         goto retry;
1716                 /*
1717                  * We were not able to allocate enough pages to
1718                  * satisfy the entire reservation so we free what
1719                  * we've allocated so far.
1720                  */
1721                 goto free;
1722         }
1723         /*
1724          * The surplus_list now contains _at_least_ the number of extra pages
1725          * needed to accommodate the reservation.  Add the appropriate number
1726          * of pages to the hugetlb pool and free the extras back to the buddy
1727          * allocator.  Commit the entire reservation here to prevent another
1728          * process from stealing the pages as they are added to the pool but
1729          * before they are reserved.
1730          */
1731         needed += allocated;
1732         h->resv_huge_pages += delta;
1733         ret = 0;
1734
1735         /* Free the needed pages to the hugetlb pool */
1736         list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1737                 if ((--needed) < 0)
1738                         break;
1739                 /*
1740                  * This page is now managed by the hugetlb allocator and has
1741                  * no users -- drop the buddy allocator's reference.
1742                  */
1743                 put_page_testzero(page);
1744                 VM_BUG_ON_PAGE(page_count(page), page);
1745                 enqueue_huge_page(h, page);
1746         }
1747 free:
1748         spin_unlock(&hugetlb_lock);
1749
1750         /* Free unnecessary surplus pages to the buddy allocator */
1751         list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1752                 put_page(page);
1753         spin_lock(&hugetlb_lock);
1754
1755         return ret;
1756 }
1757
1758 /*
1759  * This routine has two main purposes:
1760  * 1) Decrement the reservation count (resv_huge_pages) by the value passed
1761  *    in unused_resv_pages.  This corresponds to the prior adjustments made
1762  *    to the associated reservation map.
1763  * 2) Free any unused surplus pages that may have been allocated to satisfy
1764  *    the reservation.  As many as unused_resv_pages may be freed.
1765  *
1766  * Called with hugetlb_lock held.  However, the lock could be dropped (and
1767  * reacquired) during calls to cond_resched_lock.  Whenever dropping the lock,
1768  * we must make sure nobody else can claim pages we are in the process of
1769  * freeing.  Do this by ensuring resv_huge_pages is always greater than the
1770  * number of huge pages we plan to free when dropping the lock.
1771  */
1772 static void return_unused_surplus_pages(struct hstate *h,
1773                                         unsigned long unused_resv_pages)
1774 {
1775         unsigned long nr_pages;
1776
1777         /* Cannot return gigantic pages currently */
1778         if (hstate_is_gigantic(h))
1779                 goto out;
1780
1781         /*
1782          * Part (or even all) of the reservation could have been backed
1783          * by pre-allocated pages. Only free surplus pages.
1784          */
1785         nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1786
1787         /*
1788          * We want to release as many surplus pages as possible, spread
1789          * evenly across all nodes with memory. Iterate across these nodes
1790          * until we can no longer free unreserved surplus pages. This occurs
1791          * when the nodes with surplus pages have no free pages.
1792          * free_pool_huge_page() will balance the freed pages across the
1793          * on-line nodes with memory and will handle the hstate accounting.
1794          *
1795          * Note that we decrement resv_huge_pages as we free the pages.  If
1796          * we drop the lock, resv_huge_pages will still be sufficiently large
1797          * to cover subsequent pages we may free.
1798          */
1799         while (nr_pages--) {
1800                 h->resv_huge_pages--;
1801                 unused_resv_pages--;
1802                 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1803                         goto out;
1804                 cond_resched_lock(&hugetlb_lock);
1805         }
1806
1807 out:
1808         /* Fully uncommit the reservation */
1809         h->resv_huge_pages -= unused_resv_pages;
1810 }
1811
1812
1813 /*
1814  * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
1815  * are used by the huge page allocation routines to manage reservations.
1816  *
1817  * vma_needs_reservation is called to determine if the huge page at addr
1818  * within the vma has an associated reservation.  If a reservation is
1819  * needed, the value 1 is returned.  The caller is then responsible for
1820  * managing the global reservation and subpool usage counts.  After
1821  * the huge page has been allocated, vma_commit_reservation is called
1822  * to add the page to the reservation map.  If the page allocation fails,
1823  * the reservation must be ended instead of committed.  vma_end_reservation
1824  * is called in such cases.
1825  *
1826  * In the normal case, vma_commit_reservation returns the same value
1827  * as the preceding vma_needs_reservation call.  The only time this
1828  * is not the case is if a reserve map was changed between calls.  It
1829  * is the responsibility of the caller to notice the difference and
1830  * take appropriate action.
1831  *
1832  * vma_add_reservation is used in error paths where a reservation must
1833  * be restored when a newly allocated huge page must be freed.  It is
1834  * to be called after calling vma_needs_reservation to determine if a
1835  * reservation exists.
1836  */
1837 enum vma_resv_mode {
1838         VMA_NEEDS_RESV,
1839         VMA_COMMIT_RESV,
1840         VMA_END_RESV,
1841         VMA_ADD_RESV,
1842 };
1843 static long __vma_reservation_common(struct hstate *h,
1844                                 struct vm_area_struct *vma, unsigned long addr,
1845                                 enum vma_resv_mode mode)
1846 {
1847         struct resv_map *resv;
1848         pgoff_t idx;
1849         long ret;
1850
1851         resv = vma_resv_map(vma);
1852         if (!resv)
1853                 return 1;
1854
1855         idx = vma_hugecache_offset(h, vma, addr);
1856         switch (mode) {
1857         case VMA_NEEDS_RESV:
1858                 ret = region_chg(resv, idx, idx + 1);
1859                 break;
1860         case VMA_COMMIT_RESV:
1861                 ret = region_add(resv, idx, idx + 1);
1862                 break;
1863         case VMA_END_RESV:
1864                 region_abort(resv, idx, idx + 1);
1865                 ret = 0;
1866                 break;
1867         case VMA_ADD_RESV:
1868                 if (vma->vm_flags & VM_MAYSHARE)
1869                         ret = region_add(resv, idx, idx + 1);
1870                 else {
1871                         region_abort(resv, idx, idx + 1);
1872                         ret = region_del(resv, idx, idx + 1);
1873                 }
1874                 break;
1875         default:
1876                 BUG();
1877         }
1878
1879         if (vma->vm_flags & VM_MAYSHARE)
1880                 return ret;
1881         else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
1882                 /*
1883                  * In most cases, reserves always exist for private mappings.
1884                  * However, a file associated with the mapping could have been
1885                  * hole punched or truncated after reserves were consumed; a
1886                  * subsequent fault on such a range will not use reserves.
1887                  * Subtle - The reserve map for private mappings has the
1888                  * opposite meaning than that of shared mappings.  If NO
1889                  * entry is in the reserve map, it means a reservation exists.
1890                  * If an entry exists in the reserve map, it means the
1891                  * reservation has already been consumed.  As a result, the
1892                  * return value of this routine is the opposite of the
1893                  * value returned from reserve map manipulation routines above.
1894                  */
1895                 if (ret)
1896                         return 0;
1897                 else
1898                         return 1;
1899         }
1900         else
1901                 return ret < 0 ? ret : 0;
1902 }
1903
1904 static long vma_needs_reservation(struct hstate *h,
1905                         struct vm_area_struct *vma, unsigned long addr)
1906 {
1907         return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
1908 }
1909
1910 static long vma_commit_reservation(struct hstate *h,
1911                         struct vm_area_struct *vma, unsigned long addr)
1912 {
1913         return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
1914 }
1915
1916 static void vma_end_reservation(struct hstate *h,
1917                         struct vm_area_struct *vma, unsigned long addr)
1918 {
1919         (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
1920 }
1921
1922 static long vma_add_reservation(struct hstate *h,
1923                         struct vm_area_struct *vma, unsigned long addr)
1924 {
1925         return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
1926 }
1927
1928 /*
1929  * This routine is called to restore a reservation on error paths.  In the
1930  * specific error paths, a huge page was allocated (via alloc_huge_page)
1931  * and is about to be freed.  If a reservation for the page existed,
1932  * alloc_huge_page would have consumed the reservation and set PagePrivate
1933  * in the newly allocated page.  When the page is freed via free_huge_page,
1934  * the global reservation count will be incremented if PagePrivate is set.
1935  * However, free_huge_page can not adjust the reserve map.  Adjust the
1936  * reserve map here to be consistent with global reserve count adjustments
1937  * to be made by free_huge_page.
1938  */
1939 static void restore_reserve_on_error(struct hstate *h,
1940                         struct vm_area_struct *vma, unsigned long address,
1941                         struct page *page)
1942 {
1943         if (unlikely(PagePrivate(page))) {
1944                 long rc = vma_needs_reservation(h, vma, address);
1945
1946                 if (unlikely(rc < 0)) {
1947                         /*
1948                          * Rare out of memory condition in reserve map
1949                          * manipulation.  Clear PagePrivate so that
1950                          * global reserve count will not be incremented
1951                          * by free_huge_page.  This will make it appear
1952                          * as though the reservation for this page was
1953                          * consumed.  This may prevent the task from
1954                          * faulting in the page at a later time.  This
1955                          * is better than inconsistent global huge page
1956                          * accounting of reserve counts.
1957                          */
1958                         ClearPagePrivate(page);
1959                 } else if (rc) {
1960                         rc = vma_add_reservation(h, vma, address);
1961                         if (unlikely(rc < 0))
1962                                 /*
1963                                  * See above comment about rare out of
1964                                  * memory condition.
1965                                  */
1966                                 ClearPagePrivate(page);
1967                 } else
1968                         vma_end_reservation(h, vma, address);
1969         }
1970 }
1971
1972 struct page *alloc_huge_page(struct vm_area_struct *vma,
1973                                     unsigned long addr, int avoid_reserve)
1974 {
1975         struct hugepage_subpool *spool = subpool_vma(vma);
1976         struct hstate *h = hstate_vma(vma);
1977         struct page *page;
1978         long map_chg, map_commit;
1979         long gbl_chg;
1980         int ret, idx;
1981         struct hugetlb_cgroup *h_cg;
1982
1983         idx = hstate_index(h);
1984         /*
1985          * Examine the region/reserve map to determine if the process
1986          * has a reservation for the page to be allocated.  A return
1987          * code of zero indicates a reservation exists (no change).
1988          */
1989         map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
1990         if (map_chg < 0)
1991                 return ERR_PTR(-ENOMEM);
1992
1993         /*
1994          * Processes that did not create the mapping will have no
1995          * reserves as indicated by the region/reserve map. Check
1996          * that the allocation will not exceed the subpool limit.
1997          * Allocations for MAP_NORESERVE mappings also need to be
1998          * checked against any subpool limit.
1999          */
2000         if (map_chg || avoid_reserve) {
2001                 gbl_chg = hugepage_subpool_get_pages(spool, 1);
2002                 if (gbl_chg < 0) {
2003                         vma_end_reservation(h, vma, addr);
2004                         return ERR_PTR(-ENOSPC);
2005                 }
2006
2007                 /*
2008                  * Even though there was no reservation in the region/reserve
2009                  * map, there could be reservations associated with the
2010                  * subpool that can be used.  This would be indicated if the
2011                  * return value of hugepage_subpool_get_pages() is zero.
2012                  * However, if avoid_reserve is specified we still avoid even
2013                  * the subpool reservations.
2014                  */
2015                 if (avoid_reserve)
2016                         gbl_chg = 1;
2017         }
2018
2019         ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
2020         if (ret)
2021                 goto out_subpool_put;
2022
2023         spin_lock(&hugetlb_lock);
2024         /*
2025          * gbl_chg is passed to indicate whether or not a page must be taken
2026          * from the global free pool (global change).  gbl_chg == 0 indicates
2027          * a reservation exists for the allocation.
2028          */
2029         page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
2030         if (!page) {
2031                 spin_unlock(&hugetlb_lock);
2032                 page = __alloc_buddy_huge_page_with_mpol(h, vma, addr);
2033                 if (!page)
2034                         goto out_uncharge_cgroup;
2035                 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
2036                         SetPagePrivate(page);
2037                         h->resv_huge_pages--;
2038                 }
2039                 spin_lock(&hugetlb_lock);
2040                 list_move(&page->lru, &h->hugepage_activelist);
2041                 /* Fall through */
2042         }
2043         hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
2044         spin_unlock(&hugetlb_lock);
2045
2046         set_page_private(page, (unsigned long)spool);
2047
2048         map_commit = vma_commit_reservation(h, vma, addr);
2049         if (unlikely(map_chg > map_commit)) {
2050                 /*
2051                  * The page was added to the reservation map between
2052                  * vma_needs_reservation and vma_commit_reservation.
2053                  * This indicates a race with hugetlb_reserve_pages.
2054                  * Adjust for the subpool count incremented above AND
2055                  * in hugetlb_reserve_pages for the same page.  Also,
2056                  * the reservation count added in hugetlb_reserve_pages
2057                  * no longer applies.
2058                  */
2059                 long rsv_adjust;
2060
2061                 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
2062                 hugetlb_acct_memory(h, -rsv_adjust);
2063         }
2064         return page;
2065
2066 out_uncharge_cgroup:
2067         hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
2068 out_subpool_put:
2069         if (map_chg || avoid_reserve)
2070                 hugepage_subpool_put_pages(spool, 1);
2071         vma_end_reservation(h, vma, addr);
2072         return ERR_PTR(-ENOSPC);
2073 }
2074
2075 /*
2076  * alloc_huge_page()'s wrapper which simply returns the page if allocation
2077  * succeeds, otherwise NULL. This function is called from new_vma_page(),
2078  * where no ERR_PTR() value is expected to be returned.
2079  */
2080 struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
2081                                 unsigned long addr, int avoid_reserve)
2082 {
2083         struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
2084         if (IS_ERR(page))
2085                 page = NULL;
2086         return page;
2087 }
2088
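     /*
      * Reserve memory for one boot-time (gigantic) huge page from bootmem.
      * Architectures may override the weak alloc_bootmem_huge_page() alias.
      */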
2089 int alloc_bootmem_huge_page(struct hstate *h)
2090         __attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
2091 int __alloc_bootmem_huge_page(struct hstate *h)
2092 {
2093         struct huge_bootmem_page *m;
2094         int nr_nodes, node;
2095
2096         for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
2097                 void *addr;
2098
2099                 addr = memblock_virt_alloc_try_nid_nopanic(
2100                                 huge_page_size(h), huge_page_size(h),
2101                                 0, BOOTMEM_ALLOC_ACCESSIBLE, node);
2102                 if (addr) {
2103                         /*
2104                          * Use the beginning of the huge page to store the
2105                          * huge_bootmem_page struct (until gather_bootmem
2106                          * puts them into the mem_map).
2107                          */
2108                         m = addr;
2109                         goto found;
2110                 }
2111         }
2112         return 0;
2113
2114 found:
2115         BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
2116         /* Put them into a private list first because mem_map is not up yet */
2117         list_add(&m->list, &huge_boot_pages);
2118         m->hstate = h;
2119         return 1;
2120 }
2121
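     /* Choose the gigantic vs. normal compound-page initializer based on order */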
2122 static void __init prep_compound_huge_page(struct page *page,
2123                 unsigned int order)
2124 {
2125         if (unlikely(order > (MAX_ORDER - 1)))
2126                 prep_compound_gigantic_page(page, order);
2127         else
2128                 prep_compound_page(page, order);
2129 }
2130
2131 /* Put bootmem huge pages into the standard lists after mem_map is up */
2132 static void __init gather_bootmem_prealloc(void)
2133 {
2134         struct huge_bootmem_page *m;
2135
2136         list_for_each_entry(m, &huge_boot_pages, list) {
2137                 struct hstate *h = m->hstate;
2138                 struct page *page;
2139
2140 #ifdef CONFIG_HIGHMEM
2141                 page = pfn_to_page(m->phys >> PAGE_SHIFT);
2142                 memblock_free_late(__pa(m),
2143                                    sizeof(struct huge_bootmem_page));
2144 #else
2145                 page = virt_to_page(m);
2146 #endif
2147                 WARN_ON(page_count(page) != 1);
2148                 prep_compound_huge_page(page, h->order);
2149                 WARN_ON(PageReserved(page));
2150                 prep_new_huge_page(h, page, page_to_nid(page));
2151                 /*
2152                  * If we had gigantic hugepages allocated at boot time, we need
2153                  * to restore the 'stolen' pages to totalram_pages in order to
2154                  * fix confusing memory reports from free(1) and other
2155                  * side-effects, like CommitLimit going negative.
2156                  */
2157                 if (hstate_is_gigantic(h))
2158                         adjust_managed_page_count(page, 1 << h->order);
2159         }
2160 }
2161
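     /*
      * Pre-allocate max_huge_pages pages for an hstate at boot; gigantic pages
      * come from bootmem, everything else from the buddy allocator.  If fewer
      * pages could be allocated, max_huge_pages is lowered to match.
      */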
2162 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
2163 {
2164         unsigned long i;
2165
2166         for (i = 0; i < h->max_huge_pages; ++i) {
2167                 if (hstate_is_gigantic(h)) {
2168                         if (!alloc_bootmem_huge_page(h))
2169                                 break;
2170                 } else if (!alloc_fresh_huge_page(h,
2171                                          &node_states[N_MEMORY]))
2172                         break;
2173                 cond_resched();
2174         }
2175         if (i < h->max_huge_pages) {
2176                 char buf[32];
2177
2178                 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2179                 pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
2180                         h->max_huge_pages, buf, i);
2181                 h->max_huge_pages = i;
2182         }
2183 }
2184
2185 static void __init hugetlb_init_hstates(void)
2186 {
2187         struct hstate *h;
2188
2189         for_each_hstate(h) {
2190                 if (minimum_order > huge_page_order(h))
2191                         minimum_order = huge_page_order(h);
2192
2193                 /* oversize hugepages were init'ed in early boot */
2194                 if (!hstate_is_gigantic(h))
2195                         hugetlb_hstate_alloc_pages(h);
2196         }
2197         VM_BUG_ON(minimum_order == UINT_MAX);
2198 }
2199
2200 static void __init report_hugepages(void)
2201 {
2202         struct hstate *h;
2203
2204         for_each_hstate(h) {
2205                 char buf[32];
2206
2207                 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2208                 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
2209                         buf, h->free_huge_pages);
2210         }
2211 }
2212
2213 #ifdef CONFIG_HIGHMEM
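     /*
      * On HIGHMEM configurations, prefer releasing lowmem huge pages when the
      * pool is shrunk, so precious lowmem is returned to the buddy allocator
      * first.
      */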
2214 static void try_to_free_low(struct hstate *h, unsigned long count,
2215                                                 nodemask_t *nodes_allowed)
2216 {
2217         int i;
2218
2219         if (hstate_is_gigantic(h))
2220                 return;
2221
2222         for_each_node_mask(i, *nodes_allowed) {
2223                 struct page *page, *next;
2224                 struct list_head *freel = &h->hugepage_freelists[i];
2225                 list_for_each_entry_safe(page, next, freel, lru) {
2226                         if (count >= h->nr_huge_pages)
2227                                 return;
2228                         if (PageHighMem(page))
2229                                 continue;
2230                         list_del(&page->lru);
2231                         update_and_free_page(h, page);
2232                         h->free_huge_pages--;
2233                         h->free_huge_pages_node[page_to_nid(page)]--;
2234                 }
2235         }
2236 }
2237 #else
2238 static inline void try_to_free_low(struct hstate *h, unsigned long count,
2239                                                 nodemask_t *nodes_allowed)
2240 {
2241 }
2242 #endif
2243
2244 /*
2245  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
2246  * balanced by operating on them in a round-robin fashion.
2247  * Returns 1 if an adjustment was made.
2248  */
2249 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
2250                                 int delta)
2251 {
2252         int nr_nodes, node;
2253
2254         VM_BUG_ON(delta != -1 && delta != 1);
2255
2256         if (delta < 0) {
2257                 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2258                         if (h->surplus_huge_pages_node[node])
2259                                 goto found;
2260                 }
2261         } else {
2262                 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2263                         if (h->surplus_huge_pages_node[node] <
2264                                         h->nr_huge_pages_node[node])
2265                                 goto found;
2266                 }
2267         }
2268         return 0;
2269
2270 found:
2271         h->surplus_huge_pages += delta;
2272         h->surplus_huge_pages_node[node] += delta;
2273         return 1;
2274 }
2275
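     /* Pages that will stay in the pool, i.e. everything that is not surplus */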
2276 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
2277 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
2278                                                 nodemask_t *nodes_allowed)
2279 {
2280         unsigned long min_count, ret;
2281
2282         if (hstate_is_gigantic(h) && !gigantic_page_supported())
2283                 return h->max_huge_pages;
2284
2285         /*
2286          * Increase the pool size
2287          * First take pages out of surplus state.  Then make up the
2288          * remaining difference by allocating fresh huge pages.
2289          *
2290          * We might race with __alloc_buddy_huge_page() here and be unable
2291          * to convert a surplus huge page to a normal huge page. That is
2292          * not critical, though, it just means the overall size of the
2293          * pool might be one hugepage larger than it needs to be, but
2294          * within all the constraints specified by the sysctls.
2295          */
2296         spin_lock(&hugetlb_lock);
2297         while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
2298                 if (!adjust_pool_surplus(h, nodes_allowed, -1))
2299                         break;
2300         }
2301
2302         while (count > persistent_huge_pages(h)) {
2303                 /*
2304                  * If this allocation races such that we no longer need the
2305                  * page, free_huge_page will handle it by freeing the page
2306                  * and reducing the surplus.
2307                  */
2308                 spin_unlock(&hugetlb_lock);
2309
2310                 /* yield cpu to avoid soft lockup */
2311                 cond_resched();
2312
2313                 if (hstate_is_gigantic(h))
2314                         ret = alloc_fresh_gigantic_page(h, nodes_allowed);
2315                 else
2316                         ret = alloc_fresh_huge_page(h, nodes_allowed);
2317                 spin_lock(&hugetlb_lock);
2318                 if (!ret)
2319                         goto out;
2320
2321                 /* Bail for signals. Probably ctrl-c from user */
2322                 if (signal_pending(current))
2323                         goto out;
2324         }
2325
2326         /*
2327          * Decrease the pool size
2328          * First return free pages to the buddy allocator (being careful
2329          * to keep enough around to satisfy reservations).  Then place
2330          * pages into surplus state as needed so the pool will shrink
2331          * to the desired size as pages become free.
2332          *
2333          * By placing pages into the surplus state independent of the
2334          * overcommit value, we are allowing the surplus pool size to
2335          * exceed overcommit. There are few sane options here. Since
2336          * __alloc_buddy_huge_page() is checking the global counter,
2337          * though, we'll note that we're not allowed to exceed surplus
2338          * and won't grow the pool anywhere else. Not until one of the
2339          * sysctls are changed, or the surplus pages go out of use.
2340          */
2341         min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
2342         min_count = max(count, min_count);
2343         try_to_free_low(h, min_count, nodes_allowed);
2344         while (min_count < persistent_huge_pages(h)) {
2345                 if (!free_pool_huge_page(h, nodes_allowed, 0))
2346                         break;
2347                 cond_resched_lock(&hugetlb_lock);
2348         }
2349         while (count < persistent_huge_pages(h)) {
2350                 if (!adjust_pool_surplus(h, nodes_allowed, 1))
2351                         break;
2352         }
2353 out:
2354         ret = persistent_huge_pages(h);
2355         spin_unlock(&hugetlb_lock);
2356         return ret;
2357 }
2358
2359 #define HSTATE_ATTR_RO(_name) \
2360         static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2361
2362 #define HSTATE_ATTR(_name) \
2363         static struct kobj_attribute _name##_attr = \
2364                 __ATTR(_name, 0644, _name##_show, _name##_store)
2365
2366 static struct kobject *hugepages_kobj;
2367 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2368
2369 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2370
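     /*
      * Map a sysfs kobject back to its hstate; *nidp is set to NUMA_NO_NODE for
      * the global attributes or to the node id for per-node attributes.
      */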
2371 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2372 {
2373         int i;
2374
2375         for (i = 0; i < HUGE_MAX_HSTATE; i++)
2376                 if (hstate_kobjs[i] == kobj) {
2377                         if (nidp)
2378                                 *nidp = NUMA_NO_NODE;
2379                         return &hstates[i];
2380                 }
2381
2382         return kobj_to_node_hstate(kobj, nidp);
2383 }
2384
2385 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2386                                         struct kobj_attribute *attr, char *buf)
2387 {
2388         struct hstate *h;
2389         unsigned long nr_huge_pages;
2390         int nid;
2391
2392         h = kobj_to_hstate(kobj, &nid);
2393         if (nid == NUMA_NO_NODE)
2394                 nr_huge_pages = h->nr_huge_pages;
2395         else
2396                 nr_huge_pages = h->nr_huge_pages_node[nid];
2397
2398         return sprintf(buf, "%lu\n", nr_huge_pages);
2399 }
2400
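     /*
      * Resize the pool on behalf of sysfs or sysctl.  For a per-node attribute
      * the request is converted to a global count, but allocation and freeing
      * are restricted to that node's nodemask.
      */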
2401 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2402                                            struct hstate *h, int nid,
2403                                            unsigned long count, size_t len)
2404 {
2405         int err;
2406         NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
2407
2408         if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
2409                 err = -EINVAL;
2410                 goto out;
2411         }
2412
2413         if (nid == NUMA_NO_NODE) {
2414                 /*
2415                  * global hstate attribute
2416                  */
2417                 if (!(obey_mempolicy &&
2418                                 init_nodemask_of_mempolicy(nodes_allowed))) {
2419                         NODEMASK_FREE(nodes_allowed);
2420                         nodes_allowed = &node_states[N_MEMORY];
2421                 }
2422         } else if (nodes_allowed) {
2423                 /*
2424                  * per node hstate attribute: adjust count to global,
2425                  * but restrict alloc/free to the specified node.
2426                  */
2427                 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2428                 init_nodemask_of_node(nodes_allowed, nid);
2429         } else
2430                 nodes_allowed = &node_states[N_MEMORY];
2431
2432         h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
2433
2434         if (nodes_allowed != &node_states[N_MEMORY])
2435                 NODEMASK_FREE(nodes_allowed);
2436
2437         return len;
2438 out:
2439         NODEMASK_FREE(nodes_allowed);
2440         return err;
2441 }
2442
2443 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2444                                          struct kobject *kobj, const char *buf,
2445                                          size_t len)
2446 {
2447         struct hstate *h;
2448         unsigned long count;
2449         int nid;
2450         int err;
2451
2452         err = kstrtoul(buf, 10, &count);
2453         if (err)
2454                 return err;
2455
2456         h = kobj_to_hstate(kobj, &nid);
2457         return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2458 }
2459
2460 static ssize_t nr_hugepages_show(struct kobject *kobj,
2461                                        struct kobj_attribute *attr, char *buf)
2462 {
2463         return nr_hugepages_show_common(kobj, attr, buf);
2464 }
2465
2466 static ssize_t nr_hugepages_store(struct kobject *kobj,
2467                struct kobj_attribute *attr, const char *buf, size_t len)
2468 {
2469         return nr_hugepages_store_common(false, kobj, buf, len);
2470 }
2471 HSTATE_ATTR(nr_hugepages);
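     /*
      * Exposed as /sys/kernel/mm/hugepages/hugepages-<size>kB/nr_hugepages,
      * e.g. "echo 512 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages"
      * grows (or shrinks) the 2 MB pool to 512 pages.
      */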
2472
2473 #ifdef CONFIG_NUMA
2474
2475 /*
2476  * hstate attribute for optionally mempolicy-based constraint on persistent
2477  * huge page alloc/free.
2478  */
2479 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
2480                                        struct kobj_attribute *attr, char *buf)
2481 {
2482         return nr_hugepages_show_common(kobj, attr, buf);
2483 }
2484
2485 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2486                struct kobj_attribute *attr, const char *buf, size_t len)
2487 {
2488         return nr_hugepages_store_common(true, kobj, buf, len);
2489 }
2490 HSTATE_ATTR(nr_hugepages_mempolicy);
2491 #endif
2492
2493
2494 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2495                                         struct kobj_attribute *attr, char *buf)
2496 {
2497         struct hstate *h = kobj_to_hstate(kobj, NULL);
2498         return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2499 }
2500
2501 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2502                 struct kobj_attribute *attr, const char *buf, size_t count)
2503 {
2504         int err;
2505         unsigned long input;
2506         struct hstate *h = kobj_to_hstate(kobj, NULL);
2507
2508         if (hstate_is_gigantic(h))
2509                 return -EINVAL;
2510
2511         err = kstrtoul(buf, 10, &input);
2512         if (err)
2513                 return err;
2514
2515         spin_lock(&hugetlb_lock);
2516         h->nr_overcommit_huge_pages = input;
2517         spin_unlock(&hugetlb_lock);
2518
2519         return count;
2520 }
2521 HSTATE_ATTR(nr_overcommit_hugepages);
2522
2523 static ssize_t free_hugepages_show(struct kobject *kobj,
2524                                         struct kobj_attribute *attr, char *buf)
2525 {
2526         struct hstate *h;
2527         unsigned long free_huge_pages;
2528         int nid;
2529
2530         h = kobj_to_hstate(kobj, &nid);
2531         if (nid == NUMA_NO_NODE)
2532                 free_huge_pages = h->free_huge_pages;
2533         else
2534                 free_huge_pages = h->free_huge_pages_node[nid];
2535
2536         return sprintf(buf, "%lu\n", free_huge_pages);
2537 }
2538 HSTATE_ATTR_RO(free_hugepages);
2539
2540 static ssize_t resv_hugepages_show(struct kobject *kobj,
2541                                         struct kobj_attribute *attr, char *buf)
2542 {
2543         struct hstate *h = kobj_to_hstate(kobj, NULL);
2544         return sprintf(buf, "%lu\n", h->resv_huge_pages);
2545 }
2546 HSTATE_ATTR_RO(resv_hugepages);
2547
2548 static ssize_t surplus_hugepages_show(struct kobject *kobj,
2549                                         struct kobj_attribute *attr, char *buf)
2550 {
2551         struct hstate *h;
2552         unsigned long surplus_huge_pages;
2553         int nid;
2554
2555         h = kobj_to_hstate(kobj, &nid);
2556         if (nid == NUMA_NO_NODE)
2557                 surplus_huge_pages = h->surplus_huge_pages;
2558         else
2559                 surplus_huge_pages = h->surplus_huge_pages_node[nid];
2560
2561         return sprintf(buf, "%lu\n", surplus_huge_pages);
2562 }
2563 HSTATE_ATTR_RO(surplus_hugepages);
2564
2565 static struct attribute *hstate_attrs[] = {
2566         &nr_hugepages_attr.attr,
2567         &nr_overcommit_hugepages_attr.attr,
2568         &free_hugepages_attr.attr,
2569         &resv_hugepages_attr.attr,
2570         &surplus_hugepages_attr.attr,
2571 #ifdef CONFIG_NUMA
2572         &nr_hugepages_mempolicy_attr.attr,
2573 #endif
2574         NULL,
2575 };
2576
2577 static const struct attribute_group hstate_attr_group = {
2578         .attrs = hstate_attrs,
2579 };
2580
2581 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2582                                     struct kobject **hstate_kobjs,
2583                                     const struct attribute_group *hstate_attr_group)
2584 {
2585         int retval;
2586         int hi = hstate_index(h);
2587
2588         hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2589         if (!hstate_kobjs[hi])
2590                 return -ENOMEM;
2591
2592         retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2593         if (retval)
2594                 kobject_put(hstate_kobjs[hi]);
2595
2596         return retval;
2597 }
2598
2599 static void __init hugetlb_sysfs_init(void)
2600 {
2601         struct hstate *h;
2602         int err;
2603
2604         hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2605         if (!hugepages_kobj)
2606                 return;
2607
2608         for_each_hstate(h) {
2609                 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2610                                          hstate_kobjs, &hstate_attr_group);
2611                 if (err)
2612                         pr_err("Hugetlb: Unable to add hstate %s\n", h->name);
2613         }
2614 }
2615
2616 #ifdef CONFIG_NUMA
2617
2618 /*
2619  * node_hstate/s - associate per node hstate attributes, via their kobjects,
2620  * with node devices in node_devices[] using a parallel array.  The array
2621  * index of a node device or _hstate == node id.
2622  * This is here to avoid any static dependency of the node device driver, in
2623  * the base kernel, on the hugetlb module.
2624  */
2625 struct node_hstate {
2626         struct kobject          *hugepages_kobj;
2627         struct kobject          *hstate_kobjs[HUGE_MAX_HSTATE];
2628 };
2629 static struct node_hstate node_hstates[MAX_NUMNODES];
2630
2631 /*
2632  * A subset of global hstate attributes for node devices
2633  */
2634 static struct attribute *per_node_hstate_attrs[] = {
2635         &nr_hugepages_attr.attr,
2636         &free_hugepages_attr.attr,
2637         &surplus_hugepages_attr.attr,
2638         NULL,
2639 };
2640
2641 static const struct attribute_group per_node_hstate_attr_group = {
2642         .attrs = per_node_hstate_attrs,
2643 };
2644
2645 /*
2646  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
2647  * Returns node id via non-NULL nidp.
2648  */
2649 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2650 {
2651         int nid;
2652
2653         for (nid = 0; nid < nr_node_ids; nid++) {
2654                 struct node_hstate *nhs = &node_hstates[nid];
2655                 int i;
2656                 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2657                         if (nhs->hstate_kobjs[i] == kobj) {
2658                                 if (nidp)
2659                                         *nidp = nid;
2660                                 return &hstates[i];
2661                         }
2662         }
2663
2664         BUG();
2665         return NULL;
2666 }
2667
2668 /*
2669  * Unregister hstate attributes from a single node device.
2670  * No-op if no hstate attributes attached.
2671  */
2672 static void hugetlb_unregister_node(struct node *node)
2673 {
2674         struct hstate *h;
2675         struct node_hstate *nhs = &node_hstates[node->dev.id];
2676
2677         if (!nhs->hugepages_kobj)
2678                 return;         /* no hstate attributes */
2679
2680         for_each_hstate(h) {
2681                 int idx = hstate_index(h);
2682                 if (nhs->hstate_kobjs[idx]) {
2683                         kobject_put(nhs->hstate_kobjs[idx]);
2684                         nhs->hstate_kobjs[idx] = NULL;
2685                 }
2686         }
2687
2688         kobject_put(nhs->hugepages_kobj);
2689         nhs->hugepages_kobj = NULL;
2690 }
2691
2692
2693 /*
2694  * Register hstate attributes for a single node device.
2695  * No-op if attributes already registered.
2696  */
2697 static void hugetlb_register_node(struct node *node)
2698 {
2699         struct hstate *h;
2700         struct node_hstate *nhs = &node_hstates[node->dev.id];
2701         int err;
2702
2703         if (nhs->hugepages_kobj)
2704                 return;         /* already allocated */
2705
2706         nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2707                                                         &node->dev.kobj);
2708         if (!nhs->hugepages_kobj)
2709                 return;
2710
2711         for_each_hstate(h) {
2712                 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2713                                                 nhs->hstate_kobjs,
2714                                                 &per_node_hstate_attr_group);
2715                 if (err) {
2716                         pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2717                                 h->name, node->dev.id);
2718                         hugetlb_unregister_node(node);
2719                         break;
2720                 }
2721         }
2722 }
2723
2724 /*
2725  * hugetlb init time:  register hstate attributes for all registered node
2726  * devices of nodes that have memory.  All on-line nodes should have
2727  * registered their associated device by this time.
2728  */
2729 static void __init hugetlb_register_all_nodes(void)
2730 {
2731         int nid;
2732
2733         for_each_node_state(nid, N_MEMORY) {
2734                 struct node *node = node_devices[nid];
2735                 if (node->dev.id == nid)
2736                         hugetlb_register_node(node);
2737         }
2738
2739         /*
2740          * Let the node device driver know we're here so it can
2741          * [un]register hstate attributes on node hotplug.
2742          */
2743         register_hugetlbfs_with_node(hugetlb_register_node,
2744                                      hugetlb_unregister_node);
2745 }
2746 #else   /* !CONFIG_NUMA */
2747
2748 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2749 {
2750         BUG();
2751         if (nidp)
2752                 *nidp = -1;
2753         return NULL;
2754 }
2755
2756 static void hugetlb_register_all_nodes(void) { }
2757
2758 #endif
2759
2760 static int __init hugetlb_init(void)
2761 {
2762         int i;
2763
2764         if (!hugepages_supported())
2765                 return 0;
2766
2767         if (!size_to_hstate(default_hstate_size)) {
2768                 if (default_hstate_size != 0) {
2769                         pr_err("HugeTLB: unsupported default_hugepagesz %lu. Reverting to %lu\n",
2770                                default_hstate_size, HPAGE_SIZE);
2771                 }
2772
2773                 default_hstate_size = HPAGE_SIZE;
2774                 if (!size_to_hstate(default_hstate_size))
2775                         hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2776         }
2777         default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2778         if (default_hstate_max_huge_pages) {
2779                 if (!default_hstate.max_huge_pages)
2780                         default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2781         }
2782
2783         hugetlb_init_hstates();
2784         gather_bootmem_prealloc();
2785         report_hugepages();
2786
2787         hugetlb_sysfs_init();
2788         hugetlb_register_all_nodes();
2789         hugetlb_cgroup_file_init();
2790
2791 #ifdef CONFIG_SMP
2792         num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2793 #else
2794         num_fault_mutexes = 1;
2795 #endif
2796         hugetlb_fault_mutex_table =
2797                 kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
2798         BUG_ON(!hugetlb_fault_mutex_table);
2799
2800         for (i = 0; i < num_fault_mutexes; i++)
2801                 mutex_init(&hugetlb_fault_mutex_table[i]);
2802         return 0;
2803 }
2804 subsys_initcall(hugetlb_init);
2805
2806 /* Should be called on processing a hugepagesz=... option */
2807 void __init hugetlb_bad_size(void)
2808 {
2809         parsed_valid_hugepagesz = false;
2810 }
2811
2812 void __init hugetlb_add_hstate(unsigned int order)
2813 {
2814         struct hstate *h;
2815         unsigned long i;
2816
2817         if (size_to_hstate(PAGE_SIZE << order)) {
2818                 pr_warn("hugepagesz= specified twice, ignoring\n");
2819                 return;
2820         }
2821         BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2822         BUG_ON(order == 0);
2823         h = &hstates[hugetlb_max_hstate++];
2824         h->order = order;
2825         h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2826         h->nr_huge_pages = 0;
2827         h->free_huge_pages = 0;
2828         for (i = 0; i < MAX_NUMNODES; ++i)
2829                 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2830         INIT_LIST_HEAD(&h->hugepage_activelist);
2831         h->next_nid_to_alloc = first_memory_node;
2832         h->next_nid_to_free = first_memory_node;
2833         snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2834                                         huge_page_size(h)/1024);
2835
2836         parsed_hstate = h;
2837 }
2838
2839 static int __init hugetlb_nrpages_setup(char *s)
2840 {
2841         unsigned long *mhp;
2842         static unsigned long *last_mhp;
2843
2844         if (!parsed_valid_hugepagesz) {
2845                 pr_warn("hugepages = %s preceded by "
2846                         "an unsupported hugepagesz, ignoring\n", s);
2847                 parsed_valid_hugepagesz = true;
2848                 return 1;
2849         }
2850         /*
2851          * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2852          * so this hugepages= parameter goes to the "default hstate".
2853          */
2854         else if (!hugetlb_max_hstate)
2855                 mhp = &default_hstate_max_huge_pages;
2856         else
2857                 mhp = &parsed_hstate->max_huge_pages;
2858
2859         if (mhp == last_mhp) {
2860                 pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n");
2861                 return 1;
2862         }
2863
2864         if (sscanf(s, "%lu", mhp) <= 0)
2865                 *mhp = 0;
2866
2867         /*
2868          * Global state is always initialized later in hugetlb_init.
2869          * But we need to allocate >= MAX_ORDER hstates here early to still
2870          * use the bootmem allocator.
2871          */
2872         if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2873                 hugetlb_hstate_alloc_pages(parsed_hstate);
2874
2875         last_mhp = mhp;
2876
2877         return 1;
2878 }
2879 __setup("hugepages=", hugetlb_nrpages_setup);
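     /*
      * Example: booting with "hugepages=512 hugepagesz=1G hugepages=4" reserves
      * 512 default-sized huge pages plus four 1 GB pages.
      */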
2880
2881 static int __init hugetlb_default_setup(char *s)
2882 {
2883         default_hstate_size = memparse(s, &s);
2884         return 1;
2885 }
2886 __setup("default_hugepagesz=", hugetlb_default_setup);
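
/*
 * Illustrative boot command line showing how the three options interact:
 *
 *	default_hugepagesz=2M hugepagesz=2M hugepages=512 hugepagesz=1G hugepages=4
 *
 * Each hugepages= value applies to the most recently parsed hugepagesz=
 * (parsed_hstate); a hugepages= that appears before any hugepagesz= is
 * stored in default_hstate_max_huge_pages and applied to the default hstate
 * (selected by default_hugepagesz=) later in hugetlb_init().
 */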
2887
2888 static unsigned int cpuset_mems_nr(unsigned int *array)
2889 {
2890         int node;
2891         unsigned int nr = 0;
2892
2893         for_each_node_mask(node, cpuset_current_mems_allowed)
2894                 nr += array[node];
2895
2896         return nr;
2897 }
2898
2899 #ifdef CONFIG_SYSCTL
2900 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2901                          struct ctl_table *table, int write,
2902                          void __user *buffer, size_t *length, loff_t *ppos)
2903 {
2904         struct hstate *h = &default_hstate;
2905         unsigned long tmp = h->max_huge_pages;
2906         int ret;
2907
2908         if (!hugepages_supported())
2909                 return -EOPNOTSUPP;
2910
2911         table->data = &tmp;
2912         table->maxlen = sizeof(unsigned long);
2913         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2914         if (ret)
2915                 goto out;
2916
2917         if (write)
2918                 ret = __nr_hugepages_store_common(obey_mempolicy, h,
2919                                                   NUMA_NO_NODE, tmp, *length);
2920 out:
2921         return ret;
2922 }
2923
2924 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2925                           void __user *buffer, size_t *length, loff_t *ppos)
2926 {
2927
2928         return hugetlb_sysctl_handler_common(false, table, write,
2929                                                         buffer, length, ppos);
2930 }
2931
2932 #ifdef CONFIG_NUMA
2933 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2934                           void __user *buffer, size_t *length, loff_t *ppos)
2935 {
2936         return hugetlb_sysctl_handler_common(true, table, write,
2937                                                         buffer, length, ppos);
2938 }
2939 #endif /* CONFIG_NUMA */
2940
2941 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2942                         void __user *buffer,
2943                         size_t *length, loff_t *ppos)
2944 {
2945         struct hstate *h = &default_hstate;
2946         unsigned long tmp;
2947         int ret;
2948
2949         if (!hugepages_supported())
2950                 return -EOPNOTSUPP;
2951
2952         tmp = h->nr_overcommit_huge_pages;
2953
2954         if (write && hstate_is_gigantic(h))
2955                 return -EINVAL;
2956
2957         table->data = &tmp;
2958         table->maxlen = sizeof(unsigned long);
2959         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2960         if (ret)
2961                 goto out;
2962
2963         if (write) {
2964                 spin_lock(&hugetlb_lock);
2965                 h->nr_overcommit_huge_pages = tmp;
2966                 spin_unlock(&hugetlb_lock);
2967         }
2968 out:
2969         return ret;
2970 }
2971
2972 #endif /* CONFIG_SYSCTL */
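
/*
 * The handlers above back /proc/sys/vm/nr_hugepages, nr_hugepages_mempolicy
 * and nr_overcommit_hugepages.  A minimal userspace sketch of resizing the
 * default pool through procfs (error handling trimmed for brevity):
 *
 *	#include <stdio.h>
 *
 *	int set_nr_hugepages(unsigned long nr)
 *	{
 *		FILE *f = fopen("/proc/sys/vm/nr_hugepages", "w");
 *
 *		if (!f)
 *			return -1;
 *		fprintf(f, "%lu\n", nr);
 *		return fclose(f);
 *	}
 */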
2973
2974 void hugetlb_report_meminfo(struct seq_file *m)
2975 {
2976         struct hstate *h;
2977         unsigned long total = 0;
2978
2979         if (!hugepages_supported())
2980                 return;
2981
2982         for_each_hstate(h) {
2983                 unsigned long count = h->nr_huge_pages;
2984
2985                 total += (PAGE_SIZE << huge_page_order(h)) * count;
2986
2987                 if (h == &default_hstate)
2988                         seq_printf(m,
2989                                    "HugePages_Total:   %5lu\n"
2990                                    "HugePages_Free:    %5lu\n"
2991                                    "HugePages_Rsvd:    %5lu\n"
2992                                    "HugePages_Surp:    %5lu\n"
2993                                    "Hugepagesize:   %8lu kB\n",
2994                                    count,
2995                                    h->free_huge_pages,
2996                                    h->resv_huge_pages,
2997                                    h->surplus_huge_pages,
2998                                    (PAGE_SIZE << huge_page_order(h)) / 1024);
2999         }
3000
3001         seq_printf(m, "Hugetlb:        %8lu kB\n", total / 1024);
3002 }
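
/*
 * Example of the /proc/meminfo output produced above (values illustrative:
 * a 2048 kB default hstate with 512 pages, none reserved or surplus):
 *
 *	HugePages_Total:     512
 *	HugePages_Free:      512
 *	HugePages_Rsvd:        0
 *	HugePages_Surp:        0
 *	Hugepagesize:       2048 kB
 *	Hugetlb:         1048576 kB
 */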
3003
3004 int hugetlb_report_node_meminfo(int nid, char *buf)
3005 {
3006         struct hstate *h = &default_hstate;
3007         if (!hugepages_supported())
3008                 return 0;
3009         return sprintf(buf,
3010                 "Node %d HugePages_Total: %5u\n"
3011                 "Node %d HugePages_Free:  %5u\n"
3012                 "Node %d HugePages_Surp:  %5u\n",
3013                 nid, h->nr_huge_pages_node[nid],
3014                 nid, h->free_huge_pages_node[nid],
3015                 nid, h->surplus_huge_pages_node[nid]);
3016 }
3017
3018 void hugetlb_show_meminfo(void)
3019 {
3020         struct hstate *h;
3021         int nid;
3022
3023         if (!hugepages_supported())
3024                 return;
3025
3026         for_each_node_state(nid, N_MEMORY)
3027                 for_each_hstate(h)
3028                         pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
3029                                 nid,
3030                                 h->nr_huge_pages_node[nid],
3031                                 h->free_huge_pages_node[nid],
3032                                 h->surplus_huge_pages_node[nid],
3033                                 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
3034 }
3035
3036 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
3037 {
3038         seq_printf(m, "HugetlbPages:\t%8lu kB\n",
3039                    atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
3040 }
3041
3042 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
3043 unsigned long hugetlb_total_pages(void)
3044 {
3045         struct hstate *h;
3046         unsigned long nr_total_pages = 0;
3047
3048         for_each_hstate(h)
3049                 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
3050         return nr_total_pages;
3051 }
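
/*
 * Worked example: with a single hstate of 2 MiB pages (512 base pages each)
 * and nr_huge_pages == 512, hugetlb_total_pages() returns 512 * 512 = 262144
 * PAGE_SIZE pages, i.e. 1 GiB of memory held by the hugetlb pool.
 */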
3052
3053 static int hugetlb_acct_memory(struct hstate *h, long delta)
3054 {
3055         int ret = -ENOMEM;
3056
3057         spin_lock(&hugetlb_lock);
3058         /*
3059          * When cpusets are configured, they break the strict hugetlb page
3060          * reservation because the accounting is done on a global variable.
3061          * Such a reservation is meaningless in the presence of cpusets:
3062          * it is never checked against the page availability of the
3063          * current cpuset, so an application can still be OOM-killed by
3064          * the kernel when the cpuset it runs in has no free hugetlb pages.
3065          * Enforcing strict accounting with cpusets is nearly impossible
3066          * (or too ugly) because cpusets are too fluid: tasks and memory
3067          * nodes can be moved between cpusets dynamically.
3068          *
3069          * This change of semantics for shared hugetlb mappings with
3070          * cpusets is undesirable.  However, to preserve some of the
3071          * semantics, we fall back to checking the current free page
3072          * count as a best effort, hopefully minimizing the impact of
3073          * the semantics change that cpusets introduce.
3074          */
3075         if (delta > 0) {
3076                 if (gather_surplus_pages(h, delta) < 0)
3077                         goto out;
3078
3079                 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
3080                         return_unused_surplus_pages(h, delta);
3081                         goto out;
3082                 }
3083         }
3084
3085         ret = 0;
3086         if (delta < 0)
3087                 return_unused_surplus_pages(h, (unsigned long) -delta);
3088
3089 out:
3090         spin_unlock(&hugetlb_lock);
3091         return ret;
3092 }
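
/*
 * Sign convention, as used by the callers in this file (sketch only; npages
 * is a hypothetical reservation size).  A positive delta reserves pages and
 * may fail, a negative delta releases a reservation and always succeeds:
 *
 *	if (hugetlb_acct_memory(h, npages))
 *		return -ENOMEM;
 *	...
 *	hugetlb_acct_memory(h, -npages);
 */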
3093
3094 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
3095 {
3096         struct resv_map *resv = vma_resv_map(vma);
3097
3098         /*
3099          * This new VMA should share its sibling's reservation map if present.
3100          * The VMA will only ever have a valid reservation map pointer where
3101          * it is being copied for another still existing VMA.  As that VMA
3102          * has a reference to the reservation map it cannot disappear until
3103          * after this open call completes.  It is therefore safe to take a
3104          * new reference here without additional locking.
3105          */
3106         if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3107                 kref_get(&resv->refs);
3108 }
3109
3110 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
3111 {
3112         struct hstate *h = hstate_vma(vma);
3113         struct resv_map *resv = vma_resv_map(vma);
3114         struct hugepage_subpool *spool = subpool_vma(vma);
3115         unsigned long reserve, start, end;
3116         long gbl_reserve;
3117
3118         if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3119                 return;
3120
3121         start = vma_hugecache_offset(h, vma, vma->vm_start);
3122         end = vma_hugecache_offset(h, vma, vma->vm_end);
3123
3124         reserve = (end - start) - region_count(resv, start, end);
3125
3126         kref_put(&resv->refs, resv_map_release);
3127
3128         if (reserve) {
3129                 /*
3130                  * Decrement reserve counts.  The global reserve count may be
3131                  * adjusted if the subpool has a minimum size.
3132                  */
3133                 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
3134                 hugetlb_acct_memory(h, -gbl_reserve);
3135         }
3136 }
3137
3138 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
3139 {
3140         if (addr & ~(huge_page_mask(hstate_vma(vma))))
3141                 return -EINVAL;
3142         return 0;
3143 }
3144
3145 /*
3146  * We cannot handle pagefaults against hugetlb pages at all.  They cause
3147  * handle_mm_fault() to try to instantiate regular-sized pages in the
3148  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
3149  * this far.
3150  */
3151 static int hugetlb_vm_op_fault(struct vm_fault *vmf)
3152 {
3153         BUG();
3154         return 0;
3155 }
3156
3157 const struct vm_operations_struct hugetlb_vm_ops = {
3158         .fault = hugetlb_vm_op_fault,
3159         .open = hugetlb_vm_op_open,
3160         .close = hugetlb_vm_op_close,
3161         .split = hugetlb_vm_op_split,
3162 };
3163
3164 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
3165                                 int writable)
3166 {
3167         pte_t entry;
3168
3169         if (writable) {
3170                 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3171                                          vma->vm_page_prot)));
3172         } else {
3173                 entry = huge_pte_wrprotect(mk_huge_pte(page,
3174                                            vma->vm_page_prot));
3175         }
3176         entry = pte_mkyoung(entry);
3177         entry = pte_mkhuge(entry);
3178         entry = arch_make_huge_pte(entry, vma, page, writable);
3179
3180         return entry;
3181 }
3182
3183 static void set_huge_ptep_writable(struct vm_area_struct *vma,
3184                                    unsigned long address, pte_t *ptep)
3185 {
3186         pte_t entry;
3187
3188         entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
3189         if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
3190                 update_mmu_cache(vma, address, ptep);
3191 }
3192
3193 bool is_hugetlb_entry_migration(pte_t pte)
3194 {
3195         swp_entry_t swp;
3196
3197         if (huge_pte_none(pte) || pte_present(pte))
3198                 return false;
3199         swp = pte_to_swp_entry(pte);
3200         if (non_swap_entry(swp) && is_migration_entry(swp))
3201                 return true;
3202         else
3203                 return false;
3204 }
3205
3206 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
3207 {
3208         swp_entry_t swp;
3209
3210         if (huge_pte_none(pte) || pte_present(pte))
3211                 return 0;
3212         swp = pte_to_swp_entry(pte);
3213         if (non_swap_entry(swp) && is_hwpoison_entry(swp))
3214                 return 1;
3215         else
3216                 return 0;
3217 }
3218
3219 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3220                             struct vm_area_struct *vma)
3221 {
3222         pte_t *src_pte, *dst_pte, entry;
3223         struct page *ptepage;
3224         unsigned long addr;
3225         int cow;
3226         struct hstate *h = hstate_vma(vma);
3227         unsigned long sz = huge_page_size(h);
3228         unsigned long mmun_start;       /* For mmu_notifiers */
3229         unsigned long mmun_end;         /* For mmu_notifiers */
3230         int ret = 0;
3231
3232         cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
3233
3234         mmun_start = vma->vm_start;
3235         mmun_end = vma->vm_end;
3236         if (cow)
3237                 mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
3238
3239         for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3240                 spinlock_t *src_ptl, *dst_ptl;
3241                 src_pte = huge_pte_offset(src, addr, sz);
3242                 if (!src_pte)
3243                         continue;
3244                 dst_pte = huge_pte_alloc(dst, addr, sz);
3245                 if (!dst_pte) {
3246                         ret = -ENOMEM;
3247                         break;
3248                 }
3249
3250                 /* If the pagetables are shared don't copy or take references */
3251                 if (dst_pte == src_pte)
3252                         continue;
3253
3254                 dst_ptl = huge_pte_lock(h, dst, dst_pte);
3255                 src_ptl = huge_pte_lockptr(h, src, src_pte);
3256                 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
3257                 entry = huge_ptep_get(src_pte);
3258                 if (huge_pte_none(entry)) { /* skip none entry */
3259                         ;
3260                 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
3261                                     is_hugetlb_entry_hwpoisoned(entry))) {
3262                         swp_entry_t swp_entry = pte_to_swp_entry(entry);
3263
3264                         if (is_write_migration_entry(swp_entry) && cow) {
3265                                 /*
3266                                  * COW mappings require pages in both
3267                                  * parent and child to be set to read.
3268                                  */
3269                                 make_migration_entry_read(&swp_entry);
3270                                 entry = swp_entry_to_pte(swp_entry);
3271                                 set_huge_swap_pte_at(src, addr, src_pte,
3272                                                      entry, sz);
3273                         }
3274                         set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
3275                 } else {
3276                         if (cow) {
3277                                 /*
3278                                  * No need to notify as we are downgrading page
3279                                  * table protection not changing it to point
3280                                  * to a new page.
3281                                  *
3282                                  * See Documentation/vm/mmu_notifier.txt
3283                                  */
3284                                 huge_ptep_set_wrprotect(src, addr, src_pte);
3285                         }
3286                         entry = huge_ptep_get(src_pte);
3287                         ptepage = pte_page(entry);
3288                         get_page(ptepage);
3289                         page_dup_rmap(ptepage, true);
3290                         set_huge_pte_at(dst, addr, dst_pte, entry);
3291                         hugetlb_count_add(pages_per_huge_page(h), dst);
3292                 }
3293                 spin_unlock(src_ptl);
3294                 spin_unlock(dst_ptl);
3295         }
3296
3297         if (cow)
3298                 mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
3299
3300         return ret;
3301 }
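
/*
 * A quick sketch of the "cow" test at the top of copy_hugetlb_page_range()
 * (illustrative): the copy is treated as copy-on-write only for private,
 * writable mappings:
 *
 *	vm_flags contain			cow
 *	VM_MAYWRITE (private, writable)		1
 *	VM_SHARED | VM_MAYWRITE (shared)	0
 *	neither (private, read-only)		0
 */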
3302
3303 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3304                             unsigned long start, unsigned long end,
3305                             struct page *ref_page)
3306 {
3307         struct mm_struct *mm = vma->vm_mm;
3308         unsigned long address;
3309         pte_t *ptep;
3310         pte_t pte;
3311         spinlock_t *ptl;
3312         struct page *page;
3313         struct hstate *h = hstate_vma(vma);
3314         unsigned long sz = huge_page_size(h);
3315         const unsigned long mmun_start = start; /* For mmu_notifiers */
3316         const unsigned long mmun_end   = end;   /* For mmu_notifiers */
3317
3318         WARN_ON(!is_vm_hugetlb_page(vma));
3319         BUG_ON(start & ~huge_page_mask(h));
3320         BUG_ON(end & ~huge_page_mask(h));
3321
3322         /*
3323          * This is a hugetlb vma; all the pte entries should point
3324          * to huge pages.
3325          */
3326         tlb_remove_check_page_size_change(tlb, sz);
3327         tlb_start_vma(tlb, vma);
3328         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3329         address = start;
3330         for (; address < end; address += sz) {
3331                 ptep = huge_pte_offset(mm, address, sz);
3332                 if (!ptep)
3333                         continue;
3334
3335                 ptl = huge_pte_lock(h, mm, ptep);
3336                 if (huge_pmd_unshare(mm, &address, ptep)) {
3337                         spin_unlock(ptl);
3338                         continue;
3339                 }
3340
3341                 pte = huge_ptep_get(ptep);
3342                 if (huge_pte_none(pte)) {
3343                         spin_unlock(ptl);
3344                         continue;
3345                 }
3346
3347                 /*
3348                  * A migrating or HWPoisoned hugepage is already unmapped
3349                  * and its refcount has been dropped, so just clear the pte here.
3350                  */
3351                 if (unlikely(!pte_present(pte))) {
3352                         huge_pte_clear(mm, address, ptep, sz);
3353                         spin_unlock(ptl);
3354                         continue;
3355                 }
3356
3357                 page = pte_page(pte);
3358                 /*
3359                  * If a reference page is supplied, it is because a specific
3360                  * page is being unmapped, not a range. Ensure the page we
3361                  * are about to unmap is the actual page of interest.
3362                  */
3363                 if (ref_page) {
3364                         if (page != ref_page) {
3365                                 spin_unlock(ptl);
3366                                 continue;
3367                         }
3368                         /*
3369                          * Mark the VMA as having unmapped its page so that
3370                          * future faults in this VMA will fail rather than
3371                          * looking like data was lost
3372                          */
3373                         set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
3374                 }
3375
3376                 pte = huge_ptep_get_and_clear(mm, address, ptep);
3377                 tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
3378                 if (huge_pte_dirty(pte))
3379                         set_page_dirty(page);
3380
3381                 hugetlb_count_sub(pages_per_huge_page(h), mm);
3382                 page_remove_rmap(page, true);
3383
3384                 spin_unlock(ptl);
3385                 tlb_remove_page_size(tlb, page, huge_page_size(h));
3386                 /*
3387                  * Bail out after unmapping reference page if supplied
3388                  */
3389                 if (ref_page)
3390                         break;
3391         }
3392         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3393         tlb_end_vma(tlb, vma);
3394 }
3395
3396 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
3397                           struct vm_area_struct *vma, unsigned long start,
3398                           unsigned long end, struct page *ref_page)
3399 {
3400         __unmap_hugepage_range(tlb, vma, start, end, ref_page);
3401
3402         /*
3403          * Clear this flag so that x86's huge_pmd_share page_table_shareable
3404          * test will fail on a vma being torn down, and not grab a page table
3405          * on its way out.  We're lucky that the flag has such an appropriate
3406          * name, and can in fact be safely cleared here. We could clear it
3407          * before the __unmap_hugepage_range above, but all that's necessary
3408          * is to clear it before releasing the i_mmap_rwsem. This works
3409          * because in the context this is called, the VMA is about to be
3410          * destroyed and the i_mmap_rwsem is held.
3411          */
3412         vma->vm_flags &= ~VM_MAYSHARE;
3413 }
3414
3415 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
3416                           unsigned long end, struct page *ref_page)
3417 {
3418         struct mm_struct *mm;
3419         struct mmu_gather tlb;
3420
3421         mm = vma->vm_mm;
3422
3423         tlb_gather_mmu(&tlb, mm, start, end);
3424         __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
3425         tlb_finish_mmu(&tlb, start, end);
3426 }
3427
3428 /*
3429  * This is called when the original mapper is failing to COW a MAP_PRIVATE
3430  * mappping it owns the reserve page for. The intention is to unmap the page
3431  * mapping it owns the reserve page for. The intention is to unmap the page
3432  * same region.
3433  */
3434 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
3435                               struct page *page, unsigned long address)
3436 {
3437         struct hstate *h = hstate_vma(vma);
3438         struct vm_area_struct *iter_vma;
3439         struct address_space *mapping;
3440         pgoff_t pgoff;
3441
3442         /*
3443          * vm_pgoff is in PAGE_SIZE units, hence the different calculation
3444          * from page cache lookup which is in HPAGE_SIZE units.
3445          */
3446         address = address & huge_page_mask(h);
3447         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
3448                         vma->vm_pgoff;
3449         mapping = vma->vm_file->f_mapping;
3450
3451         /*
3452          * Take the mapping lock for the duration of the table walk. As
3453          * this mapping should be shared between all the VMAs,
3454          * __unmap_hugepage_range() is called as the lock is already held
3455          */
3456         i_mmap_lock_write(mapping);
3457         vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
3458                 /* Do not unmap the current VMA */
3459                 if (iter_vma == vma)
3460                         continue;
3461
3462                 /*
3463                  * Shared VMAs have their own reserves and do not affect
3464                  * MAP_PRIVATE accounting but it is possible that a shared
3465                  * VMA is using the same page so check and skip such VMAs.
3466                  */
3467                 if (iter_vma->vm_flags & VM_MAYSHARE)
3468                         continue;
3469
3470                 /*
3471                  * Unmap the page from other VMAs without their own reserves.
3472                  * They get marked to be SIGKILLed if they fault in these
3473                  * areas. This is because a future no-page fault on this VMA
3474                  * could insert a zeroed page instead of the data existing
3475                  * from the time of fork. This would look like data corruption
3476                  */
3477                 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
3478                         unmap_hugepage_range(iter_vma, address,
3479                                              address + huge_page_size(h), page);
3480         }
3481         i_mmap_unlock_write(mapping);
3482 }
3483
3484 /*
3485  * Hugetlb_cow() should be called with page lock of the original hugepage held.
3486  * Called with hugetlb_instantiation_mutex held and pte_page locked so we
3487  * cannot race with other handlers or page migration.
3488  * Keep the pte_same checks anyway to make transition from the mutex easier.
3489  */
3490 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
3491                        unsigned long address, pte_t *ptep,
3492                        struct page *pagecache_page, spinlock_t *ptl)
3493 {
3494         pte_t pte;
3495         struct hstate *h = hstate_vma(vma);
3496         struct page *old_page, *new_page;
3497         int ret = 0, outside_reserve = 0;
3498         unsigned long mmun_start;       /* For mmu_notifiers */
3499         unsigned long mmun_end;         /* For mmu_notifiers */
3500
3501         pte = huge_ptep_get(ptep);
3502         old_page = pte_page(pte);
3503
3504 retry_avoidcopy:
3505         /* If no-one else is actually using this page, avoid the copy
3506          * and just make the page writable */
3507         if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
3508                 page_move_anon_rmap(old_page, vma);
3509                 set_huge_ptep_writable(vma, address, ptep);
3510                 return 0;
3511         }
3512
3513         /*
3514          * If the process that created a MAP_PRIVATE mapping is about to
3515          * perform a COW due to a shared page count, attempt to satisfy
3516          * the allocation without using the existing reserves. The pagecache
3517          * page is used to determine if the reserve at this address was
3518          * consumed or not. If reserves were used, a partial faulted mapping
3519          * at the time of fork() could consume its reserves on COW instead
3520          * of the full address range.
3521          */
3522         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
3523                         old_page != pagecache_page)
3524                 outside_reserve = 1;
3525
3526         get_page(old_page);
3527
3528         /*
3529          * Drop page table lock as buddy allocator may be called. It will
3530          * be acquired again before returning to the caller, as expected.
3531          */
3532         spin_unlock(ptl);
3533         new_page = alloc_huge_page(vma, address, outside_reserve);
3534
3535         if (IS_ERR(new_page)) {
3536                 /*
3537                  * If a process owning a MAP_PRIVATE mapping fails to COW,
3538                  * it is due to references held by a child and an insufficient
3539                  * huge page pool. To guarantee the original mappers
3540                  * huge page pool. To guarantee the original mapper's
3541                  * may get SIGKILLed if it later faults.
3542                  */
3543                 if (outside_reserve) {
3544                         put_page(old_page);
3545                         BUG_ON(huge_pte_none(pte));
3546                         unmap_ref_private(mm, vma, old_page, address);
3547                         BUG_ON(huge_pte_none(pte));
3548                         spin_lock(ptl);
3549                         ptep = huge_pte_offset(mm, address & huge_page_mask(h),
3550                                                huge_page_size(h));
3551                         if (likely(ptep &&
3552                                    pte_same(huge_ptep_get(ptep), pte)))
3553                                 goto retry_avoidcopy;
3554                         /*
3555                          * race occurs while re-acquiring page table
3556                          * lock, and our job is done.
3557                          */
3558                         return 0;
3559                 }
3560
3561                 ret = (PTR_ERR(new_page) == -ENOMEM) ?
3562                         VM_FAULT_OOM : VM_FAULT_SIGBUS;
3563                 goto out_release_old;
3564         }
3565
3566         /*
3567          * When the original hugepage is a shared one, it does not have
3568          * an anon_vma prepared.
3569          */
3570         if (unlikely(anon_vma_prepare(vma))) {
3571                 ret = VM_FAULT_OOM;
3572                 goto out_release_all;
3573         }
3574
3575         copy_user_huge_page(new_page, old_page, address, vma,
3576                             pages_per_huge_page(h));
3577         __SetPageUptodate(new_page);
3578         set_page_huge_active(new_page);
3579
3580         mmun_start = address & huge_page_mask(h);
3581         mmun_end = mmun_start + huge_page_size(h);
3582         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3583
3584         /*
3585          * Retake the page table lock to check for racing updates
3586          * before the page tables are altered
3587          */
3588         spin_lock(ptl);
3589         ptep = huge_pte_offset(mm, address & huge_page_mask(h),
3590                                huge_page_size(h));
3591         if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
3592                 ClearPagePrivate(new_page);
3593
3594                 /* Break COW */
3595                 huge_ptep_clear_flush(vma, address, ptep);
3596                 mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
3597                 set_huge_pte_at(mm, address, ptep,
3598                                 make_huge_pte(vma, new_page, 1));
3599                 page_remove_rmap(old_page, true);
3600                 hugepage_add_new_anon_rmap(new_page, vma, address);
3601                 /* Make the old page be freed below */
3602                 new_page = old_page;
3603         }
3604         spin_unlock(ptl);
3605         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3606 out_release_all:
3607         restore_reserve_on_error(h, vma, address, new_page);
3608         put_page(new_page);
3609 out_release_old:
3610         put_page(old_page);
3611
3612         spin_lock(ptl); /* Caller expects lock to be held */
3613         return ret;
3614 }
3615
3616 /* Return the pagecache page at a given address within a VMA */
3617 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3618                         struct vm_area_struct *vma, unsigned long address)
3619 {
3620         struct address_space *mapping;
3621         pgoff_t idx;
3622
3623         mapping = vma->vm_file->f_mapping;
3624         idx = vma_hugecache_offset(h, vma, address);
3625
3626         return find_lock_page(mapping, idx);
3627 }
3628
3629 /*
3630  * Return whether there is a pagecache page to back the given address within the VMA.
3631  * The caller, follow_hugetlb_page(), holds page_table_lock, so we cannot lock_page.
3632  */
3633 static bool hugetlbfs_pagecache_present(struct hstate *h,
3634                         struct vm_area_struct *vma, unsigned long address)
3635 {
3636         struct address_space *mapping;
3637         pgoff_t idx;
3638         struct page *page;
3639
3640         mapping = vma->vm_file->f_mapping;
3641         idx = vma_hugecache_offset(h, vma, address);
3642
3643         page = find_get_page(mapping, idx);
3644         if (page)
3645                 put_page(page);
3646         return page != NULL;
3647 }
3648
3649 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
3650                            pgoff_t idx)
3651 {
3652         struct inode *inode = mapping->host;
3653         struct hstate *h = hstate_inode(inode);
3654         int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3655
3656         if (err)
3657                 return err;
3658         ClearPagePrivate(page);
3659
3660         spin_lock(&inode->i_lock);
3661         inode->i_blocks += blocks_per_huge_page(h);
3662         spin_unlock(&inode->i_lock);
3663         return 0;
3664 }
3665
3666 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
3667                            struct address_space *mapping, pgoff_t idx,
3668                            unsigned long address, pte_t *ptep, unsigned int flags)
3669 {
3670         struct hstate *h = hstate_vma(vma);
3671         int ret = VM_FAULT_SIGBUS;
3672         int anon_rmap = 0;
3673         unsigned long size;
3674         struct page *page;
3675         pte_t new_pte;
3676         spinlock_t *ptl;
3677
3678         /*
3679          * Currently, we are forced to kill the process in the event the
3680          * original mapper has unmapped pages from the child due to a failed
3681          * COW. Warn that such a situation has occurred as it may not be obvious
3682          */
3683         if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3684                 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
3685                            current->pid);
3686                 return ret;
3687         }
3688
3689         /*
3690          * Use page lock to guard against racing truncation
3691          * before we get page_table_lock.
3692          */
3693 retry:
3694         page = find_lock_page(mapping, idx);
3695         if (!page) {
3696                 size = i_size_read(mapping->host) >> huge_page_shift(h);
3697                 if (idx >= size)
3698                         goto out;
3699
3700                 /*
3701                  * Check for page in userfault range
3702                  */
3703                 if (userfaultfd_missing(vma)) {
3704                         u32 hash;
3705                         struct vm_fault vmf = {
3706                                 .vma = vma,
3707                                 .address = address,
3708                                 .flags = flags,
3709                                 /*
3710                                  * Hard to debug if it ends up being
3711                                  * used by a callee that assumes
3712                                  * something about the other
3713                                  * uninitialized fields... same as in
3714                                  * memory.c
3715                                  */
3716                         };
3717
3718                         /*
3719                          * hugetlb_fault_mutex must be dropped before
3720                          * handling userfault.  Reacquire after handling
3721                          * fault to make calling code simpler.
3722                          */
3723                         hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping,
3724                                                         idx, address);
3725                         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3726                         ret = handle_userfault(&vmf, VM_UFFD_MISSING);
3727                         mutex_lock(&hugetlb_fault_mutex_table[hash]);
3728                         goto out;
3729                 }
3730
3731                 page = alloc_huge_page(vma, address, 0);
3732                 if (IS_ERR(page)) {
3733                         ret = PTR_ERR(page);
3734                         if (ret == -ENOMEM)
3735                                 ret = VM_FAULT_OOM;
3736                         else
3737                                 ret = VM_FAULT_SIGBUS;
3738                         goto out;
3739                 }
3740                 clear_huge_page(page, address, pages_per_huge_page(h));
3741                 __SetPageUptodate(page);
3742                 set_page_huge_active(page);
3743
3744                 if (vma->vm_flags & VM_MAYSHARE) {
3745                         int err = huge_add_to_page_cache(page, mapping, idx);
3746                         if (err) {
3747                                 put_page(page);
3748                                 if (err == -EEXIST)
3749                                         goto retry;
3750                                 goto out;
3751                         }
3752                 } else {
3753                         lock_page(page);
3754                         if (unlikely(anon_vma_prepare(vma))) {
3755                                 ret = VM_FAULT_OOM;
3756                                 goto backout_unlocked;
3757                         }
3758                         anon_rmap = 1;
3759                 }
3760         } else {
3761                 /*
3762                  * If a memory error occurs between mmap() and the fault, some
3763                  * processes won't have a hwpoisoned swap entry for the errored
3764                  * virtual address, so block the hugepage fault via PG_hwpoison.
3765                  */
3766                 if (unlikely(PageHWPoison(page))) {
3767                         ret = VM_FAULT_HWPOISON |
3768                                 VM_FAULT_SET_HINDEX(hstate_index(h));
3769                         goto backout_unlocked;
3770                 }
3771         }
3772
3773         /*
3774          * If we are going to COW a private mapping later, we examine the
3775          * pending reservations for this page now. This will ensure that
3776          * any allocations necessary to record that reservation occur outside
3777          * the spinlock.
3778          */
3779         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3780                 if (vma_needs_reservation(h, vma, address) < 0) {
3781                         ret = VM_FAULT_OOM;
3782                         goto backout_unlocked;
3783                 }
3784                 /* Just decrements count, does not deallocate */
3785                 vma_end_reservation(h, vma, address);
3786         }
3787
3788         ptl = huge_pte_lock(h, mm, ptep);
3789         size = i_size_read(mapping->host) >> huge_page_shift(h);
3790         if (idx >= size)
3791                 goto backout;
3792
3793         ret = 0;
3794         if (!huge_pte_none(huge_ptep_get(ptep)))
3795                 goto backout;
3796
3797         if (anon_rmap) {
3798                 ClearPagePrivate(page);
3799                 hugepage_add_new_anon_rmap(page, vma, address);
3800         } else
3801                 page_dup_rmap(page, true);
3802         new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3803                                 && (vma->vm_flags & VM_SHARED)));
3804         set_huge_pte_at(mm, address, ptep, new_pte);
3805
3806         hugetlb_count_add(pages_per_huge_page(h), mm);
3807         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3808                 /* Optimization, do the COW without a second fault */
3809                 ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
3810         }
3811
3812         spin_unlock(ptl);
3813         unlock_page(page);
3814 out:
3815         return ret;
3816
3817 backout:
3818         spin_unlock(ptl);
3819 backout_unlocked:
3820         unlock_page(page);
3821         restore_reserve_on_error(h, vma, address, page);
3822         put_page(page);
3823         goto out;
3824 }
3825
3826 #ifdef CONFIG_SMP
3827 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3828                             struct vm_area_struct *vma,
3829                             struct address_space *mapping,
3830                             pgoff_t idx, unsigned long address)
3831 {
3832         unsigned long key[2];
3833         u32 hash;
3834
3835         if (vma->vm_flags & VM_SHARED) {
3836                 key[0] = (unsigned long) mapping;
3837                 key[1] = idx;
3838         } else {
3839                 key[0] = (unsigned long) mm;
3840                 key[1] = address >> huge_page_shift(h);
3841         }
3842
3843         hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
3844
3845         return hash & (num_fault_mutexes - 1);
3846 }
3847 #else
3848 /*
3849  * For uniprocessor systems we always use a single mutex, so just
3850  * return 0 and avoid the hashing overhead.
3851  */
3852 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
3853                             struct vm_area_struct *vma,
3854                             struct address_space *mapping,
3855                             pgoff_t idx, unsigned long address)
3856 {
3857         return 0;
3858 }
3859 #endif
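
/*
 * A short sketch of how the key selection above behaves: for a shared
 * (VM_SHARED) mapping the key is { mapping, idx }, so any two tasks faulting
 * the same file page contend on the same mutex; for a private mapping the
 * key is { mm, address >> huge_page_shift(h) }, so only faults on the same
 * huge page of the same mm serialize.  The jhash2() result is reduced with
 * "hash & (num_fault_mutexes - 1)", relying on the power-of-two table size
 * chosen in hugetlb_init().
 */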
3860
3861 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3862                         unsigned long address, unsigned int flags)
3863 {
3864         pte_t *ptep, entry;
3865         spinlock_t *ptl;
3866         int ret;
3867         u32 hash;
3868         pgoff_t idx;
3869         struct page *page = NULL;
3870         struct page *pagecache_page = NULL;
3871         struct hstate *h = hstate_vma(vma);
3872         struct address_space *mapping;
3873         int need_wait_lock = 0;
3874
3875         address &= huge_page_mask(h);
3876
3877         ptep = huge_pte_offset(mm, address, huge_page_size(h));
3878         if (ptep) {
3879                 entry = huge_ptep_get(ptep);
3880                 if (unlikely(is_hugetlb_entry_migration(entry))) {
3881                         migration_entry_wait_huge(vma, mm, ptep);
3882                         return 0;
3883                 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3884                         return VM_FAULT_HWPOISON_LARGE |
3885                                 VM_FAULT_SET_HINDEX(hstate_index(h));
3886         } else {
3887                 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
3888                 if (!ptep)
3889                         return VM_FAULT_OOM;
3890         }
3891
3892         mapping = vma->vm_file->f_mapping;
3893         idx = vma_hugecache_offset(h, vma, address);
3894
3895         /*
3896          * Serialize hugepage allocation and instantiation, so that we don't
3897          * get spurious allocation failures if two CPUs race to instantiate
3898          * the same page in the page cache.
3899          */
3900         hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
3901         mutex_lock(&hugetlb_fault_mutex_table[hash]);
3902
3903         entry = huge_ptep_get(ptep);
3904         if (huge_pte_none(entry)) {
3905                 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
3906                 goto out_mutex;
3907         }
3908
3909         ret = 0;
3910
3911         /*
3912          * entry could be a migration/hwpoison entry at this point, so this
3913          * check prevents the kernel from proceeding below on the assumption
3914          * that we have an active hugepage in the pagecache. The goto defers
3915          * to the second page fault, and the is_hugetlb_entry_(migration|hwpoisoned)
3916          * checks there will handle it properly.
3917          */
3918         if (!pte_present(entry))
3919                 goto out_mutex;
3920
3921         /*
3922          * If we are going to COW the mapping later, we examine the pending
3923          * reservations for this page now. This will ensure that any
3924          * allocations necessary to record that reservation occur outside the
3925          * spinlock. For private mappings, we also lookup the pagecache
3926          * page now as it is used to determine if a reservation has been
3927          * consumed.
3928          */
3929         if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
3930                 if (vma_needs_reservation(h, vma, address) < 0) {
3931                         ret = VM_FAULT_OOM;
3932                         goto out_mutex;
3933                 }
3934                 /* Just decrements count, does not deallocate */
3935                 vma_end_reservation(h, vma, address);
3936
3937                 if (!(vma->vm_flags & VM_MAYSHARE))
3938                         pagecache_page = hugetlbfs_pagecache_page(h,
3939                                                                 vma, address);
3940         }
3941
3942         ptl = huge_pte_lock(h, mm, ptep);
3943
3944         /* Check for a racing update before calling hugetlb_cow */
3945         if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
3946                 goto out_ptl;
3947
3948         /*
3949          * hugetlb_cow() requires page locks of pte_page(entry) and
3950          * pagecache_page, so here we need to take the former one
3951          * when page != pagecache_page or !pagecache_page.
3952          */
3953         page = pte_page(entry);
3954         if (page != pagecache_page)
3955                 if (!trylock_page(page)) {
3956                         need_wait_lock = 1;
3957                         goto out_ptl;
3958                 }
3959
3960         get_page(page);
3961
3962         if (flags & FAULT_FLAG_WRITE) {
3963                 if (!huge_pte_write(entry)) {
3964                         ret = hugetlb_cow(mm, vma, address, ptep,
3965                                           pagecache_page, ptl);
3966                         goto out_put_page;
3967                 }
3968                 entry = huge_pte_mkdirty(entry);
3969         }
3970         entry = pte_mkyoung(entry);
3971         if (huge_ptep_set_access_flags(vma, address, ptep, entry,
3972                                                 flags & FAULT_FLAG_WRITE))
3973                 update_mmu_cache(vma, address, ptep);
3974 out_put_page:
3975         if (page != pagecache_page)
3976                 unlock_page(page);
3977         put_page(page);
3978 out_ptl:
3979         spin_unlock(ptl);
3980
3981         if (pagecache_page) {
3982                 unlock_page(pagecache_page);
3983                 put_page(pagecache_page);
3984         }
3985 out_mutex:
3986         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3987         /*
3988          * Generally it's safe to hold a refcount while waiting for the page
3989          * lock. Here, however, we only wait to defer the next page fault and
3990          * avoid a busy loop; the page is not used after it is unlocked and
3991          * before we return from the current page fault, so we are safe from
3992          * accessing a freed page even though we wait without taking a refcount.
3993          */
3994         if (need_wait_lock)
3995                 wait_on_page_locked(page);
3996         return ret;
3997 }
3998
3999 /*
4000  * Used by userfaultfd UFFDIO_COPY.  Based on mcopy_atomic_pte with
4001  * modifications for huge pages.
4002  */
4003 int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
4004                             pte_t *dst_pte,
4005                             struct vm_area_struct *dst_vma,
4006                             unsigned long dst_addr,
4007                             unsigned long src_addr,
4008                             struct page **pagep)
4009 {
4010         struct address_space *mapping;
4011         pgoff_t idx;
4012         unsigned long size;
4013         int vm_shared = dst_vma->vm_flags & VM_SHARED;
4014         struct hstate *h = hstate_vma(dst_vma);
4015         pte_t _dst_pte;
4016         spinlock_t *ptl;
4017         int ret;
4018         struct page *page;
4019
4020         if (!*pagep) {
4021                 ret = -ENOMEM;
4022                 page = alloc_huge_page(dst_vma, dst_addr, 0);
4023                 if (IS_ERR(page))
4024                         goto out;
4025
4026                 ret = copy_huge_page_from_user(page,
4027                                                 (const void __user *) src_addr,
4028                                                 pages_per_huge_page(h), false);
4029
4030                 /* fallback to copy_from_user outside mmap_sem */
4031                 if (unlikely(ret)) {
4032                         ret = -EFAULT;
4033                         *pagep = page;
4034                         /* don't free the page */
4035                         goto out;
4036                 }
4037         } else {
4038                 page = *pagep;
4039                 *pagep = NULL;
4040         }
4041
4042         /*
4043          * The memory barrier inside __SetPageUptodate makes sure that
4044          * preceding stores to the page contents become visible before
4045          * the set_pte_at() write.
4046          */
4047         __SetPageUptodate(page);
4048         set_page_huge_active(page);
4049
4050         mapping = dst_vma->vm_file->f_mapping;
4051         idx = vma_hugecache_offset(h, dst_vma, dst_addr);
4052
4053         /*
4054          * If shared, add to page cache
4055          */
4056         if (vm_shared) {
4057                 size = i_size_read(mapping->host) >> huge_page_shift(h);
4058                 ret = -EFAULT;
4059                 if (idx >= size)
4060                         goto out_release_nounlock;
4061
4062                 /*
4063                  * Serialization between remove_inode_hugepages() and
4064                  * huge_add_to_page_cache() below happens through the
4065                  * hugetlb_fault_mutex_table, which must be held here by
4066                  * the caller.
4067                  */
4068                 ret = huge_add_to_page_cache(page, mapping, idx);
4069                 if (ret)
4070                         goto out_release_nounlock;
4071         }
4072
4073         ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
4074         spin_lock(ptl);
4075
4076         /*
4077          * Recheck the i_size after holding PT lock to make sure not
4078          * to leave any page mapped (as page_mapped()) beyond the end
4079          * of the i_size (remove_inode_hugepages() is strict about
4080          * enforcing that). If we bail out here, we'll also leave a
4081          * page in the radix tree in the vm_shared case beyond the end
4082          * of the i_size, but remove_inode_hugepages() will take care
4083          * of it as soon as we drop the hugetlb_fault_mutex_table.
4084          */
4085         size = i_size_read(mapping->host) >> huge_page_shift(h);
4086         ret = -EFAULT;
4087         if (idx >= size)
4088                 goto out_release_unlock;
4089
4090         ret = -EEXIST;
4091         if (!huge_pte_none(huge_ptep_get(dst_pte)))
4092                 goto out_release_unlock;
4093
4094         if (vm_shared) {
4095                 page_dup_rmap(page, true);
4096         } else {
4097                 ClearPagePrivate(page);
4098                 hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
4099         }
4100
4101         _dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE);
4102         if (dst_vma->vm_flags & VM_WRITE)
4103                 _dst_pte = huge_pte_mkdirty(_dst_pte);
4104         _dst_pte = pte_mkyoung(_dst_pte);
4105
4106         set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
4107
4108         (void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
4109                                         dst_vma->vm_flags & VM_WRITE);
4110         hugetlb_count_add(pages_per_huge_page(h), dst_mm);
4111
4112         /* No need to invalidate - it was non-present before */
4113         update_mmu_cache(dst_vma, dst_addr, dst_pte);
4114
4115         spin_unlock(ptl);
4116         if (vm_shared)
4117                 unlock_page(page);
4118         ret = 0;
4119 out:
4120         return ret;
4121 out_release_unlock:
4122         spin_unlock(ptl);
4123         if (vm_shared)
4124                 unlock_page(page);
4125 out_release_nounlock:
4126         put_page(page);
4127         goto out;
4128 }
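
/*
 * This path is reached from userfaultfd's UFFDIO_COPY ioctl when the
 * registered range is backed by hugetlbfs.  A minimal userspace sketch
 * (error handling omitted; uffd, dst_addr, src_addr and len are assumed
 * to be set up by the caller and suitably aligned for the huge page size):
 *
 *	#include <linux/userfaultfd.h>
 *	#include <sys/ioctl.h>
 *
 *	struct uffdio_copy copy = {
 *		.dst = dst_addr,
 *		.src = src_addr,
 *		.len = len,
 *		.mode = 0,
 *	};
 *	ioctl(uffd, UFFDIO_COPY, &copy);
 */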
4129
4130 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
4131                          struct page **pages, struct vm_area_struct **vmas,
4132                          unsigned long *position, unsigned long *nr_pages,
4133                          long i, unsigned int flags, int *nonblocking)
4134 {
4135         unsigned long pfn_offset;
4136         unsigned long vaddr = *position;
4137         unsigned long remainder = *nr_pages;
4138         struct hstate *h = hstate_vma(vma);
4139         int err = -EFAULT;
4140
4141         while (vaddr < vma->vm_end && remainder) {
4142                 pte_t *pte;
4143                 spinlock_t *ptl = NULL;
4144                 int absent;
4145                 struct page *page;
4146
4147                 /*
4148                  * If we have a pending SIGKILL, don't keep faulting pages and
4149                  * potentially allocating memory.
4150                  */
4151                 if (unlikely(fatal_signal_pending(current))) {
4152                         remainder = 0;
4153                         break;
4154                 }
4155
4156                 /*
4157                  * Some archs (sparc64, sh*) have multiple pte_t entries per
4158                  * hugepage.  We have to make sure we get the
4159                  * first, for the page indexing below to work.
4160                  *
4161                  * Note that page table lock is not held when pte is null.
4162                  */
4163                 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
4164                                       huge_page_size(h));
4165                 if (pte)
4166                         ptl = huge_pte_lock(h, mm, pte);
4167                 absent = !pte || huge_pte_none(huge_ptep_get(pte));
4168
4169                 /*
4170                  * When coredumping, it suits get_dump_page if we just return
4171                  * an error where there's an empty slot with no huge pagecache
4172                  * to back it.  This way, we avoid allocating a hugepage, and
4173                  * the sparse dumpfile avoids allocating disk blocks, but its
4174                  * huge holes still show up with zeroes where they need to be.
4175                  */
4176                 if (absent && (flags & FOLL_DUMP) &&
4177                     !hugetlbfs_pagecache_present(h, vma, vaddr)) {
4178                         if (pte)
4179                                 spin_unlock(ptl);
4180                         remainder = 0;
4181                         break;
4182                 }
4183
4184                 /*
4185                  * We need to call hugetlb_fault both for hugepages under
4186                  * migration (in which case hugetlb_fault waits for the
4187                  * migration) and for hwpoisoned hugepages (in which case we
4188                  * need to prevent the caller from accessing them). To do this,
4189                  * we use is_swap_pte here instead of is_hugetlb_entry_migration
4190                  * and is_hugetlb_entry_hwpoisoned, because it simply covers
4191                  * both cases, and because we can't follow correct pages
4192                  * directly from any kind of swap entry.
4193                  */
4194                 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
4195                     ((flags & FOLL_WRITE) &&
4196                       !huge_pte_write(huge_ptep_get(pte)))) {
4197                         int ret;
4198                         unsigned int fault_flags = 0;
4199
4200                         if (pte)
4201                                 spin_unlock(ptl);
4202                         if (flags & FOLL_WRITE)
4203                                 fault_flags |= FAULT_FLAG_WRITE;
4204                         if (nonblocking)
4205                                 fault_flags |= FAULT_FLAG_ALLOW_RETRY;
4206                         if (flags & FOLL_NOWAIT)
4207                                 fault_flags |= FAULT_FLAG_ALLOW_RETRY |
4208                                         FAULT_FLAG_RETRY_NOWAIT;
4209                         if (flags & FOLL_TRIED) {
4210                                 VM_WARN_ON_ONCE(fault_flags &
4211                                                 FAULT_FLAG_ALLOW_RETRY);
4212                                 fault_flags |= FAULT_FLAG_TRIED;
4213                         }
4214                         ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
4215                         if (ret & VM_FAULT_ERROR) {
4216                                 err = vm_fault_to_errno(ret, flags);
4217                                 remainder = 0;
4218                                 break;
4219                         }
4220                         if (ret & VM_FAULT_RETRY) {
4221                                 if (nonblocking)
4222                                         *nonblocking = 0;
4223                                 *nr_pages = 0;
4224                                 /*
4225                                  * VM_FAULT_RETRY must not return an
4226                                  * error; it will return zero
4227                                  * instead.
4228                                  *
4229                                  * No need to update "position" as the
4230                                  * caller will not check it after
4231                                  * *nr_pages is set to 0.
4232                                  */
4233                                 return i;
4234                         }
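                        /*
                         * Retry the same address: vaddr has not been
                         * advanced, so the loop re-walks the entry that
                         * hugetlb_fault just resolved.
                         */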
4235                         continue;
4236                 }
4237
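                /*
                 * At this point the huge pte is present and accessible.
                 * pfn_offset is the index of the base page within the
                 * compound huge page that corresponds to vaddr; the
                 * same_page loop below hands out consecutive subpages
                 * without re-walking the page tables.
                 */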
4238                 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
4239                 page = pte_page(huge_ptep_get(pte));
4240 same_page:
4241                 if (pages) {
4242                         pages[i] = mem_map_offset(page, pfn_offset);
4243                         get_page(pages[i]);
4244                 }
4245
4246                 if (vmas)
4247                         vmas[i] = vma;
4248
4249                 vaddr += PAGE_SIZE;
4250                 ++pfn_offset;
4251                 --remainder;
4252                 ++i;
4253                 if (vaddr < vma->vm_end && remainder &&
4254                                 pfn_offset < pages_per_huge_page(h)) {
4255                         /*
4256                          * We use pfn_offset to avoid touching the pageframes
4257                          * of this compound page.
4258                          */
4259                         goto same_page;
4260                 }
4261                 spin_unlock(ptl);
4262         }
4263         *nr_pages = remainder;
4264         /*
4265          * Setting position is actually required only if remainder is
4266          * not zero, but it's faster not to add an "if (remainder)"
4267          * branch.
4268          */
4269         *position = vaddr;
4270
4271         return i ? i : err;
4272 }
4273
4274 #ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
4275 /*
4276  * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
4277  * implement this.
4278  */
4279 #define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
4280 #endif
4281
4282 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
4283                 unsigned long address, unsigned long end, pgprot_t newprot)
4284 {
4285         struct mm_struct *mm = vma->vm_mm;
4286         unsigned long start = address;
4287         pte_t *ptep;
4288         pte_t pte;
4289         struct hstate *h = hstate_vma(vma);
4290         unsigned long pages = 0;
4291
4292         BUG_ON(address >= end);
4293         flush_cache_range(vma, address, end);
4294
4295         mmu_notifier_invalidate_range_start(mm, start, end);
4296         i_mmap_lock_write(vma->vm_file->f_mapping);
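        /*
         * i_mmap_rwsem is held for write while walking the range so that
         * huge_pmd_unshare() below can safely tear down page table pages
         * shared with other mappings of this file, and so that no new
         * sharing (huge_pmd_share) is set up against the range meanwhile.
         */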
4297         for (; address < end; address += huge_page_size(h)) {
4298                 spinlock_t *ptl;
4299                 ptep = huge_pte_offset(mm, address, huge_page_size(h));
4300                 if (!ptep)
4301                         continue;
4302                 ptl = huge_pte_lock(h, mm, ptep);
4303                 if (huge_pmd_unshare(mm, &address, ptep)) {
4304                         pages++;
4305                         spin_unlock(ptl);
4306                         continue;
4307                 }
4308                 pte = huge_ptep_get(ptep);
4309                 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
4310                         spin_unlock(ptl);
4311                         continue;
4312                 }
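                /*
                 * For a huge page under migration, checking the new
                 * protection against the entry is awkward, so be safe and
                 * downgrade any write migration entry to a read one; a
                 * later write fault will restore write access if allowed.
                 */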
4313                 if (unlikely(is_hugetlb_entry_migration(pte))) {
4314                         swp_entry_t entry = pte_to_swp_entry(pte);
4315
4316                         if (is_write_migration_entry(entry)) {
4317                                 pte_t newpte;
4318
4319                                 make_migration_entry_read(&entry);
4320                                 newpte = swp_entry_to_pte(entry);
4321                                 set_huge_swap_pte_at(mm, address, ptep,
4322                                                      newpte, huge_page_size(h));
4323                                 pages++;
4324                         }
4325                         spin_unlock(ptl);
4326                         continue;
4327                 }
4328                 if (!huge_pte_none(pte)) {
4329                         pte = huge_ptep_get_and_clear(mm, address, ptep);
4330                         pte = pte_mkhuge(huge_pte_modify(pte, newprot));
4331                         pte = arch_make_huge_pte(pte, vma, NULL, 0);
4332                         set_huge_pte_at(mm, address, ptep, pte);
4333                         pages++;
4334                 }
4335                 spin_unlock(ptl);
4336         }
4337         /*
4338          * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
4339          * may have cleared our pud entry and done put_page on the page table:
4340          * once we release i_mmap_rwsem, another task can do the final put_page
4341          * and that page table be reused and filled with junk.
4342          */
4343         flush_hugetlb_tlb_range(vma, start, end);
4344         /*
4345          * No need to call mmu_notifier_invalidate_range() as we are
4346          * downgrading page table protection, not changing it to point to
4347          * a new page.
4348          * See Documentation/vm/mmu_notifier.txt
4349          */
4350         i_mmap_unlock_write(vma->vm_file->f_mapping);
4351         mmu_notifier_invalidate_range_end(mm, start, end);
4352
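        /*
         * pages counts huge page entries that were changed; shift by
         * h->order so the value returned to the caller is in units of
         * base (PAGE_SIZE) pages.
         */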
4353         return pages << h->order;
4354 }
4355
4356 int hugetlb_reserve_pages(struct inode *inode,
4357                                         long from, long to,
4358                                         struct vm_area_struct *vma,
4359                                         vm_flags_t vm_flags)
4360 {
4361         long ret, chg;
4362         struct hstate *h = hstate_inode(inode);
4363         struct hugepage_subpool *spool = subpool_inode(inode);
4364         struct resv_map *resv_map;
4365         long gbl_reserve;
4366
4367         /*
4368          * Only apply hugepage reservation if asked. At fault time, an
4369          * attempt will be made for VM_NORESERVE to allocate a page
4370          * without using reserves.
4371          */
4372         if (vm_flags & VM_NORESERVE)
4373                 return 0;
4374
4375         /*
4376          * Shared mappings base their reservation on the number of pages that
4377          * are already allocated on behalf of the file. Private mappings need
4378          * to reserve the full area even if read-only as mprotect() may be
4379          * called to make the mapping read-write. Assume !vma is a shm mapping
4380          */
4381         if (!vma || vma->vm_flags & VM_MAYSHARE) {
4382                 resv_map = inode_resv_map(inode);
4383
4384                 chg = region_chg(resv_map, from, to);
4385
4386         } else {
4387                 resv_map = resv_map_alloc();
4388                 if (!resv_map)
4389                         return -ENOMEM;
4390
4391                 chg = to - from;
4392
4393                 set_vma_resv_map(vma, resv_map);
4394                 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
4395         }
4396
4397         if (chg < 0) {
4398                 ret = chg;
4399                 goto out_err;
4400         }
4401
4402         /*
4403          * There must be enough pages in the subpool for the mapping. If
4404          * the subpool has a minimum size, there may be some global
4405          * reservations already in place (gbl_reserve).
4406          */
4407         gbl_reserve = hugepage_subpool_get_pages(spool, chg);
4408         if (gbl_reserve < 0) {
4409                 ret = -ENOSPC;
4410                 goto out_err;
4411         }
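        /*
         * gbl_reserve is the part of chg that is not already covered by the
         * subpool's minimum reservation; only this amount still needs to be
         * charged against the global pool below.
         */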
4412
4413         /*
4414          * Check that enough hugepages are available for the reservation.
4415          * Hand the pages back to the subpool if there are not.
4416          */
4417         ret = hugetlb_acct_memory(h, gbl_reserve);
4418         if (ret < 0) {
4419                 /* put back original number of pages, chg */
4420                 (void)hugepage_subpool_put_pages(spool, chg);
4421                 goto out_err;
4422         }
4423
4424         /*
4425          * Account for the reservations made. Shared mappings record regions
4426          * that have reservations as they are shared by multiple VMAs.
4427          * When the last VMA disappears, the region map says how much
4428          * the reservation was and the page cache tells how much of
4429          * the reservation was consumed. Private mappings are per-VMA and
4430          * only the consumed reservations are tracked. When the VMA
4431          * disappears, the original reservation is the VMA size and the
4432          * consumed reservations are stored in the map. Hence, nothing
4433          * else has to be done for private mappings here
4434          */
4435         if (!vma || vma->vm_flags & VM_MAYSHARE) {
4436                 long add = region_add(resv_map, from, to);
4437
4438                 if (unlikely(chg > add)) {
4439                         /*
4440                          * pages in this range were added to the reserve
4441                          * map between region_chg and region_add.  This
4442                          * indicates a race with alloc_huge_page.  Adjust
4443                          * the subpool and reserve counts modified above
4444                          * based on the difference.
4445                          */
4446                         long rsv_adjust;
4447
4448                         rsv_adjust = hugepage_subpool_put_pages(spool,
4449                                                                 chg - add);
4450                         hugetlb_acct_memory(h, -rsv_adjust);
4451                 }
4452         }
4453         return 0;
4454 out_err:
4455         if (!vma || vma->vm_flags & VM_MAYSHARE)
4456                 /* Don't call region_abort if region_chg failed */
4457                 if (chg >= 0)
4458                         region_abort(resv_map, from, to);
4459         if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4460                 kref_put(&resv_map->refs, resv_map_release);
4461         return ret;
4462 }
4463
4464 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
4465                                                                 long freed)
4466 {
4467         struct hstate *h = hstate_inode(inode);
4468         struct resv_map *resv_map = inode_resv_map(inode);
4469         long chg = 0;
4470         struct hugepage_subpool *spool = subpool_inode(inode);
4471         long gbl_reserve;
4472
4473         if (resv_map) {
4474                 chg = region_del(resv_map, start, end);
4475                 /*
4476                  * region_del() can fail in the rare case where a region
4477                  * must be split and another region descriptor cannot be
4478                  * allocated.  If end == LONG_MAX, it will not fail.
4479                  */
4480                 if (chg < 0)
4481                         return chg;
4482         }
4483
4484         spin_lock(&inode->i_lock);
4485         inode->i_blocks -= (blocks_per_huge_page(h) * freed);
4486         spin_unlock(&inode->i_lock);
4487
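        /*
         * chg is the number of reservations removed from the reserve map;
         * (chg - freed) of them were never consumed by an allocated page,
         * so that remainder is returned to the subpool (and, via
         * gbl_reserve, to the global pool) below.
         */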
4488         /*
4489          * If the subpool has a minimum size, the number of global
4490          * reservations to be released may be adjusted.
4491          */
4492         gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
4493         hugetlb_acct_memory(h, -gbl_reserve);
4494
4495         return 0;
4496 }
4497
4498 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
4499 static unsigned long page_table_shareable(struct vm_area_struct *svma,
4500                                 struct vm_area_struct *vma,
4501                                 unsigned long addr, pgoff_t idx)
4502 {
4503         unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
4504                                 svma->vm_start;
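        /*
         * saddr is the address in svma that maps the same file offset (idx)
         * as addr does in vma; sharing is only possible when both mappings
         * cover the whole PUD-aligned, PUD-sized range around it with the
         * same flags.
         */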
4505         unsigned long sbase = saddr & PUD_MASK;
4506         unsigned long s_end = sbase + PUD_SIZE;
4507
4508         /* Allow segments to share if only one is marked locked */
4509         unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
4510         unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
4511
4512         /*
4513          * Match the virtual addresses, permissions and the alignment of
4514          * the page table page.
4515          */
4516         if (pmd_index(addr) != pmd_index(saddr) ||
4517             vm_flags != svm_flags ||
4518             sbase < svma->vm_start || svma->vm_end < s_end)
4519                 return 0;
4520
4521         return saddr;
4522 }
4523
4524 static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
4525 {
4526         unsigned long base = addr & PUD_MASK;
4527         unsigned long end = base + PUD_SIZE;
4528
4529         /*
4530          * check on proper vm_flags and page table alignment
4531          */
4532         if (vma->vm_flags & VM_MAYSHARE &&
4533             vma->vm_start <= base && end <= vma->vm_end)
4534                 return true;
4535         return false;
4536 }
4537
4538 /*
4539  * Search for a shareable pmd page for hugetlb. In any case, this calls
4540  * pmd_alloc() and returns the corresponding pte. While this is not necessary
4541  * for the !shared pmd case because we can allocate the pmd later as well, it
4542  * makes the code much cleaner. pmd allocation is essential for the shared
4543  * case because the pud has to be populated inside the same i_mmap_rwsem
4544  * section - otherwise racing tasks could either miss the sharing (see
4545  * huge_pte_offset) or select a bad pmd for sharing.
4546  */
4547 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4548 {
4549         struct vm_area_struct *vma = find_vma(mm, addr);
4550         struct address_space *mapping = vma->vm_file->f_mapping;
4551         pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
4552                         vma->vm_pgoff;
4553         struct vm_area_struct *svma;
4554         unsigned long saddr;
4555         pte_t *spte = NULL;
4556         pte_t *pte;
4557         spinlock_t *ptl;
4558
4559         if (!vma_shareable(vma, addr))
4560                 return (pte_t *)pmd_alloc(mm, pud, addr);
4561
4562         i_mmap_lock_write(mapping);
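        /*
         * Walk all other mappings of this file that cover the same file
         * offset; if one of them already has a PMD page populated for this
         * PUD-sized range, take a reference on it so it can be shared.
         */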
4563         vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
4564                 if (svma == vma)
4565                         continue;
4566
4567                 saddr = page_table_shareable(svma, vma, addr, idx);
4568                 if (saddr) {
4569                         spte = huge_pte_offset(svma->vm_mm, saddr,
4570                                                vma_mmu_pagesize(svma));
4571                         if (spte) {
4572                                 get_page(virt_to_page(spte));
4573                                 break;
4574                         }
4575                 }
4576         }
4577
4578         if (!spte)
4579                 goto out;
4580
4581         ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
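        /*
         * Recheck under the page table lock: the pud may have been populated
         * by another task after the caller's pud_none() check; in that case
         * drop the reference taken on the shared page above.
         */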
4582         if (pud_none(*pud)) {
4583                 pud_populate(mm, pud,
4584                                 (pmd_t *)((unsigned long)spte & PAGE_MASK));
4585                 mm_inc_nr_pmds(mm);
4586         } else {
4587                 put_page(virt_to_page(spte));
4588         }
4589         spin_unlock(ptl);
4590 out:
4591         pte = (pte_t *)pmd_alloc(mm, pud, addr);
4592         i_mmap_unlock_write(mapping);
4593         return pte;
4594 }
4595
4596 /*
4597  * Unmap a huge page backed by a shared pte.
4598  *
4599  * The hugetlb pte page is refcounted at the time of mapping.  If the pte is
4600  * shared, indicated by page_count > 1, unmapping is achieved by clearing the
4601  * pud and decrementing the refcount. If count == 1, the pte page is not shared.
4602  *
4603  * Called with the page table lock held.
4604  *
4605  * Returns: 1 successfully unmapped a shared pte page
4606  *          0 the underlying pte page is not shared, or it is the last user
4607  */
4608 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4609 {
4610         pgd_t *pgd = pgd_offset(mm, *addr);
4611         p4d_t *p4d = p4d_offset(pgd, *addr);
4612         pud_t *pud = pud_offset(p4d, *addr);
4613
4614         BUG_ON(page_count(virt_to_page(ptep)) == 0);
4615         if (page_count(virt_to_page(ptep)) == 1)
4616                 return 0;
4617
4618         pud_clear(pud);
4619         put_page(virt_to_page(ptep));
4620         mm_dec_nr_pmds(mm);
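        /*
         * The entire PUD-sized range mapped by the shared page is now gone
         * from this mm.  Advance *addr so that the caller's loop, which
         * steps by huge_page_size(), resumes at the next PUD boundary.
         */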
4621         *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
4622         return 1;
4623 }
4624 #define want_pmd_share()        (1)
4625 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4626 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4627 {
4628         return NULL;
4629 }
4630
4631 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4632 {
4633         return 0;
4634 }
4635 #define want_pmd_share()        (0)
4636 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4637
4638 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
4639 pte_t *huge_pte_alloc(struct mm_struct *mm,
4640                         unsigned long addr, unsigned long sz)
4641 {
4642         pgd_t *pgd;
4643         p4d_t *p4d;
4644         pud_t *pud;
4645         pte_t *pte = NULL;
4646
4647         pgd = pgd_offset(mm, addr);
4648         p4d = p4d_alloc(mm, pgd, addr);
4649         if (!p4d)
4650                 return NULL;
4651         pud = pud_alloc(mm, p4d, addr);
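        /*
         * For a PUD-sized huge page the pud entry itself acts as the huge
         * pte.  For a PMD-sized page, try to share an existing PMD page
         * with other mappings of the file before allocating a new one.
         */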
4652         if (pud) {
4653                 if (sz == PUD_SIZE) {
4654                         pte = (pte_t *)pud;
4655                 } else {
4656                         BUG_ON(sz != PMD_SIZE);
4657                         if (want_pmd_share() && pud_none(*pud))
4658                                 pte = huge_pmd_share(mm, addr, pud);
4659                         else
4660                                 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4661                 }
4662         }
4663         BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
4664
4665         return pte;
4666 }
4667
4668 /*
4669  * huge_pte_offset() - Walk the page table to resolve the hugepage
4670  * entry at address @addr
4671  *
4672  * Return: Pointer to page table or swap entry (PUD or PMD) for
4673  * address @addr, or NULL if a p*d_none() entry is encountered and the
4674  * size @sz doesn't match the hugepage size at this level of the page
4675  * table.
4676  */
4677 pte_t *huge_pte_offset(struct mm_struct *mm,
4678                        unsigned long addr, unsigned long sz)
4679 {
4680         pgd_t *pgd;
4681         p4d_t *p4d;
4682         pud_t *pud;
4683         pmd_t *pmd;
4684
4685         pgd = pgd_offset(mm, addr);
4686         if (!pgd_present(*pgd))
4687                 return NULL;
4688         p4d = p4d_offset(pgd, addr);
4689         if (!p4d_present(*p4d))
4690                 return NULL;
4691
4692         pud = pud_offset(p4d, addr);
4693         if (sz != PUD_SIZE && pud_none(*pud))
4694                 return NULL;
4695         /* hugepage or swap? */
4696         if (pud_huge(*pud) || !pud_present(*pud))
4697                 return (pte_t *)pud;
4698
4699         pmd = pmd_offset(pud, addr);
4700         if (sz != PMD_SIZE && pmd_none(*pmd))
4701                 return NULL;
4702         /* hugepage or swap? */
4703         if (pmd_huge(*pmd) || !pmd_present(*pmd))
4704                 return (pte_t *)pmd;
4705
4706         return NULL;
4707 }
4708
4709 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
4710
4711 /*
4712  * These functions are overridable if your architecture needs its own
4713  * behavior.
4714  */
4715 struct page * __weak
4716 follow_huge_addr(struct mm_struct *mm, unsigned long address,
4717                               int write)
4718 {
4719         return ERR_PTR(-EINVAL);
4720 }
4721
4722 struct page * __weak
4723 follow_huge_pd(struct vm_area_struct *vma,
4724                unsigned long address, hugepd_t hpd, int flags, int pdshift)
4725 {
4726         WARN(1, "hugepd follow called with no support for hugepage directory format\n");
4727         return NULL;
4728 }
4729
4730 struct page * __weak
4731 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
4732                 pmd_t *pmd, int flags)
4733 {
4734         struct page *page = NULL;
4735         spinlock_t *ptl;
4736         pte_t pte;
4737 retry:
4738         ptl = pmd_lockptr(mm, pmd);
4739         spin_lock(ptl);
4740         /*
4741          * Make sure that the address range covered by this pmd is not
4742          * unmapped by other threads.
4743          */
4744         if (!pmd_huge(*pmd))
4745                 goto out;
4746         pte = huge_ptep_get((pte_t *)pmd);
4747         if (pte_present(pte)) {
4748                 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
4749                 if (flags & FOLL_GET)
4750                         get_page(page);
4751         } else {
4752                 if (is_hugetlb_entry_migration(pte)) {
4753                         spin_unlock(ptl);
4754                         __migration_entry_wait(mm, (pte_t *)pmd, ptl);
4755                         goto retry;
4756                 }
4757                 /*
4758                  * hwpoisoned entry is treated as no_page_table in
4759                  * follow_page_mask().
4760                  */
4761         }
4762 out:
4763         spin_unlock(ptl);
4764         return page;
4765 }
4766
4767 struct page * __weak
4768 follow_huge_pud(struct mm_struct *mm, unsigned long address,
4769                 pud_t *pud, int flags)
4770 {
4771         if (flags & FOLL_GET)
4772                 return NULL;
4773
4774         return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
4775 }
4776
4777 struct page * __weak
4778 follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
4779 {
4780         if (flags & FOLL_GET)
4781                 return NULL;
4782
4783         return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
4784 }
4785
4786 bool isolate_huge_page(struct page *page, struct list_head *list)
4787 {
4788         bool ret = true;
4789
4790         VM_BUG_ON_PAGE(!PageHead(page), page);
4791         spin_lock(&hugetlb_lock);
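        /*
         * Only pages currently marked huge-active and whose refcount can be
         * raised are isolated; the active flag is cleared and the page is
         * moved onto the caller's list.
         */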
4792         if (!page_huge_active(page) || !get_page_unless_zero(page)) {
4793                 ret = false;
4794                 goto unlock;
4795         }
4796         clear_page_huge_active(page);
4797         list_move_tail(&page->lru, list);
4798 unlock:
4799         spin_unlock(&hugetlb_lock);
4800         return ret;
4801 }
4802
4803 void putback_active_hugepage(struct page *page)
4804 {
4805         VM_BUG_ON_PAGE(!PageHead(page), page);
4806         spin_lock(&hugetlb_lock);
4807         set_page_huge_active(page);
4808         list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
4809         spin_unlock(&hugetlb_lock);
4810         put_page(page);
4811 }