mm/hugetlb.c
1 /*
2  * Generic hugetlb support.
3  * (C) Nadia Yvette Chambers, April 2004
4  */
5 #include <linux/list.h>
6 #include <linux/init.h>
7 #include <linux/mm.h>
8 #include <linux/seq_file.h>
9 #include <linux/sysctl.h>
10 #include <linux/highmem.h>
11 #include <linux/mmu_notifier.h>
12 #include <linux/nodemask.h>
13 #include <linux/pagemap.h>
14 #include <linux/mempolicy.h>
15 #include <linux/compiler.h>
16 #include <linux/cpuset.h>
17 #include <linux/mutex.h>
18 #include <linux/memblock.h>
19 #include <linux/sysfs.h>
20 #include <linux/slab.h>
21 #include <linux/mmdebug.h>
22 #include <linux/sched/signal.h>
23 #include <linux/rmap.h>
24 #include <linux/string_helpers.h>
25 #include <linux/swap.h>
26 #include <linux/swapops.h>
27 #include <linux/jhash.h>
28 #include <linux/numa.h>
29
30 #include <asm/page.h>
31 #include <asm/pgtable.h>
32 #include <asm/tlb.h>
33
34 #include <linux/io.h>
35 #include <linux/hugetlb.h>
36 #include <linux/hugetlb_cgroup.h>
37 #include <linux/node.h>
38 #include <linux/userfaultfd_k.h>
39 #include <linux/page_owner.h>
40 #include "internal.h"
41
42 int hugetlb_max_hstate __read_mostly;
43 unsigned int default_hstate_idx;
44 struct hstate hstates[HUGE_MAX_HSTATE];
45 /*
46  * Minimum page order among possible hugepage sizes, set to a proper value
47  * at boot time.
48  */
49 static unsigned int minimum_order __read_mostly = UINT_MAX;
50
51 __initdata LIST_HEAD(huge_boot_pages);
52
53 /* for command line parsing */
54 static struct hstate * __initdata parsed_hstate;
55 static unsigned long __initdata default_hstate_max_huge_pages;
56 static unsigned long __initdata default_hstate_size;
57 static bool __initdata parsed_valid_hugepagesz = true;
58
59 /*
60  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
61  * free_huge_pages, and surplus_huge_pages.
62  */
63 DEFINE_SPINLOCK(hugetlb_lock);
64
65 /*
66  * Serializes faults on the same logical page.  This is used to
67  * prevent spurious OOMs when the hugepage pool is fully utilized.
68  */
69 static int num_fault_mutexes;
70 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
71
72 /* Forward declaration */
73 static int hugetlb_acct_memory(struct hstate *h, long delta);
74
75 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
76 {
77         bool free = (spool->count == 0) && (spool->used_hpages == 0);
78
79         spin_unlock(&spool->lock);
80
81         /* If no pages are used, and no other handles to the subpool
82          * remain, give up any reservations based on minimum size and
83          * free the subpool */
84         if (free) {
85                 if (spool->min_hpages != -1)
86                         hugetlb_acct_memory(spool->hstate,
87                                                 -spool->min_hpages);
88                 kfree(spool);
89         }
90 }
91
92 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
93                                                 long min_hpages)
94 {
95         struct hugepage_subpool *spool;
96
97         spool = kzalloc(sizeof(*spool), GFP_KERNEL);
98         if (!spool)
99                 return NULL;
100
101         spin_lock_init(&spool->lock);
102         spool->count = 1;
103         spool->max_hpages = max_hpages;
104         spool->hstate = h;
105         spool->min_hpages = min_hpages;
106
107         if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
108                 kfree(spool);
109                 return NULL;
110         }
111         spool->rsv_hpages = min_hpages;
112
113         return spool;
114 }
115
116 void hugepage_put_subpool(struct hugepage_subpool *spool)
117 {
118         spin_lock(&spool->lock);
119         BUG_ON(!spool->count);
120         spool->count--;
121         unlock_or_release_subpool(spool);
122 }
123
124 /*
125  * Subpool accounting for allocating and reserving pages.
126  * Return -ENOMEM if there are not enough resources to satisfy
127  * the request.  Otherwise, return the number of pages by which the
128  * global pools must be adjusted (upward).  The returned value may
129  * only be different than the passed value (delta) in the case where
130  * a subpool minimum size must be maintained.
131  */
132 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
133                                       long delta)
134 {
135         long ret = delta;
136
137         if (!spool)
138                 return ret;
139
140         spin_lock(&spool->lock);
141
142         if (spool->max_hpages != -1) {          /* maximum size accounting */
143                 if ((spool->used_hpages + delta) <= spool->max_hpages)
144                         spool->used_hpages += delta;
145                 else {
146                         ret = -ENOMEM;
147                         goto unlock_ret;
148                 }
149         }
150
151         /* minimum size accounting */
152         if (spool->min_hpages != -1 && spool->rsv_hpages) {
153                 if (delta > spool->rsv_hpages) {
154                         /*
155                          * Asking for more reserves than those already taken on
156                          * behalf of subpool.  Return difference.
157                          */
158                         ret = delta - spool->rsv_hpages;
159                         spool->rsv_hpages = 0;
160                 } else {
161                         ret = 0;        /* reserves already accounted for */
162                         spool->rsv_hpages -= delta;
163                 }
164         }
165
166 unlock_ret:
167         spin_unlock(&spool->lock);
168         return ret;
169 }
170
171 /*
172  * Subpool accounting for freeing and unreserving pages.
173  * Return the number of global page reservations that must be dropped.
174  * The return value may only be different than the passed value (delta)
175  * in the case where a subpool minimum size must be maintained.
176  */
177 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
178                                        long delta)
179 {
180         long ret = delta;
181
182         if (!spool)
183                 return delta;
184
185         spin_lock(&spool->lock);
186
187         if (spool->max_hpages != -1)            /* maximum size accounting */
188                 spool->used_hpages -= delta;
189
190          /* minimum size accounting */
191         if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
192                 if (spool->rsv_hpages + delta <= spool->min_hpages)
193                         ret = 0;
194                 else
195                         ret = spool->rsv_hpages + delta - spool->min_hpages;
196
197                 spool->rsv_hpages += delta;
198                 if (spool->rsv_hpages > spool->min_hpages)
199                         spool->rsv_hpages = spool->min_hpages;
200         }
201
202         /*
203          * If hugetlbfs_put_super couldn't free spool due to an outstanding
204          * quota reference, free it now.
205          */
206         unlock_or_release_subpool(spool);
207
208         return ret;
209 }
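
/*
 * A worked example of the accounting above: for a subpool created with
 * min_hpages = 2 and max_hpages = 10 (so rsv_hpages starts at 2 and
 * used_hpages at 0), hugepage_subpool_get_pages(spool, 5) consumes the
 * two pages already reserved on behalf of the subpool and returns 3,
 * so only three more pages need to be charged to the global pools.  A
 * later hugepage_subpool_put_pages(spool, 5) also returns 3: three
 * global reservations are dropped and two are kept back to maintain
 * the subpool minimum.
 */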
210
211 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
212 {
213         return HUGETLBFS_SB(inode->i_sb)->spool;
214 }
215
216 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
217 {
218         return subpool_inode(file_inode(vma->vm_file));
219 }
220
221 /*
222  * Region tracking -- allows tracking of reservations and instantiated pages
223  *                    across the pages in a mapping.
224  *
225  * The region data structures are embedded into a resv_map and protected
226  * by a resv_map's lock.  The set of regions within the resv_map represent
227  * reservations for huge pages, or huge pages that have already been
228  * instantiated within the map.  The from and to elements are huge page
229  * indices into the associated mapping.  from indicates the starting index
230  * of the region.  to represents the first index past the end of the region.
231  *
232  * For example, a file region structure with from == 0 and to == 4 represents
233  * four huge pages in a mapping.  It is important to note that the to element
234  * represents the first element past the end of the region. This is used in
235  * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
236  *
237  * Interval notation of the form [from, to) will be used to indicate that
238  * the endpoint from is inclusive and to is exclusive.
239  */
240 struct file_region {
241         struct list_head link;
242         long from;
243         long to;
244 };
245
246 /*
247  * Add the huge page range represented by [f, t) to the reserve
248  * map.  In the normal case, existing regions will be expanded
249  * to accommodate the specified range.  Sufficient regions should
250  * exist for expansion due to the previous call to region_chg
251  * with the same range.  However, it is possible that region_del
252  * could have been called after region_chg and modified the map
253  * in such a way that no region exists to be expanded.  In this
254  * case, pull a region descriptor from the cache associated with
255  * the map and use that for the new range.
256  *
257  * Return the number of new huge pages added to the map.  This
258  * number is greater than or equal to zero.
259  */
260 static long region_add(struct resv_map *resv, long f, long t)
261 {
262         struct list_head *head = &resv->regions;
263         struct file_region *rg, *nrg, *trg;
264         long add = 0;
265
266         spin_lock(&resv->lock);
267         /* Locate the region we are either in or before. */
268         list_for_each_entry(rg, head, link)
269                 if (f <= rg->to)
270                         break;
271
272         /*
273          * If no region exists which can be expanded to include the
274          * specified range, the list must have been modified by an
275          * interleaving call to region_del().  Pull a region descriptor
276          * from the cache and use it for this range.
277          */
278         if (&rg->link == head || t < rg->from) {
279                 VM_BUG_ON(resv->region_cache_count <= 0);
280
281                 resv->region_cache_count--;
282                 nrg = list_first_entry(&resv->region_cache, struct file_region,
283                                         link);
284                 list_del(&nrg->link);
285
286                 nrg->from = f;
287                 nrg->to = t;
288                 list_add(&nrg->link, rg->link.prev);
289
290                 add += t - f;
291                 goto out_locked;
292         }
293
294         /* Round our left edge to the current segment if it encloses us. */
295         if (f > rg->from)
296                 f = rg->from;
297
298         /* Check for and consume any regions we now overlap with. */
299         nrg = rg;
300         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
301                 if (&rg->link == head)
302                         break;
303                 if (rg->from > t)
304                         break;
305
306                 /* If this area reaches higher, extend our area to
307                  * include it completely.  If this is not the first area
308                  * which we intend to reuse, free it. */
309                 if (rg->to > t)
310                         t = rg->to;
311                 if (rg != nrg) {
312                         /* Decrement return value by the deleted range.
313                          * Another range will span this area so that by
314                          * the end of the routine, add will be >= zero
315                          */
316                         add -= (rg->to - rg->from);
317                         list_del(&rg->link);
318                         kfree(rg);
319                 }
320         }
321
322         add += (nrg->from - f);         /* Added to beginning of region */
323         nrg->from = f;
324         add += t - nrg->to;             /* Added to end of region */
325         nrg->to = t;
326
327 out_locked:
328         resv->adds_in_progress--;
329         spin_unlock(&resv->lock);
330         VM_BUG_ON(add < 0);
331         return add;
332 }
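
/*
 * For example, if the reserve map already contains [0, 2) and [3, 5),
 * then region_add(resv, 1, 4) merges the two entries into a single
 * [0, 5) region and returns 1, since only huge page index 2 was not
 * previously represented in the map.
 */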
333
334 /*
335  * Examine the existing reserve map and determine how many
336  * huge pages in the specified range [f, t) are NOT currently
337  * represented.  This routine is called before a subsequent
338  * call to region_add that will actually modify the reserve
339  * map to add the specified range [f, t).  region_chg does
340  * not change the number of huge pages represented by the
341  * map.  However, if the existing regions in the map can not
342  * be expanded to represent the new range, a new file_region
343  * structure is added to the map as a placeholder.  This is
344  * so that the subsequent region_add call will have all the
345  * regions it needs and will not fail.
346  *
347  * Upon entry, region_chg will also examine the cache of region descriptors
348  * associated with the map.  If there are not enough descriptors cached, one
349  * will be allocated for the in progress add operation.
350  *
351  * Returns the number of huge pages that need to be added to the existing
352  * reservation map for the range [f, t).  This number is greater or equal to
353  * zero.  -ENOMEM is returned if a new file_region structure or cache entry
354  * is needed and can not be allocated.
355  */
356 static long region_chg(struct resv_map *resv, long f, long t)
357 {
358         struct list_head *head = &resv->regions;
359         struct file_region *rg, *nrg = NULL;
360         long chg = 0;
361
362 retry:
363         spin_lock(&resv->lock);
364 retry_locked:
365         resv->adds_in_progress++;
366
367         /*
368          * Check for sufficient descriptors in the cache to accommodate
369          * the number of in progress add operations.
370          */
371         if (resv->adds_in_progress > resv->region_cache_count) {
372                 struct file_region *trg;
373
374                 VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
375                 /* Must drop lock to allocate a new descriptor. */
376                 resv->adds_in_progress--;
377                 spin_unlock(&resv->lock);
378
379                 trg = kmalloc(sizeof(*trg), GFP_KERNEL);
380                 if (!trg) {
381                         kfree(nrg);
382                         return -ENOMEM;
383                 }
384
385                 spin_lock(&resv->lock);
386                 list_add(&trg->link, &resv->region_cache);
387                 resv->region_cache_count++;
388                 goto retry_locked;
389         }
390
391         /* Locate the region we are before or in. */
392         list_for_each_entry(rg, head, link)
393                 if (f <= rg->to)
394                         break;
395
396         /* If we are below the current region then a new region is required.
397          * Subtle, allocate a new region at the position but make it zero
398          * size such that we can guarantee to record the reservation. */
399         if (&rg->link == head || t < rg->from) {
400                 if (!nrg) {
401                         resv->adds_in_progress--;
402                         spin_unlock(&resv->lock);
403                         nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
404                         if (!nrg)
405                                 return -ENOMEM;
406
407                         nrg->from = f;
408                         nrg->to   = f;
409                         INIT_LIST_HEAD(&nrg->link);
410                         goto retry;
411                 }
412
413                 list_add(&nrg->link, rg->link.prev);
414                 chg = t - f;
415                 goto out_nrg;
416         }
417
418         /* Round our left edge to the current segment if it encloses us. */
419         if (f > rg->from)
420                 f = rg->from;
421         chg = t - f;
422
423         /* Check for and consume any regions we now overlap with. */
424         list_for_each_entry(rg, rg->link.prev, link) {
425                 if (&rg->link == head)
426                         break;
427                 if (rg->from > t)
428                         goto out;
429
430                 /* We overlap with this area; if it extends further than
431                  * us then we must extend ourselves.  Account for its
432                  * existing reservation. */
433                 if (rg->to > t) {
434                         chg += rg->to - t;
435                         t = rg->to;
436                 }
437                 chg -= rg->to - rg->from;
438         }
439
440 out:
441         spin_unlock(&resv->lock);
442         /*  We already know we raced and no longer need the new region */
443         kfree(nrg);
444         return chg;
445 out_nrg:
446         spin_unlock(&resv->lock);
447         return chg;
448 }
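
/*
 * For example, with a reserve map containing only [0, 5), a call to
 * region_chg(resv, 3, 10) returns 5: indices 3 and 4 are already
 * represented, so only the five pages at indices 5 through 9 need new
 * reservations.  The subsequent region_add(resv, 3, 10) then expands
 * the existing entry to [0, 10) and likewise returns 5.
 */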
449
450 /*
451  * Abort the in progress add operation.  The adds_in_progress field
452  * of the resv_map keeps track of the operations in progress between
453  * calls to region_chg and region_add.  Operations are sometimes
454  * aborted after the call to region_chg.  In such cases, region_abort
455  * is called to decrement the adds_in_progress counter.
456  *
457  * NOTE: The range arguments [f, t) are not needed or used in this
458  * routine.  They are kept to make reading the calling code easier as
459  * arguments will match the associated region_chg call.
460  */
461 static void region_abort(struct resv_map *resv, long f, long t)
462 {
463         spin_lock(&resv->lock);
464         VM_BUG_ON(!resv->region_cache_count);
465         resv->adds_in_progress--;
466         spin_unlock(&resv->lock);
467 }
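
/*
 * A sketch of how callers are expected to pair the three routines above
 * (subpool and cgroup accounting elided):
 *
 *	chg = region_chg(resv, f, t);
 *	if (chg < 0)
 *		return chg;
 *	if (<charging chg pages against the global pools fails>) {
 *		region_abort(resv, f, t);
 *		return -ENOMEM;
 *	}
 *	...
 *	region_add(resv, f, t);
 *
 * region_chg() never changes the number of pages represented by the
 * map, region_add() commits the range, and region_abort() merely undoes
 * the adds_in_progress bookkeeping when the operation is abandoned.
 */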
468
469 /*
470  * Delete the specified range [f, t) from the reserve map.  If the
471  * t parameter is LONG_MAX, this indicates that ALL regions after f
472  * should be deleted.  Locate the regions which intersect [f, t)
473  * and either trim, delete or split the existing regions.
474  *
475  * Returns the number of huge pages deleted from the reserve map.
476  * In the normal case, the return value is zero or more.  In the
477  * case where a region must be split, a new region descriptor must
478  * be allocated.  If the allocation fails, -ENOMEM will be returned.
479  * NOTE: If the parameter t == LONG_MAX, then we will never split
480  * a region and thus will never return -ENOMEM.  Callers specifying
481  * t == LONG_MAX do not need to check for -ENOMEM error.
482  */
483 static long region_del(struct resv_map *resv, long f, long t)
484 {
485         struct list_head *head = &resv->regions;
486         struct file_region *rg, *trg;
487         struct file_region *nrg = NULL;
488         long del = 0;
489
490 retry:
491         spin_lock(&resv->lock);
492         list_for_each_entry_safe(rg, trg, head, link) {
493                 /*
494                  * Skip regions before the range to be deleted.  file_region
495                  * ranges are normally of the form [from, to).  However, there
496                  * may be a "placeholder" entry in the map which is of the form
497                  * (from, to) with from == to.  Check for placeholder entries
498                  * at the beginning of the range to be deleted.
499                  */
500                 if (rg->to <= f && (rg->to != rg->from || rg->to != f))
501                         continue;
502
503                 if (rg->from >= t)
504                         break;
505
506                 if (f > rg->from && t < rg->to) { /* Must split region */
507                         /*
508                          * Check for an entry in the cache before dropping
509                          * lock and attempting allocation.
510                          */
511                         if (!nrg &&
512                             resv->region_cache_count > resv->adds_in_progress) {
513                                 nrg = list_first_entry(&resv->region_cache,
514                                                         struct file_region,
515                                                         link);
516                                 list_del(&nrg->link);
517                                 resv->region_cache_count--;
518                         }
519
520                         if (!nrg) {
521                                 spin_unlock(&resv->lock);
522                                 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
523                                 if (!nrg)
524                                         return -ENOMEM;
525                                 goto retry;
526                         }
527
528                         del += t - f;
529
530                         /* New entry for end of split region */
531                         nrg->from = t;
532                         nrg->to = rg->to;
533                         INIT_LIST_HEAD(&nrg->link);
534
535                         /* Original entry is trimmed */
536                         rg->to = f;
537
538                         list_add(&nrg->link, &rg->link);
539                         nrg = NULL;
540                         break;
541                 }
542
543                 if (f <= rg->from && t >= rg->to) { /* Remove entire region */
544                         del += rg->to - rg->from;
545                         list_del(&rg->link);
546                         kfree(rg);
547                         continue;
548                 }
549
550                 if (f <= rg->from) {    /* Trim beginning of region */
551                         del += t - rg->from;
552                         rg->from = t;
553                 } else {                /* Trim end of region */
554                         del += rg->to - f;
555                         rg->to = f;
556                 }
557         }
558
559         spin_unlock(&resv->lock);
560         kfree(nrg);
561         return del;
562 }
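
/*
 * For example, with a reserve map containing [0, 10), a call to
 * region_del(resv, 3, 7) must split the region: the existing entry is
 * trimmed to [0, 3), a new [7, 10) entry is inserted after it, and 4
 * (the number of huge pages removed from the map) is returned.
 */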
563
564 /*
565  * A rare out-of-memory error was encountered which prevented removal of
566  * the reserve map region for a page.  The huge page itself was freed
567  * and removed from the page cache.  This routine will adjust the subpool
568  * usage count, and the global reserve count if needed.  By incrementing
569  * these counts, the reserve map entry which could not be deleted will
570  * appear as a "reserved" entry instead of simply dangling with incorrect
571  * counts.
572  */
573 void hugetlb_fix_reserve_counts(struct inode *inode)
574 {
575         struct hugepage_subpool *spool = subpool_inode(inode);
576         long rsv_adjust;
577
578         rsv_adjust = hugepage_subpool_get_pages(spool, 1);
579         if (rsv_adjust) {
580                 struct hstate *h = hstate_inode(inode);
581
582                 hugetlb_acct_memory(h, 1);
583         }
584 }
585
586 /*
587  * Count and return the number of huge pages in the reserve map
588  * that intersect with the range [f, t).
589  */
590 static long region_count(struct resv_map *resv, long f, long t)
591 {
592         struct list_head *head = &resv->regions;
593         struct file_region *rg;
594         long chg = 0;
595
596         spin_lock(&resv->lock);
597         /* Locate each segment we overlap with, and count that overlap. */
598         list_for_each_entry(rg, head, link) {
599                 long seg_from;
600                 long seg_to;
601
602                 if (rg->to <= f)
603                         continue;
604                 if (rg->from >= t)
605                         break;
606
607                 seg_from = max(rg->from, f);
608                 seg_to = min(rg->to, t);
609
610                 chg += seg_to - seg_from;
611         }
612         spin_unlock(&resv->lock);
613
614         return chg;
615 }
616
617 /*
618  * Convert the address within this vma to the page offset within
619  * the mapping, in pagecache page units; huge pages here.
620  */
621 static pgoff_t vma_hugecache_offset(struct hstate *h,
622                         struct vm_area_struct *vma, unsigned long address)
623 {
624         return ((address - vma->vm_start) >> huge_page_shift(h)) +
625                         (vma->vm_pgoff >> huge_page_order(h));
626 }
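
/*
 * For example, for a 2MB hstate (huge_page_shift(h) == 21) and a VMA
 * with vm_pgoff == 0, a fault at vma->vm_start + 4MB maps to
 * (4MB >> 21) + 0 == huge page index 2 within the mapping.
 */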
627
628 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
629                                      unsigned long address)
630 {
631         return vma_hugecache_offset(hstate_vma(vma), vma, address);
632 }
633 EXPORT_SYMBOL_GPL(linear_hugepage_index);
634
635 /*
636  * Return the size of the pages allocated when backing a VMA. In the majority
637  * of cases this will be the same size as used by the page table entries.
638  */
639 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
640 {
641         if (vma->vm_ops && vma->vm_ops->pagesize)
642                 return vma->vm_ops->pagesize(vma);
643         return PAGE_SIZE;
644 }
645 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
646
647 /*
648  * Return the page size being used by the MMU to back a VMA. In the majority
649  * of cases, the page size used by the kernel matches the MMU size. On
650  * architectures where it differs, an architecture-specific 'strong'
651  * version of this symbol is required.
652  */
653 __weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
654 {
655         return vma_kernel_pagesize(vma);
656 }
657
658 /*
659  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
660  * bits of the reservation map pointer, which are always clear due to
661  * alignment.
662  */
663 #define HPAGE_RESV_OWNER    (1UL << 0)
664 #define HPAGE_RESV_UNMAPPED (1UL << 1)
665 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
666
667 /*
668  * These helpers are used to track how many pages are reserved for
669  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
670  * is guaranteed to have its future faults succeed.
671  *
672  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
673  * the reserve counters are updated with the hugetlb_lock held. It is safe
674  * to reset the VMA at fork() time as it is not in use yet and there is no
675  * chance of the global counters getting corrupted as a result of the values.
676  *
677  * The private mapping reservation is represented in a subtly different
678  * manner to a shared mapping.  A shared mapping has a region map associated
679  * with the underlying file, this region map represents the backing file
680  * pages which have ever had a reservation assigned; this persists even
681  * after the page is instantiated.  A private mapping has a region map
682  * associated with the original mmap which is attached to all VMAs which
683  * reference it, this region map represents those offsets which have consumed
684  * reservation, i.e. where pages have been instantiated.
685  */
686 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
687 {
688         return (unsigned long)vma->vm_private_data;
689 }
690
691 static void set_vma_private_data(struct vm_area_struct *vma,
692                                                         unsigned long value)
693 {
694         vma->vm_private_data = (void *)value;
695 }
696
697 struct resv_map *resv_map_alloc(void)
698 {
699         struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
700         struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
701
702         if (!resv_map || !rg) {
703                 kfree(resv_map);
704                 kfree(rg);
705                 return NULL;
706         }
707
708         kref_init(&resv_map->refs);
709         spin_lock_init(&resv_map->lock);
710         INIT_LIST_HEAD(&resv_map->regions);
711
712         resv_map->adds_in_progress = 0;
713
714         INIT_LIST_HEAD(&resv_map->region_cache);
715         list_add(&rg->link, &resv_map->region_cache);
716         resv_map->region_cache_count = 1;
717
718         return resv_map;
719 }
720
721 void resv_map_release(struct kref *ref)
722 {
723         struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
724         struct list_head *head = &resv_map->region_cache;
725         struct file_region *rg, *trg;
726
727         /* Clear out any active regions before we release the map. */
728         region_del(resv_map, 0, LONG_MAX);
729
730         /* ... and any entries left in the cache */
731         list_for_each_entry_safe(rg, trg, head, link) {
732                 list_del(&rg->link);
733                 kfree(rg);
734         }
735
736         VM_BUG_ON(resv_map->adds_in_progress);
737
738         kfree(resv_map);
739 }
740
741 static inline struct resv_map *inode_resv_map(struct inode *inode)
742 {
743         /*
744          * At inode evict time, i_mapping may not point to the original
745          * address space within the inode.  This original address space
746          * contains the pointer to the resv_map.  So, always use the
747          * address space embedded within the inode.
748          * The VERY common case is inode->mapping == &inode->i_data, but
749          * this may not be true for device special inodes.
750          */
751         return (struct resv_map *)(&inode->i_data)->private_data;
752 }
753
754 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
755 {
756         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
757         if (vma->vm_flags & VM_MAYSHARE) {
758                 struct address_space *mapping = vma->vm_file->f_mapping;
759                 struct inode *inode = mapping->host;
760
761                 return inode_resv_map(inode);
762
763         } else {
764                 return (struct resv_map *)(get_vma_private_data(vma) &
765                                                         ~HPAGE_RESV_MASK);
766         }
767 }
768
769 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
770 {
771         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
772         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
773
774         set_vma_private_data(vma, (get_vma_private_data(vma) &
775                                 HPAGE_RESV_MASK) | (unsigned long)map);
776 }
777
778 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
779 {
780         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
781         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
782
783         set_vma_private_data(vma, get_vma_private_data(vma) | flags);
784 }
785
786 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
787 {
788         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
789
790         return (get_vma_private_data(vma) & flag) != 0;
791 }
792
793 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
794 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
795 {
796         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
797         if (!(vma->vm_flags & VM_MAYSHARE))
798                 vma->vm_private_data = (void *)0;
799 }
800
801 /* Returns true if the VMA has associated reserve pages */
802 static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
803 {
804         if (vma->vm_flags & VM_NORESERVE) {
805                 /*
806                  * This address is already reserved by another process (chg == 0),
807                  * so we should decrement the reserved count.  Without decrementing,
808                  * the reserve count would remain after the inode is released, because
809                  * the allocated page will go into the page cache and be regarded as
810                  * coming from the reserved pool in the release step.  Currently, we
811                  * don't have any other solution to deal with this situation
812                  * properly, so add a work-around here.
813                  */
814                 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
815                         return true;
816                 else
817                         return false;
818         }
819
820         /* Shared mappings always use reserves */
821         if (vma->vm_flags & VM_MAYSHARE) {
822                 /*
823                  * We know VM_NORESERVE is not set.  Therefore, there SHOULD
824                  * be a region map for all pages.  The only situation where
825                  * there is no region map is if a hole was punched via
826                  * fallocate.  In this case, there really are no reserves to
827                  * use.  This situation is indicated if chg != 0.
828                  */
829                 if (chg)
830                         return false;
831                 else
832                         return true;
833         }
834
835         /*
836          * Only the process that called mmap() has reserves for
837          * private mappings.
838          */
839         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
840                 /*
841                  * Like the shared case above, a hole punch or truncate
842                  * could have been performed on the private mapping.
843                  * Examine the value of chg to determine if reserves
844                  * actually exist or were previously consumed.
845                  * Very Subtle - The value of chg comes from a previous
846                  * call to vma_needs_reserves().  The reserve map for
847                  * private mappings has different (opposite) semantics
848                  * than that of shared mappings.  vma_needs_reserves()
849                  * has already taken this difference in semantics into
850                  * account.  Therefore, the meaning of chg is the same
851                  * as in the shared case above.  Code could easily be
852                  * combined, but keeping it separate draws attention to
853                  * subtle differences.
854                  */
855                 if (chg)
856                         return false;
857                 else
858                         return true;
859         }
860
861         return false;
862 }
863
864 static void enqueue_huge_page(struct hstate *h, struct page *page)
865 {
866         int nid = page_to_nid(page);
867         list_move(&page->lru, &h->hugepage_freelists[nid]);
868         h->free_huge_pages++;
869         h->free_huge_pages_node[nid]++;
870 }
871
872 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
873 {
874         struct page *page;
875
876         list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
877                 if (!PageHWPoison(page))
878                         break;
879         /*
880          * if 'non-isolated free hugepage' not found on the list,
881          * the allocation fails.
882          */
883         if (&h->hugepage_freelists[nid] == &page->lru)
884                 return NULL;
885         list_move(&page->lru, &h->hugepage_activelist);
886         set_page_refcounted(page);
887         h->free_huge_pages--;
888         h->free_huge_pages_node[nid]--;
889         return page;
890 }
891
892 static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
893                 nodemask_t *nmask)
894 {
895         unsigned int cpuset_mems_cookie;
896         struct zonelist *zonelist;
897         struct zone *zone;
898         struct zoneref *z;
899         int node = NUMA_NO_NODE;
900
901         zonelist = node_zonelist(nid, gfp_mask);
902
903 retry_cpuset:
904         cpuset_mems_cookie = read_mems_allowed_begin();
905         for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
906                 struct page *page;
907
908                 if (!cpuset_zone_allowed(zone, gfp_mask))
909                         continue;
910                 /*
911                  * no need to ask again on the same node. Pool is node rather than
912                  * zone aware
913                  */
914                 if (zone_to_nid(zone) == node)
915                         continue;
916                 node = zone_to_nid(zone);
917
918                 page = dequeue_huge_page_node_exact(h, node);
919                 if (page)
920                         return page;
921         }
922         if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
923                 goto retry_cpuset;
924
925         return NULL;
926 }
927
928 /* Movability of hugepages depends on migration support. */
929 static inline gfp_t htlb_alloc_mask(struct hstate *h)
930 {
931         if (hugepage_movable_supported(h))
932                 return GFP_HIGHUSER_MOVABLE;
933         else
934                 return GFP_HIGHUSER;
935 }
936
937 static struct page *dequeue_huge_page_vma(struct hstate *h,
938                                 struct vm_area_struct *vma,
939                                 unsigned long address, int avoid_reserve,
940                                 long chg)
941 {
942         struct page *page;
943         struct mempolicy *mpol;
944         gfp_t gfp_mask;
945         nodemask_t *nodemask;
946         int nid;
947
948         /*
949          * A child process with MAP_PRIVATE mappings created by its parent
950          * has no page reserves. This check ensures that reservations are
951          * not "stolen". The child may still get SIGKILLed.
952          */
953         if (!vma_has_reserves(vma, chg) &&
954                         h->free_huge_pages - h->resv_huge_pages == 0)
955                 goto err;
956
957         /* If reserves cannot be used, ensure enough pages are in the pool */
958         if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
959                 goto err;
960
961         gfp_mask = htlb_alloc_mask(h);
962         nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
963         page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
964         if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
965                 SetPagePrivate(page);
966                 h->resv_huge_pages--;
967         }
968
969         mpol_cond_put(mpol);
970         return page;
971
972 err:
973         return NULL;
974 }
975
976 /*
977  * common helper functions for hstate_next_node_to_{alloc|free}.
978  * We may have allocated or freed a huge page based on a different
979  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
980  * be outside of *nodes_allowed.  Ensure that we use an allowed
981  * node for alloc or free.
982  */
983 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
984 {
985         nid = next_node_in(nid, *nodes_allowed);
986         VM_BUG_ON(nid >= MAX_NUMNODES);
987
988         return nid;
989 }
990
991 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
992 {
993         if (!node_isset(nid, *nodes_allowed))
994                 nid = next_node_allowed(nid, nodes_allowed);
995         return nid;
996 }
997
998 /*
999  * returns the previously saved node ["this node"] from which to
1000  * allocate a persistent huge page for the pool and advance the
1001  * next node from which to allocate, handling wrap at end of node
1002  * mask.
1003  */
1004 static int hstate_next_node_to_alloc(struct hstate *h,
1005                                         nodemask_t *nodes_allowed)
1006 {
1007         int nid;
1008
1009         VM_BUG_ON(!nodes_allowed);
1010
1011         nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
1012         h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
1013
1014         return nid;
1015 }
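
/*
 * For example, if *nodes_allowed spans nodes {0, 2} and
 * h->next_nid_to_alloc is currently 1 (saved while a different mask was
 * in use), this returns 2 (the next allowed node after 1) and advances
 * h->next_nid_to_alloc to 0 for the following allocation.
 */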
1016
1017 /*
1018  * helper for free_pool_huge_page() - return the previously saved
1019  * node ["this node"] from which to free a huge page.  Advance the
1020  * next node id whether or not we find a free huge page to free so
1021  * that the next attempt to free addresses the next node.
1022  */
1023 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
1024 {
1025         int nid;
1026
1027         VM_BUG_ON(!nodes_allowed);
1028
1029         nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
1030         h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
1031
1032         return nid;
1033 }
1034
1035 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)           \
1036         for (nr_nodes = nodes_weight(*mask);                            \
1037                 nr_nodes > 0 &&                                         \
1038                 ((node = hstate_next_node_to_alloc(hs, mask)) || 1);    \
1039                 nr_nodes--)
1040
1041 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask)            \
1042         for (nr_nodes = nodes_weight(*mask);                            \
1043                 nr_nodes > 0 &&                                         \
1044                 ((node = hstate_next_node_to_free(hs, mask)) || 1);     \
1045                 nr_nodes--)
1046
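/*
 * For example, for_each_node_mask_to_alloc(h, nr_nodes, node, mask)
 * makes nodes_weight(*mask) passes, visiting each allowed node once,
 * starting from h->next_nid_to_alloc and wrapping around the mask, so
 * that repeated pool adjustments stay interleaved across nodes.
 */
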
1047 #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
1048 static void destroy_compound_gigantic_page(struct page *page,
1049                                         unsigned int order)
1050 {
1051         int i;
1052         int nr_pages = 1 << order;
1053         struct page *p = page + 1;
1054
1055         atomic_set(compound_mapcount_ptr(page), 0);
1056         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1057                 clear_compound_head(p);
1058                 set_page_refcounted(p);
1059         }
1060
1061         set_compound_order(page, 0);
1062         __ClearPageHead(page);
1063 }
1064
1065 static void free_gigantic_page(struct page *page, unsigned int order)
1066 {
1067         free_contig_range(page_to_pfn(page), 1 << order);
1068 }
1069
1070 #ifdef CONFIG_CONTIG_ALLOC
1071 static int __alloc_gigantic_page(unsigned long start_pfn,
1072                                 unsigned long nr_pages, gfp_t gfp_mask)
1073 {
1074         unsigned long end_pfn = start_pfn + nr_pages;
1075         return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
1076                                   gfp_mask);
1077 }
1078
1079 static bool pfn_range_valid_gigantic(struct zone *z,
1080                         unsigned long start_pfn, unsigned long nr_pages)
1081 {
1082         unsigned long i, end_pfn = start_pfn + nr_pages;
1083         struct page *page;
1084
1085         for (i = start_pfn; i < end_pfn; i++) {
1086                 if (!pfn_valid(i))
1087                         return false;
1088
1089                 page = pfn_to_page(i);
1090
1091                 if (page_zone(page) != z)
1092                         return false;
1093
1094                 if (PageReserved(page))
1095                         return false;
1096
1097                 if (page_count(page) > 0)
1098                         return false;
1099
1100                 if (PageHuge(page))
1101                         return false;
1102         }
1103
1104         return true;
1105 }
1106
1107 static bool zone_spans_last_pfn(const struct zone *zone,
1108                         unsigned long start_pfn, unsigned long nr_pages)
1109 {
1110         unsigned long last_pfn = start_pfn + nr_pages - 1;
1111         return zone_spans_pfn(zone, last_pfn);
1112 }
1113
1114 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1115                 int nid, nodemask_t *nodemask)
1116 {
1117         unsigned int order = huge_page_order(h);
1118         unsigned long nr_pages = 1 << order;
1119         unsigned long ret, pfn, flags;
1120         struct zonelist *zonelist;
1121         struct zone *zone;
1122         struct zoneref *z;
1123
1124         zonelist = node_zonelist(nid, gfp_mask);
1125         for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nodemask) {
1126                 spin_lock_irqsave(&zone->lock, flags);
1127
1128                 pfn = ALIGN(zone->zone_start_pfn, nr_pages);
1129                 while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
1130                         if (pfn_range_valid_gigantic(zone, pfn, nr_pages)) {
1131                                 /*
1132                                  * We release the zone lock here because
1133                                  * alloc_contig_range() will also lock the zone
1134                                  * at some point. If there's an allocation
1135                                  * spinning on this lock, it may win the race
1136                                  * and cause alloc_contig_range() to fail...
1137                                  */
1138                                 spin_unlock_irqrestore(&zone->lock, flags);
1139                                 ret = __alloc_gigantic_page(pfn, nr_pages, gfp_mask);
1140                                 if (!ret)
1141                                         return pfn_to_page(pfn);
1142                                 spin_lock_irqsave(&zone->lock, flags);
1143                         }
1144                         pfn += nr_pages;
1145                 }
1146
1147                 spin_unlock_irqrestore(&zone->lock, flags);
1148         }
1149
1150         return NULL;
1151 }
1152
1153 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1154 static void prep_compound_gigantic_page(struct page *page, unsigned int order);
1155 #else /* !CONFIG_CONTIG_ALLOC */
1156 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1157                                         int nid, nodemask_t *nodemask)
1158 {
1159         return NULL;
1160 }
1161 #endif /* CONFIG_CONTIG_ALLOC */
1162
1163 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
1164 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1165                                         int nid, nodemask_t *nodemask)
1166 {
1167         return NULL;
1168 }
1169 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1170 static inline void destroy_compound_gigantic_page(struct page *page,
1171                                                 unsigned int order) { }
1172 #endif
1173
1174 static void update_and_free_page(struct hstate *h, struct page *page)
1175 {
1176         int i;
1177
1178         if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
1179                 return;
1180
1181         h->nr_huge_pages--;
1182         h->nr_huge_pages_node[page_to_nid(page)]--;
1183         for (i = 0; i < pages_per_huge_page(h); i++) {
1184                 page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
1185                                 1 << PG_referenced | 1 << PG_dirty |
1186                                 1 << PG_active | 1 << PG_private |
1187                                 1 << PG_writeback);
1188         }
1189         VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1190         set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1191         set_page_refcounted(page);
1192         if (hstate_is_gigantic(h)) {
1193                 destroy_compound_gigantic_page(page, huge_page_order(h));
1194                 free_gigantic_page(page, huge_page_order(h));
1195         } else {
1196                 __free_pages(page, huge_page_order(h));
1197         }
1198 }
1199
1200 struct hstate *size_to_hstate(unsigned long size)
1201 {
1202         struct hstate *h;
1203
1204         for_each_hstate(h) {
1205                 if (huge_page_size(h) == size)
1206                         return h;
1207         }
1208         return NULL;
1209 }
1210
1211 /*
1212  * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
1213  * to hstate->hugepage_activelist.)
1214  *
1215  * This function can be called for tail pages, but never returns true for them.
1216  */
1217 bool page_huge_active(struct page *page)
1218 {
1219         VM_BUG_ON_PAGE(!PageHuge(page), page);
1220         return PageHead(page) && PagePrivate(&page[1]);
1221 }
1222
1223 /* never called for tail page */
1224 static void set_page_huge_active(struct page *page)
1225 {
1226         VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1227         SetPagePrivate(&page[1]);
1228 }
1229
1230 static void clear_page_huge_active(struct page *page)
1231 {
1232         VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1233         ClearPagePrivate(&page[1]);
1234 }
1235
1236 /*
1237  * Internal hugetlb specific page flag. Do not use outside of the hugetlb
1238  * code
1239  */
1240 static inline bool PageHugeTemporary(struct page *page)
1241 {
1242         if (!PageHuge(page))
1243                 return false;
1244
1245         return (unsigned long)page[2].mapping == -1U;
1246 }
1247
1248 static inline void SetPageHugeTemporary(struct page *page)
1249 {
1250         page[2].mapping = (void *)-1U;
1251 }
1252
1253 static inline void ClearPageHugeTemporary(struct page *page)
1254 {
1255         page[2].mapping = NULL;
1256 }
1257
1258 void free_huge_page(struct page *page)
1259 {
1260         /*
1261          * Can't pass hstate in here because it is called from the
1262          * compound page destructor.
1263          */
1264         struct hstate *h = page_hstate(page);
1265         int nid = page_to_nid(page);
1266         struct hugepage_subpool *spool =
1267                 (struct hugepage_subpool *)page_private(page);
1268         bool restore_reserve;
1269
1270         VM_BUG_ON_PAGE(page_count(page), page);
1271         VM_BUG_ON_PAGE(page_mapcount(page), page);
1272
1273         set_page_private(page, 0);
1274         page->mapping = NULL;
1275         restore_reserve = PagePrivate(page);
1276         ClearPagePrivate(page);
1277
1278         /*
1279          * If PagePrivate() was set on page, page allocation consumed a
1280          * reservation.  If the page was associated with a subpool, there
1281          * would have been a page reserved in the subpool before allocation
1282          * via hugepage_subpool_get_pages().  Since we are 'restoring' the
1283          * reservation, do not call hugepage_subpool_put_pages() as this will
1284          * remove the reserved page from the subpool.
1285          */
1286         if (!restore_reserve) {
1287                 /*
1288                  * A return code of zero implies that the subpool will be
1289                  * under its minimum size if the reservation is not restored
1290          * after the page is freed.  Therefore, force the restore_reserve
1291                  * operation.
1292                  */
1293                 if (hugepage_subpool_put_pages(spool, 1) == 0)
1294                         restore_reserve = true;
1295         }
1296
1297         spin_lock(&hugetlb_lock);
1298         clear_page_huge_active(page);
1299         hugetlb_cgroup_uncharge_page(hstate_index(h),
1300                                      pages_per_huge_page(h), page);
1301         if (restore_reserve)
1302                 h->resv_huge_pages++;
1303
1304         if (PageHugeTemporary(page)) {
1305                 list_del(&page->lru);
1306                 ClearPageHugeTemporary(page);
1307                 update_and_free_page(h, page);
1308         } else if (h->surplus_huge_pages_node[nid]) {
1309                 /* remove the page from active list */
1310                 list_del(&page->lru);
1311                 update_and_free_page(h, page);
1312                 h->surplus_huge_pages--;
1313                 h->surplus_huge_pages_node[nid]--;
1314         } else {
1315                 arch_clear_hugepage_flags(page);
1316                 enqueue_huge_page(h, page);
1317         }
1318         spin_unlock(&hugetlb_lock);
1319 }
1320
1321 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1322 {
1323         INIT_LIST_HEAD(&page->lru);
1324         set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1325         spin_lock(&hugetlb_lock);
1326         set_hugetlb_cgroup(page, NULL);
1327         h->nr_huge_pages++;
1328         h->nr_huge_pages_node[nid]++;
1329         spin_unlock(&hugetlb_lock);
1330 }
1331
1332 static void prep_compound_gigantic_page(struct page *page, unsigned int order)
1333 {
1334         int i;
1335         int nr_pages = 1 << order;
1336         struct page *p = page + 1;
1337
1338         /* we rely on prep_new_huge_page to set the destructor */
1339         set_compound_order(page, order);
1340         __ClearPageReserved(page);
1341         __SetPageHead(page);
1342         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1343                 /*
1344                  * For gigantic hugepages allocated through bootmem at
1345                  * boot, it's safer to be consistent with the not-gigantic
1346                  * hugepages and clear the PG_reserved bit from all tail pages
1347          * too.  Otherwise drivers using get_user_pages() to access tail
1348                  * pages may get the reference counting wrong if they see
1349                  * PG_reserved set on a tail page (despite the head page not
1350                  * having PG_reserved set).  Enforcing this consistency between
1351                  * head and tail pages allows drivers to optimize away a check
1352          * on the head page when they need to know if put_page() is needed
1353                  * after get_user_pages().
1354                  */
1355                 __ClearPageReserved(p);
1356                 set_page_count(p, 0);
1357                 set_compound_head(p, page);
1358         }
1359         atomic_set(compound_mapcount_ptr(page), -1);
1360 }
1361
1362 /*
1363  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1364  * transparent huge pages.  See the PageTransHuge() documentation for more
1365  * details.
1366  */
1367 int PageHuge(struct page *page)
1368 {
1369         if (!PageCompound(page))
1370                 return 0;
1371
1372         page = compound_head(page);
1373         return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
1374 }
1375 EXPORT_SYMBOL_GPL(PageHuge);
1376
1377 /*
1378  * PageHeadHuge() only returns true for hugetlbfs head page, but not for
1379  * normal or transparent huge pages.
1380  */
1381 int PageHeadHuge(struct page *page_head)
1382 {
1383         if (!PageHead(page_head))
1384                 return 0;
1385
1386         return get_compound_page_dtor(page_head) == free_huge_page;
1387 }
1388
1389 pgoff_t __basepage_index(struct page *page)
1390 {
1391         struct page *page_head = compound_head(page);
1392         pgoff_t index = page_index(page_head);
1393         unsigned long compound_idx;
1394
1395         if (!PageHuge(page_head))
1396                 return page_index(page);
1397
1398         if (compound_order(page_head) >= MAX_ORDER)
1399                 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1400         else
1401                 compound_idx = page - page_head;
1402
1403         return (index << compound_order(page_head)) + compound_idx;
1404 }
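
/*
 * For example, assuming a 4KB base page, a 2MB hugetlb page (compound
 * order 9) at huge page index 3 in its mapping has its fifth tail page
 * at compound_idx == 5, giving a base page index of (3 << 9) + 5 == 1541.
 */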
1405
1406 static struct page *alloc_buddy_huge_page(struct hstate *h,
1407                 gfp_t gfp_mask, int nid, nodemask_t *nmask)
1408 {
1409         int order = huge_page_order(h);
1410         struct page *page;
1411
1412         gfp_mask |= __GFP_COMP|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
1413         if (nid == NUMA_NO_NODE)
1414                 nid = numa_mem_id();
1415         page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
1416         if (page)
1417                 __count_vm_event(HTLB_BUDDY_PGALLOC);
1418         else
1419                 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1420
1421         return page;
1422 }
1423
1424 /*
1425  * Common helper to allocate a fresh hugetlb page. All specific allocators
1426  * should use this function to get new hugetlb pages.
1427  */
1428 static struct page *alloc_fresh_huge_page(struct hstate *h,
1429                 gfp_t gfp_mask, int nid, nodemask_t *nmask)
1430 {
1431         struct page *page;
1432
1433         if (hstate_is_gigantic(h))
1434                 page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
1435         else
1436                 page = alloc_buddy_huge_page(h, gfp_mask,
1437                                 nid, nmask);
1438         if (!page)
1439                 return NULL;
1440
1441         if (hstate_is_gigantic(h))
1442                 prep_compound_gigantic_page(page, huge_page_order(h));
1443         prep_new_huge_page(h, page, page_to_nid(page));
1444
1445         return page;
1446 }
1447
1448 /*
1449  * Allocates a fresh page to the hugetlb allocator pool in a node-interleaved
1450  * manner.
1451  */
1452 static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
1453 {
1454         struct page *page;
1455         int nr_nodes, node;
1456         gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
1457
1458         for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1459                 page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed);
1460                 if (page)
1461                         break;
1462         }
1463
1464         if (!page)
1465                 return 0;
1466
1467         put_page(page); /* free it into the hugepage allocator */
1468
1469         return 1;
1470 }
1471
1472 /*
1473  * Free one huge page from the pool, taken from the next node to free,
1474  * attempting to keep persistent huge pages more or less balanced over
1475  * the allowed nodes.
1476  * Called with hugetlb_lock locked.
1477  */
1478 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1479                                                          bool acct_surplus)
1480 {
1481         int nr_nodes, node;
1482         int ret = 0;
1483
1484         for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1485                 /*
1486                  * If we're returning unused surplus pages, only examine
1487                  * nodes with surplus pages.
1488                  */
1489                 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1490                     !list_empty(&h->hugepage_freelists[node])) {
1491                         struct page *page =
1492                                 list_entry(h->hugepage_freelists[node].next,
1493                                           struct page, lru);
1494                         list_del(&page->lru);
1495                         h->free_huge_pages--;
1496                         h->free_huge_pages_node[node]--;
1497                         if (acct_surplus) {
1498                                 h->surplus_huge_pages--;
1499                                 h->surplus_huge_pages_node[node]--;
1500                         }
1501                         update_and_free_page(h, page);
1502                         ret = 1;
1503                         break;
1504                 }
1505         }
1506
1507         return ret;
1508 }
1509
1510 /*
1511  * Dissolve a given free hugepage into free buddy pages. This function does
1512  * nothing for in-use (including surplus) hugepages. Returns -EBUSY if the
1513  * dissolution fails because the given page is not a free hugepage, or because
1514  * free hugepages are fully reserved.
1515  */
1516 int dissolve_free_huge_page(struct page *page)
1517 {
1518         int rc = -EBUSY;
1519
1520         spin_lock(&hugetlb_lock);
1521         if (PageHuge(page) && !page_count(page)) {
1522                 struct page *head = compound_head(page);
1523                 struct hstate *h = page_hstate(head);
1524                 int nid = page_to_nid(head);
1525                 if (h->free_huge_pages - h->resv_huge_pages == 0)
1526                         goto out;
1527                 /*
1528                  * Move PageHWPoison flag from head page to the raw error page,
1529                  * which makes the subpages other than the error page reusable.
1530                  */
1531                 if (PageHWPoison(head) && page != head) {
1532                         SetPageHWPoison(page);
1533                         ClearPageHWPoison(head);
1534                 }
1535                 list_del(&head->lru);
1536                 h->free_huge_pages--;
1537                 h->free_huge_pages_node[nid]--;
1538                 h->max_huge_pages--;
1539                 update_and_free_page(h, head);
1540                 rc = 0;
1541         }
1542 out:
1543         spin_unlock(&hugetlb_lock);
1544         return rc;
1545 }
1546
1547 /*
1548  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1549  * make specified memory blocks removable from the system.
1550  * Note that this will dissolve a free gigantic hugepage completely, if any
1551  * part of it lies within the given range.
1552  * Also note that if dissolve_free_huge_page() returns with an error, all
1553  * free hugepages that were dissolved before that error are lost.
1554  */
1555 int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1556 {
1557         unsigned long pfn;
1558         struct page *page;
1559         int rc = 0;
1560
1561         if (!hugepages_supported())
1562                 return rc;
1563
1564         for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
1565                 page = pfn_to_page(pfn);
1566                 if (PageHuge(page) && !page_count(page)) {
1567                         rc = dissolve_free_huge_page(page);
1568                         if (rc)
1569                                 break;
1570                 }
1571         }
1572
1573         return rc;
1574 }
1575
1576 /*
1577  * Allocates a fresh surplus page from the page allocator.
1578  */
1579 static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
1580                 int nid, nodemask_t *nmask)
1581 {
1582         struct page *page = NULL;
1583
1584         if (hstate_is_gigantic(h))
1585                 return NULL;
1586
1587         spin_lock(&hugetlb_lock);
1588         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
1589                 goto out_unlock;
1590         spin_unlock(&hugetlb_lock);
1591
1592         page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
1593         if (!page)
1594                 return NULL;
1595
1596         spin_lock(&hugetlb_lock);
1597         /*
1598          * We could have raced with the pool size change.
1599          * Double check that and simply deallocate the new page
1600          * if we would end up overcommitting the surpluses. Abuse the
1601          * temporary page marking to work around the nasty free_huge_page
1602          * code flow.
1603          */
1604         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1605                 SetPageHugeTemporary(page);
1606                 spin_unlock(&hugetlb_lock);
1607                 put_page(page);
1608                 return NULL;
1609         } else {
1610                 h->surplus_huge_pages++;
1611                 h->surplus_huge_pages_node[page_to_nid(page)]++;
1612         }
1613
1614 out_unlock:
1615         spin_unlock(&hugetlb_lock);
1616
1617         return page;
1618 }
1619
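/*
 * Allocate a fresh, temporary huge page outside of the pool for use as a
 * migration target.  Gigantic hstates are not supported here.
 */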
1620 struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
1621                                      int nid, nodemask_t *nmask)
1622 {
1623         struct page *page;
1624
1625         if (hstate_is_gigantic(h))
1626                 return NULL;
1627
1628         page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
1629         if (!page)
1630                 return NULL;
1631
1632         /*
1633          * We do not account these pages as surplus because they are only
1634          * temporary and will be released properly on the last reference
1635          */
1636         SetPageHugeTemporary(page);
1637
1638         return page;
1639 }
1640
1641 /*
1642  * Use the VMA's mpolicy to allocate a huge page from the buddy.
1643  */
1644 static
1645 struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
1646                 struct vm_area_struct *vma, unsigned long addr)
1647 {
1648         struct page *page;
1649         struct mempolicy *mpol;
1650         gfp_t gfp_mask = htlb_alloc_mask(h);
1651         int nid;
1652         nodemask_t *nodemask;
1653
1654         nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
1655         page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
1656         mpol_cond_put(mpol);
1657
1658         return page;
1659 }
1660
1661 /* page migration callback function */
1662 struct page *alloc_huge_page_node(struct hstate *h, int nid)
1663 {
1664         gfp_t gfp_mask = htlb_alloc_mask(h);
1665         struct page *page = NULL;
1666
1667         if (nid != NUMA_NO_NODE)
1668                 gfp_mask |= __GFP_THISNODE;
1669
1670         spin_lock(&hugetlb_lock);
1671         if (h->free_huge_pages - h->resv_huge_pages > 0)
1672                 page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL);
1673         spin_unlock(&hugetlb_lock);
1674
1675         if (!page)
1676                 page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
1677
1678         return page;
1679 }
1680
1681 /* page migration callback function */
1682 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
1683                 nodemask_t *nmask)
1684 {
1685         gfp_t gfp_mask = htlb_alloc_mask(h);
1686
1687         spin_lock(&hugetlb_lock);
1688         if (h->free_huge_pages - h->resv_huge_pages > 0) {
1689                 struct page *page;
1690
1691                 page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
1692                 if (page) {
1693                         spin_unlock(&hugetlb_lock);
1694                         return page;
1695                 }
1696         }
1697         spin_unlock(&hugetlb_lock);
1698
1699         return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
1700 }
1701
1702 /* mempolicy aware migration callback */
1703 struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
1704                 unsigned long address)
1705 {
1706         struct mempolicy *mpol;
1707         nodemask_t *nodemask;
1708         struct page *page;
1709         gfp_t gfp_mask;
1710         int node;
1711
1712         gfp_mask = htlb_alloc_mask(h);
1713         node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1714         page = alloc_huge_page_nodemask(h, node, nodemask);
1715         mpol_cond_put(mpol);
1716
1717         return page;
1718 }
1719
1720 /*
1721  * Increase the hugetlb pool such that it can accommodate a reservation
1722  * of size 'delta'.
1723  */
1724 static int gather_surplus_pages(struct hstate *h, int delta)
1725 {
1726         struct list_head surplus_list;
1727         struct page *page, *tmp;
1728         int ret, i;
1729         int needed, allocated;
1730         bool alloc_ok = true;
1731
1732         needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1733         if (needed <= 0) {
1734                 h->resv_huge_pages += delta;
1735                 return 0;
1736         }
1737
1738         allocated = 0;
1739         INIT_LIST_HEAD(&surplus_list);
1740
1741         ret = -ENOMEM;
1742 retry:
1743         spin_unlock(&hugetlb_lock);
1744         for (i = 0; i < needed; i++) {
1745                 page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
1746                                 NUMA_NO_NODE, NULL);
1747                 if (!page) {
1748                         alloc_ok = false;
1749                         break;
1750                 }
1751                 list_add(&page->lru, &surplus_list);
1752                 cond_resched();
1753         }
1754         allocated += i;
1755
1756         /*
1757          * After retaking hugetlb_lock, we need to recalculate 'needed'
1758          * because either resv_huge_pages or free_huge_pages may have changed.
1759          */
1760         spin_lock(&hugetlb_lock);
1761         needed = (h->resv_huge_pages + delta) -
1762                         (h->free_huge_pages + allocated);
1763         if (needed > 0) {
1764                 if (alloc_ok)
1765                         goto retry;
1766                 /*
1767                  * We were not able to allocate enough pages to
1768                  * satisfy the entire reservation so we free what
1769                  * we've allocated so far.
1770                  */
1771                 goto free;
1772         }
1773         /*
1774          * The surplus_list now contains _at_least_ the number of extra pages
1775          * needed to accommodate the reservation.  Add the appropriate number
1776          * of pages to the hugetlb pool and free the extras back to the buddy
1777          * allocator.  Commit the entire reservation here to prevent another
1778          * process from stealing the pages as they are added to the pool but
1779          * before they are reserved.
1780          */
1781         needed += allocated;
1782         h->resv_huge_pages += delta;
1783         ret = 0;
1784
1785         /* Free the needed pages to the hugetlb pool */
1786         list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1787                 if ((--needed) < 0)
1788                         break;
1789                 /*
1790                  * This page is now managed by the hugetlb allocator and has
1791                  * no users -- drop the buddy allocator's reference.
1792                  */
1793                 put_page_testzero(page);
1794                 VM_BUG_ON_PAGE(page_count(page), page);
1795                 enqueue_huge_page(h, page);
1796         }
1797 free:
1798         spin_unlock(&hugetlb_lock);
1799
1800         /* Free unnecessary surplus pages to the buddy allocator */
1801         list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1802                 put_page(page);
1803         spin_lock(&hugetlb_lock);
1804
1805         return ret;
1806 }
1807
1808 /*
1809  * This routine has two main purposes:
1810  * 1) Decrement the reservation count (resv_huge_pages) by the value passed
1811  *    in unused_resv_pages.  This corresponds to the prior adjustments made
1812  *    to the associated reservation map.
1813  * 2) Free any unused surplus pages that may have been allocated to satisfy
1814  *    the reservation.  As many as unused_resv_pages may be freed.
1815  *
1816  * Called with hugetlb_lock held.  However, the lock could be dropped (and
1817  * reacquired) during calls to cond_resched_lock.  Whenever dropping the lock,
1818  * we must make sure nobody else can claim pages we are in the process of
1819  * freeing.  Do this by ensuring resv_huge_pages is always greater than the
1820  * number of huge pages we plan to free when dropping the lock.
1821  */
1822 static void return_unused_surplus_pages(struct hstate *h,
1823                                         unsigned long unused_resv_pages)
1824 {
1825         unsigned long nr_pages;
1826
1827         /* Cannot return gigantic pages currently */
1828         if (hstate_is_gigantic(h))
1829                 goto out;
1830
1831         /*
1832          * Part (or even all) of the reservation could have been backed
1833          * by pre-allocated pages. Only free surplus pages.
1834          */
1835         nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1836
1837         /*
1838          * We want to release as many surplus pages as possible, spread
1839          * evenly across all nodes with memory. Iterate across these nodes
1840          * until we can no longer free unreserved surplus pages. This occurs
1841          * when the nodes with surplus pages have no free pages.
1842          * free_pool_huge_page() will balance the freed pages across the
1843          * on-line nodes with memory and will handle the hstate accounting.
1844          *
1845          * Note that we decrement resv_huge_pages as we free the pages.  If
1846          * we drop the lock, resv_huge_pages will still be sufficiently large
1847          * to cover subsequent pages we may free.
1848          */
1849         while (nr_pages--) {
1850                 h->resv_huge_pages--;
1851                 unused_resv_pages--;
1852                 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1853                         goto out;
1854                 cond_resched_lock(&hugetlb_lock);
1855         }
1856
1857 out:
1858         /* Fully uncommit the reservation */
1859         h->resv_huge_pages -= unused_resv_pages;
1860 }
1861
1862
1863 /*
1864  * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
1865  * are used by the huge page allocation routines to manage reservations.
1866  *
1867  * vma_needs_reservation is called to determine if the huge page at addr
1868  * within the vma has an associated reservation.  If a reservation is
1869  * needed, the value 1 is returned.  The caller is then responsible for
1870  * managing the global reservation and subpool usage counts.  After
1871  * the huge page has been allocated, vma_commit_reservation is called
1872  * to add the page to the reservation map.  If the page allocation fails,
1873  * the reservation must be ended instead of committed.  vma_end_reservation
1874  * is called in such cases.
1875  *
1876  * In the normal case, vma_commit_reservation returns the same value
1877  * as the preceding vma_needs_reservation call.  The only time this
1878  * is not the case is if a reserve map was changed between calls.  It
1879  * is the responsibility of the caller to notice the difference and
1880  * take appropriate action.
1881  *
1882  * vma_add_reservation is used in error paths where a reservation must
1883  * be restored when a newly allocated huge page must be freed.  It is
1884  * to be called after calling vma_needs_reservation to determine if a
1885  * reservation exists.
1886  */
1887 enum vma_resv_mode {
1888         VMA_NEEDS_RESV,
1889         VMA_COMMIT_RESV,
1890         VMA_END_RESV,
1891         VMA_ADD_RESV,
1892 };
1893 static long __vma_reservation_common(struct hstate *h,
1894                                 struct vm_area_struct *vma, unsigned long addr,
1895                                 enum vma_resv_mode mode)
1896 {
1897         struct resv_map *resv;
1898         pgoff_t idx;
1899         long ret;
1900
1901         resv = vma_resv_map(vma);
1902         if (!resv)
1903                 return 1;
1904
1905         idx = vma_hugecache_offset(h, vma, addr);
1906         switch (mode) {
1907         case VMA_NEEDS_RESV:
1908                 ret = region_chg(resv, idx, idx + 1);
1909                 break;
1910         case VMA_COMMIT_RESV:
1911                 ret = region_add(resv, idx, idx + 1);
1912                 break;
1913         case VMA_END_RESV:
1914                 region_abort(resv, idx, idx + 1);
1915                 ret = 0;
1916                 break;
1917         case VMA_ADD_RESV:
1918                 if (vma->vm_flags & VM_MAYSHARE)
1919                         ret = region_add(resv, idx, idx + 1);
1920                 else {
1921                         region_abort(resv, idx, idx + 1);
1922                         ret = region_del(resv, idx, idx + 1);
1923                 }
1924                 break;
1925         default:
1926                 BUG();
1927         }
1928
1929         if (vma->vm_flags & VM_MAYSHARE)
1930                 return ret;
1931         else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
1932                 /*
1933                  * In most cases, reserves always exist for private mappings.
1934                  * However, a file associated with the mapping could have been
1935                  * hole punched or truncated after reserves were consumed; a
1936                  * subsequent fault on such a range will not use reserves.
1937                  * Subtle - The reserve map for private mappings has the
1938                  * opposite meaning than that of shared mappings.  If NO
1939                  * entry is in the reserve map, it means a reservation exists.
1940                  * If an entry exists in the reserve map, it means the
1941                  * reservation has already been consumed.  As a result, the
1942                  * return value of this routine is the opposite of the
1943                  * value returned from reserve map manipulation routines above.
1944                  */
1945                 if (ret)
1946                         return 0;
1947                 else
1948                         return 1;
1949         }
1950         else
1951                 return ret < 0 ? ret : 0;
1952 }
1953
1954 static long vma_needs_reservation(struct hstate *h,
1955                         struct vm_area_struct *vma, unsigned long addr)
1956 {
1957         return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
1958 }
1959
1960 static long vma_commit_reservation(struct hstate *h,
1961                         struct vm_area_struct *vma, unsigned long addr)
1962 {
1963         return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
1964 }
1965
1966 static void vma_end_reservation(struct hstate *h,
1967                         struct vm_area_struct *vma, unsigned long addr)
1968 {
1969         (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
1970 }
1971
1972 static long vma_add_reservation(struct hstate *h,
1973                         struct vm_area_struct *vma, unsigned long addr)
1974 {
1975         return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
1976 }
1977
1978 /*
1979  * This routine is called to restore a reservation on error paths.  In the
1980  * specific error paths, a huge page was allocated (via alloc_huge_page)
1981  * and is about to be freed.  If a reservation for the page existed,
1982  * alloc_huge_page would have consumed the reservation and set PagePrivate
1983  * in the newly allocated page.  When the page is freed via free_huge_page,
1984  * the global reservation count will be incremented if PagePrivate is set.
1985  * However, free_huge_page can not adjust the reserve map.  Adjust the
1986  * reserve map here to be consistent with global reserve count adjustments
1987  * to be made by free_huge_page.
1988  */
1989 static void restore_reserve_on_error(struct hstate *h,
1990                         struct vm_area_struct *vma, unsigned long address,
1991                         struct page *page)
1992 {
1993         if (unlikely(PagePrivate(page))) {
1994                 long rc = vma_needs_reservation(h, vma, address);
1995
1996                 if (unlikely(rc < 0)) {
1997                         /*
1998                          * Rare out of memory condition in reserve map
1999                          * manipulation.  Clear PagePrivate so that
2000                          * global reserve count will not be incremented
2001                          * by free_huge_page.  This will make it appear
2002                          * as though the reservation for this page was
2003                          * consumed.  This may prevent the task from
2004                          * faulting in the page at a later time.  This
2005                          * is better than inconsistent global huge page
2006                          * accounting of reserve counts.
2007                          */
2008                         ClearPagePrivate(page);
2009                 } else if (rc) {
2010                         rc = vma_add_reservation(h, vma, address);
2011                         if (unlikely(rc < 0))
2012                                 /*
2013                                  * See above comment about rare out of
2014                                  * memory condition.
2015                                  */
2016                                 ClearPagePrivate(page);
2017                 } else
2018                         vma_end_reservation(h, vma, address);
2019         }
2020 }
2021
2022 struct page *alloc_huge_page(struct vm_area_struct *vma,
2023                                     unsigned long addr, int avoid_reserve)
2024 {
2025         struct hugepage_subpool *spool = subpool_vma(vma);
2026         struct hstate *h = hstate_vma(vma);
2027         struct page *page;
2028         long map_chg, map_commit;
2029         long gbl_chg;
2030         int ret, idx;
2031         struct hugetlb_cgroup *h_cg;
2032
2033         idx = hstate_index(h);
2034         /*
2035          * Examine the region/reserve map to determine if the process
2036          * has a reservation for the page to be allocated.  A return
2037          * code of zero indicates a reservation exists (no change).
2038          */
2039         map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
2040         if (map_chg < 0)
2041                 return ERR_PTR(-ENOMEM);
2042
2043         /*
2044          * Processes that did not create the mapping will have no
2045          * reserves as indicated by the region/reserve map. Check
2046          * that the allocation will not exceed the subpool limit.
2047          * Allocations for MAP_NORESERVE mappings also need to be
2048          * checked against any subpool limit.
2049          */
2050         if (map_chg || avoid_reserve) {
2051                 gbl_chg = hugepage_subpool_get_pages(spool, 1);
2052                 if (gbl_chg < 0) {
2053                         vma_end_reservation(h, vma, addr);
2054                         return ERR_PTR(-ENOSPC);
2055                 }
2056
2057                 /*
2058                  * Even though there was no reservation in the region/reserve
2059                  * map, there could be reservations associated with the
2060                  * subpool that can be used.  This would be indicated if the
2061                  * return value of hugepage_subpool_get_pages() is zero.
2062                  * However, if avoid_reserve is specified we still avoid even
2063                  * the subpool reservations.
2064                  */
2065                 if (avoid_reserve)
2066                         gbl_chg = 1;
2067         }
2068
2069         ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
2070         if (ret)
2071                 goto out_subpool_put;
2072
2073         spin_lock(&hugetlb_lock);
2074         /*
2075          * gbl_chg is passed to indicate whether or not a page must be taken
2076          * from the global free pool (global change).  gbl_chg == 0 indicates
2077          * a reservation exists for the allocation.
2078          */
2079         page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
2080         if (!page) {
2081                 spin_unlock(&hugetlb_lock);
2082                 page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
2083                 if (!page)
2084                         goto out_uncharge_cgroup;
2085                 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
2086                         SetPagePrivate(page);
2087                         h->resv_huge_pages--;
2088                 }
2089                 spin_lock(&hugetlb_lock);
2090                 list_move(&page->lru, &h->hugepage_activelist);
2091                 /* Fall through */
2092         }
2093         hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
2094         spin_unlock(&hugetlb_lock);
2095
2096         set_page_private(page, (unsigned long)spool);
2097
2098         map_commit = vma_commit_reservation(h, vma, addr);
2099         if (unlikely(map_chg > map_commit)) {
2100                 /*
2101                  * The page was added to the reservation map between
2102                  * vma_needs_reservation and vma_commit_reservation.
2103                  * This indicates a race with hugetlb_reserve_pages.
2104                  * Adjust for the subpool count incremented above AND
2105                  * in hugetlb_reserve_pages for the same page.  Also,
2106                  * the reservation count added in hugetlb_reserve_pages
2107                  * no longer applies.
2108                  */
2109                 long rsv_adjust;
2110
2111                 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
2112                 hugetlb_acct_memory(h, -rsv_adjust);
2113         }
2114         return page;
2115
2116 out_uncharge_cgroup:
2117         hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
2118 out_subpool_put:
2119         if (map_chg || avoid_reserve)
2120                 hugepage_subpool_put_pages(spool, 1);
2121         vma_end_reservation(h, vma, addr);
2122         return ERR_PTR(-ENOSPC);
2123 }
2124
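/*
 * Allocate one boot-time huge page for @h directly from memblock and queue it
 * on huge_boot_pages; gather_bootmem_prealloc() turns these into proper
 * hugetlb pages once mem_map is up.  Returns 1 on success, 0 on failure.
 */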
2125 int alloc_bootmem_huge_page(struct hstate *h)
2126         __attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
2127 int __alloc_bootmem_huge_page(struct hstate *h)
2128 {
2129         struct huge_bootmem_page *m;
2130         int nr_nodes, node;
2131
2132         for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
2133                 void *addr;
2134
2135                 addr = memblock_alloc_try_nid_raw(
2136                                 huge_page_size(h), huge_page_size(h),
2137                                 0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
2138                 if (addr) {
2139                         /*
2140                          * Use the beginning of the huge page to store the
2141                          * huge_bootmem_page struct (until gather_bootmem
2142                          * puts them into the mem_map).
2143                          */
2144                         m = addr;
2145                         goto found;
2146                 }
2147         }
2148         return 0;
2149
2150 found:
2151         BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
2152         /* Put them into a private list first because mem_map is not up yet */
2153         INIT_LIST_HEAD(&m->list);
2154         list_add(&m->list, &huge_boot_pages);
2155         m->hstate = h;
2156         return 1;
2157 }
2158
2159 static void __init prep_compound_huge_page(struct page *page,
2160                 unsigned int order)
2161 {
2162         if (unlikely(order > (MAX_ORDER - 1)))
2163                 prep_compound_gigantic_page(page, order);
2164         else
2165                 prep_compound_page(page, order);
2166 }
2167
2168 /* Put bootmem huge pages into the standard lists after mem_map is up */
2169 static void __init gather_bootmem_prealloc(void)
2170 {
2171         struct huge_bootmem_page *m;
2172
2173         list_for_each_entry(m, &huge_boot_pages, list) {
2174                 struct page *page = virt_to_page(m);
2175                 struct hstate *h = m->hstate;
2176
2177                 WARN_ON(page_count(page) != 1);
2178                 prep_compound_huge_page(page, h->order);
2179                 WARN_ON(PageReserved(page));
2180                 prep_new_huge_page(h, page, page_to_nid(page));
2181                 put_page(page); /* free it into the hugepage allocator */
2182
2183                 /*
2184                  * If we had gigantic hugepages allocated at boot time, we need
2185                  * to restore the 'stolen' pages to totalram_pages in order to
2186                  * fix confusing memory reports from free(1) and another
2187                  * side-effects, like CommitLimit going negative.
2188                  */
2189                 if (hstate_is_gigantic(h))
2190                         adjust_managed_page_count(page, 1 << h->order);
2191                 cond_resched();
2192         }
2193 }
2194
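/*
 * Preallocate up to max_huge_pages pages for @h at boot time, clamping
 * max_huge_pages down and warning if fewer pages could be allocated.
 */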
2195 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
2196 {
2197         unsigned long i;
2198
2199         for (i = 0; i < h->max_huge_pages; ++i) {
2200                 if (hstate_is_gigantic(h)) {
2201                         if (!alloc_bootmem_huge_page(h))
2202                                 break;
2203                 } else if (!alloc_pool_huge_page(h,
2204                                          &node_states[N_MEMORY]))
2205                         break;
2206                 cond_resched();
2207         }
2208         if (i < h->max_huge_pages) {
2209                 char buf[32];
2210
2211                 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2212                 pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
2213                         h->max_huge_pages, buf, i);
2214                 h->max_huge_pages = i;
2215         }
2216 }
2217
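/*
 * Record the smallest huge page order seen (minimum_order) and allocate the
 * boot-time pool for every non-gigantic hstate; gigantic pools were already
 * set up while the bootmem allocator was available.
 */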
2218 static void __init hugetlb_init_hstates(void)
2219 {
2220         struct hstate *h;
2221
2222         for_each_hstate(h) {
2223                 if (minimum_order > huge_page_order(h))
2224                         minimum_order = huge_page_order(h);
2225
2226                 /* oversize hugepages were init'ed in early boot */
2227                 if (!hstate_is_gigantic(h))
2228                         hugetlb_hstate_alloc_pages(h);
2229         }
2230         VM_BUG_ON(minimum_order == UINT_MAX);
2231 }
2232
2233 static void __init report_hugepages(void)
2234 {
2235         struct hstate *h;
2236
2237         for_each_hstate(h) {
2238                 char buf[32];
2239
2240                 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2241                 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
2242                         buf, h->free_huge_pages);
2243         }
2244 }
2245
2246 #ifdef CONFIG_HIGHMEM
2247 static void try_to_free_low(struct hstate *h, unsigned long count,
2248                                                 nodemask_t *nodes_allowed)
2249 {
2250         int i;
2251
2252         if (hstate_is_gigantic(h))
2253                 return;
2254
2255         for_each_node_mask(i, *nodes_allowed) {
2256                 struct page *page, *next;
2257                 struct list_head *freel = &h->hugepage_freelists[i];
2258                 list_for_each_entry_safe(page, next, freel, lru) {
2259                         if (count >= h->nr_huge_pages)
2260                                 return;
2261                         if (PageHighMem(page))
2262                                 continue;
2263                         list_del(&page->lru);
2264                         update_and_free_page(h, page);
2265                         h->free_huge_pages--;
2266                         h->free_huge_pages_node[page_to_nid(page)]--;
2267                 }
2268         }
2269 }
2270 #else
2271 static inline void try_to_free_low(struct hstate *h, unsigned long count,
2272                                                 nodemask_t *nodes_allowed)
2273 {
2274 }
2275 #endif
2276
2277 /*
2278  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
2279  * balanced by operating on them in a round-robin fashion.
2280  * Returns 1 if an adjustment was made.
2281  */
2282 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
2283                                 int delta)
2284 {
2285         int nr_nodes, node;
2286
2287         VM_BUG_ON(delta != -1 && delta != 1);
2288
2289         if (delta < 0) {
2290                 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2291                         if (h->surplus_huge_pages_node[node])
2292                                 goto found;
2293                 }
2294         } else {
2295                 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2296                         if (h->surplus_huge_pages_node[node] <
2297                                         h->nr_huge_pages_node[node])
2298                                 goto found;
2299                 }
2300         }
2301         return 0;
2302
2303 found:
2304         h->surplus_huge_pages += delta;
2305         h->surplus_huge_pages_node[node] += delta;
2306         return 1;
2307 }
2308
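/*
 * "Persistent" huge pages are the pool pages that are not surplus.
 * set_max_huge_pages() resizes the pool toward count, growing it with fresh
 * allocations and shrinking it by freeing pages or marking them surplus.
 */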
2309 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
2310 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
2311                               nodemask_t *nodes_allowed)
2312 {
2313         unsigned long min_count, ret;
2314
2315         spin_lock(&hugetlb_lock);
2316
2317         /*
2318          * Check for a node specific request.
2319          * Changing node specific huge page count may require a corresponding
2320          * change to the global count.  In any case, the passed node mask
2321          * (nodes_allowed) will restrict alloc/free to the specified node.
2322          */
2323         if (nid != NUMA_NO_NODE) {
2324                 unsigned long old_count = count;
2325
2326                 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2327                 /*
2328                  * User may have specified a large count value which caused the
2329                  * above calculation to overflow.  In this case, they wanted
2330                  * to allocate as many huge pages as possible.  Set count to
2331                  * largest possible value to align with their intention.
2332                  */
2333                 if (count < old_count)
2334                         count = ULONG_MAX;
2335         }
2336
2337         /*
2338          * Runtime allocation of gigantic pages depends on the capability for
2339          * large page range allocation.
2340          * If the system does not provide this feature, return an error when
2341          * the user tries to allocate gigantic pages but let the user free the
2342          * boot-time allocated gigantic pages.
2343          */
2344         if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
2345                 if (count > persistent_huge_pages(h)) {
2346                         spin_unlock(&hugetlb_lock);
2347                         return -EINVAL;
2348                 }
2349                 /* Fall through to decrease pool */
2350         }
2351
2352         /*
2353          * Increase the pool size
2354          * First take pages out of surplus state.  Then make up the
2355          * remaining difference by allocating fresh huge pages.
2356          *
2357          * We might race with alloc_surplus_huge_page() here and be unable
2358          * to convert a surplus huge page to a normal huge page. That is
2359          * not critical, though, it just means the overall size of the
2360          * pool might be one hugepage larger than it needs to be, but
2361          * within all the constraints specified by the sysctls.
2362          */
2363         while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
2364                 if (!adjust_pool_surplus(h, nodes_allowed, -1))
2365                         break;
2366         }
2367
2368         while (count > persistent_huge_pages(h)) {
2369                 /*
2370                  * If this allocation races such that we no longer need the
2371                  * page, free_huge_page will handle it by freeing the page
2372                  * and reducing the surplus.
2373                  */
2374                 spin_unlock(&hugetlb_lock);
2375
2376                 /* yield cpu to avoid soft lockup */
2377                 cond_resched();
2378
2379                 ret = alloc_pool_huge_page(h, nodes_allowed);
2380                 spin_lock(&hugetlb_lock);
2381                 if (!ret)
2382                         goto out;
2383
2384                 /* Bail for signals. Probably ctrl-c from user */
2385                 if (signal_pending(current))
2386                         goto out;
2387         }
2388
2389         /*
2390          * Decrease the pool size
2391          * First return free pages to the buddy allocator (being careful
2392          * to keep enough around to satisfy reservations).  Then place
2393          * pages into surplus state as needed so the pool will shrink
2394          * to the desired size as pages become free.
2395          *
2396          * By placing pages into the surplus state independent of the
2397          * overcommit value, we are allowing the surplus pool size to
2398          * exceed overcommit. There are few sane options here. Since
2399          * alloc_surplus_huge_page() is checking the global counter,
2400          * though, we'll note that we're not allowed to exceed surplus
2401          * and won't grow the pool anywhere else. Not until one of the
2402          * sysctls is changed, or the surplus pages go out of use.
2403          */
2404         min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
2405         min_count = max(count, min_count);
2406         try_to_free_low(h, min_count, nodes_allowed);
2407         while (min_count < persistent_huge_pages(h)) {
2408                 if (!free_pool_huge_page(h, nodes_allowed, 0))
2409                         break;
2410                 cond_resched_lock(&hugetlb_lock);
2411         }
2412         while (count < persistent_huge_pages(h)) {
2413                 if (!adjust_pool_surplus(h, nodes_allowed, 1))
2414                         break;
2415         }
2416 out:
2417         h->max_huge_pages = persistent_huge_pages(h);
2418         spin_unlock(&hugetlb_lock);
2419
2420         return 0;
2421 }
2422
2423 #define HSTATE_ATTR_RO(_name) \
2424         static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2425
2426 #define HSTATE_ATTR(_name) \
2427         static struct kobj_attribute _name##_attr = \
2428                 __ATTR(_name, 0644, _name##_show, _name##_store)
2429
2430 static struct kobject *hugepages_kobj;
2431 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2432
2433 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2434
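/*
 * Map a sysfs kobject back to its hstate.  For the global attributes the node
 * id returned through @nidp is NUMA_NO_NODE; per-node attributes are resolved
 * by kobj_to_node_hstate().
 */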
2435 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2436 {
2437         int i;
2438
2439         for (i = 0; i < HUGE_MAX_HSTATE; i++)
2440                 if (hstate_kobjs[i] == kobj) {
2441                         if (nidp)
2442                                 *nidp = NUMA_NO_NODE;
2443                         return &hstates[i];
2444                 }
2445
2446         return kobj_to_node_hstate(kobj, nidp);
2447 }
2448
2449 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2450                                         struct kobj_attribute *attr, char *buf)
2451 {
2452         struct hstate *h;
2453         unsigned long nr_huge_pages;
2454         int nid;
2455
2456         h = kobj_to_hstate(kobj, &nid);
2457         if (nid == NUMA_NO_NODE)
2458                 nr_huge_pages = h->nr_huge_pages;
2459         else
2460                 nr_huge_pages = h->nr_huge_pages_node[nid];
2461
2462         return sprintf(buf, "%lu\n", nr_huge_pages);
2463 }
2464
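/*
 * Common handler for nr_hugepages updates from sysfs and sysctl: build the
 * nodemask the request is constrained to and hand off to set_max_huge_pages().
 */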
2465 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2466                                            struct hstate *h, int nid,
2467                                            unsigned long count, size_t len)
2468 {
2469         int err;
2470         nodemask_t nodes_allowed, *n_mask;
2471
2472         if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
2473                 return -EINVAL;
2474
2475         if (nid == NUMA_NO_NODE) {
2476                 /*
2477                  * global hstate attribute
2478                  */
2479                 if (!(obey_mempolicy &&
2480                                 init_nodemask_of_mempolicy(&nodes_allowed)))
2481                         n_mask = &node_states[N_MEMORY];
2482                 else
2483                         n_mask = &nodes_allowed;
2484         } else {
2485                 /*
2486                  * Node specific request.  count adjustment happens in
2487                  * set_max_huge_pages() after acquiring hugetlb_lock.
2488                  */
2489                 init_nodemask_of_node(&nodes_allowed, nid);
2490                 n_mask = &nodes_allowed;
2491         }
2492
2493         err = set_max_huge_pages(h, count, nid, n_mask);
2494
2495         return err ? err : len;
2496 }
2497
2498 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2499                                          struct kobject *kobj, const char *buf,
2500                                          size_t len)
2501 {
2502         struct hstate *h;
2503         unsigned long count;
2504         int nid;
2505         int err;
2506
2507         err = kstrtoul(buf, 10, &count);
2508         if (err)
2509                 return err;
2510
2511         h = kobj_to_hstate(kobj, &nid);
2512         return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2513 }
2514
2515 static ssize_t nr_hugepages_show(struct kobject *kobj,
2516                                        struct kobj_attribute *attr, char *buf)
2517 {
2518         return nr_hugepages_show_common(kobj, attr, buf);
2519 }
2520
2521 static ssize_t nr_hugepages_store(struct kobject *kobj,
2522                struct kobj_attribute *attr, const char *buf, size_t len)
2523 {
2524         return nr_hugepages_store_common(false, kobj, buf, len);
2525 }
2526 HSTATE_ATTR(nr_hugepages);
2527
2528 #ifdef CONFIG_NUMA
2529
2530 /*
2531  * hstate attribute for optionally mempolicy-based constraint on persistent
2532  * huge page alloc/free.
2533  */
2534 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
2535                                        struct kobj_attribute *attr, char *buf)
2536 {
2537         return nr_hugepages_show_common(kobj, attr, buf);
2538 }
2539
2540 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2541                struct kobj_attribute *attr, const char *buf, size_t len)
2542 {
2543         return nr_hugepages_store_common(true, kobj, buf, len);
2544 }
2545 HSTATE_ATTR(nr_hugepages_mempolicy);
2546 #endif
2547
2548
2549 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2550                                         struct kobj_attribute *attr, char *buf)
2551 {
2552         struct hstate *h = kobj_to_hstate(kobj, NULL);
2553         return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2554 }
2555
2556 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2557                 struct kobj_attribute *attr, const char *buf, size_t count)
2558 {
2559         int err;
2560         unsigned long input;
2561         struct hstate *h = kobj_to_hstate(kobj, NULL);
2562
2563         if (hstate_is_gigantic(h))
2564                 return -EINVAL;
2565
2566         err = kstrtoul(buf, 10, &input);
2567         if (err)
2568                 return err;
2569
2570         spin_lock(&hugetlb_lock);
2571         h->nr_overcommit_huge_pages = input;
2572         spin_unlock(&hugetlb_lock);
2573
2574         return count;
2575 }
2576 HSTATE_ATTR(nr_overcommit_hugepages);
2577
2578 static ssize_t free_hugepages_show(struct kobject *kobj,
2579                                         struct kobj_attribute *attr, char *buf)
2580 {
2581         struct hstate *h;
2582         unsigned long free_huge_pages;
2583         int nid;
2584
2585         h = kobj_to_hstate(kobj, &nid);
2586         if (nid == NUMA_NO_NODE)
2587                 free_huge_pages = h->free_huge_pages;
2588         else
2589                 free_huge_pages = h->free_huge_pages_node[nid];
2590
2591         return sprintf(buf, "%lu\n", free_huge_pages);
2592 }
2593 HSTATE_ATTR_RO(free_hugepages);
2594
2595 static ssize_t resv_hugepages_show(struct kobject *kobj,
2596                                         struct kobj_attribute *attr, char *buf)
2597 {
2598         struct hstate *h = kobj_to_hstate(kobj, NULL);
2599         return sprintf(buf, "%lu\n", h->resv_huge_pages);
2600 }
2601 HSTATE_ATTR_RO(resv_hugepages);
2602
2603 static ssize_t surplus_hugepages_show(struct kobject *kobj,
2604                                         struct kobj_attribute *attr, char *buf)
2605 {
2606         struct hstate *h;
2607         unsigned long surplus_huge_pages;
2608         int nid;
2609
2610         h = kobj_to_hstate(kobj, &nid);
2611         if (nid == NUMA_NO_NODE)
2612                 surplus_huge_pages = h->surplus_huge_pages;
2613         else
2614                 surplus_huge_pages = h->surplus_huge_pages_node[nid];
2615
2616         return sprintf(buf, "%lu\n", surplus_huge_pages);
2617 }
2618 HSTATE_ATTR_RO(surplus_hugepages);
2619
2620 static struct attribute *hstate_attrs[] = {
2621         &nr_hugepages_attr.attr,
2622         &nr_overcommit_hugepages_attr.attr,
2623         &free_hugepages_attr.attr,
2624         &resv_hugepages_attr.attr,
2625         &surplus_hugepages_attr.attr,
2626 #ifdef CONFIG_NUMA
2627         &nr_hugepages_mempolicy_attr.attr,
2628 #endif
2629         NULL,
2630 };
2631
2632 static const struct attribute_group hstate_attr_group = {
2633         .attrs = hstate_attrs,
2634 };
2635
2636 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2637                                     struct kobject **hstate_kobjs,
2638                                     const struct attribute_group *hstate_attr_group)
2639 {
2640         int retval;
2641         int hi = hstate_index(h);
2642
2643         hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2644         if (!hstate_kobjs[hi])
2645                 return -ENOMEM;
2646
2647         retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2648         if (retval)
2649                 kobject_put(hstate_kobjs[hi]);
2650
2651         return retval;
2652 }
2653
2654 static void __init hugetlb_sysfs_init(void)
2655 {
2656         struct hstate *h;
2657         int err;
2658
2659         hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2660         if (!hugepages_kobj)
2661                 return;
2662
2663         for_each_hstate(h) {
2664                 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2665                                          hstate_kobjs, &hstate_attr_group);
2666                 if (err)
2667                         pr_err("Hugetlb: Unable to add hstate %s\n", h->name);
2668         }
2669 }
2670
2671 #ifdef CONFIG_NUMA
2672
2673 /*
2674  * node_hstate/s - associate per node hstate attributes, via their kobjects,
2675  * with node devices in node_devices[] using a parallel array.  The array
2676  * index of a node device or _hstate == node id.
2677  * This is here to avoid any static dependency of the node device driver, in
2678  * the base kernel, on the hugetlb module.
2679  */
2680 struct node_hstate {
2681         struct kobject          *hugepages_kobj;
2682         struct kobject          *hstate_kobjs[HUGE_MAX_HSTATE];
2683 };
2684 static struct node_hstate node_hstates[MAX_NUMNODES];
2685
2686 /*
2687  * A subset of global hstate attributes for node devices
2688  */
2689 static struct attribute *per_node_hstate_attrs[] = {
2690         &nr_hugepages_attr.attr,
2691         &free_hugepages_attr.attr,
2692         &surplus_hugepages_attr.attr,
2693         NULL,
2694 };
2695
2696 static const struct attribute_group per_node_hstate_attr_group = {
2697         .attrs = per_node_hstate_attrs,
2698 };
2699
2700 /*
2701  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
2702  * Returns node id via non-NULL nidp.
2703  */
2704 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2705 {
2706         int nid;
2707
2708         for (nid = 0; nid < nr_node_ids; nid++) {
2709                 struct node_hstate *nhs = &node_hstates[nid];
2710                 int i;
2711                 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2712                         if (nhs->hstate_kobjs[i] == kobj) {
2713                                 if (nidp)
2714                                         *nidp = nid;
2715                                 return &hstates[i];
2716                         }
2717         }
2718
2719         BUG();
2720         return NULL;
2721 }
2722
2723 /*
2724  * Unregister hstate attributes from a single node device.
2725  * No-op if no hstate attributes attached.
2726  */
2727 static void hugetlb_unregister_node(struct node *node)
2728 {
2729         struct hstate *h;
2730         struct node_hstate *nhs = &node_hstates[node->dev.id];
2731
2732         if (!nhs->hugepages_kobj)
2733                 return;         /* no hstate attributes */
2734
2735         for_each_hstate(h) {
2736                 int idx = hstate_index(h);
2737                 if (nhs->hstate_kobjs[idx]) {
2738                         kobject_put(nhs->hstate_kobjs[idx]);
2739                         nhs->hstate_kobjs[idx] = NULL;
2740                 }
2741         }
2742
2743         kobject_put(nhs->hugepages_kobj);
2744         nhs->hugepages_kobj = NULL;
2745 }
2746
2747
2748 /*
2749  * Register hstate attributes for a single node device.
2750  * No-op if attributes already registered.
2751  */
2752 static void hugetlb_register_node(struct node *node)
2753 {
2754         struct hstate *h;
2755         struct node_hstate *nhs = &node_hstates[node->dev.id];
2756         int err;
2757
2758         if (nhs->hugepages_kobj)
2759                 return;         /* already allocated */
2760
2761         nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2762                                                         &node->dev.kobj);
2763         if (!nhs->hugepages_kobj)
2764                 return;
2765
2766         for_each_hstate(h) {
2767                 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2768                                                 nhs->hstate_kobjs,
2769                                                 &per_node_hstate_attr_group);
2770                 if (err) {
2771                         pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2772                                 h->name, node->dev.id);
2773                         hugetlb_unregister_node(node);
2774                         break;
2775                 }
2776         }
2777 }
2778
2779 /*
2780  * hugetlb init time:  register hstate attributes for all registered node
2781  * devices of nodes that have memory.  All on-line nodes should have
2782  * registered their associated device by this time.
2783  */
2784 static void __init hugetlb_register_all_nodes(void)
2785 {
2786         int nid;
2787
2788         for_each_node_state(nid, N_MEMORY) {
2789                 struct node *node = node_devices[nid];
2790                 if (node->dev.id == nid)
2791                         hugetlb_register_node(node);
2792         }
2793
2794         /*
2795          * Let the node device driver know we're here so it can
2796          * [un]register hstate attributes on node hotplug.
2797          */
2798         register_hugetlbfs_with_node(hugetlb_register_node,
2799                                      hugetlb_unregister_node);
2800 }
2801 #else   /* !CONFIG_NUMA */
2802
2803 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2804 {
2805         BUG();
2806         if (nidp)
2807                 *nidp = -1;
2808         return NULL;
2809 }
2810
2811 static void hugetlb_register_all_nodes(void) { }
2812
2813 #endif
2814
2815 static int __init hugetlb_init(void)
2816 {
2817         int i;
2818
2819         if (!hugepages_supported())
2820                 return 0;
2821
2822         if (!size_to_hstate(default_hstate_size)) {
2823                 if (default_hstate_size != 0) {
2824                         pr_err("HugeTLB: unsupported default_hugepagesz %lu. Reverting to %lu\n",
2825                                default_hstate_size, HPAGE_SIZE);
2826                 }
2827
2828                 default_hstate_size = HPAGE_SIZE;
2829                 if (!size_to_hstate(default_hstate_size))
2830                         hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2831         }
2832         default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2833         if (default_hstate_max_huge_pages) {
2834                 if (!default_hstate.max_huge_pages)
2835                         default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2836         }
2837
2838         hugetlb_init_hstates();
2839         gather_bootmem_prealloc();
2840         report_hugepages();
2841
2842         hugetlb_sysfs_init();
2843         hugetlb_register_all_nodes();
2844         hugetlb_cgroup_file_init();
2845
2846 #ifdef CONFIG_SMP
2847         num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2848 #else
2849         num_fault_mutexes = 1;
2850 #endif
2851         hugetlb_fault_mutex_table =
2852                 kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
2853                               GFP_KERNEL);
2854         BUG_ON(!hugetlb_fault_mutex_table);
2855
2856         for (i = 0; i < num_fault_mutexes; i++)
2857                 mutex_init(&hugetlb_fault_mutex_table[i]);
2858         return 0;
2859 }
2860 subsys_initcall(hugetlb_init);
2861
2862 /* Should be called on processing a hugepagesz=... option */
2863 void __init hugetlb_bad_size(void)
2864 {
2865         parsed_valid_hugepagesz = false;
2866 }
2867
2868 void __init hugetlb_add_hstate(unsigned int order)
2869 {
2870         struct hstate *h;
2871         unsigned long i;
2872
2873         if (size_to_hstate(PAGE_SIZE << order)) {
2874                 pr_warn("hugepagesz= specified twice, ignoring\n");
2875                 return;
2876         }
2877         BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2878         BUG_ON(order == 0);
2879         h = &hstates[hugetlb_max_hstate++];
2880         h->order = order;
2881         h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2882         h->nr_huge_pages = 0;
2883         h->free_huge_pages = 0;
2884         for (i = 0; i < MAX_NUMNODES; ++i)
2885                 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2886         INIT_LIST_HEAD(&h->hugepage_activelist);
2887         h->next_nid_to_alloc = first_memory_node;
2888         h->next_nid_to_free = first_memory_node;
2889         snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2890                                         huge_page_size(h)/1024);
2891
2892         parsed_hstate = h;
2893 }
2894
2895 static int __init hugetlb_nrpages_setup(char *s)
2896 {
2897         unsigned long *mhp;
2898         static unsigned long *last_mhp;
2899
2900         if (!parsed_valid_hugepagesz) {
2901                 pr_warn("hugepages = %s preceded by "
2902                         "an unsupported hugepagesz, ignoring\n", s);
2903                 parsed_valid_hugepagesz = true;
2904                 return 1;
2905         }
2906         /*
2907          * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2908          * so this hugepages= parameter goes to the "default hstate".
2909          */
2910         else if (!hugetlb_max_hstate)
2911                 mhp = &default_hstate_max_huge_pages;
2912         else
2913                 mhp = &parsed_hstate->max_huge_pages;
2914
2915         if (mhp == last_mhp) {
2916                 pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n");
2917                 return 1;
2918         }
2919
2920         if (sscanf(s, "%lu", mhp) <= 0)
2921                 *mhp = 0;
2922
2923         /*
2924          * Global state is always initialized later in hugetlb_init.
2925          * But gigantic hstates (order >= MAX_ORDER) must allocate their
2926          * pages here, early enough to still use the bootmem allocator.
2927          */
2928         if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2929                 hugetlb_hstate_alloc_pages(parsed_hstate);
2930
2931         last_mhp = mhp;
2932
2933         return 1;
2934 }
2935 __setup("hugepages=", hugetlb_nrpages_setup);
2936
2937 static int __init hugetlb_default_setup(char *s)
2938 {
2939         default_hstate_size = memparse(s, &s);
2940         return 1;
2941 }
2942 __setup("default_hugepagesz=", hugetlb_default_setup);
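/*
 * Example (illustrative): a kernel command line exercising the handlers
 * above might look like
 *
 *     default_hugepagesz=1G hugepagesz=1G hugepages=4 hugepagesz=2M hugepages=512
 *
 * Each hugepages= count applies to the most recently parsed hugepagesz=
 * (or to the default hstate if no hugepagesz= has been seen yet), which is
 * the parsed_hstate/last_mhp bookkeeping in hugetlb_nrpages_setup().
 */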
2943
2944 static unsigned int cpuset_mems_nr(unsigned int *array)
2945 {
2946         int node;
2947         unsigned int nr = 0;
2948
2949         for_each_node_mask(node, cpuset_current_mems_allowed)
2950                 nr += array[node];
2951
2952         return nr;
2953 }
2954
2955 #ifdef CONFIG_SYSCTL
2956 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2957                          struct ctl_table *table, int write,
2958                          void __user *buffer, size_t *length, loff_t *ppos)
2959 {
2960         struct hstate *h = &default_hstate;
2961         unsigned long tmp = h->max_huge_pages;
2962         int ret;
2963
2964         if (!hugepages_supported())
2965                 return -EOPNOTSUPP;
2966
2967         table->data = &tmp;
2968         table->maxlen = sizeof(unsigned long);
2969         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
2970         if (ret)
2971                 goto out;
2972
2973         if (write)
2974                 ret = __nr_hugepages_store_common(obey_mempolicy, h,
2975                                                   NUMA_NO_NODE, tmp, *length);
2976 out:
2977         return ret;
2978 }
2979
2980 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2981                           void __user *buffer, size_t *length, loff_t *ppos)
2982 {
2983
2984         return hugetlb_sysctl_handler_common(false, table, write,
2985                                                         buffer, length, ppos);
2986 }
2987
2988 #ifdef CONFIG_NUMA
2989 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2990                           void __user *buffer, size_t *length, loff_t *ppos)
2991 {
2992         return hugetlb_sysctl_handler_common(true, table, write,
2993                                                         buffer, length, ppos);
2994 }
2995 #endif /* CONFIG_NUMA */
2996
2997 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2998                         void __user *buffer,
2999                         size_t *length, loff_t *ppos)
3000 {
3001         struct hstate *h = &default_hstate;
3002         unsigned long tmp;
3003         int ret;
3004
3005         if (!hugepages_supported())
3006                 return -EOPNOTSUPP;
3007
3008         tmp = h->nr_overcommit_huge_pages;
3009
3010         if (write && hstate_is_gigantic(h))
3011                 return -EINVAL;
3012
3013         table->data = &tmp;
3014         table->maxlen = sizeof(unsigned long);
3015         ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
3016         if (ret)
3017                 goto out;
3018
3019         if (write) {
3020                 spin_lock(&hugetlb_lock);
3021                 h->nr_overcommit_huge_pages = tmp;
3022                 spin_unlock(&hugetlb_lock);
3023         }
3024 out:
3025         return ret;
3026 }
3027
3028 #endif /* CONFIG_SYSCTL */
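/*
 * Example (illustrative): the handlers above back the sysctl knobs wired up
 * elsewhere (kernel/sysctl.c), roughly:
 *
 *     sysctl vm.nr_hugepages=128             # hugetlb_sysctl_handler()
 *     sysctl vm.nr_hugepages_mempolicy=128   # hugetlb_mempolicy_sysctl_handler()
 *     sysctl vm.nr_overcommit_hugepages=64   # hugetlb_overcommit_handler()
 *
 * All three operate on the default hstate only.
 */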
3029
3030 void hugetlb_report_meminfo(struct seq_file *m)
3031 {
3032         struct hstate *h;
3033         unsigned long total = 0;
3034
3035         if (!hugepages_supported())
3036                 return;
3037
3038         for_each_hstate(h) {
3039                 unsigned long count = h->nr_huge_pages;
3040
3041                 total += (PAGE_SIZE << huge_page_order(h)) * count;
3042
3043                 if (h == &default_hstate)
3044                         seq_printf(m,
3045                                    "HugePages_Total:   %5lu\n"
3046                                    "HugePages_Free:    %5lu\n"
3047                                    "HugePages_Rsvd:    %5lu\n"
3048                                    "HugePages_Surp:    %5lu\n"
3049                                    "Hugepagesize:   %8lu kB\n",
3050                                    count,
3051                                    h->free_huge_pages,
3052                                    h->resv_huge_pages,
3053                                    h->surplus_huge_pages,
3054                                    (PAGE_SIZE << huge_page_order(h)) / 1024);
3055         }
3056
3057         seq_printf(m, "Hugetlb:        %8lu kB\n", total / 1024);
3058 }
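/*
 * Example (illustrative) /proc/meminfo output produced by the function above
 * on a system whose default hstate is 2 MB:
 *
 *     HugePages_Total:       4
 *     HugePages_Free:        3
 *     HugePages_Rsvd:        1
 *     HugePages_Surp:        0
 *     Hugepagesize:       2048 kB
 *     Hugetlb:            8192 kB
 */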
3059
3060 int hugetlb_report_node_meminfo(int nid, char *buf)
3061 {
3062         struct hstate *h = &default_hstate;
3063         if (!hugepages_supported())
3064                 return 0;
3065         return sprintf(buf,
3066                 "Node %d HugePages_Total: %5u\n"
3067                 "Node %d HugePages_Free:  %5u\n"
3068                 "Node %d HugePages_Surp:  %5u\n",
3069                 nid, h->nr_huge_pages_node[nid],
3070                 nid, h->free_huge_pages_node[nid],
3071                 nid, h->surplus_huge_pages_node[nid]);
3072 }
3073
3074 void hugetlb_show_meminfo(void)
3075 {
3076         struct hstate *h;
3077         int nid;
3078
3079         if (!hugepages_supported())
3080                 return;
3081
3082         for_each_node_state(nid, N_MEMORY)
3083                 for_each_hstate(h)
3084                         pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
3085                                 nid,
3086                                 h->nr_huge_pages_node[nid],
3087                                 h->free_huge_pages_node[nid],
3088                                 h->surplus_huge_pages_node[nid],
3089                                 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
3090 }
3091
3092 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
3093 {
3094         seq_printf(m, "HugetlbPages:\t%8lu kB\n",
3095                    atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
3096 }
3097
3098 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
3099 unsigned long hugetlb_total_pages(void)
3100 {
3101         struct hstate *h;
3102         unsigned long nr_total_pages = 0;
3103
3104         for_each_hstate(h)
3105                 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
3106         return nr_total_pages;
3107 }
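/*
 * Example (illustrative): with 4 KB base pages and a pool of 512 2 MB huge
 * pages, pages_per_huge_page() is 512, so hugetlb_total_pages() returns
 * 512 * 512 = 262144 PAGE_SIZE units (1 GB of memory).
 */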
3108
3109 static int hugetlb_acct_memory(struct hstate *h, long delta)
3110 {
3111         int ret = -ENOMEM;
3112
3113         spin_lock(&hugetlb_lock);
3114         /*
3115          * When cpuset is configured, it breaks the strict hugetlb page
3116          * reservation because the accounting is done on a global variable.
3117          * Such a reservation is essentially meaningless in the presence of
3118          * cpusets because it is never checked against the page availability
3119          * of the current cpuset: an application can still be OOM killed by
3120          * the kernel if the cpuset its task runs in lacks free hugetlb pages.
3121          * Enforcing strict accounting with cpusets is nearly impossible (or
3122          * too ugly) because cpusets are too fluid; tasks and memory nodes
3123          * can be moved between cpusets at any time.
3124          *
3125          * This change of semantics for shared hugetlb mappings with cpusets
3126          * is undesirable. However, in order to preserve some of the original
3127          * semantics, we fall back to checking against the current free page
3128          * availability as a best effort, hopefully minimizing the impact of
3129          * the semantics change that cpusets introduce.
3130          */
3131         if (delta > 0) {
3132                 if (gather_surplus_pages(h, delta) < 0)
3133                         goto out;
3134
3135                 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
3136                         return_unused_surplus_pages(h, delta);
3137                         goto out;
3138                 }
3139         }
3140
3141         ret = 0;
3142         if (delta < 0)
3143                 return_unused_surplus_pages(h, (unsigned long) -delta);
3144
3145 out:
3146         spin_unlock(&hugetlb_lock);
3147         return ret;
3148 }
3149
3150 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
3151 {
3152         struct resv_map *resv = vma_resv_map(vma);
3153
3154         /*
3155          * This new VMA should share its sibling's reservation map if present.
3156          * The VMA will only ever have a valid reservation map pointer when
3157          * it is being copied for another still existing VMA.  As that VMA
3158          * has a reference to the reservation map it cannot disappear until
3159          * after this open call completes.  It is therefore safe to take a
3160          * new reference here without additional locking.
3161          */
3162         if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3163                 kref_get(&resv->refs);
3164 }
3165
3166 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
3167 {
3168         struct hstate *h = hstate_vma(vma);
3169         struct resv_map *resv = vma_resv_map(vma);
3170         struct hugepage_subpool *spool = subpool_vma(vma);
3171         unsigned long reserve, start, end;
3172         long gbl_reserve;
3173
3174         if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3175                 return;
3176
3177         start = vma_hugecache_offset(h, vma, vma->vm_start);
3178         end = vma_hugecache_offset(h, vma, vma->vm_end);
3179
3180         reserve = (end - start) - region_count(resv, start, end);
3181
3182         kref_put(&resv->refs, resv_map_release);
3183
3184         if (reserve) {
3185                 /*
3186                  * Decrement reserve counts.  The global reserve count may be
3187                  * adjusted if the subpool has a minimum size.
3188                  */
3189                 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
3190                 hugetlb_acct_memory(h, -gbl_reserve);
3191         }
3192 }
3193
3194 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
3195 {
3196         if (addr & ~(huge_page_mask(hstate_vma(vma))))
3197                 return -EINVAL;
3198         return 0;
3199 }
3200
3201 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
3202 {
3203         struct hstate *hstate = hstate_vma(vma);
3204
3205         return 1UL << huge_page_shift(hstate);
3206 }
3207
3208 /*
3209  * We cannot handle pagefaults against hugetlb pages at all.  They cause
3210  * handle_mm_fault() to try to instantiate regular-sized pages in the
3211  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
3212  * this far.
3213  */
3214 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
3215 {
3216         BUG();
3217         return 0;
3218 }
3219
3220 /*
3221  * When a new function is introduced to vm_operations_struct and added
3222  * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
3223  * This is because, under the System V shared memory model, mappings created
3224  * via shmget/shmat with "huge page" specified are backed by hugetlbfs files,
3225  * and their original vm_ops are overwritten with shm_vm_ops.
3226  */
3227 const struct vm_operations_struct hugetlb_vm_ops = {
3228         .fault = hugetlb_vm_op_fault,
3229         .open = hugetlb_vm_op_open,
3230         .close = hugetlb_vm_op_close,
3231         .split = hugetlb_vm_op_split,
3232         .pagesize = hugetlb_vm_op_pagesize,
3233 };
3234
3235 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
3236                                 int writable)
3237 {
3238         pte_t entry;
3239
3240         if (writable) {
3241                 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3242                                          vma->vm_page_prot)));
3243         } else {
3244                 entry = huge_pte_wrprotect(mk_huge_pte(page,
3245                                            vma->vm_page_prot));
3246         }
3247         entry = pte_mkyoung(entry);
3248         entry = pte_mkhuge(entry);
3249         entry = arch_make_huge_pte(entry, vma, page, writable);
3250
3251         return entry;
3252 }
3253
3254 static void set_huge_ptep_writable(struct vm_area_struct *vma,
3255                                    unsigned long address, pte_t *ptep)
3256 {
3257         pte_t entry;
3258
3259         entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
3260         if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
3261                 update_mmu_cache(vma, address, ptep);
3262 }
3263
3264 bool is_hugetlb_entry_migration(pte_t pte)
3265 {
3266         swp_entry_t swp;
3267
3268         if (huge_pte_none(pte) || pte_present(pte))
3269                 return false;
3270         swp = pte_to_swp_entry(pte);
3271         if (non_swap_entry(swp) && is_migration_entry(swp))
3272                 return true;
3273         else
3274                 return false;
3275 }
3276
3277 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
3278 {
3279         swp_entry_t swp;
3280
3281         if (huge_pte_none(pte) || pte_present(pte))
3282                 return 0;
3283         swp = pte_to_swp_entry(pte);
3284         if (non_swap_entry(swp) && is_hwpoison_entry(swp))
3285                 return 1;
3286         else
3287                 return 0;
3288 }
3289
3290 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3291                             struct vm_area_struct *vma)
3292 {
3293         pte_t *src_pte, *dst_pte, entry, dst_entry;
3294         struct page *ptepage;
3295         unsigned long addr;
3296         int cow;
3297         struct hstate *h = hstate_vma(vma);
3298         unsigned long sz = huge_page_size(h);
3299         struct mmu_notifier_range range;
3300         int ret = 0;
3301
3302         cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
3303
3304         if (cow) {
3305                 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, src,
3306                                         vma->vm_start,
3307                                         vma->vm_end);
3308                 mmu_notifier_invalidate_range_start(&range);
3309         }
3310
3311         for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3312                 spinlock_t *src_ptl, *dst_ptl;
3313                 src_pte = huge_pte_offset(src, addr, sz);
3314                 if (!src_pte)
3315                         continue;
3316                 dst_pte = huge_pte_alloc(dst, addr, sz);
3317                 if (!dst_pte) {
3318                         ret = -ENOMEM;
3319                         break;
3320                 }
3321
3322                 /*
3323                  * If the pagetables are shared don't copy or take references.
3324                  * dst_pte == src_pte is the common case of src/dest sharing.
3325                  *
3326                  * However, src could have 'unshared' and dst shares with
3327                  * another vma.  If dst_pte !none, this implies sharing.
3328                  * Check here before taking page table lock, and once again
3329                  * after taking the lock below.
3330                  */
3331                 dst_entry = huge_ptep_get(dst_pte);
3332                 if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
3333                         continue;
3334
3335                 dst_ptl = huge_pte_lock(h, dst, dst_pte);
3336                 src_ptl = huge_pte_lockptr(h, src, src_pte);
3337                 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
3338                 entry = huge_ptep_get(src_pte);
3339                 dst_entry = huge_ptep_get(dst_pte);
3340                 if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
3341                         /*
3342                          * Skip if src entry none.  Also, skip in the
3343                          * unlikely case dst entry !none as this implies
3344                          * sharing with another vma.
3345                          */
3346                         ;
3347                 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
3348                                     is_hugetlb_entry_hwpoisoned(entry))) {
3349                         swp_entry_t swp_entry = pte_to_swp_entry(entry);
3350
3351                         if (is_write_migration_entry(swp_entry) && cow) {
3352                                 /*
3353                                  * COW mappings require pages in both
3354                                  * parent and child to be marked read-only.
3355                                  */
3356                                 make_migration_entry_read(&swp_entry);
3357                                 entry = swp_entry_to_pte(swp_entry);
3358                                 set_huge_swap_pte_at(src, addr, src_pte,
3359                                                      entry, sz);
3360                         }
3361                         set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
3362                 } else {
3363                         if (cow) {
3364                                 /*
3365                                  * No need to notify as we are downgrading page
3366                                  * table protection, not changing it to point
3367                                  * to a new page.
3368                                  *
3369                                  * See Documentation/vm/mmu_notifier.rst
3370                                  */
3371                                 huge_ptep_set_wrprotect(src, addr, src_pte);
3372                         }
3373                         entry = huge_ptep_get(src_pte);
3374                         ptepage = pte_page(entry);
3375                         get_page(ptepage);
3376                         page_dup_rmap(ptepage, true);
3377                         set_huge_pte_at(dst, addr, dst_pte, entry);
3378                         hugetlb_count_add(pages_per_huge_page(h), dst);
3379                 }
3380                 spin_unlock(src_ptl);
3381                 spin_unlock(dst_ptl);
3382         }
3383
3384         if (cow)
3385                 mmu_notifier_invalidate_range_end(&range);
3386
3387         return ret;
3388 }
3389
3390 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3391                             unsigned long start, unsigned long end,
3392                             struct page *ref_page)
3393 {
3394         struct mm_struct *mm = vma->vm_mm;
3395         unsigned long address;
3396         pte_t *ptep;
3397         pte_t pte;
3398         spinlock_t *ptl;
3399         struct page *page;
3400         struct hstate *h = hstate_vma(vma);
3401         unsigned long sz = huge_page_size(h);
3402         struct mmu_notifier_range range;
3403
3404         WARN_ON(!is_vm_hugetlb_page(vma));
3405         BUG_ON(start & ~huge_page_mask(h));
3406         BUG_ON(end & ~huge_page_mask(h));
3407
3408         /*
3409          * This is a hugetlb vma; all the pte entries should point
3410          * to huge pages.
3411          */
3412         tlb_change_page_size(tlb, sz);
3413         tlb_start_vma(tlb, vma);
3414
3415         /*
3416          * If sharing is possible, alert mmu notifiers of the worst case.
3417          */
3418         mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start,
3419                                 end);
3420         adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
3421         mmu_notifier_invalidate_range_start(&range);
3422         address = start;
3423         for (; address < end; address += sz) {
3424                 ptep = huge_pte_offset(mm, address, sz);
3425                 if (!ptep)
3426                         continue;
3427
3428                 ptl = huge_pte_lock(h, mm, ptep);
3429                 if (huge_pmd_unshare(mm, &address, ptep)) {
3430                         spin_unlock(ptl);
3431                         /*
3432                          * We just unmapped a page of PMDs by clearing a PUD.
3433                          * The caller's TLB flush range should cover this area.
3434                          */
3435                         continue;
3436                 }
3437
3438                 pte = huge_ptep_get(ptep);
3439                 if (huge_pte_none(pte)) {
3440                         spin_unlock(ptl);
3441                         continue;
3442                 }
3443
3444                 /*
3445                  * A migrating or HWPoisoned hugepage is already unmapped
3446                  * and its refcount is dropped, so just clear the pte here.
3447                  */
3448                 if (unlikely(!pte_present(pte))) {
3449                         huge_pte_clear(mm, address, ptep, sz);
3450                         spin_unlock(ptl);
3451                         continue;
3452                 }
3453
3454                 page = pte_page(pte);
3455                 /*
3456                  * If a reference page is supplied, it is because a specific
3457                  * page is being unmapped, not a range. Ensure the page we
3458                  * are about to unmap is the actual page of interest.
3459                  */
3460                 if (ref_page) {
3461                         if (page != ref_page) {
3462                                 spin_unlock(ptl);
3463                                 continue;
3464                         }
3465                         /*
3466                          * Mark the VMA as having unmapped its page so that
3467                          * future faults in this VMA will fail rather than
3468                          * looking like data was lost.
3469                          */
3470                         set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
3471                 }
3472
3473                 pte = huge_ptep_get_and_clear(mm, address, ptep);
3474                 tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
3475                 if (huge_pte_dirty(pte))
3476                         set_page_dirty(page);
3477
3478                 hugetlb_count_sub(pages_per_huge_page(h), mm);
3479                 page_remove_rmap(page, true);
3480
3481                 spin_unlock(ptl);
3482                 tlb_remove_page_size(tlb, page, huge_page_size(h));
3483                 /*
3484                  * Bail out after unmapping reference page if supplied
3485                  */
3486                 if (ref_page)
3487                         break;
3488         }
3489         mmu_notifier_invalidate_range_end(&range);
3490         tlb_end_vma(tlb, vma);
3491 }
3492
3493 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
3494                           struct vm_area_struct *vma, unsigned long start,
3495                           unsigned long end, struct page *ref_page)
3496 {
3497         __unmap_hugepage_range(tlb, vma, start, end, ref_page);
3498
3499         /*
3500          * Clear this flag so that x86's huge_pmd_share page_table_shareable
3501          * test will fail on a vma being torn down, and not grab a page table
3502          * on its way out.  We're lucky that the flag has such an appropriate
3503          * name, and can in fact be safely cleared here. We could clear it
3504          * before the __unmap_hugepage_range above, but all that's necessary
3505          * is to clear it before releasing the i_mmap_rwsem. This works
3506          * because in the context this is called, the VMA is about to be
3507          * destroyed and the i_mmap_rwsem is held.
3508          */
3509         vma->vm_flags &= ~VM_MAYSHARE;
3510 }
3511
3512 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
3513                           unsigned long end, struct page *ref_page)
3514 {
3515         struct mm_struct *mm;
3516         struct mmu_gather tlb;
3517         unsigned long tlb_start = start;
3518         unsigned long tlb_end = end;
3519
3520         /*
3521          * If shared PMDs were possibly used within this vma range, adjust
3522          * start/end for worst case tlb flushing.
3523          * Note that we cannot be sure if PMDs are shared until we try to
3524          * unmap pages.  However, we want to make sure TLB flushing covers
3525          * the largest possible range.
3526          */
3527         adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
3528
3529         mm = vma->vm_mm;
3530
3531         tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
3532         __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
3533         tlb_finish_mmu(&tlb, tlb_start, tlb_end);
3534 }
3535
3536 /*
3537  * This is called when the original mapper fails to COW a MAP_PRIVATE
3538  * mapping it owns the reserve page for. The intention is to unmap the page
3539  * from other VMAs and let the children be SIGKILLed if they fault on the
3540  * same region.
3541  */
3542 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
3543                               struct page *page, unsigned long address)
3544 {
3545         struct hstate *h = hstate_vma(vma);
3546         struct vm_area_struct *iter_vma;
3547         struct address_space *mapping;
3548         pgoff_t pgoff;
3549
3550         /*
3551          * vm_pgoff is in PAGE_SIZE units, hence the different calculation
3552          * from page cache lookup which is in HPAGE_SIZE units.
3553          */
3554         address = address & huge_page_mask(h);
3555         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
3556                         vma->vm_pgoff;
3557         mapping = vma->vm_file->f_mapping;
3558
3559         /*
3560          * Take the mapping lock for the duration of the table walk. As
3561          * this mapping should be shared between all the VMAs,
3562          * __unmap_hugepage_range() is called with the lock already held.
3563          */
3564         i_mmap_lock_write(mapping);
3565         vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
3566                 /* Do not unmap the current VMA */
3567                 if (iter_vma == vma)
3568                         continue;
3569
3570                 /*
3571                  * Shared VMAs have their own reserves and do not affect
3572                  * MAP_PRIVATE accounting but it is possible that a shared
3573                  * VMA is using the same page so check and skip such VMAs.
3574                  */
3575                 if (iter_vma->vm_flags & VM_MAYSHARE)
3576                         continue;
3577
3578                 /*
3579                  * Unmap the page from other VMAs without their own reserves.
3580                  * They get marked to be SIGKILLed if they fault in these
3581                  * areas. This is because a future no-page fault on this VMA
3582                  * could insert a zeroed page instead of the data existing
3583                  * from the time of fork. This would look like data corruption
3584                  */
3585                 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
3586                         unmap_hugepage_range(iter_vma, address,
3587                                              address + huge_page_size(h), page);
3588         }
3589         i_mmap_unlock_write(mapping);
3590 }
3591
3592 /*
3593  * hugetlb_cow() should be called with the page lock of the original hugepage held.
3594  * Called with hugetlb_instantiation_mutex held and pte_page locked so we
3595  * cannot race with other handlers or page migration.
3596  * Keep the pte_same checks anyway to make transition from the mutex easier.
3597  */
3598 static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
3599                        unsigned long address, pte_t *ptep,
3600                        struct page *pagecache_page, spinlock_t *ptl)
3601 {
3602         pte_t pte;
3603         struct hstate *h = hstate_vma(vma);
3604         struct page *old_page, *new_page;
3605         int outside_reserve = 0;
3606         vm_fault_t ret = 0;
3607         unsigned long haddr = address & huge_page_mask(h);
3608         struct mmu_notifier_range range;
3609
3610         pte = huge_ptep_get(ptep);
3611         old_page = pte_page(pte);
3612
3613 retry_avoidcopy:
3614         /* If no-one else is actually using this page, avoid the copy
3615          * and just make the page writable */
3616         if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
3617                 page_move_anon_rmap(old_page, vma);
3618                 set_huge_ptep_writable(vma, haddr, ptep);
3619                 return 0;
3620         }
3621
3622         /*
3623          * If the process that created a MAP_PRIVATE mapping is about to
3624          * perform a COW due to a shared page count, attempt to satisfy
3625          * the allocation without using the existing reserves. The pagecache
3626          * page is used to determine if the reserve at this address was
3627          * consumed or not. If reserves were used, a partial faulted mapping
3628          * at the time of fork() could consume its reserves on COW instead
3629          * of the full address range.
3630          */
3631         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
3632                         old_page != pagecache_page)
3633                 outside_reserve = 1;
3634
3635         get_page(old_page);
3636
3637         /*
3638          * Drop page table lock as buddy allocator may be called. It will
3639          * be acquired again before returning to the caller, as expected.
3640          */
3641         spin_unlock(ptl);
3642         new_page = alloc_huge_page(vma, haddr, outside_reserve);
3643
3644         if (IS_ERR(new_page)) {
3645                 /*
3646                  * If a process owning a MAP_PRIVATE mapping fails to COW,
3647                  * it is due to references held by a child and an insufficient
3648                  * huge page pool. To guarantee the original mapper's
3649                  * reliability, unmap the page from child processes. The child
3650                  * may get SIGKILLed if it later faults.
3651                  */
3652                 if (outside_reserve) {
3653                         put_page(old_page);
3654                         BUG_ON(huge_pte_none(pte));
3655                         unmap_ref_private(mm, vma, old_page, haddr);
3656                         BUG_ON(huge_pte_none(pte));
3657                         spin_lock(ptl);
3658                         ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3659                         if (likely(ptep &&
3660                                    pte_same(huge_ptep_get(ptep), pte)))
3661                                 goto retry_avoidcopy;
3662                         /*
3663                          * A race occurred while re-acquiring the page
3664                          * table lock, and our job is done.
3665                          */
3666                         return 0;
3667                 }
3668
3669                 ret = vmf_error(PTR_ERR(new_page));
3670                 goto out_release_old;
3671         }
3672
3673         /*
3674          * When the original hugepage is a shared one, it does not have
3675          * an anon_vma prepared.
3676          */
3677         if (unlikely(anon_vma_prepare(vma))) {
3678                 ret = VM_FAULT_OOM;
3679                 goto out_release_all;
3680         }
3681
3682         copy_user_huge_page(new_page, old_page, address, vma,
3683                             pages_per_huge_page(h));
3684         __SetPageUptodate(new_page);
3685
3686         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, haddr,
3687                                 haddr + huge_page_size(h));
3688         mmu_notifier_invalidate_range_start(&range);
3689
3690         /*
3691          * Retake the page table lock to check for racing updates
3692          * before the page tables are altered
3693          */
3694         spin_lock(ptl);
3695         ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3696         if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
3697                 ClearPagePrivate(new_page);
3698
3699                 /* Break COW */
3700                 huge_ptep_clear_flush(vma, haddr, ptep);
3701                 mmu_notifier_invalidate_range(mm, range.start, range.end);
3702                 set_huge_pte_at(mm, haddr, ptep,
3703                                 make_huge_pte(vma, new_page, 1));
3704                 page_remove_rmap(old_page, true);
3705                 hugepage_add_new_anon_rmap(new_page, vma, haddr);
3706                 set_page_huge_active(new_page);
3707                 /* Make the old page be freed below */
3708                 new_page = old_page;
3709         }
3710         spin_unlock(ptl);
3711         mmu_notifier_invalidate_range_end(&range);
3712 out_release_all:
3713         restore_reserve_on_error(h, vma, haddr, new_page);
3714         put_page(new_page);
3715 out_release_old:
3716         put_page(old_page);
3717
3718         spin_lock(ptl); /* Caller expects lock to be held */
3719         return ret;
3720 }
3721
3722 /* Return the pagecache page at a given address within a VMA */
3723 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3724                         struct vm_area_struct *vma, unsigned long address)
3725 {
3726         struct address_space *mapping;
3727         pgoff_t idx;
3728
3729         mapping = vma->vm_file->f_mapping;
3730         idx = vma_hugecache_offset(h, vma, address);
3731
3732         return find_lock_page(mapping, idx);
3733 }
3734
3735 /*
3736  * Return whether there is a pagecache page to back the given address within the VMA.
3737  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
3738  */
3739 static bool hugetlbfs_pagecache_present(struct hstate *h,
3740                         struct vm_area_struct *vma, unsigned long address)
3741 {
3742         struct address_space *mapping;
3743         pgoff_t idx;
3744         struct page *page;
3745
3746         mapping = vma->vm_file->f_mapping;
3747         idx = vma_hugecache_offset(h, vma, address);
3748
3749         page = find_get_page(mapping, idx);
3750         if (page)
3751                 put_page(page);
3752         return page != NULL;
3753 }
3754
3755 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
3756                            pgoff_t idx)
3757 {
3758         struct inode *inode = mapping->host;
3759         struct hstate *h = hstate_inode(inode);
3760         int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3761
3762         if (err)
3763                 return err;
3764         ClearPagePrivate(page);
3765
3766         /*
3767          * set page dirty so that it will not be removed from cache/file
3768          * by non-hugetlbfs specific code paths.
3769          */
3770         set_page_dirty(page);
3771
3772         spin_lock(&inode->i_lock);
3773         inode->i_blocks += blocks_per_huge_page(h);
3774         spin_unlock(&inode->i_lock);
3775         return 0;
3776 }
3777
3778 static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
3779                         struct vm_area_struct *vma,
3780                         struct address_space *mapping, pgoff_t idx,
3781                         unsigned long address, pte_t *ptep, unsigned int flags)
3782 {
3783         struct hstate *h = hstate_vma(vma);
3784         vm_fault_t ret = VM_FAULT_SIGBUS;
3785         int anon_rmap = 0;
3786         unsigned long size;
3787         struct page *page;
3788         pte_t new_pte;
3789         spinlock_t *ptl;
3790         unsigned long haddr = address & huge_page_mask(h);
3791         bool new_page = false;
3792
3793         /*
3794          * Currently, we are forced to kill the process in the event the
3795          * original mapper has unmapped pages from the child due to a failed
3796          * COW. Warn that such a situation has occurred, as it may not be obvious.
3797          */
3798         if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3799                 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
3800                            current->pid);
3801                 return ret;
3802         }
3803
3804         /*
3805          * Use page lock to guard against racing truncation
3806          * before we get page_table_lock.
3807          */
3808 retry:
3809         page = find_lock_page(mapping, idx);
3810         if (!page) {
3811                 size = i_size_read(mapping->host) >> huge_page_shift(h);
3812                 if (idx >= size)
3813                         goto out;
3814
3815                 /*
3816                  * Check for page in userfault range
3817                  */
3818                 if (userfaultfd_missing(vma)) {
3819                         u32 hash;
3820                         struct vm_fault vmf = {
3821                                 .vma = vma,
3822                                 .address = haddr,
3823                                 .flags = flags,
3824                                 /*
3825                                  * Hard to debug if it ends up being
3826                                  * used by a callee that assumes
3827                                  * something about the other
3828                                  * uninitialized fields... same as in
3829                                  * memory.c
3830                                  */
3831                         };
3832
3833                         /*
3834                          * hugetlb_fault_mutex must be dropped before
3835                          * handling userfault.  Reacquire after handling
3836                          * fault to make calling code simpler.
3837                          */
3838                         hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
3839                         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3840                         ret = handle_userfault(&vmf, VM_UFFD_MISSING);
3841                         mutex_lock(&hugetlb_fault_mutex_table[hash]);
3842                         goto out;
3843                 }
3844
3845                 page = alloc_huge_page(vma, haddr, 0);
3846                 if (IS_ERR(page)) {
3847                         ret = vmf_error(PTR_ERR(page));
3848                         goto out;
3849                 }
3850                 clear_huge_page(page, address, pages_per_huge_page(h));
3851                 __SetPageUptodate(page);
3852                 new_page = true;
3853
3854                 if (vma->vm_flags & VM_MAYSHARE) {
3855                         int err = huge_add_to_page_cache(page, mapping, idx);
3856                         if (err) {
3857                                 put_page(page);
3858                                 if (err == -EEXIST)
3859                                         goto retry;
3860                                 goto out;
3861                         }
3862                 } else {
3863                         lock_page(page);
3864                         if (unlikely(anon_vma_prepare(vma))) {
3865                                 ret = VM_FAULT_OOM;
3866                                 goto backout_unlocked;
3867                         }
3868                         anon_rmap = 1;
3869                 }
3870         } else {
3871                 /*
3872                  * If a memory error occurs between mmap() and fault, some processes
3873                  * don't have a hwpoisoned swap entry for the errored virtual address.
3874                  * So we need to block the hugepage fault with a PG_hwpoison bit check.
3875                  */
3876                 if (unlikely(PageHWPoison(page))) {
3877                         ret = VM_FAULT_HWPOISON |
3878                                 VM_FAULT_SET_HINDEX(hstate_index(h));
3879                         goto backout_unlocked;
3880                 }
3881         }
3882
3883         /*
3884          * If we are going to COW a private mapping later, we examine the
3885          * pending reservations for this page now. This will ensure that
3886          * any allocations necessary to record that reservation occur outside
3887          * the spinlock.
3888          */
3889         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3890                 if (vma_needs_reservation(h, vma, haddr) < 0) {
3891                         ret = VM_FAULT_OOM;
3892                         goto backout_unlocked;
3893                 }
3894                 /* Just decrements count, does not deallocate */
3895                 vma_end_reservation(h, vma, haddr);
3896         }
3897
3898         ptl = huge_pte_lock(h, mm, ptep);
3899         size = i_size_read(mapping->host) >> huge_page_shift(h);
3900         if (idx >= size)
3901                 goto backout;
3902
3903         ret = 0;
3904         if (!huge_pte_none(huge_ptep_get(ptep)))
3905                 goto backout;
3906
3907         if (anon_rmap) {
3908                 ClearPagePrivate(page);
3909                 hugepage_add_new_anon_rmap(page, vma, haddr);
3910         } else
3911                 page_dup_rmap(page, true);
3912         new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3913                                 && (vma->vm_flags & VM_SHARED)));
3914         set_huge_pte_at(mm, haddr, ptep, new_pte);
3915
3916         hugetlb_count_add(pages_per_huge_page(h), mm);
3917         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3918                 /* Optimization, do the COW without a second fault */
3919                 ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
3920         }
3921
3922         spin_unlock(ptl);
3923
3924         /*
3925          * Only make newly allocated pages active.  Existing pages found
3926          * in the pagecache could be !page_huge_active() if they have been
3927          * isolated for migration.
3928          */
3929         if (new_page)
3930                 set_page_huge_active(page);
3931
3932         unlock_page(page);
3933 out:
3934         return ret;
3935
3936 backout:
3937         spin_unlock(ptl);
3938 backout_unlocked:
3939         unlock_page(page);
3940         restore_reserve_on_error(h, vma, haddr, page);
3941         put_page(page);
3942         goto out;
3943 }
3944
3945 #ifdef CONFIG_SMP
3946 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
3947                             pgoff_t idx, unsigned long address)
3948 {
3949         unsigned long key[2];
3950         u32 hash;
3951
3952         key[0] = (unsigned long) mapping;
3953         key[1] = idx;
3954
3955         hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
3956
3957         return hash & (num_fault_mutexes - 1);
3958 }
3959 #else
3960 /*
3961  * For uniprocessor systems we always use a single mutex, so just
3962  * return 0 and avoid the hashing overhead.
3963  */
3964 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
3965                             pgoff_t idx, unsigned long address)
3966 {
3967         return 0;
3968 }
3969 #endif
3970
3971 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3972                         unsigned long address, unsigned int flags)
3973 {
3974         pte_t *ptep, entry;
3975         spinlock_t *ptl;
3976         vm_fault_t ret;
3977         u32 hash;
3978         pgoff_t idx;
3979         struct page *page = NULL;
3980         struct page *pagecache_page = NULL;
3981         struct hstate *h = hstate_vma(vma);
3982         struct address_space *mapping;
3983         int need_wait_lock = 0;
3984         unsigned long haddr = address & huge_page_mask(h);
3985
3986         ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3987         if (ptep) {
3988                 entry = huge_ptep_get(ptep);
3989                 if (unlikely(is_hugetlb_entry_migration(entry))) {
3990                         migration_entry_wait_huge(vma, mm, ptep);
3991                         return 0;
3992                 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3993                         return VM_FAULT_HWPOISON_LARGE |
3994                                 VM_FAULT_SET_HINDEX(hstate_index(h));
3995         } else {
3996                 ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
3997                 if (!ptep)
3998                         return VM_FAULT_OOM;
3999         }
4000
4001         mapping = vma->vm_file->f_mapping;
4002         idx = vma_hugecache_offset(h, vma, haddr);
4003
4004         /*
4005          * Serialize hugepage allocation and instantiation, so that we don't
4006          * get spurious allocation failures if two CPUs race to instantiate
4007          * the same page in the page cache.
4008          */
4009         hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);
4010         mutex_lock(&hugetlb_fault_mutex_table[hash]);
4011
4012         entry = huge_ptep_get(ptep);
4013         if (huge_pte_none(entry)) {
4014                 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
4015                 goto out_mutex;
4016         }
4017
4018         ret = 0;
4019
4020         /*
4021          * entry could be a migration/hwpoison entry at this point, so this
4022          * check prevents the kernel from proceeding below on the assumption
4023          * that we have an active hugepage in the pagecache. This goto expects
4024          * the 2nd page fault, and the is_hugetlb_entry_(migration|hwpoisoned)
4025          * check will properly handle it.
4026          */
4027         if (!pte_present(entry))
4028                 goto out_mutex;
4029
4030         /*
4031          * If we are going to COW the mapping later, we examine the pending
4032          * reservations for this page now. This will ensure that any
4033          * allocations necessary to record that reservation occur outside the
4034          * spinlock. For private mappings, we also lookup the pagecache
4035          * page now as it is used to determine if a reservation has been
4036          * consumed.
4037          */
4038         if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
4039                 if (vma_needs_reservation(h, vma, haddr) < 0) {
4040                         ret = VM_FAULT_OOM;
4041                         goto out_mutex;
4042                 }
4043                 /* Just decrements count, does not deallocate */
4044                 vma_end_reservation(h, vma, haddr);
4045
4046                 if (!(vma->vm_flags & VM_MAYSHARE))
4047                         pagecache_page = hugetlbfs_pagecache_page(h,
4048                                                                 vma, haddr);
4049         }
4050
4051         ptl = huge_pte_lock(h, mm, ptep);
4052
4053         /* Check for a racing update before calling hugetlb_cow */
4054         if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
4055                 goto out_ptl;
4056
4057         /*
4058          * hugetlb_cow() requires page locks of pte_page(entry) and
4059          * pagecache_page, so here we need to take the former one
4060          * when page != pagecache_page or !pagecache_page.
4061          */
4062         page = pte_page(entry);
4063         if (page != pagecache_page)
4064                 if (!trylock_page(page)) {
4065                         need_wait_lock = 1;
4066                         goto out_ptl;
4067                 }
4068
4069         get_page(page);
4070
4071         if (flags & FAULT_FLAG_WRITE) {
4072                 if (!huge_pte_write(entry)) {
4073                         ret = hugetlb_cow(mm, vma, address, ptep,
4074                                           pagecache_page, ptl);
4075                         goto out_put_page;
4076                 }
4077                 entry = huge_pte_mkdirty(entry);
4078         }
4079         entry = pte_mkyoung(entry);
4080         if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
4081                                                 flags & FAULT_FLAG_WRITE))
4082                 update_mmu_cache(vma, haddr, ptep);
4083 out_put_page:
4084         if (page != pagecache_page)
4085                 unlock_page(page);
4086         put_page(page);
4087 out_ptl:
4088         spin_unlock(ptl);
4089
4090         if (pagecache_page) {
4091                 unlock_page(pagecache_page);
4092                 put_page(pagecache_page);
4093         }
4094 out_mutex:
4095         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
4096         /*
4097          * Generally it's safe to hold a refcount while waiting on a page lock.
4098          * But here we just wait to defer the next page fault and avoid a busy
4099          * loop, and the page is not used after it is unlocked before we return
4100          * from the current page fault. So we are safe from accessing a freed
4101          * page, even if we wait here without taking a refcount.
4102          */
4103         if (need_wait_lock)
4104                 wait_on_page_locked(page);
4105         return ret;
4106 }
4107
4108 /*
4109  * Used by userfaultfd UFFDIO_COPY.  Based on mcopy_atomic_pte with
4110  * modifications for huge pages.
4111  */
4112 int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
4113                             pte_t *dst_pte,
4114                             struct vm_area_struct *dst_vma,
4115                             unsigned long dst_addr,
4116                             unsigned long src_addr,
4117                             struct page **pagep)
4118 {
4119         struct address_space *mapping;
4120         pgoff_t idx;
4121         unsigned long size;
4122         int vm_shared = dst_vma->vm_flags & VM_SHARED;
4123         struct hstate *h = hstate_vma(dst_vma);
4124         pte_t _dst_pte;
4125         spinlock_t *ptl;
4126         int ret;
4127         struct page *page;
4128
4129         if (!*pagep) {
4130                 ret = -ENOMEM;
4131                 page = alloc_huge_page(dst_vma, dst_addr, 0);
4132                 if (IS_ERR(page))
4133                         goto out;
4134
4135                 ret = copy_huge_page_from_user(page,
4136                                                 (const void __user *) src_addr,
4137                                                 pages_per_huge_page(h), false);
4138
4139                 /* fallback to copy_from_user outside mmap_sem */
4140                 if (unlikely(ret)) {
4141                         ret = -ENOENT;
4142                         *pagep = page;
4143                         /* don't free the page */
4144                         goto out;
4145                 }
4146         } else {
4147                 page = *pagep;
4148                 *pagep = NULL;
4149         }
4150
4151         /*
4152          * The memory barrier inside __SetPageUptodate makes sure that
4153          * preceding stores to the page contents become visible before
4154          * the set_pte_at() write.
4155          */
4156         __SetPageUptodate(page);
4157
4158         mapping = dst_vma->vm_file->f_mapping;
4159         idx = vma_hugecache_offset(h, dst_vma, dst_addr);
4160
4161         /*
4162          * If shared, add to page cache
4163          */
4164         if (vm_shared) {
4165                 size = i_size_read(mapping->host) >> huge_page_shift(h);
4166                 ret = -EFAULT;
4167                 if (idx >= size)
4168                         goto out_release_nounlock;
4169
4170                 /*
4171                  * Serialization between remove_inode_hugepages() and
4172                  * huge_add_to_page_cache() below happens through the
4173                  * hugetlb_fault_mutex_table that must be held by
4174                  * the caller here.
4175                  */
4176                 ret = huge_add_to_page_cache(page, mapping, idx);
4177                 if (ret)
4178                         goto out_release_nounlock;
4179         }
4180
4181         ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
4182         spin_lock(ptl);
4183
4184         /*
4185          * Recheck the i_size after holding PT lock to make sure not
4186          * to leave any page mapped (as page_mapped()) beyond the end
4187          * of the i_size (remove_inode_hugepages() is strict about
4188          * enforcing that). If we bail out here, we'll also leave a
4189          * page in the radix tree in the vm_shared case beyond the end
4190          * of the i_size, but remove_inode_hugepages() will take care
4191          * of it as soon as we drop the hugetlb_fault_mutex_table.
4192          */
4193         size = i_size_read(mapping->host) >> huge_page_shift(h);
4194         ret = -EFAULT;
4195         if (idx >= size)
4196                 goto out_release_unlock;
4197
4198         ret = -EEXIST;
4199         if (!huge_pte_none(huge_ptep_get(dst_pte)))
4200                 goto out_release_unlock;
4201
4202         if (vm_shared) {
4203                 page_dup_rmap(page, true);
4204         } else {
4205                 ClearPagePrivate(page);
4206                 hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
4207         }
4208
4209         _dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE);
4210         if (dst_vma->vm_flags & VM_WRITE)
4211                 _dst_pte = huge_pte_mkdirty(_dst_pte);
4212         _dst_pte = pte_mkyoung(_dst_pte);
4213
4214         set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
4215
4216         (void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
4217                                         dst_vma->vm_flags & VM_WRITE);
4218         hugetlb_count_add(pages_per_huge_page(h), dst_mm);
4219
4220         /* No need to invalidate - it was non-present before */
4221         update_mmu_cache(dst_vma, dst_addr, dst_pte);
4222
4223         spin_unlock(ptl);
4224         set_page_huge_active(page);
4225         if (vm_shared)
4226                 unlock_page(page);
4227         ret = 0;
4228 out:
4229         return ret;
4230 out_release_unlock:
4231         spin_unlock(ptl);
4232         if (vm_shared)
4233                 unlock_page(page);
4234 out_release_nounlock:
4235         put_page(page);
4236         goto out;
4237 }
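/*
 * Example (illustrative, simplified) of the userspace side that reaches
 * hugetlb_mcopy_atomic_pte(): a userfaultfd monitor resolving a missing fault
 * in a hugetlb VMA with UFFDIO_COPY.  The names uffd, fault_addr, staging_buf
 * and huge_page_size are placeholders; dst/src/len are assumed to be aligned
 * to the huge page size of the mapping.
 *
 *     struct uffdio_copy copy = {
 *             .dst  = fault_addr & ~(huge_page_size - 1),
 *             .src  = (unsigned long)staging_buf,
 *             .len  = huge_page_size,
 *             .mode = 0,
 *     };
 *     if (ioctl(uffd, UFFDIO_COPY, &copy))
 *             perror("UFFDIO_COPY");   -EEXIST means the page was already mapped
 */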
4238
4239 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
4240                          struct page **pages, struct vm_area_struct **vmas,
4241                          unsigned long *position, unsigned long *nr_pages,
4242                          long i, unsigned int flags, int *nonblocking)
4243 {
4244         unsigned long pfn_offset;
4245         unsigned long vaddr = *position;
4246         unsigned long remainder = *nr_pages;
4247         struct hstate *h = hstate_vma(vma);
4248         int err = -EFAULT;
4249
4250         while (vaddr < vma->vm_end && remainder) {
4251                 pte_t *pte;
4252                 spinlock_t *ptl = NULL;
4253                 int absent;
4254                 struct page *page;
4255
4256                 /*
4257                  * If we have a pending SIGKILL, don't keep faulting pages and
4258                  * potentially allocating memory.
4259                  */
4260                 if (fatal_signal_pending(current)) {
4261                         remainder = 0;
4262                         break;
4263                 }
4264
4265                 /*
4266                  * Some archs (sparc64, sh*) have multiple pte_ts for
4267                  * each hugepage.  We have to make sure we get the
4268                  * first, for the page indexing below to work.
4269                  *
4270                  * Note that page table lock is not held when pte is null.
4271                  */
4272                 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
4273                                       huge_page_size(h));
4274                 if (pte)
4275                         ptl = huge_pte_lock(h, mm, pte);
4276                 absent = !pte || huge_pte_none(huge_ptep_get(pte));
4277
4278                 /*
4279                  * When coredumping, it suits get_dump_page if we just return
4280                  * an error where there's an empty slot with no huge pagecache
4281                  * to back it.  This way, we avoid allocating a hugepage, and
4282                  * the sparse dumpfile avoids allocating disk blocks, but its
4283                  * huge holes still show up with zeroes where they need to be.
4284                  */
4285                 if (absent && (flags & FOLL_DUMP) &&
4286                     !hugetlbfs_pagecache_present(h, vma, vaddr)) {
4287                         if (pte)
4288                                 spin_unlock(ptl);
4289                         remainder = 0;
4290                         break;
4291                 }
4292
4293                 /*
4294                  * We need to call hugetlb_fault both for hugepages under
4295                  * migration (in which case hugetlb_fault waits for the
4296                  * migration) and for hwpoisoned hugepages (in which case we
4297                  * need to prevent the caller from accessing them). To do
4298                  * this we use is_swap_pte here instead of
4299                  * is_hugetlb_entry_migration and is_hugetlb_entry_hwpoisoned:
4300                  * it covers both cases, and we cannot follow correct pages
4301                  * directly from any kind of swap entry anyway.
4302                  */
4303                 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
4304                     ((flags & FOLL_WRITE) &&
4305                       !huge_pte_write(huge_ptep_get(pte)))) {
4306                         vm_fault_t ret;
4307                         unsigned int fault_flags = 0;
4308
4309                         if (pte)
4310                                 spin_unlock(ptl);
4311                         if (flags & FOLL_WRITE)
4312                                 fault_flags |= FAULT_FLAG_WRITE;
4313                         if (nonblocking)
4314                                 fault_flags |= FAULT_FLAG_ALLOW_RETRY;
4315                         if (flags & FOLL_NOWAIT)
4316                                 fault_flags |= FAULT_FLAG_ALLOW_RETRY |
4317                                         FAULT_FLAG_RETRY_NOWAIT;
4318                         if (flags & FOLL_TRIED) {
4319                                 VM_WARN_ON_ONCE(fault_flags &
4320                                                 FAULT_FLAG_ALLOW_RETRY);
4321                                 fault_flags |= FAULT_FLAG_TRIED;
4322                         }
4323                         ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
4324                         if (ret & VM_FAULT_ERROR) {
4325                                 err = vm_fault_to_errno(ret, flags);
4326                                 remainder = 0;
4327                                 break;
4328                         }
4329                         if (ret & VM_FAULT_RETRY) {
4330                                 if (nonblocking &&
4331                                     !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
4332                                         *nonblocking = 0;
4333                                 *nr_pages = 0;
4334                                 /*
4335                                  * VM_FAULT_RETRY must not return an
4336                                  * error, it will return zero
4337                                  * instead.
4338                                  *
4339                                  * No need to update "position" as the
4340                                  * caller will not check it after
4341                                  * *nr_pages is set to 0.
4342                                  */
4343                                 return i;
4344                         }
4345                         continue;
4346                 }
4347
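                     /*
                      * pfn_offset is the index of the base page within this
                      * huge page that corresponds to vaddr; the same_page loop
                      * below hands out consecutive subpages without re-walking
                      * the page table.
                      */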
4348                 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
4349                 page = pte_page(huge_ptep_get(pte));
4350
4351                 /*
4352                  * Instead of doing 'try_get_page()' below in the same_page
4353                  * loop, just check the count once here.
4354                  */
4355                 if (unlikely(page_count(page) <= 0)) {
4356                         if (pages) {
4357                                 spin_unlock(ptl);
4358                                 remainder = 0;
4359                                 err = -ENOMEM;
4360                                 break;
4361                         }
4362                 }
4363 same_page:
4364                 if (pages) {
4365                         pages[i] = mem_map_offset(page, pfn_offset);
4366                         get_page(pages[i]);
4367                 }
4368
4369                 if (vmas)
4370                         vmas[i] = vma;
4371
4372                 vaddr += PAGE_SIZE;
4373                 ++pfn_offset;
4374                 --remainder;
4375                 ++i;
4376                 if (vaddr < vma->vm_end && remainder &&
4377                                 pfn_offset < pages_per_huge_page(h)) {
4378                         /*
4379                          * We use pfn_offset to avoid touching the pageframes
4380                          * of this compound page.
4381                          */
4382                         goto same_page;
4383                 }
4384                 spin_unlock(ptl);
4385         }
4386         *nr_pages = remainder;
4387         /*
4388          * Setting position is actually required only if remainder is
4389          * not zero, but it's faster not to add an "if (remainder)"
4390          * branch.
4391          */
4392         *position = vaddr;
4393
4394         return i ? i : err;
4395 }
4396
4397 #ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
4398 /*
4399  * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
4400  * implement this.
4401  */
4402 #define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
4403 #endif
4404
4405 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
4406                 unsigned long address, unsigned long end, pgprot_t newprot)
4407 {
4408         struct mm_struct *mm = vma->vm_mm;
4409         unsigned long start = address;
4410         pte_t *ptep;
4411         pte_t pte;
4412         struct hstate *h = hstate_vma(vma);
4413         unsigned long pages = 0;
4414         bool shared_pmd = false;
4415         struct mmu_notifier_range range;
4416
4417         /*
4418          * In the case of shared PMDs, the area to flush could be beyond
4419          * start/end.  Set range.start/range.end to cover the maximum possible
4420          * range if PMD sharing is possible.
4421          */
4422         mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
4423                                 0, vma, mm, start, end);
4424         adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
4425
4426         BUG_ON(address >= end);
4427         flush_cache_range(vma, range.start, range.end);
4428
4429         mmu_notifier_invalidate_range_start(&range);
4430         i_mmap_lock_write(vma->vm_file->f_mapping);
4431         for (; address < end; address += huge_page_size(h)) {
4432                 spinlock_t *ptl;
4433                 ptep = huge_pte_offset(mm, address, huge_page_size(h));
4434                 if (!ptep)
4435                         continue;
4436                 ptl = huge_pte_lock(h, mm, ptep);
4437                 if (huge_pmd_unshare(mm, &address, ptep)) {
4438                         pages++;
4439                         spin_unlock(ptl);
4440                         shared_pmd = true;
4441                         continue;
4442                 }
4443                 pte = huge_ptep_get(ptep);
4444                 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
4445                         spin_unlock(ptl);
4446                         continue;
4447                 }
4448                 if (unlikely(is_hugetlb_entry_migration(pte))) {
4449                         swp_entry_t entry = pte_to_swp_entry(pte);
4450
4451                         if (is_write_migration_entry(entry)) {
4452                                 pte_t newpte;
4453
4454                                 make_migration_entry_read(&entry);
4455                                 newpte = swp_entry_to_pte(entry);
4456                                 set_huge_swap_pte_at(mm, address, ptep,
4457                                                      newpte, huge_page_size(h));
4458                                 pages++;
4459                         }
4460                         spin_unlock(ptl);
4461                         continue;
4462                 }
4463                 if (!huge_pte_none(pte)) {
4464                         pte_t old_pte;
4465
4466                         old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
4467                         pte = pte_mkhuge(huge_pte_modify(old_pte, newprot));
4468                         pte = arch_make_huge_pte(pte, vma, NULL, 0);
4469                         huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
4470                         pages++;
4471                 }
4472                 spin_unlock(ptl);
4473         }
4474         /*
4475          * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
4476          * may have cleared our pud entry and done put_page on the page table:
4477          * once we release i_mmap_rwsem, another task can do the final put_page
4478          * and that page table be reused and filled with junk.  If we actually
4479          * did unshare a page of pmds, flush the range corresponding to the pud.
4480          */
4481         if (shared_pmd)
4482                 flush_hugetlb_tlb_range(vma, range.start, range.end);
4483         else
4484                 flush_hugetlb_tlb_range(vma, start, end);
4485         /*
4486          * No need to call mmu_notifier_invalidate_range(): we are downgrading
4487          * page table protection, not changing it to point to a new page.
4488          *
4489          * See Documentation/vm/mmu_notifier.rst
4490          */
4491         i_mmap_unlock_write(vma->vm_file->f_mapping);
4492         mmu_notifier_invalidate_range_end(&range);
4493
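             /* "pages" counts huge pages; report base pages to the caller. */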
4494         return pages << h->order;
4495 }
4496
4497 int hugetlb_reserve_pages(struct inode *inode,
4498                                         long from, long to,
4499                                         struct vm_area_struct *vma,
4500                                         vm_flags_t vm_flags)
4501 {
4502         long ret, chg;
4503         struct hstate *h = hstate_inode(inode);
4504         struct hugepage_subpool *spool = subpool_inode(inode);
4505         struct resv_map *resv_map;
4506         long gbl_reserve;
4507
4508         /* This should never happen */
4509         if (from > to) {
4510                 VM_WARN(1, "%s called with a negative range\n", __func__);
4511                 return -EINVAL;
4512         }
4513
4514         /*
4515          * Only apply hugepage reservation if asked. At fault time, an
4516          * attempt will be made for VM_NORESERVE to allocate a page
4517          * without using reserves.
4518          */
4519         if (vm_flags & VM_NORESERVE)
4520                 return 0;
4521
4522         /*
4523          * Shared mappings base their reservation on the number of pages that
4524          * are already allocated on behalf of the file. Private mappings need
4525          * to reserve the full area even if read-only as mprotect() may be
4526          * called to make the mapping read-write. Assume !vma is a shm mapping
4527          */
4528         if (!vma || vma->vm_flags & VM_MAYSHARE) {
4529                 /*
4530                  * resv_map can not be NULL as hugetlb_reserve_pages is only
4531                  * called for inodes for which resv_maps were created (see
4532                  * hugetlbfs_get_inode).
4533                  */
4534                 resv_map = inode_resv_map(inode);
4535
4536                 chg = region_chg(resv_map, from, to);
4537
4538         } else {
4539                 resv_map = resv_map_alloc();
4540                 if (!resv_map)
4541                         return -ENOMEM;
4542
4543                 chg = to - from;
4544
4545                 set_vma_resv_map(vma, resv_map);
4546                 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
4547         }
4548
4549         if (chg < 0) {
4550                 ret = chg;
4551                 goto out_err;
4552         }
4553
4554         /*
4555          * There must be enough pages in the subpool for the mapping. If
4556          * the subpool has a minimum size, there may be some global
4557          * reservations already in place (gbl_reserve).
4558          */
4559         gbl_reserve = hugepage_subpool_get_pages(spool, chg);
4560         if (gbl_reserve < 0) {
4561                 ret = -ENOSPC;
4562                 goto out_err;
4563         }
4564
4565         /*
4566          * Check that enough hugepages are available for the reservation.
4567          * Hand the pages back to the subpool if there are not.
4568          */
4569         ret = hugetlb_acct_memory(h, gbl_reserve);
4570         if (ret < 0) {
4571                 /* put back original number of pages, chg */
4572                 (void)hugepage_subpool_put_pages(spool, chg);
4573                 goto out_err;
4574         }
4575
4576         /*
4577          * Account for the reservations made. Shared mappings record regions
4578          * that have reservations as they are shared by multiple VMAs.
4579          * When the last VMA disappears, the region map says how much
4580          * the reservation was and the page cache tells how much of
4581          * the reservation was consumed. Private mappings are per-VMA and
4582          * only the consumed reservations are tracked. When the VMA
4583          * disappears, the original reservation is the VMA size and the
4584          * consumed reservations are stored in the map. Hence, nothing
4585          * else has to be done for private mappings here
4586          */
4587         if (!vma || vma->vm_flags & VM_MAYSHARE) {
4588                 long add = region_add(resv_map, from, to);
4589
4590                 if (unlikely(chg > add)) {
4591                         /*
4592                          * pages in this range were added to the reserve
4593                          * map between region_chg and region_add.  This
4594                          * indicates a race with alloc_huge_page.  Adjust
4595                          * the subpool and reserve counts modified above
4596                          * based on the difference.
4597                          */
4598                         long rsv_adjust;
4599
4600                         rsv_adjust = hugepage_subpool_put_pages(spool,
4601                                                                 chg - add);
4602                         hugetlb_acct_memory(h, -rsv_adjust);
4603                 }
4604         }
4605         return 0;
4606 out_err:
4607         if (!vma || vma->vm_flags & VM_MAYSHARE)
4608                 /* Don't call region_abort if region_chg failed */
4609                 if (chg >= 0)
4610                         region_abort(resv_map, from, to);
4611         if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4612                 kref_put(&resv_map->refs, resv_map_release);
4613         return ret;
4614 }
4615
4616 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
4617                                                                 long freed)
4618 {
4619         struct hstate *h = hstate_inode(inode);
4620         struct resv_map *resv_map = inode_resv_map(inode);
4621         long chg = 0;
4622         struct hugepage_subpool *spool = subpool_inode(inode);
4623         long gbl_reserve;
4624
4625         /*
4626          * Since this routine can be called in the evict inode path for all
4627          * hugetlbfs inodes, resv_map could be NULL.
4628          */
4629         if (resv_map) {
4630                 chg = region_del(resv_map, start, end);
4631                 /*
4632                  * region_del() can fail in the rare case where a region
4633                  * must be split and another region descriptor can not be
4634                  * allocated.  If end == LONG_MAX, it will not fail.
4635                  */
4636                 if (chg < 0)
4637                         return chg;
4638         }
4639
4640         spin_lock(&inode->i_lock);
4641         inode->i_blocks -= (blocks_per_huge_page(h) * freed);
4642         spin_unlock(&inode->i_lock);
4643
4644         /*
4645          * If the subpool has a minimum size, the number of global
4646          * reservations to be released may be adjusted.
4647          */
4648         gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
4649         hugetlb_acct_memory(h, -gbl_reserve);
4650
4651         return 0;
4652 }
4653
4654 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
4655 static unsigned long page_table_shareable(struct vm_area_struct *svma,
4656                                 struct vm_area_struct *vma,
4657                                 unsigned long addr, pgoff_t idx)
4658 {
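             /*
              * Translate the file offset @idx back into an address inside
              * @svma so both VMAs can be compared over the same PUD-aligned
              * range.
              */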
4659         unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
4660                                 svma->vm_start;
4661         unsigned long sbase = saddr & PUD_MASK;
4662         unsigned long s_end = sbase + PUD_SIZE;
4663
4664         /* Allow segments to share if only one is marked locked */
4665         unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
4666         unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
4667
4668         /*
4669          * Match the virtual addresses, permissions and the alignment of the
4670          * page table page.
4671          */
4672         if (pmd_index(addr) != pmd_index(saddr) ||
4673             vm_flags != svm_flags ||
4674             sbase < svma->vm_start || svma->vm_end < s_end)
4675                 return 0;
4676
4677         return saddr;
4678 }
4679
4680 static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
4681 {
4682         unsigned long base = addr & PUD_MASK;
4683         unsigned long end = base + PUD_SIZE;
4684
4685         /*
4686          * check on proper vm_flags and page table alignment
4687          */
4688         if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
4689                 return true;
4690         return false;
4691 }
4692
4693 /*
4694  * Determine if start,end range within vma could be mapped by shared pmd.
4695  * If yes, adjust start and end to cover range associated with possible
4696  * shared pmd mappings.
4697  */
4698 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
4699                                 unsigned long *start, unsigned long *end)
4700 {
4701         unsigned long check_addr = *start;
4702
4703         if (!(vma->vm_flags & VM_MAYSHARE))
4704                 return;
4705
4706         for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) {
4707                 unsigned long a_start = check_addr & PUD_MASK;
4708                 unsigned long a_end = a_start + PUD_SIZE;
4709
4710                 /*
4711                  * If sharing is possible, adjust start/end if necessary.
4712                  */
4713                 if (range_in_vma(vma, a_start, a_end)) {
4714                         if (a_start < *start)
4715                                 *start = a_start;
4716                         if (a_end > *end)
4717                                 *end = a_end;
4718                 }
4719         }
4720 }
4721
4722 /*
4723  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
4724  * and returns the corresponding pte. While this is not necessary for the
4725  * !shared pmd case because we can allocate the pmd later as well, it makes the
4726  * code much cleaner. pmd allocation is essential for the shared case because
4727  * pud has to be populated inside the same i_mmap_rwsem section - otherwise
4728  * racing tasks could either miss the sharing (see huge_pte_offset) or select a
4729  * bad pmd for sharing.
4730  */
4731 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4732 {
4733         struct vm_area_struct *vma = find_vma(mm, addr);
4734         struct address_space *mapping = vma->vm_file->f_mapping;
4735         pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
4736                         vma->vm_pgoff;
4737         struct vm_area_struct *svma;
4738         unsigned long saddr;
4739         pte_t *spte = NULL;
4740         pte_t *pte;
4741         spinlock_t *ptl;
4742
4743         if (!vma_shareable(vma, addr))
4744                 return (pte_t *)pmd_alloc(mm, pud, addr);
4745
4746         i_mmap_lock_write(mapping);
4747         vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
4748                 if (svma == vma)
4749                         continue;
4750
4751                 saddr = page_table_shareable(svma, vma, addr, idx);
4752                 if (saddr) {
4753                         spte = huge_pte_offset(svma->vm_mm, saddr,
4754                                                vma_mmu_pagesize(svma));
4755                         if (spte) {
4756                                 get_page(virt_to_page(spte));
4757                                 break;
4758                         }
4759                 }
4760         }
4761
4762         if (!spte)
4763                 goto out;
4764
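             /*
              * Install the shared pmd page under our pud only if the pud is
              * still empty; otherwise another task raced us and the extra
              * page reference taken above is dropped.
              */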
4765         ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
4766         if (pud_none(*pud)) {
4767                 pud_populate(mm, pud,
4768                                 (pmd_t *)((unsigned long)spte & PAGE_MASK));
4769                 mm_inc_nr_pmds(mm);
4770         } else {
4771                 put_page(virt_to_page(spte));
4772         }
4773         spin_unlock(ptl);
4774 out:
4775         pte = (pte_t *)pmd_alloc(mm, pud, addr);
4776         i_mmap_unlock_write(mapping);
4777         return pte;
4778 }
4779
4780 /*
4781  * unmap huge page backed by shared pte.
4782  *
4783  * A hugetlb pte page is ref counted at the time of mapping.  If the pte is
4784  * shared, indicated by page_count > 1, unmap is achieved by clearing the pud
4785  * and decrementing the ref count.  If count == 1, the pte page is not shared.
4786  *
4787  * called with page table lock held.
4788  *
4789  * returns: 1 successfully unmapped a shared pte page
4790  *          0 the underlying pte page is not shared, or it is the last user
4791  */
4792 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4793 {
4794         pgd_t *pgd = pgd_offset(mm, *addr);
4795         p4d_t *p4d = p4d_offset(pgd, *addr);
4796         pud_t *pud = pud_offset(p4d, *addr);
4797
4798         BUG_ON(page_count(virt_to_page(ptep)) == 0);
4799         if (page_count(virt_to_page(ptep)) == 1)
4800                 return 0;
4801
4802         pud_clear(pud);
4803         put_page(virt_to_page(ptep));
4804         mm_dec_nr_pmds(mm);
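             /*
              * The caller steps by huge_page_size() after this returns, so
              * point *addr at the last huge page of the range covered by the
              * pmd page that was just unshared, skipping the rest of it.
              */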
4805         *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
4806         return 1;
4807 }
4808 #define want_pmd_share()        (1)
4809 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4810 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4811 {
4812         return NULL;
4813 }
4814
4815 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4816 {
4817         return 0;
4818 }
4819
4820 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
4821                                 unsigned long *start, unsigned long *end)
4822 {
4823 }
4824 #define want_pmd_share()        (0)
4825 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4826
4827 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
4828 pte_t *huge_pte_alloc(struct mm_struct *mm,
4829                         unsigned long addr, unsigned long sz)
4830 {
4831         pgd_t *pgd;
4832         p4d_t *p4d;
4833         pud_t *pud;
4834         pte_t *pte = NULL;
4835
4836         pgd = pgd_offset(mm, addr);
4837         p4d = p4d_alloc(mm, pgd, addr);
4838         if (!p4d)
4839                 return NULL;
4840         pud = pud_alloc(mm, p4d, addr);
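             /*
              * A PUD_SIZE hugepage is mapped by the pud entry itself; a
              * PMD_SIZE hugepage needs a pmd, which may be shared with other
              * mappings of the same file when pmd sharing is enabled.
              */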
4841         if (pud) {
4842                 if (sz == PUD_SIZE) {
4843                         pte = (pte_t *)pud;
4844                 } else {
4845                         BUG_ON(sz != PMD_SIZE);
4846                         if (want_pmd_share() && pud_none(*pud))
4847                                 pte = huge_pmd_share(mm, addr, pud);
4848                         else
4849                                 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4850                 }
4851         }
4852         BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
4853
4854         return pte;
4855 }
4856
4857 /*
4858  * huge_pte_offset() - Walk the page table to resolve the hugepage
4859  * entry at address @addr
4860  *
4861  * Return: Pointer to page table or swap entry (PUD or PMD) for
4862  * address @addr, or NULL if a p*d_none() entry is encountered and the
4863  * size @sz doesn't match the hugepage size at this level of the page
4864  * table.
4865  */
4866 pte_t *huge_pte_offset(struct mm_struct *mm,
4867                        unsigned long addr, unsigned long sz)
4868 {
4869         pgd_t *pgd;
4870         p4d_t *p4d;
4871         pud_t *pud;
4872         pmd_t *pmd;
4873
4874         pgd = pgd_offset(mm, addr);
4875         if (!pgd_present(*pgd))
4876                 return NULL;
4877         p4d = p4d_offset(pgd, addr);
4878         if (!p4d_present(*p4d))
4879                 return NULL;
4880
4881         pud = pud_offset(p4d, addr);
4882         if (sz != PUD_SIZE && pud_none(*pud))
4883                 return NULL;
4884         /* hugepage or swap? */
4885         if (pud_huge(*pud) || !pud_present(*pud))
4886                 return (pte_t *)pud;
4887
4888         pmd = pmd_offset(pud, addr);
4889         if (sz != PMD_SIZE && pmd_none(*pmd))
4890                 return NULL;
4891         /* hugepage or swap? */
4892         if (pmd_huge(*pmd) || !pmd_present(*pmd))
4893                 return (pte_t *)pmd;
4894
4895         return NULL;
4896 }
4897
4898 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
4899
4900 /*
4901  * These functions can be overridden if your architecture needs its own
4902  * behavior.
4903  */
4904 struct page * __weak
4905 follow_huge_addr(struct mm_struct *mm, unsigned long address,
4906                               int write)
4907 {
4908         return ERR_PTR(-EINVAL);
4909 }
4910
4911 struct page * __weak
4912 follow_huge_pd(struct vm_area_struct *vma,
4913                unsigned long address, hugepd_t hpd, int flags, int pdshift)
4914 {
4915         WARN(1, "hugepd follow called with no support for hugepage directory format\n");
4916         return NULL;
4917 }
4918
4919 struct page * __weak
4920 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
4921                 pmd_t *pmd, int flags)
4922 {
4923         struct page *page = NULL;
4924         spinlock_t *ptl;
4925         pte_t pte;
4926 retry:
4927         ptl = pmd_lockptr(mm, pmd);
4928         spin_lock(ptl);
4929         /*
4930          * Make sure that the address range covered by this pmd is not
4931          * unmapped by another thread while we hold the lock.
4932          */
4933         if (!pmd_huge(*pmd))
4934                 goto out;
4935         pte = huge_ptep_get((pte_t *)pmd);
4936         if (pte_present(pte)) {
4937                 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
4938                 if (flags & FOLL_GET)
4939                         get_page(page);
4940         } else {
4941                 if (is_hugetlb_entry_migration(pte)) {
4942                         spin_unlock(ptl);
4943                         __migration_entry_wait(mm, (pte_t *)pmd, ptl);
4944                         goto retry;
4945                 }
4946                 /*
4947                  * hwpoisoned entry is treated as no_page_table in
4948                  * follow_page_mask().
4949                  */
4950         }
4951 out:
4952         spin_unlock(ptl);
4953         return page;
4954 }
4955
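     /*
      * The generic follow_huge_pud()/follow_huge_pgd() helpers below do not
      * implement reference taking, so they refuse FOLL_GET and only compute
      * the subpage within the huge mapping.
      */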
4956 struct page * __weak
4957 follow_huge_pud(struct mm_struct *mm, unsigned long address,
4958                 pud_t *pud, int flags)
4959 {
4960         if (flags & FOLL_GET)
4961                 return NULL;
4962
4963         return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
4964 }
4965
4966 struct page * __weak
4967 follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
4968 {
4969         if (flags & FOLL_GET)
4970                 return NULL;
4971
4972         return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
4973 }
4974
4975 bool isolate_huge_page(struct page *page, struct list_head *list)
4976 {
4977         bool ret = true;
4978
4979         VM_BUG_ON_PAGE(!PageHead(page), page);
4980         spin_lock(&hugetlb_lock);
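             /*
              * Only a hugepage that has been marked active and still has a
              * non-zero reference count can be isolated for migration.
              */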
4981         if (!page_huge_active(page) || !get_page_unless_zero(page)) {
4982                 ret = false;
4983                 goto unlock;
4984         }
4985         clear_page_huge_active(page);
4986         list_move_tail(&page->lru, list);
4987 unlock:
4988         spin_unlock(&hugetlb_lock);
4989         return ret;
4990 }
4991
4992 void putback_active_hugepage(struct page *page)
4993 {
4994         VM_BUG_ON_PAGE(!PageHead(page), page);
4995         spin_lock(&hugetlb_lock);
4996         set_page_huge_active(page);
4997         list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
4998         spin_unlock(&hugetlb_lock);
4999         put_page(page);
5000 }
5001
5002 void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
5003 {
5004         struct hstate *h = page_hstate(oldpage);
5005
5006         hugetlb_cgroup_migrate(oldpage, newpage);
5007         set_page_owner_migrate_reason(newpage, reason);
5008
5009         /*
5010          * Transfer the temporary state of the new huge page. This is the
5011          * reverse of other transitions because the newpage is going to
5012          * be final while the old one will be freed, so it takes over
5013          * the temporary status.
5014          *
5015          * Also note that we have to transfer the per-node surplus state
5016          * here as well, otherwise the global surplus count will not match
5017          * the per-node counts.
5018          */
5019         if (PageHugeTemporary(newpage)) {
5020                 int old_nid = page_to_nid(oldpage);
5021                 int new_nid = page_to_nid(newpage);
5022
5023                 SetPageHugeTemporary(oldpage);
5024                 ClearPageHugeTemporary(newpage);
5025
5026                 spin_lock(&hugetlb_lock);
5027                 if (h->surplus_huge_pages_node[old_nid]) {
5028                         h->surplus_huge_pages_node[old_nid]--;
5029                         h->surplus_huge_pages_node[new_nid]++;
5030                 }
5031                 spin_unlock(&hugetlb_lock);
5032         }
5033 }