mm/hugetlb.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Generic hugetlb support.
4  * (C) Nadia Yvette Chambers, April 2004
5  */
6 #include <linux/list.h>
7 #include <linux/init.h>
8 #include <linux/mm.h>
9 #include <linux/seq_file.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/nodemask.h>
14 #include <linux/pagemap.h>
15 #include <linux/mempolicy.h>
16 #include <linux/compiler.h>
17 #include <linux/cpuset.h>
18 #include <linux/mutex.h>
19 #include <linux/memblock.h>
20 #include <linux/sysfs.h>
21 #include <linux/slab.h>
22 #include <linux/sched/mm.h>
23 #include <linux/mmdebug.h>
24 #include <linux/sched/signal.h>
25 #include <linux/rmap.h>
26 #include <linux/string_helpers.h>
27 #include <linux/swap.h>
28 #include <linux/swapops.h>
29 #include <linux/jhash.h>
30 #include <linux/numa.h>
31 #include <linux/llist.h>
32 #include <linux/cma.h>
33 #include <linux/migrate.h>
34 #include <linux/nospec.h>
35 #include <linux/delayacct.h>
36 #include <linux/memory.h>
37 #include <linux/mm_inline.h>
38
39 #include <asm/page.h>
40 #include <asm/pgalloc.h>
41 #include <asm/tlb.h>
42
43 #include <linux/io.h>
44 #include <linux/hugetlb.h>
45 #include <linux/hugetlb_cgroup.h>
46 #include <linux/node.h>
47 #include <linux/page_owner.h>
48 #include "internal.h"
49 #include "hugetlb_vmemmap.h"
50
51 int hugetlb_max_hstate __read_mostly;
52 unsigned int default_hstate_idx;
53 struct hstate hstates[HUGE_MAX_HSTATE];
54
55 #ifdef CONFIG_CMA
56 static struct cma *hugetlb_cma[MAX_NUMNODES];
57 static unsigned long hugetlb_cma_size_in_node[MAX_NUMNODES] __initdata;
58 static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
59 {
60         return cma_pages_valid(hugetlb_cma[folio_nid(folio)], &folio->page,
61                                 1 << order);
62 }
63 #else
64 static bool hugetlb_cma_folio(struct folio *folio, unsigned int order)
65 {
66         return false;
67 }
68 #endif
69 static unsigned long hugetlb_cma_size __initdata;
70
71 __initdata LIST_HEAD(huge_boot_pages);
72
73 /* for command line parsing */
74 static struct hstate * __initdata parsed_hstate;
75 static unsigned long __initdata default_hstate_max_huge_pages;
76 static bool __initdata parsed_valid_hugepagesz = true;
77 static bool __initdata parsed_default_hugepagesz;
78 static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;
79
80 /*
81  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
82  * free_huge_pages, and surplus_huge_pages.
83  */
84 DEFINE_SPINLOCK(hugetlb_lock);
85
86 /*
87  * Serializes faults on the same logical page.  This is used to
88  * prevent spurious OOMs when the hugepage pool is fully utilized.
89  */
90 static int num_fault_mutexes;
91 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
92
93 /* Forward declaration */
94 static int hugetlb_acct_memory(struct hstate *h, long delta);
95 static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
96 static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
97 static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
98 static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
99                 unsigned long start, unsigned long end);
100 static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
101
102 static inline bool subpool_is_free(struct hugepage_subpool *spool)
103 {
104         if (spool->count)
105                 return false;
106         if (spool->max_hpages != -1)
107                 return spool->used_hpages == 0;
108         if (spool->min_hpages != -1)
109                 return spool->rsv_hpages == spool->min_hpages;
110
111         return true;
112 }
113
114 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
115                                                 unsigned long irq_flags)
116 {
117         spin_unlock_irqrestore(&spool->lock, irq_flags);
118
119         /* If no pages are used, and no other handles to the subpool
120          * remain, give up any reservations based on minimum size and
121          * free the subpool */
122         if (subpool_is_free(spool)) {
123                 if (spool->min_hpages != -1)
124                         hugetlb_acct_memory(spool->hstate,
125                                                 -spool->min_hpages);
126                 kfree(spool);
127         }
128 }
129
130 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
131                                                 long min_hpages)
132 {
133         struct hugepage_subpool *spool;
134
135         spool = kzalloc(sizeof(*spool), GFP_KERNEL);
136         if (!spool)
137                 return NULL;
138
139         spin_lock_init(&spool->lock);
140         spool->count = 1;
141         spool->max_hpages = max_hpages;
142         spool->hstate = h;
143         spool->min_hpages = min_hpages;
144
145         if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
146                 kfree(spool);
147                 return NULL;
148         }
149         spool->rsv_hpages = min_hpages;
150
151         return spool;
152 }
153
154 void hugepage_put_subpool(struct hugepage_subpool *spool)
155 {
156         unsigned long flags;
157
158         spin_lock_irqsave(&spool->lock, flags);
159         BUG_ON(!spool->count);
160         spool->count--;
161         unlock_or_release_subpool(spool, flags);
162 }
163
164 /*
165  * Subpool accounting for allocating and reserving pages.
166  * Return -ENOMEM if there are not enough resources to satisfy the
167  * request.  Otherwise, return the number of pages by which the
168  * global pools must be adjusted (upward).  The returned value may
169  * only be different than the passed value (delta) in the case where
170  * a subpool minimum size must be maintained.
171  */
172 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
173                                       long delta)
174 {
175         long ret = delta;
176
177         if (!spool)
178                 return ret;
179
180         spin_lock_irq(&spool->lock);
181
182         if (spool->max_hpages != -1) {          /* maximum size accounting */
183                 if ((spool->used_hpages + delta) <= spool->max_hpages)
184                         spool->used_hpages += delta;
185                 else {
186                         ret = -ENOMEM;
187                         goto unlock_ret;
188                 }
189         }
190
191         /* minimum size accounting */
192         if (spool->min_hpages != -1 && spool->rsv_hpages) {
193                 if (delta > spool->rsv_hpages) {
194                         /*
195                          * Asking for more reserves than those already taken on
196                          * behalf of subpool.  Return difference.
197                          */
198                         ret = delta - spool->rsv_hpages;
199                         spool->rsv_hpages = 0;
200                 } else {
201                         ret = 0;        /* reserves already accounted for */
202                         spool->rsv_hpages -= delta;
203                 }
204         }
205
206 unlock_ret:
207         spin_unlock_irq(&spool->lock);
208         return ret;
209 }
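/*
 * Worked example (illustrative only, not part of the original source): a
 * subpool created with max_hpages = -1 and min_hpages = 8 starts out with
 * rsv_hpages = 8.  hugepage_subpool_get_pages(spool, 3) takes the three
 * pages out of the pre-reserved minimum (rsv_hpages 8 -> 5) and returns 0,
 * so no global pool adjustment is needed.  A later
 * hugepage_subpool_get_pages(spool, 10) can only cover 5 pages from the
 * remaining reserve (rsv_hpages -> 0) and returns 5, the number of pages
 * the caller must still charge to the global pool.
 */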
210
211 /*
212  * Subpool accounting for freeing and unreserving pages.
213  * Return the number of global page reservations that must be dropped.
214  * The return value may only be different than the passed value (delta)
215  * in the case where a subpool minimum size must be maintained.
216  */
217 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
218                                        long delta)
219 {
220         long ret = delta;
221         unsigned long flags;
222
223         if (!spool)
224                 return delta;
225
226         spin_lock_irqsave(&spool->lock, flags);
227
228         if (spool->max_hpages != -1)            /* maximum size accounting */
229                 spool->used_hpages -= delta;
230
231          /* minimum size accounting */
232         if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
233                 if (spool->rsv_hpages + delta <= spool->min_hpages)
234                         ret = 0;
235                 else
236                         ret = spool->rsv_hpages + delta - spool->min_hpages;
237
238                 spool->rsv_hpages += delta;
239                 if (spool->rsv_hpages > spool->min_hpages)
240                         spool->rsv_hpages = spool->min_hpages;
241         }
242
243         /*
244          * If hugetlbfs_put_super couldn't free spool due to an outstanding
245          * quota reference, free it now.
246          */
247         unlock_or_release_subpool(spool, flags);
248
249         return ret;
250 }
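/*
 * Worked example (illustrative only), continuing the one above: with
 * min_hpages = 8 and rsv_hpages now 0, hugepage_subpool_put_pages(spool, 10)
 * finds used_hpages below the minimum, refills rsv_hpages up to the cap of 8
 * and returns 2, i.e. only two of the ten freed reservations are handed back
 * to the global pool while eight are retained to restore the subpool minimum.
 */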
251
252 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
253 {
254         return HUGETLBFS_SB(inode->i_sb)->spool;
255 }
256
257 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
258 {
259         return subpool_inode(file_inode(vma->vm_file));
260 }
261
262 /*
263  * hugetlb vma_lock helper routines
264  */
265 void hugetlb_vma_lock_read(struct vm_area_struct *vma)
266 {
267         if (__vma_shareable_lock(vma)) {
268                 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
269
270                 down_read(&vma_lock->rw_sema);
271         } else if (__vma_private_lock(vma)) {
272                 struct resv_map *resv_map = vma_resv_map(vma);
273
274                 down_read(&resv_map->rw_sema);
275         }
276 }
277
278 void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
279 {
280         if (__vma_shareable_lock(vma)) {
281                 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
282
283                 up_read(&vma_lock->rw_sema);
284         } else if (__vma_private_lock(vma)) {
285                 struct resv_map *resv_map = vma_resv_map(vma);
286
287                 up_read(&resv_map->rw_sema);
288         }
289 }
290
291 void hugetlb_vma_lock_write(struct vm_area_struct *vma)
292 {
293         if (__vma_shareable_lock(vma)) {
294                 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
295
296                 down_write(&vma_lock->rw_sema);
297         } else if (__vma_private_lock(vma)) {
298                 struct resv_map *resv_map = vma_resv_map(vma);
299
300                 down_write(&resv_map->rw_sema);
301         }
302 }
303
304 void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
305 {
306         if (__vma_shareable_lock(vma)) {
307                 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
308
309                 up_write(&vma_lock->rw_sema);
310         } else if (__vma_private_lock(vma)) {
311                 struct resv_map *resv_map = vma_resv_map(vma);
312
313                 up_write(&resv_map->rw_sema);
314         }
315 }
316
317 int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
318 {
319
320         if (__vma_shareable_lock(vma)) {
321                 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
322
323                 return down_write_trylock(&vma_lock->rw_sema);
324         } else if (__vma_private_lock(vma)) {
325                 struct resv_map *resv_map = vma_resv_map(vma);
326
327                 return down_write_trylock(&resv_map->rw_sema);
328         }
329
330         return 1;
331 }
332
333 void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
334 {
335         if (__vma_shareable_lock(vma)) {
336                 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
337
338                 lockdep_assert_held(&vma_lock->rw_sema);
339         } else if (__vma_private_lock(vma)) {
340                 struct resv_map *resv_map = vma_resv_map(vma);
341
342                 lockdep_assert_held(&resv_map->rw_sema);
343         }
344 }
345
346 void hugetlb_vma_lock_release(struct kref *kref)
347 {
348         struct hugetlb_vma_lock *vma_lock = container_of(kref,
349                         struct hugetlb_vma_lock, refs);
350
351         kfree(vma_lock);
352 }
353
354 static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
355 {
356         struct vm_area_struct *vma = vma_lock->vma;
357
358         /*
359          * vma_lock structure may or may not be released as a result of put,
360          * it certainly will no longer be attached to vma so clear pointer.
361          * Semaphore synchronizes access to vma_lock->vma field.
362          */
363         vma_lock->vma = NULL;
364         vma->vm_private_data = NULL;
365         up_write(&vma_lock->rw_sema);
366         kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
367 }
368
369 static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
370 {
371         if (__vma_shareable_lock(vma)) {
372                 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
373
374                 __hugetlb_vma_unlock_write_put(vma_lock);
375         } else if (__vma_private_lock(vma)) {
376                 struct resv_map *resv_map = vma_resv_map(vma);
377
378                 /* no free for anon vmas, but still need to unlock */
379                 up_write(&resv_map->rw_sema);
380         }
381 }
382
383 static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
384 {
385         /*
386          * Only present in sharable vmas.
387          */
388         if (!vma || !__vma_shareable_lock(vma))
389                 return;
390
391         if (vma->vm_private_data) {
392                 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
393
394                 down_write(&vma_lock->rw_sema);
395                 __hugetlb_vma_unlock_write_put(vma_lock);
396         }
397 }
398
399 static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
400 {
401         struct hugetlb_vma_lock *vma_lock;
402
403         /* Only establish in shareable (VM_MAYSHARE) vmas */
404         if (!vma || !(vma->vm_flags & VM_MAYSHARE))
405                 return;
406
407         /* Should never get here with non-NULL vm_private_data */
408         if (vma->vm_private_data)
409                 return;
410
411         vma_lock = kmalloc(sizeof(*vma_lock), GFP_KERNEL);
412         if (!vma_lock) {
413                 /*
414                  * If we cannot allocate the structure, then the vma cannot
415                  * participate in pmd sharing.  This is only a possible
416                  * performance enhancement and memory saving issue.
417                  * However, the lock is also used to synchronize page
418                  * faults with truncation.  If the lock is not present,
419                  * unlikely races could leave pages in a file past i_size
420                  * until the file is removed.  Warn in the unlikely case of
421                  * allocation failure.
422                  */
423                 pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
424                 return;
425         }
426
427         kref_init(&vma_lock->refs);
428         init_rwsem(&vma_lock->rw_sema);
429         vma_lock->vma = vma;
430         vma->vm_private_data = vma_lock;
431 }
432
433 /* Helper that removes a struct file_region from the resv_map cache and returns
434  * it for use.
435  */
436 static struct file_region *
437 get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
438 {
439         struct file_region *nrg;
440
441         VM_BUG_ON(resv->region_cache_count <= 0);
442
443         resv->region_cache_count--;
444         nrg = list_first_entry(&resv->region_cache, struct file_region, link);
445         list_del(&nrg->link);
446
447         nrg->from = from;
448         nrg->to = to;
449
450         return nrg;
451 }
452
453 static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
454                                               struct file_region *rg)
455 {
456 #ifdef CONFIG_CGROUP_HUGETLB
457         nrg->reservation_counter = rg->reservation_counter;
458         nrg->css = rg->css;
459         if (rg->css)
460                 css_get(rg->css);
461 #endif
462 }
463
464 /* Helper that records hugetlb_cgroup uncharge info. */
465 static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
466                                                 struct hstate *h,
467                                                 struct resv_map *resv,
468                                                 struct file_region *nrg)
469 {
470 #ifdef CONFIG_CGROUP_HUGETLB
471         if (h_cg) {
472                 nrg->reservation_counter =
473                         &h_cg->rsvd_hugepage[hstate_index(h)];
474                 nrg->css = &h_cg->css;
475                 /*
476                  * The caller will hold exactly one h_cg->css reference for the
477                  * whole contiguous reservation region. But this area might be
478                  * scattered when there are already some file_regions residing in
479                  * it. As a result, many file_regions may share only one css
480                  * reference. In order to ensure that one file_region must hold
481                  * exactly one h_cg->css reference, we should do css_get for
482                  * each file_region and leave the reference held by caller
483                  * untouched.
484                  */
485                 css_get(&h_cg->css);
486                 if (!resv->pages_per_hpage)
487                         resv->pages_per_hpage = pages_per_huge_page(h);
488                 /* pages_per_hpage should be the same for all entries in
489                  * a resv_map.
490                  */
491                 VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
492         } else {
493                 nrg->reservation_counter = NULL;
494                 nrg->css = NULL;
495         }
496 #endif
497 }
498
499 static void put_uncharge_info(struct file_region *rg)
500 {
501 #ifdef CONFIG_CGROUP_HUGETLB
502         if (rg->css)
503                 css_put(rg->css);
504 #endif
505 }
506
507 static bool has_same_uncharge_info(struct file_region *rg,
508                                    struct file_region *org)
509 {
510 #ifdef CONFIG_CGROUP_HUGETLB
511         return rg->reservation_counter == org->reservation_counter &&
512                rg->css == org->css;
513
514 #else
515         return true;
516 #endif
517 }
518
519 static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
520 {
521         struct file_region *nrg, *prg;
522
523         prg = list_prev_entry(rg, link);
524         if (&prg->link != &resv->regions && prg->to == rg->from &&
525             has_same_uncharge_info(prg, rg)) {
526                 prg->to = rg->to;
527
528                 list_del(&rg->link);
529                 put_uncharge_info(rg);
530                 kfree(rg);
531
532                 rg = prg;
533         }
534
535         nrg = list_next_entry(rg, link);
536         if (&nrg->link != &resv->regions && nrg->from == rg->to &&
537             has_same_uncharge_info(nrg, rg)) {
538                 nrg->from = rg->from;
539
540                 list_del(&rg->link);
541                 put_uncharge_info(rg);
542                 kfree(rg);
543         }
544 }
545
546 static inline long
547 hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from,
548                      long to, struct hstate *h, struct hugetlb_cgroup *cg,
549                      long *regions_needed)
550 {
551         struct file_region *nrg;
552
553         if (!regions_needed) {
554                 nrg = get_file_region_entry_from_cache(map, from, to);
555                 record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
556                 list_add(&nrg->link, rg);
557                 coalesce_file_region(map, nrg);
558         } else
559                 *regions_needed += 1;
560
561         return to - from;
562 }
563
564 /*
565  * Must be called with resv->lock held.
566  *
567  * Calling this with regions_needed != NULL will count the number of pages
568  * to be added but will not modify the linked list. And regions_needed will
569  * indicate the number of file_regions needed in the cache to add
570  * the regions for this range.
571  */
572 static long add_reservation_in_range(struct resv_map *resv, long f, long t,
573                                      struct hugetlb_cgroup *h_cg,
574                                      struct hstate *h, long *regions_needed)
575 {
576         long add = 0;
577         struct list_head *head = &resv->regions;
578         long last_accounted_offset = f;
579         struct file_region *iter, *trg = NULL;
580         struct list_head *rg = NULL;
581
582         if (regions_needed)
583                 *regions_needed = 0;
584
585         /* In this loop, we essentially handle an entry for the range
586          * [last_accounted_offset, iter->from), at every iteration, with some
587          * bounds checking.
588          */
589         list_for_each_entry_safe(iter, trg, head, link) {
590                 /* Skip irrelevant regions that start before our range. */
591                 if (iter->from < f) {
592                         /* If this region ends after the last accounted offset,
593                          * then we need to update last_accounted_offset.
594                          */
595                         if (iter->to > last_accounted_offset)
596                                 last_accounted_offset = iter->to;
597                         continue;
598                 }
599
600                 /* When we find a region that starts beyond our range, we've
601                  * finished.
602                  */
603                 if (iter->from >= t) {
604                         rg = iter->link.prev;
605                         break;
606                 }
607
608                 /* Add an entry for last_accounted_offset -> iter->from, and
609                  * update last_accounted_offset.
610                  */
611                 if (iter->from > last_accounted_offset)
612                         add += hugetlb_resv_map_add(resv, iter->link.prev,
613                                                     last_accounted_offset,
614                                                     iter->from, h, h_cg,
615                                                     regions_needed);
616
617                 last_accounted_offset = iter->to;
618         }
619
620         /* Handle the case where our range extends beyond
621          * last_accounted_offset.
622          */
623         if (!rg)
624                 rg = head->prev;
625         if (last_accounted_offset < t)
626                 add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
627                                             t, h, h_cg, regions_needed);
628
629         return add;
630 }
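/*
 * Example (illustrative): if the reserve map already contains the regions
 * [2, 5) and [7, 9), calling add_reservation_in_range(resv, 0, 10, ...) with
 * a non-NULL regions_needed only counts: it reports 3 regions needed (for
 * [0, 2), [5, 7) and [9, 10)) and returns 5, the number of missing pages.
 * A second call with regions_needed == NULL then actually inserts those
 * three file_regions from the cache.
 */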
631
632 /* Must be called with resv->lock acquired. Will drop lock to allocate entries.
633  */
634 static int allocate_file_region_entries(struct resv_map *resv,
635                                         int regions_needed)
636         __must_hold(&resv->lock)
637 {
638         LIST_HEAD(allocated_regions);
639         int to_allocate = 0, i = 0;
640         struct file_region *trg = NULL, *rg = NULL;
641
642         VM_BUG_ON(regions_needed < 0);
643
644         /*
645          * Check for sufficient descriptors in the cache to accommodate
646          * the number of in progress add operations plus regions_needed.
647          *
648          * This is a while loop because when we drop the lock, some other call
649          * to region_add or region_del may have consumed some region_entries,
650          * so we keep looping here until we finally have enough entries for
651          * (adds_in_progress + regions_needed).
652          */
653         while (resv->region_cache_count <
654                (resv->adds_in_progress + regions_needed)) {
655                 to_allocate = resv->adds_in_progress + regions_needed -
656                               resv->region_cache_count;
657
658                 /* At this point, we should have enough entries in the cache
659                  * for all the existing adds_in_progress. We should only be
660                  * needing to allocate for regions_needed.
661                  */
662                 VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);
663
664                 spin_unlock(&resv->lock);
665                 for (i = 0; i < to_allocate; i++) {
666                         trg = kmalloc(sizeof(*trg), GFP_KERNEL);
667                         if (!trg)
668                                 goto out_of_memory;
669                         list_add(&trg->link, &allocated_regions);
670                 }
671
672                 spin_lock(&resv->lock);
673
674                 list_splice(&allocated_regions, &resv->region_cache);
675                 resv->region_cache_count += to_allocate;
676         }
677
678         return 0;
679
680 out_of_memory:
681         list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
682                 list_del(&rg->link);
683                 kfree(rg);
684         }
685         return -ENOMEM;
686 }
687
688 /*
689  * Add the huge page range represented by [f, t) to the reserve
690  * map.  Regions will be taken from the cache to fill in this range.
691  * Sufficient regions should exist in the cache due to the previous
692  * call to region_chg with the same range, but in some cases the cache will not
693  * have sufficient entries due to races with other code doing region_add or
694  * region_del.  The extra needed entries will be allocated.
695  *
696  * regions_needed is the out value provided by a previous call to region_chg.
697  *
698  * Return the number of new huge pages added to the map.  This number is greater
699  * than or equal to zero.  If file_region entries needed to be allocated for
700  * this operation and we were not able to allocate, it returns -ENOMEM.
701  * region_add of regions of length 1 never allocates file_regions and cannot
702  * fail; region_chg will always allocate at least 1 entry and a region_add for
703  * 1 page will only require at most 1 entry.
704  */
705 static long region_add(struct resv_map *resv, long f, long t,
706                        long in_regions_needed, struct hstate *h,
707                        struct hugetlb_cgroup *h_cg)
708 {
709         long add = 0, actual_regions_needed = 0;
710
711         spin_lock(&resv->lock);
712 retry:
713
714         /* Count how many regions are actually needed to execute this add. */
715         add_reservation_in_range(resv, f, t, NULL, NULL,
716                                  &actual_regions_needed);
717
718         /*
719          * Check for sufficient descriptors in the cache to accommodate
720          * this add operation. Note that actual_regions_needed may be greater
721          * than in_regions_needed, as the resv_map may have been modified since
722          * the region_chg call. In this case, we need to make sure that we
723          * allocate extra entries, such that we have enough for all the
724          * existing adds_in_progress, plus the excess needed for this
725          * operation.
726          */
727         if (actual_regions_needed > in_regions_needed &&
728             resv->region_cache_count <
729                     resv->adds_in_progress +
730                             (actual_regions_needed - in_regions_needed)) {
731                 /* region_add operation of range 1 should never need to
732                  * allocate file_region entries.
733                  */
734                 VM_BUG_ON(t - f <= 1);
735
736                 if (allocate_file_region_entries(
737                             resv, actual_regions_needed - in_regions_needed)) {
738                         return -ENOMEM;
739                 }
740
741                 goto retry;
742         }
743
744         add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);
745
746         resv->adds_in_progress -= in_regions_needed;
747
748         spin_unlock(&resv->lock);
749         return add;
750 }
751
752 /*
753  * Examine the existing reserve map and determine how many
754  * huge pages in the specified range [f, t) are NOT currently
755  * represented.  This routine is called before a subsequent
756  * call to region_add that will actually modify the reserve
757  * map to add the specified range [f, t).  region_chg does
758  * not change the number of huge pages represented by the
759  * map.  A number of new file_region structures are added to the cache as
760  * placeholders, for the subsequent region_add call to use. At least 1
761  * file_region structure is added.
762  *
763  * out_regions_needed is the number of regions added to the
764  * resv->adds_in_progress.  This value needs to be provided to a follow up call
765  * to region_add or region_abort for proper accounting.
766  *
767  * Returns the number of huge pages that need to be added to the existing
768  * reservation map for the range [f, t).  This number is greater or equal to
769  * zero.  -ENOMEM is returned if a new file_region structure or cache entry
770  * is needed and can not be allocated.
771  */
772 static long region_chg(struct resv_map *resv, long f, long t,
773                        long *out_regions_needed)
774 {
775         long chg = 0;
776
777         spin_lock(&resv->lock);
778
779         /* Count how many hugepages in this range are NOT represented. */
780         chg = add_reservation_in_range(resv, f, t, NULL, NULL,
781                                        out_regions_needed);
782
783         if (*out_regions_needed == 0)
784                 *out_regions_needed = 1;
785
786         if (allocate_file_region_entries(resv, *out_regions_needed))
787                 return -ENOMEM;
788
789         resv->adds_in_progress += *out_regions_needed;
790
791         spin_unlock(&resv->lock);
792         return chg;
793 }
794
795 /*
796  * Abort the in progress add operation.  The adds_in_progress field
797  * of the resv_map keeps track of the operations in progress between
798  * calls to region_chg and region_add.  Operations are sometimes
799  * aborted after the call to region_chg.  In such cases, region_abort
800  * is called to decrement the adds_in_progress counter. regions_needed
801  * is the value returned by the region_chg call, it is used to decrement
802  * the adds_in_progress counter.
803  *
804  * NOTE: The range arguments [f, t) are not needed or used in this
805  * routine.  They are kept to make reading the calling code easier as
806  * arguments will match the associated region_chg call.
807  */
808 static void region_abort(struct resv_map *resv, long f, long t,
809                          long regions_needed)
810 {
811         spin_lock(&resv->lock);
812         VM_BUG_ON(!resv->region_cache_count);
813         resv->adds_in_progress -= regions_needed;
814         spin_unlock(&resv->lock);
815 }
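/*
 * Typical calling sequence (sketch only; the error path shown is schematic,
 * see the real callers such as hugetlb_reserve_pages() for details):
 *
 *	chg = region_chg(resv, f, t, &regions_needed);
 *	if (chg < 0)
 *		return chg;
 *	if (charging the subpool/cgroup for chg pages fails) {
 *		region_abort(resv, f, t, regions_needed);
 *		return -ENOMEM;
 *	}
 *	add = region_add(resv, f, t, regions_needed, h, h_cg);
 */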
816
817 /*
818  * Delete the specified range [f, t) from the reserve map.  If the
819  * t parameter is LONG_MAX, this indicates that ALL regions after f
820  * should be deleted.  Locate the regions which intersect [f, t)
821  * and either trim, delete or split the existing regions.
822  *
823  * Returns the number of huge pages deleted from the reserve map.
824  * In the normal case, the return value is zero or more.  In the
825  * case where a region must be split, a new region descriptor must
826  * be allocated.  If the allocation fails, -ENOMEM will be returned.
827  * NOTE: If the parameter t == LONG_MAX, then we will never need to split
828  * a region, and will therefore never return -ENOMEM.  Callers specifying
829  * t == LONG_MAX do not need to check for -ENOMEM error.
830  */
831 static long region_del(struct resv_map *resv, long f, long t)
832 {
833         struct list_head *head = &resv->regions;
834         struct file_region *rg, *trg;
835         struct file_region *nrg = NULL;
836         long del = 0;
837
838 retry:
839         spin_lock(&resv->lock);
840         list_for_each_entry_safe(rg, trg, head, link) {
841                 /*
842                  * Skip regions before the range to be deleted.  file_region
843                  * ranges are normally of the form [from, to).  However, there
844                  * may be a "placeholder" entry in the map which is of the form
845                  * (from, to) with from == to.  Check for placeholder entries
846                  * at the beginning of the range to be deleted.
847                  */
848                 if (rg->to <= f && (rg->to != rg->from || rg->to != f))
849                         continue;
850
851                 if (rg->from >= t)
852                         break;
853
854                 if (f > rg->from && t < rg->to) { /* Must split region */
855                         /*
856                          * Check for an entry in the cache before dropping
857                          * lock and attempting allocation.
858                          */
859                         if (!nrg &&
860                             resv->region_cache_count > resv->adds_in_progress) {
861                                 nrg = list_first_entry(&resv->region_cache,
862                                                         struct file_region,
863                                                         link);
864                                 list_del(&nrg->link);
865                                 resv->region_cache_count--;
866                         }
867
868                         if (!nrg) {
869                                 spin_unlock(&resv->lock);
870                                 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
871                                 if (!nrg)
872                                         return -ENOMEM;
873                                 goto retry;
874                         }
875
876                         del += t - f;
877                         hugetlb_cgroup_uncharge_file_region(
878                                 resv, rg, t - f, false);
879
880                         /* New entry for end of split region */
881                         nrg->from = t;
882                         nrg->to = rg->to;
883
884                         copy_hugetlb_cgroup_uncharge_info(nrg, rg);
885
886                         INIT_LIST_HEAD(&nrg->link);
887
888                         /* Original entry is trimmed */
889                         rg->to = f;
890
891                         list_add(&nrg->link, &rg->link);
892                         nrg = NULL;
893                         break;
894                 }
895
896                 if (f <= rg->from && t >= rg->to) { /* Remove entire region */
897                         del += rg->to - rg->from;
898                         hugetlb_cgroup_uncharge_file_region(resv, rg,
899                                                             rg->to - rg->from, true);
900                         list_del(&rg->link);
901                         kfree(rg);
902                         continue;
903                 }
904
905                 if (f <= rg->from) {    /* Trim beginning of region */
906                         hugetlb_cgroup_uncharge_file_region(resv, rg,
907                                                             t - rg->from, false);
908
909                         del += t - rg->from;
910                         rg->from = t;
911                 } else {                /* Trim end of region */
912                         hugetlb_cgroup_uncharge_file_region(resv, rg,
913                                                             rg->to - f, false);
914
915                         del += rg->to - f;
916                         rg->to = f;
917                 }
918         }
919
920         spin_unlock(&resv->lock);
921         kfree(nrg);
922         return del;
923 }
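/*
 * Example (illustrative): if the reserve map holds the single region [0, 10),
 * region_del(resv, 3, 6) must split it: the existing entry is trimmed to
 * [0, 3), a new file_region [6, 10) is inserted after it, and 3 is returned.
 * With t == LONG_MAX the range always extends to the end of every
 * intersecting region, so this split case (and thus -ENOMEM) cannot occur.
 */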
924
925 /*
926  * A rare out-of-memory error was encountered which prevented removal of
927  * the reserve map region for a page.  The huge page itself was freed
928  * and removed from the page cache.  This routine will adjust the subpool
929  * usage count, and the global reserve count if needed.  By incrementing
930  * these counts, the reserve map entry which could not be deleted will
931  * appear as a "reserved" entry instead of simply dangling with incorrect
932  * counts.
933  */
934 void hugetlb_fix_reserve_counts(struct inode *inode)
935 {
936         struct hugepage_subpool *spool = subpool_inode(inode);
937         long rsv_adjust;
938         bool reserved = false;
939
940         rsv_adjust = hugepage_subpool_get_pages(spool, 1);
941         if (rsv_adjust > 0) {
942                 struct hstate *h = hstate_inode(inode);
943
944                 if (!hugetlb_acct_memory(h, 1))
945                         reserved = true;
946         } else if (!rsv_adjust) {
947                 reserved = true;
948         }
949
950         if (!reserved)
951                 pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
952 }
953
954 /*
955  * Count and return the number of huge pages in the reserve map
956  * that intersect with the range [f, t).
957  */
958 static long region_count(struct resv_map *resv, long f, long t)
959 {
960         struct list_head *head = &resv->regions;
961         struct file_region *rg;
962         long chg = 0;
963
964         spin_lock(&resv->lock);
965         /* Locate each segment we overlap with, and count that overlap. */
966         list_for_each_entry(rg, head, link) {
967                 long seg_from;
968                 long seg_to;
969
970                 if (rg->to <= f)
971                         continue;
972                 if (rg->from >= t)
973                         break;
974
975                 seg_from = max(rg->from, f);
976                 seg_to = min(rg->to, t);
977
978                 chg += seg_to - seg_from;
979         }
980         spin_unlock(&resv->lock);
981
982         return chg;
983 }
984
985 /*
986  * Convert the address within this vma to the page offset within
987  * the mapping, in huge page units.
988  */
989 static pgoff_t vma_hugecache_offset(struct hstate *h,
990                         struct vm_area_struct *vma, unsigned long address)
991 {
992         return ((address - vma->vm_start) >> huge_page_shift(h)) +
993                         (vma->vm_pgoff >> huge_page_order(h));
994 }
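/*
 * Example (illustrative, assuming 4KB base pages and 2MB huge pages, i.e.
 * huge_page_shift == 21 and huge_page_order == 9): for a vma with
 * vm_pgoff == 1024 (the mapping starts 4MB, or two huge pages, into the
 * file), an address 6MB past vm_start yields
 * (6MB >> 21) + (1024 >> 9) = 3 + 2 = 5, the index of the huge page within
 * the file.
 */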
995
996 /**
997  * vma_kernel_pagesize - Page size granularity for this VMA.
998  * @vma: The user mapping.
999  *
1000  * Folios in this VMA will be aligned to, and at least the size of, the
1001  * number of bytes returned by this function.
1002  *
1003  * Return: The default size of the folios allocated when backing a VMA.
1004  */
1005 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
1006 {
1007         if (vma->vm_ops && vma->vm_ops->pagesize)
1008                 return vma->vm_ops->pagesize(vma);
1009         return PAGE_SIZE;
1010 }
1011 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
1012
1013 /*
1014  * Return the page size being used by the MMU to back a VMA. In the majority
1015  * of cases, the page size used by the kernel matches the MMU size. On
1016  * architectures where it differs, an architecture-specific 'strong'
1017  * version of this symbol is required.
1018  */
1019 __weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
1020 {
1021         return vma_kernel_pagesize(vma);
1022 }
1023
1024 /*
1025  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
1026  * bits of the reservation map pointer, which are always clear due to
1027  * alignment.
1028  */
1029 #define HPAGE_RESV_OWNER    (1UL << 0)
1030 #define HPAGE_RESV_UNMAPPED (1UL << 1)
1031 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
1032
1033 /*
1034  * These helpers are used to track how many pages are reserved for
1035  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
1036  * is guaranteed to have their future faults succeed.
1037  *
1038  * With the exception of hugetlb_dup_vma_private() which is called at fork(),
1039  * the reserve counters are updated with the hugetlb_lock held. It is safe
1040  * to reset the VMA at fork() time as it is not in use yet and there is no
1041  * chance of the global counters getting corrupted as a result of the values.
1042  *
1043  * The private mapping reservation is represented in a subtly different
1044  * manner to a shared mapping.  A shared mapping has a region map associated
1045  * with the underlying file; this region map represents the backing file
1046  * pages which have ever had a reservation assigned, and this persists even
1047  * after the page is instantiated.  A private mapping has a region map
1048  * associated with the original mmap which is attached to all VMAs which
1049  * reference it; this region map represents those offsets which have consumed
1050  * a reservation, i.e. where pages have been instantiated.
1051  */
1052 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
1053 {
1054         return (unsigned long)vma->vm_private_data;
1055 }
1056
1057 static void set_vma_private_data(struct vm_area_struct *vma,
1058                                                         unsigned long value)
1059 {
1060         vma->vm_private_data = (void *)value;
1061 }
1062
1063 static void
1064 resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
1065                                           struct hugetlb_cgroup *h_cg,
1066                                           struct hstate *h)
1067 {
1068 #ifdef CONFIG_CGROUP_HUGETLB
1069         if (!h_cg || !h) {
1070                 resv_map->reservation_counter = NULL;
1071                 resv_map->pages_per_hpage = 0;
1072                 resv_map->css = NULL;
1073         } else {
1074                 resv_map->reservation_counter =
1075                         &h_cg->rsvd_hugepage[hstate_index(h)];
1076                 resv_map->pages_per_hpage = pages_per_huge_page(h);
1077                 resv_map->css = &h_cg->css;
1078         }
1079 #endif
1080 }
1081
1082 struct resv_map *resv_map_alloc(void)
1083 {
1084         struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
1085         struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
1086
1087         if (!resv_map || !rg) {
1088                 kfree(resv_map);
1089                 kfree(rg);
1090                 return NULL;
1091         }
1092
1093         kref_init(&resv_map->refs);
1094         spin_lock_init(&resv_map->lock);
1095         INIT_LIST_HEAD(&resv_map->regions);
1096         init_rwsem(&resv_map->rw_sema);
1097
1098         resv_map->adds_in_progress = 0;
1099         /*
1100          * Initialize these to 0. On shared mappings, 0's here indicate these
1101          * fields don't do cgroup accounting. On private mappings, these will be
1102          * re-initialized to the proper values, to indicate that hugetlb cgroup
1103          * reservations are to be un-charged from here.
1104          */
1105         resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);
1106
1107         INIT_LIST_HEAD(&resv_map->region_cache);
1108         list_add(&rg->link, &resv_map->region_cache);
1109         resv_map->region_cache_count = 1;
1110
1111         return resv_map;
1112 }
1113
1114 void resv_map_release(struct kref *ref)
1115 {
1116         struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
1117         struct list_head *head = &resv_map->region_cache;
1118         struct file_region *rg, *trg;
1119
1120         /* Clear out any active regions before we release the map. */
1121         region_del(resv_map, 0, LONG_MAX);
1122
1123         /* ... and any entries left in the cache */
1124         list_for_each_entry_safe(rg, trg, head, link) {
1125                 list_del(&rg->link);
1126                 kfree(rg);
1127         }
1128
1129         VM_BUG_ON(resv_map->adds_in_progress);
1130
1131         kfree(resv_map);
1132 }
1133
1134 static inline struct resv_map *inode_resv_map(struct inode *inode)
1135 {
1136         /*
1137          * At inode evict time, i_mapping may not point to the original
1138          * address space within the inode.  This original address space
1139          * contains the pointer to the resv_map.  So, always use the
1140          * address space embedded within the inode.
1141          * The VERY common case is inode->mapping == &inode->i_data but,
1142          * this may not be true for device special inodes.
1143          */
1144         return (struct resv_map *)(&inode->i_data)->i_private_data;
1145 }
1146
1147 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
1148 {
1149         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1150         if (vma->vm_flags & VM_MAYSHARE) {
1151                 struct address_space *mapping = vma->vm_file->f_mapping;
1152                 struct inode *inode = mapping->host;
1153
1154                 return inode_resv_map(inode);
1155
1156         } else {
1157                 return (struct resv_map *)(get_vma_private_data(vma) &
1158                                                         ~HPAGE_RESV_MASK);
1159         }
1160 }
1161
1162 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
1163 {
1164         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1165         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
1166
1167         set_vma_private_data(vma, (unsigned long)map);
1168 }
1169
1170 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
1171 {
1172         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1173         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
1174
1175         set_vma_private_data(vma, get_vma_private_data(vma) | flags);
1176 }
1177
1178 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
1179 {
1180         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1181
1182         return (get_vma_private_data(vma) & flag) != 0;
1183 }
1184
1185 bool __vma_private_lock(struct vm_area_struct *vma)
1186 {
1187         return !(vma->vm_flags & VM_MAYSHARE) &&
1188                 get_vma_private_data(vma) & ~HPAGE_RESV_MASK &&
1189                 is_vma_resv_set(vma, HPAGE_RESV_OWNER);
1190 }
1191
1192 void hugetlb_dup_vma_private(struct vm_area_struct *vma)
1193 {
1194         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1195         /*
1196          * Clear vm_private_data
1197          * - For shared mappings this is a per-vma semaphore that may be
1198          *   allocated in a subsequent call to hugetlb_vm_op_open.
1199          *   Before clearing, make sure pointer is not associated with vma
1200          *   as this will leak the structure.  This is the case when called
1201          *   via clear_vma_resv_huge_pages() and hugetlb_vm_op_open has already
1202          *   been called to allocate a new structure.
1203          * - For MAP_PRIVATE mappings, this is the reserve map which does
1204          *   not apply to children.  Faults generated by the children are
1205          *   not guaranteed to succeed, even if read-only.
1206          */
1207         if (vma->vm_flags & VM_MAYSHARE) {
1208                 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
1209
1210                 if (vma_lock && vma_lock->vma != vma)
1211                         vma->vm_private_data = NULL;
1212         } else
1213                 vma->vm_private_data = NULL;
1214 }
1215
1216 /*
1217  * Reset and decrement one ref on hugepage private reservation.
1218  * Called with mm->mmap_lock writer semaphore held.
1219  * This function should only be used by move_vma() and operate on a
1220  * same-sized vma. It should never come here with the last ref on the
1221  * reservation.
1222  */
1223 void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
1224 {
1225         /*
1226          * Clear the old hugetlb private page reservation.
1227          * It has already been transferred to new_vma.
1228          *
1229          * During a mremap() operation of a hugetlb vma we call move_vma()
1230          * which copies vma into new_vma and unmaps vma. After the copy
1231          * operation both new_vma and vma share a reference to the resv_map
1232          * struct, and at that point vma is about to be unmapped. We don't
1233          * want to return the reservation to the pool at unmap of vma because
1234          * the reservation still lives on in new_vma, so simply decrement the
1235          * ref here and remove the resv_map reference from this vma.
1236          */
1237         struct resv_map *reservations = vma_resv_map(vma);
1238
1239         if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1240                 resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
1241                 kref_put(&reservations->refs, resv_map_release);
1242         }
1243
1244         hugetlb_dup_vma_private(vma);
1245 }
1246
1247 /* Returns true if the VMA has associated reserve pages */
1248 static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
1249 {
1250         if (vma->vm_flags & VM_NORESERVE) {
1251                 /*
1252                  * This address is already reserved by another process (chg == 0),
1253                  * so we should decrement the reserved count. Without decrementing,
1254                  * the reserve count remains after releasing the inode, because this
1255                  * allocated page will go into the page cache and is regarded as
1256                  * coming from the reserved pool in the releasing step.  Currently, we
1257                  * don't have any other solution to deal with this situation
1258                  * properly, so add a work-around here.
1259                  */
1260                 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
1261                         return true;
1262                 else
1263                         return false;
1264         }
1265
1266         /* Shared mappings always use reserves */
1267         if (vma->vm_flags & VM_MAYSHARE) {
1268                 /*
1269                  * We know VM_NORESERVE is not set.  Therefore, there SHOULD
1270                  * be a region map for all pages.  The only situation where
1271                  * there is no region map is if a hole was punched via
1272                  * fallocate.  In this case, there really are no reserves to
1273                  * use.  This situation is indicated if chg != 0.
1274                  */
1275                 if (chg)
1276                         return false;
1277                 else
1278                         return true;
1279         }
1280
1281         /*
1282          * Only the process that called mmap() has reserves for
1283          * private mappings.
1284          */
1285         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1286                 /*
1287                  * Like the shared case above, a hole punch or truncate
1288                  * could have been performed on the private mapping.
1289                  * Examine the value of chg to determine if reserves
1290                  * actually exist or were previously consumed.
1291                  * Very Subtle - The value of chg comes from a previous
1292                  * call to vma_needs_reserves().  The reserve map for
1293                  * private mappings has different (opposite) semantics
1294                  * than that of shared mappings.  vma_needs_reserves()
1295                  * has already taken this difference in semantics into
1296                  * account.  Therefore, the meaning of chg is the same
1297                  * as in the shared case above.  Code could easily be
1298                  * combined, but keeping it separate draws attention to
1299                  * subtle differences.
1300                  */
1301                 if (chg)
1302                         return false;
1303                 else
1304                         return true;
1305         }
1306
1307         return false;
1308 }
1309
1310 static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
1311 {
1312         int nid = folio_nid(folio);
1313
1314         lockdep_assert_held(&hugetlb_lock);
1315         VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
1316
1317         list_move(&folio->lru, &h->hugepage_freelists[nid]);
1318         h->free_huge_pages++;
1319         h->free_huge_pages_node[nid]++;
1320         folio_set_hugetlb_freed(folio);
1321 }
1322
1323 static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h,
1324                                                                 int nid)
1325 {
1326         struct folio *folio;
1327         bool pin = !!(current->flags & PF_MEMALLOC_PIN);
1328
1329         lockdep_assert_held(&hugetlb_lock);
1330         list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) {
1331                 if (pin && !folio_is_longterm_pinnable(folio))
1332                         continue;
1333
1334                 if (folio_test_hwpoison(folio))
1335                         continue;
1336
1337                 list_move(&folio->lru, &h->hugepage_activelist);
1338                 folio_ref_unfreeze(folio, 1);
1339                 folio_clear_hugetlb_freed(folio);
1340                 h->free_huge_pages--;
1341                 h->free_huge_pages_node[nid]--;
1342                 return folio;
1343         }
1344
1345         return NULL;
1346 }
1347
1348 static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask,
1349                                                         int nid, nodemask_t *nmask)
1350 {
1351         unsigned int cpuset_mems_cookie;
1352         struct zonelist *zonelist;
1353         struct zone *zone;
1354         struct zoneref *z;
1355         int node = NUMA_NO_NODE;
1356
1357         zonelist = node_zonelist(nid, gfp_mask);
1358
1359 retry_cpuset:
1360         cpuset_mems_cookie = read_mems_allowed_begin();
1361         for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
1362                 struct folio *folio;
1363
1364                 if (!cpuset_zone_allowed(zone, gfp_mask))
1365                         continue;
1366                 /*
1367                  * no need to ask again on the same node. Pool is node rather than
1368                  * zone aware
1369                  */
1370                 if (zone_to_nid(zone) == node)
1371                         continue;
1372                 node = zone_to_nid(zone);
1373
1374                 folio = dequeue_hugetlb_folio_node_exact(h, node);
1375                 if (folio)
1376                         return folio;
1377         }
1378         if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
1379                 goto retry_cpuset;
1380
1381         return NULL;
1382 }
1383
1384 static unsigned long available_huge_pages(struct hstate *h)
1385 {
1386         return h->free_huge_pages - h->resv_huge_pages;
1387 }
1388
1389 static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
1390                                 struct vm_area_struct *vma,
1391                                 unsigned long address, int avoid_reserve,
1392                                 long chg)
1393 {
1394         struct folio *folio = NULL;
1395         struct mempolicy *mpol;
1396         gfp_t gfp_mask;
1397         nodemask_t *nodemask;
1398         int nid;
1399
1400         /*
1401          * A child process with MAP_PRIVATE mappings created by its parent
1402          * has no page reserves. This check ensures that reservations are
1403          * not "stolen". The child may still get SIGKILLed.
1404          */
1405         if (!vma_has_reserves(vma, chg) && !available_huge_pages(h))
1406                 goto err;
1407
1408         /* If reserves cannot be used, ensure enough pages are in the pool */
1409         if (avoid_reserve && !available_huge_pages(h))
1410                 goto err;
1411
1412         gfp_mask = htlb_alloc_mask(h);
1413         nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1414
1415         if (mpol_is_preferred_many(mpol)) {
1416                 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
1417                                                         nid, nodemask);
1418
1419                 /* Fallback to all nodes if page==NULL */
1420                 nodemask = NULL;
1421         }
1422
1423         if (!folio)
1424                 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
1425                                                         nid, nodemask);
1426
1427         if (folio && !avoid_reserve && vma_has_reserves(vma, chg)) {
1428                 folio_set_hugetlb_restore_reserve(folio);
1429                 h->resv_huge_pages--;
1430         }
1431
1432         mpol_cond_put(mpol);
1433         return folio;
1434
1435 err:
1436         return NULL;
1437 }
1438
1439 /*
1440  * common helper functions for hstate_next_node_to_{alloc|free}.
1441  * We may have allocated or freed a huge page based on a different
1442  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
1443  * be outside of *nodes_allowed.  Ensure that we use an allowed
1444  * node for alloc or free.
1445  */
1446 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
1447 {
1448         nid = next_node_in(nid, *nodes_allowed);
1449         VM_BUG_ON(nid >= MAX_NUMNODES);
1450
1451         return nid;
1452 }
1453
1454 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
1455 {
1456         if (!node_isset(nid, *nodes_allowed))
1457                 nid = next_node_allowed(nid, nodes_allowed);
1458         return nid;
1459 }
1460
1461 /*
1462  * returns the previously saved node ["this node"] from which to
1463  * allocate a persistent huge page for the pool and advance the
1464  * next node from which to allocate, handling wrap at end of node
1465  * mask.
1466  */
1467 static int hstate_next_node_to_alloc(struct hstate *h,
1468                                         nodemask_t *nodes_allowed)
1469 {
1470         int nid;
1471
1472         VM_BUG_ON(!nodes_allowed);
1473
1474         nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
1475         h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
1476
1477         return nid;
1478 }
1479
1480 /*
1481  * helper for remove_pool_hugetlb_folio() - return the previously saved
1482  * node ["this node"] from which to free a huge page.  Advance the
1483  * next node id whether or not we find a free huge page to free so
1484  * that the next attempt to free addresses the next node.
1485  */
1486 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
1487 {
1488         int nid;
1489
1490         VM_BUG_ON(!nodes_allowed);
1491
1492         nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
1493         h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
1494
1495         return nid;
1496 }
1497
1498 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)           \
1499         for (nr_nodes = nodes_weight(*mask);                            \
1500                 nr_nodes > 0 &&                                         \
1501                 ((node = hstate_next_node_to_alloc(hs, mask)) || 1);    \
1502                 nr_nodes--)
1503
1504 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask)            \
1505         for (nr_nodes = nodes_weight(*mask);                            \
1506                 nr_nodes > 0 &&                                         \
1507                 ((node = hstate_next_node_to_free(hs, mask)) || 1);     \
1508                 nr_nodes--)
1509
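#if 0   /* editor's sketch, not part of the original source */
/*
 * Minimal usage example of the round-robin helpers above: visit each node
 * in the mask once, starting at h->next_nid_to_alloc, and stop at the
 * first node where the (hypothetical) per-node step succeeds.  Real
 * callers such as alloc_pool_huge_folio() below follow the same pattern.
 */
static int demo_alloc_interleaved(struct hstate *h, nodemask_t *nodes_allowed)
{
        int nr_nodes, node;

        for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
                if (demo_try_node_alloc(h, node))       /* hypothetical helper */
                        return node;
        }
        return NUMA_NO_NODE;
}
#endif
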
1510 /* used to demote non-gigantic huge pages as well */
1511 static void __destroy_compound_gigantic_folio(struct folio *folio,
1512                                         unsigned int order, bool demote)
1513 {
1514         int i;
1515         int nr_pages = 1 << order;
1516         struct page *p;
1517
1518         atomic_set(&folio->_entire_mapcount, 0);
1519         atomic_set(&folio->_nr_pages_mapped, 0);
1520         atomic_set(&folio->_pincount, 0);
1521
1522         for (i = 1; i < nr_pages; i++) {
1523                 p = folio_page(folio, i);
1524                 p->flags &= ~PAGE_FLAGS_CHECK_AT_FREE;
1525                 p->mapping = NULL;
1526                 clear_compound_head(p);
1527                 if (!demote)
1528                         set_page_refcounted(p);
1529         }
1530
1531         __folio_clear_head(folio);
1532 }
1533
1534 static void destroy_compound_hugetlb_folio_for_demote(struct folio *folio,
1535                                         unsigned int order)
1536 {
1537         __destroy_compound_gigantic_folio(folio, order, true);
1538 }
1539
1540 #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
1541 static void destroy_compound_gigantic_folio(struct folio *folio,
1542                                         unsigned int order)
1543 {
1544         __destroy_compound_gigantic_folio(folio, order, false);
1545 }
1546
1547 static void free_gigantic_folio(struct folio *folio, unsigned int order)
1548 {
1549         /*
1550          * If the page isn't allocated using the cma allocator,
1551          * cma_release() returns false.
1552          */
1553 #ifdef CONFIG_CMA
1554         int nid = folio_nid(folio);
1555
1556         if (cma_release(hugetlb_cma[nid], &folio->page, 1 << order))
1557                 return;
1558 #endif
1559
1560         free_contig_range(folio_pfn(folio), 1 << order);
1561 }
1562
1563 #ifdef CONFIG_CONTIG_ALLOC
1564 static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
1565                 int nid, nodemask_t *nodemask)
1566 {
1567         struct page *page;
1568         unsigned long nr_pages = pages_per_huge_page(h);
1569         if (nid == NUMA_NO_NODE)
1570                 nid = numa_mem_id();
1571
1572 #ifdef CONFIG_CMA
1573         {
1574                 int node;
1575
1576                 if (hugetlb_cma[nid]) {
1577                         page = cma_alloc(hugetlb_cma[nid], nr_pages,
1578                                         huge_page_order(h), true);
1579                         if (page)
1580                                 return page_folio(page);
1581                 }
1582
1583                 if (!(gfp_mask & __GFP_THISNODE)) {
1584                         for_each_node_mask(node, *nodemask) {
1585                                 if (node == nid || !hugetlb_cma[node])
1586                                         continue;
1587
1588                                 page = cma_alloc(hugetlb_cma[node], nr_pages,
1589                                                 huge_page_order(h), true);
1590                                 if (page)
1591                                         return page_folio(page);
1592                         }
1593                 }
1594         }
1595 #endif
1596
1597         page = alloc_contig_pages(nr_pages, gfp_mask, nid, nodemask);
1598         return page ? page_folio(page) : NULL;
1599 }
1600
1601 #else /* !CONFIG_CONTIG_ALLOC */
1602 static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
1603                                         int nid, nodemask_t *nodemask)
1604 {
1605         return NULL;
1606 }
1607 #endif /* CONFIG_CONTIG_ALLOC */
1608
1609 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
1610 static struct folio *alloc_gigantic_folio(struct hstate *h, gfp_t gfp_mask,
1611                                         int nid, nodemask_t *nodemask)
1612 {
1613         return NULL;
1614 }
1615 static inline void free_gigantic_folio(struct folio *folio,
1616                                                 unsigned int order) { }
1617 static inline void destroy_compound_gigantic_folio(struct folio *folio,
1618                                                 unsigned int order) { }
1619 #endif
1620
1621 static inline void __clear_hugetlb_destructor(struct hstate *h,
1622                                                 struct folio *folio)
1623 {
1624         lockdep_assert_held(&hugetlb_lock);
1625
1626         folio_clear_hugetlb(folio);
1627 }
1628
1629 /*
1630  * Remove hugetlb folio from lists.
1631  * If vmemmap exists for the folio, update dtor so that the folio appears
1632  * as just a compound page.  Otherwise, wait until after allocating vmemmap
1633  * to update dtor.
1634  *
1635  * A reference is held on the folio, except in the case of demote.
1636  *
1637  * Must be called with hugetlb lock held.
1638  */
1639 static void __remove_hugetlb_folio(struct hstate *h, struct folio *folio,
1640                                                         bool adjust_surplus,
1641                                                         bool demote)
1642 {
1643         int nid = folio_nid(folio);
1644
1645         VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio(folio), folio);
1646         VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio_rsvd(folio), folio);
1647
1648         lockdep_assert_held(&hugetlb_lock);
1649         if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
1650                 return;
1651
1652         list_del(&folio->lru);
1653
1654         if (folio_test_hugetlb_freed(folio)) {
1655                 h->free_huge_pages--;
1656                 h->free_huge_pages_node[nid]--;
1657         }
1658         if (adjust_surplus) {
1659                 h->surplus_huge_pages--;
1660                 h->surplus_huge_pages_node[nid]--;
1661         }
1662
1663         /*
1664          * We can only clear the hugetlb destructor after allocating vmemmap
1665          * pages.  Otherwise, someone (memory error handling) may try to write
1666          * to tail struct pages.
1667          */
1668         if (!folio_test_hugetlb_vmemmap_optimized(folio))
1669                 __clear_hugetlb_destructor(h, folio);
1670
1671         /*
1672          * In the case of demote we do not ref count the page as it will soon
1673          * be turned into a page of smaller size.
1674          */
1675         if (!demote)
1676                 folio_ref_unfreeze(folio, 1);
1677
1678         h->nr_huge_pages--;
1679         h->nr_huge_pages_node[nid]--;
1680 }
1681
1682 static void remove_hugetlb_folio(struct hstate *h, struct folio *folio,
1683                                                         bool adjust_surplus)
1684 {
1685         __remove_hugetlb_folio(h, folio, adjust_surplus, false);
1686 }
1687
1688 static void remove_hugetlb_folio_for_demote(struct hstate *h, struct folio *folio,
1689                                                         bool adjust_surplus)
1690 {
1691         __remove_hugetlb_folio(h, folio, adjust_surplus, true);
1692 }
1693
1694 static void add_hugetlb_folio(struct hstate *h, struct folio *folio,
1695                              bool adjust_surplus)
1696 {
1697         int zeroed;
1698         int nid = folio_nid(folio);
1699
1700         VM_BUG_ON_FOLIO(!folio_test_hugetlb_vmemmap_optimized(folio), folio);
1701
1702         lockdep_assert_held(&hugetlb_lock);
1703
1704         INIT_LIST_HEAD(&folio->lru);
1705         h->nr_huge_pages++;
1706         h->nr_huge_pages_node[nid]++;
1707
1708         if (adjust_surplus) {
1709                 h->surplus_huge_pages++;
1710                 h->surplus_huge_pages_node[nid]++;
1711         }
1712
1713         folio_set_hugetlb(folio);
1714         folio_change_private(folio, NULL);
1715         /*
1716          * We have to set hugetlb_vmemmap_optimized again as above
1717          * folio_change_private(folio, NULL) cleared it.
1718          */
1719         folio_set_hugetlb_vmemmap_optimized(folio);
1720
1721         /*
1722          * This folio is about to be managed by the hugetlb allocator and
1723          * should have no users.  Drop our reference, and check for others
1724          * just in case.
1725          */
1726         zeroed = folio_put_testzero(folio);
1727         if (unlikely(!zeroed))
1728                 /*
1729                  * It is VERY unlikely someone else has taken a ref
1730                  * on the folio.  In this case, we simply return as
1731                  * free_huge_folio() will be called when this other ref
1732                  * is dropped.
1733                  */
1734                 return;
1735
1736         arch_clear_hugepage_flags(&folio->page);
1737         enqueue_hugetlb_folio(h, folio);
1738 }
1739
1740 static void __update_and_free_hugetlb_folio(struct hstate *h,
1741                                                 struct folio *folio)
1742 {
1743         bool clear_dtor = folio_test_hugetlb_vmemmap_optimized(folio);
1744
1745         if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
1746                 return;
1747
1748         /*
1749          * If we don't know which subpages are hwpoisoned, we can't free
1750          * the hugepage, so it's leaked intentionally.
1751          */
1752         if (folio_test_hugetlb_raw_hwp_unreliable(folio))
1753                 return;
1754
1755         /*
1756          * If folio is not vmemmap optimized (!clear_dtor), then the folio
1757          * is no longer identified as a hugetlb page.  hugetlb_vmemmap_restore_folio
1758          * can only be passed hugetlb pages and will BUG otherwise.
1759          */
1760         if (clear_dtor && hugetlb_vmemmap_restore_folio(h, folio)) {
1761                 spin_lock_irq(&hugetlb_lock);
1762                 /*
1763                  * If we cannot allocate vmemmap pages, just refuse to free the
1764                  * page and put the page back on the hugetlb free list and treat
1765                  * as a surplus page.
1766                  */
1767                 add_hugetlb_folio(h, folio, true);
1768                 spin_unlock_irq(&hugetlb_lock);
1769                 return;
1770         }
1771
1772         /*
1773          * Move PageHWPoison flag from head page to the raw error pages,
1774          * which makes any healthy subpages reusable.
1775          */
1776         if (unlikely(folio_test_hwpoison(folio)))
1777                 folio_clear_hugetlb_hwpoison(folio);
1778
1779         /*
1780          * If vmemmap pages were allocated above, then we need to clear the
1781          * hugetlb destructor under the hugetlb lock.
1782          */
1783         if (clear_dtor) {
1784                 spin_lock_irq(&hugetlb_lock);
1785                 __clear_hugetlb_destructor(h, folio);
1786                 spin_unlock_irq(&hugetlb_lock);
1787         }
1788
1789         /*
1790          * Non-gigantic pages demoted from CMA allocated gigantic pages
1791          * need to be given back to CMA in free_gigantic_folio.
1792          */
1793         if (hstate_is_gigantic(h) ||
1794             hugetlb_cma_folio(folio, huge_page_order(h))) {
1795                 destroy_compound_gigantic_folio(folio, huge_page_order(h));
1796                 free_gigantic_folio(folio, huge_page_order(h));
1797         } else {
1798                 __free_pages(&folio->page, huge_page_order(h));
1799         }
1800 }
1801
1802 /*
1803  * As update_and_free_hugetlb_folio() can be called under any context, we cannot
1804  * use GFP_KERNEL to allocate vmemmap pages. However, we can defer the
1805  * actual freeing to a workqueue to avoid using GFP_ATOMIC to allocate
1806  * the vmemmap pages.
1807  *
1808  * free_hpage_workfn() locklessly retrieves the linked list of pages to be
1809  * freed and frees them one-by-one. As the page->mapping pointer is going
1810  * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node
1811  * structure of a lockless linked list of huge pages to be freed.
1812  */
1813 static LLIST_HEAD(hpage_freelist);
1814
1815 static void free_hpage_workfn(struct work_struct *work)
1816 {
1817         struct llist_node *node;
1818
1819         node = llist_del_all(&hpage_freelist);
1820
1821         while (node) {
1822                 struct folio *folio;
1823                 struct hstate *h;
1824
1825                 folio = container_of((struct address_space **)node,
1826                                      struct folio, mapping);
1827                 node = node->next;
1828                 folio->mapping = NULL;
1829                 /*
1830                  * The VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio) in
1831                  * folio_hstate() is going to trigger because a previous call to
1832                  * remove_hugetlb_folio() will clear the hugetlb bit, so do
1833                  * not use folio_hstate() directly.
1834                  */
1835                 h = size_to_hstate(folio_size(folio));
1836
1837                 __update_and_free_hugetlb_folio(h, folio);
1838
1839                 cond_resched();
1840         }
1841 }
1842 static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
1843
1844 static inline void flush_free_hpage_work(struct hstate *h)
1845 {
1846         if (hugetlb_vmemmap_optimizable(h))
1847                 flush_work(&free_hpage_work);
1848 }
1849
1850 static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
1851                                  bool atomic)
1852 {
1853         if (!folio_test_hugetlb_vmemmap_optimized(folio) || !atomic) {
1854                 __update_and_free_hugetlb_folio(h, folio);
1855                 return;
1856         }
1857
1858         /*
1859          * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
1860          *
1861          * Only call schedule_work() if hpage_freelist was previously
1862          * empty. Otherwise, schedule_work() has already been called but the workfn
1863          * hasn't retrieved the list yet.
1864          */
1865         if (llist_add((struct llist_node *)&folio->mapping, &hpage_freelist))
1866                 schedule_work(&free_hpage_work);
1867 }
1868
1869 static void bulk_vmemmap_restore_error(struct hstate *h,
1870                                         struct list_head *folio_list,
1871                                         struct list_head *non_hvo_folios)
1872 {
1873         struct folio *folio, *t_folio;
1874
1875         if (!list_empty(non_hvo_folios)) {
1876                 /*
1877                  * Free any restored hugetlb pages so that restore of the
1878                  * entire list can be retried.
1879                  * The idea is that in the common case of ENOMEM errors freeing
1880                  * hugetlb pages with vmemmap we will free up memory so that we
1881                  * can allocate vmemmap for more hugetlb pages.
1882                  */
1883                 list_for_each_entry_safe(folio, t_folio, non_hvo_folios, lru) {
1884                         list_del(&folio->lru);
1885                         spin_lock_irq(&hugetlb_lock);
1886                         __clear_hugetlb_destructor(h, folio);
1887                         spin_unlock_irq(&hugetlb_lock);
1888                         update_and_free_hugetlb_folio(h, folio, false);
1889                         cond_resched();
1890                 }
1891         } else {
1892                 /*
1893                  * In the case where there are no folios which can be
1894                  * immediately freed, we loop through the list trying to restore
1895                  * vmemmap individually in the hope that someone elsewhere may
1896                  * have done something to cause success (such as freeing some
1897                  * memory).  If unable to restore a hugetlb page, the hugetlb
1898                  * page is made a surplus page and removed from the list.
1899                  * If we are able to restore vmemmap and free one hugetlb page, we
1900                  * quit processing the list to retry the bulk operation.
1901                  */
1902                 list_for_each_entry_safe(folio, t_folio, folio_list, lru)
1903                         if (hugetlb_vmemmap_restore_folio(h, folio)) {
1904                                 list_del(&folio->lru);
1905                                 spin_lock_irq(&hugetlb_lock);
1906                                 add_hugetlb_folio(h, folio, true);
1907                                 spin_unlock_irq(&hugetlb_lock);
1908                         } else {
1909                                 list_del(&folio->lru);
1910                                 spin_lock_irq(&hugetlb_lock);
1911                                 __clear_hugetlb_destructor(h, folio);
1912                                 spin_unlock_irq(&hugetlb_lock);
1913                                 update_and_free_hugetlb_folio(h, folio, false);
1914                                 cond_resched();
1915                                 break;
1916                         }
1917         }
1918 }
1919
1920 static void update_and_free_pages_bulk(struct hstate *h,
1921                                                 struct list_head *folio_list)
1922 {
1923         long ret;
1924         struct folio *folio, *t_folio;
1925         LIST_HEAD(non_hvo_folios);
1926
1927         /*
1928          * First allocate required vmemmap (if necessary) for all folios.
1929          * Carefully handle errors and free up any available hugetlb pages
1930          * in an effort to make forward progress.
1931          */
1932 retry:
1933         ret = hugetlb_vmemmap_restore_folios(h, folio_list, &non_hvo_folios);
1934         if (ret < 0) {
1935                 bulk_vmemmap_restore_error(h, folio_list, &non_hvo_folios);
1936                 goto retry;
1937         }
1938
1939         /*
1940          * At this point, the list should be empty, ret should be >= 0, and there
1941          * should only be pages on the non_hvo_folios list.
1942          * Do note that the non_hvo_folios list could be empty.
1943          * Without HVO enabled, ret will be 0 and there is no need to call
1944          * __clear_hugetlb_destructor as this was done previously.
1945          */
1946         VM_WARN_ON(!list_empty(folio_list));
1947         VM_WARN_ON(ret < 0);
1948         if (!list_empty(&non_hvo_folios) && ret) {
1949                 spin_lock_irq(&hugetlb_lock);
1950                 list_for_each_entry(folio, &non_hvo_folios, lru)
1951                         __clear_hugetlb_destructor(h, folio);
1952                 spin_unlock_irq(&hugetlb_lock);
1953         }
1954
1955         list_for_each_entry_safe(folio, t_folio, &non_hvo_folios, lru) {
1956                 update_and_free_hugetlb_folio(h, folio, false);
1957                 cond_resched();
1958         }
1959 }
1960
1961 struct hstate *size_to_hstate(unsigned long size)
1962 {
1963         struct hstate *h;
1964
1965         for_each_hstate(h) {
1966                 if (huge_page_size(h) == size)
1967                         return h;
1968         }
1969         return NULL;
1970 }
1971
1972 void free_huge_folio(struct folio *folio)
1973 {
1974         /*
1975          * Can't pass hstate in here because it is called from the
1976          * compound page destructor.
1977          */
1978         struct hstate *h = folio_hstate(folio);
1979         int nid = folio_nid(folio);
1980         struct hugepage_subpool *spool = hugetlb_folio_subpool(folio);
1981         bool restore_reserve;
1982         unsigned long flags;
1983
1984         VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
1985         VM_BUG_ON_FOLIO(folio_mapcount(folio), folio);
1986
1987         hugetlb_set_folio_subpool(folio, NULL);
1988         if (folio_test_anon(folio))
1989                 __ClearPageAnonExclusive(&folio->page);
1990         folio->mapping = NULL;
1991         restore_reserve = folio_test_hugetlb_restore_reserve(folio);
1992         folio_clear_hugetlb_restore_reserve(folio);
1993
1994         /*
1995          * If HPageRestoreReserve was set on page, page allocation consumed a
1996          * reservation.  If the page was associated with a subpool, there
1997          * would have been a page reserved in the subpool before allocation
1998          * via hugepage_subpool_get_pages().  Since we are 'restoring' the
1999          * reservation, do not call hugepage_subpool_put_pages() as this will
2000          * remove the reserved page from the subpool.
2001          */
2002         if (!restore_reserve) {
2003                 /*
2004                  * A return code of zero implies that the subpool will be
2005                  * under its minimum size if the reservation is not restored
2006                  * after page is free.  Therefore, force restore_reserve
2007                  * operation.
2008                  */
2009                 if (hugepage_subpool_put_pages(spool, 1) == 0)
2010                         restore_reserve = true;
2011         }
2012
2013         spin_lock_irqsave(&hugetlb_lock, flags);
2014         folio_clear_hugetlb_migratable(folio);
2015         hugetlb_cgroup_uncharge_folio(hstate_index(h),
2016                                      pages_per_huge_page(h), folio);
2017         hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
2018                                           pages_per_huge_page(h), folio);
2019         mem_cgroup_uncharge(folio);
2020         if (restore_reserve)
2021                 h->resv_huge_pages++;
2022
2023         if (folio_test_hugetlb_temporary(folio)) {
2024                 remove_hugetlb_folio(h, folio, false);
2025                 spin_unlock_irqrestore(&hugetlb_lock, flags);
2026                 update_and_free_hugetlb_folio(h, folio, true);
2027         } else if (h->surplus_huge_pages_node[nid]) {
2028                 /* remove the page from active list */
2029                 remove_hugetlb_folio(h, folio, true);
2030                 spin_unlock_irqrestore(&hugetlb_lock, flags);
2031                 update_and_free_hugetlb_folio(h, folio, true);
2032         } else {
2033                 arch_clear_hugepage_flags(&folio->page);
2034                 enqueue_hugetlb_folio(h, folio);
2035                 spin_unlock_irqrestore(&hugetlb_lock, flags);
2036         }
2037 }
2038
2039 /*
2040  * Must be called with the hugetlb lock held
2041  */
2042 static void __prep_account_new_huge_page(struct hstate *h, int nid)
2043 {
2044         lockdep_assert_held(&hugetlb_lock);
2045         h->nr_huge_pages++;
2046         h->nr_huge_pages_node[nid]++;
2047 }
2048
2049 static void init_new_hugetlb_folio(struct hstate *h, struct folio *folio)
2050 {
2051         folio_set_hugetlb(folio);
2052         INIT_LIST_HEAD(&folio->lru);
2053         hugetlb_set_folio_subpool(folio, NULL);
2054         set_hugetlb_cgroup(folio, NULL);
2055         set_hugetlb_cgroup_rsvd(folio, NULL);
2056 }
2057
2058 static void __prep_new_hugetlb_folio(struct hstate *h, struct folio *folio)
2059 {
2060         init_new_hugetlb_folio(h, folio);
2061         hugetlb_vmemmap_optimize_folio(h, folio);
2062 }
2063
2064 static void prep_new_hugetlb_folio(struct hstate *h, struct folio *folio, int nid)
2065 {
2066         __prep_new_hugetlb_folio(h, folio);
2067         spin_lock_irq(&hugetlb_lock);
2068         __prep_account_new_huge_page(h, nid);
2069         spin_unlock_irq(&hugetlb_lock);
2070 }
2071
2072 static bool __prep_compound_gigantic_folio(struct folio *folio,
2073                                         unsigned int order, bool demote)
2074 {
2075         int i, j;
2076         int nr_pages = 1 << order;
2077         struct page *p;
2078
2079         __folio_clear_reserved(folio);
2080         for (i = 0; i < nr_pages; i++) {
2081                 p = folio_page(folio, i);
2082
2083                 /*
2084                  * For gigantic hugepages allocated through bootmem at
2085                  * boot, it's safer to be consistent with the not-gigantic
2086                  * hugepages and clear the PG_reserved bit from all tail pages
2087                  * too.  Otherwise drivers using get_user_pages() to access tail
2088                  * pages may get the reference counting wrong if they see
2089                  * PG_reserved set on a tail page (despite the head page not
2090                  * having PG_reserved set).  Enforcing this consistency between
2091                  * head and tail pages allows drivers to optimize away a check
2092                  * on the head page when they need to know if put_page() is needed
2093                  * after get_user_pages().
2094                  */
2095                 if (i != 0)     /* head page cleared above */
2096                         __ClearPageReserved(p);
2097                 /*
2098                  * Subtle and very unlikely
2099                  *
2100                  * Gigantic 'page allocators' such as memblock or cma will
2101                  * return a set of pages with each page ref counted.  We need
2102                  * to turn this set of pages into a compound page with tail
2103                  * page ref counts set to zero.  Code such as speculative page
2104                  * cache adding could take a ref on a 'to be' tail page.
2105                  * We need to respect any increased ref count, and only set
2106                  * the ref count to zero if count is currently 1.  If count
2107                  * is not 1, we return an error.  An error return indicates
2108                  * the set of pages can not be converted to a gigantic page.
2109                  * The caller who allocated the pages should then discard the
2110                  * pages using the appropriate free interface.
2111                  *
2112                  * In the case of demote, the ref count will be zero.
2113                  */
2114                 if (!demote) {
2115                         if (!page_ref_freeze(p, 1)) {
2116                                 pr_warn("HugeTLB page can not be used due to unexpected inflated ref count\n");
2117                                 goto out_error;
2118                         }
2119                 } else {
2120                         VM_BUG_ON_PAGE(page_count(p), p);
2121                 }
2122                 if (i != 0)
2123                         set_compound_head(p, &folio->page);
2124         }
2125         __folio_set_head(folio);
2126         /* we rely on prep_new_hugetlb_folio to set the destructor */
2127         folio_set_order(folio, order);
2128         atomic_set(&folio->_entire_mapcount, -1);
2129         atomic_set(&folio->_nr_pages_mapped, 0);
2130         atomic_set(&folio->_pincount, 0);
2131         return true;
2132
2133 out_error:
2134         /* undo page modifications made above */
2135         for (j = 0; j < i; j++) {
2136                 p = folio_page(folio, j);
2137                 if (j != 0)
2138                         clear_compound_head(p);
2139                 set_page_refcounted(p);
2140         }
2141         /* need to clear PG_reserved on remaining tail pages  */
2142         for (; j < nr_pages; j++) {
2143                 p = folio_page(folio, j);
2144                 __ClearPageReserved(p);
2145         }
2146         return false;
2147 }
2148
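#if 0   /* editor's sketch, not part of the original source */
/*
 * The ref-freeze contract relied on above, in isolation: a would-be tail
 * page handed out by memblock/cma arrives with a refcount of 1 and may
 * only be converted if nobody has taken a speculative extra reference in
 * the meantime.
 */
static bool demo_claim_tail_page(struct page *p)
{
        if (page_ref_freeze(p, 1))
                return true;    /* refcount was exactly 1, now 0: safe to convert */
        return false;           /* transient extra ref: caller must undo and bail out */
}
#endif
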
2149 static bool prep_compound_gigantic_folio(struct folio *folio,
2150                                                         unsigned int order)
2151 {
2152         return __prep_compound_gigantic_folio(folio, order, false);
2153 }
2154
2155 static bool prep_compound_gigantic_folio_for_demote(struct folio *folio,
2156                                                         unsigned int order)
2157 {
2158         return __prep_compound_gigantic_folio(folio, order, true);
2159 }
2160
2161 /*
2162  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
2163  * transparent huge pages.  See the PageTransHuge() documentation for more
2164  * details.
2165  */
2166 int PageHuge(struct page *page)
2167 {
2168         struct folio *folio;
2169
2170         if (!PageCompound(page))
2171                 return 0;
2172         folio = page_folio(page);
2173         return folio_test_hugetlb(folio);
2174 }
2175 EXPORT_SYMBOL_GPL(PageHuge);
2176
2177 /*
2178  * Find and lock address space (mapping) in write mode.
2179  *
2180  * Upon entry, the page is locked, which means that page_mapping() is
2181  * stable.  Due to locking order, we can only trylock_write.  If we
2182  * cannot get the lock, simply return NULL to the caller.
2183  */
2184 struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
2185 {
2186         struct address_space *mapping = page_mapping(hpage);
2187
2188         if (!mapping)
2189                 return mapping;
2190
2191         if (i_mmap_trylock_write(mapping))
2192                 return mapping;
2193
2194         return NULL;
2195 }
2196
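#if 0   /* editor's sketch, not part of the original source */
/*
 * Typical pairing for the trylock helper above: the caller already holds
 * the page lock and is responsible for dropping the i_mmap rwsem when it
 * is done.  demo_do_work_locked() is a hypothetical stand-in for whatever
 * needs the mapping locked for write.
 */
static void demo_with_mapping_write_locked(struct page *hpage)
{
        struct address_space *mapping;

        mapping = hugetlb_page_mapping_lock_write(hpage);
        if (!mapping)
                return;         /* a racing writer holds the lock; caller must cope */

        demo_do_work_locked(hpage, mapping);    /* hypothetical */
        i_mmap_unlock_write(mapping);
}
#endif
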
2197 static struct folio *alloc_buddy_hugetlb_folio(struct hstate *h,
2198                 gfp_t gfp_mask, int nid, nodemask_t *nmask,
2199                 nodemask_t *node_alloc_noretry)
2200 {
2201         int order = huge_page_order(h);
2202         struct page *page;
2203         bool alloc_try_hard = true;
2204         bool retry = true;
2205
2206         /*
2207          * By default we always try hard to allocate the page with
2208          * __GFP_RETRY_MAYFAIL flag.  However, if we are allocating pages in
2209          * a loop (to adjust global huge page counts) and previous allocation
2210          * failed, do not continue to try hard on the same node.  Use the
2211          * node_alloc_noretry bitmap to manage this state information.
2212          */
2213         if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
2214                 alloc_try_hard = false;
2215         gfp_mask |= __GFP_COMP|__GFP_NOWARN;
2216         if (alloc_try_hard)
2217                 gfp_mask |= __GFP_RETRY_MAYFAIL;
2218         if (nid == NUMA_NO_NODE)
2219                 nid = numa_mem_id();
2220 retry:
2221         page = __alloc_pages(gfp_mask, order, nid, nmask);
2222
2223         /* Freeze head page */
2224         if (page && !page_ref_freeze(page, 1)) {
2225                 __free_pages(page, order);
2226                 if (retry) {    /* retry once */
2227                         retry = false;
2228                         goto retry;
2229                 }
2230                 /* WOW!  twice in a row. */
2231                 pr_warn("HugeTLB head page unexpected inflated ref count\n");
2232                 page = NULL;
2233         }
2234
2235         /*
2236          * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page, this
2237          * indicates an overall state change.  Clear bit so that we resume
2238          * normal 'try hard' allocations.
2239          */
2240         if (node_alloc_noretry && page && !alloc_try_hard)
2241                 node_clear(nid, *node_alloc_noretry);
2242
2243         /*
2244          * If we tried hard to get a page but failed, set bit so that
2245          * subsequent attempts will not try as hard until there is an
2246          * overall state change.
2247          */
2248         if (node_alloc_noretry && !page && alloc_try_hard)
2249                 node_set(nid, *node_alloc_noretry);
2250
2251         if (!page) {
2252                 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
2253                 return NULL;
2254         }
2255
2256         __count_vm_event(HTLB_BUDDY_PGALLOC);
2257         return page_folio(page);
2258 }
2259
2260 static struct folio *__alloc_fresh_hugetlb_folio(struct hstate *h,
2261                                 gfp_t gfp_mask, int nid, nodemask_t *nmask,
2262                                 nodemask_t *node_alloc_noretry)
2263 {
2264         struct folio *folio;
2265         bool retry = false;
2266
2267 retry:
2268         if (hstate_is_gigantic(h))
2269                 folio = alloc_gigantic_folio(h, gfp_mask, nid, nmask);
2270         else
2271                 folio = alloc_buddy_hugetlb_folio(h, gfp_mask,
2272                                 nid, nmask, node_alloc_noretry);
2273         if (!folio)
2274                 return NULL;
2275
2276         if (hstate_is_gigantic(h)) {
2277                 if (!prep_compound_gigantic_folio(folio, huge_page_order(h))) {
2278                         /*
2279                          * Rare failure to convert pages to compound page.
2280                          * Free pages and try again - ONCE!
2281                          */
2282                         free_gigantic_folio(folio, huge_page_order(h));
2283                         if (!retry) {
2284                                 retry = true;
2285                                 goto retry;
2286                         }
2287                         return NULL;
2288                 }
2289         }
2290
2291         return folio;
2292 }
2293
2294 static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h,
2295                 gfp_t gfp_mask, int nid, nodemask_t *nmask,
2296                 nodemask_t *node_alloc_noretry)
2297 {
2298         struct folio *folio;
2299
2300         folio = __alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask,
2301                                                 node_alloc_noretry);
2302         if (folio)
2303                 init_new_hugetlb_folio(h, folio);
2304         return folio;
2305 }
2306
2307 /*
2308  * Common helper to allocate a fresh hugetlb page. All specific allocators
2309  * should use this function to get new hugetlb pages.
2310  *
2311  * Note that returned page is 'frozen':  ref count of head page and all tail
2312  * pages is zero.
2313  */
2314 static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h,
2315                 gfp_t gfp_mask, int nid, nodemask_t *nmask,
2316                 nodemask_t *node_alloc_noretry)
2317 {
2318         struct folio *folio;
2319
2320         folio = __alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask,
2321                                                 node_alloc_noretry);
2322         if (!folio)
2323                 return NULL;
2324
2325         prep_new_hugetlb_folio(h, folio, folio_nid(folio));
2326         return folio;
2327 }
2328
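#if 0   /* editor's sketch, not part of the original source */
/*
 * The 'frozen' contract noted above, in isolation: a caller that hands the
 * fresh folio to a new owner (rather than parking it on a free list) must
 * unfreeze the refcount first, as alloc_migrate_hugetlb_folio() does below.
 */
static struct folio *demo_alloc_usable_folio(struct hstate *h, gfp_t gfp_mask,
                                             int nid, nodemask_t *nmask)
{
        struct folio *folio;

        folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
        if (folio)
                folio_ref_unfreeze(folio, 1);   /* refcount 0 -> 1 for the new owner */
        return folio;
}
#endif
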
2329 static void prep_and_add_allocated_folios(struct hstate *h,
2330                                         struct list_head *folio_list)
2331 {
2332         unsigned long flags;
2333         struct folio *folio, *tmp_f;
2334
2335         /* Send list for bulk vmemmap optimization processing */
2336         hugetlb_vmemmap_optimize_folios(h, folio_list);
2337
2338         /* Add all new pool pages to free lists in one lock cycle */
2339         spin_lock_irqsave(&hugetlb_lock, flags);
2340         list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
2341                 __prep_account_new_huge_page(h, folio_nid(folio));
2342                 enqueue_hugetlb_folio(h, folio);
2343         }
2344         spin_unlock_irqrestore(&hugetlb_lock, flags);
2345 }
2346
2347 /*
2348  * Allocates a fresh hugetlb page in a node interleaved manner.  The page
2349  * will later be added to the appropriate hugetlb pool.
2350  */
2351 static struct folio *alloc_pool_huge_folio(struct hstate *h,
2352                                         nodemask_t *nodes_allowed,
2353                                         nodemask_t *node_alloc_noretry)
2354 {
2355         gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
2356         int nr_nodes, node;
2357
2358         for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2359                 struct folio *folio;
2360
2361                 folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, node,
2362                                         nodes_allowed, node_alloc_noretry);
2363                 if (folio)
2364                         return folio;
2365         }
2366
2367         return NULL;
2368 }
2369
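#if 0   /* editor's sketch, not part of the original source */
/*
 * How the pieces above are meant to be driven together: one
 * node_alloc_noretry bitmap lives for the whole adjustment loop (so a node
 * that just failed a __GFP_RETRY_MAYFAIL attempt is not hammered again),
 * freshly allocated folios are collected on a private list, and the pool
 * accounting happens in one locked pass at the end.  The real pool-resize
 * path allocates the bitmap with NODEMASK_ALLOC(); a stack mask is used
 * here only to keep the sketch short.
 */
static void demo_grow_pool(struct hstate *h, unsigned long count,
                           nodemask_t *nodes_allowed)
{
        nodemask_t node_alloc_noretry = NODE_MASK_NONE;
        LIST_HEAD(folio_list);

        while (count--) {
                struct folio *folio;

                folio = alloc_pool_huge_folio(h, nodes_allowed,
                                              &node_alloc_noretry);
                if (!folio)
                        break;
                list_add(&folio->lru, &folio_list);
                cond_resched();
        }
        prep_and_add_allocated_folios(h, &folio_list);
}
#endif
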
2370 /*
2371  * Remove huge page from pool from next node to free.  Attempt to keep
2372  * persistent huge pages more or less balanced over allowed nodes.
2373  * This routine only 'removes' the hugetlb page.  The caller must make
2374  * an additional call to free the page to low level allocators.
2375  * Called with hugetlb_lock locked.
2376  */
2377 static struct folio *remove_pool_hugetlb_folio(struct hstate *h,
2378                 nodemask_t *nodes_allowed, bool acct_surplus)
2379 {
2380         int nr_nodes, node;
2381         struct folio *folio = NULL;
2382
2383         lockdep_assert_held(&hugetlb_lock);
2384         for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2385                 /*
2386                  * If we're returning unused surplus pages, only examine
2387                  * nodes with surplus pages.
2388                  */
2389                 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
2390                     !list_empty(&h->hugepage_freelists[node])) {
2391                         folio = list_entry(h->hugepage_freelists[node].next,
2392                                           struct folio, lru);
2393                         remove_hugetlb_folio(h, folio, acct_surplus);
2394                         break;
2395                 }
2396         }
2397
2398         return folio;
2399 }
2400
2401 /*
2402  * Dissolve a given free hugepage into free buddy pages. This function does
2403  * nothing for in-use hugepages and non-hugepages.
2404  * This function returns values like below:
2405  *
2406  *  -ENOMEM: failed to allocate vmemmap pages to free the freed hugepages
2407  *           when the system is under memory pressure and the feature of
2408  *           freeing unused vmemmap pages associated with each hugetlb page
2409  *           is enabled.
2410  *  -EBUSY:  failed to dissolve free hugepages or the hugepage is in-use
2411  *           (allocated or reserved.)
2412  *       0:  successfully dissolved free hugepages or the page is not a
2413  *           hugepage (considered as already dissolved)
2414  */
2415 int dissolve_free_huge_page(struct page *page)
2416 {
2417         int rc = -EBUSY;
2418         struct folio *folio = page_folio(page);
2419
2420 retry:
2421         /* Not to disrupt normal path by vainly holding hugetlb_lock */
2422         if (!folio_test_hugetlb(folio))
2423                 return 0;
2424
2425         spin_lock_irq(&hugetlb_lock);
2426         if (!folio_test_hugetlb(folio)) {
2427                 rc = 0;
2428                 goto out;
2429         }
2430
2431         if (!folio_ref_count(folio)) {
2432                 struct hstate *h = folio_hstate(folio);
2433                 if (!available_huge_pages(h))
2434                         goto out;
2435
2436                 /*
2437                  * We should make sure that the page is already on the free list
2438                  * when it is dissolved.
2439                  */
2440                 if (unlikely(!folio_test_hugetlb_freed(folio))) {
2441                         spin_unlock_irq(&hugetlb_lock);
2442                         cond_resched();
2443
2444                         /*
2445                          * Theoretically, we should return -EBUSY when we
2446                          * encounter this race. In fact, because the race window
2447                          * is quite small, we have a good chance to successfully
2448                          * dissolve the page if we retry. Seizing this opportunity
2449                          * is an optimization that increases the success rate of
2450                          * dissolving the page.
2451                          */
2452                         goto retry;
2453                 }
2454
2455                 remove_hugetlb_folio(h, folio, false);
2456                 h->max_huge_pages--;
2457                 spin_unlock_irq(&hugetlb_lock);
2458
2459                 /*
2460                  * Normally update_and_free_hugetlb_folio will allocate required vmemmap
2461                  * before freeing the page.  update_and_free_hugetlb_folio will fail to
2462                  * free the page if it cannot allocate required vmemmap.  We
2463                  * need to adjust max_huge_pages if the page is not freed.
2464                  * Attempt to allocate vmemmap here so that we can take
2465                  * appropriate action on failure.
2466                  *
2467                  * The folio_test_hugetlb check here is because
2468                  * remove_hugetlb_folio will clear hugetlb folio flag for
2469                  * non-vmemmap optimized hugetlb folios.
2470                  */
2471                 if (folio_test_hugetlb(folio)) {
2472                         rc = hugetlb_vmemmap_restore_folio(h, folio);
2473                         if (rc) {
2474                                 spin_lock_irq(&hugetlb_lock);
2475                                 add_hugetlb_folio(h, folio, false);
2476                                 h->max_huge_pages++;
2477                                 goto out;
2478                         }
2479                 } else
2480                         rc = 0;
2481
2482                 update_and_free_hugetlb_folio(h, folio, false);
2483                 return rc;
2484         }
2485 out:
2486         spin_unlock_irq(&hugetlb_lock);
2487         return rc;
2488 }
2489
2490 /*
2491  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
2492  * make specified memory blocks removable from the system.
2493  * Note that this will dissolve a free gigantic hugepage completely, if any
2494  * part of it lies within the given range.
2495  * Also note that if dissolve_free_huge_page() returns with an error, all
2496  * free hugepages that were dissolved before that error are lost.
2497  */
2498 int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
2499 {
2500         unsigned long pfn;
2501         struct page *page;
2502         int rc = 0;
2503         unsigned int order;
2504         struct hstate *h;
2505
2506         if (!hugepages_supported())
2507                 return rc;
2508
2509         order = huge_page_order(&default_hstate);
2510         for_each_hstate(h)
2511                 order = min(order, huge_page_order(h));
2512
2513         for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
2514                 page = pfn_to_page(pfn);
2515                 rc = dissolve_free_huge_page(page);
2516                 if (rc)
2517                         break;
2518         }
2519
2520         return rc;
2521 }
2522
2523 /*
2524  * Allocates a fresh surplus page from the page allocator.
2525  */
2526 static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
2527                                 gfp_t gfp_mask, int nid, nodemask_t *nmask)
2528 {
2529         struct folio *folio = NULL;
2530
2531         if (hstate_is_gigantic(h))
2532                 return NULL;
2533
2534         spin_lock_irq(&hugetlb_lock);
2535         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
2536                 goto out_unlock;
2537         spin_unlock_irq(&hugetlb_lock);
2538
2539         folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
2540         if (!folio)
2541                 return NULL;
2542
2543         spin_lock_irq(&hugetlb_lock);
2544         /*
2545          * We could have raced with the pool size change.
2546          * Double check that and simply deallocate the new page
2547          * if we would end up overcommitting the surpluses. Abuse the
2548          * temporary page mechanism to work around the nasty free_huge_folio
2549          * code flow.
2550          */
2551         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
2552                 folio_set_hugetlb_temporary(folio);
2553                 spin_unlock_irq(&hugetlb_lock);
2554                 free_huge_folio(folio);
2555                 return NULL;
2556         }
2557
2558         h->surplus_huge_pages++;
2559         h->surplus_huge_pages_node[folio_nid(folio)]++;
2560
2561 out_unlock:
2562         spin_unlock_irq(&hugetlb_lock);
2563
2564         return folio;
2565 }
2566
2567 static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask,
2568                                      int nid, nodemask_t *nmask)
2569 {
2570         struct folio *folio;
2571
2572         if (hstate_is_gigantic(h))
2573                 return NULL;
2574
2575         folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
2576         if (!folio)
2577                 return NULL;
2578
2579         /* fresh huge pages are frozen */
2580         folio_ref_unfreeze(folio, 1);
2581         /*
2582          * We do not account these pages as surplus because they are only
2583          * temporary and will be released properly on the last reference
2584          * temporary and will be released properly on the last reference.
2585         folio_set_hugetlb_temporary(folio);
2586
2587         return folio;
2588 }
2589
2590 /*
2591  * Use the VMA's mpolicy to allocate a huge page from the buddy.
2592  */
2593 static
2594 struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
2595                 struct vm_area_struct *vma, unsigned long addr)
2596 {
2597         struct folio *folio = NULL;
2598         struct mempolicy *mpol;
2599         gfp_t gfp_mask = htlb_alloc_mask(h);
2600         int nid;
2601         nodemask_t *nodemask;
2602
2603         nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
2604         if (mpol_is_preferred_many(mpol)) {
2605                 gfp_t gfp = gfp_mask | __GFP_NOWARN;
2606
2607                 gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2608                 folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask);
2609
2610                 /* Fallback to all nodes if page==NULL */
2611                 nodemask = NULL;
2612         }
2613
2614         if (!folio)
2615                 folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask);
2616         mpol_cond_put(mpol);
2617         return folio;
2618 }
2619
2620 /* folio migration callback function */
2621 struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
2622                 nodemask_t *nmask, gfp_t gfp_mask)
2623 {
2624         spin_lock_irq(&hugetlb_lock);
2625         if (available_huge_pages(h)) {
2626                 struct folio *folio;
2627
2628                 folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
2629                                                 preferred_nid, nmask);
2630                 if (folio) {
2631                         spin_unlock_irq(&hugetlb_lock);
2632                         return folio;
2633                 }
2634         }
2635         spin_unlock_irq(&hugetlb_lock);
2636
2637         return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask);
2638 }
2639
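#if 0   /* editor's sketch, not part of the original source */
/*
 * Shape of a migration-target callback built on the helper above: pick the
 * hstate from the source folio and honour the caller's preferred node and
 * nodemask.  Real users additionally tweak gfp_mask for the migration
 * context before calling in.
 */
static struct folio *demo_hugetlb_migration_target(struct folio *src, int nid,
                                                   nodemask_t *nmask, gfp_t gfp_mask)
{
        struct hstate *h = folio_hstate(src);

        return alloc_hugetlb_folio_nodemask(h, nid, nmask, gfp_mask);
}
#endif
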
2640 /*
2641  * Increase the hugetlb pool such that it can accommodate a reservation
2642  * of size 'delta'.
2643  */
2644 static int gather_surplus_pages(struct hstate *h, long delta)
2645         __must_hold(&hugetlb_lock)
2646 {
2647         LIST_HEAD(surplus_list);
2648         struct folio *folio, *tmp;
2649         int ret;
2650         long i;
2651         long needed, allocated;
2652         bool alloc_ok = true;
2653
2654         lockdep_assert_held(&hugetlb_lock);
2655         needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
2656         if (needed <= 0) {
2657                 h->resv_huge_pages += delta;
2658                 return 0;
2659         }
2660
2661         allocated = 0;
2662
2663         ret = -ENOMEM;
2664 retry:
2665         spin_unlock_irq(&hugetlb_lock);
2666         for (i = 0; i < needed; i++) {
2667                 folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h),
2668                                 NUMA_NO_NODE, NULL);
2669                 if (!folio) {
2670                         alloc_ok = false;
2671                         break;
2672                 }
2673                 list_add(&folio->lru, &surplus_list);
2674                 cond_resched();
2675         }
2676         allocated += i;
2677
2678         /*
2679          * After retaking hugetlb_lock, we need to recalculate 'needed'
2680          * because either resv_huge_pages or free_huge_pages may have changed.
2681          */
2682         spin_lock_irq(&hugetlb_lock);
2683         needed = (h->resv_huge_pages + delta) -
2684                         (h->free_huge_pages + allocated);
2685         if (needed > 0) {
2686                 if (alloc_ok)
2687                         goto retry;
2688                 /*
2689                  * We were not able to allocate enough pages to
2690                  * satisfy the entire reservation so we free what
2691                  * we've allocated so far.
2692                  */
2693                 goto free;
2694         }
2695         /*
2696          * The surplus_list now contains _at_least_ the number of extra pages
2697          * needed to accommodate the reservation.  Add the appropriate number
2698          * of pages to the hugetlb pool and free the extras back to the buddy
2699          * allocator.  Commit the entire reservation here to prevent another
2700          * process from stealing the pages as they are added to the pool but
2701          * before they are reserved.
2702          */
2703         needed += allocated;
2704         h->resv_huge_pages += delta;
2705         ret = 0;
2706
2707         /* Free the needed pages to the hugetlb pool */
2708         list_for_each_entry_safe(folio, tmp, &surplus_list, lru) {
2709                 if ((--needed) < 0)
2710                         break;
2711                 /* Add the page to the hugetlb allocator */
2712                 enqueue_hugetlb_folio(h, folio);
2713         }
2714 free:
2715         spin_unlock_irq(&hugetlb_lock);
2716
2717         /*
2718          * Free unnecessary surplus pages to the buddy allocator.
2719          * Pages have no ref count, call free_huge_folio directly.
2720          */
2721         list_for_each_entry_safe(folio, tmp, &surplus_list, lru)
2722                 free_huge_folio(folio);
2723         spin_lock_irq(&hugetlb_lock);
2724
2725         return ret;
2726 }
2727
2728 /*
2729  * This routine has two main purposes:
2730  * 1) Decrement the reservation count (resv_huge_pages) by the value passed
2731  *    in unused_resv_pages.  This corresponds to the prior adjustments made
2732  *    to the associated reservation map.
2733  * 2) Free any unused surplus pages that may have been allocated to satisfy
2734  *    the reservation.  As many as unused_resv_pages may be freed.
2735  */
2736 static void return_unused_surplus_pages(struct hstate *h,
2737                                         unsigned long unused_resv_pages)
2738 {
2739         unsigned long nr_pages;
2740         LIST_HEAD(page_list);
2741
2742         lockdep_assert_held(&hugetlb_lock);
2743         /* Uncommit the reservation */
2744         h->resv_huge_pages -= unused_resv_pages;
2745
2746         if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
2747                 goto out;
2748
2749         /*
2750          * Part (or even all) of the reservation could have been backed
2751          * by pre-allocated pages. Only free surplus pages.
2752          */
2753         nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
2754
2755         /*
2756          * We want to release as many surplus pages as possible, spread
2757          * evenly across all nodes with memory. Iterate across these nodes
2758          * until we can no longer free unreserved surplus pages. This occurs
2759          * when the nodes with surplus pages have no free pages.
2760          * remove_pool_hugetlb_folio() will balance the freed pages across the
2761          * on-line nodes with memory and will handle the hstate accounting.
2762          */
2763         while (nr_pages--) {
2764                 struct folio *folio;
2765
2766                 folio = remove_pool_hugetlb_folio(h, &node_states[N_MEMORY], 1);
2767                 if (!folio)
2768                         goto out;
2769
2770                 list_add(&folio->lru, &page_list);
2771         }
2772
2773 out:
2774         spin_unlock_irq(&hugetlb_lock);
2775         update_and_free_pages_bulk(h, &page_list);
2776         spin_lock_irq(&hugetlb_lock);
2777 }
2778
2779
2780 /*
2781  * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
2782  * are used by the huge page allocation routines to manage reservations.
2783  *
2784  * vma_needs_reservation is called to determine if the huge page at addr
2785  * within the vma has an associated reservation.  If a reservation is
2786  * needed, the value 1 is returned.  The caller is then responsible for
2787  * managing the global reservation and subpool usage counts.  After
2788  * the huge page has been allocated, vma_commit_reservation is called
2789  * to add the page to the reservation map.  If the page allocation fails,
2790  * the reservation must be ended instead of committed.  vma_end_reservation
2791  * is called in such cases.
2792  *
2793  * In the normal case, vma_commit_reservation returns the same value
2794  * as the preceding vma_needs_reservation call.  The only time this
2795  * is not the case is if a reserve map was changed between calls.  It
2796  * is the responsibility of the caller to notice the difference and
2797  * take appropriate action.
2798  *
2799  * vma_add_reservation is used in error paths where a reservation must
2800  * be restored when a newly allocated huge page must be freed.  It is
2801  * to be called after calling vma_needs_reservation to determine if a
2802  * reservation exists.
2803  *
2804  * vma_del_reservation is used in error paths where an entry in the reserve
2805  * map was created during huge page allocation and must be removed.  It is to
2806  * be called after calling vma_needs_reservation to determine if a reservation
2807  * exists.
2808  */
2809 enum vma_resv_mode {
2810         VMA_NEEDS_RESV,
2811         VMA_COMMIT_RESV,
2812         VMA_END_RESV,
2813         VMA_ADD_RESV,
2814         VMA_DEL_RESV,
2815 };
2816 static long __vma_reservation_common(struct hstate *h,
2817                                 struct vm_area_struct *vma, unsigned long addr,
2818                                 enum vma_resv_mode mode)
2819 {
2820         struct resv_map *resv;
2821         pgoff_t idx;
2822         long ret;
2823         long dummy_out_regions_needed;
2824
2825         resv = vma_resv_map(vma);
2826         if (!resv)
2827                 return 1;
2828
2829         idx = vma_hugecache_offset(h, vma, addr);
2830         switch (mode) {
2831         case VMA_NEEDS_RESV:
2832                 ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed);
2833                 /* We assume that vma_reservation_* routines always operate on
2834                  * 1 page, and that adding a 1 page entry to the resv map can only
2835                  * ever require 1 region.
2836                  */
2837                 VM_BUG_ON(dummy_out_regions_needed != 1);
2838                 break;
2839         case VMA_COMMIT_RESV:
2840                 ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2841                 /* region_add calls of range 1 should never fail. */
2842                 VM_BUG_ON(ret < 0);
2843                 break;
2844         case VMA_END_RESV:
2845                 region_abort(resv, idx, idx + 1, 1);
2846                 ret = 0;
2847                 break;
2848         case VMA_ADD_RESV:
2849                 if (vma->vm_flags & VM_MAYSHARE) {
2850                         ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2851                         /* region_add calls of range 1 should never fail. */
2852                         VM_BUG_ON(ret < 0);
2853                 } else {
2854                         region_abort(resv, idx, idx + 1, 1);
2855                         ret = region_del(resv, idx, idx + 1);
2856                 }
2857                 break;
2858         case VMA_DEL_RESV:
2859                 if (vma->vm_flags & VM_MAYSHARE) {
2860                         region_abort(resv, idx, idx + 1, 1);
2861                         ret = region_del(resv, idx, idx + 1);
2862                 } else {
2863                         ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2864                         /* region_add calls of range 1 should never fail. */
2865                         VM_BUG_ON(ret < 0);
2866                 }
2867                 break;
2868         default:
2869                 BUG();
2870         }
2871
2872         if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV)
2873                 return ret;
2874         /*
2875          * We know private mapping must have HPAGE_RESV_OWNER set.
2876          *
2877          * In most cases, reserves always exist for private mappings.
2878          * However, a file associated with the mapping could have been
2879          * hole punched or truncated after reserves were consumed.  In that
2880          * case a subsequent fault on such a range will not use reserves.
2881          * Subtle - The reserve map for private mappings has the
2882          * opposite meaning from that of shared mappings.  If NO
2883          * entry is in the reserve map, it means a reservation exists.
2884          * If an entry exists in the reserve map, it means the
2885          * reservation has already been consumed.  As a result, the
2886          * return value of this routine is the opposite of the
2887          * value returned from reserve map manipulation routines above.
2888          */
2889         if (ret > 0)
2890                 return 0;
2891         if (ret == 0)
2892                 return 1;
2893         return ret;
2894 }
2895
2896 static long vma_needs_reservation(struct hstate *h,
2897                         struct vm_area_struct *vma, unsigned long addr)
2898 {
2899         return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
2900 }
2901
2902 static long vma_commit_reservation(struct hstate *h,
2903                         struct vm_area_struct *vma, unsigned long addr)
2904 {
2905         return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
2906 }
2907
2908 static void vma_end_reservation(struct hstate *h,
2909                         struct vm_area_struct *vma, unsigned long addr)
2910 {
2911         (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
2912 }
2913
2914 static long vma_add_reservation(struct hstate *h,
2915                         struct vm_area_struct *vma, unsigned long addr)
2916 {
2917         return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
2918 }
2919
2920 static long vma_del_reservation(struct hstate *h,
2921                         struct vm_area_struct *vma, unsigned long addr)
2922 {
2923         return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV);
2924 }
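
/*
 * A minimal sketch of the expected calling pattern for the wrappers above,
 * assuming a hypothetical caller that already knows the hstate, vma and
 * faulting address (alloc_hugetlb_folio below follows this shape):
 *
 *	chg = vma_needs_reservation(h, vma, addr);
 *	if (chg < 0)
 *		return ERR_PTR(-ENOMEM);	(reserve map allocation failed)
 *	folio = <allocate, consuming an existing reserve iff chg == 0>;
 *	if (!folio) {
 *		vma_end_reservation(h, vma, addr);	(abort, do not commit)
 *		return ERR_PTR(-ENOSPC);
 *	}
 *	if (unlikely(chg > vma_commit_reservation(h, vma, addr)))
 *		<the reserve map changed between the two calls; back out the
 *		 extra subpool/reserve counts as alloc_hugetlb_folio does>;
 */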
2925
2926 /*
2927  * This routine is called to restore reservation information on error paths.
2928  * It should ONLY be called for folios allocated via alloc_hugetlb_folio(),
2929  * and the hugetlb mutex should remain held when calling this routine.
2930  *
2931  * It handles two specific cases:
2932  * 1) A reservation was in place and the folio consumed the reservation.
2933  *    hugetlb_restore_reserve is set in the folio.
2934  * 2) No reservation was in place for the page, so hugetlb_restore_reserve is
2935  *    not set.  However, alloc_hugetlb_folio always updates the reserve map.
2936  *
2937  * In case 1, free_huge_folio later in the error path will increment the
2938  * global reserve count.  But, free_huge_folio does not have enough context
2939  * to adjust the reservation map.  This case deals primarily with private
2940  * mappings.  Adjust the reserve map here to be consistent with global
2941  * reserve count adjustments to be made by free_huge_folio.  Make sure the
2942  * reserve map indicates there is a reservation present.
2943  *
2944  * In case 2, simply undo reserve map modifications done by alloc_hugetlb_folio.
2945  */
2946 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
2947                         unsigned long address, struct folio *folio)
2948 {
2949         long rc = vma_needs_reservation(h, vma, address);
2950
2951         if (folio_test_hugetlb_restore_reserve(folio)) {
2952                 if (unlikely(rc < 0))
2953                         /*
2954                          * Rare out of memory condition in reserve map
2955                          * manipulation.  Clear hugetlb_restore_reserve so
2956                          * that global reserve count will not be incremented
2957                          * by free_huge_folio.  This will make it appear
2958                          * as though the reservation for this folio was
2959                          * consumed.  This may prevent the task from
2960                          * faulting in the folio at a later time.  This
2961                          * is better than inconsistent global huge page
2962                          * accounting of reserve counts.
2963                          */
2964                         folio_clear_hugetlb_restore_reserve(folio);
2965                 else if (rc)
2966                         (void)vma_add_reservation(h, vma, address);
2967                 else
2968                         vma_end_reservation(h, vma, address);
2969         } else {
2970                 if (!rc) {
2971                         /*
2972                          * This indicates there is an entry in the reserve map
2973                          * not added by alloc_hugetlb_folio.  We know it was added
2974                          * before the alloc_hugetlb_folio call, otherwise
2975                          * hugetlb_restore_reserve would be set on the folio.
2976                          * Remove the entry so that a subsequent allocation
2977                          * does not consume a reservation.
2978                          */
2979                         rc = vma_del_reservation(h, vma, address);
2980                         if (rc < 0)
2981                                 /*
2982                                  * VERY rare out of memory condition.  Since
2983                                  * we can not delete the entry, set
2984                                  * hugetlb_restore_reserve so that the reserve
2985                                  * count will be incremented when the folio
2986                                  * is freed.  This reserve will be consumed
2987                                  * on a subsequent allocation.
2988                                  */
2989                                 folio_set_hugetlb_restore_reserve(folio);
2990                 } else if (rc < 0) {
2991                         /*
2992                          * Rare out of memory condition from
2993                          * vma_needs_reservation call.  Memory allocation is
2994                          * only attempted if a new entry is needed.  Therefore,
2995                          * this implies there is not an entry in the
2996                          * reserve map.
2997                          *
2998                          * For shared mappings, no entry in the map indicates
2999                          * no reservation.  We are done.
3000                          */
3001                         if (!(vma->vm_flags & VM_MAYSHARE))
3002                                 /*
3003                                  * For private mappings, no entry indicates
3004                                  * a reservation is present.  Since we can
3005                                  * not add an entry, set hugetlb_restore_reserve
3006                                  * on the folio so reserve count will be
3007                                  * incremented when freed.  This reserve will
3008                                  * be consumed on a subsequent allocation.
3009                                  */
3010                                 folio_set_hugetlb_restore_reserve(folio);
3011                 } else
3012                         /*
3013                          * No reservation present, do nothing
3014                          */
3015                          vma_end_reservation(h, vma, address);
3016         }
3017 }
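
/*
 * A minimal usage sketch (hypothetical caller; the real call sites are the
 * fault and copy paths that allocate with alloc_hugetlb_folio and then hit
 * a later failure):
 *
 *	folio = alloc_hugetlb_folio(vma, addr, 0);
 *	if (IS_ERR(folio))
 *		return PTR_ERR(folio);
 *	if (<some later step fails>) {
 *		restore_reserve_on_error(h, vma, addr, folio);
 *		folio_put(folio);	(drops into free_huge_folio, which
 *					 also fixes up the reserve count)
 *		return <error>;
 *	}
 */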
3018
3019 /*
3020  * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve
3021  * the old one
3022  * @h: struct hstate old page belongs to
3023  * @old_folio: Old folio to dissolve
3024  * @list: List on which to isolate the folio if it turns out to be in use
3025  * Returns 0 on success, otherwise negated error.
3026  */
3027 static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
3028                         struct folio *old_folio, struct list_head *list)
3029 {
3030         gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
3031         int nid = folio_nid(old_folio);
3032         struct folio *new_folio;
3033         int ret = 0;
3034
3035         /*
3036          * Before dissolving the folio, we need to allocate a new one for the
3037          * pool to remain stable.  Here, we allocate the folio and 'prep' it
3038          * by doing everything but actually updating counters and adding to
3039          * the pool.  This simplifies the logic and lets us do most of the
3040          * processing under the lock.
3041          */
3042         new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid, NULL, NULL);
3043         if (!new_folio)
3044                 return -ENOMEM;
3045         __prep_new_hugetlb_folio(h, new_folio);
3046
3047 retry:
3048         spin_lock_irq(&hugetlb_lock);
3049         if (!folio_test_hugetlb(old_folio)) {
3050                 /*
3051                  * Freed from under us. Drop new_folio too.
3052                  */
3053                 goto free_new;
3054         } else if (folio_ref_count(old_folio)) {
3055                 bool isolated;
3056
3057                 /*
3058                  * Someone has grabbed the folio, try to isolate it here.
3059                  * Fail with -EBUSY if not possible.
3060                  */
3061                 spin_unlock_irq(&hugetlb_lock);
3062                 isolated = isolate_hugetlb(old_folio, list);
3063                 ret = isolated ? 0 : -EBUSY;
3064                 spin_lock_irq(&hugetlb_lock);
3065                 goto free_new;
3066         } else if (!folio_test_hugetlb_freed(old_folio)) {
3067                 /*
3068                  * Folio's refcount is 0 but it has not been enqueued in the
3069                  * freelist yet. Race window is small, so we can succeed here if
3070                  * we retry.
3071                  */
3072                 spin_unlock_irq(&hugetlb_lock);
3073                 cond_resched();
3074                 goto retry;
3075         } else {
3076                 /*
3077                  * Ok, old_folio is still a genuine free hugepage. Remove it from
3078                  * the freelist and decrease the counters. These will be
3079                  * incremented again when calling __prep_account_new_huge_page()
3080                  * and enqueue_hugetlb_folio() for new_folio. The counters will
3081                  * remain stable since this happens under the lock.
3082                  */
3083                 remove_hugetlb_folio(h, old_folio, false);
3084
3085                 /*
3086                  * Ref count on new_folio is already zero as it was dropped
3087                  * earlier.  It can be directly added to the pool free list.
3088                  */
3089                 __prep_account_new_huge_page(h, nid);
3090                 enqueue_hugetlb_folio(h, new_folio);
3091
3092                 /*
3093                  * Folio has been replaced, we can safely free the old one.
3094                  */
3095                 spin_unlock_irq(&hugetlb_lock);
3096                 update_and_free_hugetlb_folio(h, old_folio, false);
3097         }
3098
3099         return ret;
3100
3101 free_new:
3102         spin_unlock_irq(&hugetlb_lock);
3103         /* Folio has a zero ref count, but needs a ref to be freed */
3104         folio_ref_unfreeze(new_folio, 1);
3105         update_and_free_hugetlb_folio(h, new_folio, false);
3106
3107         return ret;
3108 }
3109
3110 int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list)
3111 {
3112         struct hstate *h;
3113         struct folio *folio = page_folio(page);
3114         int ret = -EBUSY;
3115
3116         /*
3117          * The page might have been dissolved from under our feet, so make sure
3118          * to carefully check the state under the lock.
3119          * Return success when racing as if we dissolved the page ourselves.
3120          */
3121         spin_lock_irq(&hugetlb_lock);
3122         if (folio_test_hugetlb(folio)) {
3123                 h = folio_hstate(folio);
3124         } else {
3125                 spin_unlock_irq(&hugetlb_lock);
3126                 return 0;
3127         }
3128         spin_unlock_irq(&hugetlb_lock);
3129
3130         /*
3131          * Fence off gigantic pages as there is a cyclic dependency between
3132          * alloc_contig_range and them. Return -ENOMEM as this has the effect
3133          * of bailing out right away without further retrying.
3134          */
3135         if (hstate_is_gigantic(h))
3136                 return -ENOMEM;
3137
3138         if (folio_ref_count(folio) && isolate_hugetlb(folio, list))
3139                 ret = 0;
3140         else if (!folio_ref_count(folio))
3141                 ret = alloc_and_dissolve_hugetlb_folio(h, folio, list);
3142
3143         return ret;
3144 }
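
/*
 * A minimal sketch of how a contiguous-range allocator might use the helper
 * above (hypothetical caller; the real users sit in the alloc_contig_range /
 * page isolation path mentioned in the comment above):
 *
 *	LIST_HEAD(movable);
 *	int err = isolate_or_dissolve_huge_page(page, &movable);
 *
 *	err == 0       the hugepage was replaced and dissolved, or it was in
 *	               use and is now isolated on 'movable' for later migration
 *	err == -EBUSY  the in-use hugepage could not be isolated; caller may retry
 *	err == -ENOMEM gigantic page or no memory for a replacement; give up
 */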
3145
3146 struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
3147                                     unsigned long addr, int avoid_reserve)
3148 {
3149         struct hugepage_subpool *spool = subpool_vma(vma);
3150         struct hstate *h = hstate_vma(vma);
3151         struct folio *folio;
3152         long map_chg, map_commit, nr_pages = pages_per_huge_page(h);
3153         long gbl_chg;
3154         int memcg_charge_ret, ret, idx;
3155         struct hugetlb_cgroup *h_cg = NULL;
3156         struct mem_cgroup *memcg;
3157         bool deferred_reserve;
3158         gfp_t gfp = htlb_alloc_mask(h) | __GFP_RETRY_MAYFAIL;
3159
3160         memcg = get_mem_cgroup_from_current();
3161         memcg_charge_ret = mem_cgroup_hugetlb_try_charge(memcg, gfp, nr_pages);
3162         if (memcg_charge_ret == -ENOMEM) {
3163                 mem_cgroup_put(memcg);
3164                 return ERR_PTR(-ENOMEM);
3165         }
3166
3167         idx = hstate_index(h);
3168         /*
3169          * Examine the region/reserve map to determine if the process
3170          * has a reservation for the page to be allocated.  A return
3171          * code of zero indicates a reservation exists (no change).
3172          */
3173         map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
3174         if (map_chg < 0) {
3175                 if (!memcg_charge_ret)
3176                         mem_cgroup_cancel_charge(memcg, nr_pages);
3177                 mem_cgroup_put(memcg);
3178                 return ERR_PTR(-ENOMEM);
3179         }
3180
3181         /*
3182          * Processes that did not create the mapping will have no
3183          * reserves as indicated by the region/reserve map. Check
3184          * that the allocation will not exceed the subpool limit.
3185          * Allocations for MAP_NORESERVE mappings also need to be
3186          * checked against any subpool limit.
3187          */
3188         if (map_chg || avoid_reserve) {
3189                 gbl_chg = hugepage_subpool_get_pages(spool, 1);
3190                 if (gbl_chg < 0)
3191                         goto out_end_reservation;
3192
3193                 /*
3194                  * Even though there was no reservation in the region/reserve
3195                  * map, there could be reservations associated with the
3196                  * subpool that can be used.  This would be indicated if the
3197                  * return value of hugepage_subpool_get_pages() is zero.
3198                  * However, if avoid_reserve is specified we still avoid even
3199                  * the subpool reservations.
3200                  */
3201                 if (avoid_reserve)
3202                         gbl_chg = 1;
3203         }
3204
3205         /* If this allocation is not consuming a reservation, charge it now.
3206          */
3207         deferred_reserve = map_chg || avoid_reserve;
3208         if (deferred_reserve) {
3209                 ret = hugetlb_cgroup_charge_cgroup_rsvd(
3210                         idx, pages_per_huge_page(h), &h_cg);
3211                 if (ret)
3212                         goto out_subpool_put;
3213         }
3214
3215         ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
3216         if (ret)
3217                 goto out_uncharge_cgroup_reservation;
3218
3219         spin_lock_irq(&hugetlb_lock);
3220         /*
3221          * gbl_chg is passed to indicate whether or not a page must be taken
3222          * from the global free pool (global change).  gbl_chg == 0 indicates
3223          * a reservation exists for the allocation.
3224          */
3225         folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg);
3226         if (!folio) {
3227                 spin_unlock_irq(&hugetlb_lock);
3228                 folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
3229                 if (!folio)
3230                         goto out_uncharge_cgroup;
3231                 spin_lock_irq(&hugetlb_lock);
3232                 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
3233                         folio_set_hugetlb_restore_reserve(folio);
3234                         h->resv_huge_pages--;
3235                 }
3236                 list_add(&folio->lru, &h->hugepage_activelist);
3237                 folio_ref_unfreeze(folio, 1);
3238                 /* Fall through */
3239         }
3240
3241         hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio);
3242         /* If allocation is not consuming a reservation, also store the
3243          * hugetlb_cgroup pointer on the page.
3244          */
3245         if (deferred_reserve) {
3246                 hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
3247                                                   h_cg, folio);
3248         }
3249
3250         spin_unlock_irq(&hugetlb_lock);
3251
3252         hugetlb_set_folio_subpool(folio, spool);
3253
3254         map_commit = vma_commit_reservation(h, vma, addr);
3255         if (unlikely(map_chg > map_commit)) {
3256                 /*
3257                  * The page was added to the reservation map between
3258                  * vma_needs_reservation and vma_commit_reservation.
3259                  * This indicates a race with hugetlb_reserve_pages.
3260                  * Adjust for the subpool count incremented above AND
3261                  * in hugetlb_reserve_pages for the same page.  Also,
3262                  * the reservation count added in hugetlb_reserve_pages
3263                  * no longer applies.
3264                  */
3265                 long rsv_adjust;
3266
3267                 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
3268                 hugetlb_acct_memory(h, -rsv_adjust);
3269                 if (deferred_reserve)
3270                         hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
3271                                         pages_per_huge_page(h), folio);
3272         }
3273
3274         if (!memcg_charge_ret)
3275                 mem_cgroup_commit_charge(folio, memcg);
3276         mem_cgroup_put(memcg);
3277
3278         return folio;
3279
3280 out_uncharge_cgroup:
3281         hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
3282 out_uncharge_cgroup_reservation:
3283         if (deferred_reserve)
3284                 hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
3285                                                     h_cg);
3286 out_subpool_put:
3287         if (map_chg || avoid_reserve)
3288                 hugepage_subpool_put_pages(spool, 1);
3289 out_end_reservation:
3290         vma_end_reservation(h, vma, addr);
3291         if (!memcg_charge_ret)
3292                 mem_cgroup_cancel_charge(memcg, nr_pages);
3293         mem_cgroup_put(memcg);
3294         return ERR_PTR(-ENOSPC);
3295 }
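
/*
 * A minimal sketch of a fault-path style caller (hypothetical; the real
 * callers translate the two error codes into VM_FAULT_* values, e.g. via
 * vmf_error()):
 *
 *	folio = alloc_hugetlb_folio(vma, addr, 0);
 *	if (IS_ERR(folio))
 *		return vmf_error(PTR_ERR(folio));
 *
 * where -ENOMEM means the reserve map update or memcg charge failed and
 * -ENOSPC means the pool, subpool or cgroup limits are exhausted.
 */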
3296
3297 int alloc_bootmem_huge_page(struct hstate *h, int nid)
3298         __attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
3299 int __alloc_bootmem_huge_page(struct hstate *h, int nid)
3300 {
3301         struct huge_bootmem_page *m = NULL; /* initialize for clang */
3302         int nr_nodes, node;
3303
3304         /* do node specific alloc */
3305         if (nid != NUMA_NO_NODE) {
3306                 m = memblock_alloc_try_nid_raw(huge_page_size(h), huge_page_size(h),
3307                                 0, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
3308                 if (!m)
3309                         return 0;
3310                 goto found;
3311         }
3312         /* allocate from next node when distributing huge pages */
3313         for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
3314                 m = memblock_alloc_try_nid_raw(
3315                                 huge_page_size(h), huge_page_size(h),
3316                                 0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
3317                 /*
3318                  * Use the beginning of the huge page to store the
3319                  * huge_bootmem_page struct (until gather_bootmem
3320                  * puts them into the mem_map).
3321                  */
3322                 if (!m)
3323                         return 0;
3324                 goto found;
3325         }
3326
3327 found:
3328
3329         /*
3330          * Only initialize the head struct page in memmap_init_reserved_pages;
3331          * the rest of the struct pages will be initialized by the HugeTLB
3332          * subsystem itself.
3333          * The head struct page is used to get folio information by the HugeTLB
3334          * subsystem like zone id and node id.
3335          */
3336         memblock_reserved_mark_noinit(virt_to_phys((void *)m + PAGE_SIZE),
3337                 huge_page_size(h) - PAGE_SIZE);
3338         /* Put them into a private list first because mem_map is not up yet */
3339         INIT_LIST_HEAD(&m->list);
3340         list_add(&m->list, &huge_boot_pages);
3341         m->hstate = h;
3342         return 1;
3343 }
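
/*
 * Sizing note for the allocation above (illustrative numbers only): for a
 * 1 GiB hstate, huge_page_size(h) is 1 GiB, so each successful call reserves
 * a naturally aligned 1 GiB block from memblock; the huge_bootmem_page
 * struct lives at the start of that block until gather_bootmem_prealloc()
 * turns it into a proper hugetlb folio.
 */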
3344
3345 /* Initialize [start_page_number:end_page_number] tail struct pages of a hugepage */
3346 static void __init hugetlb_folio_init_tail_vmemmap(struct folio *folio,
3347                                         unsigned long start_page_number,
3348                                         unsigned long end_page_number)
3349 {
3350         enum zone_type zone = zone_idx(folio_zone(folio));
3351         int nid = folio_nid(folio);
3352         unsigned long head_pfn = folio_pfn(folio);
3353         unsigned long pfn, end_pfn = head_pfn + end_page_number;
3354         int ret;
3355
3356         for (pfn = head_pfn + start_page_number; pfn < end_pfn; pfn++) {
3357                 struct page *page = pfn_to_page(pfn);
3358
3359                 __init_single_page(page, pfn, zone, nid);
3360                 prep_compound_tail((struct page *)folio, pfn - head_pfn);
3361                 ret = page_ref_freeze(page, 1);
3362                 VM_BUG_ON(!ret);
3363         }
3364 }
3365
3366 static void __init hugetlb_folio_init_vmemmap(struct folio *folio,
3367                                               struct hstate *h,
3368                                               unsigned long nr_pages)
3369 {
3370         int ret;
3371
3372         /* Prepare folio head */
3373         __folio_clear_reserved(folio);
3374         __folio_set_head(folio);
3375         ret = folio_ref_freeze(folio, 1);
3376         VM_BUG_ON(!ret);
3377         /* Initialize the necessary tail struct pages */
3378         hugetlb_folio_init_tail_vmemmap(folio, 1, nr_pages);
3379         prep_compound_head((struct page *)folio, huge_page_order(h));
3380 }
3381
3382 static void __init prep_and_add_bootmem_folios(struct hstate *h,
3383                                         struct list_head *folio_list)
3384 {
3385         unsigned long flags;
3386         struct folio *folio, *tmp_f;
3387
3388         /* Send list for bulk vmemmap optimization processing */
3389         hugetlb_vmemmap_optimize_folios(h, folio_list);
3390
3391         /* Add all new pool pages to free lists in one lock cycle */
3392         spin_lock_irqsave(&hugetlb_lock, flags);
3393         list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
3394                 if (!folio_test_hugetlb_vmemmap_optimized(folio)) {
3395                         /*
3396                          * If HVO fails, initialize all tail struct pages.
3397                          * We do not worry about potential long lock hold
3398                          * time as this is early in boot and there should
3399                          * be no contention.
3400                          */
3401                         hugetlb_folio_init_tail_vmemmap(folio,
3402                                         HUGETLB_VMEMMAP_RESERVE_PAGES,
3403                                         pages_per_huge_page(h));
3404                 }
3405                 __prep_account_new_huge_page(h, folio_nid(folio));
3406                 enqueue_hugetlb_folio(h, folio);
3407         }
3408         spin_unlock_irqrestore(&hugetlb_lock, flags);
3409 }
3410
3411 /*
3412  * Put bootmem huge pages into the standard lists after mem_map is up.
3413  * Note: This only applies to gigantic (order > MAX_PAGE_ORDER) pages.
3414  */
3415 static void __init gather_bootmem_prealloc(void)
3416 {
3417         LIST_HEAD(folio_list);
3418         struct huge_bootmem_page *m;
3419         struct hstate *h = NULL, *prev_h = NULL;
3420
3421         list_for_each_entry(m, &huge_boot_pages, list) {
3422                 struct page *page = virt_to_page(m);
3423                 struct folio *folio = (void *)page;
3424
3425                 h = m->hstate;
3426                 /*
3427                  * It is possible to have multiple huge page sizes (hstates)
3428                  * in this list.  If so, process each size separately.
3429                  */
3430                 if (h != prev_h && prev_h != NULL)
3431                         prep_and_add_bootmem_folios(prev_h, &folio_list);
3432                 prev_h = h;
3433
3434                 VM_BUG_ON(!hstate_is_gigantic(h));
3435                 WARN_ON(folio_ref_count(folio) != 1);
3436
3437                 hugetlb_folio_init_vmemmap(folio, h,
3438                                            HUGETLB_VMEMMAP_RESERVE_PAGES);
3439                 init_new_hugetlb_folio(h, folio);
3440                 list_add(&folio->lru, &folio_list);
3441
3442                 /*
3443                  * We need to restore the 'stolen' pages to totalram_pages
3444                  * in order to fix confusing memory reports from free(1) and
3445                  * other side-effects, like CommitLimit going negative.
3446                  */
3447                 adjust_managed_page_count(page, pages_per_huge_page(h));
3448                 cond_resched();
3449         }
3450
3451         prep_and_add_bootmem_folios(h, &folio_list);
3452 }
3453
3454 static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
3455 {
3456         unsigned long i;
3457         char buf[32];
3458
3459         for (i = 0; i < h->max_huge_pages_node[nid]; ++i) {
3460                 if (hstate_is_gigantic(h)) {
3461                         if (!alloc_bootmem_huge_page(h, nid))
3462                                 break;
3463                 } else {
3464                         struct folio *folio;
3465                         gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
3466
3467                         folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid,
3468                                         &node_states[N_MEMORY], NULL);
3469                         if (!folio)
3470                                 break;
3471                         free_huge_folio(folio); /* free it into the hugepage allocator */
3472                 }
3473                 cond_resched();
3474         }
3475         if (i == h->max_huge_pages_node[nid])
3476                 return;
3477
3478         string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3479         pr_warn("HugeTLB: allocating %u of page size %s failed node%d.  Only allocated %lu hugepages.\n",
3480                 h->max_huge_pages_node[nid], buf, nid, i);
3481         h->max_huge_pages -= (h->max_huge_pages_node[nid] - i);
3482         h->max_huge_pages_node[nid] = i;
3483 }
3484
3485 /*
3486  * NOTE: this routine is called in different contexts for gigantic and
3487  * non-gigantic pages.
3488  * - For gigantic pages, this is called early in the boot process and
3489  *   pages are allocated from memblock or a similar boot-time allocator.
3490  *   Gigantic pages are actually added to pools later with the routine
3491  *   gather_bootmem_prealloc.
3492  * - For non-gigantic pages, this is called later in the boot process after
3493  *   all of mm is up and functional.  Pages are allocated from buddy and
3494  *   then added to hugetlb pools.
3495  */
3496 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
3497 {
3498         unsigned long i;
3499         struct folio *folio;
3500         LIST_HEAD(folio_list);
3501         nodemask_t *node_alloc_noretry;
3502         bool node_specific_alloc = false;
3503
3504         /* skip gigantic hugepages allocation if hugetlb_cma enabled */
3505         if (hstate_is_gigantic(h) && hugetlb_cma_size) {
3506                 pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
3507                 return;
3508         }
3509
3510         /* do node specific alloc */
3511         for_each_online_node(i) {
3512                 if (h->max_huge_pages_node[i] > 0) {
3513                         hugetlb_hstate_alloc_pages_onenode(h, i);
3514                         node_specific_alloc = true;
3515                 }
3516         }
3517
3518         if (node_specific_alloc)
3519                 return;
3520
3521         /* below will do all node balanced alloc */
3522         if (!hstate_is_gigantic(h)) {
3523                 /*
3524                  * Bit mask controlling how hard we retry per-node allocations.
3525                  * Ignore errors as lower level routines can deal with
3526                  * node_alloc_noretry == NULL.  If this kmalloc fails at boot
3527                  * time, we are likely in bigger trouble.
3528                  */
3529                 node_alloc_noretry = kmalloc(sizeof(*node_alloc_noretry),
3530                                                 GFP_KERNEL);
3531         } else {
3532                 /* allocations done at boot time */
3533                 node_alloc_noretry = NULL;
3534         }
3535
3536         /* bit mask controlling how hard we retry per-node allocations */
3537         if (node_alloc_noretry)
3538                 nodes_clear(*node_alloc_noretry);
3539
3540         for (i = 0; i < h->max_huge_pages; ++i) {
3541                 if (hstate_is_gigantic(h)) {
3542                         /*
3543                          * gigantic pages not added to list as they are not
3544                          * added to pools now.
3545                          */
3546                         if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
3547                                 break;
3548                 } else {
3549                         folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
3550                                                         node_alloc_noretry);
3551                         if (!folio)
3552                                 break;
3553                         list_add(&folio->lru, &folio_list);
3554                 }
3555                 cond_resched();
3556         }
3557
3558         /* list will be empty if hstate_is_gigantic */
3559         prep_and_add_allocated_folios(h, &folio_list);
3560
3561         if (i < h->max_huge_pages) {
3562                 char buf[32];
3563
3564                 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3565                 pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
3566                         h->max_huge_pages, buf, i);
3567                 h->max_huge_pages = i;
3568         }
3569         kfree(node_alloc_noretry);
3570 }
3571
3572 static void __init hugetlb_init_hstates(void)
3573 {
3574         struct hstate *h, *h2;
3575
3576         for_each_hstate(h) {
3577                 /* oversize hugepages were init'ed in early boot */
3578                 if (!hstate_is_gigantic(h))
3579                         hugetlb_hstate_alloc_pages(h);
3580
3581                 /*
3582                  * Set demote order for each hstate.  Note that
3583                  * h->demote_order is initially 0.
3584                  * - We can not demote gigantic pages if runtime freeing
3585                  *   is not supported, so skip this.
3586                  * - If CMA allocation is possible, we can not demote
3587                  *   HUGETLB_PAGE_ORDER or smaller size pages.
3588                  */
3589                 if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
3590                         continue;
3591                 if (hugetlb_cma_size && h->order <= HUGETLB_PAGE_ORDER)
3592                         continue;
3593                 for_each_hstate(h2) {
3594                         if (h2 == h)
3595                                 continue;
3596                         if (h2->order < h->order &&
3597                             h2->order > h->demote_order)
3598                                 h->demote_order = h2->order;
3599                 }
3600         }
3601 }
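
/*
 * Worked example of the demote order selection above (assuming x86-64 with
 * 4 KiB base pages and both 2 MiB and 1 GiB hstates configured): the 1 GiB
 * hstate (order 18) gets demote_order = 9, i.e. it demotes to 2 MiB pages,
 * while the 2 MiB hstate keeps demote_order = 0 because no smaller hstate
 * exists.
 */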
3602
3603 static void __init report_hugepages(void)
3604 {
3605         struct hstate *h;
3606
3607         for_each_hstate(h) {
3608                 char buf[32];
3609
3610                 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3611                 pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n",
3612                         buf, h->free_huge_pages);
3613                 pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n",
3614                         hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf);
3615         }
3616 }
3617
3618 #ifdef CONFIG_HIGHMEM
3619 static void try_to_free_low(struct hstate *h, unsigned long count,
3620                                                 nodemask_t *nodes_allowed)
3621 {
3622         int i;
3623         LIST_HEAD(page_list);
3624
3625         lockdep_assert_held(&hugetlb_lock);
3626         if (hstate_is_gigantic(h))
3627                 return;
3628
3629         /*
3630          * Collect pages to be freed on a list, and free after dropping lock
3631          */
3632         for_each_node_mask(i, *nodes_allowed) {
3633                 struct folio *folio, *next;
3634                 struct list_head *freel = &h->hugepage_freelists[i];
3635                 list_for_each_entry_safe(folio, next, freel, lru) {
3636                         if (count >= h->nr_huge_pages)
3637                                 goto out;
3638                         if (folio_test_highmem(folio))
3639                                 continue;
3640                         remove_hugetlb_folio(h, folio, false);
3641                         list_add(&folio->lru, &page_list);
3642                 }
3643         }
3644
3645 out:
3646         spin_unlock_irq(&hugetlb_lock);
3647         update_and_free_pages_bulk(h, &page_list);
3648         spin_lock_irq(&hugetlb_lock);
3649 }
3650 #else
3651 static inline void try_to_free_low(struct hstate *h, unsigned long count,
3652                                                 nodemask_t *nodes_allowed)
3653 {
3654 }
3655 #endif
3656
3657 /*
3658  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
3659  * balanced by operating on them in a round-robin fashion.
3660  * Returns 1 if an adjustment was made.
3661  */
3662 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
3663                                 int delta)
3664 {
3665         int nr_nodes, node;
3666
3667         lockdep_assert_held(&hugetlb_lock);
3668         VM_BUG_ON(delta != -1 && delta != 1);
3669
3670         if (delta < 0) {
3671                 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
3672                         if (h->surplus_huge_pages_node[node])
3673                                 goto found;
3674                 }
3675         } else {
3676                 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
3677                         if (h->surplus_huge_pages_node[node] <
3678                                         h->nr_huge_pages_node[node])
3679                                 goto found;
3680                 }
3681         }
3682         return 0;
3683
3684 found:
3685         h->surplus_huge_pages += delta;
3686         h->surplus_huge_pages_node[node] += delta;
3687         return 1;
3688 }
3689
3690 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
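
/*
 * Worked example (illustrative numbers only): with nr_huge_pages == 10 and
 * surplus_huge_pages == 2, persistent_huge_pages(h) is 8.  Those 8 pages are
 * what set_max_huge_pages() below measures against the requested count; the
 * 2 surplus pages are returned to the buddy allocator as they become free.
 */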
3691 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
3692                               nodemask_t *nodes_allowed)
3693 {
3694         unsigned long min_count;
3695         unsigned long allocated;
3696         struct folio *folio;
3697         LIST_HEAD(page_list);
3698         NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
3699
3700         /*
3701          * Bit mask controlling how hard we retry per-node allocations.
3702          * If we can not allocate the bit mask, do not attempt to allocate
3703          * the requested huge pages.
3704          */
3705         if (node_alloc_noretry)
3706                 nodes_clear(*node_alloc_noretry);
3707         else
3708                 return -ENOMEM;
3709
3710         /*
3711          * resize_lock mutex prevents concurrent adjustments to number of
3712          * pages in hstate via the proc/sysfs interfaces.
3713          */
3714         mutex_lock(&h->resize_lock);
3715         flush_free_hpage_work(h);
3716         spin_lock_irq(&hugetlb_lock);
3717
3718         /*
3719          * Check for a node specific request.
3720          * Changing node specific huge page count may require a corresponding
3721          * change to the global count.  In any case, the passed node mask
3722          * (nodes_allowed) will restrict alloc/free to the specified node.
3723          */
3724         if (nid != NUMA_NO_NODE) {
3725                 unsigned long old_count = count;
3726
3727                 count += persistent_huge_pages(h) -
3728                          (h->nr_huge_pages_node[nid] -
3729                           h->surplus_huge_pages_node[nid]);
3730                 /*
3731                  * User may have specified a large count value which caused the
3732                  * above calculation to overflow.  In this case, they wanted
3733                  * to allocate as many huge pages as possible.  Set count to
3734                  * largest possible value to align with their intention.
3735                  */
3736                 if (count < old_count)
3737                         count = ULONG_MAX;
3738         }
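
        /*
         * Worked example (illustrative numbers only): if node 'nid' currently
         * holds 3 persistent pages while the whole pool holds 10, a request
         * for 5 pages on that node becomes count = 5 + (10 - 3) = 12.  The
         * code below then resizes the global pool to 12 pages, with
         * nodes_allowed restricting the actual alloc/free work to 'nid'.
         */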
3739
3740         /*
3741          * Runtime allocation of gigantic pages depends on the capability to
3742          * allocate large contiguous page ranges.
3743          * If the system does not provide this feature, return an error when
3744          * the user tries to allocate gigantic pages, but let the user free
3745          * boot-time allocated gigantic pages.
3746          */
3747         if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
3748                 if (count > persistent_huge_pages(h)) {
3749                         spin_unlock_irq(&hugetlb_lock);
3750                         mutex_unlock(&h->resize_lock);
3751                         NODEMASK_FREE(node_alloc_noretry);
3752                         return -EINVAL;
3753                 }
3754                 /* Fall through to decrease pool */
3755         }
3756
3757         /*
3758          * Increase the pool size
3759          * First take pages out of surplus state.  Then make up the
3760          * remaining difference by allocating fresh huge pages.
3761          *
3762          * We might race with alloc_surplus_hugetlb_folio() here and be unable
3763          * to convert a surplus huge page to a normal huge page. That is
3764          * not critical, though, it just means the overall size of the
3765          * pool might be one hugepage larger than it needs to be, but
3766          * within all the constraints specified by the sysctls.
3767          */
3768         while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
3769                 if (!adjust_pool_surplus(h, nodes_allowed, -1))
3770                         break;
3771         }
3772
3773         allocated = 0;
3774         while (count > (persistent_huge_pages(h) + allocated)) {
3775                 /*
3776                  * If this allocation races such that we no longer need the
3777                  * page, free_huge_folio will handle it by freeing the page
3778                  * and reducing the surplus.
3779                  */
3780                 spin_unlock_irq(&hugetlb_lock);
3781
3782                 /* yield cpu to avoid soft lockup */
3783                 cond_resched();
3784
3785                 folio = alloc_pool_huge_folio(h, nodes_allowed,
3786                                                 node_alloc_noretry);
3787                 if (!folio) {
3788                         prep_and_add_allocated_folios(h, &page_list);
3789                         spin_lock_irq(&hugetlb_lock);
3790                         goto out;
3791                 }
3792
3793                 list_add(&folio->lru, &page_list);
3794                 allocated++;
3795
3796                 /* Bail for signals. Probably ctrl-c from user */
3797                 if (signal_pending(current)) {
3798                         prep_and_add_allocated_folios(h, &page_list);
3799                         spin_lock_irq(&hugetlb_lock);
3800                         goto out;
3801                 }
3802
3803                 spin_lock_irq(&hugetlb_lock);
3804         }
3805
3806         /* Add allocated pages to the pool */
3807         if (!list_empty(&page_list)) {
3808                 spin_unlock_irq(&hugetlb_lock);
3809                 prep_and_add_allocated_folios(h, &page_list);
3810                 spin_lock_irq(&hugetlb_lock);
3811         }
3812
3813         /*
3814          * Decrease the pool size
3815          * First return free pages to the buddy allocator (being careful
3816          * to keep enough around to satisfy reservations).  Then place
3817          * pages into surplus state as needed so the pool will shrink
3818          * to the desired size as pages become free.
3819          *
3820          * By placing pages into the surplus state independent of the
3821          * overcommit value, we are allowing the surplus pool size to
3822          * exceed overcommit. There are few sane options here. Since
3823          * alloc_surplus_hugetlb_folio() is checking the global counter,
3824          * though, we'll note that we're not allowed to exceed surplus
3825          * and won't grow the pool anywhere else. Not until one of the
3826          * sysctls is changed, or the surplus pages go out of use.
3827          */
3828         min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
3829         min_count = max(count, min_count);
3830         try_to_free_low(h, min_count, nodes_allowed);
3831
3832         /*
3833          * Collect pages to be removed on list without dropping lock
3834          */
3835         while (min_count < persistent_huge_pages(h)) {
3836                 folio = remove_pool_hugetlb_folio(h, nodes_allowed, 0);
3837                 if (!folio)
3838                         break;
3839
3840                 list_add(&folio->lru, &page_list);
3841         }
3842         /* free the pages after dropping lock */
3843         spin_unlock_irq(&hugetlb_lock);
3844         update_and_free_pages_bulk(h, &page_list);
3845         flush_free_hpage_work(h);
3846         spin_lock_irq(&hugetlb_lock);
3847
3848         while (count < persistent_huge_pages(h)) {
3849                 if (!adjust_pool_surplus(h, nodes_allowed, 1))
3850                         break;
3851         }
3852 out:
3853         h->max_huge_pages = persistent_huge_pages(h);
3854         spin_unlock_irq(&hugetlb_lock);
3855         mutex_unlock(&h->resize_lock);
3856
3857         NODEMASK_FREE(node_alloc_noretry);
3858
3859         return 0;
3860 }
3861
3862 static int demote_free_hugetlb_folio(struct hstate *h, struct folio *folio)
3863 {
3864         int i, nid = folio_nid(folio);
3865         struct hstate *target_hstate;
3866         struct page *subpage;
3867         struct folio *inner_folio;
3868         int rc = 0;
3869
3870         target_hstate = size_to_hstate(PAGE_SIZE << h->demote_order);
3871
3872         remove_hugetlb_folio_for_demote(h, folio, false);
3873         spin_unlock_irq(&hugetlb_lock);
3874
3875         /*
3876          * If vmemmap already existed for folio, the remove routine above would
3877          * have cleared the hugetlb folio flag.  Hence the folio is technically
3878          * no longer a hugetlb folio.  hugetlb_vmemmap_restore_folio can only be
3879          * passed hugetlb folios and will BUG otherwise.
3880          */
3881         if (folio_test_hugetlb(folio)) {
3882                 rc = hugetlb_vmemmap_restore_folio(h, folio);
3883                 if (rc) {
3884                         /* Allocation of vmemmap failed, we cannot demote the folio */
3885                         spin_lock_irq(&hugetlb_lock);
3886                         folio_ref_unfreeze(folio, 1);
3887                         add_hugetlb_folio(h, folio, false);
3888                         return rc;
3889                 }
3890         }
3891
3892         /*
3893          * Use destroy_compound_hugetlb_folio_for_demote for all huge page
3894          * sizes as it will not ref count folios.
3895          */
3896         destroy_compound_hugetlb_folio_for_demote(folio, huge_page_order(h));
3897
3898         /*
3899          * Taking target hstate mutex synchronizes with set_max_huge_pages.
3900          * Without the mutex, pages added to target hstate could be marked
3901          * as surplus.
3902          *
3903          * Note that we already hold h->resize_lock.  To prevent deadlock,
3904          * use the convention of always taking larger size hstate mutex first.
3905          */
3906         mutex_lock(&target_hstate->resize_lock);
3907         for (i = 0; i < pages_per_huge_page(h);
3908                                 i += pages_per_huge_page(target_hstate)) {
3909                 subpage = folio_page(folio, i);
3910                 inner_folio = page_folio(subpage);
3911                 if (hstate_is_gigantic(target_hstate))
3912                         prep_compound_gigantic_folio_for_demote(inner_folio,
3913                                                         target_hstate->order);
3914                 else
3915                         prep_compound_page(subpage, target_hstate->order);
3916                 folio_change_private(inner_folio, NULL);
3917                 prep_new_hugetlb_folio(target_hstate, inner_folio, nid);
3918                 free_huge_folio(inner_folio);
3919         }
3920         mutex_unlock(&target_hstate->resize_lock);
3921
3922         spin_lock_irq(&hugetlb_lock);
3923
3924         /*
3925          * Not absolutely necessary, but for consistency update max_huge_pages
3926          * based on pool changes for the demoted page.
3927          */
3928         h->max_huge_pages--;
3929         target_hstate->max_huge_pages +=
3930                 pages_per_huge_page(h) / pages_per_huge_page(target_hstate);
3931
3932         return rc;
3933 }
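
/*
 * Worked example for the accounting above (assuming 1 GiB pages demoted to
 * 2 MiB pages on x86-64): one demoted folio yields 512 target folios, so
 * h->max_huge_pages drops by 1 while target_hstate->max_huge_pages grows by
 * pages_per_huge_page(h) / pages_per_huge_page(target_hstate) == 512.
 */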
3934
3935 static int demote_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
3936         __must_hold(&hugetlb_lock)
3937 {
3938         int nr_nodes, node;
3939         struct folio *folio;
3940
3941         lockdep_assert_held(&hugetlb_lock);
3942
3943         /* We should never get here if no demote order */
3944         if (!h->demote_order) {
3945                 pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n");
3946                 return -EINVAL;         /* internal error */
3947         }
3948
3949         for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
3950                 list_for_each_entry(folio, &h->hugepage_freelists[node], lru) {
3951                         if (folio_test_hwpoison(folio))
3952                                 continue;
3953                         return demote_free_hugetlb_folio(h, folio);
3954                 }
3955         }
3956
3957         /*
3958          * Only way to get here is if all pages on free lists are poisoned.
3959          * Return -EBUSY so that caller will not retry.
3960          */
3961         return -EBUSY;
3962 }
3963
3964 #define HSTATE_ATTR_RO(_name) \
3965         static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
3966
3967 #define HSTATE_ATTR_WO(_name) \
3968         static struct kobj_attribute _name##_attr = __ATTR_WO(_name)
3969
3970 #define HSTATE_ATTR(_name) \
3971         static struct kobj_attribute _name##_attr = __ATTR_RW(_name)
3972
3973 static struct kobject *hugepages_kobj;
3974 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
3975
3976 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
3977
3978 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
3979 {
3980         int i;
3981
3982         for (i = 0; i < HUGE_MAX_HSTATE; i++)
3983                 if (hstate_kobjs[i] == kobj) {
3984                         if (nidp)
3985                                 *nidp = NUMA_NO_NODE;
3986                         return &hstates[i];
3987                 }
3988
3989         return kobj_to_node_hstate(kobj, nidp);
3990 }
3991
3992 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
3993                                         struct kobj_attribute *attr, char *buf)
3994 {
3995         struct hstate *h;
3996         unsigned long nr_huge_pages;
3997         int nid;
3998
3999         h = kobj_to_hstate(kobj, &nid);
4000         if (nid == NUMA_NO_NODE)
4001                 nr_huge_pages = h->nr_huge_pages;
4002         else
4003                 nr_huge_pages = h->nr_huge_pages_node[nid];
4004
4005         return sysfs_emit(buf, "%lu\n", nr_huge_pages);
4006 }
4007
4008 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
4009                                            struct hstate *h, int nid,
4010                                            unsigned long count, size_t len)
4011 {
4012         int err;
4013         nodemask_t nodes_allowed, *n_mask;
4014
4015         if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
4016                 return -EINVAL;
4017
4018         if (nid == NUMA_NO_NODE) {
4019                 /*
4020                  * global hstate attribute
4021                  */
4022                 if (!(obey_mempolicy &&
4023                                 init_nodemask_of_mempolicy(&nodes_allowed)))
4024                         n_mask = &node_states[N_MEMORY];
4025                 else
4026                         n_mask = &nodes_allowed;
4027         } else {
4028                 /*
4029                  * Node specific request.  count adjustment happens in
4030                  * set_max_huge_pages() after acquiring hugetlb_lock.
4031                  */
4032                 init_nodemask_of_node(&nodes_allowed, nid);
4033                 n_mask = &nodes_allowed;
4034         }
4035
4036         err = set_max_huge_pages(h, count, nid, n_mask);
4037
4038         return err ? err : len;
4039 }
4040
4041 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
4042                                          struct kobject *kobj, const char *buf,
4043                                          size_t len)
4044 {
4045         struct hstate *h;
4046         unsigned long count;
4047         int nid;
4048         int err;
4049
4050         err = kstrtoul(buf, 10, &count);
4051         if (err)
4052                 return err;
4053
4054         h = kobj_to_hstate(kobj, &nid);
4055         return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
4056 }
4057
4058 static ssize_t nr_hugepages_show(struct kobject *kobj,
4059                                        struct kobj_attribute *attr, char *buf)
4060 {
4061         return nr_hugepages_show_common(kobj, attr, buf);
4062 }
4063
4064 static ssize_t nr_hugepages_store(struct kobject *kobj,
4065                struct kobj_attribute *attr, const char *buf, size_t len)
4066 {
4067         return nr_hugepages_store_common(false, kobj, buf, len);
4068 }
4069 HSTATE_ATTR(nr_hugepages);
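
/*
 * Usage note: writing a decimal page count to
 * /sys/kernel/mm/hugepages/hugepages-<size>kB/nr_hugepages (or to the
 * per-node variant under /sys/devices/system/node/nodeN/hugepages/) lands
 * in nr_hugepages_store() above and is ultimately handled by
 * set_max_huge_pages().
 */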
4070
4071 #ifdef CONFIG_NUMA
4072
4073 /*
4074  * hstate attribute for optionally mempolicy-based constraint on persistent
4075  * huge page alloc/free.
4076  */
4077 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
4078                                            struct kobj_attribute *attr,
4079                                            char *buf)
4080 {
4081         return nr_hugepages_show_common(kobj, attr, buf);
4082 }
4083
4084 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
4085                struct kobj_attribute *attr, const char *buf, size_t len)
4086 {
4087         return nr_hugepages_store_common(true, kobj, buf, len);
4088 }
4089 HSTATE_ATTR(nr_hugepages_mempolicy);
4090 #endif
4091
4092
4093 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
4094                                         struct kobj_attribute *attr, char *buf)
4095 {
4096         struct hstate *h = kobj_to_hstate(kobj, NULL);
4097         return sysfs_emit(buf, "%lu\n", h->nr_overcommit_huge_pages);
4098 }
4099
4100 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
4101                 struct kobj_attribute *attr, const char *buf, size_t count)
4102 {
4103         int err;
4104         unsigned long input;
4105         struct hstate *h = kobj_to_hstate(kobj, NULL);
4106
4107         if (hstate_is_gigantic(h))
4108                 return -EINVAL;
4109
4110         err = kstrtoul(buf, 10, &input);
4111         if (err)
4112                 return err;
4113
4114         spin_lock_irq(&hugetlb_lock);
4115         h->nr_overcommit_huge_pages = input;
4116         spin_unlock_irq(&hugetlb_lock);
4117
4118         return count;
4119 }
4120 HSTATE_ATTR(nr_overcommit_hugepages);
4121
4122 static ssize_t free_hugepages_show(struct kobject *kobj,
4123                                         struct kobj_attribute *attr, char *buf)
4124 {
4125         struct hstate *h;
4126         unsigned long free_huge_pages;
4127         int nid;
4128
4129         h = kobj_to_hstate(kobj, &nid);
4130         if (nid == NUMA_NO_NODE)
4131                 free_huge_pages = h->free_huge_pages;
4132         else
4133                 free_huge_pages = h->free_huge_pages_node[nid];
4134
4135         return sysfs_emit(buf, "%lu\n", free_huge_pages);
4136 }
4137 HSTATE_ATTR_RO(free_hugepages);
4138
4139 static ssize_t resv_hugepages_show(struct kobject *kobj,
4140                                         struct kobj_attribute *attr, char *buf)
4141 {
4142         struct hstate *h = kobj_to_hstate(kobj, NULL);
4143         return sysfs_emit(buf, "%lu\n", h->resv_huge_pages);
4144 }
4145 HSTATE_ATTR_RO(resv_hugepages);
4146
4147 static ssize_t surplus_hugepages_show(struct kobject *kobj,
4148                                         struct kobj_attribute *attr, char *buf)
4149 {
4150         struct hstate *h;
4151         unsigned long surplus_huge_pages;
4152         int nid;
4153
4154         h = kobj_to_hstate(kobj, &nid);
4155         if (nid == NUMA_NO_NODE)
4156                 surplus_huge_pages = h->surplus_huge_pages;
4157         else
4158                 surplus_huge_pages = h->surplus_huge_pages_node[nid];
4159
4160         return sysfs_emit(buf, "%lu\n", surplus_huge_pages);
4161 }
4162 HSTATE_ATTR_RO(surplus_hugepages);
4163
4164 static ssize_t demote_store(struct kobject *kobj,
4165                struct kobj_attribute *attr, const char *buf, size_t len)
4166 {
4167         unsigned long nr_demote;
4168         unsigned long nr_available;
4169         nodemask_t nodes_allowed, *n_mask;
4170         struct hstate *h;
4171         int err;
4172         int nid;
4173
4174         err = kstrtoul(buf, 10, &nr_demote);
4175         if (err)
4176                 return err;
4177         h = kobj_to_hstate(kobj, &nid);
4178
4179         if (nid != NUMA_NO_NODE) {
4180                 init_nodemask_of_node(&nodes_allowed, nid);
4181                 n_mask = &nodes_allowed;
4182         } else {
4183                 n_mask = &node_states[N_MEMORY];
4184         }
4185
4186         /* Synchronize with other sysfs operations modifying huge pages */
4187         mutex_lock(&h->resize_lock);
4188         spin_lock_irq(&hugetlb_lock);
4189
4190         while (nr_demote) {
4191                 /*
4192                  * Check for available pages to demote each time through the
4193                  * loop as demote_pool_huge_page will drop hugetlb_lock.
4194                  */
4195                 if (nid != NUMA_NO_NODE)
4196                         nr_available = h->free_huge_pages_node[nid];
4197                 else
4198                         nr_available = h->free_huge_pages;
4199                 nr_available -= h->resv_huge_pages;
4200                 if (!nr_available)
4201                         break;
4202
4203                 err = demote_pool_huge_page(h, n_mask);
4204                 if (err)
4205                         break;
4206
4207                 nr_demote--;
4208         }
4209
4210         spin_unlock_irq(&hugetlb_lock);
4211         mutex_unlock(&h->resize_lock);
4212
4213         if (err)
4214                 return err;
4215         return len;
4216 }
4217 HSTATE_ATTR_WO(demote);
4218
4219 static ssize_t demote_size_show(struct kobject *kobj,
4220                                         struct kobj_attribute *attr, char *buf)
4221 {
4222         struct hstate *h = kobj_to_hstate(kobj, NULL);
4223         unsigned long demote_size = (PAGE_SIZE << h->demote_order) / SZ_1K;
4224
4225         return sysfs_emit(buf, "%lukB\n", demote_size);
4226 }
4227
4228 static ssize_t demote_size_store(struct kobject *kobj,
4229                                         struct kobj_attribute *attr,
4230                                         const char *buf, size_t count)
4231 {
4232         struct hstate *h, *demote_hstate;
4233         unsigned long demote_size;
4234         unsigned int demote_order;
4235
4236         demote_size = (unsigned long)memparse(buf, NULL);
4237
4238         demote_hstate = size_to_hstate(demote_size);
4239         if (!demote_hstate)
4240                 return -EINVAL;
4241         demote_order = demote_hstate->order;
4242         if (demote_order < HUGETLB_PAGE_ORDER)
4243                 return -EINVAL;
4244
4245         /* demote order must be smaller than hstate order */
4246         h = kobj_to_hstate(kobj, NULL);
4247         if (demote_order >= h->order)
4248                 return -EINVAL;
4249
4250         /* resize_lock synchronizes access to demote size and writes */
4251         mutex_lock(&h->resize_lock);
4252         h->demote_order = demote_order;
4253         mutex_unlock(&h->resize_lock);
4254
4255         return count;
4256 }
4257 HSTATE_ATTR(demote_size);
4258
4259 static struct attribute *hstate_attrs[] = {
4260         &nr_hugepages_attr.attr,
4261         &nr_overcommit_hugepages_attr.attr,
4262         &free_hugepages_attr.attr,
4263         &resv_hugepages_attr.attr,
4264         &surplus_hugepages_attr.attr,
4265 #ifdef CONFIG_NUMA
4266         &nr_hugepages_mempolicy_attr.attr,
4267 #endif
4268         NULL,
4269 };
4270
4271 static const struct attribute_group hstate_attr_group = {
4272         .attrs = hstate_attrs,
4273 };
4274
4275 static struct attribute *hstate_demote_attrs[] = {
4276         &demote_size_attr.attr,
4277         &demote_attr.attr,
4278         NULL,
4279 };
4280
4281 static const struct attribute_group hstate_demote_attr_group = {
4282         .attrs = hstate_demote_attrs,
4283 };
4284
4285 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
4286                                     struct kobject **hstate_kobjs,
4287                                     const struct attribute_group *hstate_attr_group)
4288 {
4289         int retval;
4290         int hi = hstate_index(h);
4291
4292         hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
4293         if (!hstate_kobjs[hi])
4294                 return -ENOMEM;
4295
4296         retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
4297         if (retval) {
4298                 kobject_put(hstate_kobjs[hi]);
4299                 hstate_kobjs[hi] = NULL;
4300                 return retval;
4301         }
4302
4303         if (h->demote_order) {
4304                 retval = sysfs_create_group(hstate_kobjs[hi],
4305                                             &hstate_demote_attr_group);
4306                 if (retval) {
4307                         pr_warn("HugeTLB unable to create demote interfaces for %s\n", h->name);
4308                         sysfs_remove_group(hstate_kobjs[hi], hstate_attr_group);
4309                         kobject_put(hstate_kobjs[hi]);
4310                         hstate_kobjs[hi] = NULL;
4311                         return retval;
4312                 }
4313         }
4314
4315         return 0;
4316 }
4317
4318 #ifdef CONFIG_NUMA
4319 static bool hugetlb_sysfs_initialized __ro_after_init;
4320
4321 /*
4322  * node_hstate/s - associate per node hstate attributes, via their kobjects,
4323  * with node devices in node_devices[] using a parallel array.  The array
4324  * index of a node device or node_hstate == node id.
4325  * This is here to avoid any static dependency of the node device driver, in
4326  * the base kernel, on the hugetlb module.
4327  */
4328 struct node_hstate {
4329         struct kobject          *hugepages_kobj;
4330         struct kobject          *hstate_kobjs[HUGE_MAX_HSTATE];
4331 };
4332 static struct node_hstate node_hstates[MAX_NUMNODES];
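/*
 * A sketch of the per node layout registered by hugetlb_register_node()
 * (assuming the usual sysfs paths for node devices; directory names come
 * from hstate->name):
 *
 *   /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages
 *   /sys/devices/system/node/node0/hugepages/hugepages-2048kB/free_hugepages
 *   /sys/devices/system/node/node0/hugepages/hugepages-2048kB/surplus_hugepages
 *
 * i.e. one directory per hstate, exposing the per_node_hstate_attrs subset
 * defined below.
 */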
4333
4334 /*
4335  * A subset of global hstate attributes for node devices
4336  */
4337 static struct attribute *per_node_hstate_attrs[] = {
4338         &nr_hugepages_attr.attr,
4339         &free_hugepages_attr.attr,
4340         &surplus_hugepages_attr.attr,
4341         NULL,
4342 };
4343
4344 static const struct attribute_group per_node_hstate_attr_group = {
4345         .attrs = per_node_hstate_attrs,
4346 };
4347
4348 /*
4349  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
4350  * Returns node id via non-NULL nidp.
4351  */
4352 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
4353 {
4354         int nid;
4355
4356         for (nid = 0; nid < nr_node_ids; nid++) {
4357                 struct node_hstate *nhs = &node_hstates[nid];
4358                 int i;
4359                 for (i = 0; i < HUGE_MAX_HSTATE; i++)
4360                         if (nhs->hstate_kobjs[i] == kobj) {
4361                                 if (nidp)
4362                                         *nidp = nid;
4363                                 return &hstates[i];
4364                         }
4365         }
4366
4367         BUG();
4368         return NULL;
4369 }
4370
4371 /*
4372  * Unregister hstate attributes from a single node device.
4373  * No-op if no hstate attributes attached.
4374  */
4375 void hugetlb_unregister_node(struct node *node)
4376 {
4377         struct hstate *h;
4378         struct node_hstate *nhs = &node_hstates[node->dev.id];
4379
4380         if (!nhs->hugepages_kobj)
4381                 return;         /* no hstate attributes */
4382
4383         for_each_hstate(h) {
4384                 int idx = hstate_index(h);
4385                 struct kobject *hstate_kobj = nhs->hstate_kobjs[idx];
4386
4387                 if (!hstate_kobj)
4388                         continue;
4389                 if (h->demote_order)
4390                         sysfs_remove_group(hstate_kobj, &hstate_demote_attr_group);
4391                 sysfs_remove_group(hstate_kobj, &per_node_hstate_attr_group);
4392                 kobject_put(hstate_kobj);
4393                 nhs->hstate_kobjs[idx] = NULL;
4394         }
4395
4396         kobject_put(nhs->hugepages_kobj);
4397         nhs->hugepages_kobj = NULL;
4398 }
4399
4400
4401 /*
4402  * Register hstate attributes for a single node device.
4403  * No-op if attributes already registered.
4404  */
4405 void hugetlb_register_node(struct node *node)
4406 {
4407         struct hstate *h;
4408         struct node_hstate *nhs = &node_hstates[node->dev.id];
4409         int err;
4410
4411         if (!hugetlb_sysfs_initialized)
4412                 return;
4413
4414         if (nhs->hugepages_kobj)
4415                 return;         /* already allocated */
4416
4417         nhs->hugepages_kobj = kobject_create_and_add("hugepages",
4418                                                         &node->dev.kobj);
4419         if (!nhs->hugepages_kobj)
4420                 return;
4421
4422         for_each_hstate(h) {
4423                 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
4424                                                 nhs->hstate_kobjs,
4425                                                 &per_node_hstate_attr_group);
4426                 if (err) {
4427                         pr_err("HugeTLB: Unable to add hstate %s for node %d\n",
4428                                 h->name, node->dev.id);
4429                         hugetlb_unregister_node(node);
4430                         break;
4431                 }
4432         }
4433 }
4434
4435 /*
4436  * hugetlb init time:  register hstate attributes for all registered node
4437  * devices of nodes that have memory.  All on-line nodes should have
4438  * registered their associated device by this time.
4439  */
4440 static void __init hugetlb_register_all_nodes(void)
4441 {
4442         int nid;
4443
4444         for_each_online_node(nid)
4445                 hugetlb_register_node(node_devices[nid]);
4446 }
4447 #else   /* !CONFIG_NUMA */
4448
4449 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
4450 {
4451         BUG();
4452         if (nidp)
4453                 *nidp = -1;
4454         return NULL;
4455 }
4456
4457 static void hugetlb_register_all_nodes(void) { }
4458
4459 #endif
4460
4461 #ifdef CONFIG_CMA
4462 static void __init hugetlb_cma_check(void);
4463 #else
4464 static inline __init void hugetlb_cma_check(void)
4465 {
4466 }
4467 #endif
4468
4469 static void __init hugetlb_sysfs_init(void)
4470 {
4471         struct hstate *h;
4472         int err;
4473
4474         hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
4475         if (!hugepages_kobj)
4476                 return;
4477
4478         for_each_hstate(h) {
4479                 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
4480                                          hstate_kobjs, &hstate_attr_group);
4481                 if (err)
4482                         pr_err("HugeTLB: Unable to add hstate %s\n", h->name);
4483         }
4484
4485 #ifdef CONFIG_NUMA
4486         hugetlb_sysfs_initialized = true;
4487 #endif
4488         hugetlb_register_all_nodes();
4489 }
4490
4491 #ifdef CONFIG_SYSCTL
4492 static void hugetlb_sysctl_init(void);
4493 #else
4494 static inline void hugetlb_sysctl_init(void) { }
4495 #endif
4496
4497 static int __init hugetlb_init(void)
4498 {
4499         int i;
4500
4501         BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE <
4502                         __NR_HPAGEFLAGS);
4503
4504         if (!hugepages_supported()) {
4505                 if (hugetlb_max_hstate || default_hstate_max_huge_pages)
4506                         pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
4507                 return 0;
4508         }
4509
4510         /*
4511          * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists.  Some
4512          * architectures depend on setup being done here.
4513          */
4514         hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
4515         if (!parsed_default_hugepagesz) {
4516                 /*
4517                  * If we did not parse a default huge page size, set
4518                  * default_hstate_idx to HPAGE_SIZE hstate. And, if the
4519                  * number of huge pages for this default size was implicitly
4520                  * specified, set that here as well.
4521                  * Note that the implicit setting will overwrite an explicit
4522                  * setting.  A warning will be printed in this case.
4523                  */
4524                 default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
4525                 if (default_hstate_max_huge_pages) {
4526                         if (default_hstate.max_huge_pages) {
4527                                 char buf[32];
4528
4529                                 string_get_size(huge_page_size(&default_hstate),
4530                                         1, STRING_UNITS_2, buf, 32);
4531                                 pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
4532                                         default_hstate.max_huge_pages, buf);
4533                                 pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
4534                                         default_hstate_max_huge_pages);
4535                         }
4536                         default_hstate.max_huge_pages =
4537                                 default_hstate_max_huge_pages;
4538
4539                         for_each_online_node(i)
4540                                 default_hstate.max_huge_pages_node[i] =
4541                                         default_hugepages_in_node[i];
4542                 }
4543         }
4544
4545         hugetlb_cma_check();
4546         hugetlb_init_hstates();
4547         gather_bootmem_prealloc();
4548         report_hugepages();
4549
4550         hugetlb_sysfs_init();
4551         hugetlb_cgroup_file_init();
4552         hugetlb_sysctl_init();
4553
4554 #ifdef CONFIG_SMP
4555         num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
4556 #else
4557         num_fault_mutexes = 1;
4558 #endif
4559         hugetlb_fault_mutex_table =
4560                 kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
4561                               GFP_KERNEL);
4562         BUG_ON(!hugetlb_fault_mutex_table);
4563
4564         for (i = 0; i < num_fault_mutexes; i++)
4565                 mutex_init(&hugetlb_fault_mutex_table[i]);
4566         return 0;
4567 }
4568 subsys_initcall(hugetlb_init);
4569
4570 /* Overwritten by architectures with more huge page sizes */
4571 bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size)
4572 {
4573         return size == HPAGE_SIZE;
4574 }
4575
4576 void __init hugetlb_add_hstate(unsigned int order)
4577 {
4578         struct hstate *h;
4579         unsigned long i;
4580
4581         if (size_to_hstate(PAGE_SIZE << order)) {
4582                 return;
4583         }
4584         BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
4585         BUG_ON(order < order_base_2(__NR_USED_SUBPAGE));
4586         h = &hstates[hugetlb_max_hstate++];
4587         mutex_init(&h->resize_lock);
4588         h->order = order;
4589         h->mask = ~(huge_page_size(h) - 1);
4590         for (i = 0; i < MAX_NUMNODES; ++i)
4591                 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
4592         INIT_LIST_HEAD(&h->hugepage_activelist);
4593         h->next_nid_to_alloc = first_memory_node;
4594         h->next_nid_to_free = first_memory_node;
4595         snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
4596                                         huge_page_size(h)/SZ_1K);
4597
4598         parsed_hstate = h;
4599 }
4600
4601 bool __init __weak hugetlb_node_alloc_supported(void)
4602 {
4603         return true;
4604 }
4605
4606 static void __init hugepages_clear_pages_in_node(void)
4607 {
4608         if (!hugetlb_max_hstate) {
4609                 default_hstate_max_huge_pages = 0;
4610                 memset(default_hugepages_in_node, 0,
4611                         sizeof(default_hugepages_in_node));
4612         } else {
4613                 parsed_hstate->max_huge_pages = 0;
4614                 memset(parsed_hstate->max_huge_pages_node, 0,
4615                         sizeof(parsed_hstate->max_huge_pages_node));
4616         }
4617 }
4618
4619 /*
4620  * hugepages command line processing
4621  * hugepages normally follows a valid hugepagesz or default_hugepagesz
4622  * specification.  If not, ignore the hugepages value.  hugepages can also
4623  * be the first huge page command line option, in which case it implicitly
4624  * specifies the number of huge pages for the default size.
4625  */
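/*
 * Examples (a sketch of the accepted forms, matching the parsing below):
 *
 *   hugepages=1024          1024 pages of the preceding (or default) size
 *   hugepages=0:512,1:512   node format: 512 pages on node 0 and 512 on
 *                           node 1, when the architecture supports node
 *                           specific allocation
 */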
4626 static int __init hugepages_setup(char *s)
4627 {
4628         unsigned long *mhp;
4629         static unsigned long *last_mhp;
4630         int node = NUMA_NO_NODE;
4631         int count;
4632         unsigned long tmp;
4633         char *p = s;
4634
4635         if (!parsed_valid_hugepagesz) {
4636                 pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
4637                 parsed_valid_hugepagesz = true;
4638                 return 1;
4639         }
4640
4641         /*
4642          * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
4643          * yet, so this hugepages= parameter goes to the "default hstate".
4644          * Otherwise, it goes with the previously parsed hugepagesz or
4645          * default_hugepagesz.
4646          */
4647         else if (!hugetlb_max_hstate)
4648                 mhp = &default_hstate_max_huge_pages;
4649         else
4650                 mhp = &parsed_hstate->max_huge_pages;
4651
4652         if (mhp == last_mhp) {
4653                 pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s);
4654                 return 1;
4655         }
4656
4657         while (*p) {
4658                 count = 0;
4659                 if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4660                         goto invalid;
4661                 /* Parameter is node format */
4662                 if (p[count] == ':') {
4663                         if (!hugetlb_node_alloc_supported()) {
4664                                 pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n");
4665                                 return 1;
4666                         }
4667                         if (tmp >= MAX_NUMNODES || !node_online(tmp))
4668                                 goto invalid;
4669                         node = array_index_nospec(tmp, MAX_NUMNODES);
4670                         p += count + 1;
4671                         /* Parse hugepages */
4672                         if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4673                                 goto invalid;
4674                         if (!hugetlb_max_hstate)
4675                                 default_hugepages_in_node[node] = tmp;
4676                         else
4677                                 parsed_hstate->max_huge_pages_node[node] = tmp;
4678                         *mhp += tmp;
4679                         /* Go on to parse the next node */
4680                         if (p[count] == ',')
4681                                 p += count + 1;
4682                         else
4683                                 break;
4684                 } else {
4685                         if (p != s)
4686                                 goto invalid;
4687                         *mhp = tmp;
4688                         break;
4689                 }
4690         }
4691
4692         /*
4693          * Global state is always initialized later in hugetlb_init.
4694          * But we need to allocate gigantic hstates here early to still
4695          * use the bootmem allocator.
4696          */
4697         if (hugetlb_max_hstate && hstate_is_gigantic(parsed_hstate))
4698                 hugetlb_hstate_alloc_pages(parsed_hstate);
4699
4700         last_mhp = mhp;
4701
4702         return 1;
4703
4704 invalid:
4705         pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p);
4706         hugepages_clear_pages_in_node();
4707         return 1;
4708 }
4709 __setup("hugepages=", hugepages_setup);
4710
4711 /*
4712  * hugepagesz command line processing
4713  * A specific huge page size can only be specified once with hugepagesz.
4714  * hugepagesz is followed by hugepages on the command line.  The global
4715  * variable 'parsed_valid_hugepagesz' is used to determine if prior
4716  * hugepagesz argument was valid.
4717  */
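/*
 * For example (a sketch; memparse() accepts K/M/G suffixes):
 *
 *   hugepagesz=1G hugepages=16 hugepagesz=2M hugepages=1024
 *
 * sets up two pools, provided arch_hugetlb_valid_size() accepts both sizes.
 */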
4718 static int __init hugepagesz_setup(char *s)
4719 {
4720         unsigned long size;
4721         struct hstate *h;
4722
4723         parsed_valid_hugepagesz = false;
4724         size = (unsigned long)memparse(s, NULL);
4725
4726         if (!arch_hugetlb_valid_size(size)) {
4727                 pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);
4728                 return 1;
4729         }
4730
4731         h = size_to_hstate(size);
4732         if (h) {
4733                 /*
4734                  * hstate for this size already exists.  This is normally
4735                  * an error, but is allowed if the existing hstate is the
4736                  * default hstate.  More specifically, it is only allowed if
4737                  * the number of huge pages for the default hstate was not
4738                  * previously specified.
4739                  */
4740                 if (!parsed_default_hugepagesz || h != &default_hstate ||
4741                     default_hstate.max_huge_pages) {
4742                         pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);
4743                         return 1;
4744                 }
4745
4746                 /*
4747                  * No need to call hugetlb_add_hstate() as hstate already
4748                  * exists.  But, do set parsed_hstate so that a following
4749                  * hugepages= parameter will be applied to this hstate.
4750                  */
4751                 parsed_hstate = h;
4752                 parsed_valid_hugepagesz = true;
4753                 return 1;
4754         }
4755
4756         hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4757         parsed_valid_hugepagesz = true;
4758         return 1;
4759 }
4760 __setup("hugepagesz=", hugepagesz_setup);
4761
4762 /*
4763  * default_hugepagesz command line input
4764  * Only one instance of default_hugepagesz allowed on command line.
4765  */
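/*
 * For example (a sketch): "default_hugepagesz=1G hugepages=8" makes 1G the
 * default huge page size and reserves eight pages of that size at boot.
 */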
4766 static int __init default_hugepagesz_setup(char *s)
4767 {
4768         unsigned long size;
4769         int i;
4770
4771         parsed_valid_hugepagesz = false;
4772         if (parsed_default_hugepagesz) {
4773                 pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
4774                 return 1;
4775         }
4776
4777         size = (unsigned long)memparse(s, NULL);
4778
4779         if (!arch_hugetlb_valid_size(size)) {
4780                 pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s);
4781                 return 1;
4782         }
4783
4784         hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4785         parsed_valid_hugepagesz = true;
4786         parsed_default_hugepagesz = true;
4787         default_hstate_idx = hstate_index(size_to_hstate(size));
4788
4789         /*
4790          * The number of default huge pages (for this size) could have been
4791          * specified as the first hugetlb parameter: hugepages=X.  If so,
4792          * then default_hstate_max_huge_pages is set.  If the default huge
4793          * page size is gigantic (> MAX_PAGE_ORDER), then the pages must be
4794          * allocated here from bootmem allocator.
4795          */
4796         if (default_hstate_max_huge_pages) {
4797                 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
4798                 for_each_online_node(i)
4799                         default_hstate.max_huge_pages_node[i] =
4800                                 default_hugepages_in_node[i];
4801                 if (hstate_is_gigantic(&default_hstate))
4802                         hugetlb_hstate_alloc_pages(&default_hstate);
4803                 default_hstate_max_huge_pages = 0;
4804         }
4805
4806         return 1;
4807 }
4808 __setup("default_hugepagesz=", default_hugepagesz_setup);
4809
4810 static nodemask_t *policy_mbind_nodemask(gfp_t gfp)
4811 {
4812 #ifdef CONFIG_NUMA
4813         struct mempolicy *mpol = get_task_policy(current);
4814
4815         /*
4816          * Only enforce MPOL_BIND policy which overlaps with cpuset policy
4817          * (from policy_nodemask) specifically for hugetlb case
4818          */
4819         if (mpol->mode == MPOL_BIND &&
4820                 (apply_policy_zone(mpol, gfp_zone(gfp)) &&
4821                  cpuset_nodemask_valid_mems_allowed(&mpol->nodes)))
4822                 return &mpol->nodes;
4823 #endif
4824         return NULL;
4825 }
4826
4827 static unsigned int allowed_mems_nr(struct hstate *h)
4828 {
4829         int node;
4830         unsigned int nr = 0;
4831         nodemask_t *mbind_nodemask;
4832         unsigned int *array = h->free_huge_pages_node;
4833         gfp_t gfp_mask = htlb_alloc_mask(h);
4834
4835         mbind_nodemask = policy_mbind_nodemask(gfp_mask);
4836         for_each_node_mask(node, cpuset_current_mems_allowed) {
4837                 if (!mbind_nodemask || node_isset(node, *mbind_nodemask))
4838                         nr += array[node];
4839         }
4840
4841         return nr;
4842 }
4843
4844 #ifdef CONFIG_SYSCTL
4845 static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
4846                                           void *buffer, size_t *length,
4847                                           loff_t *ppos, unsigned long *out)
4848 {
4849         struct ctl_table dup_table;
4850
4851         /*
4852          * In order to avoid races with __do_proc_doulongvec_minmax(), we
4853          * can duplicate @table and alter only the duplicate.
4854          */
4855         dup_table = *table;
4856         dup_table.data = out;
4857
4858         return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
4859 }
4860
4861 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
4862                          struct ctl_table *table, int write,
4863                          void *buffer, size_t *length, loff_t *ppos)
4864 {
4865         struct hstate *h = &default_hstate;
4866         unsigned long tmp = h->max_huge_pages;
4867         int ret;
4868
4869         if (!hugepages_supported())
4870                 return -EOPNOTSUPP;
4871
4872         ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
4873                                              &tmp);
4874         if (ret)
4875                 goto out;
4876
4877         if (write)
4878                 ret = __nr_hugepages_store_common(obey_mempolicy, h,
4879                                                   NUMA_NO_NODE, tmp, *length);
4880 out:
4881         return ret;
4882 }
4883
4884 static int hugetlb_sysctl_handler(struct ctl_table *table, int write,
4885                           void *buffer, size_t *length, loff_t *ppos)
4886 {
4887
4888         return hugetlb_sysctl_handler_common(false, table, write,
4889                                                         buffer, length, ppos);
4890 }
4891
4892 #ifdef CONFIG_NUMA
4893 static int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
4894                           void *buffer, size_t *length, loff_t *ppos)
4895 {
4896         return hugetlb_sysctl_handler_common(true, table, write,
4897                                                         buffer, length, ppos);
4898 }
4899 #endif /* CONFIG_NUMA */
4900
4901 static int hugetlb_overcommit_handler(struct ctl_table *table, int write,
4902                 void *buffer, size_t *length, loff_t *ppos)
4903 {
4904         struct hstate *h = &default_hstate;
4905         unsigned long tmp;
4906         int ret;
4907
4908         if (!hugepages_supported())
4909                 return -EOPNOTSUPP;
4910
4911         tmp = h->nr_overcommit_huge_pages;
4912
4913         if (write && hstate_is_gigantic(h))
4914                 return -EINVAL;
4915
4916         ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
4917                                              &tmp);
4918         if (ret)
4919                 goto out;
4920
4921         if (write) {
4922                 spin_lock_irq(&hugetlb_lock);
4923                 h->nr_overcommit_huge_pages = tmp;
4924                 spin_unlock_irq(&hugetlb_lock);
4925         }
4926 out:
4927         return ret;
4928 }
4929
4930 static struct ctl_table hugetlb_table[] = {
4931         {
4932                 .procname       = "nr_hugepages",
4933                 .data           = NULL,
4934                 .maxlen         = sizeof(unsigned long),
4935                 .mode           = 0644,
4936                 .proc_handler   = hugetlb_sysctl_handler,
4937         },
4938 #ifdef CONFIG_NUMA
4939         {
4940                 .procname       = "nr_hugepages_mempolicy",
4941                 .data           = NULL,
4942                 .maxlen         = sizeof(unsigned long),
4943                 .mode           = 0644,
4944                 .proc_handler   = &hugetlb_mempolicy_sysctl_handler,
4945         },
4946 #endif
4947         {
4948                 .procname       = "hugetlb_shm_group",
4949                 .data           = &sysctl_hugetlb_shm_group,
4950                 .maxlen         = sizeof(gid_t),
4951                 .mode           = 0644,
4952                 .proc_handler   = proc_dointvec,
4953         },
4954         {
4955                 .procname       = "nr_overcommit_hugepages",
4956                 .data           = NULL,
4957                 .maxlen         = sizeof(unsigned long),
4958                 .mode           = 0644,
4959                 .proc_handler   = hugetlb_overcommit_handler,
4960         },
4961         { }
4962 };
4963
4964 static void hugetlb_sysctl_init(void)
4965 {
4966         register_sysctl_init("vm", hugetlb_table);
4967 }
4968 #endif /* CONFIG_SYSCTL */
4969
4970 void hugetlb_report_meminfo(struct seq_file *m)
4971 {
4972         struct hstate *h;
4973         unsigned long total = 0;
4974
4975         if (!hugepages_supported())
4976                 return;
4977
4978         for_each_hstate(h) {
4979                 unsigned long count = h->nr_huge_pages;
4980
4981                 total += huge_page_size(h) * count;
4982
4983                 if (h == &default_hstate)
4984                         seq_printf(m,
4985                                    "HugePages_Total:   %5lu\n"
4986                                    "HugePages_Free:    %5lu\n"
4987                                    "HugePages_Rsvd:    %5lu\n"
4988                                    "HugePages_Surp:    %5lu\n"
4989                                    "Hugepagesize:   %8lu kB\n",
4990                                    count,
4991                                    h->free_huge_pages,
4992                                    h->resv_huge_pages,
4993                                    h->surplus_huge_pages,
4994                                    huge_page_size(h) / SZ_1K);
4995         }
4996
4997         seq_printf(m, "Hugetlb:        %8lu kB\n", total / SZ_1K);
4998 }
4999
5000 int hugetlb_report_node_meminfo(char *buf, int len, int nid)
5001 {
5002         struct hstate *h = &default_hstate;
5003
5004         if (!hugepages_supported())
5005                 return 0;
5006
5007         return sysfs_emit_at(buf, len,
5008                              "Node %d HugePages_Total: %5u\n"
5009                              "Node %d HugePages_Free:  %5u\n"
5010                              "Node %d HugePages_Surp:  %5u\n",
5011                              nid, h->nr_huge_pages_node[nid],
5012                              nid, h->free_huge_pages_node[nid],
5013                              nid, h->surplus_huge_pages_node[nid]);
5014 }
5015
5016 void hugetlb_show_meminfo_node(int nid)
5017 {
5018         struct hstate *h;
5019
5020         if (!hugepages_supported())
5021                 return;
5022
5023         for_each_hstate(h)
5024                 printk("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
5025                         nid,
5026                         h->nr_huge_pages_node[nid],
5027                         h->free_huge_pages_node[nid],
5028                         h->surplus_huge_pages_node[nid],
5029                         huge_page_size(h) / SZ_1K);
5030 }
5031
5032 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
5033 {
5034         seq_printf(m, "HugetlbPages:\t%8lu kB\n",
5035                    K(atomic_long_read(&mm->hugetlb_usage)));
5036 }
5037
5038 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
5039 unsigned long hugetlb_total_pages(void)
5040 {
5041         struct hstate *h;
5042         unsigned long nr_total_pages = 0;
5043
5044         for_each_hstate(h)
5045                 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
5046         return nr_total_pages;
5047 }
5048
5049 static int hugetlb_acct_memory(struct hstate *h, long delta)
5050 {
5051         int ret = -ENOMEM;
5052
5053         if (!delta)
5054                 return 0;
5055
5056         spin_lock_irq(&hugetlb_lock);
5057         /*
5058          * When cpuset is configured, it breaks the strict hugetlb page
5059          * reservation as the accounting is done on a global variable. Such
5060          * reservation is completely rubbish in the presence of cpuset because
5061          * the reservation is not checked against page availability for the
5062          * current cpuset. Application can still potentially OOM'ed by kernel
5063          * with lack of free htlb page in cpuset that the task is in.
5064          * Attempt to enforce strict accounting with cpuset is almost
5065          * impossible (or too ugly) because cpuset is too fluid that
5066          * task or memory node can be dynamically moved between cpusets.
5067          *
5068          * The change of semantics for shared hugetlb mapping with cpuset is
5069          * undesirable. However, in order to preserve some of the semantics,
5070          * we fall back to check against current free page availability as
5071          * a best attempt and hopefully to minimize the impact of changing
5072          * semantics that cpuset has.
5073          *
5074          * Apart from cpuset, we also have memory policy mechanism that
5075          * also determines from which node the kernel will allocate memory
5076          * in a NUMA system. So similar to cpuset, we also should consider
5077          * the memory policy of the current task. Similar to the description
5078          * above.
5079          */
5080         if (delta > 0) {
5081                 if (gather_surplus_pages(h, delta) < 0)
5082                         goto out;
5083
5084                 if (delta > allowed_mems_nr(h)) {
5085                         return_unused_surplus_pages(h, delta);
5086                         goto out;
5087                 }
5088         }
5089
5090         ret = 0;
5091         if (delta < 0)
5092                 return_unused_surplus_pages(h, (unsigned long) -delta);
5093
5094 out:
5095         spin_unlock_irq(&hugetlb_lock);
5096         return ret;
5097 }
5098
5099 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
5100 {
5101         struct resv_map *resv = vma_resv_map(vma);
5102
5103         /*
5104          * HPAGE_RESV_OWNER indicates a private mapping.
5105          * This new VMA should share its sibling's reservation map if present.
5106          * The VMA will only ever have a valid reservation map pointer where
5107          * it is being copied for another still existing VMA.  As that VMA
5108          * has a reference to the reservation map it cannot disappear until
5109          * after this open call completes.  It is therefore safe to take a
5110          * new reference here without additional locking.
5111          */
5112         if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
5113                 resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
5114                 kref_get(&resv->refs);
5115         }
5116
5117         /*
5118          * vma_lock structure for sharable mappings is vma specific.
5119          * Clear old pointer (if copied via vm_area_dup) and allocate
5120          * new structure.  Before clearing, make sure vma_lock is not
5121          * for this vma.
5122          */
5123         if (vma->vm_flags & VM_MAYSHARE) {
5124                 struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
5125
5126                 if (vma_lock) {
5127                         if (vma_lock->vma != vma) {
5128                                 vma->vm_private_data = NULL;
5129                                 hugetlb_vma_lock_alloc(vma);
5130                         } else
5131                                 pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__);
5132                 } else
5133                         hugetlb_vma_lock_alloc(vma);
5134         }
5135 }
5136
5137 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
5138 {
5139         struct hstate *h = hstate_vma(vma);
5140         struct resv_map *resv;
5141         struct hugepage_subpool *spool = subpool_vma(vma);
5142         unsigned long reserve, start, end;
5143         long gbl_reserve;
5144
5145         hugetlb_vma_lock_free(vma);
5146
5147         resv = vma_resv_map(vma);
5148         if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
5149                 return;
5150
5151         start = vma_hugecache_offset(h, vma, vma->vm_start);
5152         end = vma_hugecache_offset(h, vma, vma->vm_end);
5153
5154         reserve = (end - start) - region_count(resv, start, end);
5155         hugetlb_cgroup_uncharge_counter(resv, start, end);
5156         if (reserve) {
5157                 /*
5158                  * Decrement reserve counts.  The global reserve count may be
5159                  * adjusted if the subpool has a minimum size.
5160                  */
5161                 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
5162                 hugetlb_acct_memory(h, -gbl_reserve);
5163         }
5164
5165         kref_put(&resv->refs, resv_map_release);
5166 }
5167
5168 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
5169 {
5170         if (addr & ~(huge_page_mask(hstate_vma(vma))))
5171                 return -EINVAL;
5172
5173         /*
5174          * PMD sharing is only possible for PUD_SIZE-aligned address ranges
5175          * in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this
5176          * split, unshare PMDs in the PUD_SIZE interval surrounding addr now.
5177          */
5178         if (addr & ~PUD_MASK) {
5179                 /*
5180                  * hugetlb_vm_op_split is called right before we attempt to
5181                  * split the VMA. We will need to unshare PMDs in the old and
5182                  * new VMAs, so let's unshare before we split.
5183                  */
5184                 unsigned long floor = addr & PUD_MASK;
5185                 unsigned long ceil = floor + PUD_SIZE;
5186
5187                 if (floor >= vma->vm_start && ceil <= vma->vm_end)
5188                         hugetlb_unshare_pmds(vma, floor, ceil);
5189         }
5190
5191         return 0;
5192 }
5193
5194 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
5195 {
5196         return huge_page_size(hstate_vma(vma));
5197 }
5198
5199 /*
5200  * We cannot handle pagefaults against hugetlb pages at all.  They cause
5201  * handle_mm_fault() to try to instantiate regular-sized pages in the
5202  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
5203  * this far.
5204  */
5205 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
5206 {
5207         BUG();
5208         return 0;
5209 }
5210
5211 /*
5212  * When a new function is introduced to vm_operations_struct and added
5213  * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
5214  * This is because under System V memory model, mappings created via
5215  * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
5216  * their original vm_ops are overwritten with shm_vm_ops.
5217  */
5218 const struct vm_operations_struct hugetlb_vm_ops = {
5219         .fault = hugetlb_vm_op_fault,
5220         .open = hugetlb_vm_op_open,
5221         .close = hugetlb_vm_op_close,
5222         .may_split = hugetlb_vm_op_split,
5223         .pagesize = hugetlb_vm_op_pagesize,
5224 };
5225
5226 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
5227                                 int writable)
5228 {
5229         pte_t entry;
5230         unsigned int shift = huge_page_shift(hstate_vma(vma));
5231
5232         if (writable) {
5233                 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
5234                                          vma->vm_page_prot)));
5235         } else {
5236                 entry = huge_pte_wrprotect(mk_huge_pte(page,
5237                                            vma->vm_page_prot));
5238         }
5239         entry = pte_mkyoung(entry);
5240         entry = arch_make_huge_pte(entry, shift, vma->vm_flags);
5241
5242         return entry;
5243 }
5244
5245 static void set_huge_ptep_writable(struct vm_area_struct *vma,
5246                                    unsigned long address, pte_t *ptep)
5247 {
5248         pte_t entry;
5249
5250         entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
5251         if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
5252                 update_mmu_cache(vma, address, ptep);
5253 }
5254
5255 bool is_hugetlb_entry_migration(pte_t pte)
5256 {
5257         swp_entry_t swp;
5258
5259         if (huge_pte_none(pte) || pte_present(pte))
5260                 return false;
5261         swp = pte_to_swp_entry(pte);
5262         if (is_migration_entry(swp))
5263                 return true;
5264         else
5265                 return false;
5266 }
5267
5268 bool is_hugetlb_entry_hwpoisoned(pte_t pte)
5269 {
5270         swp_entry_t swp;
5271
5272         if (huge_pte_none(pte) || pte_present(pte))
5273                 return false;
5274         swp = pte_to_swp_entry(pte);
5275         if (is_hwpoison_entry(swp))
5276                 return true;
5277         else
5278                 return false;
5279 }
5280
5281 static void
5282 hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
5283                       struct folio *new_folio, pte_t old, unsigned long sz)
5284 {
5285         pte_t newpte = make_huge_pte(vma, &new_folio->page, 1);
5286
5287         __folio_mark_uptodate(new_folio);
5288         hugetlb_add_new_anon_rmap(new_folio, vma, addr);
5289         if (userfaultfd_wp(vma) && huge_pte_uffd_wp(old))
5290                 newpte = huge_pte_mkuffd_wp(newpte);
5291         set_huge_pte_at(vma->vm_mm, addr, ptep, newpte, sz);
5292         hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm);
5293         folio_set_hugetlb_migratable(new_folio);
5294 }
5295
5296 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
5297                             struct vm_area_struct *dst_vma,
5298                             struct vm_area_struct *src_vma)
5299 {
5300         pte_t *src_pte, *dst_pte, entry;
5301         struct folio *pte_folio;
5302         unsigned long addr;
5303         bool cow = is_cow_mapping(src_vma->vm_flags);
5304         struct hstate *h = hstate_vma(src_vma);
5305         unsigned long sz = huge_page_size(h);
5306         unsigned long npages = pages_per_huge_page(h);
5307         struct mmu_notifier_range range;
5308         unsigned long last_addr_mask;
5309         int ret = 0;
5310
5311         if (cow) {
5312                 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src,
5313                                         src_vma->vm_start,
5314                                         src_vma->vm_end);
5315                 mmu_notifier_invalidate_range_start(&range);
5316                 vma_assert_write_locked(src_vma);
5317                 raw_write_seqcount_begin(&src->write_protect_seq);
5318         } else {
5319                 /*
5320                  * For shared mappings the vma lock must be held before
5321                  * calling hugetlb_walk() in the src vma. Otherwise, the
5322                  * returned ptep could go away if part of a shared pmd and
5323                  * another thread calls huge_pmd_unshare.
5324                  */
5325                 hugetlb_vma_lock_read(src_vma);
5326         }
5327
5328         last_addr_mask = hugetlb_mask_last_page(h);
5329         for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) {
5330                 spinlock_t *src_ptl, *dst_ptl;
5331                 src_pte = hugetlb_walk(src_vma, addr, sz);
5332                 if (!src_pte) {
5333                         addr |= last_addr_mask;
5334                         continue;
5335                 }
5336                 dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz);
5337                 if (!dst_pte) {
5338                         ret = -ENOMEM;
5339                         break;
5340                 }
5341
5342                 /*
5343                  * If the pagetables are shared don't copy or take references.
5344                  *
5345                  * dst_pte == src_pte is the common case of src/dest sharing.
5346                  * However, src could have 'unshared' and dst shares with
5347                  * another vma. So page_count of ptep page is checked instead
5348                  * to reliably determine whether pte is shared.
5349                  */
5350                 if (page_count(virt_to_page(dst_pte)) > 1) {
5351                         addr |= last_addr_mask;
5352                         continue;
5353                 }
5354
5355                 dst_ptl = huge_pte_lock(h, dst, dst_pte);
5356                 src_ptl = huge_pte_lockptr(h, src, src_pte);
5357                 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
5358                 entry = huge_ptep_get(src_pte);
5359 again:
5360                 if (huge_pte_none(entry)) {
5361                         /*
5362                          * Skip if src entry none.
5363                          */
5364                         ;
5365                 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) {
5366                         if (!userfaultfd_wp(dst_vma))
5367                                 entry = huge_pte_clear_uffd_wp(entry);
5368                         set_huge_pte_at(dst, addr, dst_pte, entry, sz);
5369                 } else if (unlikely(is_hugetlb_entry_migration(entry))) {
5370                         swp_entry_t swp_entry = pte_to_swp_entry(entry);
5371                         bool uffd_wp = pte_swp_uffd_wp(entry);
5372
5373                         if (!is_readable_migration_entry(swp_entry) && cow) {
5374                                 /*
5375                                  * COW mappings require pages in both
5376                                  * parent and child to be set to read.
5377                                  */
5378                                 swp_entry = make_readable_migration_entry(
5379                                                         swp_offset(swp_entry));
5380                                 entry = swp_entry_to_pte(swp_entry);
5381                                 if (userfaultfd_wp(src_vma) && uffd_wp)
5382                                         entry = pte_swp_mkuffd_wp(entry);
5383                                 set_huge_pte_at(src, addr, src_pte, entry, sz);
5384                         }
5385                         if (!userfaultfd_wp(dst_vma))
5386                                 entry = huge_pte_clear_uffd_wp(entry);
5387                         set_huge_pte_at(dst, addr, dst_pte, entry, sz);
5388                 } else if (unlikely(is_pte_marker(entry))) {
5389                         pte_marker marker = copy_pte_marker(
5390                                 pte_to_swp_entry(entry), dst_vma);
5391
5392                         if (marker)
5393                                 set_huge_pte_at(dst, addr, dst_pte,
5394                                                 make_pte_marker(marker), sz);
5395                 } else {
5396                         entry = huge_ptep_get(src_pte);
5397                         pte_folio = page_folio(pte_page(entry));
5398                         folio_get(pte_folio);
5399
5400                         /*
5401                          * Failing to duplicate the anon rmap is a rare case
5402                          * where we see pinned hugetlb pages while they're
5403                          * prone to COW. We need to do the COW earlier during
5404                          * fork.
5405                          *
5406                          * When pre-allocating the page or copying data, we
5407                          * need to be without the pgtable locks since we could
5408                          * sleep during the process.
5409                          */
5410                         if (!folio_test_anon(pte_folio)) {
5411                                 hugetlb_add_file_rmap(pte_folio);
5412                         } else if (hugetlb_try_dup_anon_rmap(pte_folio, src_vma)) {
5413                                 pte_t src_pte_old = entry;
5414                                 struct folio *new_folio;
5415
5416                                 spin_unlock(src_ptl);
5417                                 spin_unlock(dst_ptl);
5418                                 /* Do not use reserve as it's privately owned */
5419                                 new_folio = alloc_hugetlb_folio(dst_vma, addr, 1);
5420                                 if (IS_ERR(new_folio)) {
5421                                         folio_put(pte_folio);
5422                                         ret = PTR_ERR(new_folio);
5423                                         break;
5424                                 }
5425                                 ret = copy_user_large_folio(new_folio,
5426                                                             pte_folio,
5427                                                             addr, dst_vma);
5428                                 folio_put(pte_folio);
5429                                 if (ret) {
5430                                         folio_put(new_folio);
5431                                         break;
5432                                 }
5433
5434                                 /* Install the new hugetlb folio if src pte stable */
5435                                 dst_ptl = huge_pte_lock(h, dst, dst_pte);
5436                                 src_ptl = huge_pte_lockptr(h, src, src_pte);
5437                                 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
5438                                 entry = huge_ptep_get(src_pte);
5439                                 if (!pte_same(src_pte_old, entry)) {
5440                                         restore_reserve_on_error(h, dst_vma, addr,
5441                                                                 new_folio);
5442                                         folio_put(new_folio);
5443                                         /* huge_ptep of dst_pte won't change as in child */
5444                                         goto again;
5445                                 }
5446                                 hugetlb_install_folio(dst_vma, dst_pte, addr,
5447                                                       new_folio, src_pte_old, sz);
5448                                 spin_unlock(src_ptl);
5449                                 spin_unlock(dst_ptl);
5450                                 continue;
5451                         }
5452
5453                         if (cow) {
5454                                 /*
5455                                  * No need to notify as we are downgrading page
5456                                  * table protection, not changing it to point
5457                                  * to a new page.
5458                                  *
5459                                  * See Documentation/mm/mmu_notifier.rst
5460                                  */
5461                                 huge_ptep_set_wrprotect(src, addr, src_pte);
5462                                 entry = huge_pte_wrprotect(entry);
5463                         }
5464
5465                         if (!userfaultfd_wp(dst_vma))
5466                                 entry = huge_pte_clear_uffd_wp(entry);
5467
5468                         set_huge_pte_at(dst, addr, dst_pte, entry, sz);
5469                         hugetlb_count_add(npages, dst);
5470                 }
5471                 spin_unlock(src_ptl);
5472                 spin_unlock(dst_ptl);
5473         }
5474
5475         if (cow) {
5476                 raw_write_seqcount_end(&src->write_protect_seq);
5477                 mmu_notifier_invalidate_range_end(&range);
5478         } else {
5479                 hugetlb_vma_unlock_read(src_vma);
5480         }
5481
5482         return ret;
5483 }
5484
5485 static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
5486                           unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte,
5487                           unsigned long sz)
5488 {
5489         struct hstate *h = hstate_vma(vma);
5490         struct mm_struct *mm = vma->vm_mm;
5491         spinlock_t *src_ptl, *dst_ptl;
5492         pte_t pte;
5493
5494         dst_ptl = huge_pte_lock(h, mm, dst_pte);
5495         src_ptl = huge_pte_lockptr(h, mm, src_pte);
5496
5497         /*
5498          * We don't have to worry about the ordering of src and dst ptlocks
5499          * because exclusive mmap_lock (or the i_mmap_lock) prevents deadlock.
5500          */
5501         if (src_ptl != dst_ptl)
5502                 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
5503
5504         pte = huge_ptep_get_and_clear(mm, old_addr, src_pte);
5505         set_huge_pte_at(mm, new_addr, dst_pte, pte, sz);
5506
5507         if (src_ptl != dst_ptl)
5508                 spin_unlock(src_ptl);
5509         spin_unlock(dst_ptl);
5510 }
5511
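/*
 * Move the hugetlb page tables for [old_addr, old_addr + len) in vma over to
 * new_addr in new_vma.  Shared PMDs are unshared first, and the vma lock and
 * i_mmap_rwsem are held in write mode to prevent a race with file truncation.
 * Returns the number of bytes actually moved.
 */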
5512 int move_hugetlb_page_tables(struct vm_area_struct *vma,
5513                              struct vm_area_struct *new_vma,
5514                              unsigned long old_addr, unsigned long new_addr,
5515                              unsigned long len)
5516 {
5517         struct hstate *h = hstate_vma(vma);
5518         struct address_space *mapping = vma->vm_file->f_mapping;
5519         unsigned long sz = huge_page_size(h);
5520         struct mm_struct *mm = vma->vm_mm;
5521         unsigned long old_end = old_addr + len;
5522         unsigned long last_addr_mask;
5523         pte_t *src_pte, *dst_pte;
5524         struct mmu_notifier_range range;
5525         bool shared_pmd = false;
5526
5527         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, old_addr,
5528                                 old_end);
5529         adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5530         /*
5531          * In case of shared PMDs, we should cover the maximum possible
5532          * range.
5533          */
5534         flush_cache_range(vma, range.start, range.end);
5535
5536         mmu_notifier_invalidate_range_start(&range);
5537         last_addr_mask = hugetlb_mask_last_page(h);
5538         /* Prevent race with file truncation */
5539         hugetlb_vma_lock_write(vma);
5540         i_mmap_lock_write(mapping);
5541         for (; old_addr < old_end; old_addr += sz, new_addr += sz) {
5542                 src_pte = hugetlb_walk(vma, old_addr, sz);
5543                 if (!src_pte) {
5544                         old_addr |= last_addr_mask;
5545                         new_addr |= last_addr_mask;
5546                         continue;
5547                 }
5548                 if (huge_pte_none(huge_ptep_get(src_pte)))
5549                         continue;
5550
5551                 if (huge_pmd_unshare(mm, vma, old_addr, src_pte)) {
5552                         shared_pmd = true;
5553                         old_addr |= last_addr_mask;
5554                         new_addr |= last_addr_mask;
5555                         continue;
5556                 }
5557
5558                 dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz);
5559                 if (!dst_pte)
5560                         break;
5561
5562                 move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte, sz);
5563         }
5564
5565         if (shared_pmd)
5566                 flush_hugetlb_tlb_range(vma, range.start, range.end);
5567         else
5568                 flush_hugetlb_tlb_range(vma, old_end - len, old_end);
5569         mmu_notifier_invalidate_range_end(&range);
5570         i_mmap_unlock_write(mapping);
5571         hugetlb_vma_unlock_write(vma);
5572
5573         return len + old_addr - old_end;
5574 }
5575
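/*
 * Unmap hugetlb pages in [start, end).  If ref_page is supplied, only that
 * page is unmapped and HPAGE_RESV_UNMAPPED is set on the VMA so that later
 * faults fail instead of silently losing data.  Any PMDs unshared here are
 * flushed before returning, because the shared PMD page may be freed once
 * i_mmap_rwsem is dropped.
 */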
5576 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
5577                             unsigned long start, unsigned long end,
5578                             struct page *ref_page, zap_flags_t zap_flags)
5579 {
5580         struct mm_struct *mm = vma->vm_mm;
5581         unsigned long address;
5582         pte_t *ptep;
5583         pte_t pte;
5584         spinlock_t *ptl;
5585         struct page *page;
5586         struct hstate *h = hstate_vma(vma);
5587         unsigned long sz = huge_page_size(h);
5588         unsigned long last_addr_mask;
5589         bool force_flush = false;
5590
5591         WARN_ON(!is_vm_hugetlb_page(vma));
5592         BUG_ON(start & ~huge_page_mask(h));
5593         BUG_ON(end & ~huge_page_mask(h));
5594
5595         /*
5596          * This is a hugetlb vma; all the pte entries should point
5597          * to a huge page.
5598          */
5599         tlb_change_page_size(tlb, sz);
5600         tlb_start_vma(tlb, vma);
5601
5602         last_addr_mask = hugetlb_mask_last_page(h);
5603         address = start;
5604         for (; address < end; address += sz) {
5605                 ptep = hugetlb_walk(vma, address, sz);
5606                 if (!ptep) {
5607                         address |= last_addr_mask;
5608                         continue;
5609                 }
5610
5611                 ptl = huge_pte_lock(h, mm, ptep);
5612                 if (huge_pmd_unshare(mm, vma, address, ptep)) {
5613                         spin_unlock(ptl);
5614                         tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
5615                         force_flush = true;
5616                         address |= last_addr_mask;
5617                         continue;
5618                 }
5619
5620                 pte = huge_ptep_get(ptep);
5621                 if (huge_pte_none(pte)) {
5622                         spin_unlock(ptl);
5623                         continue;
5624                 }
5625
5626                 /*
5627                  * Migrating hugepage or HWPoisoned hugepage is already
5628                  * unmapped and its refcount is dropped, so just clear pte here.
5629                  */
5630                 if (unlikely(!pte_present(pte))) {
5631                         /*
5632                          * If the pte was wr-protected by uffd-wp in any of the
5633                          * swap forms, meanwhile the caller does not want to
5634                          * drop the uffd-wp bit in this zap, then replace the
5635                          * pte with a marker.
5636                          */
5637                         if (pte_swp_uffd_wp_any(pte) &&
5638                             !(zap_flags & ZAP_FLAG_DROP_MARKER))
5639                                 set_huge_pte_at(mm, address, ptep,
5640                                                 make_pte_marker(PTE_MARKER_UFFD_WP),
5641                                                 sz);
5642                         else
5643                                 huge_pte_clear(mm, address, ptep, sz);
5644                         spin_unlock(ptl);
5645                         continue;
5646                 }
5647
5648                 page = pte_page(pte);
5649                 /*
5650                  * If a reference page is supplied, it is because a specific
5651                  * page is being unmapped, not a range. Ensure the page we
5652                  * are about to unmap is the actual page of interest.
5653                  */
5654                 if (ref_page) {
5655                         if (page != ref_page) {
5656                                 spin_unlock(ptl);
5657                                 continue;
5658                         }
5659                         /*
5660                          * Mark the VMA as having unmapped its page so that
5661                          * future faults in this VMA will fail rather than
5662                          * looking like data was lost
5663                          */
5664                         set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
5665                 }
5666
5667                 pte = huge_ptep_get_and_clear(mm, address, ptep);
5668                 tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
5669                 if (huge_pte_dirty(pte))
5670                         set_page_dirty(page);
5671                 /* Leave a uffd-wp pte marker if needed */
5672                 if (huge_pte_uffd_wp(pte) &&
5673                     !(zap_flags & ZAP_FLAG_DROP_MARKER))
5674                         set_huge_pte_at(mm, address, ptep,
5675                                         make_pte_marker(PTE_MARKER_UFFD_WP),
5676                                         sz);
5677                 hugetlb_count_sub(pages_per_huge_page(h), mm);
5678                 hugetlb_remove_rmap(page_folio(page));
5679
5680                 spin_unlock(ptl);
5681                 tlb_remove_page_size(tlb, page, huge_page_size(h));
5682                 /*
5683                  * Bail out after unmapping reference page if supplied
5684                  */
5685                 if (ref_page)
5686                         break;
5687         }
5688         tlb_end_vma(tlb, vma);
5689
5690         /*
5691          * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
5692          * could defer the flush until now, since by holding i_mmap_rwsem we
5693          * guaranteed that the last reference would not be dropped. But we must
5694          * do the flushing before we return, as otherwise i_mmap_rwsem will be
5695          * dropped and the last reference to the shared PMDs page might be
5696          * dropped as well.
5697          *
5698          * In theory we could defer the freeing of the PMD pages as well, but
5699          * huge_pmd_unshare() relies on the exact page_count for the PMD page to
5700          * detect sharing, so we cannot defer the release of the page either.
5701          * Instead, do flush now.
5702          */
5703         if (force_flush)
5704                 tlb_flush_mmu_tlbonly(tlb);
5705 }
5706
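/*
 * __hugetlb_zap_begin()/__hugetlb_zap_end() bracket a hugetlb zap: they take
 * and release the hugetlb vma lock and i_mmap_rwsem around the actual unmap.
 * On a final unmap (ZAP_FLAG_UNMAP) the vma lock is freed as well, making the
 * vma ineligible for future PMD sharing.
 */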
5707 void __hugetlb_zap_begin(struct vm_area_struct *vma,
5708                          unsigned long *start, unsigned long *end)
5709 {
5710         if (!vma->vm_file)      /* hugetlbfs_file_mmap error */
5711                 return;
5712
5713         adjust_range_if_pmd_sharing_possible(vma, start, end);
5714         hugetlb_vma_lock_write(vma);
5715         if (vma->vm_file)
5716                 i_mmap_lock_write(vma->vm_file->f_mapping);
5717 }
5718
5719 void __hugetlb_zap_end(struct vm_area_struct *vma,
5720                        struct zap_details *details)
5721 {
5722         zap_flags_t zap_flags = details ? details->zap_flags : 0;
5723
5724         if (!vma->vm_file)      /* hugetlbfs_file_mmap error */
5725                 return;
5726
5727         if (zap_flags & ZAP_FLAG_UNMAP) {       /* final unmap */
5728                 /*
5729                  * Unlock and free the vma lock before releasing i_mmap_rwsem.
5730                  * When the vma_lock is freed, this makes the vma ineligible
5731                  * for pmd sharing.  And, i_mmap_rwsem is required to set up
5732                  * pmd sharing.  This is important as page tables for this
5733                  * unmapped range will be asynchronously deleted.  If the page
5734                  * tables are shared, there will be issues when accessed by
5735                  * someone else.
5736                  */
5737                 __hugetlb_vma_unlock_write_free(vma);
5738         } else {
5739                 hugetlb_vma_unlock_write(vma);
5740         }
5741
5742         if (vma->vm_file)
5743                 i_mmap_unlock_write(vma->vm_file->f_mapping);
5744 }
5745
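/*
 * Wrapper around __unmap_hugepage_range() that sets up its own mmu_gather and
 * mmu notifier range, for callers that have not already done so.
 */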
5746 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
5747                           unsigned long end, struct page *ref_page,
5748                           zap_flags_t zap_flags)
5749 {
5750         struct mmu_notifier_range range;
5751         struct mmu_gather tlb;
5752
5753         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
5754                                 start, end);
5755         adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5756         mmu_notifier_invalidate_range_start(&range);
5757         tlb_gather_mmu(&tlb, vma->vm_mm);
5758
5759         __unmap_hugepage_range(&tlb, vma, start, end, ref_page, zap_flags);
5760
5761         mmu_notifier_invalidate_range_end(&range);
5762         tlb_finish_mmu(&tlb);
5763 }
5764
5765 /*
5766  * This is called when the original mapper is failing to COW a MAP_PRIVATE
5767  * mapping it owns the reserve page for. The intention is to unmap the page
5768  * from other VMAs and let the children be SIGKILLed if they are faulting the
5769  * same region.
5770  */
5771 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
5772                               struct page *page, unsigned long address)
5773 {
5774         struct hstate *h = hstate_vma(vma);
5775         struct vm_area_struct *iter_vma;
5776         struct address_space *mapping;
5777         pgoff_t pgoff;
5778
5779         /*
5780          * vm_pgoff is in PAGE_SIZE units, hence the different calculation
5781          * from page cache lookup which is in HPAGE_SIZE units.
5782          */
5783         address = address & huge_page_mask(h);
5784         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
5785                         vma->vm_pgoff;
5786         mapping = vma->vm_file->f_mapping;
5787
5788         /*
5789          * Take the mapping lock for the duration of the table walk. As
5790          * this mapping is shared among all the VMAs,
5791          * __unmap_hugepage_range() is called with the lock already held.
5792          */
5793         i_mmap_lock_write(mapping);
5794         vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
5795                 /* Do not unmap the current VMA */
5796                 if (iter_vma == vma)
5797                         continue;
5798
5799                 /*
5800                  * Shared VMAs have their own reserves and do not affect
5801                  * MAP_PRIVATE accounting but it is possible that a shared
5802                  * VMA is using the same page so check and skip such VMAs.
5803                  */
5804                 if (iter_vma->vm_flags & VM_MAYSHARE)
5805                         continue;
5806
5807                 /*
5808                  * Unmap the page from other VMAs without their own reserves.
5809                  * They get marked to be SIGKILLed if they fault in these
5810                  * areas. This is because a future no-page fault on this VMA
5811                  * could insert a zeroed page instead of the data existing
5812                  * from the time of fork. This would look like data corruption
5813                  */
5814                 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
5815                         unmap_hugepage_range(iter_vma, address,
5816                                              address + huge_page_size(h), page, 0);
5817         }
5818         i_mmap_unlock_write(mapping);
5819 }
5820
5821 /*
5822  * hugetlb_wp() should be called with page lock of the original hugepage held.
5823  * Called with hugetlb_fault_mutex_table held and pte_page locked so we
5824  * cannot race with other handlers or page migration.
5825  * Keep the pte_same checks anyway to make transition from the mutex easier.
5826  */
5827 static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
5828                        unsigned long address, pte_t *ptep, unsigned int flags,
5829                        struct folio *pagecache_folio, spinlock_t *ptl)
5830 {
5831         const bool unshare = flags & FAULT_FLAG_UNSHARE;
5832         pte_t pte = huge_ptep_get(ptep);
5833         struct hstate *h = hstate_vma(vma);
5834         struct folio *old_folio;
5835         struct folio *new_folio;
5836         int outside_reserve = 0;
5837         vm_fault_t ret = 0;
5838         unsigned long haddr = address & huge_page_mask(h);
5839         struct mmu_notifier_range range;
5840
5841         /*
5842          * Never handle CoW for uffd-wp protected pages.  It should be only
5843          * handled when the uffd-wp protection is removed.
5844          *
5845          * Note that only the CoW optimization path (in hugetlb_no_page())
5846          * can trigger this, because hugetlb_fault() will always resolve
5847          * uffd-wp bit first.
5848          */
5849         if (!unshare && huge_pte_uffd_wp(pte))
5850                 return 0;
5851
5852         /*
5853          * hugetlb does not support FOLL_FORCE-style write faults that keep the
5854          * PTE mapped R/O such as maybe_mkwrite() would do.
5855          */
5856         if (WARN_ON_ONCE(!unshare && !(vma->vm_flags & VM_WRITE)))
5857                 return VM_FAULT_SIGSEGV;
5858
5859         /* Let's take out MAP_SHARED mappings first. */
5860         if (vma->vm_flags & VM_MAYSHARE) {
5861                 set_huge_ptep_writable(vma, haddr, ptep);
5862                 return 0;
5863         }
5864
5865         old_folio = page_folio(pte_page(pte));
5866
5867         delayacct_wpcopy_start();
5868
5869 retry_avoidcopy:
5870         /*
5871          * If no-one else is actually using this page, we're the exclusive
5872          * owner and can reuse this page.
5873          */
5874         if (folio_mapcount(old_folio) == 1 && folio_test_anon(old_folio)) {
5875                 if (!PageAnonExclusive(&old_folio->page)) {
5876                         folio_move_anon_rmap(old_folio, vma);
5877                         SetPageAnonExclusive(&old_folio->page);
5878                 }
5879                 if (likely(!unshare))
5880                         set_huge_ptep_writable(vma, haddr, ptep);
5881
5882                 delayacct_wpcopy_end();
5883                 return 0;
5884         }
5885         VM_BUG_ON_PAGE(folio_test_anon(old_folio) &&
5886                        PageAnonExclusive(&old_folio->page), &old_folio->page);
5887
5888         /*
5889          * If the process that created a MAP_PRIVATE mapping is about to
5890          * perform a COW due to a shared page count, attempt to satisfy
5891          * the allocation without using the existing reserves. The pagecache
5892          * page is used to determine if the reserve at this address was
5893          * consumed or not. If reserves were used, a partial faulted mapping
5894          * at the time of fork() could consume its reserves on COW instead
5895          * of the full address range.
5896          */
5897         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
5898                         old_folio != pagecache_folio)
5899                 outside_reserve = 1;
5900
5901         folio_get(old_folio);
5902
5903         /*
5904          * Drop page table lock as buddy allocator may be called. It will
5905          * be acquired again before returning to the caller, as expected.
5906          */
5907         spin_unlock(ptl);
5908         new_folio = alloc_hugetlb_folio(vma, haddr, outside_reserve);
5909
5910         if (IS_ERR(new_folio)) {
5911                 /*
5912                  * If a process owning a MAP_PRIVATE mapping fails to COW,
5913                  * it is due to references held by a child and an insufficient
5914                  * huge page pool. To guarantee the original mapper's
5915                  * reliability, unmap the page from child processes. The child
5916                  * may get SIGKILLed if it later faults.
5917                  */
5918                 if (outside_reserve) {
5919                         struct address_space *mapping = vma->vm_file->f_mapping;
5920                         pgoff_t idx;
5921                         u32 hash;
5922
5923                         folio_put(old_folio);
5924                         /*
5925                          * Drop hugetlb_fault_mutex and vma_lock before
5926                          * unmapping.  unmapping needs to hold vma_lock
5927                          * in write mode.  Dropping vma_lock in read mode
5928                          * here is OK as COW mappings do not interact with
5929                          * PMD sharing.
5930                          *
5931                          * Reacquire both after unmap operation.
5932                          */
5933                         idx = vma_hugecache_offset(h, vma, haddr);
5934                         hash = hugetlb_fault_mutex_hash(mapping, idx);
5935                         hugetlb_vma_unlock_read(vma);
5936                         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5937
5938                         unmap_ref_private(mm, vma, &old_folio->page, haddr);
5939
5940                         mutex_lock(&hugetlb_fault_mutex_table[hash]);
5941                         hugetlb_vma_lock_read(vma);
5942                         spin_lock(ptl);
5943                         ptep = hugetlb_walk(vma, haddr, huge_page_size(h));
5944                         if (likely(ptep &&
5945                                    pte_same(huge_ptep_get(ptep), pte)))
5946                                 goto retry_avoidcopy;
5947                         /*
5948                          * A race occurred while re-acquiring the page table
5949                          * lock; our job is done.
5950                          */
5951                         delayacct_wpcopy_end();
5952                         return 0;
5953                 }
5954
5955                 ret = vmf_error(PTR_ERR(new_folio));
5956                 goto out_release_old;
5957         }
5958
5959         /*
5960          * When the original hugepage is a shared one, it does not have
5961          * anon_vma prepared.
5962          */
5963         if (unlikely(anon_vma_prepare(vma))) {
5964                 ret = VM_FAULT_OOM;
5965                 goto out_release_all;
5966         }
5967
5968         if (copy_user_large_folio(new_folio, old_folio, address, vma)) {
5969                 ret = VM_FAULT_HWPOISON_LARGE;
5970                 goto out_release_all;
5971         }
5972         __folio_mark_uptodate(new_folio);
5973
5974         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, haddr,
5975                                 haddr + huge_page_size(h));
5976         mmu_notifier_invalidate_range_start(&range);
5977
5978         /*
5979          * Retake the page table lock to check for racing updates
5980          * before the page tables are altered
5981          */
5982         spin_lock(ptl);
5983         ptep = hugetlb_walk(vma, haddr, huge_page_size(h));
5984         if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
5985                 pte_t newpte = make_huge_pte(vma, &new_folio->page, !unshare);
5986
5987                 /* Break COW or unshare */
5988                 huge_ptep_clear_flush(vma, haddr, ptep);
5989                 hugetlb_remove_rmap(old_folio);
5990                 hugetlb_add_new_anon_rmap(new_folio, vma, haddr);
5991                 if (huge_pte_uffd_wp(pte))
5992                         newpte = huge_pte_mkuffd_wp(newpte);
5993                 set_huge_pte_at(mm, haddr, ptep, newpte, huge_page_size(h));
5994                 folio_set_hugetlb_migratable(new_folio);
5995                 /* Make the old page be freed below */
5996                 new_folio = old_folio;
5997         }
5998         spin_unlock(ptl);
5999         mmu_notifier_invalidate_range_end(&range);
6000 out_release_all:
6001         /*
6002          * No restore in case of successful pagetable update (Break COW or
6003          * unshare)
6004          */
6005         if (new_folio != old_folio)
6006                 restore_reserve_on_error(h, vma, haddr, new_folio);
6007         folio_put(new_folio);
6008 out_release_old:
6009         folio_put(old_folio);
6010
6011         spin_lock(ptl); /* Caller expects lock to be held */
6012
6013         delayacct_wpcopy_end();
6014         return ret;
6015 }
6016
6017 /*
6018  * Return whether there is a pagecache page to back the given address within the VMA.
6019  */
6020 static bool hugetlbfs_pagecache_present(struct hstate *h,
6021                         struct vm_area_struct *vma, unsigned long address)
6022 {
6023         struct address_space *mapping = vma->vm_file->f_mapping;
6024         pgoff_t idx = linear_page_index(vma, address);
6025         struct folio *folio;
6026
6027         folio = filemap_get_folio(mapping, idx);
6028         if (IS_ERR(folio))
6029                 return false;
6030         folio_put(folio);
6031         return true;
6032 }
6033
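/*
 * Insert a hugetlb folio into the mapping's page cache at the given hugetlb
 * index (scaled to base-page units), mark it dirty so that generic,
 * non-hugetlbfs code paths do not remove it, and account its blocks to the
 * inode.
 */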
6034 int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
6035                            pgoff_t idx)
6036 {
6037         struct inode *inode = mapping->host;
6038         struct hstate *h = hstate_inode(inode);
6039         int err;
6040
6041         idx <<= huge_page_order(h);
6042         __folio_set_locked(folio);
6043         err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);
6044
6045         if (unlikely(err)) {
6046                 __folio_clear_locked(folio);
6047                 return err;
6048         }
6049         folio_clear_hugetlb_restore_reserve(folio);
6050
6051         /*
6052          * mark folio dirty so that it will not be removed from cache/file
6053          * by non-hugetlbfs specific code paths.
6054          */
6055         folio_mark_dirty(folio);
6056
6057         spin_lock(&inode->i_lock);
6058         inode->i_blocks += blocks_per_huge_page(h);
6059         spin_unlock(&inode->i_lock);
6060         return 0;
6061 }
6062
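/*
 * Hand a missing/minor fault off to userfaultfd.  The vma lock and the
 * hugetlb fault mutex are dropped before calling handle_userfault(), which
 * may also drop mmap_lock while the fault is handled in userspace.
 */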
6063 static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma,
6064                                                   struct address_space *mapping,
6065                                                   pgoff_t idx,
6066                                                   unsigned int flags,
6067                                                   unsigned long haddr,
6068                                                   unsigned long addr,
6069                                                   unsigned long reason)
6070 {
6071         u32 hash;
6072         struct vm_fault vmf = {
6073                 .vma = vma,
6074                 .address = haddr,
6075                 .real_address = addr,
6076                 .flags = flags,
6077
6078                 /*
6079                  * Hard to debug if it ends up being
6080                  * used by a callee that assumes
6081                  * something about the other
6082                  * uninitialized fields... same as in
6083                  * memory.c
6084                  */
6085         };
6086
6087         /*
6088          * vma_lock and hugetlb_fault_mutex must be dropped before handling
6089          * userfault. Also, mmap_lock could be dropped due to handling
6090          * userfault, so any vma operation must be careful from here on.
6091          */
6092         hugetlb_vma_unlock_read(vma);
6093         hash = hugetlb_fault_mutex_hash(mapping, idx);
6094         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6095         return handle_userfault(&vmf, reason);
6096 }
6097
6098 /*
6099  * Recheck pte with pgtable lock.  Returns true if pte didn't change, or
6100  * false if pte changed or is changing.
6101  */
6102 static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm,
6103                                pte_t *ptep, pte_t old_pte)
6104 {
6105         spinlock_t *ptl;
6106         bool same;
6107
6108         ptl = huge_pte_lock(h, mm, ptep);
6109         same = pte_same(huge_ptep_get(ptep), old_pte);
6110         spin_unlock(ptl);
6111
6112         return same;
6113 }
6114
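/*
 * Handle a hugetlb fault for which no page is mapped yet: find or allocate
 * the backing folio, add it to the page cache for shared mappings or map it
 * anonymously for private ones, and install the new pte.  Missing and minor
 * faults in userfaultfd-registered ranges are handed off to userspace.  The
 * vma lock and the hugetlb fault mutex are dropped before returning.
 */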
6115 static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
6116                         struct vm_area_struct *vma,
6117                         struct address_space *mapping, pgoff_t idx,
6118                         unsigned long address, pte_t *ptep,
6119                         pte_t old_pte, unsigned int flags)
6120 {
6121         struct hstate *h = hstate_vma(vma);
6122         vm_fault_t ret = VM_FAULT_SIGBUS;
6123         int anon_rmap = 0;
6124         unsigned long size;
6125         struct folio *folio;
6126         pte_t new_pte;
6127         spinlock_t *ptl;
6128         unsigned long haddr = address & huge_page_mask(h);
6129         bool new_folio, new_pagecache_folio = false;
6130         u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
6131
6132         /*
6133          * Currently, we are forced to kill the process in the event the
6134          * original mapper has unmapped pages from the child due to a failed
6135          * COW/unsharing. Warn that such a situation has occurred as it may not
6136          * be obvious.
6137          */
6138         if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
6139                 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
6140                            current->pid);
6141                 goto out;
6142         }
6143
6144         /*
6145          * Use page lock to guard against racing truncation
6146          * before we get page_table_lock.
6147          */
6148         new_folio = false;
6149         folio = filemap_lock_hugetlb_folio(h, mapping, idx);
6150         if (IS_ERR(folio)) {
6151                 size = i_size_read(mapping->host) >> huge_page_shift(h);
6152                 if (idx >= size)
6153                         goto out;
6154                 /* Check for page in userfault range */
6155                 if (userfaultfd_missing(vma)) {
6156                         /*
6157                          * Since hugetlb_no_page() was examining pte
6158                          * without pgtable lock, we need to re-test under
6159                          * lock because the pte may not be stable and could
6160                          * have changed from under us.  Try to detect
6161                          * either changed or during-changing ptes and retry
6162                          * properly when needed.
6163                          *
6164                          * Note that userfaultfd is actually fine with
6165                          * false positives (e.g. caused by pte changed),
6166                          * but not wrong logical events (e.g. caused by
6167                          * reading a pte during changing).  The latter can
6168                          * confuse userspace, so strictness is very
6169                          * much preferred.  E.g., MISSING event should
6170                          * never happen on the page after UFFDIO_COPY has
6171                          * correctly installed the page and returned.
6172                          */
6173                         if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) {
6174                                 ret = 0;
6175                                 goto out;
6176                         }
6177
6178                         return hugetlb_handle_userfault(vma, mapping, idx, flags,
6179                                                         haddr, address,
6180                                                         VM_UFFD_MISSING);
6181                 }
6182
6183                 folio = alloc_hugetlb_folio(vma, haddr, 0);
6184                 if (IS_ERR(folio)) {
6185                         /*
6186                          * Returning an error will result in the faulting task
6187                          * being sent SIGBUS.  The hugetlb fault mutex prevents
6188                          * two tasks from racing to fault in the same page, which
6189                          * could result in spurious "unable to allocate" errors.
6190                          * Page migration does not take the fault mutex, but
6191                          * does a clear then write of ptes under the page table
6192                          * lock.  Page fault code could race with migration,
6193                          * notice the cleared pte and try to allocate a page
6194                          * here.  Before returning an error, take the ptl and make
6195                          * sure there really is no pte entry.
6196                          */
6197                         if (hugetlb_pte_stable(h, mm, ptep, old_pte))
6198                                 ret = vmf_error(PTR_ERR(folio));
6199                         else
6200                                 ret = 0;
6201                         goto out;
6202                 }
6203                 clear_huge_page(&folio->page, address, pages_per_huge_page(h));
6204                 __folio_mark_uptodate(folio);
6205                 new_folio = true;
6206
6207                 if (vma->vm_flags & VM_MAYSHARE) {
6208                         int err = hugetlb_add_to_page_cache(folio, mapping, idx);
6209                         if (err) {
6210                                 /*
6211                                  * err can't be -EEXIST, which would imply someone
6212                                  * else consumed the reservation, since the hugetlb
6213                                  * fault mutex is held when adding a hugetlb page
6214                                  * to the page cache. So it's safe to call
6215                                  * restore_reserve_on_error() here.
6216                                  */
6217                                 restore_reserve_on_error(h, vma, haddr, folio);
6218                                 folio_put(folio);
6219                                 goto out;
6220                         }
6221                         new_pagecache_folio = true;
6222                 } else {
6223                         folio_lock(folio);
6224                         if (unlikely(anon_vma_prepare(vma))) {
6225                                 ret = VM_FAULT_OOM;
6226                                 goto backout_unlocked;
6227                         }
6228                         anon_rmap = 1;
6229                 }
6230         } else {
6231                 /*
6232                  * If a memory error occurs between mmap() and fault, some processes
6233                  * don't have a hwpoisoned swap entry for the errored virtual address.
6234                  * So we need to block the hugepage fault with a PG_hwpoison bit check.
6235                  */
6236                 if (unlikely(folio_test_hwpoison(folio))) {
6237                         ret = VM_FAULT_HWPOISON_LARGE |
6238                                 VM_FAULT_SET_HINDEX(hstate_index(h));
6239                         goto backout_unlocked;
6240                 }
6241
6242                 /* Check for page in userfault range. */
6243                 if (userfaultfd_minor(vma)) {
6244                         folio_unlock(folio);
6245                         folio_put(folio);
6246                         /* See comment in userfaultfd_missing() block above */
6247                         if (!hugetlb_pte_stable(h, mm, ptep, old_pte)) {
6248                                 ret = 0;
6249                                 goto out;
6250                         }
6251                         return hugetlb_handle_userfault(vma, mapping, idx, flags,
6252                                                         haddr, address,
6253                                                         VM_UFFD_MINOR);
6254                 }
6255         }
6256
6257         /*
6258          * If we are going to COW a private mapping later, we examine the
6259          * pending reservations for this page now. This will ensure that
6260          * any allocations necessary to record that reservation occur outside
6261          * the spinlock.
6262          */
6263         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
6264                 if (vma_needs_reservation(h, vma, haddr) < 0) {
6265                         ret = VM_FAULT_OOM;
6266                         goto backout_unlocked;
6267                 }
6268                 /* Just decrements count, does not deallocate */
6269                 vma_end_reservation(h, vma, haddr);
6270         }
6271
6272         ptl = huge_pte_lock(h, mm, ptep);
6273         ret = 0;
6274         /* If pte changed from under us, retry */
6275         if (!pte_same(huge_ptep_get(ptep), old_pte))
6276                 goto backout;
6277
6278         if (anon_rmap)
6279                 hugetlb_add_new_anon_rmap(folio, vma, haddr);
6280         else
6281                 hugetlb_add_file_rmap(folio);
6282         new_pte = make_huge_pte(vma, &folio->page, ((vma->vm_flags & VM_WRITE)
6283                                 && (vma->vm_flags & VM_SHARED)));
6284         /*
6285          * If this pte was previously wr-protected, keep it wr-protected even
6286          * if populated.
6287          */
6288         if (unlikely(pte_marker_uffd_wp(old_pte)))
6289                 new_pte = huge_pte_mkuffd_wp(new_pte);
6290         set_huge_pte_at(mm, haddr, ptep, new_pte, huge_page_size(h));
6291
6292         hugetlb_count_add(pages_per_huge_page(h), mm);
6293         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
6294                 /* Optimization, do the COW without a second fault */
6295                 ret = hugetlb_wp(mm, vma, address, ptep, flags, folio, ptl);
6296         }
6297
6298         spin_unlock(ptl);
6299
6300         /*
6301          * Only set hugetlb_migratable in newly allocated pages.  Existing pages
6302          * found in the pagecache may not have hugetlb_migratable if they have
6303          * been isolated for migration.
6304          */
6305         if (new_folio)
6306                 folio_set_hugetlb_migratable(folio);
6307
6308         folio_unlock(folio);
6309 out:
6310         hugetlb_vma_unlock_read(vma);
6311         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6312         return ret;
6313
6314 backout:
6315         spin_unlock(ptl);
6316 backout_unlocked:
6317         if (new_folio && !new_pagecache_folio)
6318                 restore_reserve_on_error(h, vma, haddr, folio);
6319
6320         folio_unlock(folio);
6321         folio_put(folio);
6322         goto out;
6323 }
6324
6325 #ifdef CONFIG_SMP
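/*
 * Hash the (mapping, index) pair to pick one of the hugetlb fault mutexes,
 * so that faults on different pages can proceed concurrently while faults on
 * the same page serialize on a single mutex.
 */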
6326 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
6327 {
6328         unsigned long key[2];
6329         u32 hash;
6330
6331         key[0] = (unsigned long) mapping;
6332         key[1] = idx;
6333
6334         hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
6335
6336         return hash & (num_fault_mutexes - 1);
6337 }
6338 #else
6339 /*
6340  * For uniprocessor systems we always use a single mutex, so just
6341  * return 0 and avoid the hashing overhead.
6342  */
6343 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
6344 {
6345         return 0;
6346 }
6347 #endif
6348
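/*
 * Main hugetlb fault handler.  Faults on the same (mapping, index) serialize
 * on hugetlb_fault_mutex_table, and the vma lock is held in read mode across
 * the pte lookup so that huge_pmd_unshare() cannot invalidate ptep.
 */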
6349 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
6350                         unsigned long address, unsigned int flags)
6351 {
6352         pte_t *ptep, entry;
6353         spinlock_t *ptl;
6354         vm_fault_t ret;
6355         u32 hash;
6356         pgoff_t idx;
6357         struct folio *folio = NULL;
6358         struct folio *pagecache_folio = NULL;
6359         struct hstate *h = hstate_vma(vma);
6360         struct address_space *mapping;
6361         int need_wait_lock = 0;
6362         unsigned long haddr = address & huge_page_mask(h);
6363
6364         /* TODO: Handle faults under the VMA lock */
6365         if (flags & FAULT_FLAG_VMA_LOCK) {
6366                 vma_end_read(vma);
6367                 return VM_FAULT_RETRY;
6368         }
6369
6370         /*
6371          * Serialize hugepage allocation and instantiation, so that we don't
6372          * get spurious allocation failures if two CPUs race to instantiate
6373          * the same page in the page cache.
6374          */
6375         mapping = vma->vm_file->f_mapping;
6376         idx = vma_hugecache_offset(h, vma, haddr);
6377         hash = hugetlb_fault_mutex_hash(mapping, idx);
6378         mutex_lock(&hugetlb_fault_mutex_table[hash]);
6379
6380         /*
6381          * Acquire vma lock before calling huge_pte_alloc and hold
6382          * until finished with ptep.  This prevents huge_pmd_unshare from
6383          * being called elsewhere and making the ptep no longer valid.
6384          */
6385         hugetlb_vma_lock_read(vma);
6386         ptep = huge_pte_alloc(mm, vma, haddr, huge_page_size(h));
6387         if (!ptep) {
6388                 hugetlb_vma_unlock_read(vma);
6389                 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6390                 return VM_FAULT_OOM;
6391         }
6392
6393         entry = huge_ptep_get(ptep);
6394         if (huge_pte_none_mostly(entry)) {
6395                 if (is_pte_marker(entry)) {
6396                         pte_marker marker =
6397                                 pte_marker_get(pte_to_swp_entry(entry));
6398
6399                         if (marker & PTE_MARKER_POISONED) {
6400                                 ret = VM_FAULT_HWPOISON_LARGE;
6401                                 goto out_mutex;
6402                         }
6403                 }
6404
6405                 /*
6406                  * Other PTE markers should be handled the same way as a none PTE.
6407                  *
6408                  * hugetlb_no_page will drop the vma lock and hugetlb fault
6409                  * mutex internally, which makes us return immediately.
6410                  */
6411                 return hugetlb_no_page(mm, vma, mapping, idx, address, ptep,
6412                                       entry, flags);
6413         }
6414
6415         ret = 0;
6416
6417         /*
6418          * entry could be a migration/hwpoison entry at this point, so this
6419          * check prevents the kernel from going below assuming that we have
6420          * an active hugepage in pagecache. This goto expects the 2nd page
6421          * fault, and is_hugetlb_entry_(migration|hwpoisoned) check will
6422          * properly handle it.
6423          */
6424         if (!pte_present(entry)) {
6425                 if (unlikely(is_hugetlb_entry_migration(entry))) {
6426                         /*
6427                          * Release the hugetlb fault lock now, but retain
6428                          * the vma lock, because it is needed to guard the
6429                          * huge_pte_lockptr() later in
6430                          * migration_entry_wait_huge(). The vma lock will
6431                          * be released there.
6432                          */
6433                         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6434                         migration_entry_wait_huge(vma, ptep);
6435                         return 0;
6436                 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
6437                         ret = VM_FAULT_HWPOISON_LARGE |
6438                             VM_FAULT_SET_HINDEX(hstate_index(h));
6439                 goto out_mutex;
6440         }
6441
6442         /*
6443          * If we are going to COW/unshare the mapping later, we examine the
6444          * pending reservations for this page now. This will ensure that any
6445          * allocations necessary to record that reservation occur outside the
6446          * spinlock. Also lookup the pagecache page now as it is used to
6447          * determine if a reservation has been consumed.
6448          */
6449         if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
6450             !(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(entry)) {
6451                 if (vma_needs_reservation(h, vma, haddr) < 0) {
6452                         ret = VM_FAULT_OOM;
6453                         goto out_mutex;
6454                 }
6455                 /* Just decrements count, does not deallocate */
6456                 vma_end_reservation(h, vma, haddr);
6457
6458                 pagecache_folio = filemap_lock_hugetlb_folio(h, mapping, idx);
6459                 if (IS_ERR(pagecache_folio))
6460                         pagecache_folio = NULL;
6461         }
6462
6463         ptl = huge_pte_lock(h, mm, ptep);
6464
6465         /* Check for a racing update before calling hugetlb_wp() */
6466         if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
6467                 goto out_ptl;
6468
6469         /* Handle userfault-wp first, before trying to lock more pages */
6470         if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(ptep)) &&
6471             (flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
6472                 if (!userfaultfd_wp_async(vma)) {
6473                         struct vm_fault vmf = {
6474                                 .vma = vma,
6475                                 .address = haddr,
6476                                 .real_address = address,
6477                                 .flags = flags,
6478                         };
6479
6480                         spin_unlock(ptl);
6481                         if (pagecache_folio) {
6482                                 folio_unlock(pagecache_folio);
6483                                 folio_put(pagecache_folio);
6484                         }
6485                         hugetlb_vma_unlock_read(vma);
6486                         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6487                         return handle_userfault(&vmf, VM_UFFD_WP);
6488                 }
6489
6490                 entry = huge_pte_clear_uffd_wp(entry);
6491                 set_huge_pte_at(mm, haddr, ptep, entry,
6492                                 huge_page_size(hstate_vma(vma)));
6493                 /* Fallthrough to CoW */
6494         }
6495
6496         /*
6497          * hugetlb_wp() requires page locks of pte_page(entry) and
6498          * pagecache_folio, so here we need to take the former one
6499          * when folio != pagecache_folio or !pagecache_folio.
6500          */
6501         folio = page_folio(pte_page(entry));
6502         if (folio != pagecache_folio)
6503                 if (!folio_trylock(folio)) {
6504                         need_wait_lock = 1;
6505                         goto out_ptl;
6506                 }
6507
6508         folio_get(folio);
6509
6510         if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
6511                 if (!huge_pte_write(entry)) {
6512                         ret = hugetlb_wp(mm, vma, address, ptep, flags,
6513                                          pagecache_folio, ptl);
6514                         goto out_put_page;
6515                 } else if (likely(flags & FAULT_FLAG_WRITE)) {
6516                         entry = huge_pte_mkdirty(entry);
6517                 }
6518         }
6519         entry = pte_mkyoung(entry);
6520         if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
6521                                                 flags & FAULT_FLAG_WRITE))
6522                 update_mmu_cache(vma, haddr, ptep);
6523 out_put_page:
6524         if (folio != pagecache_folio)
6525                 folio_unlock(folio);
6526         folio_put(folio);
6527 out_ptl:
6528         spin_unlock(ptl);
6529
6530         if (pagecache_folio) {
6531                 folio_unlock(pagecache_folio);
6532                 folio_put(pagecache_folio);
6533         }
6534 out_mutex:
6535         hugetlb_vma_unlock_read(vma);
6536         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6537         /*
6538          * Generally it's safe to hold a refcount while waiting on a page lock.
6539          * But here we only wait to defer the next page fault and avoid a busy
6540          * loop; the page is not used after it is unlocked before we return from
6541          * the current page fault. So we are safe from accessing a freed page,
6542          * even though we wait here without taking a refcount.
6543          */
6544         if (need_wait_lock)
6545                 folio_wait_locked(folio);
6546         return ret;
6547 }
6548
6549 #ifdef CONFIG_USERFAULTFD
6550 /*
6551  * Can probably be eliminated, but still used by hugetlb_mfill_atomic_pte().
6552  */
6553 static struct folio *alloc_hugetlb_folio_vma(struct hstate *h,
6554                 struct vm_area_struct *vma, unsigned long address)
6555 {
6556         struct mempolicy *mpol;
6557         nodemask_t *nodemask;
6558         struct folio *folio;
6559         gfp_t gfp_mask;
6560         int node;
6561
6562         gfp_mask = htlb_alloc_mask(h);
6563         node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
6564         folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask);
6565         mpol_cond_put(mpol);
6566
6567         return folio;
6568 }
6569
6570 /*
6571  * Used by userfaultfd UFFDIO_* ioctls. Based on userfaultfd's mfill_atomic_pte
6572  * with modifications for hugetlb pages.
6573  */
6574 int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
6575                              struct vm_area_struct *dst_vma,
6576                              unsigned long dst_addr,
6577                              unsigned long src_addr,
6578                              uffd_flags_t flags,
6579                              struct folio **foliop)
6580 {
6581         struct mm_struct *dst_mm = dst_vma->vm_mm;
6582         bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE);
6583         bool wp_enabled = (flags & MFILL_ATOMIC_WP);
6584         struct hstate *h = hstate_vma(dst_vma);
6585         struct address_space *mapping = dst_vma->vm_file->f_mapping;
6586         pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
6587         unsigned long size;
6588         int vm_shared = dst_vma->vm_flags & VM_SHARED;
6589         pte_t _dst_pte;
6590         spinlock_t *ptl;
6591         int ret = -ENOMEM;
6592         struct folio *folio;
6593         int writable;
6594         bool folio_in_pagecache = false;
6595
6596         if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
6597                 ptl = huge_pte_lock(h, dst_mm, dst_pte);
6598
6599                 /* Don't overwrite any existing PTEs (even markers) */
6600                 if (!huge_pte_none(huge_ptep_get(dst_pte))) {
6601                         spin_unlock(ptl);
6602                         return -EEXIST;
6603                 }
6604
6605                 _dst_pte = make_pte_marker(PTE_MARKER_POISONED);
6606                 set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte,
6607                                 huge_page_size(h));
6608
6609                 /* No need to invalidate - it was non-present before */
6610                 update_mmu_cache(dst_vma, dst_addr, dst_pte);
6611
6612                 spin_unlock(ptl);
6613                 return 0;
6614         }
6615
6616         if (is_continue) {
6617                 ret = -EFAULT;
6618                 folio = filemap_lock_hugetlb_folio(h, mapping, idx);
6619                 if (IS_ERR(folio))
6620                         goto out;
6621                 folio_in_pagecache = true;
6622         } else if (!*foliop) {
6623                 /* If a folio already exists, then it's UFFDIO_COPY for
6624                  * a non-missing case. Return -EEXIST.
6625                  */
6626                 if (vm_shared &&
6627                     hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
6628                         ret = -EEXIST;
6629                         goto out;
6630                 }
6631
6632                 folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
6633                 if (IS_ERR(folio)) {
6634                         ret = -ENOMEM;
6635                         goto out;
6636                 }
6637
6638                 ret = copy_folio_from_user(folio, (const void __user *) src_addr,
6639                                            false);
6640
6641                 /* fallback to copy_from_user outside mmap_lock */
6642                 if (unlikely(ret)) {
6643                         ret = -ENOENT;
6644                         /* Free the allocated folio which may have
6645                          * consumed a reservation.
6646                          */
6647                         restore_reserve_on_error(h, dst_vma, dst_addr, folio);
6648                         folio_put(folio);
6649
6650                         /* Allocate a temporary folio to hold the copied
6651                          * contents.
6652                          */
6653                         folio = alloc_hugetlb_folio_vma(h, dst_vma, dst_addr);
6654                         if (!folio) {
6655                                 ret = -ENOMEM;
6656                                 goto out;
6657                         }
6658                         *foliop = folio;
6659                         /* Set the outparam foliop and return to the caller to
6660                          * copy the contents outside the lock. Don't free the
6661                          * folio.
6662                          */
6663                         goto out;
6664                 }
6665         } else {
6666                 if (vm_shared &&
6667                     hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
6668                         folio_put(*foliop);
6669                         ret = -EEXIST;
6670                         *foliop = NULL;
6671                         goto out;
6672                 }
6673
6674                 folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0);
6675                 if (IS_ERR(folio)) {
6676                         folio_put(*foliop);
6677                         ret = -ENOMEM;
6678                         *foliop = NULL;
6679                         goto out;
6680                 }
6681                 ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
6682                 folio_put(*foliop);
6683                 *foliop = NULL;
6684                 if (ret) {
6685                         folio_put(folio);
6686                         goto out;
6687                 }
6688         }
6689
6690         /*
6691          * The memory barrier inside __folio_mark_uptodate makes sure that
6692          * preceding stores to the page contents become visible before
6693          * the set_pte_at() write.
6694          */
6695         __folio_mark_uptodate(folio);
6696
6697         /* Add shared, newly allocated pages to the page cache. */
6698         if (vm_shared && !is_continue) {
6699                 size = i_size_read(mapping->host) >> huge_page_shift(h);
6700                 ret = -EFAULT;
6701                 if (idx >= size)
6702                         goto out_release_nounlock;
6703
6704                 /*
6705                  * Serialization between remove_inode_hugepages() and
6706                  * hugetlb_add_to_page_cache() below happens through the
6707                  * hugetlb_fault_mutex_table that here must be held by
6708                  * the caller.
6709                  */
6710                 ret = hugetlb_add_to_page_cache(folio, mapping, idx);
6711                 if (ret)
6712                         goto out_release_nounlock;
6713                 folio_in_pagecache = true;
6714         }
6715
6716         ptl = huge_pte_lock(h, dst_mm, dst_pte);
6717
6718         ret = -EIO;
6719         if (folio_test_hwpoison(folio))
6720                 goto out_release_unlock;
6721
6722         /*
6723          * We allow overwriting a pte marker: consider the case where both
6724          * MISSING|WP are registered; we first wr-protect a none pte which has
6725          * no page cache page backing it, then access the page.
6726          */
6727         ret = -EEXIST;
6728         if (!huge_pte_none_mostly(huge_ptep_get(dst_pte)))
6729                 goto out_release_unlock;
6730
6731         if (folio_in_pagecache)
6732                 hugetlb_add_file_rmap(folio);
6733         else
6734                 hugetlb_add_new_anon_rmap(folio, dst_vma, dst_addr);
6735
6736         /*
6737          * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY
6738          * with wp flag set, don't set pte write bit.
6739          */
6740         if (wp_enabled || (is_continue && !vm_shared))
6741                 writable = 0;
6742         else
6743                 writable = dst_vma->vm_flags & VM_WRITE;
6744
6745         _dst_pte = make_huge_pte(dst_vma, &folio->page, writable);
6746         /*
6747          * Always mark UFFDIO_COPY page dirty; note that this may not be
6748          * extremely important for hugetlbfs for now since swapping is not
6749          * supported, but we should still be clear that this page cannot be
6750          * thrown away at will, even if the write bit is not set.
6751          */
6752         _dst_pte = huge_pte_mkdirty(_dst_pte);
6753         _dst_pte = pte_mkyoung(_dst_pte);
6754
6755         if (wp_enabled)
6756                 _dst_pte = huge_pte_mkuffd_wp(_dst_pte);
6757
6758         set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, huge_page_size(h));
6759
6760         hugetlb_count_add(pages_per_huge_page(h), dst_mm);
6761
6762         /* No need to invalidate - it was non-present before */
6763         update_mmu_cache(dst_vma, dst_addr, dst_pte);
6764
6765         spin_unlock(ptl);
6766         if (!is_continue)
6767                 folio_set_hugetlb_migratable(folio);
6768         if (vm_shared || is_continue)
6769                 folio_unlock(folio);
6770         ret = 0;
6771 out:
6772         return ret;
6773 out_release_unlock:
6774         spin_unlock(ptl);
6775         if (vm_shared || is_continue)
6776                 folio_unlock(folio);
6777 out_release_nounlock:
6778         if (!folio_in_pagecache)
6779                 restore_reserve_on_error(h, dst_vma, dst_addr, folio);
6780         folio_put(folio);
6781         goto out;
6782 }
6783 #endif /* CONFIG_USERFAULTFD */
6784
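/*
 * GUP helper for hugetlb VMAs: look up and try to grab the page mapped at
 * @address.  Returns the page, NULL if nothing is mapped, or an ERR_PTR() on
 * failure (e.g. -EMLINK when the caller must unshare the page first).  On
 * success, *page_mask is set to the number of base pages covered by the huge
 * page minus one.
 */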
6785 struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
6786                                       unsigned long address, unsigned int flags,
6787                                       unsigned int *page_mask)
6788 {
6789         struct hstate *h = hstate_vma(vma);
6790         struct mm_struct *mm = vma->vm_mm;
6791         unsigned long haddr = address & huge_page_mask(h);
6792         struct page *page = NULL;
6793         spinlock_t *ptl;
6794         pte_t *pte, entry;
6795         int ret;
6796
6797         hugetlb_vma_lock_read(vma);
6798         pte = hugetlb_walk(vma, haddr, huge_page_size(h));
6799         if (!pte)
6800                 goto out_unlock;
6801
6802         ptl = huge_pte_lock(h, mm, pte);
6803         entry = huge_ptep_get(pte);
6804         if (pte_present(entry)) {
6805                 page = pte_page(entry);
6806
6807                 if (!huge_pte_write(entry)) {
6808                         if (flags & FOLL_WRITE) {
6809                                 page = NULL;
6810                                 goto out;
6811                         }
6812
6813                         if (gup_must_unshare(vma, flags, page)) {
6814                                 /* Tell the caller to do unsharing */
6815                                 page = ERR_PTR(-EMLINK);
6816                                 goto out;
6817                         }
6818                 }
6819
6820                 page = nth_page(page, ((address & ~huge_page_mask(h)) >> PAGE_SHIFT));
6821
6822                 /*
6823                  * Note that page may be a sub-page, and with vmemmap
6824          * optimizations the page struct may be read-only.
6825                  * try_grab_page() will increase the ref count on the
6826                  * head page, so this will be OK.
6827                  *
6828                  * try_grab_page() should always be able to get the page here,
6829                  * because we hold the ptl lock and have verified pte_present().
6830                  */
6831                 ret = try_grab_page(page, flags);
6832
6833                 if (WARN_ON_ONCE(ret)) {
6834                         page = ERR_PTR(ret);
6835                         goto out;
6836                 }
6837
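                /*
                 * The huge page spans pages_per_huge_page(h) base pages;
                 * report that to the GUP caller via *page_mask (count - 1).
                 */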
6838                 *page_mask = (1U << huge_page_order(h)) - 1;
6839         }
6840 out:
6841         spin_unlock(ptl);
6842 out_unlock:
6843         hugetlb_vma_unlock_read(vma);
6844
6845         /*
6846          * Fixup retval for dump requests: if pagecache doesn't exist,
6847          * don't try to allocate a new page but just skip it.
6848          */
6849         if (!page && (flags & FOLL_DUMP) &&
6850             !hugetlbfs_pagecache_present(h, vma, address))
6851                 page = ERR_PTR(-EFAULT);
6852
6853         return page;
6854 }
6855
6856 long hugetlb_change_protection(struct vm_area_struct *vma,
6857                 unsigned long address, unsigned long end,
6858                 pgprot_t newprot, unsigned long cp_flags)
6859 {
6860         struct mm_struct *mm = vma->vm_mm;
6861         unsigned long start = address;
6862         pte_t *ptep;
6863         pte_t pte;
6864         struct hstate *h = hstate_vma(vma);
6865         long pages = 0, psize = huge_page_size(h);
6866         bool shared_pmd = false;
6867         struct mmu_notifier_range range;
6868         unsigned long last_addr_mask;
6869         bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
6870         bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
6871
6872         /*
6873          * In the case of shared PMDs, the area to flush could be beyond
6874          * start/end.  Set range.start/range.end to cover the maximum possible
6875          * range if PMD sharing is possible.
6876          */
6877         mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
6878                                 0, mm, start, end);
6879         adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
6880
6881         BUG_ON(address >= end);
6882         flush_cache_range(vma, range.start, range.end);
6883
6884         mmu_notifier_invalidate_range_start(&range);
6885         hugetlb_vma_lock_write(vma);
6886         i_mmap_lock_write(vma->vm_file->f_mapping);
6887         last_addr_mask = hugetlb_mask_last_page(h);
6888         for (; address < end; address += psize) {
6889                 spinlock_t *ptl;
6890                 ptep = hugetlb_walk(vma, address, psize);
6891                 if (!ptep) {
6892                         if (!uffd_wp) {
6893                                 address |= last_addr_mask;
6894                                 continue;
6895                         }
6896                         /*
6897                          * Userfaultfd wr-protect requires pgtable
6898                          * pre-allocations to install pte markers.
6899                          */
6900                         ptep = huge_pte_alloc(mm, vma, address, psize);
6901                         if (!ptep) {
6902                                 pages = -ENOMEM;
6903                                 break;
6904                         }
6905                 }
6906                 ptl = huge_pte_lock(h, mm, ptep);
6907                 if (huge_pmd_unshare(mm, vma, address, ptep)) {
6908                         /*
6909                          * When uffd-wp is enabled on the vma, unshare
6910                          * shouldn't happen at all.  Warn if it happens
6911                          * anyway.
6912                          */
6913                         WARN_ON_ONCE(uffd_wp || uffd_wp_resolve);
6914                         pages++;
6915                         spin_unlock(ptl);
6916                         shared_pmd = true;
6917                         address |= last_addr_mask;
6918                         continue;
6919                 }
6920                 pte = huge_ptep_get(ptep);
6921                 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
6922                         /* Nothing to do. */
6923                 } else if (unlikely(is_hugetlb_entry_migration(pte))) {
6924                         swp_entry_t entry = pte_to_swp_entry(pte);
6925                         struct page *page = pfn_swap_entry_to_page(entry);
6926                         pte_t newpte = pte;
6927
6928                         if (is_writable_migration_entry(entry)) {
6929                                 if (PageAnon(page))
6930                                         entry = make_readable_exclusive_migration_entry(
6931                                                                 swp_offset(entry));
6932                                 else
6933                                         entry = make_readable_migration_entry(
6934                                                                 swp_offset(entry));
6935                                 newpte = swp_entry_to_pte(entry);
6936                                 pages++;
6937                         }
6938
6939                         if (uffd_wp)
6940                                 newpte = pte_swp_mkuffd_wp(newpte);
6941                         else if (uffd_wp_resolve)
6942                                 newpte = pte_swp_clear_uffd_wp(newpte);
6943                         if (!pte_same(pte, newpte))
6944                                 set_huge_pte_at(mm, address, ptep, newpte, psize);
6945                 } else if (unlikely(is_pte_marker(pte))) {
6946                         /* No other markers apply for now. */
6947                         WARN_ON_ONCE(!pte_marker_uffd_wp(pte));
6948                         if (uffd_wp_resolve)
6949                                 /* Safe to modify directly (non-present->none). */
6950                                 huge_pte_clear(mm, address, ptep, psize);
6951                 } else if (!huge_pte_none(pte)) {
6952                         pte_t old_pte;
6953                         unsigned int shift = huge_page_shift(hstate_vma(vma));
6954
6955                         old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
6956                         pte = huge_pte_modify(old_pte, newprot);
6957                         pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
6958                         if (uffd_wp)
6959                                 pte = huge_pte_mkuffd_wp(pte);
6960                         else if (uffd_wp_resolve)
6961                                 pte = huge_pte_clear_uffd_wp(pte);
6962                         huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
6963                         pages++;
6964                 } else {
6965                         /* None pte */
6966                         if (unlikely(uffd_wp))
6967                                 /* Safe to modify directly (none->non-present). */
6968                                 set_huge_pte_at(mm, address, ptep,
6969                                                 make_pte_marker(PTE_MARKER_UFFD_WP),
6970                                                 psize);
6971                 }
6972                 spin_unlock(ptl);
6973         }
6974         /*
6975          * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
6976          * may have cleared our pud entry and done put_page on the page table:
6977          * once we release i_mmap_rwsem, another task can do the final put_page
6978          * and that page table be reused and filled with junk.  If we actually
6979          * did unshare a page of pmds, flush the range corresponding to the pud.
6980          */
6981         if (shared_pmd)
6982                 flush_hugetlb_tlb_range(vma, range.start, range.end);
6983         else
6984                 flush_hugetlb_tlb_range(vma, start, end);
6985         /*
6986          * No need to call mmu_notifier_arch_invalidate_secondary_tlbs(): we are
6987          * downgrading page table protection, not changing it to point to a new
6988          * page.
6989          *
6990          * See Documentation/mm/mmu_notifier.rst
6991          */
6992         i_mmap_unlock_write(vma->vm_file->f_mapping);
6993         hugetlb_vma_unlock_write(vma);
6994         mmu_notifier_invalidate_range_end(&range);
6995
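        /*
         * Convert the count of changed huge pages into base pages for the
         * return value; a negative value is an error code (e.g. -ENOMEM from
         * the loop above) and is returned unchanged.
         */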
6996         return pages > 0 ? (pages << h->order) : pages;
6997 }
6998
6999 /* Return true if reservation was successful, false otherwise.  */
7000 bool hugetlb_reserve_pages(struct inode *inode,
7001                                         long from, long to,
7002                                         struct vm_area_struct *vma,
7003                                         vm_flags_t vm_flags)
7004 {
7005         long chg = -1, add = -1;
7006         struct hstate *h = hstate_inode(inode);
7007         struct hugepage_subpool *spool = subpool_inode(inode);
7008         struct resv_map *resv_map;
7009         struct hugetlb_cgroup *h_cg = NULL;
7010         long gbl_reserve, regions_needed = 0;
7011
7012         /* This should never happen */
7013         if (from > to) {
7014                 VM_WARN(1, "%s called with a negative range\n", __func__);
7015                 return false;
7016         }
7017
7018         /*
7019          * Allocate the vma-specific semaphore used for pmd sharing and
7020          * fault/truncation synchronization.
7021          */
7022         hugetlb_vma_lock_alloc(vma);
7023
7024         /*
7025          * Only apply hugepage reservation if asked. At fault time, an
7026          * attempt will be made for VM_NORESERVE to allocate a page
7027          * without using reserves
7028          */
7029         if (vm_flags & VM_NORESERVE)
7030                 return true;
7031
7032         /*
7033          * Shared mappings base their reservation on the number of pages that
7034          * are already allocated on behalf of the file. Private mappings need
7035          * to reserve the full area even if read-only as mprotect() may be
7036          * called to make the mapping read-write. Assume !vma is a shm mapping
7037          */
7038         if (!vma || vma->vm_flags & VM_MAYSHARE) {
7039                 /*
7040                  * resv_map can not be NULL as hugetlb_reserve_pages is only
7041                  * called for inodes for which resv_maps were created (see
7042                  * hugetlbfs_get_inode).
7043                  */
7044                 resv_map = inode_resv_map(inode);
7045
7046                 chg = region_chg(resv_map, from, to, &regions_needed);
7047         } else {
7048                 /* Private mapping. */
7049                 resv_map = resv_map_alloc();
7050                 if (!resv_map)
7051                         goto out_err;
7052
7053                 chg = to - from;
7054
7055                 set_vma_resv_map(vma, resv_map);
7056                 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
7057         }
7058
7059         if (chg < 0)
7060                 goto out_err;
7061
7062         if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
7063                                 chg * pages_per_huge_page(h), &h_cg) < 0)
7064                 goto out_err;
7065
7066         if (vma && !(vma->vm_flags & VM_MAYSHARE) && h_cg) {
7067                 /* For private mappings, the hugetlb_cgroup uncharge info hangs
7068                  * off the resv_map.
7069                  */
7070                 resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
7071         }
7072
7073         /*
7074          * There must be enough pages in the subpool for the mapping. If
7075          * the subpool has a minimum size, there may be some global
7076          * reservations already in place (gbl_reserve).
7077          */
7078         gbl_reserve = hugepage_subpool_get_pages(spool, chg);
7079         if (gbl_reserve < 0)
7080                 goto out_uncharge_cgroup;
7081
7082         /*
7083          * Check enough hugepages are available for the reservation.
7084          * Hand the pages back to the subpool if there are not
7085          * Hand the pages back to the subpool if there are not enough.
7086         if (hugetlb_acct_memory(h, gbl_reserve) < 0)
7087                 goto out_put_pages;
7088
7089         /*
7090          * Account for the reservations made. Shared mappings record regions
7091          * that have reservations as they are shared by multiple VMAs.
7092          * When the last VMA disappears, the region map says how much
7093          * the reservation was and the page cache tells how much of
7094          * the reservation was consumed. Private mappings are per-VMA and
7095          * only the consumed reservations are tracked. When the VMA
7096          * disappears, the original reservation is the VMA size and the
7097          * consumed reservations are stored in the map. Hence, nothing
7098          * else has to be done for private mappings here
7099          */
7100         if (!vma || vma->vm_flags & VM_MAYSHARE) {
7101                 add = region_add(resv_map, from, to, regions_needed, h, h_cg);
7102
7103                 if (unlikely(add < 0)) {
7104                         hugetlb_acct_memory(h, -gbl_reserve);
7105                         goto out_put_pages;
7106                 } else if (unlikely(chg > add)) {
7107                         /*
7108                          * pages in this range were added to the reserve
7109                          * map between region_chg and region_add.  This
7110                          * indicates a race with alloc_hugetlb_folio.  Adjust
7111                          * the subpool and reserve counts modified above
7112                          * based on the difference.
7113                          */
7114                         long rsv_adjust;
7115
7116                         /*
7117                          * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the
7118                          * reference to h_cg->css. See comment below for detail.
7119                          */
7120                         hugetlb_cgroup_uncharge_cgroup_rsvd(
7121                                 hstate_index(h),
7122                                 (chg - add) * pages_per_huge_page(h), h_cg);
7123
7124                         rsv_adjust = hugepage_subpool_put_pages(spool,
7125                                                                 chg - add);
7126                         hugetlb_acct_memory(h, -rsv_adjust);
7127                 } else if (h_cg) {
7128                         /*
7129                          * The file_regions will hold their own reference to
7130                          * h_cg->css. So we should release the reference held
7131                          * via hugetlb_cgroup_charge_cgroup_rsvd() when we are
7132                          * done.
7133                          */
7134                         hugetlb_cgroup_put_rsvd_cgroup(h_cg);
7135                 }
7136         }
7137         return true;
7138
7139 out_put_pages:
7140         /* put back original number of pages, chg */
7141         (void)hugepage_subpool_put_pages(spool, chg);
7142 out_uncharge_cgroup:
7143         hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
7144                                             chg * pages_per_huge_page(h), h_cg);
7145 out_err:
7146         hugetlb_vma_lock_free(vma);
7147         if (!vma || vma->vm_flags & VM_MAYSHARE)
7148                 /* Only call region_abort if the region_chg succeeded but the
7149                  * region_add failed or didn't run.
7150                  */
7151                 if (chg >= 0 && add < 0)
7152                         region_abort(resv_map, from, to, regions_needed);
7153         if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
7154                 kref_put(&resv_map->refs, resv_map_release);
7155                 set_vma_resv_map(vma, NULL);
7156         }
7157         return false;
7158 }
7159
7160 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
7161                                                                 long freed)
7162 {
7163         struct hstate *h = hstate_inode(inode);
7164         struct resv_map *resv_map = inode_resv_map(inode);
7165         long chg = 0;
7166         struct hugepage_subpool *spool = subpool_inode(inode);
7167         long gbl_reserve;
7168
7169         /*
7170          * Since this routine can be called in the evict inode path for all
7171          * hugetlbfs inodes, resv_map could be NULL.
7172          */
7173         if (resv_map) {
7174                 chg = region_del(resv_map, start, end);
7175                 /*
7176                  * region_del() can fail in the rare case where a region
7177                  * must be split and another region descriptor can not be
7178                  * allocated.  If end == LONG_MAX, it will not fail.
7179                  */
7180                 if (chg < 0)
7181                         return chg;
7182         }
7183
7184         spin_lock(&inode->i_lock);
7185         inode->i_blocks -= (blocks_per_huge_page(h) * freed);
7186         spin_unlock(&inode->i_lock);
7187
7188         /*
7189          * If the subpool has a minimum size, the number of global
7190          * reservations to be released may be adjusted.
7191          *
7192          * Note that !resv_map implies freed == 0. So (chg - freed)
7193          * won't go negative.
7194          */
7195         gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
7196         hugetlb_acct_memory(h, -gbl_reserve);
7197
7198         return 0;
7199 }
7200
7201 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
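/*
 * Return the address in @svma that corresponds to @addr in @vma if the two
 * mappings may share a PMD page table for that PUD-aligned region, or 0 if
 * sharing is not possible.
 */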
7202 static unsigned long page_table_shareable(struct vm_area_struct *svma,
7203                                 struct vm_area_struct *vma,
7204                                 unsigned long addr, pgoff_t idx)
7205 {
7206         unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
7207                                 svma->vm_start;
7208         unsigned long sbase = saddr & PUD_MASK;
7209         unsigned long s_end = sbase + PUD_SIZE;
7210
7211         /* Allow segments to share if only one is marked locked */
7212         unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED_MASK;
7213         unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED_MASK;
7214
7215         /*
7216          * Match the virtual addresses, permissions and the alignment of the
7217          * page table page.
7218          *
7219          * Also, vma_lock (vm_private_data) is required for sharing.
7220          */
7221         if (pmd_index(addr) != pmd_index(saddr) ||
7222             vm_flags != svm_flags ||
7223             !range_in_vma(svma, sbase, s_end) ||
7224             !svma->vm_private_data)
7225                 return 0;
7226
7227         return saddr;
7228 }
7229
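/*
 * Return true if the PUD-sized region containing @addr is a candidate for PMD
 * sharing: the VMA must be VM_MAYSHARE, have its vma lock allocated, fully
 * cover the aligned PUD range, and not have PMD sharing disabled by
 * userfaultfd.
 */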
7230 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
7231 {
7232         unsigned long start = addr & PUD_MASK;
7233         unsigned long end = start + PUD_SIZE;
7234
7235 #ifdef CONFIG_USERFAULTFD
7236         if (uffd_disable_huge_pmd_share(vma))
7237                 return false;
7238 #endif
7239         /*
7240          * check on proper vm_flags and page table alignment
7241          */
7242         if (!(vma->vm_flags & VM_MAYSHARE))
7243                 return false;
7244         if (!vma->vm_private_data)      /* vma lock required for sharing */
7245                 return false;
7246         if (!range_in_vma(vma, start, end))
7247                 return false;
7248         return true;
7249 }
7250
7251 /*
7252  * Determine if start,end range within vma could be mapped by shared pmd.
7253  * If yes, adjust start and end to cover range associated with possible
7254  * shared pmd mappings.
7255  */
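/*
 * For example, assuming a 1 GiB PUD_SIZE: a range of [1.5 GiB, 2.5 GiB) lying
 * inside a large enough VM_MAYSHARE vma is widened to [1 GiB, 3 GiB), since a
 * shared PMD page table always covers a full PUD-aligned region.
 */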
7256 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
7257                                 unsigned long *start, unsigned long *end)
7258 {
7259         unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
7260                 v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
7261
7262         /*
7263          * vma needs to span at least one aligned PUD size, and the range
7264          * must be at least partially within it.
7265          */
7266         if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
7267                 (*end <= v_start) || (*start >= v_end))
7268                 return;
7269
7270         /* Extend the range to be PUD aligned for a worst case scenario */
7271         if (*start > v_start)
7272                 *start = ALIGN_DOWN(*start, PUD_SIZE);
7273
7274         if (*end < v_end)
7275                 *end = ALIGN(*end, PUD_SIZE);
7276 }
7277
7278 /*
7279  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
7280  * and returns the corresponding pte. While this is not necessary for the
7281  * !shared pmd case because we can allocate the pmd later as well, it makes the
7282  * code much cleaner. pmd allocation is essential for the shared case because
7283  * pud has to be populated inside the same i_mmap_rwsem section - otherwise
7284  * racing tasks could either miss the sharing (see huge_pte_offset) or select a
7285  * bad pmd for sharing.
7286  */
7287 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
7288                       unsigned long addr, pud_t *pud)
7289 {
7290         struct address_space *mapping = vma->vm_file->f_mapping;
7291         pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
7292                         vma->vm_pgoff;
7293         struct vm_area_struct *svma;
7294         unsigned long saddr;
7295         pte_t *spte = NULL;
7296         pte_t *pte;
7297
7298         i_mmap_lock_read(mapping);
7299         vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
7300                 if (svma == vma)
7301                         continue;
7302
7303                 saddr = page_table_shareable(svma, vma, addr, idx);
7304                 if (saddr) {
7305                         spte = hugetlb_walk(svma, saddr,
7306                                             vma_mmu_pagesize(svma));
7307                         if (spte) {
7308                                 get_page(virt_to_page(spte));
7309                                 break;
7310                         }
7311                 }
7312         }
7313
7314         if (!spte)
7315                 goto out;
7316
7317         spin_lock(&mm->page_table_lock);
7318         if (pud_none(*pud)) {
7319                 pud_populate(mm, pud,
7320                                 (pmd_t *)((unsigned long)spte & PAGE_MASK));
7321                 mm_inc_nr_pmds(mm);
7322         } else {
7323                 put_page(virt_to_page(spte));
7324         }
7325         spin_unlock(&mm->page_table_lock);
7326 out:
7327         pte = (pte_t *)pmd_alloc(mm, pud, addr);
7328         i_mmap_unlock_read(mapping);
7329         return pte;
7330 }
7331
7332 /*
7333  * unmap huge page backed by shared pte.
7334  *
7335  * The hugetlb pte page is refcounted at the time of mapping.  If the pte is
7336  * shared (page_count > 1), unmapping is achieved by clearing the pud and
7337  * decrementing the refcount.  If count == 1, the pte page is not shared.
7338  *
7339  * Called with page table lock held.
7340  *
7341  * returns: 1 successfully unmapped a shared pte page
7342  *          0 the underlying pte page is not shared, or it is the last user
7343  */
7344 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
7345                                         unsigned long addr, pte_t *ptep)
7346 {
7347         pgd_t *pgd = pgd_offset(mm, addr);
7348         p4d_t *p4d = p4d_offset(pgd, addr);
7349         pud_t *pud = pud_offset(p4d, addr);
7350
7351         i_mmap_assert_write_locked(vma->vm_file->f_mapping);
7352         hugetlb_vma_assert_locked(vma);
7353         BUG_ON(page_count(virt_to_page(ptep)) == 0);
7354         if (page_count(virt_to_page(ptep)) == 1)
7355                 return 0;
7356
7357         pud_clear(pud);
7358         put_page(virt_to_page(ptep));
7359         mm_dec_nr_pmds(mm);
7360         return 1;
7361 }
7362
7363 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
7364
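/* Stub implementations for architectures without shared PMD page tables. */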
7365 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
7366                       unsigned long addr, pud_t *pud)
7367 {
7368         return NULL;
7369 }
7370
7371 int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
7372                                 unsigned long addr, pte_t *ptep)
7373 {
7374         return 0;
7375 }
7376
7377 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
7378                                 unsigned long *start, unsigned long *end)
7379 {
7380 }
7381
7382 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
7383 {
7384         return false;
7385 }
7386 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
7387
7388 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
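/*
 * Allocate the page table slot mapping a huge page at @addr.  PUD-sized pages
 * use the pud entry itself; PMD-sized pages either reuse a shared PMD page
 * table via huge_pmd_share() when possible, or allocate one with pmd_alloc().
 */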
7389 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
7390                         unsigned long addr, unsigned long sz)
7391 {
7392         pgd_t *pgd;
7393         p4d_t *p4d;
7394         pud_t *pud;
7395         pte_t *pte = NULL;
7396
7397         pgd = pgd_offset(mm, addr);
7398         p4d = p4d_alloc(mm, pgd, addr);
7399         if (!p4d)
7400                 return NULL;
7401         pud = pud_alloc(mm, p4d, addr);
7402         if (pud) {
7403                 if (sz == PUD_SIZE) {
7404                         pte = (pte_t *)pud;
7405                 } else {
7406                         BUG_ON(sz != PMD_SIZE);
7407                         if (want_pmd_share(vma, addr) && pud_none(*pud))
7408                                 pte = huge_pmd_share(mm, vma, addr, pud);
7409                         else
7410                                 pte = (pte_t *)pmd_alloc(mm, pud, addr);
7411                 }
7412         }
7413
7414         if (pte) {
7415                 pte_t pteval = ptep_get_lockless(pte);
7416
7417                 BUG_ON(pte_present(pteval) && !pte_huge(pteval));
7418         }
7419
7420         return pte;
7421 }
7422
7423 /*
7424  * huge_pte_offset() - Walk the page table to resolve the hugepage
7425  * entry at address @addr
7426  *
7427  * Return: Pointer to page table entry (PUD or PMD) for
7428  * address @addr, or NULL if a !p*d_present() entry is encountered and the
7429  * size @sz doesn't match the hugepage size at this level of the page
7430  * table.
7431  */
7432 pte_t *huge_pte_offset(struct mm_struct *mm,
7433                        unsigned long addr, unsigned long sz)
7434 {
7435         pgd_t *pgd;
7436         p4d_t *p4d;
7437         pud_t *pud;
7438         pmd_t *pmd;
7439
7440         pgd = pgd_offset(mm, addr);
7441         if (!pgd_present(*pgd))
7442                 return NULL;
7443         p4d = p4d_offset(pgd, addr);
7444         if (!p4d_present(*p4d))
7445                 return NULL;
7446
7447         pud = pud_offset(p4d, addr);
7448         if (sz == PUD_SIZE)
7449                 /* must be pud huge, non-present or none */
7450                 return (pte_t *)pud;
7451         if (!pud_present(*pud))
7452                 return NULL;
7453         /* must have a valid entry and size to go further */
7454
7455         pmd = pmd_offset(pud, addr);
7456         /* must be pmd huge, non-present or none */
7457         return (pte_t *)pmd;
7458 }
7459
7460 /*
7461  * Return a mask that can be used to advance an address to the last huge
7462  * page mapped by a single page table page.  Used to skip non-present
7463  * page table entries when linearly scanning address ranges.  Architectures
7464  * with unique huge page to page table relationships can define their own
7465  * version of this routine.
7466  */
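/*
 * For example, with 2 MiB PMD-sized huge pages the mask is
 * PUD_SIZE - PMD_SIZE, so a scan loop that finds a non-present page table can
 * do "address |= mask" to jump to the last huge page slot of the current PUD
 * region and let its "address += huge_page_size" step move into the next one.
 */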
7467 unsigned long hugetlb_mask_last_page(struct hstate *h)
7468 {
7469         unsigned long hp_size = huge_page_size(h);
7470
7471         if (hp_size == PUD_SIZE)
7472                 return P4D_SIZE - PUD_SIZE;
7473         else if (hp_size == PMD_SIZE)
7474                 return PUD_SIZE - PMD_SIZE;
7475         else
7476                 return 0UL;
7477 }
7478
7479 #else
7480
7481 /* See description above.  Architectures can provide their own version. */
7482 __weak unsigned long hugetlb_mask_last_page(struct hstate *h)
7483 {
7484 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
7485         if (huge_page_size(h) == PMD_SIZE)
7486                 return PUD_SIZE - PMD_SIZE;
7487 #endif
7488         return 0UL;
7489 }
7490
7491 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
7492
7493 /*
7494  * These functions are overridable if your architecture needs its own
7495  * behavior.
7496  */
7497 bool isolate_hugetlb(struct folio *folio, struct list_head *list)
7498 {
7499         bool ret = true;
7500
7501         spin_lock_irq(&hugetlb_lock);
7502         if (!folio_test_hugetlb(folio) ||
7503             !folio_test_hugetlb_migratable(folio) ||
7504             !folio_try_get(folio)) {
7505                 ret = false;
7506                 goto unlock;
7507         }
7508         folio_clear_hugetlb_migratable(folio);
7509         list_move_tail(&folio->lru, list);
7510 unlock:
7511         spin_unlock_irq(&hugetlb_lock);
7512         return ret;
7513 }
7514
7515 int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
7516 {
7517         int ret = 0;
7518
7519         *hugetlb = false;
7520         spin_lock_irq(&hugetlb_lock);
7521         if (folio_test_hugetlb(folio)) {
7522                 *hugetlb = true;
7523                 if (folio_test_hugetlb_freed(folio))
7524                         ret = 0;
7525                 else if (folio_test_hugetlb_migratable(folio) || unpoison)
7526                         ret = folio_try_get(folio);
7527                 else
7528                         ret = -EBUSY;
7529         }
7530         spin_unlock_irq(&hugetlb_lock);
7531         return ret;
7532 }
7533
7534 int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
7535                                 bool *migratable_cleared)
7536 {
7537         int ret;
7538
7539         spin_lock_irq(&hugetlb_lock);
7540         ret = __get_huge_page_for_hwpoison(pfn, flags, migratable_cleared);
7541         spin_unlock_irq(&hugetlb_lock);
7542         return ret;
7543 }
7544
7545 void folio_putback_active_hugetlb(struct folio *folio)
7546 {
7547         spin_lock_irq(&hugetlb_lock);
7548         folio_set_hugetlb_migratable(folio);
7549         list_move_tail(&folio->lru, &(folio_hstate(folio))->hugepage_activelist);
7550         spin_unlock_irq(&hugetlb_lock);
7551         folio_put(folio);
7552 }
7553
7554 void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason)
7555 {
7556         struct hstate *h = folio_hstate(old_folio);
7557
7558         hugetlb_cgroup_migrate(old_folio, new_folio);
7559         set_page_owner_migrate_reason(&new_folio->page, reason);
7560
7561         /*
7562          * Transfer the temporary state of the new hugetlb folio. This is
7563          * the reverse of other transitions because the new folio is going
7564          * to be final while the old one will be freed, so the old folio
7565          * takes over the temporary status.
7566          *
7567          * Also note that we have to transfer the per-node surplus state
7568          * here as well otherwise the global surplus count will not match
7569          * the per-node's.
7570          */
7571         if (folio_test_hugetlb_temporary(new_folio)) {
7572                 int old_nid = folio_nid(old_folio);
7573                 int new_nid = folio_nid(new_folio);
7574
7575                 folio_set_hugetlb_temporary(old_folio);
7576                 folio_clear_hugetlb_temporary(new_folio);
7577
7578
7579                 /*
7580                  * There is no need to transfer the per-node surplus state
7581                  * when we do not cross the node.
7582                  */
7583                 if (new_nid == old_nid)
7584                         return;
7585                 spin_lock_irq(&hugetlb_lock);
7586                 if (h->surplus_huge_pages_node[old_nid]) {
7587                         h->surplus_huge_pages_node[old_nid]--;
7588                         h->surplus_huge_pages_node[new_nid]++;
7589                 }
7590                 spin_unlock_irq(&hugetlb_lock);
7591         }
7592 }
7593
7594 static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
7595                                    unsigned long start,
7596                                    unsigned long end)
7597 {
7598         struct hstate *h = hstate_vma(vma);
7599         unsigned long sz = huge_page_size(h);
7600         struct mm_struct *mm = vma->vm_mm;
7601         struct mmu_notifier_range range;
7602         unsigned long address;
7603         spinlock_t *ptl;
7604         pte_t *ptep;
7605
7606         if (!(vma->vm_flags & VM_MAYSHARE))
7607                 return;
7608
7609         if (start >= end)
7610                 return;
7611
7612         flush_cache_range(vma, start, end);
7613         /*
7614          * No need to call adjust_range_if_pmd_sharing_possible(), because
7615          * we have already done the PUD_SIZE alignment.
7616          */
7617         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
7618                                 start, end);
7619         mmu_notifier_invalidate_range_start(&range);
7620         hugetlb_vma_lock_write(vma);
7621         i_mmap_lock_write(vma->vm_file->f_mapping);
7622         for (address = start; address < end; address += PUD_SIZE) {
7623                 ptep = hugetlb_walk(vma, address, sz);
7624                 if (!ptep)
7625                         continue;
7626                 ptl = huge_pte_lock(h, mm, ptep);
7627                 huge_pmd_unshare(mm, vma, address, ptep);
7628                 spin_unlock(ptl);
7629         }
7630         flush_hugetlb_tlb_range(vma, start, end);
7631         i_mmap_unlock_write(vma->vm_file->f_mapping);
7632         hugetlb_vma_unlock_write(vma);
7633         /*
7634          * No need to call mmu_notifier_arch_invalidate_secondary_tlbs(), see
7635          * Documentation/mm/mmu_notifier.rst.
7636          */
7637         mmu_notifier_invalidate_range_end(&range);
7638 }
7639
7640 /*
7641  * This function will unconditionally remove all the shared pmd pgtable entries
7642  * within the specific vma for a hugetlbfs memory range.
7643  */
7644 void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
7645 {
7646         hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
7647                         ALIGN_DOWN(vma->vm_end, PUD_SIZE));
7648 }
7649
7650 #ifdef CONFIG_CMA
7651 static bool cma_reserve_called __initdata;
7652
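/*
 * Parse the "hugetlb_cma=" boot parameter.  It accepts either a single total
 * size such as "hugetlb_cma=4G", or a per-node list such as
 * "hugetlb_cma=0:1G,2:2G"; sizes accept the usual memparse() suffixes.
 */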
7653 static int __init cmdline_parse_hugetlb_cma(char *p)
7654 {
7655         int nid, count = 0;
7656         unsigned long tmp;
7657         char *s = p;
7658
7659         while (*s) {
7660                 if (sscanf(s, "%lu%n", &tmp, &count) != 1)
7661                         break;
7662
7663                 if (s[count] == ':') {
7664                         if (tmp >= MAX_NUMNODES)
7665                                 break;
7666                         nid = array_index_nospec(tmp, MAX_NUMNODES);
7667
7668                         s += count + 1;
7669                         tmp = memparse(s, &s);
7670                         hugetlb_cma_size_in_node[nid] = tmp;
7671                         hugetlb_cma_size += tmp;
7672
7673                         /*
7674                          * Skip the separator if there is one, otherwise
7675                          * stop parsing.
7676                          */
7677                         if (*s == ',')
7678                                 s++;
7679                         else
7680                                 break;
7681                 } else {
7682                         hugetlb_cma_size = memparse(p, &p);
7683                         break;
7684                 }
7685         }
7686
7687         return 0;
7688 }
7689
7690 early_param("hugetlb_cma", cmdline_parse_hugetlb_cma);
7691
7692 void __init hugetlb_cma_reserve(int order)
7693 {
7694         unsigned long size, reserved, per_node;
7695         bool node_specific_cma_alloc = false;
7696         int nid;
7697
7698         cma_reserve_called = true;
7699
7700         if (!hugetlb_cma_size)
7701                 return;
7702
7703         for (nid = 0; nid < MAX_NUMNODES; nid++) {
7704                 if (hugetlb_cma_size_in_node[nid] == 0)
7705                         continue;
7706
7707                 if (!node_online(nid)) {
7708                         pr_warn("hugetlb_cma: invalid node %d specified\n", nid);
7709                         hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
7710                         hugetlb_cma_size_in_node[nid] = 0;
7711                         continue;
7712                 }
7713
7714                 if (hugetlb_cma_size_in_node[nid] < (PAGE_SIZE << order)) {
7715                         pr_warn("hugetlb_cma: cma area of node %d should be at least %lu MiB\n",
7716                                 nid, (PAGE_SIZE << order) / SZ_1M);
7717                         hugetlb_cma_size -= hugetlb_cma_size_in_node[nid];
7718                         hugetlb_cma_size_in_node[nid] = 0;
7719                 } else {
7720                         node_specific_cma_alloc = true;
7721                 }
7722         }
7723
7724         /* Validate the CMA size again in case invalid nodes were specified. */
7725         if (!hugetlb_cma_size)
7726                 return;
7727
7728         if (hugetlb_cma_size < (PAGE_SIZE << order)) {
7729                 pr_warn("hugetlb_cma: cma area should be at least %lu MiB\n",
7730                         (PAGE_SIZE << order) / SZ_1M);
7731                 hugetlb_cma_size = 0;
7732                 return;
7733         }
7734
7735         if (!node_specific_cma_alloc) {
7736                 /*
7737                  * If a 3 GB area is requested on a machine with 4 NUMA nodes,
7738                  * allocate 1 GB on the first three nodes and ignore the last one.
7739                  */
7740                 per_node = DIV_ROUND_UP(hugetlb_cma_size, nr_online_nodes);
7741                 pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
7742                         hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
7743         }
7744
7745         reserved = 0;
7746         for_each_online_node(nid) {
7747                 int res;
7748                 char name[CMA_MAX_NAME];
7749
7750                 if (node_specific_cma_alloc) {
7751                         if (hugetlb_cma_size_in_node[nid] == 0)
7752                                 continue;
7753
7754                         size = hugetlb_cma_size_in_node[nid];
7755                 } else {
7756                         size = min(per_node, hugetlb_cma_size - reserved);
7757                 }
7758
7759                 size = round_up(size, PAGE_SIZE << order);
7760
7761                 snprintf(name, sizeof(name), "hugetlb%d", nid);
7762                 /*
7763                  * Note that 'order per bit' is based on the smallest size that
7764                  * may be returned to the CMA allocator in the case of
7765                  * huge page demotion.
7766                  */
7767                 res = cma_declare_contiguous_nid(0, size, 0,
7768                                                 PAGE_SIZE << HUGETLB_PAGE_ORDER,
7769                                                  0, false, name,
7770                                                  &hugetlb_cma[nid], nid);
7771                 if (res) {
7772                         pr_warn("hugetlb_cma: reservation failed: err %d, node %d\n",
7773                                 res, nid);
7774                         continue;
7775                 }
7776
7777                 reserved += size;
7778                 pr_info("hugetlb_cma: reserved %lu MiB on node %d\n",
7779                         size / SZ_1M, nid);
7780
7781                 if (reserved >= hugetlb_cma_size)
7782                         break;
7783         }
7784
7785         if (!reserved)
7786                 /*
7787                  * hugetlb_cma_size is used to determine if allocations from
7788                  * cma are possible.  Set to zero if no cma regions are set up.
7789                  */
7790                 hugetlb_cma_size = 0;
7791 }
7792
7793 static void __init hugetlb_cma_check(void)
7794 {
7795         if (!hugetlb_cma_size || cma_reserve_called)
7796                 return;
7797
7798         pr_warn("hugetlb_cma: the option isn't supported by current arch\n");
7799 }
7800
7801 #endif /* CONFIG_CMA */