/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched/signal.h>         /* remove ASAP */
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/fs_parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

struct hugetlbfs_fs_context {
        struct hstate           *hstate;
        unsigned long long      max_size_opt;
        unsigned long long      min_size_opt;
        long                    max_hpages;
        long                    nr_inodes;
        long                    min_hpages;
        enum hugetlbfs_size_type max_val_type;
        enum hugetlbfs_size_type min_val_type;
        kuid_t                  uid;
        kgid_t                  gid;
        umode_t                 mode;
};

int sysctl_hugetlb_shm_group;

enum hugetlb_param {
        Opt_gid,
        Opt_min_size,
        Opt_mode,
        Opt_nr_inodes,
        Opt_pagesize,
        Opt_size,
        Opt_uid,
};

static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
        fsparam_u32   ("gid",           Opt_gid),
        fsparam_string("min_size",      Opt_min_size),
        fsparam_u32oct("mode",          Opt_mode),
        fsparam_string("nr_inodes",     Opt_nr_inodes),
        fsparam_string("pagesize",      Opt_pagesize),
        fsparam_string("size",          Opt_size),
        fsparam_u32   ("uid",           Opt_uid),
        {}
};
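
/*
 * Illustrative usage (not part of the original source): a typical mount
 * using the parameters above might look like
 *
 *   mount -t hugetlbfs -o size=1G,pagesize=2M,mode=1770 none /mnt/huge
 *
 * "size" and "min_size" accept bytes (with K/M/G suffixes) or a
 * percentage of the huge page pool, e.g. size=50%.
 */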

#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
                                        struct inode *inode, pgoff_t index)
{
        vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
                                                        index);
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
        mpol_cond_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
                                        struct inode *inode, pgoff_t index)
{
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}
#endif

static void huge_pagevec_release(struct pagevec *pvec)
{
        int i;

        for (i = 0; i < pagevec_count(pvec); ++i)
                put_page(pvec->pages[i]);

        pagevec_reinit(pvec);
}

/*
 * Mask used when checking the page offset value passed in via system
 * calls.  This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value.  The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
        (((1UL << (PAGE_SHIFT + 1)) - 1) <<  (BITS_PER_LONG - (PAGE_SHIFT + 1)))
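
/*
 * Worked example (illustrative, not in the original source): on a
 * 64-bit arch with 4K base pages, PAGE_SHIFT == 12 and
 * BITS_PER_LONG == 64, so
 *
 *   PGOFF_LOFFT_MAX == ((1UL << 13) - 1) << 51 == 0xfff8000000000000
 *
 * If any of those top 13 bits of vm_pgoff were set, then
 * vm_pgoff << PAGE_SHIFT would reach bit 63 (the loff_t sign bit) or
 * beyond, i.e. the byte offset would overflow.
 */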

static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(file);
        struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
        loff_t len, vma_len;
        int ret;
        struct hstate *h = hstate_file(file);

        /*
         * vma address alignment (but not the pgoff alignment) has
         * already been checked by prepare_hugepage_range.  If you add
         * any error returns here, do so after setting VM_HUGETLB, so
         * is_vm_hugetlb_page tests below unmap_region go the right
         * way when do_mmap unwinds (may be important on powerpc
         * and ia64).
         */
        vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
        vma->vm_ops = &hugetlb_vm_ops;

        ret = seal_check_future_write(info->seals, vma);
        if (ret)
                return ret;

        /*
         * page based offset in vm_pgoff could be sufficiently large to
         * overflow a loff_t when converted to byte offset.  This can
         * only happen on architectures where sizeof(loff_t) ==
         * sizeof(unsigned long).  So, only check in those instances.
         */
        if (sizeof(unsigned long) == sizeof(loff_t)) {
                if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
                        return -EINVAL;
        }

        /* must be huge page aligned */
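        /*
         * (Illustrative: with 2MB huge pages on a 4K base page kernel,
         * ~huge_page_mask(h) >> PAGE_SHIFT == 511, so vm_pgoff must be
         * a multiple of 512 base pages, i.e. one huge page.)
         */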
        if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
                return -EINVAL;

        vma_len = (loff_t)(vma->vm_end - vma->vm_start);
        len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
        /* check for overflow */
        if (len < vma_len)
                return -EINVAL;

        inode_lock(inode);
        file_accessed(file);

        ret = -ENOMEM;
        if (!hugetlb_reserve_pages(inode,
                                vma->vm_pgoff >> huge_page_order(h),
                                len >> huge_page_shift(h), vma,
                                vma->vm_flags))
                goto out;

        ret = 0;
        if (vma->vm_flags & VM_WRITE && inode->i_size < len)
                i_size_write(inode, len);
out:
        inode_unlock(inode);

        return ret;
}

/*
 * Called under mmap_write_lock(mm).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;

        info.flags = 0;
        info.length = len;
        info.low_limit = current->mm->mmap_base;
        info.high_limit = TASK_SIZE;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        return vm_unmapped_area(&info);
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = max(PAGE_SIZE, mmap_min_addr);
        info.high_limit = current->mm->mmap_base;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        if (unlikely(offset_in_page(addr))) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = current->mm->mmap_base;
                info.high_limit = TASK_SIZE;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}

static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct hstate *h = hstate_file(file);

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }

        /*
         * Use mm->get_unmapped_area value as a hint to use topdown routine.
         * If architectures have special needs, they should define their own
         * version of hugetlb_get_unmapped_area.
         */
        if (mm->get_unmapped_area == arch_get_unmapped_area_topdown)
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
        return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                        pgoff, flags);
}
#endif

static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
                        struct iov_iter *to, unsigned long size)
{
        size_t copied = 0;
        int i, chunksize;

        /* Find which 4K chunk and the offset within that chunk */
        i = offset >> PAGE_SHIFT;
        offset = offset & ~PAGE_MASK;
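        /*
         * (Illustrative: with 4K base pages, offset == 0x3400 into the
         * huge page yields i == 3 and offset == 0x400, i.e. byte 0x400
         * of the fourth 4K subpage.)
         */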

        while (size) {
                size_t n;
                chunksize = PAGE_SIZE;
                if (offset)
                        chunksize -= offset;
                if (chunksize > size)
                        chunksize = size;
                n = copy_page_to_iter(&page[i], offset, chunksize, to);
                copied += n;
                if (n != chunksize)
                        return copied;
                offset = 0;
                size -= chunksize;
                i++;
        }
        return copied;
}

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  It's *very* similar to generic_file_buffered_read(), but we can't
 * use that since it has PAGE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct file *file = iocb->ki_filp;
        struct hstate *h = hstate_file(file);
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        unsigned long index = iocb->ki_pos >> huge_page_shift(h);
        unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
        unsigned long end_index;
        loff_t isize;
        ssize_t retval = 0;

        while (iov_iter_count(to)) {
                struct page *page;
                size_t nr, copied;

                /* nr is the maximum number of bytes to copy from this page */
                nr = huge_page_size(h);
                isize = i_size_read(inode);
                if (!isize)
                        break;
                end_index = (isize - 1) >> huge_page_shift(h);
                if (index > end_index)
                        break;
                if (index == end_index) {
                        nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
                        if (nr <= offset)
                                break;
                }
                nr = nr - offset;

                /* Find the page */
                page = find_lock_page(mapping, index);
                if (unlikely(page == NULL)) {
                        /*
                         * We have a HOLE, zero out the user-buffer for the
                         * length of the hole or request.
                         */
                        copied = iov_iter_zero(nr, to);
                } else {
                        unlock_page(page);

                        /*
                         * We have the page, copy it to user space buffer.
                         */
                        copied = hugetlbfs_read_actor(page, offset, to, nr);
                        put_page(page);
                }
                offset += copied;
                retval += copied;
                if (copied != nr && iov_iter_count(to)) {
                        if (!retval)
                                retval = -EFAULT;
                        break;
                }
                index += offset >> huge_page_shift(h);
                offset &= ~huge_page_mask(h);
        }
        iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
        return retval;
}

static int hugetlbfs_write_begin(struct file *file,
                        struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
{
        BUG();
        return -EINVAL;
}

static void remove_huge_page(struct page *page)
{
        ClearPageDirty(page);
        ClearPageUptodate(page);
        delete_from_page_cache(page);
}

static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
{
        struct vm_area_struct *vma;

        /*
         * end == 0 indicates that the entire range after start should be
         * unmapped.  Note, end is exclusive, whereas the interval tree takes
         * an inclusive "last".
         */
        vma_interval_tree_foreach(vma, root, start, end ? end - 1 : ULONG_MAX) {
                unsigned long v_offset;
                unsigned long v_end;

                /*
                 * Can the expression below overflow on 32-bit arches?
                 * No, because the interval tree returns us only those vmas
                 * which overlap the truncated area starting at pgoff,
                 * and no vma on a 32-bit arch can span beyond the 4GB.
                 */
                if (vma->vm_pgoff < start)
                        v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
                else
                        v_offset = 0;

                if (!end)
                        v_end = vma->vm_end;
                else {
                        v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
                                                        + vma->vm_start;
                        if (v_end > vma->vm_end)
                                v_end = vma->vm_end;
                }

                unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
                                                                        NULL);
        }
}

/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *      In this case, we first scan the range and release found pages.
 *      After releasing pages, hugetlb_unreserve_pages cleans up region/reserve
 *      maps and global counts.  Page faults can not race with truncation
 *      in this routine.  hugetlb_no_page() holds i_mmap_rwsem and prevents
 *      page faults in the truncated range by checking i_size.  i_size is
 *      modified while holding i_mmap_rwsem.
 * hole punch is indicated if end is not LLONG_MAX
 *      In the hole punch case we scan the range and release found pages.
 *      Only when releasing a page is the associated region/reserve map
 *      deleted.  The region/reserve map for ranges without associated
 *      pages are not modified.  Page faults can race with hole punch.
 *      This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX, this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
                                   loff_t lend)
{
        struct hstate *h = hstate_inode(inode);
        struct address_space *mapping = &inode->i_data;
        const pgoff_t start = lstart >> huge_page_shift(h);
        const pgoff_t end = lend >> huge_page_shift(h);
        struct pagevec pvec;
        pgoff_t next, index;
        int i, freed = 0;
        bool truncate_op = (lend == LLONG_MAX);

        pagevec_init(&pvec);
        next = start;
        while (next < end) {
                /*
                 * When no more pages are found, we are done.
                 */
                if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
                        break;

                for (i = 0; i < pagevec_count(&pvec); ++i) {
                        struct page *page = pvec.pages[i];
                        u32 hash = 0;

                        index = page->index;
                        if (!truncate_op) {
                                /*
                                 * Only need to hold the fault mutex in the
                                 * hole punch case.  This prevents races with
                                 * page faults.  Races are not possible in the
                                 * case of truncation.
                                 */
                                hash = hugetlb_fault_mutex_hash(mapping, index);
                                mutex_lock(&hugetlb_fault_mutex_table[hash]);
                        }

                        /*
                         * If page is mapped, it was faulted in after being
                         * unmapped in caller.  Unmap (again) now after taking
                         * the fault mutex.  The mutex will prevent faults
                         * until we finish removing the page.
                         *
                         * This race can only happen in the hole punch case.
                         * Getting here in a truncate operation is a bug.
                         */
                        if (unlikely(page_mapped(page))) {
                                BUG_ON(truncate_op);

                                mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                                i_mmap_lock_write(mapping);
                                mutex_lock(&hugetlb_fault_mutex_table[hash]);
                                hugetlb_vmdelete_list(&mapping->i_mmap,
                                        index * pages_per_huge_page(h),
                                        (index + 1) * pages_per_huge_page(h));
                                i_mmap_unlock_write(mapping);
                        }

                        lock_page(page);
                        /*
                         * We must free the huge page and remove it from page
                         * cache (remove_huge_page) BEFORE removing the
                         * region/reserve map (hugetlb_unreserve_pages).  In
                         * rare out of memory conditions, removal of the
                         * region/reserve map could fail.  Correspondingly,
                         * the subpool and global reserve usage count may need
                         * to be adjusted.
                         */
                        VM_BUG_ON(HPageRestoreReserve(page));
                        remove_huge_page(page);
                        freed++;
                        if (!truncate_op) {
                                if (unlikely(hugetlb_unreserve_pages(inode,
                                                        index, index + 1, 1)))
                                        hugetlb_fix_reserve_counts(inode);
                        }

                        unlock_page(page);
                        if (!truncate_op)
                                mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                }
                huge_pagevec_release(&pvec);
                cond_resched();
        }

        if (truncate_op)
                (void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
        struct resv_map *resv_map;

        remove_inode_hugepages(inode, 0, LLONG_MAX);

        /*
         * Get the resv_map from the address space embedded in the inode.
         * This is the address space which points to any resv_map allocated
         * at inode creation time.  If this is a device special inode,
         * i_mapping may not point to the original address space.
         */
        resv_map = (struct resv_map *)(&inode->i_data)->private_data;
        /* Only regular and link inodes have associated reserve maps */
        if (resv_map)
                resv_map_release(&resv_map->refs);
        clear_inode(inode);
}

static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
        pgoff_t pgoff;
        struct address_space *mapping = inode->i_mapping;
        struct hstate *h = hstate_inode(inode);

        BUG_ON(offset & ~huge_page_mask(h));
        pgoff = offset >> PAGE_SHIFT;

        i_mmap_lock_write(mapping);
        i_size_write(inode, offset);
        if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
                hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
        i_mmap_unlock_write(mapping);
        remove_inode_hugepages(inode, offset, LLONG_MAX);
}

static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
        struct hstate *h = hstate_inode(inode);
        loff_t hpage_size = huge_page_size(h);
        loff_t hole_start, hole_end;

        /*
         * For hole punch, round up the beginning offset of the hole and
         * round down the end.
         */
        hole_start = round_up(offset, hpage_size);
        hole_end = round_down(offset + len, hpage_size);
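        /*
         * (Illustrative: with 2MB huge pages, offset == 3MB and
         * len == 4MB give hole_start == 4MB and hole_end == 6MB, so
         * exactly one whole huge page, [4MB, 6MB), is punched out.)
         */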

        if (hole_end > hole_start) {
                struct address_space *mapping = inode->i_mapping;
                struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

                inode_lock(inode);

                /* protected by i_rwsem */
                if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
                        inode_unlock(inode);
                        return -EPERM;
                }

                i_mmap_lock_write(mapping);
                if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
                        hugetlb_vmdelete_list(&mapping->i_mmap,
                                                hole_start >> PAGE_SHIFT,
                                                hole_end  >> PAGE_SHIFT);
                i_mmap_unlock_write(mapping);
                remove_inode_hugepages(inode, hole_start, hole_end);
                inode_unlock(inode);
        }

        return 0;
}
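
/*
 * Illustrative userspace usage (not part of the original source): a hole
 * is punched with fallocate(2), which requires FALLOC_FL_KEEP_SIZE to be
 * ORed in with FALLOC_FL_PUNCH_HOLE:
 *
 *   fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *             offset, len);
 */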

static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
                                loff_t len)
{
        struct inode *inode = file_inode(file);
        struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
        struct address_space *mapping = inode->i_mapping;
        struct hstate *h = hstate_inode(inode);
        struct vm_area_struct pseudo_vma;
        struct mm_struct *mm = current->mm;
        loff_t hpage_size = huge_page_size(h);
        unsigned long hpage_shift = huge_page_shift(h);
        pgoff_t start, index, end;
        int error;
        u32 hash;

        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
                return -EOPNOTSUPP;

        if (mode & FALLOC_FL_PUNCH_HOLE)
                return hugetlbfs_punch_hole(inode, offset, len);

        /*
         * Default preallocate case.
         * For this range, start is rounded down and end is rounded up
         * as well as being converted to page offsets.
         */
        start = offset >> hpage_shift;
        end = (offset + len + hpage_size - 1) >> hpage_shift;

        inode_lock(inode);

        /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
        error = inode_newsize_ok(inode, offset + len);
        if (error)
                goto out;

        if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
                error = -EPERM;
                goto out;
        }

        /*
         * Initialize a pseudo vma as this is required by the huge page
         * allocation routines.  If NUMA is configured, use page index
         * as input to create an allocation policy.
         */
        vma_init(&pseudo_vma, mm);
        pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
        pseudo_vma.vm_file = file;

        for (index = start; index < end; index++) {
                /*
                 * This is supposed to be the vaddr where the page is being
                 * faulted in, but we have no vaddr here.
                 */
                struct page *page;
                unsigned long addr;

                cond_resched();

                /*
                 * fallocate(2) manpage permits EINTR; we may have been
                 * interrupted because we are using up too much memory.
                 */
                if (signal_pending(current)) {
                        error = -EINTR;
                        break;
                }

                /* Set numa allocation policy based on index */
                hugetlb_set_vma_policy(&pseudo_vma, inode, index);

                /* addr is the offset within the file (zero based) */
                addr = index * hpage_size;

                /*
                 * fault mutex taken here, protects against fault path
                 * and hole punch.  inode_lock previously taken protects
                 * against truncation.
                 */
                hash = hugetlb_fault_mutex_hash(mapping, index);
                mutex_lock(&hugetlb_fault_mutex_table[hash]);

                /* See if already present in mapping to avoid alloc/free */
                page = find_get_page(mapping, index);
                if (page) {
                        put_page(page);
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                        hugetlb_drop_vma_policy(&pseudo_vma);
                        continue;
                }

                /*
                 * Allocate page without setting the avoid_reserve argument.
                 * There certainly are no reserves associated with the
                 * pseudo_vma.  However, there could be shared mappings with
                 * reserves for the file at the inode level.  If we fallocate
                 * pages in these areas, we need to consume the reserves
                 * to keep reservation accounting consistent.
                 */
                page = alloc_huge_page(&pseudo_vma, addr, 0);
                hugetlb_drop_vma_policy(&pseudo_vma);
                if (IS_ERR(page)) {
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                        error = PTR_ERR(page);
                        goto out;
                }
                clear_huge_page(page, addr, pages_per_huge_page(h));
                __SetPageUptodate(page);
                error = huge_add_to_page_cache(page, mapping, index);
                if (unlikely(error)) {
                        restore_reserve_on_error(h, &pseudo_vma, addr, page);
                        put_page(page);
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                        goto out;
                }

                mutex_unlock(&hugetlb_fault_mutex_table[hash]);

                SetHPageMigratable(page);
                /*
                 * unlock_page() because the page was locked by
                 * add_to_page_cache(); put_page() to drop the extra
                 * reference taken in alloc_huge_page().
                 */
                unlock_page(page);
                put_page(page);
        }

        if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
                i_size_write(inode, offset + len);
        inode->i_ctime = current_time(inode);
out:
        inode_unlock(inode);
        return error;
}

static int hugetlbfs_setattr(struct user_namespace *mnt_userns,
                             struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = d_inode(dentry);
        struct hstate *h = hstate_inode(inode);
        int error;
        unsigned int ia_valid = attr->ia_valid;
        struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

        error = setattr_prepare(&init_user_ns, dentry, attr);
        if (error)
                return error;

        if (ia_valid & ATTR_SIZE) {
                loff_t oldsize = inode->i_size;
                loff_t newsize = attr->ia_size;

                if (newsize & ~huge_page_mask(h))
                        return -EINVAL;
                /* protected by i_rwsem */
                if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
                    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
                        return -EPERM;
                hugetlb_vmtruncate(inode, newsize);
        }

        setattr_copy(&init_user_ns, inode, attr);
        mark_inode_dirty(inode);
        return 0;
}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
                                        struct hugetlbfs_fs_context *ctx)
{
        struct inode *inode;

        inode = new_inode(sb);
        if (inode) {
                inode->i_ino = get_next_ino();
                inode->i_mode = S_IFDIR | ctx->mode;
                inode->i_uid = ctx->uid;
                inode->i_gid = ctx->gid;
                inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
                inode->i_op = &hugetlbfs_dir_inode_operations;
                inode->i_fop = &simple_dir_operations;
                /* directory inodes start off with i_nlink == 2 (for "." entry) */
                inc_nlink(inode);
                lockdep_annotate_inode_mutex_key(inode);
        }
        return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
                                        struct inode *dir,
                                        umode_t mode, dev_t dev)
{
        struct inode *inode;
        struct resv_map *resv_map = NULL;

        /*
         * Reserve maps are only needed for inodes that can have associated
         * page allocations.
         */
        if (S_ISREG(mode) || S_ISLNK(mode)) {
                resv_map = resv_map_alloc();
                if (!resv_map)
                        return NULL;
        }

        inode = new_inode(sb);
        if (inode) {
                struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

                inode->i_ino = get_next_ino();
                inode_init_owner(&init_user_ns, inode, dir, mode);
                lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
                                &hugetlbfs_i_mmap_rwsem_key);
                inode->i_mapping->a_ops = &hugetlbfs_aops;
                inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
                inode->i_mapping->private_data = resv_map;
                info->seals = F_SEAL_SEAL;
                switch (mode & S_IFMT) {
                default:
                        init_special_inode(inode, mode, dev);
                        break;
                case S_IFREG:
                        inode->i_op = &hugetlbfs_inode_operations;
                        inode->i_fop = &hugetlbfs_file_operations;
                        break;
                case S_IFDIR:
                        inode->i_op = &hugetlbfs_dir_inode_operations;
                        inode->i_fop = &simple_dir_operations;

                        /* directory inodes start off with i_nlink == 2 (for "." entry) */
                        inc_nlink(inode);
                        break;
                case S_IFLNK:
                        inode->i_op = &page_symlink_inode_operations;
                        inode_nohighmem(inode);
                        break;
                }
                lockdep_annotate_inode_mutex_key(inode);
        } else {
                if (resv_map)
                        kref_put(&resv_map->refs, resv_map_release);
        }

        return inode;
}

/*
 * File creation.  Allocate an inode, and we're done.
 */
static int do_hugetlbfs_mknod(struct inode *dir,
                        struct dentry *dentry,
                        umode_t mode,
                        dev_t dev,
                        bool tmpfile)
{
        struct inode *inode;
        int error = -ENOSPC;

        inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
        if (inode) {
                dir->i_ctime = dir->i_mtime = current_time(dir);
                if (tmpfile) {
                        d_tmpfile(dentry, inode);
                } else {
                        d_instantiate(dentry, inode);
                        dget(dentry);/* Extra count - pin the dentry in core */
                }
                error = 0;
        }
        return error;
}

static int hugetlbfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
                           struct dentry *dentry, umode_t mode, dev_t dev)
{
        return do_hugetlbfs_mknod(dir, dentry, mode, dev, false);
}

static int hugetlbfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
                           struct dentry *dentry, umode_t mode)
{
        int retval = hugetlbfs_mknod(&init_user_ns, dir, dentry,
                                     mode | S_IFDIR, 0);
        if (!retval)
                inc_nlink(dir);
        return retval;
}

static int hugetlbfs_create(struct user_namespace *mnt_userns,
                            struct inode *dir, struct dentry *dentry,
                            umode_t mode, bool excl)
{
        return hugetlbfs_mknod(&init_user_ns, dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_tmpfile(struct user_namespace *mnt_userns,
                             struct inode *dir, struct dentry *dentry,
                             umode_t mode)
{
        return do_hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0, true);
}

static int hugetlbfs_symlink(struct user_namespace *mnt_userns,
                             struct inode *dir, struct dentry *dentry,
                             const char *symname)
{
        struct inode *inode;
        int error = -ENOSPC;

        inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
        if (inode) {
                int l = strlen(symname)+1;
                error = page_symlink(inode, symname, l);
                if (!error) {
                        d_instantiate(dentry, inode);
                        dget(dentry);
                } else
                        iput(inode);
        }
        dir->i_ctime = dir->i_mtime = current_time(dir);

        return error;
}

static int hugetlbfs_migrate_page(struct address_space *mapping,
                                struct page *newpage, struct page *page,
                                enum migrate_mode mode)
{
        int rc;

        rc = migrate_huge_page_move_mapping(mapping, newpage, page);
        if (rc != MIGRATEPAGE_SUCCESS)
                return rc;

        if (hugetlb_page_subpool(page)) {
                hugetlb_set_page_subpool(newpage, hugetlb_page_subpool(page));
                hugetlb_set_page_subpool(page, NULL);
        }

        if (mode != MIGRATE_SYNC_NO_COPY)
                migrate_page_copy(newpage, page);
        else
                migrate_page_states(newpage, page);

        return MIGRATEPAGE_SUCCESS;
}

static int hugetlbfs_error_remove_page(struct address_space *mapping,
                                struct page *page)
{
        struct inode *inode = mapping->host;
        pgoff_t index = page->index;

        remove_huge_page(page);
        if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
                hugetlb_fix_reserve_counts(inode);

        return 0;
}

/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
        struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
        struct hugepage_subpool *spool = sbinfo->spool;
        unsigned long hpage_size = huge_page_size(sbinfo->hstate);
        unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
        char mod;

        if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
                seq_printf(m, ",uid=%u",
                           from_kuid_munged(&init_user_ns, sbinfo->uid));
        if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
                seq_printf(m, ",gid=%u",
                           from_kgid_munged(&init_user_ns, sbinfo->gid));
        if (sbinfo->mode != 0755)
                seq_printf(m, ",mode=%o", sbinfo->mode);
        if (sbinfo->max_inodes != -1)
                seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

        hpage_size /= 1024;
        mod = 'K';
        if (hpage_size >= 1024) {
                hpage_size /= 1024;
                mod = 'M';
        }
        seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
        if (spool) {
                if (spool->max_hpages != -1)
                        seq_printf(m, ",size=%llu",
                                   (unsigned long long)spool->max_hpages << hpage_shift);
                if (spool->min_hpages != -1)
                        seq_printf(m, ",min_size=%llu",
                                   (unsigned long long)spool->min_hpages << hpage_shift);
        }
        return 0;
}
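
/*
 * Illustrative output (not from the original source): a mount created
 * with size=1G on a 2MB hstate would show up in /proc/mounts with
 * options like ",pagesize=2M,size=1073741824" (512 pages << 21).
 */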

static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
        struct hstate *h = hstate_inode(d_inode(dentry));

        buf->f_type = HUGETLBFS_MAGIC;
        buf->f_bsize = huge_page_size(h);
        if (sbinfo) {
                spin_lock(&sbinfo->stat_lock);
                /* If no limits set, just report 0 for max/free/used
                 * blocks, like simple_statfs() */
                if (sbinfo->spool) {
                        long free_pages;

                        spin_lock(&sbinfo->spool->lock);
                        buf->f_blocks = sbinfo->spool->max_hpages;
                        free_pages = sbinfo->spool->max_hpages
                                - sbinfo->spool->used_hpages;
                        buf->f_bavail = buf->f_bfree = free_pages;
                        spin_unlock(&sbinfo->spool->lock);
                        buf->f_files = sbinfo->max_inodes;
                        buf->f_ffree = sbinfo->free_inodes;
                }
                spin_unlock(&sbinfo->stat_lock);
        }
        buf->f_namelen = NAME_MAX;
        return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
        struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

        if (sbi) {
                sb->s_fs_info = NULL;

                if (sbi->spool)
                        hugepage_put_subpool(sbi->spool);

                kfree(sbi);
        }
}

static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
        if (sbinfo->free_inodes >= 0) {
                spin_lock(&sbinfo->stat_lock);
                if (unlikely(!sbinfo->free_inodes)) {
                        spin_unlock(&sbinfo->stat_lock);
                        return 0;
                }
                sbinfo->free_inodes--;
                spin_unlock(&sbinfo->stat_lock);
        }

        return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
        if (sbinfo->free_inodes >= 0) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_inodes++;
                spin_unlock(&sbinfo->stat_lock);
        }
}


static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
        struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
        struct hugetlbfs_inode_info *p;

        if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
                return NULL;
        p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
        if (unlikely(!p)) {
                hugetlbfs_inc_free_inodes(sbinfo);
                return NULL;
        }

        /*
         * Any time after allocation, hugetlbfs_destroy_inode can be called
         * for the inode.  mpol_free_shared_policy is unconditionally called
         * as part of hugetlbfs_destroy_inode.  So, initialize policy here
         * in case of a quick call to destroy.
         *
         * Note that the policy is initialized even if we are creating a
         * private inode.  This simplifies hugetlbfs_destroy_inode.
         */
        mpol_shared_policy_init(&p->policy, NULL);

        return &p->vfs_inode;
}

static void hugetlbfs_free_inode(struct inode *inode)
{
        kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
        hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
        mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
}

static const struct address_space_operations hugetlbfs_aops = {
        .write_begin    = hugetlbfs_write_begin,
        .write_end      = hugetlbfs_write_end,
        .set_page_dirty =  __set_page_dirty_no_writeback,
        .migratepage    = hugetlbfs_migrate_page,
        .error_remove_page      = hugetlbfs_error_remove_page,
};


static void init_once(void *foo)
{
        struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

        inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
        .read_iter              = hugetlbfs_read_iter,
        .mmap                   = hugetlbfs_file_mmap,
        .fsync                  = noop_fsync,
        .get_unmapped_area      = hugetlb_get_unmapped_area,
        .llseek                 = default_llseek,
        .fallocate              = hugetlbfs_fallocate,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
        .create         = hugetlbfs_create,
        .lookup         = simple_lookup,
        .link           = simple_link,
        .unlink         = simple_unlink,
        .symlink        = hugetlbfs_symlink,
        .mkdir          = hugetlbfs_mkdir,
        .rmdir          = simple_rmdir,
        .mknod          = hugetlbfs_mknod,
        .rename         = simple_rename,
        .setattr        = hugetlbfs_setattr,
        .tmpfile        = hugetlbfs_tmpfile,
};

static const struct inode_operations hugetlbfs_inode_operations = {
        .setattr        = hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
        .alloc_inode    = hugetlbfs_alloc_inode,
        .free_inode     = hugetlbfs_free_inode,
        .destroy_inode  = hugetlbfs_destroy_inode,
        .evict_inode    = hugetlbfs_evict_inode,
        .statfs         = hugetlbfs_statfs,
        .put_super      = hugetlbfs_put_super,
        .show_options   = hugetlbfs_show_options,
};

/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate.  Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
static long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
                         enum hugetlbfs_size_type val_type)
{
        if (val_type == NO_SIZE)
                return -1;

        if (val_type == SIZE_PERCENT) {
                size_opt <<= huge_page_shift(h);
                size_opt *= h->max_huge_pages;
                do_div(size_opt, 100);
        }

        size_opt >>= huge_page_shift(h);
        return size_opt;
}
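
/*
 * Worked example (illustrative, not in the original source): with 2MB
 * huge pages and a pool of 512 pages, "size=50%" arrives here as
 * size_opt == 50 with SIZE_PERCENT, giving
 * ((50 << 21) * 512 / 100) >> 21 == 256 pages; "size=1G" arrives as
 * size_opt == 0x40000000 with SIZE_STD, giving 0x40000000 >> 21 == 512
 * pages.
 */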

/*
 * Parse one mount parameter.
 */
static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
        struct hugetlbfs_fs_context *ctx = fc->fs_private;
        struct fs_parse_result result;
        char *rest;
        unsigned long ps;
        int opt;

        opt = fs_parse(fc, hugetlb_fs_parameters, param, &result);
        if (opt < 0)
                return opt;

        switch (opt) {
        case Opt_uid:
                ctx->uid = make_kuid(current_user_ns(), result.uint_32);
                if (!uid_valid(ctx->uid))
                        goto bad_val;
                return 0;

        case Opt_gid:
                ctx->gid = make_kgid(current_user_ns(), result.uint_32);
                if (!gid_valid(ctx->gid))
                        goto bad_val;
                return 0;

        case Opt_mode:
                ctx->mode = result.uint_32 & 01777U;
                return 0;

        case Opt_size:
                /* memparse() will accept a K/M/G without a digit */
                if (!isdigit(param->string[0]))
                        goto bad_val;
                ctx->max_size_opt = memparse(param->string, &rest);
                ctx->max_val_type = SIZE_STD;
                if (*rest == '%')
                        ctx->max_val_type = SIZE_PERCENT;
                return 0;

        case Opt_nr_inodes:
                /* memparse() will accept a K/M/G without a digit */
                if (!isdigit(param->string[0]))
                        goto bad_val;
                ctx->nr_inodes = memparse(param->string, &rest);
                return 0;

        case Opt_pagesize:
                ps = memparse(param->string, &rest);
                ctx->hstate = size_to_hstate(ps);
                if (!ctx->hstate) {
                        pr_err("Unsupported page size %lu MB\n", ps >> 20);
                        return -EINVAL;
                }
                return 0;

        case Opt_min_size:
                /* memparse() will accept a K/M/G without a digit */
                if (!isdigit(param->string[0]))
                        goto bad_val;
                ctx->min_size_opt = memparse(param->string, &rest);
                ctx->min_val_type = SIZE_STD;
                if (*rest == '%')
                        ctx->min_val_type = SIZE_PERCENT;
                return 0;

        default:
                return -EINVAL;
        }

bad_val:
        return invalfc(fc, "Bad value '%s' for mount option '%s'\n",
                      param->string, param->key);
}

/*
 * Validate the parsed options.
 */
static int hugetlbfs_validate(struct fs_context *fc)
{
        struct hugetlbfs_fs_context *ctx = fc->fs_private;

        /*
         * Use huge page pool size (in hstate) to convert the size
         * options to number of huge pages.  If NO_SIZE, -1 is returned.
         */
        ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
                                                   ctx->max_size_opt,
                                                   ctx->max_val_type);
        ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
                                                   ctx->min_size_opt,
                                                   ctx->min_val_type);

        /*
         * If max_size was specified, then min_size must be smaller
         */
        if (ctx->max_val_type > NO_SIZE &&
            ctx->min_hpages > ctx->max_hpages) {
                pr_err("Minimum size can not be greater than maximum size\n");
                return -EINVAL;
        }

        return 0;
}

static int
hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
        struct hugetlbfs_fs_context *ctx = fc->fs_private;
        struct hugetlbfs_sb_info *sbinfo;

        sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
        if (!sbinfo)
                return -ENOMEM;
        sb->s_fs_info = sbinfo;
        spin_lock_init(&sbinfo->stat_lock);
        sbinfo->hstate          = ctx->hstate;
        sbinfo->max_inodes      = ctx->nr_inodes;
        sbinfo->free_inodes     = ctx->nr_inodes;
        sbinfo->spool           = NULL;
        sbinfo->uid             = ctx->uid;
        sbinfo->gid             = ctx->gid;
        sbinfo->mode            = ctx->mode;

        /*
         * Allocate and initialize subpool if maximum or minimum size is
         * specified.  Any needed reservations (for minimum size) are
         * taken when the subpool is created.
         */
        if (ctx->max_hpages != -1 || ctx->min_hpages != -1) {
                sbinfo->spool = hugepage_new_subpool(ctx->hstate,
                                                     ctx->max_hpages,
                                                     ctx->min_hpages);
                if (!sbinfo->spool)
                        goto out_free;
        }
        sb->s_maxbytes = MAX_LFS_FILESIZE;
        sb->s_blocksize = huge_page_size(ctx->hstate);
        sb->s_blocksize_bits = huge_page_shift(ctx->hstate);
        sb->s_magic = HUGETLBFS_MAGIC;
        sb->s_op = &hugetlbfs_ops;
        sb->s_time_gran = 1;

        /*
         * Due to the special and limited functionality of hugetlbfs, it does
         * not work well as a stacking filesystem.
         */
        sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
        sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
        if (!sb->s_root)
                goto out_free;
        return 0;
out_free:
        kfree(sbinfo->spool);
        kfree(sbinfo);
        return -ENOMEM;
}

static int hugetlbfs_get_tree(struct fs_context *fc)
{
        int err = hugetlbfs_validate(fc);
        if (err)
                return err;
        return get_tree_nodev(fc, hugetlbfs_fill_super);
}

static void hugetlbfs_fs_context_free(struct fs_context *fc)
{
        kfree(fc->fs_private);
}

static const struct fs_context_operations hugetlbfs_fs_context_ops = {
        .free           = hugetlbfs_fs_context_free,
        .parse_param    = hugetlbfs_parse_param,
        .get_tree       = hugetlbfs_get_tree,
};

static int hugetlbfs_init_fs_context(struct fs_context *fc)
{
        struct hugetlbfs_fs_context *ctx;

        ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        ctx->max_hpages = -1; /* No limit on size by default */
        ctx->nr_inodes  = -1; /* No limit on number of inodes by default */
        ctx->uid        = current_fsuid();
        ctx->gid        = current_fsgid();
        ctx->mode       = 0755;
        ctx->hstate     = &default_hstate;
        ctx->min_hpages = -1; /* No default minimum size */
        ctx->max_val_type = NO_SIZE;
        ctx->min_val_type = NO_SIZE;
        fc->fs_private = ctx;
        fc->ops = &hugetlbfs_fs_context_ops;
        return 0;
}

static struct file_system_type hugetlbfs_fs_type = {
        .name                   = "hugetlbfs",
        .init_fs_context        = hugetlbfs_init_fs_context,
        .parameters             = hugetlb_fs_parameters,
        .kill_sb                = kill_litter_super,
};

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

static int can_do_hugetlb_shm(void)
{
        kgid_t shm_group;
        shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
        return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

static int get_hstate_idx(int page_size_log)
{
        struct hstate *h = hstate_sizelog(page_size_log);

        if (!h)
                return -1;
        return hstate_index(h);
}

/*
 * Note that size should be aligned to the proper hugepage size on the
 * caller's side, otherwise hugetlb_reserve_pages() reserves one fewer
 * hugepage than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
                                vm_flags_t acctflag, int creat_flags,
                                int page_size_log)
{
        struct inode *inode;
        struct vfsmount *mnt;
        int hstate_idx;
        struct file *file;

        hstate_idx = get_hstate_idx(page_size_log);
        if (hstate_idx < 0)
                return ERR_PTR(-ENODEV);

        mnt = hugetlbfs_vfsmount[hstate_idx];
        if (!mnt)
                return ERR_PTR(-ENOENT);

        if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
                struct ucounts *ucounts = current_ucounts();

                if (user_shm_lock(size, ucounts)) {
                        pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is obsolete\n",
                                current->comm, current->pid);
                        user_shm_unlock(size, ucounts);
                }
                return ERR_PTR(-EPERM);
        }

        file = ERR_PTR(-ENOSPC);
        inode = hugetlbfs_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0);
        if (!inode)
                goto out;
        if (creat_flags == HUGETLB_SHMFS_INODE)
                inode->i_flags |= S_PRIVATE;

        inode->i_size = size;
        clear_nlink(inode);

        if (!hugetlb_reserve_pages(inode, 0,
                        size >> huge_page_shift(hstate_inode(inode)), NULL,
                        acctflag))
                file = ERR_PTR(-ENOMEM);
        else
                file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
                                        &hugetlbfs_file_operations);
        if (!IS_ERR(file))
                return file;

        iput(inode);
out:
        return file;
}

static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
{
        struct fs_context *fc;
        struct vfsmount *mnt;

        fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT);
        if (IS_ERR(fc)) {
                mnt = ERR_CAST(fc);
        } else {
                struct hugetlbfs_fs_context *ctx = fc->fs_private;
                ctx->hstate = h;
                mnt = fc_mount(fc);
                put_fs_context(fc);
        }
        if (IS_ERR(mnt))
                pr_err("Cannot mount internal hugetlbfs for page size %luK",
                       huge_page_size(h) >> 10);
        return mnt;
}

static int __init init_hugetlbfs_fs(void)
{
        struct vfsmount *mnt;
        struct hstate *h;
        int error;
        int i;

        if (!hugepages_supported()) {
                pr_info("disabling because there are no supported hugepage sizes\n");
                return -ENOTSUPP;
        }

        error = -ENOMEM;
        hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
                                        sizeof(struct hugetlbfs_inode_info),
                                        0, SLAB_ACCOUNT, init_once);
        if (hugetlbfs_inode_cachep == NULL)
                goto out;

        error = register_filesystem(&hugetlbfs_fs_type);
        if (error)
                goto out_free;

        /* default hstate mount is required */
        mnt = mount_one_hugetlbfs(&default_hstate);
        if (IS_ERR(mnt)) {
                error = PTR_ERR(mnt);
                goto out_unreg;
        }
        hugetlbfs_vfsmount[default_hstate_idx] = mnt;

        /* other hstates are optional */
        i = 0;
        for_each_hstate(h) {
                if (i == default_hstate_idx) {
                        i++;
                        continue;
                }

                mnt = mount_one_hugetlbfs(h);
                if (IS_ERR(mnt))
                        hugetlbfs_vfsmount[i] = NULL;
                else
                        hugetlbfs_vfsmount[i] = mnt;
                i++;
        }

        return 0;

 out_unreg:
        (void)unregister_filesystem(&hugetlbfs_fs_type);
 out_free:
        kmem_cache_destroy(hugetlbfs_inode_cachep);
 out:
        return error;
}
fs_initcall(init_hugetlbfs_fs)