// SPDX-License-Identifier: GPL-2.0
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/compat.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/elf.h>

/*
 * pmd_huge() returns 1 if @pmd is a hugetlb related entry, that is, a normal
 * hugetlb entry or a non-present (migration or hwpoisoned) hugetlb entry.
 * Otherwise, it returns 0.
 */
int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}
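
/*
 * Illustratively, the cases the check in pmd_huge() separates:
 *
 *	pmd_none()                          -> 0 (empty entry)
 *	_PAGE_PRESENT set, _PAGE_PSE clear  -> 0 (points to a pte table)
 *	_PAGE_PRESENT and _PAGE_PSE set     -> 1 (present huge mapping)
 *	_PAGE_PRESENT clear, not none       -> 1 (migration/hwpoison entry)
 */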

int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PSE);
}

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = get_mmap_base(1);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 */
	info.high_limit = in_32bit_syscall() ?
		task_size_32bit() : task_size_64bit(addr > DEFAULT_MAP_WINDOW);

	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
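
/*
 * For example, with 4K base pages and 2M huge pages the mask above is
 * PAGE_MASK & ~huge_page_mask(h) == 0x1ff000, so vm_unmapped_area() may
 * only return addresses with the low 21 bits clear, i.e. 2M-aligned ones.
 */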

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = get_mmap_base(0);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 */
	if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
		info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE_LOW;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
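
/*
 * The bottom-up retry above matters in practice when RLIMIT_STACK is very
 * large: get_mmap_base(0) then sits low in the address space, so the
 * top-down window can be too small for a big hugetlb mapping, while the
 * classic [TASK_UNMAPPED_BASE, TASK_SIZE_LOW) range still has room.
 */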

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;

	if (len > TASK_SIZE)
		return -ENOMEM;

	/* No address checking. See comment at mmap_address_hint_valid() */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr &= huge_page_mask(h);
		if (!mmap_address_hint_valid(addr, len))
			goto get_unmapped_area;

		vma = find_vma(mm, addr);
		if (!vma || addr + len <= vm_start_gap(vma))
			return addr;
	}

get_unmapped_area:
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
#endif /* CONFIG_HUGETLB_PAGE */
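
/*
 * An illustrative userspace sketch (not part of the kernel build) that
 * exercises hugetlb_get_unmapped_area() above when no hint address is
 * given; it assumes 2M huge pages have been configured (e.g. via
 * /proc/sys/vm/nr_hugepages):
 *
 *	#include <stdio.h>
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 2UL << 20;		// one 2M huge page
 *		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
 *			       -1, 0);
 *
 *		if (p == MAP_FAILED)
 *			return 1;
 *		printf("huge mapping at %p\n", p);	// 2M-aligned
 *		munmap(p, len);
 *		return 0;
 *	}
 */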

#ifdef CONFIG_X86_64
bool __init arch_hugetlb_valid_size(unsigned long size)
{
	if (size == PMD_SIZE)
		return true;
	else if (size == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES))
		return true;
	else
		return false;
}

#ifdef CONFIG_CONTIG_ALLOC
static __init int gigantic_pages_init(void)
{
	/* With compaction or CMA we can allocate gigantic pages at runtime */
	if (boot_cpu_has(X86_FEATURE_GBPAGES))
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	return 0;
}
arch_initcall(gigantic_pages_init);
#endif /* CONFIG_CONTIG_ALLOC */
#endif /* CONFIG_X86_64 */
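
/*
 * For example, on a CPU with X86_FEATURE_GBPAGES, booting with
 * "hugepagesz=1G hugepages=2" passes arch_hugetlb_valid_size() above and
 * reserves two gigantic pages at boot; with CONFIG_CONTIG_ALLOC the same
 * can be requested at runtime:
 *
 *	echo 2 > /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages
 */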