// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
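
/*
 * Illustrative sketch (editor's addition, not part of the build): once the
 * vmemmap is populated, the SPARSEMEM_VMEMMAP memory model reduces the
 * pfn/page conversions to pointer arithmetic against a single base, as in
 * asm-generic/memory_model.h:
 *
 *	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
 *	#define __page_to_pfn(page)	(unsigned long)((page) - vmemmap)
 *
 * where "vmemmap" is an arch-defined base address of the virtual map.
 */
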
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else bootmem.
 */
static void * __ref __earlyonly_bootmem_alloc(int node, unsigned long size,
					      unsigned long align,
					      unsigned long goal)
{
	return memblock_virt_alloc_try_nid_raw(size, align, goal,
					       BOOTMEM_ALLOC_ACCESSIBLE, node);
}

static void *vmemmap_buf;
static void *vmemmap_buf_end;

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up use that, fallback to bootmem. */
	if (slab_is_available()) {
		struct page *page;

		page = alloc_pages_node(node, GFP_KERNEL | __GFP_RETRY_MAYFAIL,
					get_order(size));
		if (page)
			return page_address(page);
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}

/* early-stage callers must all request the same @size (see note below) */
static void * __meminit alloc_block_buf(unsigned long size, int node)
{
	void *ptr;

	if (!vmemmap_buf)
		return vmemmap_alloc_block(size, node);

	/* take the allocation from the preallocated buffer */
	ptr = (void *)ALIGN((unsigned long)vmemmap_buf, size);
	if (ptr + size > vmemmap_buf_end)
		return vmemmap_alloc_block(size, node);

	vmemmap_buf = ptr + size;
	return ptr;
}
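
/*
 * Editor's note on the same-size rule above: the cursor is aligned up to
 * @size on every call, so as long as all early allocations use one size the
 * buffer is packed without holes. Mixing sizes would strand alignment
 * padding between allocations (a PAGE_SIZE request followed by a PMD_SIZE
 * one could skip nearly PMD_SIZE bytes), and only the unused tail of the
 * buffer is ever returned (see sparse_mem_maps_populate_node() below).
 */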

static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}

static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}

/**
 * vmem_altmap_alloc - allocate pages from the vmem_altmap reservation
 * @altmap: reserved page pool for the allocation
 * @nr_pfns: size (in pages) of the allocation
 *
 * Allocations are aligned to the size of the request.
 */
static unsigned long __meminit vmem_altmap_alloc(struct vmem_altmap *altmap,
		unsigned long nr_pfns)
{
	unsigned long pfn = vmem_altmap_next_pfn(altmap);
	unsigned long nr_align;

	/* align the start pfn to the largest power-of-two factor of nr_pfns */
	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;

	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return ULONG_MAX;
	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	return pfn + nr_align;
}
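
/*
 * Worked example (editor's addition): for a 2MiB request with 4KiB pages,
 * nr_pfns = 512, find_first_bit() yields 9, and the start pfn is rounded
 * up to a 512-page boundary; e.g. pfn 0x10010 becomes 0x10200, with the
 * 0x1f0 pages of padding accounted in altmap->align.
 */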

static void * __meminit altmap_alloc_block_buf(unsigned long size,
		struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns;
	void *ptr;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%lu)\n",
				__func__, size);
		return NULL;
	}

	nr_pfns = size >> PAGE_SHIFT;
	pfn = vmem_altmap_alloc(altmap, nr_pfns);
	if (pfn < ULONG_MAX)
		ptr = __va(__pfn_to_phys(pfn));
	else
		ptr = NULL;
	pr_debug("%s: pfn: %#lx alloc: %lu align: %lu nr: %#lx\n",
			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);
	return ptr;
}

/* early-stage callers must all request the same @size (see note above) */
void * __meminit __vmemmap_alloc_block_buf(unsigned long size, int node,
		struct vmem_altmap *altmap)
{
	if (altmap)
		return altmap_alloc_block_buf(size, altmap);
	return alloc_block_buf(size, node);
}
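
/*
 * Usage sketch (editor's addition, condensed from x86's
 * vmemmap_populate_hugepages() of the same era): an arch populate routine
 * backing the memmap with huge pages allocates PMD_SIZE blocks here:
 *
 *	void *p = __vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
 *	if (p) {
 *		pte_t entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
 *				      PAGE_KERNEL_LARGE);
 *		set_pmd(pmd, __pmd(pte_val(entry)));
 *	}
 */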

void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}

pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p = alloc_block_buf(PAGE_SIZE, node);
		if (!p)
			return NULL;
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
{
	void *p = vmemmap_alloc_block(size, node);

	if (!p)
		return NULL;
	memset(p, 0, size);
	return p;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		p4d_populate(&init_mm, p4d, p);
	}
	return p4d;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

int __meminit vmemmap_populate_basepages(unsigned long start,
					 unsigned long end, int node)
{
	unsigned long addr = start;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/* walk and, where missing, allocate every level for each base page */
	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		p4d = vmemmap_p4d_populate(pgd, addr, node);
		if (!p4d)
			return -ENOMEM;
		pud = vmemmap_pud_populate(p4d, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}
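
/*
 * Minimal arch hook (editor's sketch): an architecture without special
 * huge-page handling can satisfy the vmemmap_populate() requirement with a
 * direct wrapper, as several arches do:
 *
 *	int __meminit vmemmap_populate(unsigned long start,
 *				       unsigned long end, int node)
 *	{
 *		return vmemmap_populate_basepages(start, end, node);
 *	}
 */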

struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
{
	unsigned long start;
	unsigned long end;
	struct page *map;

	map = pfn_to_page(pnum * PAGES_PER_SECTION);
	start = (unsigned long)map;
	end = (unsigned long)(map + PAGES_PER_SECTION);

	if (vmemmap_populate(start, end, nid))
		return NULL;

	return map;
}

void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;
	void *vmemmap_buf_start;

	size = ALIGN(size, PMD_SIZE);
	vmemmap_buf_start = __earlyonly_bootmem_alloc(nodeid, size * map_count,
						      PMD_SIZE,
						      __pa(MAX_DMA_ADDRESS));

	if (vmemmap_buf_start) {
		vmemmap_buf = vmemmap_buf_start;
		vmemmap_buf_end = vmemmap_buf_start + size * map_count;
	}

	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;

		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		pr_err("%s: sparsemem memory map backing failed, some memory will not be available\n",
		       __func__);
		ms->section_mem_map = 0;
	}

	if (vmemmap_buf_start) {
		/* hand the unused tail of the buffer back to memblock */
		memblock_free_early(__pa(vmemmap_buf),
				    vmemmap_buf_end - vmemmap_buf);
		vmemmap_buf = NULL;
		vmemmap_buf_end = NULL;
	}
}
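
/*
 * Editor's note: at boot this path is driven from sparse_init() in
 * mm/sparse.c, which batches the present sections per node so that each
 * node's section memmaps are carved from one contiguous, PMD-aligned
 * early allocation and the leftover tail is freed above.
 */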