/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>

#define INIT_MEMBLOCK_REGIONS			128
#define INIT_PHYSMEM_REGIONS			4

#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
# define INIT_MEMBLOCK_RESERVED_REGIONS		INIT_MEMBLOCK_REGIONS
#endif

/**
 * DOC: memblock overview
 *
 * Memblock is a method of managing memory regions during the early
 * boot period when the usual kernel memory allocators are not up and
 * running.
 *
 * Memblock views the system memory as collections of contiguous
 * regions. There are several types of these collections:
 *
 * * ``memory`` - describes the physical memory available to the
 *   kernel; this may differ from the actual physical memory installed
 *   in the system, for instance when the memory is restricted with
 *   ``mem=`` command line parameter
 * * ``reserved`` - describes the regions that were allocated
 * * ``physmap`` - describes the actual physical memory regardless of
 *   the possible restrictions; the ``physmap`` type is only available
 *   on some architectures.
 *
 * Each region is represented by :c:type:`struct memblock_region` that
 * defines the region extents, its attributes and NUMA node id on NUMA
 * systems. Every memory type is described by the :c:type:`struct
 * memblock_type` which contains an array of memory regions along with
 * the allocator metadata. The memory types are nicely wrapped with
 * :c:type:`struct memblock`. This structure is statically initialized
 * at build time. The region arrays for the "memory" and "reserved"
 * types are initially sized to %INIT_MEMBLOCK_REGIONS and for the
 * "physmap" type to %INIT_PHYSMEM_REGIONS.
 * The :c:func:`memblock_allow_resize` enables automatic resizing of
 * the region arrays during addition of new regions. This feature
 * should be used with care so that memory allocated for the region
 * array will not overlap with areas that should be reserved, for
 * example, initrd.
 *
 * The early architecture setup should tell memblock what the physical
 * memory layout is by using :c:func:`memblock_add` or
 * :c:func:`memblock_add_node` functions. The first function does not
 * assign the region to a NUMA node and it is appropriate for UMA
 * systems. Yet, it is possible to use it on NUMA systems as well and
 * assign the region to a NUMA node later in the setup process using
 * :c:func:`memblock_set_node`. The :c:func:`memblock_add_node`
 * performs such an assignment directly.
 *
 * Once memblock is set up, the memory can be allocated using either
 * memblock or bootmem APIs.
 *
 * As the system boot progresses, the architecture specific
 * :c:func:`mem_init` function frees all the memory to the buddy page
 * allocator.
 *
 * If an architecture enables %CONFIG_ARCH_DISCARD_MEMBLOCK, the
 * memblock data structures will be discarded after the system
 * initialization completes.
 */

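/*
 * As a rough illustration (a hypothetical early-setup sketch, not code
 * from this file; addresses, sizes and the nid are made up), an
 * architecture would typically wire memblock up like this:
 *
 *	memblock_add(base, size);
 *	memblock_set_node(base, size, &memblock.memory, nid);
 *	memblock_reserve(initrd_start, initrd_size);
 *	memblock_allow_resize();
 */
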
#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,
	.memory.name		= "memory",

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_RESERVED_REGIONS,
	.reserved.name		= "reserved",

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	.physmem.regions	= memblock_physmem_init_regions,
	.physmem.cnt		= 1,	/* empty dummy entry */
	.physmem.max		= INIT_PHYSMEM_REGIONS,
	.physmem.name		= "physmem",
#endif

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

static enum memblock_flags __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, PHYS_ADDR_MAX - base);
}

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			break;

	return i < type->cnt;
}

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When allocation direction is bottom-up, the @start should be greater
 * than the end of the kernel image. Otherwise, it will be trimmed. The
 * reason is that we want the bottom-up allocation just near the kernel
 * image so it is highly likely that the allocated memory and the kernel
 * will reside in the same node.
 *
 * If bottom-up allocation fails, memblock will try to allocate memory
 * top-down.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					enum memblock_flags flags)
{
	phys_addr_t kernel_end, ret;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
	    end == MEMBLOCK_ALLOC_KASAN)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	/*
	 * try bottom-up allocation only when bottom-up mode
	 * is set and @end is above the kernel image.
	 */
	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid, flags);
		if (ret)
			return ret;

		/*
		 * we always limit bottom-up allocation above the kernel,
		 * but top-down allocation doesn't have the limit, so
		 * retrying top-down allocation may succeed when bottom-up
		 * allocation failed.
		 *
		 * bottom-up allocation is expected to fail very rarely,
		 * so we use WARN_ONCE() here to see the stack trace if
		 * it happens.
		 */
		WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
			  "memblock: bottom-up allocation failed, memory hotremove may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid,
					      flags);
}

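/*
 * Illustrative note (not code from this file): an architecture opts in
 * to the bottom-up policy checked above via the public helper, e.g.
 *
 *	memblock_set_bottom_up(true);
 *
 * after which allocations are first attempted bottom-up just above the
 * kernel image, with the top-down search kept as the fallback.
 */
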
/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	enum memblock_flags flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					  NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
/**
 * memblock_discard - discard memory and reserved arrays if they were allocated
 */
void __init memblock_discard(void)
{
	phys_addr_t addr, size;

	if (memblock.reserved.regions != memblock_reserved_init_regions) {
		addr = __pa(memblock.reserved.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.reserved.max);
		__memblock_free_late(addr, size);
	}

	if (memblock.memory.regions != memblock_memory_init_regions) {
		addr = __pa(memblock.memory.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.memory.max);
		__memblock_free_late(addr, size);
	}
}
#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * Return:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr, new_end;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new one aligned to PAGE_SIZE,
	 * so we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() and we use it or
	 * we use MEMBLOCK for allocations. That means that this is unsafe to
	 * use when bootmem is currently active (unless bootmem itself is
	 * implemented on top of MEMBLOCK which isn't the case yet)
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab
	 * is active for memory hotplug operations
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       type->name, type->max, type->max * 2);
		return -1;
	}

	new_end = addr + new_size - 1;
	memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
			type->name, type->max * 2, &addr, &new_end);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock.
	 * Otherwise, we needn't do it.
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}

/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: node id of the new region
 * @flags: flags of the new region
 *
 * Insert new memblock region [@base, @base + @size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   enum memblock_flags flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, enum memblock_flags flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert)
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
	}

	if (!nr_new)
		return 0;

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}

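/*
 * Worked example (made-up addresses): if @type already holds
 * [0x1000-0x3000) and memblock_add_range() is called with base 0x2000
 * and size 0x2000 (same nid and flags), only the non-overlapping part
 * [0x3000-0x4000) is inserted, and memblock_merge_regions() then leaves
 * a single region [0x1000-0x4000).
 */
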
/**
 * memblock_add_node - add new memblock region within a NUMA node
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				      int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

/**
 * memblock_add - add new memblock region
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_add: [%pa-%pa] %pF\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base, @base + @size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}

static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

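/*
 * Worked example (made-up addresses): removing [0x2000-0x3000) from a
 * single region [0x1000-0x4000) first isolates the range, splitting it
 * into [0x1000-0x2000), [0x2000-0x3000) and [0x3000-0x4000), and then
 * drops the middle region, leaving a hole between the other two.
 */
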
int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_remove: [%pa-%pa] %pS\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_remove_range(&memblock.memory, base, size);
}

/**
 * memblock_free - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("   memblock_free: [%pa-%pa] %pF\n",
		     &base, &end, (void *)_RET_IP_);

	kmemleak_free_part_phys(base, size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_reserve: [%pa-%pa] %pF\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_setclr_flag - set or clear flag for a memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: set or clear the flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears
 * @flag on each region within it.
 *
 * Return: 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++) {
		struct memblock_region *r = &type->regions[i];

		if (set)
			r->flags |= flag;
		else
			r->flags &= ~flag;
	}

	memblock_merge_regions(type);
	return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}

/**
 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
}

/**
 * __next_reserved_mem_region - next function for for_each_reserved_region()
 * @idx: pointer to u64 loop variable
 * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL
 *
 * Iterate over all reserved memory regions.
 */
void __init_memblock __next_reserved_mem_region(u64 *idx,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end)
{
	struct memblock_type *type = &memblock.reserved;

	if (*idx < type->cnt) {
		struct memblock_region *r = &type->regions[*idx];
		phys_addr_t base = r->base;
		phys_addr_t size = r->size;

		if (out_start)
			*out_start = base;
		if (out_end)
			*out_end = base + size - 1;

		*idx += 1;
		return;
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.  For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_mem_range(u64 *idx, int nid,
				      enum memblock_flags flags,
				      struct memblock_type *type_a,
				      struct memblock_type *type_b,
				      phys_addr_t *out_start,
				      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	"Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int	    m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		/* skip nomap memory unless we were asked for it explicitly */
		if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

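/*
 * Callers normally use the wrapper macro instead of calling this
 * iterator directly; a usage sketch (illustrative only):
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		pr_info("free range: [%pa-%pa]\n", &start, &end);
 */
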
/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
					  enum memblock_flags flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		if (type_b != NULL)
			idx_b = type_b->cnt;
		else
			idx_b = 0;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		/* skip nomap memory unless we were asked for it explicitly */
		if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

/**
 * memblock_alloc_range_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (phys address)
 * @end: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE.
 *
 * If the specified node cannot hold the requested memory the
 * allocation falls back to any node in the system.
 *
 * For systems with memory mirroring, the allocation is attempted first
 * from the regions with mirroring enabled and then retried from any
 * memory region.
 *
 * In addition, the function sets the min_count to 0 using
 * kmemleak_alloc_phys() for the allocated boot memory block, so that it
 * is never reported as a leak.
 *
 * Return:
 * Physical address of allocated memory block on success, %0 on failure.
 */
static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid)
{
	enum memblock_flags flags = choose_memblock_flags();
	phys_addr_t found;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (!align) {
		/* Can't use WARNs this early in boot on powerpc */
		dump_stack();
		align = SMP_CACHE_BYTES;
	}

	if (end > memblock.current_limit)
		end = memblock.current_limit;

again:
	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !memblock_reserve(found, size))
		goto done;

	if (nid != NUMA_NO_NODE) {
		found = memblock_find_in_range_node(size, align, start,
						    end, NUMA_NO_NODE,
						    flags);
		if (found && !memblock_reserve(found, size))
			goto done;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}

	return 0;

done:
	/* Skip kmemleak for kasan_init() due to high volume. */
	if (end != MEMBLOCK_ALLOC_KASAN)
		/*
		 * The min_count is set to 0 so that memblock allocated
		 * blocks are never reported as leaks. This is because many
		 * of these blocks are only referred via the physical
		 * address which is not looked up by kmemleak.
		 */
		kmemleak_alloc_phys(found, size, 0, 0);

	return found;
}

phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
					     phys_addr_t align,
					     phys_addr_t start,
					     phys_addr_t end)
{
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE);
}

phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	return memblock_alloc_range_nid(size, align, 0,
					MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}

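/*
 * Usage sketch (illustrative; SZ_16K stands in for a real size): a
 * caller needing a physically contiguous buffer during early boot
 * might do:
 *
 *	phys_addr_t pa;
 *
 *	pa = memblock_phys_alloc_try_nid(SZ_16K, SMP_CACHE_BYTES,
 *					 NUMA_NO_NODE);
 *	if (!pa)
 *		panic("early buffer allocation failed\n");
 */
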
/**
 * memblock_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Allocates memory block using memblock_alloc_range_nid() and
 * converts the returned physical address to virtual.
 *
 * The @min_addr limit is dropped if it cannot be satisfied and the allocation
 * will fall back to memory below @min_addr. Other constraints, such
 * as node and mirrored memory will be handled again in
 * memblock_alloc_range_nid().
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	phys_addr_t alloc;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of memblock_free_all)
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid);

	/* retry allocation without lower limit */
	if (!alloc && min_addr)
		alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid);

	if (!alloc)
		return NULL;

	return phys_to_virt(alloc);
}

/**
 * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
 * memory and without panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory, does not panic if request
 * cannot be satisfied.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_try_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pF\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);

	ptr = memblock_alloc_internal(size, align,
				      min_addr, max_addr, nid);
	if (ptr && size > 0)
		page_init_poison(ptr, size);

	return ptr;
}

/**
 * memblock_alloc_try_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. This function zeroes the allocated memory.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pF\n",
		     __func__, (u64)size, (u64)align, nid, &min_addr,
		     &max_addr, (void *)_RET_IP_);
	ptr = memblock_alloc_internal(size, align,
				      min_addr, max_addr, nid);
	if (ptr)
		memset(ptr, 0, size);

	return ptr;
}

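/*
 * For instance (sketch, with a hypothetical table and size): early code
 * allocating a zeroed, preferably node-local table would do:
 *
 *	table = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, 0,
 *				       MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 *	if (!table)
 *		panic("%s: failed to allocate table\n", __func__);
 */
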
/**
 * __memblock_free_late - free bootmem block pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are released directly
 * to the buddy allocator, no bootmem metadata is updated because it is gone.
 */
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t cursor, end;

	end = base + size - 1;
	memblock_dbg("%s: [%pa-%pa] %pF\n",
		     __func__, &base, &end, (void *)_RET_IP_);
	kmemleak_free_part_phys(base, size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		memblock_free_pages(pfn_to_page(cursor), cursor, 0);
		totalram_pages_inc();
	}
}

/*
 * Remaining API functions
 */

phys_addr_t __init_memblock memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init_memblock memblock_reserved_size(void)
{
	return memblock.reserved.total_size;
}

phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
	unsigned long pages = 0;
	struct memblock_region *r;
	unsigned long start_pfn, end_pfn;

	for_each_memblock(memory, r) {
		start_pfn = memblock_region_memory_base_pfn(r);
		end_pfn = memblock_region_memory_end_pfn(r);
		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
		pages += end_pfn - start_pfn;
	}

	return PFN_PHYS(pages);
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
{
	phys_addr_t max_addr = PHYS_ADDR_MAX;
	struct memblock_region *r;

	/*
	 * translate the memory @limit size into the max address within one of
	 * the memory memblock regions, if the @limit exceeds the total size
	 * of those regions, max_addr will keep original value PHYS_ADDR_MAX
	 */
	for_each_memblock(memory, r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	return max_addr;
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr = PHYS_ADDR_MAX;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      PHYS_ADDR_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      PHYS_ADDR_MAX);
}

void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	if (!size)
		return;

	ret = memblock_isolate_range(&memblock.memory, base, size,
				     &start_rgn, &end_rgn);
	if (ret)
		return;

	/* remove all the MAP regions */
	for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	for (i = start_rgn - 1; i >= 0; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
			memblock_remove_region(&memblock.memory, i);

	/* truncate the reserved regions */
	memblock_remove_range(&memblock.reserved, 0, base);
	memblock_remove_range(&memblock.reserved,
			base + size, PHYS_ADDR_MAX);
}

void __init memblock_mem_limit_remove_map(phys_addr_t limit)
{
	phys_addr_t max_addr;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;

	memblock_cap_memory_range(0, max_addr);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);

	return -1;
}

bool __init_memblock memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

bool __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
{
	int i = memblock_search(&memblock.memory, addr);

	if (i == -1)
		return false;
	return !memblock_is_nomap(&memblock.memory.regions[i]);
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return type->regions[mid].nid;
}
#endif

/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) is a subset of a memory block.
 *
 * Return:
 * true if the region is a subset of a memory block, false otherwise.
 */
bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return false;
	return (memblock.memory.regions[idx].base +
		memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) intersects a reserved
 * memory block.
 *
 * Return:
 * True if they intersect, false if not.
 */
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size);
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type)
{
	phys_addr_t base, end, size;
	enum memblock_flags flags;
	int idx;
	struct memblock_region *rgn;

	pr_info(" %s.cnt  = 0x%lx\n", type->name, type->cnt);

	for_each_memblock_type(idx, type, rgn) {
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		end = base + size - 1;
		flags = rgn->flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
			type->name, idx, &base, &end, &size, nid_buf, flags);
	}
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %pa reserved size = %pa\n",
		&memblock.memory.total_size,
		&memblock.reserved.total_size);

	memblock_dump(&memblock.memory);
	memblock_dump(&memblock.reserved);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	memblock_dump(&memblock.physmem);
#endif
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

static void __init __free_pages_memory(unsigned long start, unsigned long end)
{
	int order;

	while (start < end) {
		/*
		 * __ffs(start) yields the largest order for which @start is
		 * naturally aligned; cap it at MAX_ORDER - 1.
		 */
		order = min(MAX_ORDER - 1UL, __ffs(start));

		/* shrink the block until it fits below @end */
		while (start + (1UL << order) > end)
			order--;

		memblock_free_pages(pfn_to_page(start), start, order);

		start += (1UL << order);
	}
}

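/*
 * Worked example: for start == 0x1234 (lowest set bit is bit 2),
 * __ffs(start) == 2, so an order-2 block (4 pages) is the largest
 * naturally aligned block that can be freed at once; the inner loop
 * above only lowers the order when such a block would run past @end.
 */
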
static unsigned long __init __free_memory_core(phys_addr_t start,
				 phys_addr_t end)
{
	unsigned long start_pfn = PFN_UP(start);
	unsigned long end_pfn = min_t(unsigned long,
				      PFN_DOWN(end), max_low_pfn);

	if (start_pfn >= end_pfn)
		return 0;

	__free_pages_memory(start_pfn, end_pfn);

	return end_pfn - start_pfn;
}

static unsigned long __init free_low_memory_core_early(void)
{
	unsigned long count = 0;
	phys_addr_t start, end;
	u64 i;

	memblock_clear_hotplug(0, -1);

	for_each_reserved_mem_region(i, &start, &end)
		reserve_bootmem_region(start, end);

	/*
	 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
	 * because in some cases, e.g. when node 0 has no RAM installed,
	 * low memory may be on node 1.
	 */
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
				NULL)
		count += __free_memory_core(start, end);

	return count;
}

static int reset_managed_pages_done __initdata;

void reset_node_managed_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		atomic_long_set(&z->managed_pages, 0);
}

void __init reset_all_zones_managed_pages(void)
{
	struct pglist_data *pgdat;

	if (reset_managed_pages_done)
		return;

	for_each_online_pgdat(pgdat)
		reset_node_managed_pages(pgdat);

	reset_managed_pages_done = 1;
}

/**
 * memblock_free_all - release free pages to the buddy allocator
 *
 * Return: the number of pages actually released.
 */
unsigned long __init memblock_free_all(void)
{
	unsigned long pages;

	reset_all_zones_managed_pages();

	pages = free_low_memory_core_early();
	totalram_pages_add(pages);

	return pages;
}

#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;
	phys_addr_t end;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		end = reg->base + reg->size - 1;

		seq_printf(m, "%4d: ", i);
		seq_printf(m, "%pa..%pa\n", &reg->base, &end);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(memblock_debug);

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);

	debugfs_create_file("memory", 0444, root,
			    &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", 0444, root,
			    &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", 0444, root,
			    &memblock.physmem, &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */