/*
 * Based on arch/arm/include/asm/memory.h
 *
 * Copyright (C) 2000-2002 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Note: this file should not be included by non-asm/.h files
 */
#ifndef __ASM_MEMORY_H
#define __ASM_MEMORY_H

#include <linux/compiler.h>
#include <linux/const.h>
#include <linux/types.h>

#include <asm/page-def.h>
#include <asm/sizes.h>
/*
 * Allow for constants defined here to be used from assembly code
 * by prepending the UL suffix only with actual C code compilation.
 */
#define UL(x) _AC(x, UL)
/*
 * Size of the PCI I/O space. This must remain a power of two so that
 * IO_SPACE_LIMIT acts as a mask for the low bits of I/O addresses.
 */
#define PCI_IO_SIZE		SZ_16M
/*
 * Log2 of the upper bound of the size of a struct page. Used for sizing
 * the vmemmap region only, does not affect actual memory footprint.
 * We don't use sizeof(struct page) directly since taking its size here
 * requires its definition to be available at this point in the inclusion
 * chain, and it may not be a power of 2 in the first place.
 */
#define STRUCT_PAGE_MAX_SHIFT	6
/*
 * VMEMMAP_SIZE - allows the whole linear region to be covered by
 *                a struct page array
 */
#define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT))
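/*
 * Worked example (illustrative, not part of the original header): with
 * 4 KB pages (PAGE_SHIFT == 12) and VA_BITS == 48, the linear region
 * spans 2^47 bytes, i.e. 2^35 pages. Budgeting a worst-case 2^6 bytes of
 * struct page per page gives:
 *
 *	VMEMMAP_SIZE = 1 << (48 - 12 - 1 + 6) = 1 << 41 = 2 TiB
 */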
/*
 * PAGE_OFFSET - the virtual address of the start of the linear map, i.e.
 *		 the top 2^(VA_BITS - 1) bytes of the kernel VA space.
 * KIMAGE_VADDR - the virtual address of the start of the kernel image.
 * VA_BITS - the maximum number of bits for virtual addresses.
 * VA_START - the first kernel virtual address.
 * TASK_SIZE - the maximum size of a user space task.
 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
 */
#define VA_BITS			(CONFIG_ARM64_VA_BITS)
#define VA_START		(UL(0xffffffffffffffff) - \
				 (UL(1) << VA_BITS) + 1)
#define PAGE_OFFSET		(UL(0xffffffffffffffff) - \
				 (UL(1) << (VA_BITS - 1)) + 1)
#define KIMAGE_VADDR		(MODULES_END)
#define MODULES_END		(MODULES_VADDR + MODULES_VSIZE)
#define MODULES_VADDR		(VA_START + KASAN_SHADOW_SIZE)
#define MODULES_VSIZE		(SZ_128M)
#define VMEMMAP_START		(PAGE_OFFSET - VMEMMAP_SIZE)
#define PCI_IO_END		(VMEMMAP_START - SZ_2M)
#define PCI_IO_START		(PCI_IO_END - PCI_IO_SIZE)
#define FIXADDR_TOP		(PCI_IO_START - SZ_2M)
#define TASK_SIZE_64		(UL(1) << VA_BITS)
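/*
 * For illustration only (these values are not spelled out in the original
 * header): with VA_BITS == 48,
 *
 *	VA_START    = 0xffff000000000000
 *	PAGE_OFFSET = 0xffff800000000000
 *
 * so the linear map occupies the upper half of the kernel region, while
 * the KASAN shadow, modules, kernel image, vmemmap, PCI I/O and fixmap
 * areas all live below PAGE_OFFSET.
 */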
#ifdef CONFIG_COMPAT
#define TASK_SIZE_32		UL(0x100000000)
#define TASK_SIZE		(test_thread_flag(TIF_32BIT) ? \
				TASK_SIZE_32 : TASK_SIZE_64)
#define TASK_SIZE_OF(tsk)	(test_tsk_thread_flag(tsk, TIF_32BIT) ? \
				TASK_SIZE_32 : TASK_SIZE_64)
#else
#define TASK_SIZE		TASK_SIZE_64
#endif /* CONFIG_COMPAT */
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 4))
#define KERNEL_START		_text
#define KERNEL_END		_end
/*
 * KASAN requires 1/8th of the kernel virtual address space for the shadow
 * region. KASAN can bloat the stack significantly, so double the (minimum)
 * stack size when KASAN is in use.
 */
#ifdef CONFIG_KASAN
#define KASAN_SHADOW_SIZE	(UL(1) << (VA_BITS - 3))
#define KASAN_THREAD_SHIFT	1
#else
#define KASAN_SHADOW_SIZE	(0)
#define KASAN_THREAD_SHIFT	0
#endif
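/*
 * Sketch of the arithmetic (not in the original header): the kernel half
 * of the address space spans 2^VA_BITS bytes, so with VA_BITS == 48 the
 * shadow region takes 1/8th of that:
 *
 *	KASAN_SHADOW_SIZE = 1 << (48 - 3) = 32 TiB
 */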
#define MIN_THREAD_SHIFT	(14 + KASAN_THREAD_SHIFT)
/*
 * VMAP'd stacks are allocated at page granularity, so we must ensure that
 * such stacks are a multiple of page size.
 */
#if defined(CONFIG_VMAP_STACK) && (MIN_THREAD_SHIFT < PAGE_SHIFT)
#define THREAD_SHIFT		PAGE_SHIFT
#else
#define THREAD_SHIFT		MIN_THREAD_SHIFT
#endif
#if THREAD_SHIFT >= PAGE_SHIFT
#define THREAD_SIZE_ORDER	(THREAD_SHIFT - PAGE_SHIFT)
#endif

#define THREAD_SIZE		(UL(1) << THREAD_SHIFT)
/*
 * By aligning VMAP'd stacks to 2 * THREAD_SIZE, we can detect overflow by
 * checking sp & (1 << THREAD_SHIFT), which we can do cheaply in the entry
 * assembly.
 */
#ifdef CONFIG_VMAP_STACK
#define THREAD_ALIGN		(2 * THREAD_SIZE)
#else
#define THREAD_ALIGN		THREAD_SIZE
#endif
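/*
 * Illustration (added, not from the original header): a VMAP'd stack
 * occupies [base, base + THREAD_SIZE) with base aligned to
 * 2 * THREAD_SIZE, so bit THREAD_SHIFT of sp is clear everywhere in the
 * valid range. An overflow of up to THREAD_SIZE bytes lands in
 * [base - THREAD_SIZE, base), where that bit is set, so the entry code
 * only has to test a single bit of sp.
 */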
#define IRQ_STACK_SIZE		THREAD_SIZE

#define OVERFLOW_STACK_SIZE	SZ_4K
/*
 * Alignment of kernel segments (e.g. .text, .data).
 */
#if defined(CONFIG_DEBUG_ALIGN_RODATA)
/*
 *  4 KB granule:   1 level 2 entry
 * 16 KB granule: 128 level 3 entries, with contiguous bit
 * 64 KB granule:  32 level 3 entries, with contiguous bit
 */
#define SEGMENT_ALIGN		SZ_2M
#else
/*
 *  4 KB granule:  16 level 3 entries, with contiguous bit
 * 16 KB granule:   4 level 3 entries, without contiguous bit
 * 64 KB granule:   1 level 3 entry
 */
#define SEGMENT_ALIGN		SZ_64K
#endif
/*
 * Memory types available.
 */
#define MT_DEVICE_nGnRnE	0
#define MT_DEVICE_nGnRE		1
#define MT_DEVICE_GRE		2
#define MT_NORMAL_NC		3
#define MT_NORMAL		4
#define MT_NORMAL_WT		5
/*
 * Memory types for Stage-2 translation
 */
#define MT_S2_NORMAL		0xf
#define MT_S2_DEVICE_nGnRE	0x1
#ifdef CONFIG_ARM64_4K_PAGES
#define IOREMAP_MAX_ORDER	(PUD_SHIFT)
#else
#define IOREMAP_MAX_ORDER	(PMD_SHIFT)
#endif
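/*
 * For reference (added, not in the original header): with a 4 KB granule
 * PUD_SHIFT is 30, allowing ioremap() to use block mappings of up to
 * 1 GiB; the 16 KB and 64 KB granules cap out at PMD-sized blocks.
 */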
#ifdef CONFIG_BLK_DEV_INITRD
#define __early_init_dt_declare_initrd(__start, __end)			\
	do {								\
		initrd_start = (__start);				\
		initrd_end = (__end);					\
	} while (0)
#endif
#ifndef __ASSEMBLY__

#include <linux/bitops.h>
#include <linux/mmdebug.h>
extern s64			memstart_addr;
/* PHYS_OFFSET - the physical address of the start of memory. */
#define PHYS_OFFSET		({ VM_BUG_ON(memstart_addr & 1); memstart_addr; })
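/*
 * Note (added for clarity, not in the original header): memstart_addr is
 * initialised to the odd sentinel -1 during early boot, so the
 * VM_BUG_ON(memstart_addr & 1) above fires if PHYS_OFFSET is used before
 * the real, aligned value has been assigned.
 */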
/* the virtual base of the kernel image (minus TEXT_OFFSET) */
extern u64			kimage_vaddr;

/* the offset between the kernel virtual and physical mappings */
extern u64			kimage_voffset;
static inline unsigned long kaslr_offset(void)
{
	return kimage_vaddr - KIMAGE_VADDR;
}
/*
 * Allow all memory at the discovery stage. We will clip it later.
 */
#define MIN_MEMBLOCK_ADDR	0
#define MAX_MEMBLOCK_ADDR	U64_MAX
/*
 * PFNs are used to describe any physical page; this means
 * PFN 0 == physical address 0.
 *
 * This is the PFN of the first RAM page in the kernel
 * direct-mapped view.  We assume this is the first page
 * of RAM in the mem_map as well.
 */
#define PHYS_PFN_OFFSET	(PHYS_OFFSET >> PAGE_SHIFT)
/*
 * Physical vs virtual RAM address space conversion.  These are
 * private definitions which should NOT be used outside memory.h
 * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
 */
/*
 * The linear kernel range starts in the middle of the virtual address
 * space. Testing the top bit for the start of the region is a
 * sufficient check.
 */
#define __is_lm_address(addr)	(!!((addr) & BIT(VA_BITS - 1)))
#define __lm_to_phys(addr)	(((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
#define __kimg_to_phys(addr)	((addr) - kimage_voffset)
#define __virt_to_phys_nodebug(x) ({					\
	phys_addr_t __x = (phys_addr_t)(x);				\
	__is_lm_address(__x) ? __lm_to_phys(__x) :			\
			       __kimg_to_phys(__x);			\
})
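/*
 * Illustrative example (added, assuming VA_BITS == 48): a linear-map
 * address such as PAGE_OFFSET + 0x1000 has bit 47 set, so it translates
 * to PHYS_OFFSET + 0x1000 via __lm_to_phys(); a kernel-image address
 * (bit 47 clear) instead has kimage_voffset subtracted by
 * __kimg_to_phys().
 */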
#define __pa_symbol_nodebug(x)	__kimg_to_phys((phys_addr_t)(x))
#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __virt_to_phys(x)	__virt_to_phys_nodebug(x)
#define __phys_addr_symbol(x)	__pa_symbol_nodebug(x)
#endif
#define __phys_to_virt(x)	((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
#define __phys_to_kimg(x)	((unsigned long)((x) + kimage_voffset))
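/*
 * Note (added for clarity, not in the original header): __phys_to_virt()
 * may OR in PAGE_OFFSET instead of adding it because (x - PHYS_OFFSET) is
 * an offset into the linear map, i.e. below 2^(VA_BITS - 1), and those
 * low bits of PAGE_OFFSET are all zero, making OR and ADD equivalent.
 */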
/*
 * Convert a page to/from a physical address
 */
#define page_to_phys(page)	(__pfn_to_phys(page_to_pfn(page)))
#define phys_to_page(phys)	(pfn_to_page(__phys_to_pfn(phys)))
/*
 * Note: Drivers should NOT use these.  They are the wrong
 * translation to use for DMA addresses.  Use the driver
 * DMA support - see dma-mapping.h.
 */
#define virt_to_phys virt_to_phys
static inline phys_addr_t virt_to_phys(const volatile void *x)
{
	return __virt_to_phys((unsigned long)(x));
}

#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(phys_addr_t x)
{
	return (void *)(__phys_to_virt(x));
}
/*
 * Drivers should NOT use these either.
 */
#define __pa(x)			__virt_to_phys((unsigned long)(x))
#define __pa_symbol(x)		__phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
#define __pa_nodebug(x)		__virt_to_phys_nodebug((unsigned long)(x))
#define __va(x)			((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
#define virt_to_pfn(x)		__phys_to_pfn(__virt_to_phys((unsigned long)(x)))
#define sym_to_pfn(x)		__phys_to_pfn(__pa_symbol(x))
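/*
 * Usage sketch (added, not from the original header): __pa() accepts any
 * valid kernel virtual address, linear map or kernel image, while
 * __pa_symbol() is meant for link-time symbols such as KERNEL_START; the
 * RELOC_HIDE() wrapper stops the compiler from reasoning about the
 * symbol's address as if it pointed into a known object.
 */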
/*
 *  virt_to_page(k)	convert a _valid_ virtual address to struct page *
 *  virt_addr_valid(k)	indicates whether a virtual address is valid
 */
#define ARCH_PFN_OFFSET		((unsigned long)PHYS_PFN_OFFSET)
#ifndef CONFIG_SPARSEMEM_VMEMMAP
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define _virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#else
#define __virt_to_pgoff(kaddr)	(((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
#define __page_to_voff(page)	(((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))

#define page_to_virt(page)	((void *)((__page_to_voff(page)) | PAGE_OFFSET))
#define virt_to_page(vaddr)	((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))

#define _virt_addr_valid(kaddr)	pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \
					   + PHYS_OFFSET) >> PAGE_SHIFT)
#endif
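/*
 * Worked sketch (added, not in the original header): under
 * SPARSEMEM_VMEMMAP the struct page array is a flat virtual array based
 * at VMEMMAP_START, indexed by a page's offset within the linear map.
 * virt_to_page() converts that offset into a byte offset into the array,
 * (offset / PAGE_SIZE) * sizeof(struct page), and ORs in VMEMMAP_START;
 * page_to_virt() inverts the same steps.
 */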
#define _virt_addr_is_linear(kaddr)	(((u64)(kaddr)) >= PAGE_OFFSET)
#define virt_addr_valid(kaddr)		(_virt_addr_is_linear(kaddr) && \
					 _virt_addr_valid(kaddr))

#endif /* !__ASSEMBLY__ */

#include <asm-generic/memory_model.h>

#endif /* __ASM_MEMORY_H */