1 /* SPDX-License-Identifier: GPL-2.0 */
/*
 * NOTE(review): this is a non-contiguous excerpt of the IA-64 kernel linker
 * script (vmlinux.lds.S).  The bare integer at the start of each line is the
 * original file's line number left over from extraction, and many lines —
 * including most closing braces of the output-section blocks, the
 * SECTIONS/PHDRS wrappers, and several comment delimiters — are missing.
 * The comments added below describe only what the visible directives show;
 * nothing here is safe to restructure without the missing lines.
 */
/*
 * Kernel headers supplying the constants (PAGE_SIZE/PERCPU_PAGE_SIZE style)
 * and the generic section macros (NOTES, INIT_TEXT_SECTION, PERCPU_VADDR,
 * INIT_TASK_DATA, ...) expanded further down.
 */
4 #include <asm/ptrace.h>
5 #include <asm/pgtable.h>
6 #include <asm/thread_info.h>
8 #include <asm-generic/vmlinux.lds.h>
/* Emit a little-endian 64-bit IA-64 ELF image. */
10 OUTPUT_FORMAT("elf64-ia64-little")
/*
 * Extra program header carrying the IA-64 unwind table; ld has no symbolic
 * name for this p_type, hence the raw constant (see inline comment).
 */
20 unwind 0x70000001; /* PT_IA_64_UNWIND, but ld doesn't match the name */
/*
 * Fragment of the discard rule (its enclosing braces are not visible):
 * unwind data for exit text is dropped up front, per the comment below.
 */
25 * unwind exit sections must be discarded before
26 * the rest of the sections get included.
29 *(.IA_64.unwind.exit.text)
30 *(.IA_64.unwind_info.exit.text)
/*
 * Convenience symbols: v marks the kernel virtual base; phys_start is the
 * physical address of the entry point (virtual minus LOAD_OFFSET).
 */
35 v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */
36 phys_start = _start - LOAD_OFFSET;
/*
 * Pattern used by every output section below: linked at its virtual
 * address, but loaded (LMA) at virtual - LOAD_OFFSET, i.e. the physical
 * address, via AT().
 */
45 .text : AT(ADDR(.text) - LOAD_OFFSET) {
57 .text2 : AT(ADDR(.text2) - LOAD_OFFSET) {
62 .text..lock : AT(ADDR(.text..lock) - LOAD_OFFSET) {
71 NOTES :code :note /* put .notes in text and mark in PT_NOTE */
73 } : code /* switch back to regular program... */
/*
 * Machine-check (MCA) table bracketed by start/stop symbols — presumably
 * iterated at runtime by the MCA handling code; confirm against mca.c.
 */
79 __mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET) {
80 __start___mca_table = .;
82 __stop___mca_table = .;
/*
 * Boot-time instruction-patch list: the __start/__end symbols delimit the
 * entries so the kernel can walk them — presumably consumed by the
 * patching code (patch.c); confirm.
 */
85 .data..patch.phys_stack_reg : AT(ADDR(.data..patch.phys_stack_reg) - LOAD_OFFSET) {
86 __start___phys_stack_reg_patchlist = .;
87 *(.data..patch.phys_stack_reg)
88 __end___phys_stack_reg_patchlist = .;
96 /* Unwind info & table: */
98 .IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET) {
99 *(.IA_64.unwind_info*)
101 .IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET) {
/* .opd: IA-64 official procedure descriptors (function descriptors). */
111 .opd : AT(ADDR(.opd) - LOAD_OFFSET) {
/*
 * Init code/data, page aligned — presumably so the region can be freed
 * after boot; confirm against the arch free_initmem() path.
 */
116 * Initialization code and data:
118 . = ALIGN(PAGE_SIZE);
121 INIT_TEXT_SECTION(PAGE_SIZE)
122 INIT_DATA_SECTION(16)
/* Further patch lists, same start/end-symbol bracketing as above. */
124 .data..patch.vtop : AT(ADDR(.data..patch.vtop) - LOAD_OFFSET) {
125 __start___vtop_patchlist = .;
127 __end___vtop_patchlist = .;
130 .data..patch.rse : AT(ADDR(.data..patch.rse) - LOAD_OFFSET) {
131 __start___rse_patchlist = .;
133 __end___rse_patchlist = .;
136 .data..patch.mckinley_e9 : AT(ADDR(.data..patch.mckinley_e9) - LOAD_OFFSET) {
137 __start___mckinley_e9_bundles = .;
138 *(.data..patch.mckinley_e9)
139 __end___mckinley_e9_bundles = .;
/*
 * Machine-vector table: only built for multi-platform (generic) kernels.
 * NOTE(review): matching #endif is outside the visible excerpt.
 */
142 #if defined(CONFIG_IA64_GENERIC)
145 .machvec : AT(ADDR(.machvec) - LOAD_OFFSET) {
/* Reserve one per-cpu page for the boot CPU (see inline comment). */
153 . = ALIGN(PERCPU_PAGE_SIZE);
155 . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */
158 . = ALIGN(PAGE_SIZE);
/*
 * Page-aligned data; the gate (syscall entry) image is embedded between
 * its own page-aligned __start/__stop symbols.
 */
161 .data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
162 PAGE_ALIGNED_DATA(PAGE_SIZE)
163 . = ALIGN(PAGE_SIZE);
164 __start_gate_section = .;
166 __stop_gate_section = .;
169 * make sure the gate page doesn't expose
172 . = ALIGN(PAGE_SIZE);
/*
 * Per-cpu area placed via PERCPU_VADDR at the fixed address PERCPU_ADDR;
 * __phys_per_cpu_start aliases __per_cpu_load (symbol defined by that
 * macro).  Advancing '.' by PERCPU_PAGE_SIZE below makes the link fail if
 * the per-cpu data overflows one per-cpu page (see comment fragment).
 */
175 . = ALIGN(PERCPU_PAGE_SIZE);
176 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
177 __phys_per_cpu_start = __per_cpu_load;
179 * ensure percpu data fits
180 * into percpu page size
182 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE;
/* Main data section: init task stack, cacheline- and read-mostly data. */
186 .data : AT(ADDR(.data) - LOAD_OFFSET) {
188 INIT_TASK_DATA(PAGE_SIZE)
189 CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
190 READ_MOSTLY_DATA(SMP_CACHE_BYTES)
199 . = ALIGN(16); /* gp must be 16-byte aligned for exc. table */
200 .got : AT(ADDR(.got) - LOAD_OFFSET) {
/*
 * Bias gp 2 MB past the start of .got — presumably so signed 22-bit
 * gp-relative offsets (addl) can reach the whole GOT plus the small-data
 * sections placed nearby; confirm against the IA-64 runtime conventions.
 */
204 __gp = ADDR(.got) + 0x200000;
207 * We want the small data sections together,
208 * so single-instruction offsets can access
209 * them all, and initialized data all before
210 * uninitialized, so we can shorten the
211 * on-disk segment size.
213 .sdata : AT(ADDR(.sdata) - LOAD_OFFSET) {
230 /* Default discards */