/*
 * VDSO implementations.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/cache.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>
#include <asm/vdso_datapage.h>

extern char vdso_start[], vdso_end[];
static unsigned long vdso_pages __ro_after_init;

/* The vDSO data page, padded out to exactly one page. */
static union {
	struct vdso_data	data;
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

#ifdef CONFIG_COMPAT
/*
 * Create and map the vectors page for AArch32 tasks.
 */
#define C_VECTORS	0
#define C_SIGPAGE	1
#define C_PAGES		(C_SIGPAGE + 1)
static struct page *aarch32_vdso_pages[C_PAGES] __ro_after_init;
static const struct vm_special_mapping aarch32_vdso_spec[C_PAGES] = {
	{
		.name	= "[vectors]", /* ABI */
		.pages	= &aarch32_vdso_pages[C_VECTORS],
	},
	{
		.name	= "[sigpage]", /* ABI */
		.pages	= &aarch32_vdso_pages[C_SIGPAGE],
	},
};
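
/*
 * The kuser helpers are copied to the end of a zeroed page, so that once
 * the page is mapped at AARCH32_VECTORS_BASE the helper entry points land
 * at the fixed addresses, just below the top of the [vectors] page, that
 * 32-bit user space expects.
 */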
static int aarch32_alloc_kuser_vdso_page(void)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
	unsigned long vdso_page;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	vdso_page = get_zeroed_page(GFP_ATOMIC);
	if (!vdso_page)
		return -ENOMEM;

	memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
	       kuser_sz);
	aarch32_vdso_pages[C_VECTORS] = virt_to_page(vdso_page);
	flush_dcache_page(aarch32_vdso_pages[C_VECTORS]);
	return 0;
}
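
/*
 * Allocate and populate the compat [sigpage]: a single page holding the
 * AArch32 sigreturn trampoline code that signal delivery points the
 * handler's return address at.
 */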
static int __init aarch32_alloc_vdso_pages(void)
{
	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
	unsigned long sigpage;
	int ret;

	sigpage = get_zeroed_page(GFP_ATOMIC);
	if (!sigpage)
		return -ENOMEM;

	memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
	aarch32_vdso_pages[C_SIGPAGE] = virt_to_page(sigpage);
	flush_dcache_page(aarch32_vdso_pages[C_SIGPAGE]);

	ret = aarch32_alloc_kuser_vdso_page();
	if (ret)
		free_page(sigpage);

	return ret;
}
arch_initcall(aarch32_alloc_vdso_pages);
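
/*
 * Map the [vectors] page at its fixed compat address. The mapping is
 * created per-process but is backed by the single page allocated at boot.
 */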
static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
{
	void *ret;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	/*
	 * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
	 * not safe to CoW the page containing the CPU exception vectors.
	 */
	ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYEXEC,
				       &aarch32_vdso_spec[C_VECTORS]);

	return PTR_ERR_OR_ZERO(ret);
}
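
/*
 * Unlike the vectors page, the [sigpage] floats: a free area is chosen
 * by get_unmapped_area() and recorded in mm->context.vdso so that the
 * signal code can find the trampolines.
 */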
static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
	unsigned long addr;
	void *ret;

	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = ERR_PTR(addr);
		goto out;
	}

	/*
	 * VM_MAYWRITE is required to allow gdb to Copy-on-Write and
	 * set breakpoints.
	 */
	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_EXEC | VM_MAYREAD |
				       VM_MAYWRITE | VM_MAYEXEC,
				       &aarch32_vdso_spec[C_SIGPAGE]);
	if (IS_ERR(ret))
		goto out;

	mm->context.vdso = (void *)addr;

out:
	return PTR_ERR_OR_ZERO(ret);
}
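
/*
 * Entry point from the compat ELF loader: install both compat mappings
 * while holding mmap_sem for write.
 */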
int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	ret = aarch32_kuser_helpers_setup(mm);
	if (ret)
		goto out;

	ret = aarch32_sigreturn_setup(mm);

out:
	up_write(&mm->mmap_sem);
	return ret;
}
#endif /* CONFIG_COMPAT */
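
/*
 * If user space mremap()s the vDSO text, keep mm->context.vdso pointing
 * at the new location so that signal return keeps working; resizing the
 * mapping is refused.
 */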
static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	unsigned long vdso_size = vdso_end - vdso_start;

	if (vdso_size != new_size)
		return -EINVAL;

	current->mm->context.vdso = (void *)new_vma->vm_start;

	return 0;
}

static struct vm_special_mapping vdso_spec[2] __ro_after_init = {
	{
		.name	= "[vvar]",
	},
	{
		.name	= "[vdso]",
		.mremap = vdso_mremap,
	},
};
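
/*
 * Validate the vDSO image built into the kernel and cache page pointers
 * for both the data page and the code pages; the per-process mappings
 * themselves are created at exec time.
 */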
static int __init vdso_init(void)
{
	int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_start, "\177ELF", 4)) {
		pr_err("vDSO is not a valid ELF object!\n");
		return -EINVAL;
	}

	vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;

	/* Allocate the vDSO pagelist, plus a page for the data. */
	vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *),
				GFP_KERNEL);
	if (vdso_pagelist == NULL)
		return -ENOMEM;

	/* Grab the vDSO data page. */
	vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(vdso_start);

	for (i = 0; i < vdso_pages; i++)
		vdso_pagelist[i + 1] = pfn_to_page(pfn + i);

	vdso_spec[0].pages = &vdso_pagelist[0];
	vdso_spec[1].pages = &vdso_pagelist[1];

	return 0;
}
arch_initcall(vdso_init);
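
/*
 * Called at exec time to map the data page ([vvar]) followed by the vDSO
 * text ([vdso]); the two mappings are contiguous, with the text starting
 * at the address recorded in mm->context.vdso.
 */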
int arch_setup_additional_pages(struct linux_binprm *bprm,
				int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	void *ret;

	vdso_text_len = vdso_pages << PAGE_SHIFT;
	/* Be sure to map the data page */
	vdso_mapping_len = vdso_text_len + PAGE_SIZE;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}

	/* Map the [vvar] data page first, read-only. */
	ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
				       VM_READ|VM_MAYREAD,
				       &vdso_spec[0]);
	if (IS_ERR(ret))
		goto up_fail;

	vdso_base += PAGE_SIZE;
	mm->context.vdso = (void *)vdso_base;
	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_spec[1]);
	if (IS_ERR(ret))
		goto up_fail;

	up_write(&mm->mmap_sem);
	return 0;

up_fail:
	mm->context.vdso = NULL;
	up_write(&mm->mmap_sem);
	return PTR_ERR(ret);
}

/*
 * Update the vDSO data page to keep in sync with kernel timekeeping.
 */
void update_vsyscall(struct timekeeper *tk)
{
	u32 use_syscall = !tk->tkr_mono.clock->archdata.vdso_direct;

	/* Bump the sequence count to an odd value: readers will retry. */
	++vdso_data->tb_seq_count;
	smp_wmb();

	vdso_data->use_syscall = use_syscall;
	vdso_data->xtime_coarse_sec = tk->xtime_sec;
	vdso_data->xtime_coarse_nsec = tk->tkr_mono.xtime_nsec >>
						tk->tkr_mono.shift;
	vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
	vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;

	/* Read without the seqlock held by clock_getres() */
	WRITE_ONCE(vdso_data->hrtimer_res, hrtimer_resolution);

	if (!use_syscall) {
		/* tkr_mono.cycle_last == tkr_raw.cycle_last */
		vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
		vdso_data->raw_time_sec = tk->raw_sec;
		vdso_data->raw_time_nsec = tk->tkr_raw.xtime_nsec;
		vdso_data->xtime_clock_sec = tk->xtime_sec;
		vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
		vdso_data->cs_mono_mult = tk->tkr_mono.mult;
		vdso_data->cs_raw_mult = tk->tkr_raw.mult;
		/* tkr_mono.shift == tkr_raw.shift */
		vdso_data->cs_shift = tk->tkr_mono.shift;
	}

	/* Make the updates visible before publishing an even count again. */
	smp_wmb();
	++vdso_data->tb_seq_count;
}
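
/*
 * Keep the timezone fields in sync with sys_tz; these are read by the
 * vDSO outside the sequence counter, so callers tolerate a racy read.
 */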
void update_vsyscall_tz(void)
{
	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
	vdso_data->tz_dsttime = sys_tz.tz_dsttime;
}