// SPDX-License-Identifier: GPL-2.0
/*
 * vdso setup for s390
 *
 *  Copyright IBM Corp. 2008
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/binfmts.h>
#include <linux/compat.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/time_namespace.h>
#include <linux/random.h>
#include <vdso/datapage.h>
#include <asm/vdso.h>

extern char vdso64_start[], vdso64_end[];
extern char vdso32_start[], vdso32_end[];

static struct vm_special_mapping vvar_mapping;

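/*
 * The vdso data page: one struct vdso_data per clocksource base
 * (CS_BASES), padded to a full page so it can be mapped into user
 * space as-is.
 */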
static union {
	struct vdso_data	data[CS_BASES];
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;

struct vdso_data *vdso_data = vdso_data_store.data;

enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,
	VVAR_TIMENS_PAGE_OFFSET,
	VVAR_NR_PAGES,
};

#ifdef CONFIG_TIME_NS
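/* The vvar page directly contains the vdso data; just cast it. */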
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	if (likely(vma->vm_mm == current->mm))
		return current->nsproxy->time_ns->vvar_page;
	/*
	 * VM_PFNMAP | VM_IO protect the .fault() handler from being
	 * called through interfaces like /proc/$pid/mem or
	 * process_vm_{readv,writev}() as long as there's no .access()
	 * in special_mapping_vmops().
	 * For more details see check_vma_flags() and __access_remote_vm().
	 */
	WARN(1, "vvar_page accessed remotely");
	return NULL;
}

/*
 * The VVAR page layout depends on whether a task belongs to the root or
 * non-root time namespace. Whenever a task changes its namespace, the VVAR
 * page tables are cleared and then they will be re-faulted with a
 * corresponding layout.
 * See also the comment near timens_setup_vdso_data() for details.
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;

	mmap_read_lock(mm);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long size = vma->vm_end - vma->vm_start;

		if (!vma_is_special_mapping(vma, &vvar_mapping))
			continue;
		zap_page_range(vma, vma->vm_start, size);
		break;
	}
	mmap_read_unlock(mm);
	return 0;
}
#else
static inline struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	return NULL;
}
#endif

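/*
 * Fault handler for the vvar special mapping: hand out the vdso data
 * page, or, for tasks inside a time namespace, swap the data page and
 * the namespace page so the vdso finds the timens data at the offset
 * it expects.
 */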
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long addr, pfn;
	vm_fault_t err;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		pfn = virt_to_pfn(vdso_data);
		if (timens_page) {
			/*
			 * Fault in VVAR page too, since it will be accessed
			 * to get clock data anyway.
			 */
			addr = vmf->address + VVAR_TIMENS_PAGE_OFFSET * PAGE_SIZE;
			err = vmf_insert_pfn(vma, addr, pfn);
			if (unlikely(err & VM_FAULT_ERROR))
				return err;
			pfn = page_to_pfn(timens_page);
		}
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
		 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = virt_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}
	return vmf_insert_pfn(vma, vmf->address, pfn);
}

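/* The vdso text was moved (e.g. by CRIU); record its new base address. */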
static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *vma)
{
	current->mm->context.vdso_base = vma->vm_start;
	return 0;
}

static struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]",
	.fault = vvar_fault,
};

static struct vm_special_mapping vdso64_mapping = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};

static struct vm_special_mapping vdso32_mapping = {
	.name = "[vdso]",
	.mremap = vdso_mremap,
};

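/*
 * Store the CPU number in the TOD programmable field so that the vdso
 * getcpu() implementation can read it from user space without a
 * system call.
 */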
int vdso_getcpu_init(void)
{
	set_tod_programmable_field(smp_processor_id());
	return 0;
}
early_initcall(vdso_getcpu_init); /* Must be called before SMP init */

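/*
 * Map the vvar pages and the vdso text into the current process at the
 * requested address (or wherever get_unmapped_area() places them).
 * Layout: [vvar pages][vdso text].
 */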
static int map_vdso(unsigned long addr, unsigned long vdso_mapping_len)
{
	unsigned long vvar_start, vdso_text_start, vdso_text_len;
	struct vm_special_mapping *vdso_mapping;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
	if (mmap_write_lock_killable(mm))
		return -EINTR;

	if (is_compat_task()) {
		vdso_text_len = vdso32_end - vdso32_start;
		vdso_mapping = &vdso32_mapping;
	} else {
		vdso_text_len = vdso64_end - vdso64_start;
		vdso_mapping = &vdso64_mapping;
	}
	vvar_start = get_unmapped_area(NULL, addr, vdso_mapping_len, 0, 0);
	rc = vvar_start;
	if (IS_ERR_VALUE(vvar_start))
		goto out;
	vma = _install_special_mapping(mm, vvar_start, VVAR_NR_PAGES*PAGE_SIZE,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);
	rc = PTR_ERR(vma);
	if (IS_ERR(vma))
		goto out;
	vdso_text_start = vvar_start + VVAR_NR_PAGES * PAGE_SIZE;
	/* VM_MAYWRITE for COW so gdb can set breakpoints */
	vma = _install_special_mapping(mm, vdso_text_start, vdso_text_len,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_mapping);
	if (IS_ERR(vma)) {
		/* Tear down the whole vvar area again on failure. */
		do_munmap(mm, vvar_start, VVAR_NR_PAGES * PAGE_SIZE, NULL);
		rc = PTR_ERR(vma);
	} else {
		current->mm->context.vdso_base = vdso_text_start;
		rc = 0;
	}
out:
	mmap_write_unlock(mm);
	return rc;
}

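/*
 * Pick a randomized, page-aligned address for the vdso, above the given
 * start address but below VDSO_BASE.
 */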
static unsigned long vdso_addr(unsigned long start, unsigned long len)
{
	unsigned long addr, end, offset;

	/*
	 * Round up the start address. It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= VDSO_BASE)
		end = VDSO_BASE;
	end -= len;

	if (end > start) {
		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}
	return addr;
}

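/* Total size of the vvar pages plus the (compat) vdso text, page aligned. */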
unsigned long vdso_size(void)
{
	unsigned long size = VVAR_NR_PAGES * PAGE_SIZE;

	if (is_compat_task())
		size += vdso32_end - vdso32_start;
	else
		size += vdso64_end - vdso64_start;
	return PAGE_ALIGN(size);
}

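/*
 * Called at ELF load time to map the vdso; with PF_RANDOMIZE set the
 * placement is randomized above the stack.
 */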
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	unsigned long addr = VDSO_BASE;
	unsigned long size = vdso_size();

	if (current->flags & PF_RANDOMIZE)
		addr = vdso_addr(current->mm->start_stack + PAGE_SIZE, size);
	return map_vdso(addr, size);
}

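/*
 * Build a NULL-terminated page list for the vdso text between start
 * and end.
 */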
static struct page ** __init vdso_setup_pages(void *start, void *end)
{
	int pages = (end - start) >> PAGE_SHIFT;
	struct page **pagelist;
	int i;

	pagelist = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
	if (!pagelist)
		panic("%s: Cannot allocate page list for VDSO", __func__);
	for (i = 0; i < pages; i++)
		pagelist[i] = virt_to_page(start + i * PAGE_SIZE);
	return pagelist;
}

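/* Set up the page lists for the 64-bit and, if enabled, the compat vdso. */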
static int __init vdso_init(void)
{
	vdso64_mapping.pages = vdso_setup_pages(vdso64_start, vdso64_end);
	if (IS_ENABLED(CONFIG_COMPAT))
		vdso32_mapping.pages = vdso_setup_pages(vdso32_start, vdso32_end);
	return 0;
}
arch_initcall(vdso_init);