arch/arm64/kvm/hyp/nvhe/setup.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>

#include <nvhe/early_alloc.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/trap_handler.h>

struct hyp_pool hpool;
struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
unsigned long hyp_nr_cpus;

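/* Size of the per-CPU region, as delimited by the linker script. */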
#define hyp_percpu_size ((unsigned long)__per_cpu_end - \
                         (unsigned long)__per_cpu_start)

static void *vmemmap_base;
static void *hyp_pgt_base;
static void *host_s2_mem_pgt_base;
static void *host_s2_dev_pgt_base;

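/*
 * Hand the memory pool donated by the host to the early allocator, and
 * carve out the contiguous regions needed later: the hyp vmemmap, the hyp
 * stage-1 page-table, and the host stage-2 page-tables (one region for
 * memory, one for device/MMIO mappings).
 */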
static int divide_memory_pool(void *virt, unsigned long size)
{
        unsigned long vstart, vend, nr_pages;

        hyp_early_alloc_init(virt, size);

        hyp_vmemmap_range(__hyp_pa(virt), size, &vstart, &vend);
        nr_pages = (vend - vstart) >> PAGE_SHIFT;
        vmemmap_base = hyp_early_alloc_contig(nr_pages);
        if (!vmemmap_base)
                return -ENOMEM;

        nr_pages = hyp_s1_pgtable_pages();
        hyp_pgt_base = hyp_early_alloc_contig(nr_pages);
        if (!hyp_pgt_base)
                return -ENOMEM;

        nr_pages = host_s2_mem_pgtable_pages();
        host_s2_mem_pgt_base = hyp_early_alloc_contig(nr_pages);
        if (!host_s2_mem_pgt_base)
                return -ENOMEM;

        nr_pages = host_s2_dev_pgtable_pages();
        host_s2_dev_pgt_base = hyp_early_alloc_contig(nr_pages);
        if (!host_s2_dev_pgt_base)
                return -ENOMEM;

        return 0;
}

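/*
 * Rebuild the hypervisor stage-1 mappings on top of the carved-out pool:
 * the idmap, the vectors, the vmemmap backing, the hyp text/rodata/bss
 * sections, the donated memory range itself, and each CPU's per-CPU area
 * and stack.
 */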
static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
                                 unsigned long *per_cpu_base,
                                 u32 hyp_va_bits)
{
        void *start, *end, *virt = hyp_phys_to_virt(phys);
        unsigned long pgt_size = hyp_s1_pgtable_pages() << PAGE_SHIFT;
        int ret, i;

        /* Recreate the hyp page-table using the early page allocator */
        hyp_early_alloc_init(hyp_pgt_base, pgt_size);
        ret = kvm_pgtable_hyp_init(&pkvm_pgtable, hyp_va_bits,
                                   &hyp_early_alloc_mm_ops);
        if (ret)
                return ret;

        ret = hyp_create_idmap(hyp_va_bits);
        if (ret)
                return ret;

        ret = hyp_map_vectors();
        if (ret)
                return ret;

        ret = hyp_back_vmemmap(phys, size, hyp_virt_to_phys(vmemmap_base));
        if (ret)
                return ret;

        ret = pkvm_create_mappings(__hyp_text_start, __hyp_text_end, PAGE_HYP_EXEC);
        if (ret)
                return ret;

        ret = pkvm_create_mappings(__start_rodata, __end_rodata, PAGE_HYP_RO);
        if (ret)
                return ret;

        ret = pkvm_create_mappings(__hyp_rodata_start, __hyp_rodata_end, PAGE_HYP_RO);
        if (ret)
                return ret;

        ret = pkvm_create_mappings(__hyp_bss_start, __hyp_bss_end, PAGE_HYP);
        if (ret)
                return ret;

        ret = pkvm_create_mappings(__hyp_bss_end, __bss_stop, PAGE_HYP_RO);
        if (ret)
                return ret;

        ret = pkvm_create_mappings(virt, virt + size, PAGE_HYP);
        if (ret)
                return ret;

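        /*
         * Map each CPU's per-CPU region and the page backing its hyp
         * stack; stack_hyp_va is the high end of the stack, which grows
         * downwards.
         */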
        for (i = 0; i < hyp_nr_cpus; i++) {
                start = (void *)kern_hyp_va(per_cpu_base[i]);
                end = start + PAGE_ALIGN(hyp_percpu_size);
                ret = pkvm_create_mappings(start, end, PAGE_HYP);
                if (ret)
                        return ret;

                end = (void *)per_cpu_ptr(&kvm_init_params, i)->stack_hyp_va;
                start = end - PAGE_SIZE;
                ret = pkvm_create_mappings(start, end, PAGE_HYP);
                if (ret)
                        return ret;
        }

        return 0;
}

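/*
 * Point each CPU's init parameters at the new hyp PGD, and clean the
 * update to the PoC so it is visible to CPUs entering hyp with their
 * caches disabled.
 */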
static void update_nvhe_init_params(void)
{
        struct kvm_nvhe_init_params *params;
        unsigned long i;

        for (i = 0; i < hyp_nr_cpus; i++) {
                params = per_cpu_ptr(&kvm_init_params, i);
                params->pgd_pa = __hyp_pa(pkvm_pgtable.pgd);
                __flush_dcache_area(params, sizeof(*params));
        }
}

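/* Page allocation callback for the hyp page-table mm_ops; arg is unused. */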
static void *hyp_zalloc_hyp_page(void *arg)
{
        return hyp_alloc_pages(&hpool, 0);
}

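/*
 * Second stage of the hyp init, entered on the new page-tables: hand the
 * remaining page-table pages over to the hyp_pool allocator, prepare the
 * host stage-2, install the final mm_ops, then return to the host via the
 * saved host context.
 */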
void __noreturn __pkvm_init_finalise(void)
{
        struct kvm_host_data *host_data = this_cpu_ptr(&kvm_host_data);
        struct kvm_cpu_context *host_ctxt = &host_data->host_ctxt;
        unsigned long nr_pages, reserved_pages, pfn;
        int ret;

        /* Now that the vmemmap is backed, install the full-fledged allocator */
        pfn = hyp_virt_to_pfn(hyp_pgt_base);
        nr_pages = hyp_s1_pgtable_pages();
        reserved_pages = hyp_early_alloc_nr_used_pages();
        ret = hyp_pool_init(&hpool, pfn, nr_pages, reserved_pages);
        if (ret)
                goto out;

        ret = kvm_host_prepare_stage2(host_s2_mem_pgt_base, host_s2_dev_pgt_base);
        if (ret)
                goto out;

        pkvm_pgtable_mm_ops = (struct kvm_pgtable_mm_ops) {
                .zalloc_page = hyp_zalloc_hyp_page,
                .phys_to_virt = hyp_phys_to_virt,
                .virt_to_phys = hyp_virt_to_phys,
                .get_page = hyp_get_page,
                .put_page = hyp_put_page,
        };
        pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;

out:
        /*
         * We tail-called to this function from handle___pkvm_init() and will
         * not return, so make sure to propagate the return value to the host.
         */
        cpu_reg(host_ctxt, 1) = ret;

        __host_enter(host_ctxt);
}

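/*
 * Entry point of the pKVM (protected KVM) hyp initialisation: carve up the
 * memory pool donated by the host, recreate the hyp mappings, then jump via
 * the idmap to __pkvm_init_switch_pgd(), which switches onto the new
 * page-tables and tail-calls __pkvm_init_finalise(). This never returns;
 * the result is propagated back to the host by __pkvm_init_finalise().
 */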
int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
                unsigned long *per_cpu_base, u32 hyp_va_bits)
{
        struct kvm_nvhe_init_params *params;
        void *virt = hyp_phys_to_virt(phys);
        void (*fn)(phys_addr_t params_pa, void *finalize_fn_va);
        int ret;

        if (!PAGE_ALIGNED(phys) || !PAGE_ALIGNED(size))
                return -EINVAL;

        hyp_spin_lock_init(&pkvm_pgd_lock);
        hyp_nr_cpus = nr_cpus;

        ret = divide_memory_pool(virt, size);
        if (ret)
                return ret;

        ret = recreate_hyp_mappings(phys, size, per_cpu_base, hyp_va_bits);
        if (ret)
                return ret;

        update_nvhe_init_params();

        /* Jump in the idmap page to switch to the new page-tables */
        params = this_cpu_ptr(&kvm_init_params);
        fn = (typeof(fn))__hyp_pa(__pkvm_init_switch_pgd);
        fn(__hyp_pa(params), __pkvm_init_finalise);

        unreachable();
}