// SPDX-License-Identifier: GPL-2.0
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sysctl.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

static int page_table_allocate_pgste_min = 0;
static int page_table_allocate_pgste_max = 1;
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);
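
/*
 * The vm.allocate_pgste sysctl forces full 4K page tables with page
 * status table extensions (PGSTEs) for all processes instead of the
 * default 2K fragments. PGSTEs are required to run KVM guests; the knob
 * is typically set on hosts whose hypervisor binaries are not already
 * marked to request PGSTE allocation themselves.
 */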
static struct ctl_table page_table_sysctl[] = {
	{
		.procname = "allocate_pgste",
		.data = &page_table_allocate_pgste,
		.maxlen = sizeof(int),
		.mode = S_IRUGO | S_IWUSR,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &page_table_allocate_pgste_min,
		.extra2 = &page_table_allocate_pgste_max,
	},
	{ }
};

static struct ctl_table page_table_sysctl_dir[] = {
	{
		.procname = "vm",
		.maxlen = 0,
		.mode = 0555,
		.child = page_table_sysctl,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */
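
/*
 * Region and segment tables (CRSTs) are four pages long: an order-2
 * allocation holding 2048 eight-byte entries. The table is used through
 * its physical address; page_to_phys() doubles as the virtual address
 * here because the kernel accesses these tables via the 1:1 mapping.
 */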
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;
	arch_set_page_dat(page, 2);
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, 2);
}

static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm) {
		clear_user_asce();
		set_user_asce(mm);
	}
	__tlb_flush_local();
}
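
/*
 * crst_table_upgrade() adds region table levels on demand: whenever a
 * mapping request needs addresses above the current asce_limit, another
 * top-level table is allocated, the old top level is hooked in below it
 * and the ASCE is rewritten. __crst_table_upgrade() is then run on every
 * CPU so that active users of the mm pick up the new ASCE.
 */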
int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *table, *pgd;
	int rc, notify;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(mm->context.asce_limit < _REGION2_SIZE);
	if (end >= TASK_SIZE_MAX)
		return -ENOMEM;
	rc = 0;
	notify = 0;
	while (mm->context.asce_limit < end) {
		table = crst_table_alloc(mm);
		if (!table) {
			rc = -ENOMEM;
			break;
		}
		spin_lock_bh(&mm->page_table_lock);
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit == _REGION2_SIZE) {
			crst_table_init(table, _REGION2_ENTRY_EMPTY);
			p4d_populate(mm, (p4d_t *) table, (pud_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = _REGION1_SIZE;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		} else {
			crst_table_init(table, _REGION1_ENTRY_EMPTY);
			pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
			mm->pgd = (pgd_t *) table;
			mm->context.asce_limit = -PAGE_SIZE;
			mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
				_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
		}
		notify = 1;
		spin_unlock_bh(&mm->page_table_lock);
	}
	if (notify)
		on_each_cpu(__crst_table_upgrade, mm, 0);
	return rc;
}
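
/*
 * crst_table_downgrade() goes the other way and is only needed for
 * compat (31-bit) tasks: once the address space fits below _REGION3_SIZE
 * the mm drops back to a two-level, segment-table ASCE and the now
 * superfluous region table is freed.
 */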
void crst_table_downgrade(struct mm_struct *mm)
{
	pgd_t *pgd;

	/* downgrade should only happen from 3 to 2 levels (compat only) */
	VM_BUG_ON(mm->context.asce_limit != _REGION2_SIZE);
	if (current->active_mm == mm) {
		clear_user_asce();
		__tlb_flush_mm(mm);
	}
	pgd = mm->pgd;
	mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
	mm->context.asce_limit = _REGION3_SIZE;
	mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			   _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
	crst_table_free(mm, (unsigned long *) pgd);
	if (current->active_mm == mm)
		set_user_asce(mm);
}
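
/*
 * atomic_xor_bits() toggles bits in an atomic_t without taking a lock
 * and returns the new value. It is the primitive used below to track the
 * allocation state of the two 2K halves of a page-table page in
 * page->_mapcount.
 */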
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

#ifdef CONFIG_PGSTE

struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct page *page;
	u64 *table;

	page = alloc_page(GFP_KERNEL);
	if (page) {
		table = (u64 *)page_to_phys(page);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return page;
}

void page_table_free_pgste(struct page *page)
{
	__free_page(page);
}

#endif /* CONFIG_PGSTE */
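
/*
 * A 4K page normally carries two 2K page tables. page->_mapcount is used
 * as a small bitmap to track them: bits 0-1 mark a half as allocated,
 * bits 4-5 mark a half as queued for delayed (RCU) release. Pages with a
 * free half sit on mm->context.pgtable_list. A pgste-enabled mm always
 * consumes a full 4K table: PTEs in the lower half, PGSTEs in the upper.
 */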
/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct page *page;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			page = list_first_entry(&mm->context.pgtable_list,
						struct page, lru);
			mask = atomic_read(&page->_mapcount);
			mask = (mask | (mask >> 4)) & 3;
			if (mask != 3) {
				table = (unsigned long *) page_to_phys(page);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&page->_mapcount, 1U << bit);
				list_del(&page->lru);
			}
		}
		spin_unlock_bh(&mm->context.lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	arch_set_page_dat(page, 0);
	/* Initialize page table */
	table = (unsigned long *) page_to_phys(page);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_set(&page->_mapcount, 3);
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_set(&page->_mapcount, 1);
		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
		spin_lock_bh(&mm->context.lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.lock);
	}
	return table;
}
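
/*
 * page_table_free() is the direct counterpart of page_table_alloc() and
 * is used on paths where no TLB gather is in progress, e.g. when a
 * freshly allocated table is discarded before it was ever hooked into
 * the page table hierarchy. Freeing the last live 2K fragment of a page
 * also releases the page itself.
 */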
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.lock);
		mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
		if (mask & 3)
			list_add(&page->lru, &mm->context.pgtable_list);
		else
			list_del(&page->lru);
		spin_unlock_bh(&mm->context.lock);
		if (mask != 0)
			return;
	}

	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	__free_page(page);
}
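
/*
 * page_table_free_rcu() is the variant used while tearing down address
 * space ranges: instead of releasing the table at once it hands it to
 * the mmu_gather batch, which frees it only after an RCU grace period
 * (or an IPI broadcast as fallback), so that concurrent lockless page
 * table walkers never see a table disappear underneath them.
 */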
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) (__pa(table) | 3);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.lock);
	mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
	if (mask & 3)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	else
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.lock);
	table = (unsigned long *) (__pa(table) | (1U << bit));
	tlb_remove_table(tlb, table);
}
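
/*
 * The low two bits of the pointers queued via tlb_remove_table() encode
 * what is being freed: 0 = a 4-page CRST table, 1 = the lower 2K half,
 * 2 = the upper 2K half, 3 = a full 4K table with PGSTEs. The tag is
 * stripped again here before the memory is actually released.
 */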
static void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 3;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);

	switch (mask) {
	case 0:		/* pmd, pud, or p4d */
		free_pages((unsigned long) table, 2);
		break;
	case 1:		/* lower 2K of a 4K page table */
	case 2:		/* higher 2K of a 4K page table */
		if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0)
			break;
		/* fallthrough */
	case 3:		/* 4K page table with pgstes */
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
		break;
	}
}
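
/*
 * tlb_remove_table_smp_sync() is intentionally empty: the IPI that
 * delivers it is the synchronization point. Once every CPU has taken the
 * interrupt, no CPU can still be walking the table inside an
 * interrupts-off section, so it is safe to free it immediately.
 */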
static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);
	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);
	free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}
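
/*
 * tlb_remove_table() collects table pointers in a per-gather batch page
 * that is freed via call_rcu_sched() in tlb_table_flush(). If no batch
 * page can be allocated, the table is freed right away after an IPI to
 * all CPUs, which provides an equivalent synchronization point for
 * walkers that run with interrupts disabled.
 */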
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->mm->context.flush_mm = 1;
	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm_lazy(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_flush_mmu(tlb);
}