arch/s390/mm/pgalloc.c
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/mm.h>
#include <linux/sysctl.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

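/*
 * The vm.allocate_pgste sysctl selects whether new page tables are
 * allocated as full 4K tables with page status table extensions
 * (PGSTEs). PGSTEs are needed to run KVM guests.
 */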
#ifdef CONFIG_PGSTE

static int page_table_allocate_pgste_min = 0;
static int page_table_allocate_pgste_max = 1;
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec,
		.extra1		= &page_table_allocate_pgste_min,
		.extra2		= &page_table_allocate_pgste_max,
	},
	{ }
};

static struct ctl_table page_table_sysctl_dir[] = {
	{
		.procname	= "vm",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= page_table_sysctl,
	},
	{ }
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */

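/*
 * Allocation and freeing of region/segment (CRST) tables. A CRST
 * table has 2048 entries of 8 bytes and therefore needs an order-2
 * allocation (four pages, 16K).
 */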
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, 2);
}

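/*
 * Per-CPU callback after an upgrade: if this CPU is running with the
 * upgraded mm, reload the user ASCE so it points to the new top-level
 * table, then flush the local TLB.
 */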
static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm) {
		clear_user_asce();
		set_user_asce(mm);
	}
	__tlb_flush_local();
}

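/*
 * Grow the page table hierarchy: stack new top-level region tables on
 * top of the existing ones until the address-space limit (asce_limit)
 * covers the requested limit, then let every CPU running this mm pick
 * up the new ASCE.
 */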
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;
	int flush;

	BUG_ON(limit > TASK_MAX_SIZE);
	flush = 0;
repeat:
	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;
	spin_lock_bh(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;
		flush = 1;
	}
	spin_unlock_bh(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	if (flush)
		on_each_cpu(__crst_table_upgrade, mm, 0);
	return 0;
}

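/*
 * Shrink the page table hierarchy: drop top-level region tables until
 * the address-space limit is no larger than the requested limit, then
 * reinstall the user ASCE if the current task is running on this mm.
 */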
void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	if (current->active_mm == mm) {
		clear_user_asce();
		__tlb_flush_mm(mm);
	}
	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
	if (current->active_mm == mm)
		set_user_asce(mm);
}

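/*
 * Atomically xor bits into *v and return the new value. Used to flip
 * the 2K-fragment state bits kept in page->_mapcount.
 */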
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}

/*
 * Page table (PTE) allocation/free routines.
 *
 * An s390 page table is 2K (256 entries of 8 bytes), so two of them
 * fit into one 4K page. page->_mapcount is used as a bit mask: bits
 * 0-1 mark which 2K halves are allocated, bits 4-5 mark halves that
 * are pending free via page_table_free_rcu. Pages with a free half
 * are kept on mm->context.pgtable_list. Page tables with PGSTEs
 * (needed for KVM) always use a full 4K page: PTEs in the lower 2K,
 * PGSTEs in the upper 2K.
 */
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *table;
	struct page *page;
	unsigned int mask, bit;

	/* Try to get a fragment of a 4K page as a 2K page table */
	if (!mm_alloc_pgste(mm)) {
		table = NULL;
		spin_lock_bh(&mm->context.list_lock);
		if (!list_empty(&mm->context.pgtable_list)) {
			page = list_first_entry(&mm->context.pgtable_list,
						struct page, lru);
			mask = atomic_read(&page->_mapcount);
			mask = (mask | (mask >> 4)) & 3;
			if (mask != 3) {
				table = (unsigned long *) page_to_phys(page);
				bit = mask & 1;		/* =1 -> second 2K */
				if (bit)
					table += PTRS_PER_PTE;
				atomic_xor_bits(&page->_mapcount, 1U << bit);
				list_del(&page->lru);
			}
		}
		spin_unlock_bh(&mm->context.list_lock);
		if (table)
			return table;
	}
	/* Allocate a fresh page */
	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	/* Initialize page table */
	table = (unsigned long *) page_to_phys(page);
	if (mm_alloc_pgste(mm)) {
		/* Return 4K page table with PGSTEs */
		atomic_set(&page->_mapcount, 3);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
		clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
	} else {
		/* Return the first 2K fragment of the page */
		atomic_set(&page->_mapcount, 1);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
		spin_unlock_bh(&mm->context.list_lock);
	}
	return table;
}

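/*
 * Free a page table immediately: release the 2K fragment, or the whole
 * 4K page for a PGSTE table, and free the page once no fragment of it
 * is in use or pending free any more.
 */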
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (!mm_alloc_pgste(mm)) {
		/* Free 2K page table fragment of a 4K page */
		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
		spin_lock_bh(&mm->context.list_lock);
		mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
		if (mask & 3)
			list_add(&page->lru, &mm->context.pgtable_list);
		else
			list_del(&page->lru);
		spin_unlock_bh(&mm->context.list_lock);
		if (mask != 0)
			return;
	}

	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	__free_page(page);
}

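/*
 * Free a page table via the RCU-based mmu_gather machinery: mark the
 * 2K fragment as pending, encode which fragment (or 3 for a full PGSTE
 * table) in the low bits of the table pointer and queue it with
 * tlb_remove_table. The actual release happens in __tlb_remove_table.
 */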
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (mm_alloc_pgste(mm)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) (__pa(table) | 3);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
	spin_lock_bh(&mm->context.list_lock);
	mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
	if (mask & 3)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	else
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *) (__pa(table) | (1U << bit));
	tlb_remove_table(tlb, table);
}

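/*
 * Really free a table that went through tlb_remove_table. The low two
 * bits of the pointer encode what to free: 0 = 16K CRST (pmd/pud)
 * table, 1/2 = lower/higher 2K half of a page table, 3 = 4K page
 * table with PGSTEs.
 */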
static void __tlb_remove_table(void *_table)
{
	unsigned int mask = (unsigned long) _table & 3;
	void *table = (void *)((unsigned long) _table ^ mask);
	struct page *page = pfn_to_page(__pa(table) >> PAGE_SHIFT);

	switch (mask) {
	case 0:		/* pmd or pud */
		free_pages((unsigned long) table, 2);
		break;
	case 1:		/* lower 2K of a 4K page table */
	case 2:		/* higher 2K of a 4K page table */
		if (atomic_xor_bits(&page->_mapcount, mask << 4) != 0)
			break;
		/* fallthrough */
	case 3:		/* 4K page table with pgstes */
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
		break;
	}
}

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}

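/*
 * Queue a table for deferred freeing. Tables are collected in a batch
 * page that is handed to call_rcu_sched by tlb_table_flush. If no
 * batch page can be allocated, the mm is flushed and the table is
 * freed right away after synchronizing with all CPUs via an IPI.
 */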
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->mm->context.flush_mm = 1;
	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm_lazy(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_flush_mmu(tlb);
}