1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
4 */
6 #include <asm/export.h>
7 #include <asm/loongarch.h>
9 #include <asm/pgtable.h>
10 #include <asm/regdef.h>
11 #include <asm/stackframe.h>
13 #define PTRS_PER_PGD_BITS (PAGE_SHIFT - 3)
14 #define PTRS_PER_PUD_BITS (PAGE_SHIFT - 3)
15 #define PTRS_PER_PMD_BITS (PAGE_SHIFT - 3)
16 #define PTRS_PER_PTE_BITS (PAGE_SHIFT - 3)
18 .macro tlb_do_page_fault, write
19 SYM_FUNC_START(tlb_do_page_fault_\write)
21 csrrd a2, LOONGARCH_CSR_BADV
23 REG_S a2, sp, PT_BVADDR
25 la.abs t0, do_page_fault
28 SYM_FUNC_END(tlb_do_page_fault_\write)
34 SYM_FUNC_START(handle_tlb_protect)
39 csrrd a2, LOONGARCH_CSR_BADV
40 REG_S a2, sp, PT_BVADDR
41 la.abs t0, do_page_fault
44 SYM_FUNC_END(handle_tlb_protect)
46 SYM_FUNC_START(handle_tlb_load)
47 csrwr t0, EXCEPTION_KS0
48 csrwr t1, EXCEPTION_KS1
49 csrwr ra, EXCEPTION_KS2
52 * The vmalloc handling is not in the hotpath.
54 csrrd t0, LOONGARCH_CSR_BADV
56 csrrd t1, LOONGARCH_CSR_PGDL
59 /* Get PGD offset in bytes */
60 bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
62 #if CONFIG_PGTABLE_LEVELS > 3
64 bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
67 #if CONFIG_PGTABLE_LEVELS > 2
69 bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
75 * For huge tlb entries, pmde doesn't contain an address but
76 * instead contains the tlb pte. Check the PAGE_HUGE bit and
77 * see if we need to jump to huge tlb processing.
79 rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1
80 bltz ra, tlb_huge_update_load
82 rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
83 bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
84 alsl.d t1, t0, ra, _PTE_T_LOG2
87 smp_pgtable_change_load:
92 andi ra, t0, _PAGE_PRESENT
93 beqz ra, nopage_tlb_load
95 ori t0, t0, _PAGE_VALID
98 beqz t0, smp_pgtable_change_load
103 bstrins.d t1, zero, 3, 3
106 csrwr t0, LOONGARCH_CSR_TLBELO0
107 csrwr t1, LOONGARCH_CSR_TLBELO1
110 csrrd t0, EXCEPTION_KS0
111 csrrd t1, EXCEPTION_KS1
112 csrrd ra, EXCEPTION_KS2
117 la.abs t1, swapper_pg_dir
121 /* This is the entry point of a huge page. */
122 tlb_huge_update_load:
126 andi t0, ra, _PAGE_PRESENT
127 beqz t0, nopage_tlb_load
130 ori t0, ra, _PAGE_VALID
132 beqz t0, tlb_huge_update_load
133 ori t0, ra, _PAGE_VALID
135 rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
136 ori t0, ra, _PAGE_VALID
140 addu16i.d t1, zero, -(CSR_TLBIDX_EHINV >> 16)
142 csrxchg ra, t1, LOONGARCH_CSR_TLBIDX
145 csrxchg zero, t1, LOONGARCH_CSR_TLBIDX
148 * A huge PTE describes an area the size of the
149 * configured huge page size. This is twice the
150 * of the large TLB entry size we intend to use.
151 * A TLB entry half the size of the configured
152 * huge page size is configured into entrylo0
153 * and entrylo1 to cover the contiguous huge PTE
156 /* Huge page: Move Global bit */
157 xori t0, t0, _PAGE_HUGE
158 lu12i.w t1, _PAGE_HGLOBAL >> 12
160 srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
164 csrwr ra, LOONGARCH_CSR_TLBELO0
166 /* Convert to entrylo1 */
168 slli.d t1, t1, (HPAGE_SHIFT - 1)
170 csrwr t0, LOONGARCH_CSR_TLBELO1
172 /* Set huge page tlb entry size */
173 addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
174 addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
175 csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
179 addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
180 addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
181 csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
183 csrrd t0, EXCEPTION_KS0
184 csrrd t1, EXCEPTION_KS1
185 csrrd ra, EXCEPTION_KS2
190 csrrd ra, EXCEPTION_KS2
191 la.abs t0, tlb_do_page_fault_0
193 SYM_FUNC_END(handle_tlb_load)
195 SYM_FUNC_START(handle_tlb_store)
196 csrwr t0, EXCEPTION_KS0
197 csrwr t1, EXCEPTION_KS1
198 csrwr ra, EXCEPTION_KS2
201 * The vmalloc handling is not in the hotpath.
203 csrrd t0, LOONGARCH_CSR_BADV
204 bltz t0, vmalloc_store
205 csrrd t1, LOONGARCH_CSR_PGDL
208 /* Get PGD offset in bytes */
209 bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
211 #if CONFIG_PGTABLE_LEVELS > 3
213 bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
216 #if CONFIG_PGTABLE_LEVELS > 2
218 bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
224 * For huge tlb entries, pmde doesn't contain an address but
225 * instead contains the tlb pte. Check the PAGE_HUGE bit and
226 * see if we need to jump to huge tlb processing.
228 rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1
229 bltz ra, tlb_huge_update_store
231 rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
232 bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
233 alsl.d t1, t0, ra, _PTE_T_LOG2
236 smp_pgtable_change_store:
241 andi ra, t0, _PAGE_PRESENT | _PAGE_WRITE
242 xori ra, ra, _PAGE_PRESENT | _PAGE_WRITE
243 bnez ra, nopage_tlb_store
245 ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
248 beqz t0, smp_pgtable_change_store
253 bstrins.d t1, zero, 3, 3
256 csrwr t0, LOONGARCH_CSR_TLBELO0
257 csrwr t1, LOONGARCH_CSR_TLBELO1
260 csrrd t0, EXCEPTION_KS0
261 csrrd t1, EXCEPTION_KS1
262 csrrd ra, EXCEPTION_KS2
267 la.abs t1, swapper_pg_dir
271 /* This is the entry point of a huge page. */
272 tlb_huge_update_store:
276 andi t0, ra, _PAGE_PRESENT | _PAGE_WRITE
277 xori t0, t0, _PAGE_PRESENT | _PAGE_WRITE
278 bnez t0, nopage_tlb_store
281 ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
283 beqz t0, tlb_huge_update_store
284 ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
286 rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
287 ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
291 addu16i.d t1, zero, -(CSR_TLBIDX_EHINV >> 16)
293 csrxchg ra, t1, LOONGARCH_CSR_TLBIDX
296 csrxchg zero, t1, LOONGARCH_CSR_TLBIDX
298 * A huge PTE describes an area the size of the
299 * configured huge page size. This is twice the
300 * of the large TLB entry size we intend to use.
301 * A TLB entry half the size of the configured
302 * huge page size is configured into entrylo0
303 * and entrylo1 to cover the contiguous huge PTE
306 /* Huge page: Move Global bit */
307 xori t0, t0, _PAGE_HUGE
308 lu12i.w t1, _PAGE_HGLOBAL >> 12
310 srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
314 csrwr ra, LOONGARCH_CSR_TLBELO0
316 /* Convert to entrylo1 */
318 slli.d t1, t1, (HPAGE_SHIFT - 1)
320 csrwr t0, LOONGARCH_CSR_TLBELO1
322 /* Set huge page tlb entry size */
323 addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
324 addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
325 csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
329 /* Reset default page size */
330 addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
331 addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
332 csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
334 csrrd t0, EXCEPTION_KS0
335 csrrd t1, EXCEPTION_KS1
336 csrrd ra, EXCEPTION_KS2
341 csrrd ra, EXCEPTION_KS2
342 la.abs t0, tlb_do_page_fault_1
344 SYM_FUNC_END(handle_tlb_store)
346 SYM_FUNC_START(handle_tlb_modify)
347 csrwr t0, EXCEPTION_KS0
348 csrwr t1, EXCEPTION_KS1
349 csrwr ra, EXCEPTION_KS2
352 * The vmalloc handling is not in the hotpath.
354 csrrd t0, LOONGARCH_CSR_BADV
355 bltz t0, vmalloc_modify
356 csrrd t1, LOONGARCH_CSR_PGDL
359 /* Get PGD offset in bytes */
360 bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
362 #if CONFIG_PGTABLE_LEVELS > 3
364 bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
367 #if CONFIG_PGTABLE_LEVELS > 2
369 bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
375 * For huge tlb entries, pmde doesn't contain an address but
376 * instead contains the tlb pte. Check the PAGE_HUGE bit and
377 * see if we need to jump to huge tlb processing.
379 rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1
380 bltz ra, tlb_huge_update_modify
382 rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
383 bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
384 alsl.d t1, t0, ra, _PTE_T_LOG2
387 smp_pgtable_change_modify:
392 andi ra, t0, _PAGE_WRITE
393 beqz ra, nopage_tlb_modify
395 ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
398 beqz t0, smp_pgtable_change_modify
403 bstrins.d t1, zero, 3, 3
406 csrwr t0, LOONGARCH_CSR_TLBELO0
407 csrwr t1, LOONGARCH_CSR_TLBELO1
410 csrrd t0, EXCEPTION_KS0
411 csrrd t1, EXCEPTION_KS1
412 csrrd ra, EXCEPTION_KS2
417 la.abs t1, swapper_pg_dir
418 b vmalloc_done_modify
421 /* This is the entry point of a huge page. */
422 tlb_huge_update_modify:
426 andi t0, ra, _PAGE_WRITE
427 beqz t0, nopage_tlb_modify
430 ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
432 beqz t0, tlb_huge_update_modify
433 ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
435 rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
436 ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
440 * A huge PTE describes an area the size of the
441 * configured huge page size. This is twice the
442 * of the large TLB entry size we intend to use.
443 * A TLB entry half the size of the configured
444 * huge page size is configured into entrylo0
445 * and entrylo1 to cover the contiguous huge PTE
448 /* Huge page: Move Global bit */
449 xori t0, t0, _PAGE_HUGE
450 lu12i.w t1, _PAGE_HGLOBAL >> 12
452 srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
456 csrwr ra, LOONGARCH_CSR_TLBELO0
458 /* Convert to entrylo1 */
460 slli.d t1, t1, (HPAGE_SHIFT - 1)
462 csrwr t0, LOONGARCH_CSR_TLBELO1
464 /* Set huge page tlb entry size */
465 addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
466 addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
467 csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
471 /* Reset default page size */
472 addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
473 addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
474 csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
476 csrrd t0, EXCEPTION_KS0
477 csrrd t1, EXCEPTION_KS1
478 csrrd ra, EXCEPTION_KS2
483 csrrd ra, EXCEPTION_KS2
484 la.abs t0, tlb_do_page_fault_1
486 SYM_FUNC_END(handle_tlb_modify)
488 SYM_FUNC_START(handle_tlb_refill)
489 csrwr t0, LOONGARCH_CSR_TLBRSAVE
490 csrrd t0, LOONGARCH_CSR_PGD
492 #if CONFIG_PGTABLE_LEVELS > 3
495 #if CONFIG_PGTABLE_LEVELS > 2
501 csrrd t0, LOONGARCH_CSR_TLBRSAVE
503 SYM_FUNC_END(handle_tlb_refill)