// SPDX-License-Identifier: GPL-2.0
/*
 * Based upon linux/arch/m68k/mm/sun3mmu.c
 * Based upon linux/arch/ppc/mm/mmu_context.c
 *
 * Implementations of mm routines specific to the Coldfire MMU.
 *
 * Copyright (c) 2008 Freescale Semiconductor, Inc.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/memblock.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/mcf_pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

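/*
 * KMAPAREA() tests whether an address falls in the kernel
 * vmalloc/kmap window; such addresses are resolved against init_mm
 * rather than the current process in the TLB miss handler below.
 */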
#define KMAPAREA(x)	((x >= VMALLOC_START) && (x < KMAP_END))

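/*
 * State for the PPC-style context (ASID) allocator set up by
 * cf_mmu_context_init() below: next_mmu_context is the next number to
 * hand out, context_map is a bitmap of contexts in use, nr_free_contexts
 * counts what remains, and context_mm maps a context number back to the
 * mm that owns it.
 */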
mm_context_t next_mmu_context;
unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
atomic_t nr_free_contexts;
struct mm_struct *context_mm[LAST_CONTEXT + 1];
unsigned long num_pages;

/*
 * ColdFire paging_init derived from sun3.
 */
void __init paging_init(void)
{
	pgd_t *pg_dir;
	pte_t *pg_table;
	unsigned long address, size;
	unsigned long next_pgtable, bootmem_end;
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
	int i;

	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	pg_dir = swapper_pg_dir;
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	size = num_pages * sizeof(pte_t);
	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
	next_pgtable = (unsigned long) memblock_alloc(size, PAGE_SIZE);
	if (!next_pgtable)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, size, PAGE_SIZE);

	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;

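	/*
	 * Map kernel memory linearly: allocate one page table per pgd
	 * slot and point each PTE at the page backing that virtual
	 * address, until all of [PAGE_OFFSET, high_memory) is covered.
	 */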
	address = PAGE_OFFSET;
	while (address < (unsigned long)high_memory) {
		pg_table = (pte_t *) next_pgtable;
		next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
		pgd_val(*pg_dir) = (unsigned long) pg_table;
		pg_dir++;

		/* now change pg_table to kernel virtual addresses */
		for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
			pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT);
			if (address >= (unsigned long) high_memory)
				pte_val(pte) = 0;

			set_pte(pg_table, pte);
			address += PAGE_SIZE;
		}
	}

	current->mm = NULL;
	max_zone_pfn[ZONE_DMA] = PFN_DOWN(_ramend);
	free_area_init(max_zone_pfn);
}

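/*
 * Handle a missed TLB entry for address mmuar. Returns 0 once a
 * translation has been loaded into the I- or D-TLB, and -1 when the
 * access cannot be resolved here, so the caller can fall back to the
 * normal fault path.
 */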
int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
{
	unsigned long flags, mmuar, mmutr;
	struct mm_struct *mm;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;
	int ret = -1;
	int asid;

	local_irq_save(flags);

	mmuar = (dtlb) ? mmu_read(MMUAR) :
		regs->pc + (extension_word * sizeof(long));

	mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
	if (!mm)
		goto out;

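	/*
	 * Walk the page tables. On m68k the p4d/pud levels are folded
	 * into the pgd, so those lookups are no-ops; any hole in the
	 * walk means no mapping exists and the fault is handed back.
	 */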
	pgd = pgd_offset(mm, mmuar);
	if (pgd_none(*pgd))
		goto out;

	p4d = p4d_offset(pgd, mmuar);
	if (p4d_none(*p4d))
		goto out;

	pud = pud_offset(p4d, mmuar);
	if (pud_none(*pud))
		goto out;

	pmd = pmd_offset(pud, mmuar);
	if (pmd_none(*pmd))
		goto out;

	pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
				: pte_offset_map(pmd, mmuar);
	if (!pte || pte_none(*pte) || !pte_present(*pte))
		goto out;

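	/*
	 * Accessed/dirty state is tracked in software: every hit marks
	 * the PTE young, a write hit marks it dirty, and clean user
	 * pages are loaded write-protected so that the first store
	 * faults back in here to set the dirty bit.
	 */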
	if (write) {
		if (!pte_write(*pte))
			goto out;
		set_pte(pte, pte_mkdirty(*pte));
	}

	set_pte(pte, pte_mkyoung(*pte));
	asid = mm->context & 0xff;
	if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
		set_pte(pte, pte_wrprotect(*pte));

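	/*
	 * Program the hardware TLB: MMUTR takes the virtual page, ASID
	 * tag and valid bit, MMUDR takes the physical page and access
	 * attributes, and the MMUOR write commits the entry into the
	 * instruction or data TLB.
	 */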
	mmutr = (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) | MMUTR_V;
	if ((mmuar < TASK_UNMAPPED_BASE) || (mmuar >= TASK_SIZE))
		mmutr |= (pte->pte & CF_PAGE_MMUTR_MASK) >> CF_PAGE_MMUTR_SHIFT;
	mmu_write(MMUTR, mmutr);

	mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
		((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);

	if (dtlb)
		mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
	else
		mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);
	ret = 0;
out:
	if (pte && !KMAPAREA(mmuar))
		pte_unmap(pte);
	local_irq_restore(flags);
	return ret;
}

void __init cf_bootmem_alloc(void)
{
	unsigned long memstart;

	/* _rambase and _ramend will be naturally page aligned */
	m68k_memory[0].addr = _rambase;
	m68k_memory[0].size = _ramend - _rambase;

	memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0,
			  MEMBLOCK_NONE);

	/* compute total pages in system */
	num_pages = PFN_DOWN(_ramend - _rambase);

	/* page numbers */
	memstart = PAGE_ALIGN(_ramstart);
	min_low_pfn = PFN_DOWN(_rambase);
	max_pfn = max_low_pfn = PFN_DOWN(_ramend);
	high_memory = (void *)_ramend;

	/* Reserve kernel text/data/bss */
	memblock_reserve(_rambase, memstart - _rambase);

	m68k_virt_to_node_shift = fls(_ramend - 1) - 6;
	module_fixup(NULL, __start_fixup, __stop_fixup);

	/* setup node data */
	m68k_setup_node(0);
}

/*
 * Initialize the context management stuff.
 * The following was taken from arch/ppc/mmu_context.c
 */
void __init cf_mmu_context_init(void)
{
	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes FIRST_CONTEXT < 32.
	 */
	context_map[0] = (1 << FIRST_CONTEXT) - 1;
	next_mmu_context = FIRST_CONTEXT;
	atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
}

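/*
 * For reference, the consumer side lives in <asm/mmu_context.h>: when
 * a mm without a context is switched to, it grabs the next free number,
 * stealing one if none are free. Roughly (a sketch, not the verbatim
 * header code):
 *
 *	while (atomic_dec_and_test_lt(&nr_free_contexts)) {
 *		atomic_inc(&nr_free_contexts);
 *		steal_context();
 *	}
 *	ctx = next_mmu_context;
 *	while (test_and_set_bit(ctx, context_map))
 *		ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
 *	mm->context = ctx;
 *	context_mm[ctx] = mm;
 */
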
/*
 * Steal a context from a task that has one at the moment.
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :).  This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 */
void steal_context(void)
{
	struct mm_struct *mm;
	/*
	 * free up context `next_mmu_context'
	 * if we shouldn't free context 0, don't...
	 */
	if (next_mmu_context < FIRST_CONTEXT)
		next_mmu_context = FIRST_CONTEXT;
	mm = context_mm[next_mmu_context];
	flush_tlb_mm(mm);
	destroy_context(mm);
}

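/*
 * Translation of the VM_READ/VM_WRITE/VM_EXEC/VM_SHARED protection
 * combinations into ColdFire page flags; vm_get_page_prot() (emitted
 * by DECLARE_VM_GET_PAGE_PROT below) indexes this table.
 */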
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_READABLE),
	[VM_WRITE]					= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_WRITABLE),
	[VM_WRITE | VM_READ]				= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_READABLE |
								   CF_PAGE_WRITABLE),
	[VM_EXEC]					= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_EXEC),
	[VM_EXEC | VM_READ]				= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_READABLE |
								   CF_PAGE_EXEC),
	[VM_EXEC | VM_WRITE]				= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_WRITABLE |
								   CF_PAGE_EXEC),
	[VM_EXEC | VM_WRITE | VM_READ]			= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_READABLE |
								   CF_PAGE_WRITABLE |
								   CF_PAGE_EXEC),
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_READABLE),
	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ]		= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_READABLE |
								   CF_PAGE_SHARED),
	[VM_SHARED | VM_EXEC]				= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_EXEC),
	[VM_SHARED | VM_EXEC | VM_READ]			= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_READABLE |
								   CF_PAGE_EXEC),
	[VM_SHARED | VM_EXEC | VM_WRITE]		= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_SHARED |
								   CF_PAGE_EXEC),
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_READABLE |
								   CF_PAGE_SHARED |
								   CF_PAGE_EXEC)
};
DECLARE_VM_GET_PAGE_PROT