#ifndef __ALPHA_MMU_CONTEXT_H
#define __ALPHA_MMU_CONTEXT_H

/*
 * get a new mmu context..
 *
 * Copyright (C) 1996, Linus Torvalds
 */

#include <linux/mm_types.h>
#include <linux/sched.h>

#include <asm/machvec.h>
#include <asm/compiler.h>
#include <asm-generic/mm_hooks.h>
/*
 * Force a context reload. This is needed when we change the page
 * table pointer or when we update the ASN of the current process.
 */

/* Don't get into trouble with dueling __EXTERN_INLINEs.  */
#ifndef __EXTERN_INLINE
#include <asm/io.h>
#endif
static inline unsigned long
__reload_thread(struct pcb_struct *pcb)
{
        register unsigned long a0 __asm__("$16");
        register unsigned long v0 __asm__("$0");

        a0 = virt_to_phys(pcb);
        /* PAL_swpctx returns the old PCB's physical address in v0.  */
        __asm__ __volatile__(
                "call_pal %2 #__reload_thread"
                : "=r"(v0), "=r"(a0)
                : "i"(PAL_swpctx), "r"(a0)
                : "$1", "$22", "$23", "$24", "$25");

        return v0;
}
/*
 * The maximum ASNs the processor supports.  On the EV4 this is 63
 * but the PAL-code doesn't actually use this information.  On the
 * EV5 this is 127, and EV6 has 255.
 *
 * On the EV4, the ASNs are more-or-less useless anyway, as they are
 * only used as an icache tag, not for TB entries.  On the EV5 and EV6,
 * ASNs also validate the TB entries, and thus make a lot more sense.
 *
 * The EV4 ASNs don't even match the architecture manual, ugh.  And
 * I quote: "If a processor implements address space numbers (ASNs),
 * and the old PTE has the Address Space Match (ASM) bit clear (ASNs
 * in use) and the Valid bit set, then entries can also effectively be
 * made coherent by assigning a new, unused ASN to the currently
 * running process and not reusing the previous ASN before calling the
 * appropriate PALcode routine to invalidate the translation buffer (TB)".
 *
 * In short, the EV4 has a "kind of" ASN capability, but it doesn't actually
 * work correctly and can thus not be used (explaining the lack of PAL-code
 * support).
 */
#define EV4_MAX_ASN 63
#define EV5_MAX_ASN 127
#define EV6_MAX_ASN 255
#ifdef CONFIG_ALPHA_GENERIC
# define MAX_ASN        (alpha_mv.max_asn)
#else
# ifdef CONFIG_ALPHA_EV4
#  define MAX_ASN       EV4_MAX_ASN
# elif defined(CONFIG_ALPHA_EV5)
#  define MAX_ASN       EV5_MAX_ASN
# else
#  define MAX_ASN       EV6_MAX_ASN
# endif
#endif
/*
 * cpu_last_asn(processor):
 * 63                                            0
 * +-------------+----------------+--------------+
 * | asn version | this processor | hardware asn |
 * +-------------+----------------+--------------+
 */

#ifdef CONFIG_SMP
#include <asm/smp.h>
#define cpu_last_asn(cpuid)     (cpu_data[cpuid].last_asn)
#else
extern unsigned long last_asn;
#define cpu_last_asn(cpuid)     last_asn
#endif /* CONFIG_SMP */

#define WIDTH_HARDWARE_ASN      8
#define ASN_FIRST_VERSION (1UL << WIDTH_HARDWARE_ASN)
#define HARDWARE_ASN_MASK ((1UL << WIDTH_HARDWARE_ASN) - 1)
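
/*
 * Illustrative decomposition (an editor's sketch, not part of the kernel
 * interface): with WIDTH_HARDWARE_ASN == 8, a context value such as 0x305
 * splits into
 *
 *      unsigned long mmc     = 0x305;
 *      unsigned long hwasn   = mmc & HARDWARE_ASN_MASK;   // 0x005: ASN 5
 *      unsigned long version = mmc & ~HARDWARE_ASN_MASK;  // 0x300: version 3
 *
 * A live context never has version 0, which is what makes "mm->context = 0"
 * a valid invalidation (see the NOTE below).
 */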
/*
 * NOTE! The way this is set up, the high bits of the "asn_cache" (and
 * the "mm->context") are the ASN _version_ code. A version of 0 is
 * always considered invalid, so to invalidate another process you only
 * need to do "p->mm->context = 0".
 *
 * If we need more ASNs than the processor has, we invalidate the old
 * user TLBs (tbiap()) and start a new ASN version. That will automatically
 * force a new asn for any other processes the next time they want to
 * run.
 */
#ifndef __EXTERN_INLINE
#define __EXTERN_INLINE extern inline
#define __MMU_EXTERN_INLINE
#endif
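
/*
 * How the __EXTERN_INLINE dance is typically consumed (a hedged sketch;
 * the consuming translation unit shown here is an assumption, not taken
 * from this header): exactly one .c file pre-defines __EXTERN_INLINE so
 * that the functions below are emitted out of line once:
 *
 *      #define __EXTERN_INLINE
 *      #include <asm/mmu_context.h>
 *
 * Everywhere else, __EXTERN_INLINE expands to "extern inline" and the
 * bodies are inlined at the call sites.
 */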
extern inline unsigned long
__get_new_mm_context(struct mm_struct *mm, long cpu)
{
        unsigned long asn = cpu_last_asn(cpu);
        unsigned long next = asn + 1;

        if ((asn & HARDWARE_ASN_MASK) >= MAX_ASN) {
                /* Out of hardware ASNs: flush and bump the version bits.  */
                tbiap();
                imb();
                next = (asn & ~HARDWARE_ASN_MASK) + ASN_FIRST_VERSION;
        }
        cpu_last_asn(cpu) = next;
        return next;
}
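
/*
 * Worked example (illustrative, assuming EV5 with MAX_ASN == 127): if
 * cpu_last_asn(cpu) holds 0x27f (version 2, hardware ASN 127), hardware
 * ASNs are exhausted, so the user TLB is flushed and the next value
 * becomes (0x27f & ~0xff) + 0x100 == 0x300, i.e. version 3, ASN 0.
 * Every mm still carrying a version-2 context now mismatches and gets a
 * fresh ASN the next time it is switched in.
 */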
__EXTERN_INLINE void
ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
              struct task_struct *next)
{
        /* Check if our ASN is of an older version, and thus invalid. */
        unsigned long asn;
        unsigned long mmc;
        long cpu = smp_processor_id();

#ifdef CONFIG_SMP
        cpu_data[cpu].asn_lock = 1;
        barrier();
#endif
        asn = cpu_last_asn(cpu);
        mmc = next_mm->context[cpu];
        if ((mmc ^ asn) & ~HARDWARE_ASN_MASK) {
                mmc = __get_new_mm_context(next_mm, cpu);
                next_mm->context[cpu] = mmc;
        }
#ifdef CONFIG_SMP
        else
                cpu_data[cpu].need_new_asn = 1;
#endif

        /* Always update the PCB ASN.  Another thread may have allocated
           a new mm->context (via flush_tlb_mm) without the ASN serial
           number wrapping.  We have no way to detect when this is needed.  */
        task_thread_info(next)->pcb.asn = mmc & HARDWARE_ASN_MASK;
}
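
/*
 * Note on the version check above (editor's example, not from the
 * original source): (mmc ^ asn) & ~HARDWARE_ASN_MASK is non-zero exactly
 * when the two values differ in their version bits.  E.g. mmc == 0x205
 * against asn == 0x310 gives 0x115 & ~0xff == 0x100, so the stale
 * version-2 context is replaced; mmc == 0x305 against the same asn gives
 * 0x015 & ~0xff == 0, and hardware ASN 5 is simply reused.
 */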
__EXTERN_INLINE void
ev4_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
              struct task_struct *next)
{
        /* As described, ASNs are broken for TLB usage.  But we can
           optimize for switching between threads -- if the mm is
           unchanged from current we needn't flush.  */
        /* ??? May not be needed because EV4 PALcode recognizes that
           ASNs are broken and does a tbiap itself on swpctx, under
           the "Must set ASN or flush" rule.  At least this is true
           for a 1992 SRM, reports Joseph Martin (jmartin@hlo.dec.com).
           I'm going to leave this here anyway, just to Be Sure.  -- r~  */
        if (prev_mm != next_mm)
                tbiap();

        /* Do continue to allocate ASNs, because we can still use them
           to avoid flushing the icache.  */
        ev5_switch_mm(prev_mm, next_mm, next);
}
extern void __load_new_mm_context(struct mm_struct *);
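
/*
 * A rough sketch of what the out-of-line __load_new_mm_context() does
 * (a hedged paraphrase; compare arch/alpha/mm/init.c, the exact body may
 * differ): allocate a context, stash it, update the PCB, and swap the
 * hardware context:
 *
 *      mmc = __get_new_mm_context(next_mm, smp_processor_id());
 *      next_mm->context[smp_processor_id()] = mmc;
 *      pcb = &current_thread_info()->pcb;
 *      pcb->asn = mmc & HARDWARE_ASN_MASK;
 *      pcb->ptbr = ((unsigned long) next_mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
 *      __reload_thread(pcb);
 */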
#ifdef CONFIG_SMP
#define check_mmu_context()                                     \
do {                                                            \
        int cpu = smp_processor_id();                           \
        cpu_data[cpu].asn_lock = 0;                             \
        barrier();                                              \
        if (cpu_data[cpu].need_new_asn) {                       \
                struct mm_struct *mm = current->active_mm;      \
                cpu_data[cpu].need_new_asn = 0;                 \
                if (!mm->context[cpu])                          \
                        __load_new_mm_context(mm);              \
        }                                                       \
} while (0)
#else
#define check_mmu_context()     do { } while (0)
#endif
__EXTERN_INLINE void
ev5_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
{
        __load_new_mm_context(next_mm);
}

__EXTERN_INLINE void
ev4_activate_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm)
{
        __load_new_mm_context(next_mm);
        tbiap();
}
#define deactivate_mm(tsk,mm)   do { } while (0)

#ifdef CONFIG_ALPHA_GENERIC
# define switch_mm(a,b,c)       alpha_mv.mv_switch_mm((a),(b),(c))
# define activate_mm(x,y)       alpha_mv.mv_activate_mm((x),(y))
#else
# ifdef CONFIG_ALPHA_EV4
#  define switch_mm(a,b,c)      ev4_switch_mm((a),(b),(c))
#  define activate_mm(x,y)      ev4_activate_mm((x),(y))
# else
#  define switch_mm(a,b,c)      ev5_switch_mm((a),(b),(c))
#  define activate_mm(x,y)      ev5_activate_mm((x),(y))
# endif
#endif
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        int i;

        for_each_online_cpu(i)
                mm->context[i] = 0;
        if (tsk != current)
                task_thread_info(tsk)->pcb.ptbr
                  = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
        return 0;
}
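
/*
 * Worked arithmetic for the PTBR value above (illustrative, assuming the
 * Alpha's 8 KB pages, PAGE_SHIFT == 13): the PCB wants the page table
 * base as a physical page frame number, so a pgd at kernel virtual
 * 0xfffffc0000310000 with IDENT_ADDR == 0xfffffc0000000000 maps to
 * physical 0x310000, giving PFN 0x310000 >> 13 == 0x188.
 */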
static inline void
destroy_context(struct mm_struct *mm)
{
        /* Nothing to do.  */
}

static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
        task_thread_info(tsk)->pcb.ptbr
          = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
}
#ifdef __MMU_EXTERN_INLINE
#undef __EXTERN_INLINE
#undef __MMU_EXTERN_INLINE
#endif

#endif /* __ALPHA_MMU_CONTEXT_H */