arch/mips/mm/tlb-r3k.c
/*
 * r2300.c: R2000 and R3000 specific mmu/cache code.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 *
 * with a lot of changes to make this thing work for R3000s
 * Tx39XX R4k style caches added. HK
 * Copyright (C) 1998, 1999, 2000 Harald Koerfgen
 * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
 * Copyright (C) 2002  Ralf Baechle
 * Copyright (C) 2002  Maciej W. Rozycki
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/system.h>
#include <asm/isadep.h>
#include <asm/io.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>

#undef DEBUG_TLB

extern void build_tlb_refill_handler(void);

/* CP0 hazard avoidance. */
#define BARRIER				\
	__asm__ __volatile__(		\
		".set	push\n\t"	\
		".set	noreorder\n\t"	\
		"nop\n\t"		\
		".set	pop\n\t")

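/*
 * Non-zero on cores that implement the CP0 Wired register (the TX39XX
 * parts handled below); presumably set by the CPU/board setup code.
 * Classic R3000s have no such register and instead treat the first
 * eight TLB entries as wired by convention, hence the fallback of 8
 * in local_flush_tlb_all() and the "wired < 8" limit further down.
 */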
int r3k_have_wired_reg;		/* should be in cpu_data? */

/* TLB operations. */
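/*
 * Invalidate every non-wired entry.  The R3000 Index register takes the
 * entry number in bits 13:8, hence "entry << 8"; each slot is refilled
 * with EntryLo0 = 0 and a unique VPN up in KSEG0, an unmapped segment
 * that never goes through the TLB, so the slot can never match again.
 */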
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

#ifdef DEBUG_TLB
	printk("[tlball]");
#endif

	local_irq_save(flags);
	old_ctx = read_c0_entryhi() & ASID_MASK;
	write_c0_entrylo0(0);
	entry = r3k_have_wired_reg ? read_c0_wired() : 8;
	for (; entry < current_cpu_data.tlbsize; entry++) {
		write_c0_index(entry << 8);
		write_c0_entryhi((entry | 0x80000) << 12);
		BARRIER;
		tlb_write_indexed();
	}
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
}

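/*
 * Flushing an entire address space is done lazily: if this mm has a
 * live ASID on this CPU, drop_mmu_context() hands it a fresh one, which
 * makes any stale TLB entries tagged with the old ASID unreachable.
 */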
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
#ifdef DEBUG_TLB
		printk("[tlbmm<%lu>]", (unsigned long)cpu_context(cpu, mm));
#endif
		drop_mmu_context(mm, cpu);
	}
}

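/*
 * Flush a user virtual address range.  When the range covers no more
 * pages than the TLB has entries, each page is probed and a matching
 * entry is invalidated in place; for anything larger it is cheaper to
 * drop the whole context as in local_flush_tlb_mm().
 */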
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

#ifdef DEBUG_TLB
		printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
			cpu_context(cpu, mm) & ASID_MASK, start, end);
#endif
		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		if (size <= current_cpu_data.tlbsize) {
			int oldpid = read_c0_entryhi() & ASID_MASK;
			int newpid = cpu_context(cpu, mm) & ASID_MASK;

			start &= PAGE_MASK;
			end += PAGE_SIZE - 1;
			end &= PAGE_MASK;
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += PAGE_SIZE;	/* BARRIER */
				tlb_probe();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entryhi(KSEG0);
				if (idx < 0)		/* BARRIER */
					continue;
				tlb_write_indexed();
			}
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		local_irq_restore(flags);
	}
}

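/*
 * Same idea for a kernel virtual address range; there is no mm context
 * involved, so only the current EntryHi is saved and restored around
 * the probe loop.
 */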
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

#ifdef DEBUG_TLB
	printk("[tlbrange<0x%08lx,0x%08lx>]", start, end);
#endif
	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (size <= current_cpu_data.tlbsize) {
		int pid = read_c0_entryhi();

		start &= PAGE_MASK;
		end += PAGE_SIZE - 1;
		end &= PAGE_MASK;

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += PAGE_SIZE;		/* BARRIER */
			tlb_probe();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entryhi(KSEG0);
			if (idx < 0)			/* BARRIER */
				continue;
			tlb_write_indexed();
		}
		write_c0_entryhi(pid);
	} else {
		local_flush_tlb_all();
	}
	local_irq_restore(flags);
}

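/*
 * Flush a single user page: probe for (page | ASID) and, if it is
 * present, overwrite the entry with EntryLo0 = 0 and a KSEG0 EntryHi
 * so it can never match again.
 */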
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (!vma || cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

#ifdef DEBUG_TLB
		printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
#endif
		newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK;
		page &= PAGE_MASK;
		local_irq_save(flags);
		oldpid = read_c0_entryhi() & ASID_MASK;
		write_c0_entryhi(page | newpid);
		BARRIER;
		tlb_probe();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entryhi(KSEG0);
		if (idx < 0)				/* BARRIER */
			goto finish;
		tlb_write_indexed();

finish:
		write_c0_entryhi(oldpid);
		local_irq_restore(flags);
	}
}

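/*
 * (Re)load the translation for one page after a fault.  If the address
 * is already present the entry is rewritten in place with
 * tlb_write_indexed(); otherwise a random slot is chosen with
 * tlb_write_random().
 */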
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	pid = read_c0_entryhi() & ASID_MASK;

#ifdef DEBUG_TLB
	if ((pid != (cpu_context(smp_processor_id(), vma->vm_mm) & ASID_MASK)) ||
	    (cpu_context(smp_processor_id(), vma->vm_mm) == 0)) {
		printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n",
		       (cpu_context(smp_processor_id(), vma->vm_mm)), pid);
	}
#endif

	local_irq_save(flags);
	address &= PAGE_MASK;
	write_c0_entryhi(address | pid);
	BARRIER;
	tlb_probe();
	idx = read_c0_index();
	write_c0_entrylo0(pte_val(pte));
	write_c0_entryhi(address | pid);
	if (idx < 0) {					/* BARRIER */
		tlb_write_random();
	} else {
		tlb_write_indexed();
	}
	write_c0_entryhi(pid);
	local_irq_restore(flags);
}

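/*
 * Pin a translation so that it survives local_flush_tlb_all().  On
 * TX39XX the entry is placed below the CP0 Wired limit; on plain R3000
 * it goes into one of the first eight slots, which the flush loop
 * skips.  entrylo1 only exists for interface compatibility with the
 * R4000-style version and is ignored here; pagemask is used only on
 * the TX39XX path.  A board setup file might call it roughly like
 * this (illustrative placeholder values, not from any real board):
 *
 *	add_wired_entry(lo0, 0, vaddr & PAGE_MASK, 0);
 */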
void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
			    unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long old_ctx;
	static unsigned long wired = 0;

	if (r3k_have_wired_reg) {			/* TX39XX */
		unsigned long old_pagemask;
		unsigned long w;

#ifdef DEBUG_TLB
		printk("[tlbwired<entry lo0 %08lx, hi %08lx, pagemask %08lx>]\n",
		       entrylo0, entryhi, pagemask);
#endif

		local_irq_save(flags);
		/* Save old context and create impossible VPN2 value */
		old_ctx = read_c0_entryhi() & ASID_MASK;
		old_pagemask = read_c0_pagemask();
		w = read_c0_wired();
		write_c0_wired(w + 1);
		write_c0_index(w << 8);
		write_c0_pagemask(pagemask);
		write_c0_entryhi(entryhi);
		write_c0_entrylo0(entrylo0);
		BARRIER;
		tlb_write_indexed();

		write_c0_entryhi(old_ctx);
		write_c0_pagemask(old_pagemask);
		local_flush_tlb_all();
		local_irq_restore(flags);

	} else if (wired < 8) {
#ifdef DEBUG_TLB
		printk("[tlbwired<entry lo0 %08lx, hi %08lx>]\n",
		       entrylo0, entryhi);
#endif

		local_irq_save(flags);
		old_ctx = read_c0_entryhi() & ASID_MASK;
		write_c0_entrylo0(entrylo0);
		write_c0_entryhi(entryhi);
		write_c0_index(wired);
		wired++;				/* BARRIER */
		tlb_write_indexed();
		write_c0_entryhi(old_ctx);
		local_flush_tlb_all();
		local_irq_restore(flags);
	}
}

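/*
 * Per-CPU TLB bring-up: start from an empty TLB, then install the
 * run-time generated TLB refill exception handler (see tlbex.c).
 */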
void __cpuinit tlb_init(void)
{
	local_flush_tlb_all();

	build_tlb_refill_handler();
}