/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */


/*
 * Special variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
                                   int retry, int wait)
{
        preempt_disable();

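        /*
         * MIPS MT SMP and SMTC kernels run as virtual processors sharing a
         * single primary cache, so the local call to func() below already
         * covers every CPU; all other SMP configurations must broadcast.
         */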
#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
        smp_call_function(func, info, retry, wait);
#endif
        func(info);
        preempt_enable();
}

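/*
 * Index-based cacheops act only on the issuing core's caches and bypass
 * the coherence hardware, which is presumably why they are considered
 * unsafe on CMP systems.
 */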
#if defined(CONFIG_MIPS_CMP)
#define cpu_has_safe_index_cacheops 0
#else
#define cpu_has_safe_index_cacheops 1
#endif

/*
 * Must die.
 */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;

/*
 * Dummy cache handling routines for machines without board caches
 */
static void cache_noop(void) {}

static struct bcache_ops no_sc_ops = {
        .bc_enable = (void *)cache_noop,
        .bc_disable = (void *)cache_noop,
        .bc_wback_inv = (void *)cache_noop,
        .bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;

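/*
 * PRId layout: bits 15:8 hold the implementation number (0x20 = R4600),
 * bits 7:0 the revision; the 0xfffffff0 mask matches any 1.x or 2.x part.
 */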
#define cpu_is_r4600_v1_x()     ((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()     ((read_c0_prid() & 0xfffffff0) == 0x00002020)

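/*
 * R4600 v2.x parts reportedly mishandle a hit cacheop issued too soon
 * after a store; the uncached KSEG1 load drains the write buffer first
 * (rationale assumed from the workaround's structure).  v1.x parts
 * instead want a few nops ahead of the cacheop.
 */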
#define R4600_HIT_CACHEOP_WAR_IMPL                                      \
do {                                                                    \
        if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())            \
                *(volatile unsigned long *)CKSEG1;                      \
        if (R4600_V1_HIT_CACHEOP_WAR)                                   \
                __asm__ __volatile__("nop;nop;nop;nop");                \
} while (0)

static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
        R4600_HIT_CACHEOP_WAR_IMPL;
        blast_dcache32_page(addr);
}

static void __cpuinit r4k_blast_dcache_page_setup(void)
{
        unsigned long  dc_lsize = cpu_dcache_line_size();

        if (dc_lsize == 0)
                r4k_blast_dcache_page = (void *)cache_noop;
        else if (dc_lsize == 16)
                r4k_blast_dcache_page = blast_dcache16_page;
        else if (dc_lsize == 32)
                r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
}

static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
{
        unsigned long dc_lsize = cpu_dcache_line_size();

        if (dc_lsize == 0)
                r4k_blast_dcache_page_indexed = (void *)cache_noop;
        else if (dc_lsize == 16)
                r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
        else if (dc_lsize == 32)
                r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
}

static void (* r4k_blast_dcache)(void);

static void __cpuinit r4k_blast_dcache_setup(void)
{
        unsigned long dc_lsize = cpu_dcache_line_size();

        if (dc_lsize == 0)
                r4k_blast_dcache = (void *)cache_noop;
        else if (dc_lsize == 16)
                r4k_blast_dcache = blast_dcache16;
        else if (dc_lsize == 32)
                r4k_blast_dcache = blast_dcache32;
}

/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order) \
        __asm__ __volatile__( \
                "b\t1f\n\t" \
                ".align\t" #order "\n\t" \
                "1:\n\t" \
                )
#define CACHE32_UNROLL32_ALIGN  JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)

static inline void blast_r4600_v1_icache32(void)
{
        unsigned long flags;

        local_irq_save(flags);
        blast_icache32();
        local_irq_restore(flags);
}

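/*
 * Invalidating the whole icache by index would also hit the lines this
 * very loop executes from.  The alignment macros pin the code to a known
 * 1kB-chunk parity, so each loop below only blasts the chunks it is not
 * currently running in.
 */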
static inline void tx49_blast_icache32(void)
{
        unsigned long start = INDEX_BASE;
        unsigned long end = start + current_cpu_data.icache.waysize;
        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
        unsigned long ws_end = current_cpu_data.icache.ways <<
                               current_cpu_data.icache.waybit;
        unsigned long ws, addr;

        CACHE32_UNROLL32_ALIGN2;
        /* I'm in even chunk.  blast odd chunks */
        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
                        cache32_unroll32(addr|ws, Index_Invalidate_I);
        CACHE32_UNROLL32_ALIGN;
        /* I'm in odd chunk.  blast even chunks */
        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x400 * 2)
                        cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
        unsigned long flags;

        local_irq_save(flags);
        blast_icache32_page_indexed(page);
        local_irq_restore(flags);
}

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
        unsigned long indexmask = current_cpu_data.icache.waysize - 1;
        unsigned long start = INDEX_BASE + (page & indexmask);
        unsigned long end = start + PAGE_SIZE;
        unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
        unsigned long ws_end = current_cpu_data.icache.ways <<
                               current_cpu_data.icache.waybit;
        unsigned long ws, addr;

        CACHE32_UNROLL32_ALIGN2;
        /* I'm in even chunk.  blast odd chunks */
        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
                        cache32_unroll32(addr|ws, Index_Invalidate_I);
        CACHE32_UNROLL32_ALIGN;
        /* I'm in odd chunk.  blast even chunks */
        for (ws = 0; ws < ws_end; ws += ws_inc)
                for (addr = start; addr < end; addr += 0x400 * 2)
                        cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static void (* r4k_blast_icache_page)(unsigned long addr);

static void __cpuinit r4k_blast_icache_page_setup(void)
{
        unsigned long ic_lsize = cpu_icache_line_size();

        if (ic_lsize == 0)
                r4k_blast_icache_page = (void *)cache_noop;
        else if (ic_lsize == 16)
                r4k_blast_icache_page = blast_icache16_page;
        else if (ic_lsize == 32)
                r4k_blast_icache_page = blast_icache32_page;
        else if (ic_lsize == 64)
                r4k_blast_icache_page = blast_icache64_page;
}


static void (* r4k_blast_icache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
{
        unsigned long ic_lsize = cpu_icache_line_size();

        if (ic_lsize == 0)
                r4k_blast_icache_page_indexed = (void *)cache_noop;
        else if (ic_lsize == 16)
                r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
        else if (ic_lsize == 32) {
                if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
                        r4k_blast_icache_page_indexed =
                                blast_icache32_r4600_v1_page_indexed;
                else if (TX49XX_ICACHE_INDEX_INV_WAR)
                        r4k_blast_icache_page_indexed =
                                tx49_blast_icache32_page_indexed;
                else
                        r4k_blast_icache_page_indexed =
                                blast_icache32_page_indexed;
        } else if (ic_lsize == 64)
                r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

static void (* r4k_blast_icache)(void);

static void __cpuinit r4k_blast_icache_setup(void)
{
        unsigned long ic_lsize = cpu_icache_line_size();

        if (ic_lsize == 0)
                r4k_blast_icache = (void *)cache_noop;
        else if (ic_lsize == 16)
                r4k_blast_icache = blast_icache16;
        else if (ic_lsize == 32) {
                if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
                        r4k_blast_icache = blast_r4600_v1_icache32;
                else if (TX49XX_ICACHE_INDEX_INV_WAR)
                        r4k_blast_icache = tx49_blast_icache32;
                else
                        r4k_blast_icache = blast_icache32;
        } else if (ic_lsize == 64)
                r4k_blast_icache = blast_icache64;
}

static void (* r4k_blast_scache_page)(unsigned long addr);

static void __cpuinit r4k_blast_scache_page_setup(void)
{
        unsigned long sc_lsize = cpu_scache_line_size();

        if (scache_size == 0)
                r4k_blast_scache_page = (void *)cache_noop;
        else if (sc_lsize == 16)
                r4k_blast_scache_page = blast_scache16_page;
        else if (sc_lsize == 32)
                r4k_blast_scache_page = blast_scache32_page;
        else if (sc_lsize == 64)
                r4k_blast_scache_page = blast_scache64_page;
        else if (sc_lsize == 128)
                r4k_blast_scache_page = blast_scache128_page;
}

static void (* r4k_blast_scache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_scache_page_indexed_setup(void)
{
        unsigned long sc_lsize = cpu_scache_line_size();

        if (scache_size == 0)
                r4k_blast_scache_page_indexed = (void *)cache_noop;
        else if (sc_lsize == 16)
                r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
        else if (sc_lsize == 32)
                r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
        else if (sc_lsize == 64)
                r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
        else if (sc_lsize == 128)
                r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}

static void (* r4k_blast_scache)(void);

static void __cpuinit r4k_blast_scache_setup(void)
{
        unsigned long sc_lsize = cpu_scache_line_size();

        if (scache_size == 0)
                r4k_blast_scache = (void *)cache_noop;
        else if (sc_lsize == 16)
                r4k_blast_scache = blast_scache16;
        else if (sc_lsize == 32)
                r4k_blast_scache = blast_scache32;
        else if (sc_lsize == 64)
                r4k_blast_scache = blast_scache64;
        else if (sc_lsize == 128)
                r4k_blast_scache = blast_scache128;
}

static inline void local_r4k___flush_cache_all(void * args)
{
#if defined(CONFIG_CPU_LOONGSON2)
        r4k_blast_scache();
        return;
#endif
        r4k_blast_dcache();
        r4k_blast_icache();

        switch (current_cpu_type()) {
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400SC:
        case CPU_R4400MC:
        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
                r4k_blast_scache();
        }
}

static void r4k___flush_cache_all(void)
{
        r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
}

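/*
 * An mm needs flushing only while some CPU still holds a live ASID for
 * it; on MT kernels the context may be live on any online CPU (VPE), so
 * all of them have to be checked.
 */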
static inline int has_valid_asid(const struct mm_struct *mm)
{
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
        int i;

        for_each_online_cpu(i)
                if (cpu_context(i, mm))
                        return 1;

        return 0;
#else
        return cpu_context(smp_processor_id(), mm);
#endif
}

static void r4k__flush_cache_vmap(void)
{
        r4k_blast_dcache();
}

static void r4k__flush_cache_vunmap(void)
{
        r4k_blast_dcache();
}

static inline void local_r4k_flush_cache_range(void * args)
{
        struct vm_area_struct *vma = args;
        int exec = vma->vm_flags & VM_EXEC;

        if (!(has_valid_asid(vma->vm_mm)))
                return;

        r4k_blast_dcache();
        if (exec)
                r4k_blast_icache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
        unsigned long start, unsigned long end)
{
        int exec = vma->vm_flags & VM_EXEC;

        if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
                r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
}

static inline void local_r4k_flush_cache_mm(void * args)
{
        struct mm_struct *mm = args;

        if (!has_valid_asid(mm))
                return;

        /*
         * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if
         * we only flush the primary caches, while R10000 and R12000 behave
         * sanely.  R4000SC and R4400SC indexed S-cache ops also invalidate
         * the primary caches, so we can bail out early.
         */
        if (current_cpu_type() == CPU_R4000SC ||
            current_cpu_type() == CPU_R4000MC ||
            current_cpu_type() == CPU_R4400SC ||
            current_cpu_type() == CPU_R4400MC) {
                r4k_blast_scache();
                return;
        }

        r4k_blast_dcache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
        if (!cpu_has_dc_aliases)
                return;

        r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
}

struct flush_cache_page_args {
        struct vm_area_struct *vma;
        unsigned long addr;
        unsigned long pfn;
};

static inline void local_r4k_flush_cache_page(void *args)
{
        struct flush_cache_page_args *fcp_args = args;
        struct vm_area_struct *vma = fcp_args->vma;
        unsigned long addr = fcp_args->addr;
        struct page *page = pfn_to_page(fcp_args->pfn);
        int exec = vma->vm_flags & VM_EXEC;
        struct mm_struct *mm = vma->vm_mm;
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
        void *vaddr;

        /*
         * If the mm owns no valid ASID yet, it cannot possibly have gotten
         * this page into the cache.
         */
        if (!has_valid_asid(mm))
                return;

        addr &= PAGE_MASK;
        pgdp = pgd_offset(mm, addr);
        pudp = pud_offset(pgdp, addr);
        pmdp = pmd_offset(pudp, addr);
        ptep = pte_offset(pmdp, addr);

        /*
         * If the page isn't marked valid, the page cannot possibly be
         * in the cache.
         */
        if (!(pte_present(*ptep)))
                return;

        if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
                vaddr = NULL;
        else {
                /*
                 * Use kmap_coherent or kmap_atomic to do flushes for
                 * another ASID than the current one.
                 */
                if (cpu_has_dc_aliases)
                        vaddr = kmap_coherent(page, addr);
                else
                        vaddr = kmap_atomic(page, KM_USER0);
                addr = (unsigned long)vaddr;
        }

        if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
                r4k_blast_dcache_page(addr);
                if (exec && !cpu_icache_snoops_remote_store)
                        r4k_blast_scache_page(addr);
        }
        if (exec) {
                if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
                        int cpu = smp_processor_id();

                        if (cpu_context(cpu, mm) != 0)
                                drop_mmu_context(mm, cpu);
                } else
                        r4k_blast_icache_page(addr);
        }

        if (vaddr) {
                if (cpu_has_dc_aliases)
                        kunmap_coherent();
                else
                        kunmap_atomic(vaddr, KM_USER0);
        }
}

static void r4k_flush_cache_page(struct vm_area_struct *vma,
        unsigned long addr, unsigned long pfn)
{
        struct flush_cache_page_args args;

        args.vma = vma;
        args.addr = addr;
        args.pfn = pfn;

        r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
}

static inline void local_r4k_flush_data_cache_page(void * addr)
{
        r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
        if (in_atomic())
                local_r4k_flush_data_cache_page((void *)addr);
        else
                r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr,
                                1, 1);
}

struct flush_icache_range_args {
        unsigned long start;
        unsigned long end;
};

static inline void local_r4k_flush_icache_range(void *args)
{
        struct flush_icache_range_args *fir_args = args;
        unsigned long start = fir_args->start;
        unsigned long end = fir_args->end;

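        /*
         * Write the dcache back first so the new instructions are visible
         * to the icache refill, unless the icache fills straight from the
         * dcache anyway.
         */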
        if (!cpu_has_ic_fills_f_dc) {
                if (end - start >= dcache_size) {
                        r4k_blast_dcache();
                } else {
                        R4600_HIT_CACHEOP_WAR_IMPL;
                        protected_blast_dcache_range(start, end);
                }
        }

        if (end - start > icache_size)
                r4k_blast_icache();
        else
                protected_blast_icache_range(start, end);
}

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
        struct flush_icache_range_args args;

        args.start = start;
        args.end = end;

        r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
        instruction_hazard();
}

#ifdef CONFIG_DMA_NONCOHERENT

static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
        /* Catch bad driver code */
        BUG_ON(size == 0);

        if (cpu_has_inclusive_pcaches) {
                if (size >= scache_size)
                        r4k_blast_scache();
                else
                        blast_scache_range(addr, addr + size);
                return;
        }

        /*
         * Either no secondary cache or the available caches don't have the
         * subset property so we have to flush the primary caches
         * explicitly
         */
        if (cpu_has_safe_index_cacheops && size >= dcache_size) {
                r4k_blast_dcache();
        } else {
                R4600_HIT_CACHEOP_WAR_IMPL;
                blast_dcache_range(addr, addr + size);
        }

        bc_wback_inv(addr, size);
}

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
        /* Catch bad driver code */
        BUG_ON(size == 0);

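        /*
         * Invalidate without writeback: data arriving from the device must
         * not be overwritten by stale dirty cache lines.
         */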
        if (cpu_has_inclusive_pcaches) {
                if (size >= scache_size)
                        r4k_blast_scache();
                else
                        blast_inv_scache_range(addr, addr + size);
                return;
        }

        if (cpu_has_safe_index_cacheops && size >= dcache_size) {
                r4k_blast_dcache();
        } else {
                R4600_HIT_CACHEOP_WAR_IMPL;
                blast_inv_dcache_range(addr, addr + size);
        }

        bc_inv(addr, size);
}
#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * Since we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void * arg)
{
        unsigned long ic_lsize = cpu_icache_line_size();
        unsigned long dc_lsize = cpu_dcache_line_size();
        unsigned long sc_lsize = cpu_scache_line_size();
        unsigned long addr = (unsigned long) arg;

        R4600_HIT_CACHEOP_WAR_IMPL;
        if (dc_lsize)
                protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
        if (!cpu_icache_snoops_remote_store && scache_size)
                protected_writeback_scache_line(addr & ~(sc_lsize - 1));
        if (ic_lsize)
                protected_flush_icache_line(addr & ~(ic_lsize - 1));
        if (MIPS4K_ICACHE_REFILL_WAR) {
                __asm__ __volatile__ (
                        ".set push\n\t"
                        ".set noat\n\t"
                        ".set mips3\n\t"
#ifdef CONFIG_32BIT
                        "la     $at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
                        "dla    $at,1f\n\t"
#endif
                        "cache  %0,($at)\n\t"
                        "nop; nop; nop\n"
                        "1:\n\t"
                        ".set pop"
                        :
                        : "i" (Hit_Invalidate_I));
        }
        if (MIPS_CACHE_SYNC_WAR)
                __asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
        r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
}

static void r4k_flush_icache_all(void)
{
        if (cpu_has_vtag_icache)
                r4k_blast_icache();
}

static inline void rm7k_erratum31(void)
{
        const unsigned long ic_lsize = 32;
        unsigned long addr;

        /* RM7000 erratum #31. The icache is screwed at startup. */
        write_c0_taglo(0);
        write_c0_taghi(0);

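        /*
         * Walk the first 4kB of indices; the cacheops 0x1000 apart hit the
         * same index in each of the four ways.  The invalidate / Fill /
         * invalidate sequence is assumed to be what the erratum #31
         * workaround requires.
         */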
        for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
                __asm__ __volatile__ (
                        ".set push\n\t"
                        ".set noreorder\n\t"
                        ".set mips3\n\t"
                        "cache\t%1, 0(%0)\n\t"
                        "cache\t%1, 0x1000(%0)\n\t"
                        "cache\t%1, 0x2000(%0)\n\t"
                        "cache\t%1, 0x3000(%0)\n\t"
                        "cache\t%2, 0(%0)\n\t"
                        "cache\t%2, 0x1000(%0)\n\t"
                        "cache\t%2, 0x2000(%0)\n\t"
                        "cache\t%2, 0x3000(%0)\n\t"
                        "cache\t%1, 0(%0)\n\t"
                        "cache\t%1, 0x1000(%0)\n\t"
                        "cache\t%1, 0x2000(%0)\n\t"
                        "cache\t%1, 0x3000(%0)\n\t"
                        ".set pop\n"
                        :
                        : "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
        }
}

static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way",
        "3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};

static void __cpuinit probe_pcache(void)
{
        struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int config = read_c0_config();
        unsigned int prid = read_c0_prid();
        unsigned long config1;
        unsigned int lsize;

        switch (c->cputype) {
        case CPU_R4600:                 /* QED style two way caches? */
        case CPU_R4700:
        case CPU_R5000:
        case CPU_NEVADA:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 2;
                c->icache.waybit = __ffs(icache_size/2);

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 2;
                c->dcache.waybit= __ffs(dcache_size/2);

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                break;

        case CPU_R5432:
        case CPU_R5500:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 2;
                c->icache.waybit= 0;

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 2;
                c->dcache.waybit = 0;

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                break;

        case CPU_TX49XX:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 4;
                c->icache.waybit= 0;

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 4;
                c->dcache.waybit = 0;

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                c->options |= MIPS_CPU_PREFETCH;
                break;

        case CPU_R4000PC:
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400PC:
        case CPU_R4400SC:
        case CPU_R4400MC:
        case CPU_R4300:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 1;
                c->icache.waybit = 0;   /* doesn't matter */

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 1;
                c->dcache.waybit = 0;   /* does not matter */

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                break;

        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
                icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
                c->icache.linesz = 64;
                c->icache.ways = 2;
                c->icache.waybit = 0;

                dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
                c->dcache.linesz = 32;
                c->dcache.ways = 2;
                c->dcache.waybit = 0;

                c->options |= MIPS_CPU_PREFETCH;
                break;

        case CPU_VR4133:
                write_c0_config(config & ~VR41_CONF_P4K);
        case CPU_VR4131:
                /* Workaround for cache instruction bug of VR4131 */
                if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
                    c->processor_id == 0x0c82U) {
                        config |= 0x00400000U;
                        if (c->processor_id == 0x0c80U)
                                config |= VR41_CONF_BP;
                        write_c0_config(config);
                } else
                        c->options |= MIPS_CPU_CACHE_CDEX_P;

                icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 2;
                c->icache.waybit = __ffs(icache_size/2);

                dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 2;
                c->dcache.waybit = __ffs(dcache_size/2);
                break;

        case CPU_VR41XX:
        case CPU_VR4111:
        case CPU_VR4121:
        case CPU_VR4122:
        case CPU_VR4181:
        case CPU_VR4181A:
                icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 1;
                c->icache.waybit = 0;   /* doesn't matter */

                dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 1;
                c->dcache.waybit = 0;   /* does not matter */

                c->options |= MIPS_CPU_CACHE_CDEX_P;
                break;

        case CPU_RM7000:
                rm7k_erratum31();

        case CPU_RM9000:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                c->icache.ways = 4;
                c->icache.waybit = __ffs(icache_size / c->icache.ways);

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                c->dcache.ways = 4;
                c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

#if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
                c->options |= MIPS_CPU_CACHE_CDEX_P;
#endif
                c->options |= MIPS_CPU_PREFETCH;
                break;

        case CPU_LOONGSON2:
                icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
                c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
                if (prid & 0x3)
                        c->icache.ways = 4;
                else
                        c->icache.ways = 2;
                c->icache.waybit = 0;

                dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
                c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
                if (prid & 0x3)
                        c->dcache.ways = 4;
                else
                        c->dcache.ways = 2;
                c->dcache.waybit = 0;
                break;

        default:
                if (!(config & MIPS_CONF_M))
                        panic("Don't know how to probe P-caches on this cpu.");

                /*
                 * We seem to be a MIPS32 or MIPS64 CPU, so let's probe
                 * the I-cache ...
                 */
                config1 = read_c0_config1();

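                /*
                 * Config1 cache fields (MIPS32/64 PRA): IS = bits 24:22,
                 * IL = 21:19, IA = 18:16; DS = 15:13, DL = 12:10, DA = 9:7.
                 * Line size is 2 << L, sets per way 64 << S, ways 1 + A;
                 * L == 0 means the cache is absent.
                 */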
                if ((lsize = ((config1 >> 19) & 7)))
                        c->icache.linesz = 2 << lsize;
                else
                        c->icache.linesz = lsize;
                c->icache.sets = 64 << ((config1 >> 22) & 7);
                c->icache.ways = 1 + ((config1 >> 16) & 7);

                icache_size = c->icache.sets *
                              c->icache.ways *
                              c->icache.linesz;
                c->icache.waybit = __ffs(icache_size/c->icache.ways);

                if (config & 0x8)               /* VI bit */
                        c->icache.flags |= MIPS_CACHE_VTAG;

                /*
                 * Now probe the MIPS32 / MIPS64 data cache.
                 */
                c->dcache.flags = 0;

                if ((lsize = ((config1 >> 10) & 7)))
                        c->dcache.linesz = 2 << lsize;
                else
                        c->dcache.linesz= lsize;
                c->dcache.sets = 64 << ((config1 >> 13) & 7);
                c->dcache.ways = 1 + ((config1 >> 7) & 7);

                dcache_size = c->dcache.sets *
                              c->dcache.ways *
                              c->dcache.linesz;
                c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

                c->options |= MIPS_CPU_PREFETCH;
                break;
        }

        /*
         * Processor configuration sanity check for the R4000SC erratum
         * #5.  With page sizes larger than 32kB there is no possibility
         * to get a VCE exception anymore so we don't care about this
         * misconfiguration.  The case is rather theoretical anyway;
         * presumably no vendor is shipping his hardware in the "bad"
         * configuration.
         */
        if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
            !(config & CONF_SC) && c->icache.linesz != 16 &&
            PAGE_SIZE <= 0x8000)
                panic("Improper R4000SC processor configuration detected");

        /* compute a couple of other cache variables */
        c->icache.waysize = icache_size / c->icache.ways;
        c->dcache.waysize = dcache_size / c->dcache.ways;

        c->icache.sets = c->icache.linesz ?
                icache_size / (c->icache.linesz * c->icache.ways) : 0;
        c->dcache.sets = c->dcache.linesz ?
                dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;

        /*
         * R10000 and R12000 P-caches are odd in a positive way.  They're
         * 32kB 2-way virtually indexed caches, so normally they'd suffer
         * from aliases, but magic in the hardware deals with that for us
         * so we don't need to take care ourselves.
         */
        switch (c->cputype) {
        case CPU_20KC:
        case CPU_25KF:
        case CPU_SB1:
        case CPU_SB1A:
                c->dcache.flags |= MIPS_CACHE_PINDEX;
                break;

        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
                break;

        case CPU_24K:
        case CPU_34K:
        case CPU_74K:
        case CPU_1004K:
                if ((read_c0_config7() & (1 << 16))) {
                        /* effectively physically indexed dcache,
                           thus no virtual aliases. */
                        c->dcache.flags |= MIPS_CACHE_PINDEX;
                        break;
                }
        default:
                if (c->dcache.waysize > PAGE_SIZE)
                        c->dcache.flags |= MIPS_CACHE_ALIASES;
        }

        switch (c->cputype) {
        case CPU_20KC:
                /*
                 * Some older 20Kc chips don't have the 'VI' bit in
                 * the config register.
                 */
                c->icache.flags |= MIPS_CACHE_VTAG;
                break;

        case CPU_AU1000:
        case CPU_AU1500:
        case CPU_AU1100:
        case CPU_AU1550:
        case CPU_AU1200:
        case CPU_AU1210:
        case CPU_AU1250:
                c->icache.flags |= MIPS_CACHE_IC_F_DC;
                break;
        }

#ifdef  CONFIG_CPU_LOONGSON2
        /*
         * LOONGSON2 has a 4-way icache, but when an indexed cache op is
         * used, one op acts on all 4 ways.
         */
        c->icache.ways = 1;
#endif

        printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
               icache_size >> 10,
               cpu_has_vtag_icache ? "VIVT" : "VIPT",
               way_string[c->icache.ways], c->icache.linesz);

        printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
               dcache_size >> 10, way_string[c->dcache.ways],
               (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
               (c->dcache.flags & MIPS_CACHE_ALIASES) ?
                        "cache aliases" : "no aliases",
               c->dcache.linesz);
}

/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
static int __cpuinit probe_scache(void)
{
        unsigned long flags, addr, begin, end, pow2;
        unsigned int config = read_c0_config();
        struct cpuinfo_mips *c = &current_cpu_data;
        int tmp;

        if (config & CONF_SC)
                return 0;

        begin = (unsigned long) &_stext;
        begin &= ~((4 * 1024 * 1024) - 1);
        end = begin + (4 * 1024 * 1024);

        /*
         * This is such a bitch, you'd think they would make it easy to do
         * this.  Away you daemons of stupidity!
         */
        local_irq_save(flags);

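        /*
         * Sizing trick: touch a line at every power-of-two offset from the
         * base, store a zero (invalid) tag at the base, then read tags back
         * at increasing power-of-two offsets.  The first offset whose tag
         * reads zero wraps around to the base line's index, and therefore
         * equals the S-cache size.
         */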
        /* Fill each size-multiple cache line with a valid tag. */
        pow2 = (64 * 1024);
        for (addr = begin; addr < end; addr = (begin + pow2)) {
                unsigned long *p = (unsigned long *) addr;
                __asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
                pow2 <<= 1;
        }

        /* Load first line with zero (therefore invalid) tag. */
        write_c0_taglo(0);
        write_c0_taghi(0);
        __asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
        cache_op(Index_Store_Tag_I, begin);
        cache_op(Index_Store_Tag_D, begin);
        cache_op(Index_Store_Tag_SD, begin);

        /* Now search for the wrap around point. */
        pow2 = (128 * 1024);
        tmp = 0;
        for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
                cache_op(Index_Load_Tag_SD, addr);
                __asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
                if (!read_c0_taglo())
                        break;
                pow2 <<= 1;
        }
        local_irq_restore(flags);
        addr -= begin;

        scache_size = addr;
        c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
        c->scache.ways = 1;
        c->dcache.waybit = 0;           /* does not matter */

        return 1;
}

#if defined(CONFIG_CPU_LOONGSON2)
static void __init loongson2_sc_init(void)
{
        struct cpuinfo_mips *c = &current_cpu_data;

        scache_size = 512*1024;
        c->scache.linesz = 32;
        c->scache.ways = 4;
        c->scache.waybit = 0;
        c->scache.waysize = scache_size / (c->scache.ways);
        c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
        pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
               scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

        c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
#endif

extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);

static void __cpuinit setup_scache(void)
{
        struct cpuinfo_mips *c = &current_cpu_data;
        unsigned int config = read_c0_config();
        int sc_present = 0;

        /*
         * Do the probing thing on R4000SC and R4400SC processors.  Other
         * processors don't have a S-cache that would be relevant to the
         * Linux memory management.
         */
        switch (c->cputype) {
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400SC:
        case CPU_R4400MC:
                sc_present = run_uncached(probe_scache);
                if (sc_present)
                        c->options |= MIPS_CPU_CACHE_CDEX_S;
                break;

        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
                scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
                c->scache.linesz = 64 << ((config >> 13) & 1);
                c->scache.ways = 2;
                c->scache.waybit= 0;
                sc_present = 1;
                break;

        case CPU_R5000:
        case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
                r5k_sc_init();
#endif
                return;

        case CPU_RM7000:
        case CPU_RM9000:
#ifdef CONFIG_RM7000_CPU_SCACHE
                rm7k_sc_init();
#endif
                return;

#if defined(CONFIG_CPU_LOONGSON2)
        case CPU_LOONGSON2:
                loongson2_sc_init();
                return;
#endif

        default:
                if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
                    c->isa_level == MIPS_CPU_ISA_M32R2 ||
                    c->isa_level == MIPS_CPU_ISA_M64R1 ||
                    c->isa_level == MIPS_CPU_ISA_M64R2) {
#ifdef CONFIG_MIPS_CPU_SCACHE
                        if (mips_sc_init ()) {
                                scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
                                printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
                                       scache_size >> 10,
                                       way_string[c->scache.ways], c->scache.linesz);
                        }
#else
                        if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
                                panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
                        return;
                }
                sc_present = 0;
        }

        if (!sc_present)
                return;

        /* compute a couple of other cache variables */
        c->scache.waysize = scache_size / c->scache.ways;

        c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

        printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
               scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

        c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}

void au1x00_fixup_config_od(void)
{
        /*
         * c0_config.od (bit 19) was write only (and read as 0)
         * on the early revisions of Alchemy SOCs.  It disables the bus
         * transaction overlapping and needs to be set to fix various errata.
         */
        switch (read_c0_prid()) {
        case 0x00030100: /* Au1000 DA */
        case 0x00030201: /* Au1000 HA */
        case 0x00030202: /* Au1000 HB */
        case 0x01030200: /* Au1500 AB */
        /*
         * The Au1100 errata are actually silent about this bit, so we set
         * it just in case for those revisions that require it to be set
         * according to arch/mips/au1000/common/cputable.c
         */
        case 0x02030200: /* Au1100 AB */
        case 0x02030201: /* Au1100 BA */
        case 0x02030202: /* Au1100 BC */
                set_c0_config(1 << 19);
                break;
        }
}

static int __cpuinitdata cca = -1;

static int __init cca_setup(char *str)
{
        get_option(&str, &cca);

        return 1;
}

__setup("cca=", cca_setup);

static void __cpuinit coherency_setup(void)
{
        if (cca < 0 || cca > 7)
                cca = read_c0_config() & CONF_CM_CMASK;
        _page_cachable_default = cca << _CACHE_SHIFT;

        pr_debug("Using cache attribute %d\n", cca);
        change_c0_config(CONF_CM_CMASK, cca);

        /*
         * c0_config.cu=0 specifies that updates by the sc instruction use
         * the coherency mode specified by the TLB; 1 means cachable
         * coherent update on write will be used.  Not all processors have
         * this bit; some wire it to zero, others, like Toshiba, had the
         * silly idea of putting something else there ...
         */
        switch (current_cpu_type()) {
        case CPU_R4000PC:
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400PC:
        case CPU_R4400SC:
        case CPU_R4400MC:
                clear_c0_config(CONF_CU);
                break;
        /*
         * We need to catch the early Alchemy SOCs with
         * the write-only c0_config.od bit and set it back to one...
         */
        case CPU_AU1000: /* rev. DA, HA, HB */
        case CPU_AU1100: /* rev. AB, BA, BC ?? */
        case CPU_AU1500: /* rev. AB */
                au1x00_fixup_config_od();
                break;
        }
}

#if defined(CONFIG_DMA_NONCOHERENT)

static int __cpuinitdata coherentio;

static int __init setcoherentio(char *str)
{
        coherentio = 1;

        return 1;
}

__setup("coherentio", setcoherentio);
#endif

void __cpuinit r4k_cache_init(void)
{
        extern void build_clear_page(void);
        extern void build_copy_page(void);
        extern char __weak except_vec2_generic;
        extern char __weak except_vec2_sb1;
        struct cpuinfo_mips *c = &current_cpu_data;

        switch (c->cputype) {
        case CPU_SB1:
        case CPU_SB1A:
                set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
                break;

        default:
                set_uncached_handler(0x100, &except_vec2_generic, 0x80);
                break;
        }

        probe_pcache();
        setup_scache();

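        /*
         * The blast handler pointers below are selected from the line
         * sizes probed above, so probe_pcache()/setup_scache() must have
         * run first.
         */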
        r4k_blast_dcache_page_setup();
        r4k_blast_dcache_page_indexed_setup();
        r4k_blast_dcache_setup();
        r4k_blast_icache_page_setup();
        r4k_blast_icache_page_indexed_setup();
        r4k_blast_icache_setup();
        r4k_blast_scache_page_setup();
        r4k_blast_scache_page_indexed_setup();
        r4k_blast_scache_setup();

        /*
         * Some MIPS32 and MIPS64 processors have physically indexed caches.
         * This code supports virtually indexed processors and will be
         * unnecessarily inefficient on physically indexed processors.
         */
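        /*
         * sets * linesz is the way size, i.e. the largest span in which two
         * virtual addresses can alias in the dcache; shared mappings get
         * aligned to it.
         */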
        if (c->dcache.linesz)
                shm_align_mask = max_t( unsigned long,
                                        c->dcache.sets * c->dcache.linesz - 1,
                                        PAGE_SIZE - 1);
        else
                shm_align_mask = PAGE_SIZE-1;

        __flush_cache_vmap      = r4k__flush_cache_vmap;
        __flush_cache_vunmap    = r4k__flush_cache_vunmap;

        flush_cache_all         = cache_noop;
        __flush_cache_all       = r4k___flush_cache_all;
        flush_cache_mm          = r4k_flush_cache_mm;
        flush_cache_page        = r4k_flush_cache_page;
        flush_cache_range       = r4k_flush_cache_range;

        flush_cache_sigtramp    = r4k_flush_cache_sigtramp;
        flush_icache_all        = r4k_flush_icache_all;
        local_flush_data_cache_page     = local_r4k_flush_data_cache_page;
        flush_data_cache_page   = r4k_flush_data_cache_page;
        flush_icache_range      = r4k_flush_icache_range;

#if defined(CONFIG_DMA_NONCOHERENT)
        if (coherentio) {
                _dma_cache_wback_inv    = (void *)cache_noop;
                _dma_cache_wback        = (void *)cache_noop;
                _dma_cache_inv          = (void *)cache_noop;
        } else {
                _dma_cache_wback_inv    = r4k_dma_cache_wback_inv;
                _dma_cache_wback        = r4k_dma_cache_wback_inv;
                _dma_cache_inv          = r4k_dma_cache_inv;
        }
#endif

        build_clear_page();
        build_copy_page();
#if !defined(CONFIG_MIPS_CMP)
        local_r4k___flush_cache_all(NULL);
#endif
        coherency_setup();
}