/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H

#include <asm/asm.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
#include <asm/uaccess.h> /* for segment_eq() */

/*
 * This macro returns a properly sign-extended address suitable as a base
 * address for indexed cache operations.  Two issues here:
 *
 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 *    the index bits from the virtual address.  This breaks with the tradition
 *    set by the R4000.  To keep unpleasant surprises from happening we pick
 *    an address in KSEG0 / CKSEG0.
 *  - We need a properly sign-extended address for 64-bit code.  To get away
 *    without ifdefs we let the compiler do it by a type cast.
 */
#define INDEX_BASE      CKSEG0

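/*
 * Illustrative sketch, not part of the original header: because CKSEG0
 * carries a signed type cast, plain assignment yields a canonical,
 * sign-extended address on both 32-bit and 64-bit kernels.  'way' and
 * 'set_offset' below are made-up names for the purpose of the example.
 *
 *	unsigned long base = INDEX_BASE;
 *	cache_op(Index_Invalidate_I, base | way | set_offset);
 */

/*
 * Issue a single cache operation.  The "i" constraint passes the opcode
 * as an immediate; the "R" constraint has the compiler build a legal
 * base+offset memory operand for @addr.
 */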
#define cache_op(op, addr)                                              \
        __asm__ __volatile__(                                           \
        "       .set    push                                    \n"     \
        "       .set    noreorder                               \n"     \
        "       .set    arch=r4000                              \n"     \
        "       cache   %0, %1                                  \n"     \
        "       .set    pop                                     \n"     \
        :                                                               \
        : "i" (op), "R" (*(unsigned char *)(addr)))

#ifdef CONFIG_MIPS_MT

/*
 * Optionally force single-threaded execution during I-cache and D-cache
 * flushes.
 */
#define PROTECT_CACHE_FLUSHES 1

#ifdef PROTECT_CACHE_FLUSHES

extern int mt_protiflush;
extern int mt_protdflush;
extern void mt_cflush_lockdown(void);
extern void mt_cflush_release(void);

#define BEGIN_MT_IPROT \
        unsigned long flags = 0;                        \
        unsigned long mtflags = 0;                      \
        if (mt_protiflush) {                            \
                local_irq_save(flags);                  \
                ehb();                                  \
                mtflags = dvpe();                       \
                mt_cflush_lockdown();                   \
        }

#define END_MT_IPROT \
        if (mt_protiflush) {                            \
                mt_cflush_release();                    \
                evpe(mtflags);                          \
                local_irq_restore(flags);               \
        }

#define BEGIN_MT_DPROT \
        unsigned long flags = 0;                        \
        unsigned long mtflags = 0;                      \
        if (mt_protdflush) {                            \
                local_irq_save(flags);                  \
                ehb();                                  \
                mtflags = dvpe();                       \
                mt_cflush_lockdown();                   \
        }

#define END_MT_DPROT \
        if (mt_protdflush) {                            \
                mt_cflush_release();                    \
                evpe(mtflags);                          \
                local_irq_restore(flags);               \
        }

#else

#define BEGIN_MT_IPROT
#define BEGIN_MT_DPROT
#define END_MT_IPROT
#define END_MT_DPROT

#endif /* PROTECT_CACHE_FLUSHES */

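/*
 * Each *_prologue below opens a block (and, under MT, a loop repeating
 * the flush mt_n_iflushes/mt_n_dflushes times) that the matching
 * *_epilogue closes again.
 */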
#define __iflush_prologue                                               \
        unsigned long redundance;                                       \
        extern int mt_n_iflushes;                                       \
        BEGIN_MT_IPROT                                                  \
        for (redundance = 0; redundance < mt_n_iflushes; redundance++) {

#define __iflush_epilogue                                               \
        END_MT_IPROT                                                    \
        }

#define __dflush_prologue                                               \
        unsigned long redundance;                                       \
        extern int mt_n_dflushes;                                       \
        BEGIN_MT_DPROT                                                  \
        for (redundance = 0; redundance < mt_n_dflushes; redundance++) {

#define __dflush_epilogue \
        END_MT_DPROT     \
        }

#define __inv_dflush_prologue __dflush_prologue
#define __inv_dflush_epilogue __dflush_epilogue
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue __sflush_prologue
#define __inv_sflush_epilogue __sflush_epilogue

#else /* CONFIG_MIPS_MT */

#define __iflush_prologue {
#define __iflush_epilogue }
#define __dflush_prologue {
#define __dflush_epilogue }
#define __inv_dflush_prologue {
#define __inv_dflush_epilogue }
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue {
#define __inv_sflush_epilogue }

#endif /* CONFIG_MIPS_MT */

static inline void flush_icache_line_indexed(unsigned long addr)
{
        __iflush_prologue
        cache_op(Index_Invalidate_I, addr);
        __iflush_epilogue
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
        __dflush_prologue
        cache_op(Index_Writeback_Inv_D, addr);
        __dflush_epilogue
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
        cache_op(Index_Writeback_Inv_SD, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
        __iflush_prologue
        switch (boot_cpu_type()) {
        case CPU_LOONGSON2:
                cache_op(Hit_Invalidate_I_Loongson2, addr);
                break;

        default:
                cache_op(Hit_Invalidate_I, addr);
                break;
        }
        __iflush_epilogue
}

static inline void flush_dcache_line(unsigned long addr)
{
        __dflush_prologue
        cache_op(Hit_Writeback_Inv_D, addr);
        __dflush_epilogue
}

static inline void invalidate_dcache_line(unsigned long addr)
{
        __dflush_prologue
        cache_op(Hit_Invalidate_D, addr);
        __dflush_epilogue
}

static inline void invalidate_scache_line(unsigned long addr)
{
        cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
        cache_op(Hit_Writeback_Inv_SD, addr);
}

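/*
 * "Protected" variants: the 1b -> 2b __ex_table entry lets the fault
 * fixup path skip the cache op instead of oopsing when the target
 * address turns out to be unmapped.
 */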
#define protected_cache_op(op, addr)                            \
        __asm__ __volatile__(                                   \
        "       .set    push                    \n"             \
        "       .set    noreorder               \n"             \
        "       .set    arch=r4000              \n"             \
        "1:     cache   %0, (%1)                \n"             \
        "2:     .set    pop                     \n"             \
        "       .section __ex_table,\"a\"       \n"             \
        "       "STR(PTR)" 1b, 2b               \n"             \
        "       .previous"                                      \
        :                                                       \
        : "i" (op), "r" (addr))

#define protected_cachee_op(op, addr)                           \
        __asm__ __volatile__(                                   \
        "       .set    push                    \n"             \
        "       .set    noreorder               \n"             \
        "       .set    mips0                   \n"             \
        "       .set    eva                     \n"             \
        "1:     cachee  %0, (%1)                \n"             \
        "2:     .set    pop                     \n"             \
        "       .section __ex_table,\"a\"       \n"             \
        "       "STR(PTR)" 1b, 2b               \n"             \
        "       .previous"                                      \
        :                                                       \
        : "i" (op), "r" (addr))

/*
 * The next two are for badland addresses like signal trampolines.
 */
static inline void protected_flush_icache_line(unsigned long addr)
{
        switch (boot_cpu_type()) {
        case CPU_LOONGSON2:
                protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
                break;

        default:
#ifdef CONFIG_EVA
                protected_cachee_op(Hit_Invalidate_I, addr);
#else
                protected_cache_op(Hit_Invalidate_I, addr);
#endif
                break;
        }
}

/*
 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
 * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
 * caches.  Only a single cache line gets needlessly invalidated here, so the
 * penalty is modest.
 */
static inline void protected_writeback_dcache_line(unsigned long addr)
{
        protected_cache_op(Hit_Writeback_Inv_D, addr);
}

static inline void protected_writeback_scache_line(unsigned long addr)
{
        protected_cache_op(Hit_Writeback_Inv_SD, addr);
}

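/*
 * Illustrative sketch, not part of the original header: after writing a
 * signal trampoline, the new instructions are pushed out of the D-cache
 * and the stale I-cache line is discarded.  '__example_sync_tramp' is a
 * made-up name for the purpose of the example.
 */
static inline void __example_sync_tramp(unsigned long addr)
{
        protected_writeback_dcache_line(addr); /* write data back to memory */
        protected_flush_icache_line(addr);     /* drop stale instructions */
}
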
/*
 * This one is RM7000-specific
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
        cache_op(Page_Invalidate_T, addr);
}

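/*
 * Each cacheNN_unroll32 macro below issues 32 cache ops on consecutive
 * NN-byte lines, so a single invocation covers NN * 32 bytes.
 */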
#define cache16_unroll32(base, op)                                      \
        __asm__ __volatile__(                                           \
        "       .set push                                       \n"     \
        "       .set noreorder                                  \n"     \
        "       .set mips3                                      \n"     \
        "       cache %1, 0x000(%0); cache %1, 0x010(%0)        \n"     \
        "       cache %1, 0x020(%0); cache %1, 0x030(%0)        \n"     \
        "       cache %1, 0x040(%0); cache %1, 0x050(%0)        \n"     \
        "       cache %1, 0x060(%0); cache %1, 0x070(%0)        \n"     \
        "       cache %1, 0x080(%0); cache %1, 0x090(%0)        \n"     \
        "       cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)        \n"     \
        "       cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)        \n"     \
        "       cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)        \n"     \
        "       cache %1, 0x100(%0); cache %1, 0x110(%0)        \n"     \
        "       cache %1, 0x120(%0); cache %1, 0x130(%0)        \n"     \
        "       cache %1, 0x140(%0); cache %1, 0x150(%0)        \n"     \
        "       cache %1, 0x160(%0); cache %1, 0x170(%0)        \n"     \
        "       cache %1, 0x180(%0); cache %1, 0x190(%0)        \n"     \
        "       cache %1, 0x1a0(%0); cache %1, 0x1b0(%0)        \n"     \
        "       cache %1, 0x1c0(%0); cache %1, 0x1d0(%0)        \n"     \
        "       cache %1, 0x1e0(%0); cache %1, 0x1f0(%0)        \n"     \
        "       .set pop                                        \n"     \
                :                                                       \
                : "r" (base),                                           \
                  "i" (op));

#define cache32_unroll32(base, op)                                      \
        __asm__ __volatile__(                                           \
        "       .set push                                       \n"     \
        "       .set noreorder                                  \n"     \
        "       .set mips3                                      \n"     \
        "       cache %1, 0x000(%0); cache %1, 0x020(%0)        \n"     \
        "       cache %1, 0x040(%0); cache %1, 0x060(%0)        \n"     \
        "       cache %1, 0x080(%0); cache %1, 0x0a0(%0)        \n"     \
        "       cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)        \n"     \
        "       cache %1, 0x100(%0); cache %1, 0x120(%0)        \n"     \
        "       cache %1, 0x140(%0); cache %1, 0x160(%0)        \n"     \
        "       cache %1, 0x180(%0); cache %1, 0x1a0(%0)        \n"     \
        "       cache %1, 0x1c0(%0); cache %1, 0x1e0(%0)        \n"     \
        "       cache %1, 0x200(%0); cache %1, 0x220(%0)        \n"     \
        "       cache %1, 0x240(%0); cache %1, 0x260(%0)        \n"     \
        "       cache %1, 0x280(%0); cache %1, 0x2a0(%0)        \n"     \
        "       cache %1, 0x2c0(%0); cache %1, 0x2e0(%0)        \n"     \
        "       cache %1, 0x300(%0); cache %1, 0x320(%0)        \n"     \
        "       cache %1, 0x340(%0); cache %1, 0x360(%0)        \n"     \
        "       cache %1, 0x380(%0); cache %1, 0x3a0(%0)        \n"     \
        "       cache %1, 0x3c0(%0); cache %1, 0x3e0(%0)        \n"     \
        "       .set pop                                        \n"     \
                :                                                       \
                : "r" (base),                                           \
                  "i" (op));

#define cache64_unroll32(base, op)                                      \
        __asm__ __volatile__(                                           \
        "       .set push                                       \n"     \
        "       .set noreorder                                  \n"     \
        "       .set mips3                                      \n"     \
        "       cache %1, 0x000(%0); cache %1, 0x040(%0)        \n"     \
        "       cache %1, 0x080(%0); cache %1, 0x0c0(%0)        \n"     \
        "       cache %1, 0x100(%0); cache %1, 0x140(%0)        \n"     \
        "       cache %1, 0x180(%0); cache %1, 0x1c0(%0)        \n"     \
        "       cache %1, 0x200(%0); cache %1, 0x240(%0)        \n"     \
        "       cache %1, 0x280(%0); cache %1, 0x2c0(%0)        \n"     \
        "       cache %1, 0x300(%0); cache %1, 0x340(%0)        \n"     \
        "       cache %1, 0x380(%0); cache %1, 0x3c0(%0)        \n"     \
        "       cache %1, 0x400(%0); cache %1, 0x440(%0)        \n"     \
        "       cache %1, 0x480(%0); cache %1, 0x4c0(%0)        \n"     \
        "       cache %1, 0x500(%0); cache %1, 0x540(%0)        \n"     \
        "       cache %1, 0x580(%0); cache %1, 0x5c0(%0)        \n"     \
        "       cache %1, 0x600(%0); cache %1, 0x640(%0)        \n"     \
        "       cache %1, 0x680(%0); cache %1, 0x6c0(%0)        \n"     \
        "       cache %1, 0x700(%0); cache %1, 0x740(%0)        \n"     \
        "       cache %1, 0x780(%0); cache %1, 0x7c0(%0)        \n"     \
        "       .set pop                                        \n"     \
                :                                                       \
                : "r" (base),                                           \
                  "i" (op));

#define cache128_unroll32(base, op)                                     \
        __asm__ __volatile__(                                           \
        "       .set push                                       \n"     \
        "       .set noreorder                                  \n"     \
        "       .set mips3                                      \n"     \
        "       cache %1, 0x000(%0); cache %1, 0x080(%0)        \n"     \
        "       cache %1, 0x100(%0); cache %1, 0x180(%0)        \n"     \
        "       cache %1, 0x200(%0); cache %1, 0x280(%0)        \n"     \
        "       cache %1, 0x300(%0); cache %1, 0x380(%0)        \n"     \
        "       cache %1, 0x400(%0); cache %1, 0x480(%0)        \n"     \
        "       cache %1, 0x500(%0); cache %1, 0x580(%0)        \n"     \
        "       cache %1, 0x600(%0); cache %1, 0x680(%0)        \n"     \
        "       cache %1, 0x700(%0); cache %1, 0x780(%0)        \n"     \
        "       cache %1, 0x800(%0); cache %1, 0x880(%0)        \n"     \
        "       cache %1, 0x900(%0); cache %1, 0x980(%0)        \n"     \
        "       cache %1, 0xa00(%0); cache %1, 0xa80(%0)        \n"     \
        "       cache %1, 0xb00(%0); cache %1, 0xb80(%0)        \n"     \
        "       cache %1, 0xc00(%0); cache %1, 0xc80(%0)        \n"     \
        "       cache %1, 0xd00(%0); cache %1, 0xd80(%0)        \n"     \
        "       cache %1, 0xe00(%0); cache %1, 0xe80(%0)        \n"     \
        "       cache %1, 0xf00(%0); cache %1, 0xf80(%0)        \n"     \
        "       .set pop                                        \n"     \
                :                                                       \
                : "r" (base),                                           \
                  "i" (op));

/*
 * Perform the cache operation specified by op using a user mode virtual
 * address while in kernel mode.  These variants rely on the EVA cachee
 * instruction to reach the user address space.
 */
#define cache16_unroll32_user(base, op)                                 \
        __asm__ __volatile__(                                           \
        "       .set push                                       \n"     \
        "       .set noreorder                                  \n"     \
        "       .set mips0                                      \n"     \
        "       .set eva                                        \n"     \
        "       cachee %1, 0x000(%0); cachee %1, 0x010(%0)      \n"     \
        "       cachee %1, 0x020(%0); cachee %1, 0x030(%0)      \n"     \
        "       cachee %1, 0x040(%0); cachee %1, 0x050(%0)      \n"     \
        "       cachee %1, 0x060(%0); cachee %1, 0x070(%0)      \n"     \
        "       cachee %1, 0x080(%0); cachee %1, 0x090(%0)      \n"     \
        "       cachee %1, 0x0a0(%0); cachee %1, 0x0b0(%0)      \n"     \
        "       cachee %1, 0x0c0(%0); cachee %1, 0x0d0(%0)      \n"     \
        "       cachee %1, 0x0e0(%0); cachee %1, 0x0f0(%0)      \n"     \
        "       cachee %1, 0x100(%0); cachee %1, 0x110(%0)      \n"     \
        "       cachee %1, 0x120(%0); cachee %1, 0x130(%0)      \n"     \
        "       cachee %1, 0x140(%0); cachee %1, 0x150(%0)      \n"     \
        "       cachee %1, 0x160(%0); cachee %1, 0x170(%0)      \n"     \
        "       cachee %1, 0x180(%0); cachee %1, 0x190(%0)      \n"     \
        "       cachee %1, 0x1a0(%0); cachee %1, 0x1b0(%0)      \n"     \
        "       cachee %1, 0x1c0(%0); cachee %1, 0x1d0(%0)      \n"     \
        "       cachee %1, 0x1e0(%0); cachee %1, 0x1f0(%0)      \n"     \
        "       .set pop                                        \n"     \
                :                                                       \
                : "r" (base),                                           \
                  "i" (op));

#define cache32_unroll32_user(base, op)                                 \
        __asm__ __volatile__(                                           \
        "       .set push                                       \n"     \
        "       .set noreorder                                  \n"     \
        "       .set mips0                                      \n"     \
        "       .set eva                                        \n"     \
        "       cachee %1, 0x000(%0); cachee %1, 0x020(%0)      \n"     \
        "       cachee %1, 0x040(%0); cachee %1, 0x060(%0)      \n"     \
        "       cachee %1, 0x080(%0); cachee %1, 0x0a0(%0)      \n"     \
        "       cachee %1, 0x0c0(%0); cachee %1, 0x0e0(%0)      \n"     \
        "       cachee %1, 0x100(%0); cachee %1, 0x120(%0)      \n"     \
        "       cachee %1, 0x140(%0); cachee %1, 0x160(%0)      \n"     \
        "       cachee %1, 0x180(%0); cachee %1, 0x1a0(%0)      \n"     \
        "       cachee %1, 0x1c0(%0); cachee %1, 0x1e0(%0)      \n"     \
        "       cachee %1, 0x200(%0); cachee %1, 0x220(%0)      \n"     \
        "       cachee %1, 0x240(%0); cachee %1, 0x260(%0)      \n"     \
        "       cachee %1, 0x280(%0); cachee %1, 0x2a0(%0)      \n"     \
        "       cachee %1, 0x2c0(%0); cachee %1, 0x2e0(%0)      \n"     \
        "       cachee %1, 0x300(%0); cachee %1, 0x320(%0)      \n"     \
        "       cachee %1, 0x340(%0); cachee %1, 0x360(%0)      \n"     \
        "       cachee %1, 0x380(%0); cachee %1, 0x3a0(%0)      \n"     \
        "       cachee %1, 0x3c0(%0); cachee %1, 0x3e0(%0)      \n"     \
        "       .set pop                                        \n"     \
                :                                                       \
                : "r" (base),                                           \
                  "i" (op));

#define cache64_unroll32_user(base, op)                                 \
        __asm__ __volatile__(                                           \
        "       .set push                                       \n"     \
        "       .set noreorder                                  \n"     \
        "       .set mips0                                      \n"     \
        "       .set eva                                        \n"     \
        "       cachee %1, 0x000(%0); cachee %1, 0x040(%0)      \n"     \
        "       cachee %1, 0x080(%0); cachee %1, 0x0c0(%0)      \n"     \
        "       cachee %1, 0x100(%0); cachee %1, 0x140(%0)      \n"     \
        "       cachee %1, 0x180(%0); cachee %1, 0x1c0(%0)      \n"     \
        "       cachee %1, 0x200(%0); cachee %1, 0x240(%0)      \n"     \
        "       cachee %1, 0x280(%0); cachee %1, 0x2c0(%0)      \n"     \
        "       cachee %1, 0x300(%0); cachee %1, 0x340(%0)      \n"     \
        "       cachee %1, 0x380(%0); cachee %1, 0x3c0(%0)      \n"     \
        "       cachee %1, 0x400(%0); cachee %1, 0x440(%0)      \n"     \
        "       cachee %1, 0x480(%0); cachee %1, 0x4c0(%0)      \n"     \
        "       cachee %1, 0x500(%0); cachee %1, 0x540(%0)      \n"     \
        "       cachee %1, 0x580(%0); cachee %1, 0x5c0(%0)      \n"     \
        "       cachee %1, 0x600(%0); cachee %1, 0x640(%0)      \n"     \
        "       cachee %1, 0x680(%0); cachee %1, 0x6c0(%0)      \n"     \
        "       cachee %1, 0x700(%0); cachee %1, 0x740(%0)      \n"     \
        "       cachee %1, 0x780(%0); cachee %1, 0x7c0(%0)      \n"     \
        "       .set pop                                        \n"     \
                :                                                       \
                : "r" (base),                                           \
                  "i" (op));

/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
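/*
 * The indexed variants below walk every way (ws) and every set (addr) of
 * the cache described by current_cpu_data.desc, issuing 32 ops per inner
 * loop iteration via cacheNN_unroll32.
 */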
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra)    \
static inline void extra##blast_##pfx##cache##lsize(void)               \
{                                                                       \
        unsigned long start = INDEX_BASE;                               \
        unsigned long end = start + current_cpu_data.desc.waysize;      \
        unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;     \
        unsigned long ws_end = current_cpu_data.desc.ways <<            \
                               current_cpu_data.desc.waybit;            \
        unsigned long ws, addr;                                         \
                                                                        \
        __##pfx##flush_prologue                                         \
                                                                        \
        for (ws = 0; ws < ws_end; ws += ws_inc)                         \
                for (addr = start; addr < end; addr += lsize * 32)      \
                        cache##lsize##_unroll32(addr|ws, indexop);      \
                                                                        \
        __##pfx##flush_epilogue                                         \
}                                                                       \
                                                                        \
static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
{                                                                       \
        unsigned long start = page;                                     \
        unsigned long end = page + PAGE_SIZE;                           \
                                                                        \
        __##pfx##flush_prologue                                         \
                                                                        \
        do {                                                            \
                cache##lsize##_unroll32(start, hitop);                  \
                start += lsize * 32;                                    \
        } while (start < end);                                          \
                                                                        \
        __##pfx##flush_epilogue                                         \
}                                                                       \
                                                                        \
static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{                                                                       \
        unsigned long indexmask = current_cpu_data.desc.waysize - 1;    \
        unsigned long start = INDEX_BASE + (page & indexmask);          \
        unsigned long end = start + PAGE_SIZE;                          \
        unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;     \
        unsigned long ws_end = current_cpu_data.desc.ways <<            \
                               current_cpu_data.desc.waybit;            \
        unsigned long ws, addr;                                         \
                                                                        \
        __##pfx##flush_prologue                                         \
                                                                        \
        for (ws = 0; ws < ws_end; ws += ws_inc)                         \
                for (addr = start; addr < end; addr += lsize * 32)      \
                        cache##lsize##_unroll32(addr|ws, indexop);      \
                                                                        \
        __##pfx##flush_epilogue                                         \
}

__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )

__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )

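/*
 * Illustrative sketch, not part of the original header: a caller wanting
 * a whole-D-cache writeback+invalidate can dispatch on the runtime line
 * size to the matching generated helper.  '__example_blast_dcache' is a
 * made-up name.
 */
static inline void __example_blast_dcache(void)
{
        switch (cpu_dcache_line_size()) {
        case 16:
                blast_dcache16();
                break;
        case 32:
                blast_dcache32();
                break;
        case 64:
                blast_dcache64();
                break;
        case 128:
                blast_dcache128();
                break;
        }
}
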
#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
{                                                                       \
        unsigned long start = page;                                     \
        unsigned long end = page + PAGE_SIZE;                           \
                                                                        \
        __##pfx##flush_prologue                                         \
                                                                        \
        do {                                                            \
                cache##lsize##_unroll32_user(start, hitop);             \
                start += lsize * 32;                                    \
        } while (start < end);                                          \
                                                                        \
        __##pfx##flush_epilogue                                         \
}

__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
                         16)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
                         32)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
                         64)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)

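/*
 * Note: only 16/32/64-byte line sizes get user-page variants; they are
 * the EVA (cachee) counterparts of the blast_xxx_page helpers above.
 */
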
/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)        \
static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
                                                    unsigned long end)  \
{                                                                       \
        unsigned long lsize = cpu_##desc##_line_size();                 \
        unsigned long addr = start & ~(lsize - 1);                      \
        unsigned long aend = (end - 1) & ~(lsize - 1);                  \
                                                                        \
        __##pfx##flush_prologue                                         \
                                                                        \
        while (1) {                                                     \
                prot##cache_op(hitop, addr);                            \
                if (addr == aend)                                       \
                        break;                                          \
                addr += lsize;                                          \
        }                                                               \
                                                                        \
        __##pfx##flush_epilogue                                         \
}

#ifndef CONFIG_EVA

__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )

#else

#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop)                \
static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
                                                        unsigned long end) \
{                                                                       \
        unsigned long lsize = cpu_##desc##_line_size();                 \
        unsigned long addr = start & ~(lsize - 1);                      \
        unsigned long aend = (end - 1) & ~(lsize - 1);                  \
                                                                        \
        __##pfx##flush_prologue                                         \
                                                                        \
        if (segment_eq(get_fs(), USER_DS)) {                            \
                while (1) {                                             \
                        protected_cachee_op(hitop, addr);               \
                        if (addr == aend)                               \
                                break;                                  \
                        addr += lsize;                                  \
                }                                                       \
        } else {                                                        \
                while (1) {                                             \
                        protected_cache_op(hitop, addr);                \
                        if (addr == aend)                               \
                                break;                                  \
                        addr += lsize;                                  \
                }                                                       \
        }                                                               \
                                                                        \
        __##pfx##flush_epilogue                                         \
}

__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)

#endif /* CONFIG_EVA */

__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
        protected_, loongson2_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
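
/*
 * Illustrative sketch (made-up name, not part of this header): write a
 * CPU-filled buffer back to memory, e.g. before handing it to a device,
 * using the generated range helper.
 */
static inline void __example_wb_range(unsigned long start, unsigned long size)
{
        blast_dcache_range(start, start + size);
}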

#endif /* _ASM_R4KCACHE_H */