/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H

#include <asm/asm.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
#include <asm/uaccess.h> /* for segment_eq() */

/*
 * This macro returns a properly sign-extended address suitable as a base
 * address for indexed cache operations.  Two issues here:
 *
 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 *    the index bits from the virtual address.  This breaks with the tradition
 *    set by the R4000.  To keep unpleasant surprises from happening we pick
 *    an address in KSEG0 / CKSEG0.
 *  - We need a properly sign-extended address for 64-bit code.  To get away
 *    without ifdefs we let the compiler do it by a type cast.
 */
#define INDEX_BASE      CKSEG0

#define cache_op(op,addr)                                               \
        __asm__ __volatile__(                                           \
        "       .set    push                                    \n"     \
        "       .set    noreorder                               \n"     \
        "       .set    arch=r4000                              \n"     \
        "       cache   %0, %1                                  \n"     \
        "       .set    pop                                     \n"     \
        :                                                               \
        : "i" (op), "R" (*(unsigned char *)(addr)))
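
/*
 * Illustrative usage: cache_op(Hit_Writeback_Inv_D, addr) expands to a
 * single "cache" instruction acting on the cache line containing addr;
 * the "R" memory operand lets the compiler supply that address in a
 * base-register + offset form suitable for the instruction.
 */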

#ifdef CONFIG_MIPS_MT
/*
 * Temporary hacks for SMTC debug. Optionally force single-threaded
 * execution during I-cache flushes.
 */

#define PROTECT_CACHE_FLUSHES 1

#ifdef PROTECT_CACHE_FLUSHES

extern int mt_protiflush;
extern int mt_protdflush;
extern void mt_cflush_lockdown(void);
extern void mt_cflush_release(void);

#define BEGIN_MT_IPROT \
        unsigned long flags = 0;                        \
        unsigned long mtflags = 0;                      \
        if (mt_protiflush) {                            \
                local_irq_save(flags);                  \
                ehb();                                  \
                mtflags = dvpe();                       \
                mt_cflush_lockdown();                   \
        }

#define END_MT_IPROT \
        if (mt_protiflush) {                            \
                mt_cflush_release();                    \
                evpe(mtflags);                          \
                local_irq_restore(flags);               \
        }

#define BEGIN_MT_DPROT \
        unsigned long flags = 0;                        \
        unsigned long mtflags = 0;                      \
        if (mt_protdflush) {                            \
                local_irq_save(flags);                  \
                ehb();                                  \
                mtflags = dvpe();                       \
                mt_cflush_lockdown();                   \
        }

#define END_MT_DPROT \
        if (mt_protdflush) {                            \
                mt_cflush_release();                    \
                evpe(mtflags);                          \
                local_irq_restore(flags);               \
        }

#else

#define BEGIN_MT_IPROT
#define BEGIN_MT_DPROT
#define END_MT_IPROT
#define END_MT_DPROT

#endif /* PROTECT_CACHE_FLUSHES */

#define __iflush_prologue                                               \
        unsigned long redundance;                                       \
        extern int mt_n_iflushes;                                       \
        BEGIN_MT_IPROT                                                  \
        for (redundance = 0; redundance < mt_n_iflushes; redundance++) {

#define __iflush_epilogue                                               \
        END_MT_IPROT                                                    \
        }

#define __dflush_prologue                                               \
        unsigned long redundance;                                       \
        extern int mt_n_dflushes;                                       \
        BEGIN_MT_DPROT                                                  \
        for (redundance = 0; redundance < mt_n_dflushes; redundance++) {

#define __dflush_epilogue \
        END_MT_DPROT     \
        }

#define __inv_dflush_prologue __dflush_prologue
#define __inv_dflush_epilogue __dflush_epilogue
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue __sflush_prologue
#define __inv_sflush_epilogue __sflush_epilogue

#else /* CONFIG_MIPS_MT */

#define __iflush_prologue {
#define __iflush_epilogue }
#define __dflush_prologue {
#define __dflush_epilogue }
#define __inv_dflush_prologue {
#define __inv_dflush_epilogue }
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue {
#define __inv_sflush_epilogue }

#endif /* CONFIG_MIPS_MT */

static inline void flush_icache_line_indexed(unsigned long addr)
{
        __iflush_prologue
        cache_op(Index_Invalidate_I, addr);
        __iflush_epilogue
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
        __dflush_prologue
        cache_op(Index_Writeback_Inv_D, addr);
        __dflush_epilogue
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
        cache_op(Index_Writeback_Inv_SD, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
        __iflush_prologue
        switch (boot_cpu_type()) {
        case CPU_LOONGSON2:
                cache_op(Hit_Invalidate_I_Loongson2, addr);
                break;

        default:
                cache_op(Hit_Invalidate_I, addr);
                break;
        }
        __iflush_epilogue
}

static inline void flush_dcache_line(unsigned long addr)
{
        __dflush_prologue
        cache_op(Hit_Writeback_Inv_D, addr);
        __dflush_epilogue
}

static inline void invalidate_dcache_line(unsigned long addr)
{
        __dflush_prologue
        cache_op(Hit_Invalidate_D, addr);
        __dflush_epilogue
}

static inline void invalidate_scache_line(unsigned long addr)
{
        cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
        cache_op(Hit_Writeback_Inv_SD, addr);
}

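/*
 * protected_cache_op() and protected_cachee_op() wrap the cache instruction
 * in an __ex_table entry with the fixup at label 2, so a fault on the target
 * address makes the kernel silently skip the operation instead of oopsing.
 * That is what makes them safe for the "badland" addresses handled below.
 */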
#define protected_cache_op(op,addr)                             \
        __asm__ __volatile__(                                   \
        "       .set    push                    \n"             \
        "       .set    noreorder               \n"             \
        "       .set    arch=r4000              \n"             \
        "1:     cache   %0, (%1)                \n"             \
        "2:     .set    pop                     \n"             \
        "       .section __ex_table,\"a\"       \n"             \
        "       "STR(PTR)" 1b, 2b               \n"             \
        "       .previous"                                      \
        :                                                       \
        : "i" (op), "r" (addr))

#define protected_cachee_op(op,addr)                            \
        __asm__ __volatile__(                                   \
        "       .set    push                    \n"             \
        "       .set    noreorder               \n"             \
        "       .set    mips0                   \n"             \
        "       .set    eva                     \n"             \
        "1:     cachee  %0, (%1)                \n"             \
        "2:     .set    pop                     \n"             \
        "       .section __ex_table,\"a\"       \n"             \
        "       "STR(PTR)" 1b, 2b               \n"             \
        "       .previous"                                      \
        :                                                       \
        : "i" (op), "r" (addr))

/*
 * The next two are for badland addresses like signal trampolines.
 */
static inline void protected_flush_icache_line(unsigned long addr)
{
        switch (boot_cpu_type()) {
        case CPU_LOONGSON2:
                protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
                break;

        default:
#ifdef CONFIG_EVA
                protected_cachee_op(Hit_Invalidate_I, addr);
#else
                protected_cache_op(Hit_Invalidate_I, addr);
#endif
                break;
        }
}

/*
 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
 * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
 * caches.  We're talking about one cacheline unnecessarily getting invalidated
 * here so the penalty isn't severe.
 */
static inline void protected_writeback_dcache_line(unsigned long addr)
{
        protected_cache_op(Hit_Writeback_Inv_D, addr);
}

static inline void protected_writeback_scache_line(unsigned long addr)
{
        protected_cache_op(Hit_Writeback_Inv_SD, addr);
}

/*
 * This one is RM7000-specific
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
        cache_op(Page_Invalidate_T, addr);
}

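/*
 * Each cacheN_unroll32(base, op) macro below issues 32 cache ops at a
 * stride of N bytes, i.e. one invocation covers N * 32 bytes starting at
 * base; the blast_* helpers further down therefore advance their address
 * by lsize * 32 per iteration.
 */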
#define cache16_unroll32(base,op)                                       \
        __asm__ __volatile__(                                           \
        "       .set push                                       \n"     \
        "       .set noreorder                                  \n"     \
        "       .set mips3                                      \n"     \
        "       cache %1, 0x000(%0); cache %1, 0x010(%0)        \n"     \
        "       cache %1, 0x020(%0); cache %1, 0x030(%0)        \n"     \
        "       cache %1, 0x040(%0); cache %1, 0x050(%0)        \n"     \
        "       cache %1, 0x060(%0); cache %1, 0x070(%0)        \n"     \
        "       cache %1, 0x080(%0); cache %1, 0x090(%0)        \n"     \
        "       cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)        \n"     \
        "       cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)        \n"     \
        "       cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)        \n"     \
        "       cache %1, 0x100(%0); cache %1, 0x110(%0)        \n"     \
        "       cache %1, 0x120(%0); cache %1, 0x130(%0)        \n"     \
        "       cache %1, 0x140(%0); cache %1, 0x150(%0)        \n"     \
        "       cache %1, 0x160(%0); cache %1, 0x170(%0)        \n"     \
        "       cache %1, 0x180(%0); cache %1, 0x190(%0)        \n"     \
        "       cache %1, 0x1a0(%0); cache %1, 0x1b0(%0)        \n"     \
        "       cache %1, 0x1c0(%0); cache %1, 0x1d0(%0)        \n"     \
        "       cache %1, 0x1e0(%0); cache %1, 0x1f0(%0)        \n"     \
        "       .set pop                                        \n"     \
                :                                                       \
                : "r" (base),                                           \
                  "i" (op));

#define cache32_unroll32(base,op)                                       \
        __asm__ __volatile__(                                           \
        "       .set push                                       \n"     \
        "       .set noreorder                                  \n"     \
        "       .set mips3                                      \n"     \
        "       cache %1, 0x000(%0); cache %1, 0x020(%0)        \n"     \
        "       cache %1, 0x040(%0); cache %1, 0x060(%0)        \n"     \
        "       cache %1, 0x080(%0); cache %1, 0x0a0(%0)        \n"     \
        "       cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)        \n"     \
        "       cache %1, 0x100(%0); cache %1, 0x120(%0)        \n"     \
        "       cache %1, 0x140(%0); cache %1, 0x160(%0)        \n"     \
        "       cache %1, 0x180(%0); cache %1, 0x1a0(%0)        \n"     \
        "       cache %1, 0x1c0(%0); cache %1, 0x1e0(%0)        \n"     \
        "       cache %1, 0x200(%0); cache %1, 0x220(%0)        \n"     \
        "       cache %1, 0x240(%0); cache %1, 0x260(%0)        \n"     \
        "       cache %1, 0x280(%0); cache %1, 0x2a0(%0)        \n"     \
        "       cache %1, 0x2c0(%0); cache %1, 0x2e0(%0)        \n"     \
        "       cache %1, 0x300(%0); cache %1, 0x320(%0)        \n"     \
        "       cache %1, 0x340(%0); cache %1, 0x360(%0)        \n"     \
        "       cache %1, 0x380(%0); cache %1, 0x3a0(%0)        \n"     \
        "       cache %1, 0x3c0(%0); cache %1, 0x3e0(%0)        \n"     \
        "       .set pop                                        \n"     \
                :                                                       \
                : "r" (base),                                           \
                  "i" (op));

#define cache64_unroll32(base,op)                                       \
        __asm__ __volatile__(                                           \
        "       .set push                                       \n"     \
        "       .set noreorder                                  \n"     \
        "       .set mips3                                      \n"     \
        "       cache %1, 0x000(%0); cache %1, 0x040(%0)        \n"     \
        "       cache %1, 0x080(%0); cache %1, 0x0c0(%0)        \n"     \
        "       cache %1, 0x100(%0); cache %1, 0x140(%0)        \n"     \
        "       cache %1, 0x180(%0); cache %1, 0x1c0(%0)        \n"     \
        "       cache %1, 0x200(%0); cache %1, 0x240(%0)        \n"     \
        "       cache %1, 0x280(%0); cache %1, 0x2c0(%0)        \n"     \
        "       cache %1, 0x300(%0); cache %1, 0x340(%0)        \n"     \
        "       cache %1, 0x380(%0); cache %1, 0x3c0(%0)        \n"     \
        "       cache %1, 0x400(%0); cache %1, 0x440(%0)        \n"     \
        "       cache %1, 0x480(%0); cache %1, 0x4c0(%0)        \n"     \
        "       cache %1, 0x500(%0); cache %1, 0x540(%0)        \n"     \
        "       cache %1, 0x580(%0); cache %1, 0x5c0(%0)        \n"     \
        "       cache %1, 0x600(%0); cache %1, 0x640(%0)        \n"     \
        "       cache %1, 0x680(%0); cache %1, 0x6c0(%0)        \n"     \
        "       cache %1, 0x700(%0); cache %1, 0x740(%0)        \n"     \
        "       cache %1, 0x780(%0); cache %1, 0x7c0(%0)        \n"     \
        "       .set pop                                        \n"     \
                :                                                       \
                : "r" (base),                                           \
                  "i" (op));

#define cache128_unroll32(base,op)                                      \
        __asm__ __volatile__(                                           \
        "       .set push                                       \n"     \
        "       .set noreorder                                  \n"     \
        "       .set mips3                                      \n"     \
        "       cache %1, 0x000(%0); cache %1, 0x080(%0)        \n"     \
        "       cache %1, 0x100(%0); cache %1, 0x180(%0)        \n"     \
        "       cache %1, 0x200(%0); cache %1, 0x280(%0)        \n"     \
        "       cache %1, 0x300(%0); cache %1, 0x380(%0)        \n"     \
        "       cache %1, 0x400(%0); cache %1, 0x480(%0)        \n"     \
        "       cache %1, 0x500(%0); cache %1, 0x580(%0)        \n"     \
        "       cache %1, 0x600(%0); cache %1, 0x680(%0)        \n"     \
        "       cache %1, 0x700(%0); cache %1, 0x780(%0)        \n"     \
        "       cache %1, 0x800(%0); cache %1, 0x880(%0)        \n"     \
        "       cache %1, 0x900(%0); cache %1, 0x980(%0)        \n"     \
        "       cache %1, 0xa00(%0); cache %1, 0xa80(%0)        \n"     \
        "       cache %1, 0xb00(%0); cache %1, 0xb80(%0)        \n"     \
        "       cache %1, 0xc00(%0); cache %1, 0xc80(%0)        \n"     \
        "       cache %1, 0xd00(%0); cache %1, 0xd80(%0)        \n"     \
        "       cache %1, 0xe00(%0); cache %1, 0xe80(%0)        \n"     \
        "       cache %1, 0xf00(%0); cache %1, 0xf80(%0)        \n"     \
        "       .set pop                                        \n"     \
                :                                                       \
                : "r" (base),                                           \
                  "i" (op));

/*
 * Perform the cache operation specified by op using a user mode virtual
 * address while in kernel mode.
 */
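/*
 * Note: cachee is the EVA (Enhanced Virtual Addressing) form of the cache
 * instruction; it translates its address through the user address space
 * mapping, which is what lets these variants hit user pages from kernel
 * mode.
 */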
#define cache16_unroll32_user(base,op)                                  \
        __asm__ __volatile__(                                           \
        "       .set push                                       \n"     \
        "       .set noreorder                                  \n"     \
        "       .set mips0                                      \n"     \
        "       .set eva                                        \n"     \
        "       cachee %1, 0x000(%0); cachee %1, 0x010(%0)      \n"     \
        "       cachee %1, 0x020(%0); cachee %1, 0x030(%0)      \n"     \
        "       cachee %1, 0x040(%0); cachee %1, 0x050(%0)      \n"     \
        "       cachee %1, 0x060(%0); cachee %1, 0x070(%0)      \n"     \
        "       cachee %1, 0x080(%0); cachee %1, 0x090(%0)      \n"     \
        "       cachee %1, 0x0a0(%0); cachee %1, 0x0b0(%0)      \n"     \
        "       cachee %1, 0x0c0(%0); cachee %1, 0x0d0(%0)      \n"     \
        "       cachee %1, 0x0e0(%0); cachee %1, 0x0f0(%0)      \n"     \
        "       cachee %1, 0x100(%0); cachee %1, 0x110(%0)      \n"     \
        "       cachee %1, 0x120(%0); cachee %1, 0x130(%0)      \n"     \
        "       cachee %1, 0x140(%0); cachee %1, 0x150(%0)      \n"     \
        "       cachee %1, 0x160(%0); cachee %1, 0x170(%0)      \n"     \
        "       cachee %1, 0x180(%0); cachee %1, 0x190(%0)      \n"     \
        "       cachee %1, 0x1a0(%0); cachee %1, 0x1b0(%0)      \n"     \
        "       cachee %1, 0x1c0(%0); cachee %1, 0x1d0(%0)      \n"     \
        "       cachee %1, 0x1e0(%0); cachee %1, 0x1f0(%0)      \n"     \
        "       .set pop                                        \n"     \
                :                                                       \
                : "r" (base),                                           \
                  "i" (op));

#define cache32_unroll32_user(base, op)                                 \
        __asm__ __volatile__(                                           \
        "       .set push                                       \n"     \
        "       .set noreorder                                  \n"     \
        "       .set mips0                                      \n"     \
        "       .set eva                                        \n"     \
        "       cachee %1, 0x000(%0); cachee %1, 0x020(%0)      \n"     \
        "       cachee %1, 0x040(%0); cachee %1, 0x060(%0)      \n"     \
        "       cachee %1, 0x080(%0); cachee %1, 0x0a0(%0)      \n"     \
        "       cachee %1, 0x0c0(%0); cachee %1, 0x0e0(%0)      \n"     \
        "       cachee %1, 0x100(%0); cachee %1, 0x120(%0)      \n"     \
        "       cachee %1, 0x140(%0); cachee %1, 0x160(%0)      \n"     \
        "       cachee %1, 0x180(%0); cachee %1, 0x1a0(%0)      \n"     \
        "       cachee %1, 0x1c0(%0); cachee %1, 0x1e0(%0)      \n"     \
        "       cachee %1, 0x200(%0); cachee %1, 0x220(%0)      \n"     \
        "       cachee %1, 0x240(%0); cachee %1, 0x260(%0)      \n"     \
        "       cachee %1, 0x280(%0); cachee %1, 0x2a0(%0)      \n"     \
        "       cachee %1, 0x2c0(%0); cachee %1, 0x2e0(%0)      \n"     \
        "       cachee %1, 0x300(%0); cachee %1, 0x320(%0)      \n"     \
        "       cachee %1, 0x340(%0); cachee %1, 0x360(%0)      \n"     \
        "       cachee %1, 0x380(%0); cachee %1, 0x3a0(%0)      \n"     \
        "       cachee %1, 0x3c0(%0); cachee %1, 0x3e0(%0)      \n"     \
        "       .set pop                                        \n"     \
                :                                                       \
                : "r" (base),                                           \
                  "i" (op));

#define cache64_unroll32_user(base, op)                                 \
        __asm__ __volatile__(                                           \
        "       .set push                                       \n"     \
        "       .set noreorder                                  \n"     \
        "       .set mips0                                      \n"     \
        "       .set eva                                        \n"     \
        "       cachee %1, 0x000(%0); cachee %1, 0x040(%0)      \n"     \
        "       cachee %1, 0x080(%0); cachee %1, 0x0c0(%0)      \n"     \
        "       cachee %1, 0x100(%0); cachee %1, 0x140(%0)      \n"     \
        "       cachee %1, 0x180(%0); cachee %1, 0x1c0(%0)      \n"     \
        "       cachee %1, 0x200(%0); cachee %1, 0x240(%0)      \n"     \
        "       cachee %1, 0x280(%0); cachee %1, 0x2c0(%0)      \n"     \
        "       cachee %1, 0x300(%0); cachee %1, 0x340(%0)      \n"     \
        "       cachee %1, 0x380(%0); cachee %1, 0x3c0(%0)      \n"     \
        "       cachee %1, 0x400(%0); cachee %1, 0x440(%0)      \n"     \
        "       cachee %1, 0x480(%0); cachee %1, 0x4c0(%0)      \n"     \
        "       cachee %1, 0x500(%0); cachee %1, 0x540(%0)      \n"     \
        "       cachee %1, 0x580(%0); cachee %1, 0x5c0(%0)      \n"     \
        "       cachee %1, 0x600(%0); cachee %1, 0x640(%0)      \n"     \
        "       cachee %1, 0x680(%0); cachee %1, 0x6c0(%0)      \n"     \
        "       cachee %1, 0x700(%0); cachee %1, 0x740(%0)      \n"     \
        "       cachee %1, 0x780(%0); cachee %1, 0x7c0(%0)      \n"     \
        "       .set pop                                        \n"     \
                :                                                       \
                : "r" (base),                                           \
                  "i" (op));

/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra)    \
static inline void extra##blast_##pfx##cache##lsize(void)               \
{                                                                       \
        unsigned long start = INDEX_BASE;                               \
        unsigned long end = start + current_cpu_data.desc.waysize;      \
        unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;     \
        unsigned long ws_end = current_cpu_data.desc.ways <<            \
                               current_cpu_data.desc.waybit;            \
        unsigned long ws, addr;                                         \
                                                                        \
        __##pfx##flush_prologue                                         \
                                                                        \
        for (ws = 0; ws < ws_end; ws += ws_inc)                         \
                for (addr = start; addr < end; addr += lsize * 32)      \
                        cache##lsize##_unroll32(addr|ws, indexop);      \
                                                                        \
        __##pfx##flush_epilogue                                         \
}                                                                       \
                                                                        \
static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
{                                                                       \
        unsigned long start = page;                                     \
        unsigned long end = page + PAGE_SIZE;                           \
                                                                        \
        __##pfx##flush_prologue                                         \
                                                                        \
        do {                                                            \
                cache##lsize##_unroll32(start, hitop);                  \
                start += lsize * 32;                                    \
        } while (start < end);                                          \
                                                                        \
        __##pfx##flush_epilogue                                         \
}                                                                       \
                                                                        \
static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{                                                                       \
        unsigned long indexmask = current_cpu_data.desc.waysize - 1;    \
        unsigned long start = INDEX_BASE + (page & indexmask);          \
        unsigned long end = start + PAGE_SIZE;                          \
        unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;     \
        unsigned long ws_end = current_cpu_data.desc.ways <<            \
                               current_cpu_data.desc.waybit;            \
        unsigned long ws, addr;                                         \
                                                                        \
        __##pfx##flush_prologue                                         \
                                                                        \
        for (ws = 0; ws < ws_end; ws += ws_inc)                         \
                for (addr = start; addr < end; addr += lsize * 32)      \
                        cache##lsize##_unroll32(addr|ws, indexop);      \
                                                                        \
        __##pfx##flush_epilogue                                         \
}

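/*
 * The expansions below generate, for example, blast_dcache16(),
 * blast_dcache16_page() and blast_dcache16_page_indexed(), the equivalent
 * icache/scache variants for 16/32/64/128 byte lines, and the
 * loongson2_blast_icache32*() family for the Loongson-2 Hit_Invalidate_I
 * variant.
 */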
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )

__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )

#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
{                                                                       \
        unsigned long start = page;                                     \
        unsigned long end = page + PAGE_SIZE;                           \
                                                                        \
        __##pfx##flush_prologue                                         \
                                                                        \
        do {                                                            \
                cache##lsize##_unroll32_user(start, hitop);             \
                start += lsize * 32;                                    \
        } while (start < end);                                          \
                                                                        \
        __##pfx##flush_epilogue                                         \
}

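/*
 * These expand to blast_dcache16_user_page(), blast_icache16_user_page()
 * and the 32/64 byte line variants, all built on the cacheN_unroll32_user()
 * (cachee) sequences above.
 */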
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
                         16)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
                         32)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
                         64)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)

/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)        \
static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
                                                    unsigned long end)  \
{                                                                       \
        unsigned long lsize = cpu_##desc##_line_size();                 \
        unsigned long addr = start & ~(lsize - 1);                      \
        unsigned long aend = (end - 1) & ~(lsize - 1);                  \
                                                                        \
        __##pfx##flush_prologue                                         \
                                                                        \
        while (1) {                                                     \
                prot##cache_op(hitop, addr);                            \
                if (addr == aend)                                       \
                        break;                                          \
                addr += lsize;                                          \
        }                                                               \
                                                                        \
        __##pfx##flush_epilogue                                         \
}

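/*
 * Generated names follow the pattern
 * [protected_][loongson2_]blast_<pfx>cache_range(start, end), e.g.
 * protected_blast_dcache_range() or blast_inv_dcache_range().
 */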
#ifndef CONFIG_EVA

__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )

#else

#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop)                \
static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
                                                        unsigned long end) \
{                                                                       \
        unsigned long lsize = cpu_##desc##_line_size();                 \
        unsigned long addr = start & ~(lsize - 1);                      \
        unsigned long aend = (end - 1) & ~(lsize - 1);                  \
                                                                        \
        __##pfx##flush_prologue                                         \
                                                                        \
        if (segment_eq(get_fs(), USER_DS)) {                            \
                while (1) {                                             \
                        protected_cachee_op(hitop, addr);               \
                        if (addr == aend)                               \
                                break;                                  \
                        addr += lsize;                                  \
                }                                                       \
        } else {                                                        \
                while (1) {                                             \
                        protected_cache_op(hitop, addr);                \
                        if (addr == aend)                               \
                                break;                                  \
                        addr += lsize;                                  \
                }                                                       \
                                                                        \
        }                                                               \
        __##pfx##flush_epilogue                                         \
}

__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)

#endif
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
        protected_, loongson2_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )

#endif /* _ASM_R4KCACHE_H */