arch/mips/include/asm/r4kcache.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H

#include <asm/asm.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
#include <asm/uaccess.h> /* for segment_eq() */

extern void (*r4k_blast_dcache)(void);
extern void (*r4k_blast_icache)(void);

/*
 * This macro returns a properly sign-extended address suitable as a base address
 * for indexed cache operations.  Two issues here:
 *
 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 *    the index bits from the virtual address.  This breaks with tradition
 *    set by the R4000.  To keep unpleasant surprises from happening we pick
 *    an address in KSEG0 / CKSEG0.
 *  - We need a properly sign extended address for 64-bit code.  To get away
 *    without ifdefs we let the compiler do it by a type cast.
 */
#define INDEX_BASE      CKSEG0

#define cache_op(op,addr)                                               \
        __asm__ __volatile__(                                           \
        "       .set    push                                    \n"     \
        "       .set    noreorder                               \n"     \
        "       .set    arch=r4000                              \n"     \
        "       cache   %0, %1                                  \n"     \
        "       .set    pop                                     \n"     \
        :                                                               \
        : "i" (op), "R" (*(unsigned char *)(addr)))

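/*
 * Note on the constraints: "i" forces op to be a compile-time constant and
 * "R" hands *(unsigned char *)(addr) to the asm as a memory operand, so the
 * cache instruction gets a simple offset(reg) address and the compiler knows
 * that the line at addr is being referenced.  Roughly, with addr living in a
 * register $t0, cache_op(Hit_Writeback_Inv_D, addr) emits something like:
 *
 *      cache   Hit_Writeback_Inv_D, 0($t0)
 */
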
#ifdef CONFIG_MIPS_MT

/*
 * Optionally force single-threaded execution during I-cache flushes.
 */
#define PROTECT_CACHE_FLUSHES 1

#ifdef PROTECT_CACHE_FLUSHES

extern int mt_protiflush;
extern int mt_protdflush;
extern void mt_cflush_lockdown(void);
extern void mt_cflush_release(void);

#define BEGIN_MT_IPROT \
        unsigned long flags = 0;                        \
        unsigned long mtflags = 0;                      \
        if (mt_protiflush) {                            \
                local_irq_save(flags);                  \
                ehb();                                  \
                mtflags = dvpe();                       \
                mt_cflush_lockdown();                   \
        }

#define END_MT_IPROT \
        if (mt_protiflush) {                            \
                mt_cflush_release();                    \
                evpe(mtflags);                          \
                local_irq_restore(flags);               \
        }

#define BEGIN_MT_DPROT \
        unsigned long flags = 0;                        \
        unsigned long mtflags = 0;                      \
        if (mt_protdflush) {                            \
                local_irq_save(flags);                  \
                ehb();                                  \
                mtflags = dvpe();                       \
                mt_cflush_lockdown();                   \
        }

#define END_MT_DPROT \
        if (mt_protdflush) {                            \
                mt_cflush_release();                    \
                evpe(mtflags);                          \
                local_irq_restore(flags);               \
        }

#else

#define BEGIN_MT_IPROT
#define BEGIN_MT_DPROT
#define END_MT_IPROT
#define END_MT_DPROT

#endif /* PROTECT_CACHE_FLUSHES */

#define __iflush_prologue                                               \
        unsigned long redundance;                                       \
        extern int mt_n_iflushes;                                       \
        BEGIN_MT_IPROT                                                  \
        for (redundance = 0; redundance < mt_n_iflushes; redundance++) {

#define __iflush_epilogue                                               \
        END_MT_IPROT                                                    \
        }

#define __dflush_prologue                                               \
        unsigned long redundance;                                       \
        extern int mt_n_dflushes;                                       \
        BEGIN_MT_DPROT                                                  \
        for (redundance = 0; redundance < mt_n_dflushes; redundance++) {

#define __dflush_epilogue \
        END_MT_DPROT     \
        }

#define __inv_dflush_prologue __dflush_prologue
#define __inv_dflush_epilogue __dflush_epilogue
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue __sflush_prologue
#define __inv_sflush_epilogue __sflush_epilogue

#else /* CONFIG_MIPS_MT */

#define __iflush_prologue {
#define __iflush_epilogue }
#define __dflush_prologue {
#define __dflush_epilogue }
#define __inv_dflush_prologue {
#define __inv_dflush_epilogue }
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue {
#define __inv_sflush_epilogue }

#endif /* CONFIG_MIPS_MT */

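/*
 * The prologue/epilogue pairs must always be used together.  Under
 * CONFIG_MIPS_MT the prologue may lock down the other VPEs and then opens a
 * loop repeating the flush mt_n_{i,d}flushes times, and the epilogue releases
 * the VPEs and supplies the matching closing brace; without CONFIG_MIPS_MT
 * they are a plain { } pair.  Schematically, a wrapped flush such as
 * flush_dcache_line() below reduces (in the non-MT case) to:
 *
 *      static inline void flush_dcache_line(unsigned long addr)
 *      {
 *              {
 *                      cache_op(Hit_Writeback_Inv_D, addr);
 *              }
 *      }
 */
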
static inline void flush_icache_line_indexed(unsigned long addr)
{
        __iflush_prologue
        cache_op(Index_Invalidate_I, addr);
        __iflush_epilogue
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
        __dflush_prologue
        cache_op(Index_Writeback_Inv_D, addr);
        __dflush_epilogue
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
        cache_op(Index_Writeback_Inv_SD, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
        __iflush_prologue
        switch (boot_cpu_type()) {
        case CPU_LOONGSON2:
                cache_op(Hit_Invalidate_I_Loongson2, addr);
                break;

        default:
                cache_op(Hit_Invalidate_I, addr);
                break;
        }
        __iflush_epilogue
}

static inline void flush_dcache_line(unsigned long addr)
{
        __dflush_prologue
        cache_op(Hit_Writeback_Inv_D, addr);
        __dflush_epilogue
}

static inline void invalidate_dcache_line(unsigned long addr)
{
        __dflush_prologue
        cache_op(Hit_Invalidate_D, addr);
        __dflush_epilogue
}

static inline void invalidate_scache_line(unsigned long addr)
{
        cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
        cache_op(Hit_Writeback_Inv_SD, addr);
}

#define protected_cache_op(op,addr)                             \
        __asm__ __volatile__(                                   \
        "       .set    push                    \n"             \
        "       .set    noreorder               \n"             \
        "       .set    arch=r4000              \n"             \
        "1:     cache   %0, (%1)                \n"             \
        "2:     .set    pop                     \n"             \
        "       .section __ex_table,\"a\"       \n"             \
        "       "STR(PTR)" 1b, 2b               \n"             \
        "       .previous"                                      \
        :                                                       \
        : "i" (op), "r" (addr))

#define protected_cachee_op(op,addr)                            \
        __asm__ __volatile__(                                   \
        "       .set    push                    \n"             \
        "       .set    noreorder               \n"             \
        "       .set    mips0                   \n"             \
        "       .set    eva                     \n"             \
        "1:     cachee  %0, (%1)                \n"             \
        "2:     .set    pop                     \n"             \
        "       .section __ex_table,\"a\"       \n"             \
        "       "STR(PTR)" 1b, 2b               \n"             \
        "       .previous"                                      \
        :                                                       \
        : "i" (op), "r" (addr))

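/*
 * Both protected_ variants register the (possibly faulting) cache instruction
 * in the kernel exception table: the "PTR 1b, 2b" entry tells the fault
 * handler that a TLB or address error taken at label 1 should resume at
 * label 2, so an unmapped address silently turns the cache op into a no-op
 * instead of an oops.  The generated assembly is, schematically:
 *
 *      1:      cache/cachee    op, (addr)      # may fault
 *      2:      ...                             # resumes here after a fixup
 *              .section __ex_table,"a"
 *              PTR     1b, 2b
 *              .previous
 */
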
/*
 * The next two are for badland addresses like signal trampolines.
 */
static inline void protected_flush_icache_line(unsigned long addr)
{
        switch (boot_cpu_type()) {
        case CPU_LOONGSON2:
                protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
                break;

        default:
#ifdef CONFIG_EVA
                protected_cachee_op(Hit_Invalidate_I, addr);
#else
                protected_cache_op(Hit_Invalidate_I, addr);
#endif
                break;
        }
}

/*
 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
 * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
 * caches.  At worst a single cache line gets unnecessarily invalidated, so
 * the penalty isn't severe.
 */
static inline void protected_writeback_dcache_line(unsigned long addr)
{
#ifdef CONFIG_EVA
        protected_cachee_op(Hit_Writeback_Inv_D, addr);
#else
        protected_cache_op(Hit_Writeback_Inv_D, addr);
#endif
}

static inline void protected_writeback_scache_line(unsigned long addr)
{
        protected_cache_op(Hit_Writeback_Inv_SD, addr);
}

/*
 * This one is RM7000-specific
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
        cache_op(Page_Invalidate_T, addr);
}

#define cache16_unroll32(base,op)                                       \
        __asm__ __volatile__(                                           \
        "       .set push                                       \n"     \
        "       .set noreorder                                  \n"     \
        "       .set mips3                                      \n"     \
        "       cache %1, 0x000(%0); cache %1, 0x010(%0)        \n"     \
        "       cache %1, 0x020(%0); cache %1, 0x030(%0)        \n"     \
        "       cache %1, 0x040(%0); cache %1, 0x050(%0)        \n"     \
        "       cache %1, 0x060(%0); cache %1, 0x070(%0)        \n"     \
        "       cache %1, 0x080(%0); cache %1, 0x090(%0)        \n"     \
        "       cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)        \n"     \
        "       cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)        \n"     \
        "       cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)        \n"     \
        "       cache %1, 0x100(%0); cache %1, 0x110(%0)        \n"     \
        "       cache %1, 0x120(%0); cache %1, 0x130(%0)        \n"     \
        "       cache %1, 0x140(%0); cache %1, 0x150(%0)        \n"     \
        "       cache %1, 0x160(%0); cache %1, 0x170(%0)        \n"     \
        "       cache %1, 0x180(%0); cache %1, 0x190(%0)        \n"     \
        "       cache %1, 0x1a0(%0); cache %1, 0x1b0(%0)        \n"     \
        "       cache %1, 0x1c0(%0); cache %1, 0x1d0(%0)        \n"     \
        "       cache %1, 0x1e0(%0); cache %1, 0x1f0(%0)        \n"     \
        "       .set pop                                        \n"     \
                :                                                       \
                : "r" (base),                                           \
                  "i" (op));

#define cache32_unroll32(base,op)                                       \
        __asm__ __volatile__(                                           \
        "       .set push                                       \n"     \
        "       .set noreorder                                  \n"     \
        "       .set mips3                                      \n"     \
        "       cache %1, 0x000(%0); cache %1, 0x020(%0)        \n"     \
        "       cache %1, 0x040(%0); cache %1, 0x060(%0)        \n"     \
        "       cache %1, 0x080(%0); cache %1, 0x0a0(%0)        \n"     \
        "       cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)        \n"     \
        "       cache %1, 0x100(%0); cache %1, 0x120(%0)        \n"     \
        "       cache %1, 0x140(%0); cache %1, 0x160(%0)        \n"     \
        "       cache %1, 0x180(%0); cache %1, 0x1a0(%0)        \n"     \
        "       cache %1, 0x1c0(%0); cache %1, 0x1e0(%0)        \n"     \
        "       cache %1, 0x200(%0); cache %1, 0x220(%0)        \n"     \
        "       cache %1, 0x240(%0); cache %1, 0x260(%0)        \n"     \
        "       cache %1, 0x280(%0); cache %1, 0x2a0(%0)        \n"     \
        "       cache %1, 0x2c0(%0); cache %1, 0x2e0(%0)        \n"     \
        "       cache %1, 0x300(%0); cache %1, 0x320(%0)        \n"     \
        "       cache %1, 0x340(%0); cache %1, 0x360(%0)        \n"     \
        "       cache %1, 0x380(%0); cache %1, 0x3a0(%0)        \n"     \
        "       cache %1, 0x3c0(%0); cache %1, 0x3e0(%0)        \n"     \
        "       .set pop                                        \n"     \
                :                                                       \
                : "r" (base),                                           \
                  "i" (op));

#define cache64_unroll32(base,op)                                       \
        __asm__ __volatile__(                                           \
        "       .set push                                       \n"     \
        "       .set noreorder                                  \n"     \
        "       .set mips3                                      \n"     \
        "       cache %1, 0x000(%0); cache %1, 0x040(%0)        \n"     \
        "       cache %1, 0x080(%0); cache %1, 0x0c0(%0)        \n"     \
        "       cache %1, 0x100(%0); cache %1, 0x140(%0)        \n"     \
        "       cache %1, 0x180(%0); cache %1, 0x1c0(%0)        \n"     \
        "       cache %1, 0x200(%0); cache %1, 0x240(%0)        \n"     \
        "       cache %1, 0x280(%0); cache %1, 0x2c0(%0)        \n"     \
        "       cache %1, 0x300(%0); cache %1, 0x340(%0)        \n"     \
        "       cache %1, 0x380(%0); cache %1, 0x3c0(%0)        \n"     \
        "       cache %1, 0x400(%0); cache %1, 0x440(%0)        \n"     \
        "       cache %1, 0x480(%0); cache %1, 0x4c0(%0)        \n"     \
        "       cache %1, 0x500(%0); cache %1, 0x540(%0)        \n"     \
        "       cache %1, 0x580(%0); cache %1, 0x5c0(%0)        \n"     \
        "       cache %1, 0x600(%0); cache %1, 0x640(%0)        \n"     \
        "       cache %1, 0x680(%0); cache %1, 0x6c0(%0)        \n"     \
        "       cache %1, 0x700(%0); cache %1, 0x740(%0)        \n"     \
        "       cache %1, 0x780(%0); cache %1, 0x7c0(%0)        \n"     \
        "       .set pop                                        \n"     \
                :                                                       \
                : "r" (base),                                           \
                  "i" (op));

#define cache128_unroll32(base,op)                                      \
        __asm__ __volatile__(                                           \
        "       .set push                                       \n"     \
        "       .set noreorder                                  \n"     \
        "       .set mips3                                      \n"     \
        "       cache %1, 0x000(%0); cache %1, 0x080(%0)        \n"     \
        "       cache %1, 0x100(%0); cache %1, 0x180(%0)        \n"     \
        "       cache %1, 0x200(%0); cache %1, 0x280(%0)        \n"     \
        "       cache %1, 0x300(%0); cache %1, 0x380(%0)        \n"     \
        "       cache %1, 0x400(%0); cache %1, 0x480(%0)        \n"     \
        "       cache %1, 0x500(%0); cache %1, 0x580(%0)        \n"     \
        "       cache %1, 0x600(%0); cache %1, 0x680(%0)        \n"     \
        "       cache %1, 0x700(%0); cache %1, 0x780(%0)        \n"     \
        "       cache %1, 0x800(%0); cache %1, 0x880(%0)        \n"     \
        "       cache %1, 0x900(%0); cache %1, 0x980(%0)        \n"     \
        "       cache %1, 0xa00(%0); cache %1, 0xa80(%0)        \n"     \
        "       cache %1, 0xb00(%0); cache %1, 0xb80(%0)        \n"     \
        "       cache %1, 0xc00(%0); cache %1, 0xc80(%0)        \n"     \
        "       cache %1, 0xd00(%0); cache %1, 0xd80(%0)        \n"     \
        "       cache %1, 0xe00(%0); cache %1, 0xe80(%0)        \n"     \
        "       cache %1, 0xf00(%0); cache %1, 0xf80(%0)        \n"     \
        "       .set pop                                        \n"     \
                :                                                       \
                : "r" (base),                                           \
                  "i" (op));

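/*
 * Each cacheN_unroll32 macro issues 32 cache operations at consecutive
 * N-byte line offsets, i.e. one invocation covers N * 32 bytes starting at
 * base (512 bytes for 16-byte lines, up to 4 KiB for 128-byte lines).  This
 * is why the blast loops below step by "lsize * 32"; with 32-byte lines, for
 * example, each iteration handles 1 KiB:
 *
 *      cache32_unroll32(addr, Hit_Writeback_Inv_D);
 *      addr += 32 * 32;
 */
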
/*
 * Perform the cache operation specified by op using a user mode virtual
 * address while in kernel mode.
 */
#define cache16_unroll32_user(base,op)                                  \
        __asm__ __volatile__(                                           \
        "       .set push                                       \n"     \
        "       .set noreorder                                  \n"     \
        "       .set mips0                                      \n"     \
        "       .set eva                                        \n"     \
        "       cachee %1, 0x000(%0); cachee %1, 0x010(%0)      \n"     \
        "       cachee %1, 0x020(%0); cachee %1, 0x030(%0)      \n"     \
        "       cachee %1, 0x040(%0); cachee %1, 0x050(%0)      \n"     \
        "       cachee %1, 0x060(%0); cachee %1, 0x070(%0)      \n"     \
        "       cachee %1, 0x080(%0); cachee %1, 0x090(%0)      \n"     \
        "       cachee %1, 0x0a0(%0); cachee %1, 0x0b0(%0)      \n"     \
        "       cachee %1, 0x0c0(%0); cachee %1, 0x0d0(%0)      \n"     \
        "       cachee %1, 0x0e0(%0); cachee %1, 0x0f0(%0)      \n"     \
        "       cachee %1, 0x100(%0); cachee %1, 0x110(%0)      \n"     \
        "       cachee %1, 0x120(%0); cachee %1, 0x130(%0)      \n"     \
        "       cachee %1, 0x140(%0); cachee %1, 0x150(%0)      \n"     \
        "       cachee %1, 0x160(%0); cachee %1, 0x170(%0)      \n"     \
        "       cachee %1, 0x180(%0); cachee %1, 0x190(%0)      \n"     \
        "       cachee %1, 0x1a0(%0); cachee %1, 0x1b0(%0)      \n"     \
        "       cachee %1, 0x1c0(%0); cachee %1, 0x1d0(%0)      \n"     \
        "       cachee %1, 0x1e0(%0); cachee %1, 0x1f0(%0)      \n"     \
        "       .set pop                                        \n"     \
                :                                                       \
                : "r" (base),                                           \
                  "i" (op));

#define cache32_unroll32_user(base, op)                                 \
        __asm__ __volatile__(                                           \
        "       .set push                                       \n"     \
        "       .set noreorder                                  \n"     \
        "       .set mips0                                      \n"     \
        "       .set eva                                        \n"     \
        "       cachee %1, 0x000(%0); cachee %1, 0x020(%0)      \n"     \
        "       cachee %1, 0x040(%0); cachee %1, 0x060(%0)      \n"     \
        "       cachee %1, 0x080(%0); cachee %1, 0x0a0(%0)      \n"     \
        "       cachee %1, 0x0c0(%0); cachee %1, 0x0e0(%0)      \n"     \
        "       cachee %1, 0x100(%0); cachee %1, 0x120(%0)      \n"     \
        "       cachee %1, 0x140(%0); cachee %1, 0x160(%0)      \n"     \
        "       cachee %1, 0x180(%0); cachee %1, 0x1a0(%0)      \n"     \
        "       cachee %1, 0x1c0(%0); cachee %1, 0x1e0(%0)      \n"     \
        "       cachee %1, 0x200(%0); cachee %1, 0x220(%0)      \n"     \
        "       cachee %1, 0x240(%0); cachee %1, 0x260(%0)      \n"     \
        "       cachee %1, 0x280(%0); cachee %1, 0x2a0(%0)      \n"     \
        "       cachee %1, 0x2c0(%0); cachee %1, 0x2e0(%0)      \n"     \
        "       cachee %1, 0x300(%0); cachee %1, 0x320(%0)      \n"     \
        "       cachee %1, 0x340(%0); cachee %1, 0x360(%0)      \n"     \
        "       cachee %1, 0x380(%0); cachee %1, 0x3a0(%0)      \n"     \
        "       cachee %1, 0x3c0(%0); cachee %1, 0x3e0(%0)      \n"     \
        "       .set pop                                        \n"     \
                :                                                       \
                : "r" (base),                                           \
                  "i" (op));

#define cache64_unroll32_user(base, op)                                 \
        __asm__ __volatile__(                                           \
        "       .set push                                       \n"     \
        "       .set noreorder                                  \n"     \
        "       .set mips0                                      \n"     \
        "       .set eva                                        \n"     \
        "       cachee %1, 0x000(%0); cachee %1, 0x040(%0)      \n"     \
        "       cachee %1, 0x080(%0); cachee %1, 0x0c0(%0)      \n"     \
        "       cachee %1, 0x100(%0); cachee %1, 0x140(%0)      \n"     \
        "       cachee %1, 0x180(%0); cachee %1, 0x1c0(%0)      \n"     \
        "       cachee %1, 0x200(%0); cachee %1, 0x240(%0)      \n"     \
        "       cachee %1, 0x280(%0); cachee %1, 0x2c0(%0)      \n"     \
        "       cachee %1, 0x300(%0); cachee %1, 0x340(%0)      \n"     \
        "       cachee %1, 0x380(%0); cachee %1, 0x3c0(%0)      \n"     \
        "       cachee %1, 0x400(%0); cachee %1, 0x440(%0)      \n"     \
        "       cachee %1, 0x480(%0); cachee %1, 0x4c0(%0)      \n"     \
        "       cachee %1, 0x500(%0); cachee %1, 0x540(%0)      \n"     \
        "       cachee %1, 0x580(%0); cachee %1, 0x5c0(%0)      \n"     \
        "       cachee %1, 0x600(%0); cachee %1, 0x640(%0)      \n"     \
        "       cachee %1, 0x680(%0); cachee %1, 0x6c0(%0)      \n"     \
        "       cachee %1, 0x700(%0); cachee %1, 0x740(%0)      \n"     \
        "       cachee %1, 0x780(%0); cachee %1, 0x7c0(%0)      \n"     \
        "       .set pop                                        \n"     \
                :                                                       \
                : "r" (base),                                           \
                  "i" (op));

/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra)    \
static inline void extra##blast_##pfx##cache##lsize(void)               \
{                                                                       \
        unsigned long start = INDEX_BASE;                               \
        unsigned long end = start + current_cpu_data.desc.waysize;      \
        unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;     \
        unsigned long ws_end = current_cpu_data.desc.ways <<            \
                               current_cpu_data.desc.waybit;            \
        unsigned long ws, addr;                                         \
                                                                        \
        __##pfx##flush_prologue                                         \
                                                                        \
        for (ws = 0; ws < ws_end; ws += ws_inc)                         \
                for (addr = start; addr < end; addr += lsize * 32)      \
                        cache##lsize##_unroll32(addr|ws, indexop);      \
                                                                        \
        __##pfx##flush_epilogue                                         \
}                                                                       \
                                                                        \
static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
{                                                                       \
        unsigned long start = page;                                     \
        unsigned long end = page + PAGE_SIZE;                           \
                                                                        \
        __##pfx##flush_prologue                                         \
                                                                        \
        do {                                                            \
                cache##lsize##_unroll32(start, hitop);                  \
                start += lsize * 32;                                    \
        } while (start < end);                                          \
                                                                        \
        __##pfx##flush_epilogue                                         \
}                                                                       \
                                                                        \
static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{                                                                       \
        unsigned long indexmask = current_cpu_data.desc.waysize - 1;    \
        unsigned long start = INDEX_BASE + (page & indexmask);          \
        unsigned long end = start + PAGE_SIZE;                          \
        unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;     \
        unsigned long ws_end = current_cpu_data.desc.ways <<            \
                               current_cpu_data.desc.waybit;            \
        unsigned long ws, addr;                                         \
                                                                        \
        __##pfx##flush_prologue                                         \
                                                                        \
        for (ws = 0; ws < ws_end; ws += ws_inc)                         \
                for (addr = start; addr < end; addr += lsize * 32)      \
                        cache##lsize##_unroll32(addr|ws, indexop);      \
                                                                        \
        __##pfx##flush_epilogue                                         \
}

__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )

__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )

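/*
 * Each __BUILD_BLAST_CACHE() line above emits three helpers named
 * [extra]blast_<pfx>cache<lsize>, ..._page and ..._page_indexed.  The
 * (d, dcache, ..., 32, ) instantiation, for instance, provides roughly:
 *
 *      static inline void blast_dcache32(void);
 *      static inline void blast_dcache32_page(unsigned long page)
 *      {
 *              unsigned long start = page;
 *              unsigned long end = page + PAGE_SIZE;
 *
 *              do {
 *                      cache32_unroll32(start, Hit_Writeback_Inv_D);
 *                      start += 32 * 32;
 *              } while (start < end);
 *      }
 *      static inline void blast_dcache32_page_indexed(unsigned long page);
 *
 * while the loongson2_ variant yields loongson2_blast_icache32() and friends.
 */
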
#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
{                                                                       \
        unsigned long start = page;                                     \
        unsigned long end = page + PAGE_SIZE;                           \
                                                                        \
        __##pfx##flush_prologue                                         \
                                                                        \
        do {                                                            \
                cache##lsize##_unroll32_user(start, hitop);             \
                start += lsize * 32;                                    \
        } while (start < end);                                          \
                                                                        \
        __##pfx##flush_epilogue                                         \
}

__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
                         16)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
                         32)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
                         64)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)

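/*
 * These follow the same pattern but generate blast_<pfx>cache<lsize>_user_page()
 * helpers (e.g. blast_dcache32_user_page(page)) built on the cachee-based
 * cacheN_unroll32_user sequences, so the page argument may be a user mode
 * virtual address when the kernel runs with EVA.
 */
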
/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)        \
static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
                                                    unsigned long end)  \
{                                                                       \
        unsigned long lsize = cpu_##desc##_line_size();                 \
        unsigned long addr = start & ~(lsize - 1);                      \
        unsigned long aend = (end - 1) & ~(lsize - 1);                  \
                                                                        \
        __##pfx##flush_prologue                                         \
                                                                        \
        while (1) {                                                     \
                prot##cache_op(hitop, addr);                            \
                if (addr == aend)                                       \
                        break;                                          \
                addr += lsize;                                          \
        }                                                               \
                                                                        \
        __##pfx##flush_epilogue                                         \
}

#ifndef CONFIG_EVA

__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )

#else

#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop)                \
static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
                                                        unsigned long end) \
{                                                                       \
        unsigned long lsize = cpu_##desc##_line_size();                 \
        unsigned long addr = start & ~(lsize - 1);                      \
        unsigned long aend = (end - 1) & ~(lsize - 1);                  \
                                                                        \
        __##pfx##flush_prologue                                         \
                                                                        \
        if (segment_eq(get_fs(), USER_DS)) {                            \
                while (1) {                                             \
                        protected_cachee_op(hitop, addr);               \
                        if (addr == aend)                               \
                                break;                                  \
                        addr += lsize;                                  \
                }                                                       \
        } else {                                                        \
                while (1) {                                             \
                        protected_cache_op(hitop, addr);                \
                        if (addr == aend)                               \
                                break;                                  \
                        addr += lsize;                                  \
                }                                                       \
                                                                        \
        }                                                               \
        __##pfx##flush_epilogue                                         \
}

__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)

#endif
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
        protected_, loongson2_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )

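/*
 * The instantiations above thus provide, among others, blast_dcache_range(),
 * blast_inv_dcache_range(), protected_blast_dcache_range() and
 * protected_blast_icache_range().  A caller writes back an arbitrary byte
 * range with, for example:
 *
 *      blast_dcache_range(start, start + size);
 *
 * the helper itself rounds start and end down to cache line boundaries.
 */
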
#endif /* _ASM_R4KCACHE_H */