/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H

#include <linux/stringify.h>

#include <asm/asm.h>
#include <asm/cacheops.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
#include <asm/uaccess.h> /* for segment_eq() */

extern void (*r4k_blast_dcache)(void);
extern void (*r4k_blast_icache)(void);

/*
 * This macro returns a properly sign-extended address suitable as a base
 * address for indexed cache operations.  Two issues here:
 *
 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 *    the index bits from the virtual address.  This breaks with the tradition
 *    set by the R4000.  To keep unpleasant surprises from happening we pick
 *    an address in KSEG0 / CKSEG0.
 *  - We need a properly sign extended address for 64-bit code.  To get away
 *    without ifdefs we let the compiler do it by a type cast.
 */
#define INDEX_BASE	CKSEG0

#define cache_op(op,addr)						\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	noreorder				\n"	\
	"	.set "MIPS_ISA_ARCH_LEVEL"			\n"	\
	"	cache	%0, %1					\n"	\
	"	.set	pop					\n"	\
	:								\
	: "i" (op), "R" (*(unsigned char *)(addr)))

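/*
 * Usage sketch (hypothetical helper, for illustration only): issue an
 * indexed invalidate on the first I-cache line.  Starting from
 * INDEX_BASE gives a properly sign-extended KSEG0 address, so the
 * index bits the CPU derives from it are well defined.
 */
static inline void __example_index_invalidate_first_iline(void)
{
	cache_op(Index_Invalidate_I, INDEX_BASE);
}
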
#ifdef CONFIG_MIPS_MT

#define __iflush_prologue						\
	unsigned long redundance;					\
	extern int mt_n_iflushes;					\
	for (redundance = 0; redundance < mt_n_iflushes; redundance++) {

#define __iflush_epilogue						\
	}

#define __dflush_prologue						\
	unsigned long redundance;					\
	extern int mt_n_dflushes;					\
	for (redundance = 0; redundance < mt_n_dflushes; redundance++) {

#define __dflush_epilogue \
	}

#define __inv_dflush_prologue __dflush_prologue
#define __inv_dflush_epilogue __dflush_epilogue
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue __sflush_prologue
#define __inv_sflush_epilogue __sflush_epilogue

#else /* CONFIG_MIPS_MT */

#define __iflush_prologue {
#define __iflush_epilogue }
#define __dflush_prologue {
#define __dflush_epilogue }
#define __inv_dflush_prologue {
#define __inv_dflush_epilogue }
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue {
#define __inv_sflush_epilogue }

#endif /* CONFIG_MIPS_MT */
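
/*
 * Expansion sketch: with CONFIG_MIPS_MT the prologue/epilogue pair
 * repeats the flush body mt_n_dflushes times, i.e. a function such as
 * flush_dcache_line() below effectively becomes
 *
 *	unsigned long redundance;
 *	extern int mt_n_dflushes;
 *	for (redundance = 0; redundance < mt_n_dflushes; redundance++) {
 *		cache_op(Hit_Writeback_Inv_D, addr);
 *	}
 *
 * Without CONFIG_MIPS_MT the pair degenerates to a plain { ... } block.
 */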

static inline void flush_icache_line_indexed(unsigned long addr)
{
	__iflush_prologue
	cache_op(Index_Invalidate_I, addr);
	__iflush_epilogue
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	__dflush_prologue
	cache_op(Index_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_SD, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
	__iflush_prologue
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2:
		cache_op(Hit_Invalidate_I_Loongson2, addr);
		break;

	default:
		cache_op(Hit_Invalidate_I, addr);
		break;
	}
	__iflush_epilogue
}

static inline void flush_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void invalidate_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Invalidate_D, addr);
	__dflush_epilogue
}

static inline void invalidate_scache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_SD, addr);
}

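/*
 * The helpers above come in two flavours: "Index_" ops address a
 * specific line and way by cache index (hence the use of INDEX_BASE),
 * while "Hit_" ops act only if the given virtual address is currently
 * present in the cache.
 */
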
#define protected_cache_op(op,addr)				\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set "MIPS_ISA_ARCH_LEVEL"	\n"		\
	"1:	cache	%0, (%1)		\n"		\
	"2:	.set	pop			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 2b		\n"		\
	"	.previous"					\
	:							\
	: "i" (op), "r" (addr))

#define protected_cachee_op(op,addr)				\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set	mips0			\n"		\
	"	.set	eva			\n"		\
	"1:	cachee	%0, (%1)		\n"		\
	"2:	.set	pop			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 2b		\n"		\
	"	.previous"					\
	:							\
	: "i" (op), "r" (addr))

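/*
 * Note on the 1b/2b labels: the __ex_table entry pairs the address of
 * the cache instruction (1:) with a fixup address (2:).  If the op
 * faults, e.g. on an unmapped user address, the exception handler
 * resumes at 2: instead of oopsing, so callers may safely touch
 * addresses that might not be mapped.
 */
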
/*
 * The next two are for badland addresses like signal trampolines.
 */
static inline void protected_flush_icache_line(unsigned long addr)
{
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2:
		protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
		break;

	default:
#ifdef CONFIG_EVA
		protected_cachee_op(Hit_Invalidate_I, addr);
#else
		protected_cache_op(Hit_Invalidate_I, addr);
#endif
		break;
	}
}

/*
 * R10000 / R12000 hazard - these processors don't support the
 * Hit_Writeback_D cacheop so we use Hit_Writeback_Inv_D which is
 * supported by all R4000-style caches.  At worst one cache line gets
 * invalidated unnecessarily, so the penalty is minor.
 */
static inline void protected_writeback_dcache_line(unsigned long addr)
{
#ifdef CONFIG_EVA
	protected_cachee_op(Hit_Writeback_Inv_D, addr);
#else
	protected_cache_op(Hit_Writeback_Inv_D, addr);
#endif
}

static inline void protected_writeback_scache_line(unsigned long addr)
{
#ifdef CONFIG_EVA
	protected_cachee_op(Hit_Writeback_Inv_SD, addr);
#else
	protected_cache_op(Hit_Writeback_Inv_SD, addr);
#endif
}

/*
 * This one is RM7000-specific
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
	cache_op(Page_Invalidate_T, addr);
}

#ifndef CONFIG_CPU_MIPSR6
#define cache16_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x010(%0)	\n"	\
	"	cache %1, 0x020(%0); cache %1, 0x030(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x050(%0)	\n"	\
	"	cache %1, 0x060(%0); cache %1, 0x070(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x090(%0)	\n"	\
	"	cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)	\n"	\
	"	cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x110(%0)	\n"	\
	"	cache %1, 0x120(%0); cache %1, 0x130(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x150(%0)	\n"	\
	"	cache %1, 0x160(%0); cache %1, 0x170(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x190(%0)	\n"	\
	"	cache %1, 0x1a0(%0); cache %1, 0x1b0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1d0(%0)	\n"	\
	"	cache %1, 0x1e0(%0); cache %1, 0x1f0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache32_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x020(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x060(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0a0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x120(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x160(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x1a0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1e0(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x220(%0)	\n"	\
	"	cache %1, 0x240(%0); cache %1, 0x260(%0)	\n"	\
	"	cache %1, 0x280(%0); cache %1, 0x2a0(%0)	\n"	\
	"	cache %1, 0x2c0(%0); cache %1, 0x2e0(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x320(%0)	\n"	\
	"	cache %1, 0x340(%0); cache %1, 0x360(%0)	\n"	\
	"	cache %1, 0x380(%0); cache %1, 0x3a0(%0)	\n"	\
	"	cache %1, 0x3c0(%0); cache %1, 0x3e0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache64_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x040(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0c0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x140(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x1c0(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x240(%0)	\n"	\
	"	cache %1, 0x280(%0); cache %1, 0x2c0(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x340(%0)	\n"	\
	"	cache %1, 0x380(%0); cache %1, 0x3c0(%0)	\n"	\
	"	cache %1, 0x400(%0); cache %1, 0x440(%0)	\n"	\
	"	cache %1, 0x480(%0); cache %1, 0x4c0(%0)	\n"	\
	"	cache %1, 0x500(%0); cache %1, 0x540(%0)	\n"	\
	"	cache %1, 0x580(%0); cache %1, 0x5c0(%0)	\n"	\
	"	cache %1, 0x600(%0); cache %1, 0x640(%0)	\n"	\
	"	cache %1, 0x680(%0); cache %1, 0x6c0(%0)	\n"	\
	"	cache %1, 0x700(%0); cache %1, 0x740(%0)	\n"	\
	"	cache %1, 0x780(%0); cache %1, 0x7c0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache128_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x080(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x180(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x280(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x380(%0)	\n"	\
	"	cache %1, 0x400(%0); cache %1, 0x480(%0)	\n"	\
	"	cache %1, 0x500(%0); cache %1, 0x580(%0)	\n"	\
	"	cache %1, 0x600(%0); cache %1, 0x680(%0)	\n"	\
	"	cache %1, 0x700(%0); cache %1, 0x780(%0)	\n"	\
	"	cache %1, 0x800(%0); cache %1, 0x880(%0)	\n"	\
	"	cache %1, 0x900(%0); cache %1, 0x980(%0)	\n"	\
	"	cache %1, 0xa00(%0); cache %1, 0xa80(%0)	\n"	\
	"	cache %1, 0xb00(%0); cache %1, 0xb80(%0)	\n"	\
	"	cache %1, 0xc00(%0); cache %1, 0xc80(%0)	\n"	\
	"	cache %1, 0xd00(%0); cache %1, 0xd80(%0)	\n"	\
	"	cache %1, 0xe00(%0); cache %1, 0xe80(%0)	\n"	\
	"	cache %1, 0xf00(%0); cache %1, 0xf80(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#else
/*
 * MIPS R6 changed the cache opcode and moved to an 8-bit offset field.
 * This means we now need to increment the base register before we can
 * flush more cache lines.
 */
#define cache16_unroll32(base,op)				\
	__asm__ __volatile__(					\
	"	.set push\n"					\
	"	.set noreorder\n"				\
	"	.set mips64r6\n"				\
	"	.set noat\n"					\
	"	cache %1, 0x000(%0); cache %1, 0x010(%0)\n"	\
	"	cache %1, 0x020(%0); cache %1, 0x030(%0)\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x050(%0)\n"	\
	"	cache %1, 0x060(%0); cache %1, 0x070(%0)\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x090(%0)\n"	\
	"	cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n"	\
	"	cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, %0, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x010($1)\n"	\
	"	cache %1, 0x020($1); cache %1, 0x030($1)\n"	\
	"	cache %1, 0x040($1); cache %1, 0x050($1)\n"	\
	"	cache %1, 0x060($1); cache %1, 0x070($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x090($1)\n"	\
	"	cache %1, 0x0a0($1); cache %1, 0x0b0($1)\n"	\
	"	cache %1, 0x0c0($1); cache %1, 0x0d0($1)\n"	\
	"	cache %1, 0x0e0($1); cache %1, 0x0f0($1)\n"	\
	"	.set pop\n"					\
		:						\
		: "r" (base),					\
		  "i" (op));

#define cache32_unroll32(base,op)				\
	__asm__ __volatile__(					\
	"	.set push\n"					\
	"	.set noreorder\n"				\
	"	.set mips64r6\n"				\
	"	.set noat\n"					\
	"	cache %1, 0x000(%0); cache %1, 0x020(%0)\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x060(%0)\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, %0, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x020($1)\n"	\
	"	cache %1, 0x040($1); cache %1, 0x060($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0a0($1)\n"	\
	"	cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x020($1)\n"	\
	"	cache %1, 0x040($1); cache %1, 0x060($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0a0($1)\n"	\
	"	cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x020($1)\n"	\
	"	cache %1, 0x040($1); cache %1, 0x060($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0a0($1)\n"	\
	"	cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"	\
	"	.set pop\n"					\
		:						\
		: "r" (base),					\
		  "i" (op));

#define cache64_unroll32(base,op)				\
	__asm__ __volatile__(					\
	"	.set push\n"					\
	"	.set noreorder\n"				\
	"	.set mips64r6\n"				\
	"	.set noat\n"					\
	"	cache %1, 0x000(%0); cache %1, 0x040(%0)\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, %0, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	.set pop\n"					\
		:						\
		: "r" (base),					\
		  "i" (op));

#define cache128_unroll32(base,op)				\
	__asm__ __volatile__(					\
	"	.set push\n"					\
	"	.set noreorder\n"				\
	"	.set mips64r6\n"				\
	"	.set noat\n"					\
	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, %0, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	"__stringify(LONG_ADDIU)" $1, $1, 0x100 \n"	\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	.set pop\n"					\
		:						\
		: "r" (base),					\
		  "i" (op));
#endif /* CONFIG_CPU_MIPSR6 */
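
/*
 * Semantically, each cacheN_unroll32(base, op) above performs 32 cache
 * ops on consecutive N-byte lines, covering N * 32 bytes starting at
 * base.  A minimal C sketch of what cache64_unroll32(base,
 * Hit_Writeback_Inv_D) does (hypothetical helper, written as a loop
 * purely for illustration; the real macros are unrolled for speed):
 */
static inline void __example_dcache64_unroll32(unsigned long base)
{
	unsigned long addr;

	for (addr = base; addr < base + 64 * 32; addr += 64)
		cache_op(Hit_Writeback_Inv_D, addr);
}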

/*
 * Perform the cache operation specified by op using a user mode virtual
 * address while in kernel mode.
 */
#define cache16_unroll32_user(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips0					\n"	\
	"	.set eva					\n"	\
	"	cachee %1, 0x000(%0); cachee %1, 0x010(%0)	\n"	\
	"	cachee %1, 0x020(%0); cachee %1, 0x030(%0)	\n"	\
	"	cachee %1, 0x040(%0); cachee %1, 0x050(%0)	\n"	\
	"	cachee %1, 0x060(%0); cachee %1, 0x070(%0)	\n"	\
	"	cachee %1, 0x080(%0); cachee %1, 0x090(%0)	\n"	\
	"	cachee %1, 0x0a0(%0); cachee %1, 0x0b0(%0)	\n"	\
	"	cachee %1, 0x0c0(%0); cachee %1, 0x0d0(%0)	\n"	\
	"	cachee %1, 0x0e0(%0); cachee %1, 0x0f0(%0)	\n"	\
	"	cachee %1, 0x100(%0); cachee %1, 0x110(%0)	\n"	\
	"	cachee %1, 0x120(%0); cachee %1, 0x130(%0)	\n"	\
	"	cachee %1, 0x140(%0); cachee %1, 0x150(%0)	\n"	\
	"	cachee %1, 0x160(%0); cachee %1, 0x170(%0)	\n"	\
	"	cachee %1, 0x180(%0); cachee %1, 0x190(%0)	\n"	\
	"	cachee %1, 0x1a0(%0); cachee %1, 0x1b0(%0)	\n"	\
	"	cachee %1, 0x1c0(%0); cachee %1, 0x1d0(%0)	\n"	\
	"	cachee %1, 0x1e0(%0); cachee %1, 0x1f0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache32_unroll32_user(base, op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips0					\n"	\
	"	.set eva					\n"	\
	"	cachee %1, 0x000(%0); cachee %1, 0x020(%0)	\n"	\
	"	cachee %1, 0x040(%0); cachee %1, 0x060(%0)	\n"	\
	"	cachee %1, 0x080(%0); cachee %1, 0x0a0(%0)	\n"	\
	"	cachee %1, 0x0c0(%0); cachee %1, 0x0e0(%0)	\n"	\
	"	cachee %1, 0x100(%0); cachee %1, 0x120(%0)	\n"	\
	"	cachee %1, 0x140(%0); cachee %1, 0x160(%0)	\n"	\
	"	cachee %1, 0x180(%0); cachee %1, 0x1a0(%0)	\n"	\
	"	cachee %1, 0x1c0(%0); cachee %1, 0x1e0(%0)	\n"	\
	"	cachee %1, 0x200(%0); cachee %1, 0x220(%0)	\n"	\
	"	cachee %1, 0x240(%0); cachee %1, 0x260(%0)	\n"	\
	"	cachee %1, 0x280(%0); cachee %1, 0x2a0(%0)	\n"	\
	"	cachee %1, 0x2c0(%0); cachee %1, 0x2e0(%0)	\n"	\
	"	cachee %1, 0x300(%0); cachee %1, 0x320(%0)	\n"	\
	"	cachee %1, 0x340(%0); cachee %1, 0x360(%0)	\n"	\
	"	cachee %1, 0x380(%0); cachee %1, 0x3a0(%0)	\n"	\
	"	cachee %1, 0x3c0(%0); cachee %1, 0x3e0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache64_unroll32_user(base, op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips0					\n"	\
	"	.set eva					\n"	\
	"	cachee %1, 0x000(%0); cachee %1, 0x040(%0)	\n"	\
	"	cachee %1, 0x080(%0); cachee %1, 0x0c0(%0)	\n"	\
	"	cachee %1, 0x100(%0); cachee %1, 0x140(%0)	\n"	\
	"	cachee %1, 0x180(%0); cachee %1, 0x1c0(%0)	\n"	\
	"	cachee %1, 0x200(%0); cachee %1, 0x240(%0)	\n"	\
	"	cachee %1, 0x280(%0); cachee %1, 0x2c0(%0)	\n"	\
	"	cachee %1, 0x300(%0); cachee %1, 0x340(%0)	\n"	\
	"	cachee %1, 0x380(%0); cachee %1, 0x3c0(%0)	\n"	\
	"	cachee %1, 0x400(%0); cachee %1, 0x440(%0)	\n"	\
	"	cachee %1, 0x480(%0); cachee %1, 0x4c0(%0)	\n"	\
	"	cachee %1, 0x500(%0); cachee %1, 0x540(%0)	\n"	\
	"	cachee %1, 0x580(%0); cachee %1, 0x5c0(%0)	\n"	\
	"	cachee %1, 0x600(%0); cachee %1, 0x640(%0)	\n"	\
	"	cachee %1, 0x680(%0); cachee %1, 0x6c0(%0)	\n"	\
	"	cachee %1, 0x700(%0); cachee %1, 0x740(%0)	\n"	\
	"	cachee %1, 0x780(%0); cachee %1, 0x7c0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra)	\
static inline void extra##blast_##pfx##cache##lsize(void)		\
{									\
	unsigned long start = INDEX_BASE;				\
	unsigned long end = start + current_cpu_data.desc.waysize;	\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	__##pfx##flush_prologue						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache##lsize##_unroll32(addr|ws, indexop);	\
									\
	__##pfx##flush_epilogue						\
}									\
									\
static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	__##pfx##flush_prologue						\
									\
	do {								\
		cache##lsize##_unroll32(start, hitop);			\
		start += lsize * 32;					\
	} while (start < end);						\
									\
	__##pfx##flush_epilogue						\
}									\
									\
static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{									\
	unsigned long indexmask = current_cpu_data.desc.waysize - 1;	\
	unsigned long start = INDEX_BASE + (page & indexmask);		\
	unsigned long end = start + PAGE_SIZE;				\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	__##pfx##flush_prologue						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache##lsize##_unroll32(addr|ws, indexop);	\
									\
	__##pfx##flush_epilogue						\
}

__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )

__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )

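/*
 * For reference, the (d, dcache, ..., 32, ) instantiation above
 * generates
 *
 *	blast_dcache32();			whole D-cache, by index
 *	blast_dcache32_page(page);		one page, by hit ops
 *	blast_dcache32_page_indexed(page);	one page, by index ops
 *
 * and the loongson2_ extra argument simply prefixes the generated
 * names, e.g. loongson2_blast_icache32().
 */
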
#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	__##pfx##flush_prologue						\
									\
	do {								\
		cache##lsize##_unroll32_user(start, hitop);		\
		start += lsize * 32;					\
	} while (start < end);						\
									\
	__##pfx##flush_epilogue						\
}

__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 16)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 32)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 64)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)

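/*
 * The _user_page variants mirror blast_*cache*_page() but use the EVA
 * cachee unrolls, so e.g. blast_dcache32_user_page(page) writes back
 * and invalidates one page through user-mode virtual addresses while
 * running in kernel mode.
 */
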
/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)	\
static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
						    unsigned long end)	\
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
									\
	__##pfx##flush_prologue						\
									\
	while (1) {							\
		prot##cache_op(hitop, addr);				\
		if (addr == aend)					\
			break;						\
		addr += lsize;						\
	}								\
									\
	__##pfx##flush_epilogue						\
}

#ifndef CONFIG_EVA

__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )

#else

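/*
 * Under EVA the protected range flush must select the opcode at run
 * time: cachee for user-segment buffers (get_fs() == USER_DS), the
 * plain cache op otherwise.
 */
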
#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop)		\
static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
							unsigned long end) \
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
									\
	__##pfx##flush_prologue						\
									\
	if (segment_eq(get_fs(), USER_DS)) {				\
		while (1) {						\
			protected_cachee_op(hitop, addr);		\
			if (addr == aend)				\
				break;					\
			addr += lsize;					\
		}							\
	} else {							\
		while (1) {						\
			protected_cache_op(hitop, addr);		\
			if (addr == aend)				\
				break;					\
			addr += lsize;					\
		}							\
	}								\
	__##pfx##flush_epilogue						\
}

__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)

#endif
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
	protected_, loongson2_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
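
/*
 * Usage sketch (hypothetical caller, for illustration only): make a
 * buffer visible to a device before a DMA read by writing the dirty
 * D-cache lines covering it back to memory, using the generated
 * blast_dcache_range().
 */
static inline void __example_dma_sync_for_device(unsigned long vaddr,
						 unsigned long size)
{
	blast_dcache_range(vaddr, vaddr + size);
}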

#endif /* _ASM_R4KCACHE_H */