arch/mips/mm/tlb-r4k.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/tlbmisc.h>

extern void build_tlb_refill_handler(void);

/* Atomicity and interruptibility */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/smtc.h>
#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
        { \
        unsigned int mvpflags; \
        local_irq_save(flags); \
        mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
        evpe(mvpflags); \
        local_irq_restore(flags); \
        }
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */
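
/*
 * ENTER_CRITICAL()/EXIT_CRITICAL() bracket every TLB update below: they
 * disable local interrupts, and on SMTC additionally stop the other VPEs
 * (dvpe/evpe) so no other hardware thread can race against the shared TLB.
 */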

/*
 * LOONGSON2/3 has a 4 entry ITLB which is a subset of the DTLB;
 * unfortunately, the ITLB is not totally transparent to software,
 * so it has to be flushed explicitly when mappings change.
 */
static inline void flush_itlb(void)
{
        switch (current_cpu_type()) {
        case CPU_LOONGSON2:
        case CPU_LOONGSON3:
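                /* Bit 2 of the Loongson Diag register invalidates the ITLB. */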
                write_c0_diag(4);
                break;
        default:
                break;
        }
}

static inline void flush_itlb_vm(struct vm_area_struct *vma)
{
        if (vma->vm_flags & VM_EXEC)
                flush_itlb();
}

void local_flush_tlb_all(void)
{
        unsigned long flags;
        unsigned long old_ctx;
        int entry, ftlbhighset;

        ENTER_CRITICAL(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);

        entry = read_c0_wired();

        /* Blast 'em all away. */
        if (cpu_has_tlbinv) {
                if (current_cpu_data.tlbsizevtlb) {
                        write_c0_index(0);
                        mtc0_tlbw_hazard();
                        tlbinvf();  /* invalidate VTLB */
                }
                ftlbhighset = current_cpu_data.tlbsizevtlb +
                        current_cpu_data.tlbsizeftlbsets;
                for (entry = current_cpu_data.tlbsizevtlb;
                     entry < ftlbhighset;
                     entry++) {
                        write_c0_index(entry);
                        mtc0_tlbw_hazard();
                        tlbinvf();  /* invalidate one FTLB set */
                }
        } else {
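                /*
                 * No TLBINV support: overwrite every non-wired entry with a
                 * unique VPN2.  UNIQUE_ENTRYHI() yields addresses in an
                 * unmapped kernel segment, so these entries can never match
                 * a lookup.
                 */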
                while (entry < current_cpu_data.tlbsize) {
                        /* Make sure all entries differ. */
                        write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                        write_c0_index(entry);
                        mtc0_tlbw_hazard();
                        tlb_write_indexed();
                        entry++;
                }
        }
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        flush_itlb();
        EXIT_CRITICAL(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);

/*
 * All entries common to a mm share an ASID.  To effectively flush
 * these entries, we just bump the ASID.
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
        int cpu;

        preempt_disable();

        cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0) {
                drop_mmu_context(mm, cpu);
        }

        preempt_enable();
}

void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
        unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        int cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0) {
                unsigned long size, flags;

                ENTER_CRITICAL(flags);
                start = round_down(start, PAGE_SIZE << 1);
                end = round_up(end, PAGE_SIZE << 1);
                size = (end - start) >> (PAGE_SHIFT + 1);
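                /*
                 * Each TLB entry maps an even/odd page pair, so "size" is
                 * the number of entries the range can occupy.  If that is
                 * more than half the TLB (or an eighth of it when an FTLB
                 * is present), it is cheaper to start a fresh ASID than to
                 * probe for every page individually.
                 */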
                if (size <= (current_cpu_data.tlbsizeftlbsets ?
                             current_cpu_data.tlbsize / 8 :
                             current_cpu_data.tlbsize / 2)) {
                        int oldpid = read_c0_entryhi();
                        int newpid = cpu_asid(cpu, mm);

                        while (start < end) {
                                int idx;

                                write_c0_entryhi(start | newpid);
                                start += (PAGE_SIZE << 1);
                                mtc0_tlbw_hazard();
                                tlb_probe();
                                tlb_probe_hazard();
                                idx = read_c0_index();
                                write_c0_entrylo0(0);
                                write_c0_entrylo1(0);
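                                /* Probe missed: this page is not in the TLB. */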
                                if (idx < 0)
                                        continue;
                                /* Make sure all entries differ. */
                                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                                mtc0_tlbw_hazard();
                                tlb_write_indexed();
                        }
                        tlbw_use_hazard();
                        write_c0_entryhi(oldpid);
                } else {
                        drop_mmu_context(mm, cpu);
                }
                flush_itlb();
                EXIT_CRITICAL(flags);
        }
}

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long size, flags;

        ENTER_CRITICAL(flags);
        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        size = (size + 1) >> 1;
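        /*
         * As above: "size" is now the number of even/odd page pairs, i.e.
         * TLB entries, the kernel range spans.  Fall back to a full flush
         * when probing each pair would touch too much of the TLB.
         */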
        if (size <= (current_cpu_data.tlbsizeftlbsets ?
                     current_cpu_data.tlbsize / 8 :
                     current_cpu_data.tlbsize / 2)) {
                int pid = read_c0_entryhi();

                start &= (PAGE_MASK << 1);
                end += ((PAGE_SIZE << 1) - 1);
                end &= (PAGE_MASK << 1);

                while (start < end) {
                        int idx;

                        write_c0_entryhi(start);
                        start += (PAGE_SIZE << 1);
                        mtc0_tlbw_hazard();
                        tlb_probe();
                        tlb_probe_hazard();
                        idx = read_c0_index();
                        write_c0_entrylo0(0);
                        write_c0_entrylo1(0);
                        if (idx < 0)
                                continue;
                        /* Make sure all entries differ. */
                        write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                        mtc0_tlbw_hazard();
                        tlb_write_indexed();
                }
                tlbw_use_hazard();
                write_c0_entryhi(pid);
        } else {
                local_flush_tlb_all();
        }
        flush_itlb();
        EXIT_CRITICAL(flags);
}

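/*
 * Flush a single user page: probe the TLB for the page under the mm's
 * current ASID and, if it is present, overwrite the matching entry with
 * a unique unmapped VPN2.
 */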
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        int cpu = smp_processor_id();

        if (cpu_context(cpu, vma->vm_mm) != 0) {
                unsigned long flags;
                int oldpid, newpid, idx;

                newpid = cpu_asid(cpu, vma->vm_mm);
                page &= (PAGE_MASK << 1);
                ENTER_CRITICAL(flags);
                oldpid = read_c0_entryhi();
                write_c0_entryhi(page | newpid);
                mtc0_tlbw_hazard();
                tlb_probe();
                tlb_probe_hazard();
                idx = read_c0_index();
                write_c0_entrylo0(0);
                write_c0_entrylo1(0);
                if (idx < 0)
                        goto finish;
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                tlbw_use_hazard();

        finish:
                write_c0_entryhi(oldpid);
                flush_itlb_vm(vma);
                EXIT_CRITICAL(flags);
        }
}

/*
 * This one is only used for pages with the global bit set, so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
        unsigned long flags;
        int oldpid, idx;

        ENTER_CRITICAL(flags);
        oldpid = read_c0_entryhi();
        page &= (PAGE_MASK << 1);
        write_c0_entryhi(page);
        mtc0_tlbw_hazard();
        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
        if (idx >= 0) {
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                tlbw_use_hazard();
        }
        write_c0_entryhi(oldpid);
        flush_itlb();
        EXIT_CRITICAL(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and applies the workaround.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
        unsigned long flags;
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
        int idx, pid;

        /*
         * Handle the debugger faulting in pages on behalf of the debuggee.
         */
        if (current->active_mm != vma->vm_mm)
                return;

        ENTER_CRITICAL(flags);

        pid = read_c0_entryhi() & ASID_MASK;
        address &= (PAGE_MASK << 1);
        write_c0_entryhi(address | pid);
        pgdp = pgd_offset(vma->vm_mm, address);
        mtc0_tlbw_hazard();
        tlb_probe();
        tlb_probe_hazard();
        pudp = pud_offset(pgdp, address);
        pmdp = pmd_offset(pudp, address);
        idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
        /* This could be a huge page. */
        if (pmd_huge(*pmdp)) {
                unsigned long lo;
                write_c0_pagemask(PM_HUGE_MASK);
                ptep = (pte_t *)pmdp;
                lo = pte_to_entrylo(pte_val(*ptep));
                write_c0_entrylo0(lo);
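                /*
                 * EntryLo1 maps the second half of the huge page: the PFN
                 * field counts 4K units starting at bit 6 of EntryLo, so a
                 * byte offset of HPAGE_SIZE / 2 becomes HPAGE_SIZE >> 7.
                 */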
                write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

                mtc0_tlbw_hazard();
                if (idx < 0)
                        tlb_write_random();
                else
                        tlb_write_indexed();
                tlbw_use_hazard();
                write_c0_pagemask(PM_DEFAULT_MASK);
        } else
#endif
        {
                ptep = pte_offset_map(pmdp, address);

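                /*
                 * With 64-bit physical addresses on a 32-bit core the
                 * EntryLo-formatted bits live in pte_high of the split PTE.
                 */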
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
                write_c0_entrylo0(ptep->pte_high);
                ptep++;
                write_c0_entrylo1(ptep->pte_high);
#else
                write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
                write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
                mtc0_tlbw_hazard();
                if (idx < 0)
                        tlb_write_random();
                else
                        tlb_write_indexed();
        }
        tlbw_use_hazard();
        flush_itlb_vm(vma);
        EXIT_CRITICAL(flags);
}

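/*
 * Install a permanent (wired) translation, e.g. for fixed I/O or firmware
 * mappings that must never be evicted by the random replacement policy.
 */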
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
                     unsigned long entryhi, unsigned long pagemask)
{
        unsigned long flags;
        unsigned long wired;
        unsigned long old_pagemask;
        unsigned long old_ctx;

        ENTER_CRITICAL(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();
        wired = read_c0_wired();
        write_c0_wired(wired + 1);
        write_c0_index(wired);
        tlbw_use_hazard();      /* What is the hazard here? */
        write_c0_pagemask(pagemask);
        write_c0_entryhi(entryhi);
        write_c0_entrylo0(entrylo0);
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();

        write_c0_entryhi(old_ctx);
        tlbw_use_hazard();      /* What is the hazard here? */
        write_c0_pagemask(old_pagemask);
        local_flush_tlb_all();
        EXIT_CRITICAL(flags);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

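/*
 * Probe for huge page support: write the huge page mask to c0_pagemask
 * and read it back; only CPUs that implement that page size will latch
 * the value unchanged.
 */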
int __init has_transparent_hugepage(void)
{
        unsigned int mask;
        unsigned long flags;

        ENTER_CRITICAL(flags);
        write_c0_pagemask(PM_HUGE_MASK);
        back_to_back_c0_hazard();
        mask = read_c0_pagemask();
        write_c0_pagemask(PM_DEFAULT_MASK);

        EXIT_CRITICAL(flags);

        return mask == PM_HUGE_MASK;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

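/*
 * "ntlb=" on the kernel command line restricts the number of TLB entries
 * used for random replacement by wiring the remainder; handy for testing
 * behaviour with a smaller TLB.
 */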
static int ntlb;
static int __init set_ntlb(char *str)
{
        get_option(&str, &ntlb);
        return 1;
}

__setup("ntlb=", set_ntlb);

void tlb_init(void)
{
        /*
         * You should never change this register:
         *   - On R4600 1.7 the tlbp never hits for pages smaller than
         *     the value in the c0_pagemask register.
         *   - The entire mm handling assumes the c0_pagemask register to
         *     be set to fixed-size pages.
         */
        write_c0_pagemask(PM_DEFAULT_MASK);
        write_c0_wired(0);
        if (current_cpu_type() == CPU_R10000 ||
            current_cpu_type() == CPU_R12000 ||
            current_cpu_type() == CPU_R14000)
                write_c0_framemask(0);

        if (cpu_has_rixi) {
                /*
                 * Enable the no read, no exec bits, and enable large
                 * virtual addresses.
                 */
                u32 pg = PG_RIE | PG_XIE;
#ifdef CONFIG_64BIT
                pg |= PG_ELPA;
#endif
                write_c0_pagegrain(pg);
        }

        /* From this point on the ARC firmware is dead.  */
        local_flush_tlb_all();

        /* Did I tell you that ARC SUCKS?  */

        if (ntlb) {
                if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
                        int wired = current_cpu_data.tlbsize - ntlb;
                        write_c0_wired(wired);
                        write_c0_index(wired - 1);
                        printk("Restricting TLB to %d entries\n", ntlb);
                } else
                        printk("Ignoring invalid argument ntlb=%d\n", ntlb);
        }

        build_tlb_refill_handler();
}