/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2001 - 2007  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 * Copyright (c) 2007 STMicroelectronics (R&D) Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
/*
 * The maximum number of pages we support up to when doing ranged dcache
 * flushing. Anything exceeding this will simply flush the dcache in its
 * entirety.
 */
#define MAX_DCACHE_PAGES	64	/* XXX: Tune for ways */
#define MAX_ICACHE_PAGES	32
static void __flush_cache_one(unsigned long addr, unsigned long phys,
			      unsigned long exec_offset);
/*
 * This is initialised here to ensure that it is not placed in the BSS.  If
 * that were to happen, note that cache_init gets called before the BSS is
 * cleared, so this would get nulled out which would be hopeless.
 */
static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
	(void (*)(unsigned long, unsigned long))0xdeadbeef;
/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module and routine for a.out format,
 * signal handler code and kprobes code.
 */
static void __uses_jump_to_uncached sh4_flush_icache_range(void *args)
{
	struct flusher_data *data = args;
	unsigned long start, end;
	unsigned long flags, v;
	int i;

	start = data->addr1;
	end = data->addr2;

	/* If there are too many pages then just blow away the caches */
	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
		local_flush_cache_all(NULL);
		return;
	}

	/*
	 * Selectively flush d-cache then invalidate the i-cache.
	 * This is inefficient, so only use this for small ranges.
	 */
	start &= ~(L1_CACHE_BYTES-1);
	end += L1_CACHE_BYTES-1;
	end &= ~(L1_CACHE_BYTES-1);

	local_irq_save(flags);
	jump_to_uncached();

	for (v = start; v < end; v += L1_CACHE_BYTES) {
		unsigned long icacheaddr;

		/* Write the d-cache line back before touching the i-cache */
		__ocbwb(v);

		icacheaddr = CACHE_IC_ADDRESS_ARRAY | (v &
				cpu_data->icache.entry_mask);
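		/*
		 * (Assuming the usual SH-4 memory-mapped IC address array:
		 * entry_mask extracts the set index for 'v', writing 0
		 * stores a tag of 0 with the valid bit clear, and way_incr
		 * steps to the same set in the next way.)
		 */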
		/* Clear i-cache line valid-bit */
		for (i = 0; i < cpu_data->icache.ways; i++) {
			__raw_writel(0, icacheaddr);
			icacheaddr += cpu_data->icache.way_incr;
		}
	}

	back_to_cached();
	local_irq_restore(flags);
}
static inline void flush_cache_one(unsigned long start, unsigned long phys)
{
	unsigned long flags, exec_offset = 0;

	/*
	 * All types of SH-4 require PC to be in P2 to operate on the I-cache.
	 * Some types of SH-4 require PC to be in P2 to operate on the D-cache.
	 */
	if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
	    (start < CACHE_OC_ADDRESS_ARRAY))
		exec_offset = 0x20000000;
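	/*
	 * (0x20000000 is the distance from the cached P1 segment at
	 * 0x80000000 to its uncached P2 mirror at 0xa0000000; adding it
	 * to the PC moves execution into uncached space.)
	 */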
	local_irq_save(flags);
	__flush_cache_one(start | SH_CACHE_ASSOC, P1SEGADDR(phys), exec_offset);
	local_irq_restore(flags);
}
/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
static void sh4_flush_dcache_page(void *arg)
{
	struct page *page = arg;
#ifndef CONFIG_SMP
	struct address_space *mapping = page_mapping(page);

	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
#endif
	{
		unsigned long phys = PHYSADDR(page_address(page));
		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
		int i, n;
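		/*
		 * Worked example (an assumed configuration, not probed
		 * here): with 4 KiB pages and a 16 KiB per-way D-cache,
		 * n_aliases is 4, so the loop below writes the page back
		 * once per cache colour, at OC address-array offsets
		 * 0x0000, 0x1000, 0x2000 and 0x3000.
		 */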
		/* Loop all the D-cache */
		n = boot_cpu_data.dcache.n_aliases;
		for (i = 0; i < n; i++, addr += PAGE_SIZE)
			flush_cache_one(addr, phys);
	}

	wmb();
}
/* TODO: Selective icache invalidation through IC address array.. */
static void __uses_jump_to_uncached flush_icache_all(void)
{
	unsigned long flags, ccr;

	local_irq_save(flags);
	jump_to_uncached();

	/* Flush I-cache */
	ccr = ctrl_inl(CCR);
	ccr |= CCR_CACHE_ICI;
	ctrl_outl(ccr, CCR);

	/*
	 * back_to_cached() will take care of the barrier for us, don't add
	 * another one!
	 */

	back_to_cached();
	local_irq_restore(flags);
}
static inline void flush_dcache_all(void)
{
	(*__flush_dcache_segment_fn)(0UL, boot_cpu_data.dcache.way_size);
	wmb();
}

static void sh4_flush_cache_all(void *unused)
{
	flush_dcache_all();
	flush_icache_all();
}
static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
			     unsigned long end)
{
	unsigned long d = 0, p = start & PAGE_MASK;
	unsigned long alias_mask = boot_cpu_data.dcache.alias_mask;
	unsigned long n_aliases = boot_cpu_data.dcache.n_aliases;
	unsigned long select_bit;
	unsigned long all_aliases_mask;
	unsigned long addr_offset;
	pgd_t *dir;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i;

	dir = pgd_offset(mm, p);
	pud = pud_offset(dir, p);
	pmd = pmd_offset(pud, p);
	end = PAGE_ALIGN(end);

	all_aliases_mask = (1 << n_aliases) - 1;
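	/*
	 * Example (assuming n_aliases == 4): all_aliases_mask is 0xf, and
	 * each aliasing PTE found below sets the bits for its virtual and
	 * physical colours in 'd'.  Once d == all_aliases_mask every
	 * colour has to be flushed anyway, so the page table walk can
	 * stop early.
	 */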
	do {
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
			p &= PMD_MASK;
			p += PMD_SIZE;
			pmd++;
			continue;
		}

		pte = pte_offset_kernel(pmd, p);

		do {
			pte_t entry = *pte;
			unsigned long phys;

			if (!(pte_val(entry) & _PAGE_PRESENT)) {
				pte++;
				p += PAGE_SIZE;
				continue;
			}

			phys = pte_val(entry) & PTE_PHYS_MASK;

			if ((p ^ phys) & alias_mask) {
				d |= 1 << ((p & alias_mask) >> PAGE_SHIFT);
				d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT);

				if (d == all_aliases_mask)
					goto loop_exit;
			}

			pte++;
			p += PAGE_SIZE;
		} while (p < end && ((unsigned long)pte & ~PAGE_MASK));
		pmd++;
	} while (p < end);

loop_exit:
	addr_offset = 0;
	select_bit = 1;

	for (i = 0; i < n_aliases; i++) {
		if (d & select_bit) {
			(*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE);
			wmb();
		}

		select_bit <<= 1;
		addr_offset += PAGE_SIZE;
	}
}
/*
 * Note : (RPC) since the caches are physically tagged, the only point
 * of flush_cache_mm for SH-4 is to get rid of aliases from the
 * D-cache.  The assumption elsewhere, e.g. flush_cache_range, is that
 * lines can stay resident so long as the virtual address they were
 * accessed with (hence cache set) is in accord with the physical
 * address (i.e. tag).  It's no different here.  So I reckon we don't
 * need to flush the I-cache, since aliases don't matter for that.  We
 * should try that.
 *
 * Caller takes mm->mmap_sem.
 */
static void sh4_flush_cache_mm(void *arg)
{
	struct mm_struct *mm = arg;

	if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
		return;

	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother groveling around the dcache for the VMA ranges
	 * if there are too many PTEs to make it worthwhile.
	 */
	if (mm->nr_ptes >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else {
		struct vm_area_struct *vma;

		/*
		 * In this case there are reasonably sized ranges to flush,
		 * iterate through the VMA list and take care of any aliases.
		 */
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			__flush_cache_mm(mm, vma->vm_start, vma->vm_end);
	}

	/* Only touch the icache if one of the VMAs has VM_EXEC set. */
	if (mm->exec_vm)
		flush_icache_all();
}
/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
static void sh4_flush_cache_page(void *args)
{
	struct flusher_data *data = args;
	struct vm_area_struct *vma;
	unsigned long address, pfn, phys;
	unsigned int alias_mask;

	vma = data->vma;
	address = data->addr1;
	pfn = data->addr2;
	phys = pfn << PAGE_SHIFT;

	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
		return;

	alias_mask = boot_cpu_data.dcache.alias_mask;
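	/*
	 * Example (assuming a 16 KiB per-way D-cache with 4 KiB pages, so
	 * alias_mask covers address bits [13:12]): a virtual page at
	 * 0x00401000 mapping a physical page at 0x0c003000 differs in
	 * those bits, so the same data can occupy two different cache
	 * sets and both colours must be flushed below.
	 */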
	/* We only need to flush D-cache when we have alias */
	if ((address ^ phys) & alias_mask) {
		/* Loop 4K of the D-cache */
		flush_cache_one(
			CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
		/* Loop another 4K of the D-cache */
		flush_cache_one(
			CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
			phys);
	}

	alias_mask = boot_cpu_data.icache.alias_mask;
	if (vma->vm_flags & VM_EXEC) {
		/*
		 * Evict entries from the portion of the cache from which code
		 * may have been executed at this address (virtual).  There's
		 * no need to evict from the portion corresponding to the
		 * physical address as for the D-cache, because we know the
		 * kernel has never executed the code through its identity
		 * translation.
		 */
		flush_cache_one(
			CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
	}
}
/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
static void sh4_flush_cache_range(void *args)
{
	struct flusher_data *data = args;
	struct vm_area_struct *vma;
	unsigned long start, end;

	vma = data->vma;
	start = data->addr1;
	end = data->addr2;

	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
		return;

	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother with the lookup and alias check if we have a
	 * wide range to cover, just blow away the dcache in its
	 * entirety instead. -- PFM.
	 */
	if (((end - start) >> PAGE_SHIFT) >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else
		__flush_cache_mm(vma->vm_mm, start, end);

	if (vma->vm_flags & VM_EXEC) {
		/*
		 * TODO: Is this required???  Need to look at how I-cache
		 * coherency is assured when new programs are loaded to see
		 * if this matters.
		 */
		flush_icache_all();
	}
}
/**
 * __flush_cache_one
 *
 * @addr:  address in memory mapped cache array
 * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
 *         set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *               region else 0.
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed.  The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
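/*
 * For example, a call of the form (hypothetical values, mirroring what
 * flush_cache_one() constructs above):
 *
 *	__flush_cache_one(CACHE_OC_ADDRESS_ARRAY | SH_CACHE_ASSOC | 0x1000,
 *			  P1SEGADDR(phys), 0);
 *
 * walks colour 1 of each way of the operand cache; with the 'A' bit set,
 * a write only hits lines whose tags match the supplied P1 address, and
 * those lines are written back and invalidated.
 */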
static void __flush_cache_one(unsigned long addr, unsigned long phys,
			      unsigned long exec_offset)
{
	int way_count;
	unsigned long base_addr = addr;
	struct cache_info *dcache;
	unsigned long way_incr;
	unsigned long a, ea, p;
	unsigned long temp_pc;

	dcache = &boot_cpu_data.dcache;
	/* Write this way for better assembly. */
	way_count = dcache->ways;
	way_incr = dcache->way_incr;

	/*
	 * Apply exec_offset (i.e. branch to P2 if required).
	 *
	 * FIXME:
	 *
	 *	If I write "=r" for the (temp_pc), it puts this in r6 hence
	 *	trashing exec_offset before it's been added on - why?  Hence
	 *	"=&r" as a 'workaround'
	 */
	asm volatile("mov.l 1f, %0\n\t"
		     "add   %1, %0\n\t"
		     "jmp   @%0\n\t"
		     "nop\n\t"
		     ".balign 4\n\t"
		     "1:  .long 2f\n\t"
		     "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));

	/*
	 * We know there will be >=1 iteration, so write as do-while to avoid
	 * pointless head-of-loop check for 0 iterations.
	 */
	do {
		ea = base_addr + PAGE_SIZE;
		a = base_addr;
		p = phys;

		do {
			*(volatile unsigned long *)a = p;
			/*
			 * Next line: intentionally not p+32, saves an add, p
			 * will do since only the cache tag bits need to
			 * match.
			 */
			*(volatile unsigned long *)(a+32) = p;
			a += 64;
			p += 64;
		} while (a < ea);

		base_addr += way_incr;
	} while (--way_count != 0);
}
/*
 * Break the 1, 2 and 4 way variants of this out into separate functions to
 * avoid nearly all the overhead of having the conditional stuff in the
 * function bodies (+ the 1 and 2 way cases avoid saving any registers too).
 *
 * We want to eliminate unnecessary bus transactions, so this code uses
 * a non-obvious technique.
 *
 * Loop over a cache way sized block of memory, one cache line at a time.
 * For each line, use movca.l to cause the current cache line contents to
 * be written back, but without reading anything from main memory.  However
 * this has the side effect that the cache is now caching that memory
 * location, so follow this with a cache invalidate to mark the cache line
 * invalid.  And do all this with interrupts disabled, to avoid the cache
 * line being accidentally evicted while it is holding garbage.
 *
 * This also breaks in a number of circumstances:
 *
 * - if there are modifications to the region of memory just above
 *   empty_zero_page (for example because a breakpoint has been placed
 *   there), then these can be lost.
 *
 *   This is because the memory address which the cache temporarily
 *   caches in the above description is empty_zero_page.  So the
 *   movca.l hits the cache (it was assumed that it would miss, or at
 *   least not be dirty), modifies the line and then invalidates it,
 *   losing the required change.
 *
 * - If caches are disabled or configured in write-through mode, then
 *   the movca.l writes garbage directly into memory.
 */
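/*
 * In sketch form (the pattern the 1/2/4-way variants below unroll), each
 * line is handled as:
 *
 *	ldc	sr_with_bl, sr	! set SR.BL, blocking interrupts
 *	movca.l	r0, @line	! allocate the line without reading memory,
 *				! evicting (writing back) the old occupant
 *	ocbi	@line		! invalidate, discarding the garbage that
 *				! the movca.l left in the line
 *	ldc	orig_sr, sr	! restore SR
 */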
static void __flush_dcache_segment_writethrough(unsigned long start,
						unsigned long extent_per_way)
{
	unsigned long addr;
	int i;

	addr = CACHE_OC_ADDRESS_ARRAY | (start & cpu_data->dcache.entry_mask);

	while (extent_per_way) {
		for (i = 0; i < cpu_data->dcache.ways; i++)
			__raw_writel(0, addr + cpu_data->dcache.way_incr * i);

		addr += cpu_data->dcache.linesz;
		extent_per_way -= cpu_data->dcache.linesz;
	}
}
static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/*
	 * The previous code aligned base_addr to 16k, i.e. the way_size of all
	 * existing SH-4 D-caches.  Whilst I don't see a need to have this
	 * aligned to any better than the cache line size (which it will be
	 * anyway by construction), let's align it to at least the way_size of
	 * any existing or conceivable SH-4 D-cache.  -- RPC
	 */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
	} while (a0 < a0e);
}
static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
	} while (a0 < a0e);
}
static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a2, a3, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a2 = a1 + way_incr;
	a3 = a2 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
	} while (a0 < a0e);
}
extern void __weak sh4__flush_region_init(void);

/*
 * SH-4 has virtually indexed and physically tagged cache.
 */
void __init sh4_cache_init(void)
{
	unsigned int wt_enabled = !!(__raw_readl(CCR) & CCR_CACHE_WT);

	printk("PVR=%08x CVR=%08x PRR=%08x\n",
		ctrl_inl(CCN_PVR),
		ctrl_inl(CCN_CVR),
		ctrl_inl(CCN_PRR));

	if (wt_enabled)
		__flush_dcache_segment_fn = __flush_dcache_segment_writethrough;
	else {
		switch (boot_cpu_data.dcache.ways) {
		case 1:
			__flush_dcache_segment_fn = __flush_dcache_segment_1way;
			break;
		case 2:
			__flush_dcache_segment_fn = __flush_dcache_segment_2way;
			break;
		case 4:
			__flush_dcache_segment_fn = __flush_dcache_segment_4way;
			break;
		default:
			panic("unknown number of cache ways\n");
			break;
		}
	}
	local_flush_icache_range	= sh4_flush_icache_range;
	local_flush_dcache_page		= sh4_flush_dcache_page;
	local_flush_cache_all		= sh4_flush_cache_all;
	local_flush_cache_mm		= sh4_flush_cache_mm;
	local_flush_cache_dup_mm	= sh4_flush_cache_mm;
	local_flush_cache_page		= sh4_flush_cache_page;
	local_flush_cache_range		= sh4_flush_cache_range;

	sh4__flush_region_init();
}