/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/log2.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"

struct l2c_init_data {
	const char *type;
	unsigned way_size_0;
	unsigned num_lock;
	void (*of_parse)(const struct device_node *, u32 *, u32 *);
	void (*enable)(void __iomem *, u32, unsigned);
	void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
	void (*save)(void __iomem *);
	struct outer_cache_fns outer_cache;
};

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

struct l2x0_regs l2x0_saved_regs;

/*
 * Common code for all cache controllers.
 */
static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

/*
 * By default, we write directly to secure registers.  Platforms must
 * override this if they are running non-secure.
 */
static void l2c_write_sec(unsigned long val, void __iomem *base, unsigned reg)
{
	/* Skip the (potentially expensive) write if the register
	 * already holds the desired value. */
	if (val == readl_relaxed(base + reg))
		return;
	if (outer_cache.write_sec)
		outer_cache.write_sec(val, reg);
	else
		writel_relaxed(val, base + reg);
}

/*
 * This should only be called when we have a requirement that the
 * register be written due to a work-around, as platforms running
 * in non-secure mode may not be able to access this register.
 */
static inline void l2c_set_debug(void __iomem *base, unsigned long val)
{
	l2c_write_sec(val, base, L2X0_DEBUG_CTRL);
}

static void __l2c_op_way(void __iomem *reg)
{
	writel_relaxed(l2x0_way_mask, reg);
	l2c_wait_mask(reg, l2x0_way_mask);
}

static inline void l2c_unlock(void __iomem *base, unsigned num)
{
	unsigned i;

	for (i = 0; i < num; i++) {
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}

/*
 * Enable the L2 cache controller.  This function must only be
 * called when the cache controller is known to be disabled.
 */
static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	unsigned long flags;

	/* The auxiliary control register can only be written while the
	 * cache is disabled, so it must be set up first. */
	l2c_write_sec(aux, base, L2X0_AUX_CTRL);

	l2c_unlock(base, num_lock);

	local_irq_save(flags);
	__l2c_op_way(base + L2X0_INV_WAY);
	writel_relaxed(0, base + sync_reg_offset);
	l2c_wait_mask(base + sync_reg_offset, 1);
	local_irq_restore(flags);

	l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL);
}

static void l2c_disable(void)
{
	void __iomem *base = l2x0_base;

	outer_cache.flush_all();
	l2c_write_sec(0, base, L2X0_CTRL);
	dsb(st);
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	l2c_wait_mask
#endif

static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	l2c_set_debug(l2x0_base, val);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}
#endif

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	__l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	l2c_write_sec(0, l2x0_base, L2X0_CTRL);
	dsb(st);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c_save(void __iomem *base)
{
	l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
}

/*
 * L2C-210 specific code.
 *
 * The L2C-2x0 PA, set/way and sync operations are atomic, but we must
 * ensure that no background operation is running.  The way operations
 * are all background tasks.
 *
 * While a background operation is in progress, any new operation is
 * ignored (unspecified whether this causes an error.)  Thankfully, not
 * used on SMP.
 *
 * Never has a different sync register other than L2X0_CACHE_SYNC, but
 * we use sync_reg_offset here so we can share some of this with L2C-310.
 */
static void __l2c210_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + sync_reg_offset);
}

static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end)
{
	while (start < end) {
		writel_relaxed(start, reg);
		start += CACHE_LINE_SIZE;
	}
}

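/*
 * Partial cache lines at either end of a range cannot simply be
 * invalidated without risking the loss of dirty data sharing the line,
 * so the range functions below clean+invalidate them first.  As a
 * worked example (illustrative addresses, 32-byte lines):
 * inv_range(0x1010, 0x2030) clean+invalidates the lines at 0x1000 and
 * 0x2020, then invalidates the full lines from 0x1020 up to, but not
 * including, 0x2020.
 */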
static void l2c210_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_all(void)
{
	void __iomem *base = l2x0_base;

	BUG_ON(!irqs_disabled());

	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	__l2c210_cache_sync(base);
}

static void l2c210_sync(void)
{
	__l2c210_cache_sync(l2x0_base);
}

static void l2c210_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 1);
}

static const struct l2c_init_data l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c_enable,
	.save = l2c_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c210_resume,
	},
};

/*
 * L2C-220 specific code.
 *
 * All operations are background operations: they have to be waited for.
 * Conflicting requests generate a slave error (which will cause an
 * imprecise abort.)  Never uses sync_reg_offset, so we hard-code the
 * sync register here.
 *
 * However, we can re-use the l2c210_resume call.
 */
static inline void __l2c220_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
	l2c_wait_mask(base + L2X0_CACHE_SYNC, 1);
}

static void l2c220_op_way(void __iomem *base, unsigned reg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(base + reg);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

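/*
 * The range helper below processes at most 4096 bytes per pass and
 * drops the spinlock between blocks; holding the lock (and with it the
 * caller's interrupt-disabled section) across an arbitrarily large
 * range would otherwise lead to unbounded interrupt latency.
 */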
static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end, unsigned long flags)
{
	raw_spinlock_t *lock = &l2x0_lock;

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2c_wait_mask(reg, 1);
			writel_relaxed(start, reg);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}

	return flags;
}

static void l2c220_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
			writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
		}
	}

	flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_INV_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_flush_all(void)
{
	l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY);
}

static void l2c220_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c220_cache_sync(l2x0_base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	/*
	 * Always enable non-secure access to the lockdown registers -
	 * we write to them as part of the L2C enable sequence so they
	 * need to be accessible.
	 */
	aux |= L220_AUX_CTRL_NS_LOCKDOWN;

	l2c_enable(base, aux, num_lock);
}

static const struct l2c_init_data l2c220_data = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c220_enable,
	.save = l2c_save,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c210_resume,
	},
};

/*
 * L2C-310 specific code.
 *
 * Very similar to L2C-210, the PA, set/way and sync operations are atomic,
 * and the way operations are all background tasks.  However, issuing an
 * operation while a background operation is in progress results in a
 * SLVERR response.  We can reuse:
 *
 *	__l2c210_cache_sync (using sync_reg_offset)
 *	l2c210_sync
 *	l2c210_inv_range (if 588369 is not applicable)
 *	l2c210_clean_range
 *	l2c210_flush_range (if 588369 is not applicable)
 *	l2c210_flush_all (if 727915 is not applicable)
 *
 * Errata:
 * 588369: PL310 R0P0->R1P0, fixed R2P0.
 *	Affects: all clean+invalidate operations
 *	clean and invalidate skips the invalidate step, so we need to issue
 *	separate operations.  We also require the above debug workaround
 *	enclosing this code fragment on affected parts.  On unaffected parts,
 *	we must not use this workaround without the debug register writes
 *	to avoid exposing a problem similar to 727915.
 *
 * 727915: PL310 R2P0->R3P0, fixed R3P1.
 *	Affects: clean+invalidate by way
 *	clean and invalidate by way runs in the background, and a store can
 *	hit the line between the clean operation and invalidate operation,
 *	resulting in the store being lost.
 *
 * 752271: PL310 R3P0->R3P1-50REL0, fixed R3P2.
 *	Affects: 8x64-bit (double fill) line fetches
 *	double fill line fetches can fail to cause dirty data to be evicted
 *	from the cache before the new data overwrites the second line.
 *
 * 753970: PL310 R3P0, fixed R3P1.
 *	Affects: sync
 *	prevents merging writes after the sync operation, until another L2C
 *	operation is performed (or a number of other conditions.)
 *
 * 769419: PL310 R0P0->R3P1, fixed R3P2.
 *	Affects: store buffer
 *	store buffer is not automatically drained.
 */
static void l2c310_inv_range_erratum(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		unsigned long flags;

		/* Erratum 588369 for both clean+invalidate operations */
		raw_spin_lock_irqsave(&l2x0_lock, flags);
		l2c_set_debug(base, 0x03);

		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(end, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(end, base + L2X0_INV_LINE_PA);
		}

		l2c_set_debug(base, 0x00);
		raw_spin_unlock_irqrestore(&l2x0_lock, flags);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
{
	raw_spinlock_t *lock = &l2x0_lock;
	unsigned long flags;
	void __iomem *base = l2x0_base;

	raw_spin_lock_irqsave(lock, flags);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		l2c_set_debug(base, 0x03);
		while (start < blk_end) {
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}
		l2c_set_debug(base, 0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}
	raw_spin_unlock_irqrestore(lock, flags);
	__l2c210_cache_sync(base);
}

static void l2c310_flush_all_erratum(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	l2c_set_debug(base, 0x03);
	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	l2c_set_debug(base, 0x00);
	__l2c210_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __init l2c310_save(void __iomem *base)
{
	unsigned revision;

	l2c_save(base);

	l2x0_saved_regs.tag_latency = readl_relaxed(base +
		L310_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(base +
		L310_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(base +
		L310_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(base +
		L310_ADDR_FILTER_START);

	revision = readl_relaxed(base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

	/* From r2p0, there is Prefetch offset/control register */
	if (revision >= L310_CACHE_ID_RTL_R2P0)
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
							L310_PREFETCH_CTRL);

	/* From r3p0, there is Power control register */
	if (revision >= L310_CACHE_ID_RTL_R3P0)
		l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
							L310_POWER_CTRL);
}

static void l2c310_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		unsigned revision;

		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			       base + L310_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			       base + L310_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			       base + L310_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			       base + L310_ADDR_FILTER_START);

		revision = readl_relaxed(base + L2X0_CACHE_ID) &
				L2X0_CACHE_ID_RTL_MASK;

		if (revision >= L310_CACHE_ID_RTL_R2P0)
			l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base,
				      L310_PREFETCH_CTRL);
		if (revision >= L310_CACHE_ID_RTL_R3P0)
			l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base,
				      L310_POWER_CTRL);

		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);

		/* Re-enable full-line-of-zeros for Cortex-A9 */
		if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
			set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
	}
}

static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, void *data)
{
	switch (act & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
		break;
	case CPU_DYING:
		set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
		break;
	}
	return NOTIFY_OK;
}

static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_RTL_MASK;
	bool cortex_a9 = read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;

	if (rev >= L310_CACHE_ID_RTL_R2P0) {
		if (cortex_a9) {
			aux |= L310_AUX_CTRL_EARLY_BRESP;
			pr_info("L2C-310 enabling early BRESP for Cortex-A9\n");
		}
	} else if (aux & L310_AUX_CTRL_EARLY_BRESP) {
		pr_warn("L2C-310 early BRESP only supported with Cortex-A9\n");
		aux &= ~L310_AUX_CTRL_EARLY_BRESP;
	}

	if (cortex_a9) {
		u32 aux_cur = readl_relaxed(base + L2X0_AUX_CTRL);
		u32 acr = get_auxcr();

		pr_debug("Cortex-A9 ACR=0x%08x\n", acr);

		if (acr & BIT(3) && !(aux_cur & L310_AUX_CTRL_FULL_LINE_ZERO))
			pr_err("L2C-310: full line of zeros enabled in Cortex-A9 but not L2C-310 - invalid\n");

		if (aux & L310_AUX_CTRL_FULL_LINE_ZERO && !(acr & BIT(3)))
			pr_err("L2C-310: enabling full line of zeros but not enabled in Cortex-A9\n");

		if (!(aux & L310_AUX_CTRL_FULL_LINE_ZERO) && !outer_cache.write_sec) {
			aux |= L310_AUX_CTRL_FULL_LINE_ZERO;
			pr_info("L2C-310 full line of zeros enabled for Cortex-A9\n");
		}
	} else if (aux & (L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP)) {
		pr_err("L2C-310: disabling Cortex-A9 specific feature bits\n");
		aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP);
	}

	if (aux & (L310_AUX_CTRL_DATA_PREFETCH | L310_AUX_CTRL_INSTR_PREFETCH)) {
		u32 prefetch = readl_relaxed(base + L310_PREFETCH_CTRL);

		pr_info("L2C-310 %s%s prefetch enabled, offset %u lines\n",
			aux & L310_AUX_CTRL_INSTR_PREFETCH ? "I" : "",
			aux & L310_AUX_CTRL_DATA_PREFETCH ? "D" : "",
			1 + (prefetch & L310_PREFETCH_CTRL_OFFSET_MASK));
	}

	/* r3p0 or later has power control register */
	if (rev >= L310_CACHE_ID_RTL_R3P0) {
		u32 power_ctrl;

		l2c_write_sec(L310_DYNAMIC_CLK_GATING_EN | L310_STNDBY_MODE_EN,
			      base, L310_POWER_CTRL);
		power_ctrl = readl_relaxed(base + L310_POWER_CTRL);
		pr_info("L2C-310 dynamic clock gating %sabled, standby mode %sabled\n",
			power_ctrl & L310_DYNAMIC_CLK_GATING_EN ? "en" : "dis",
			power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis");
	}

	/*
	 * Always enable non-secure access to the lockdown registers -
	 * we write to them as part of the L2C enable sequence so they
	 * need to be accessible.
	 */
	aux |= L310_AUX_CTRL_NS_LOCKDOWN;

	l2c_enable(base, aux, num_lock);

	if (aux & L310_AUX_CTRL_FULL_LINE_ZERO) {
		set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
		cpu_notifier(l2c310_cpu_enable_flz, 0);
	}
}

static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
	const char *errata[8];
	unsigned n = 0;

	if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) &&
	    revision < L310_CACHE_ID_RTL_R2P0 &&
	    /* For bcm compatibility */
	    fns->inv_range == l2c210_inv_range) {
		fns->inv_range = l2c310_inv_range_erratum;
		fns->flush_range = l2c310_flush_range_erratum;
		errata[n++] = "588369";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) &&
	    revision >= L310_CACHE_ID_RTL_R2P0 &&
	    revision < L310_CACHE_ID_RTL_R3P1) {
		fns->flush_all = l2c310_flush_all_erratum;
		errata[n++] = "727915";
	}

	if (revision >= L310_CACHE_ID_RTL_R3P0 &&
	    revision < L310_CACHE_ID_RTL_R3P2) {
		u32 val = readl_relaxed(base + L310_PREFETCH_CTRL);
		/* I don't think bit23 is required here... but iMX6 does so */
		if (val & (BIT(30) | BIT(23))) {
			val &= ~(BIT(30) | BIT(23));
			l2c_write_sec(val, base, L310_PREFETCH_CTRL);
			errata[n++] = "752271";
		}
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
	    revision == L310_CACHE_ID_RTL_R3P0) {
		sync_reg_offset = L2X0_DUMMY_REG;
		errata[n++] = "753970";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
		errata[n++] = "769419";

	if (n) {
		unsigned i;

		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
		for (i = 0; i < n; i++)
			pr_cont(" %s", errata[i]);
		pr_cont(" enabled\n");
	}
}

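/*
 * For example, on an r3p0 part with the corresponding config options
 * enabled, the resulting log line would read (illustrative):
 *
 *	L2C-310 errata 727915 753970 enabled
 *
 * with each applied erratum number listed in order.
 */
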
static void l2c310_disable(void)
{
	/*
	 * If full-line-of-zeros is enabled, we must first disable it in the
	 * Cortex-A9 auxiliary control register before disabling the L2 cache.
	 */
	if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
		set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));

	l2c_disable();
}

static const struct l2c_init_data l2c310_init_fns __initconst = {
	.type = "L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};

static void __init __l2c_init(const struct l2c_init_data *data,
	u32 aux_val, u32 aux_mask, u32 cache_id)
{
	struct outer_cache_fns fns;
	unsigned way_size_bits, ways;
	u32 aux, old_aux;

	/*
	 * Sanity check the aux values.  aux_mask is the bits we preserve
	 * from reading the hardware register, and aux_val is the bits we
	 * set.
	 */
	if (aux_val & aux_mask)
		pr_alert("L2C: platform provided aux values permit register corruption.\n");

	old_aux = aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
	aux &= aux_mask;
	aux |= aux_val;

	if (old_aux != aux)
		pr_warn("L2C: DT/platform modifies aux control register: 0x%08x -> 0x%08x\n",
		        old_aux, aux);

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if ((aux_val | ~aux_mask) & (L2C_AUX_CTRL_WAY_SIZE_MASK | L310_AUX_CTRL_ASSOCIATIVITY_16))
			pr_warn("L2C: DT/platform tries to modify or specify cache size\n");
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		break;

	case L2X0_CACHE_ID_PART_L210:
	case L2X0_CACHE_ID_PART_L220:
		ways = (aux >> 13) & 0xf;
		break;

	case AURORA_CACHE_ID:
		ways = (aux >> 13) & 0xf;
		ways = 2 << ((ways + 1) >> 2);
		break;

	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * way_size_0 is the size that a way_size value of zero would be
	 * given the calculation: way_size = way_size_0 << way_size_bits.
	 * So, if way_size_bits=0 is reserved, but way_size_bits=1 is 16k,
	 * then way_size_0 would be 8k.
	 *
	 * L2 cache size = number of ways * way size.
	 */
	way_size_bits = (aux & L2C_AUX_CTRL_WAY_SIZE_MASK) >>
			L2C_AUX_CTRL_WAY_SIZE_SHIFT;
	l2x0_size = ways * (data->way_size_0 << way_size_bits);

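	/*
	 * Worked example (illustrative values): an L2C-310 with
	 * way_size_0 = 8k and a way_size field of 3 has 64k ways;
	 * with 16 ways the total l2x0_size is 16 * 64k = 1MB.
	 */
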
	fns = data->outer_cache;
	fns.write_sec = outer_cache.write_sec;
	if (data->fixup)
		data->fixup(l2x0_base, cache_id, &fns);

	/*
	 * Check if l2x0 controller is already enabled.  If we are booting
	 * in non-secure mode accessing the below registers will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		data->enable(l2x0_base, aux, data->num_lock);

	outer_cache = fns;

	/*
	 * It is strange to save the register state before initialisation,
	 * but hey, this is what the DT implementations decided to do.
	 */
	if (data->save)
		data->save(l2x0_base);

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	pr_info("%s cache controller enabled, %d ways, %d kB\n",
		data->type, ways, l2x0_size >> 10);
	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
		data->type, cache_id, aux);
}

void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	u32 cache_id;

	l2x0_base = base;

	cache_id = readl_relaxed(base + L2X0_CACHE_ID);

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	default:
	case L2X0_CACHE_ID_PART_L210:
		data = &l2c210_data;
		break;

	case L2X0_CACHE_ID_PART_L220:
		data = &l2c220_data;
		break;

	case L2X0_CACHE_ID_PART_L310:
		data = &l2c310_init_fns;
		break;
	}

	__l2c_init(data, aux_val, aux_mask, cache_id);
}

#ifdef CONFIG_OF
static int l2_wt_override;

/*
 * Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree.
 */
static u32 cache_id_part_number_from_dt;

/**
 * l2x0_cache_size_of_parse() - read cache size parameters from DT
 * @np: the device tree node for the l2 cache
 * @aux_val: pointer to machine-supplied auxiliary register value, to
 * be augmented by the call (bits to be set to 1)
 * @aux_mask: pointer to machine-supplied auxiliary register mask, to
 * be augmented by the call (bits to be set to 0)
 * @associativity: variable to return the calculated associativity in
 * @max_way_size: the maximum size in bytes for the cache ways
 */
static void __init l2x0_cache_size_of_parse(const struct device_node *np,
					    u32 *aux_val, u32 *aux_mask,
					    u32 *associativity,
					    u32 max_way_size)
{
	u32 mask = 0, val = 0;
	u32 cache_size = 0, sets = 0;
	u32 way_size_bits = 1;
	u32 way_size = 0;
	u32 block_size = 0;
	u32 line_size = 0;

	of_property_read_u32(np, "cache-size", &cache_size);
	of_property_read_u32(np, "cache-sets", &sets);
	of_property_read_u32(np, "cache-block-size", &block_size);
	of_property_read_u32(np, "cache-line-size", &line_size);

	if (!cache_size || !sets)
		return;

	/* All these l2 caches have the same line = block size actually */
	if (!line_size) {
		if (block_size) {
			/* If linesize is not given, it is equal to blocksize */
			line_size = block_size;
		} else {
			/* Fall back to known size */
			pr_warn("L2C OF: no cache block/line size given: "
				"falling back to default size %d bytes\n",
				CACHE_LINE_SIZE);
			line_size = CACHE_LINE_SIZE;
		}
	}

	if (line_size != CACHE_LINE_SIZE)
		pr_warn("L2C OF: DT supplied line size %d bytes does "
			"not match hardware line size of %d bytes\n",
			line_size, CACHE_LINE_SIZE);

	/*
	 * Since:
	 * set size = cache size / sets
	 * ways = cache size / (sets * line size)
	 * way size = cache size / (cache size / (sets * line size))
	 * way size = sets * line size
	 * associativity = ways = cache size / way size
	 */
	way_size = sets * line_size;
	*associativity = cache_size / way_size;

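	/*
	 * For example (illustrative values): a 512KB cache with 1024
	 * sets of 32-byte lines gives way_size = 32KB and therefore
	 * an associativity of 16.
	 */
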
	if (way_size > max_way_size) {
		pr_err("L2C OF: set size %dKB is too large\n", way_size);
		return;
	}

	pr_info("L2C OF: override cache size: %d bytes (%dKB)\n",
		cache_size, cache_size >> 10);
	pr_info("L2C OF: override line size: %d bytes\n", line_size);
	pr_info("L2C OF: override way size: %d bytes (%dKB)\n",
		way_size, way_size >> 10);
	pr_info("L2C OF: override associativity: %d\n", *associativity);

	/*
	 * Calculates the bits 17:19 to set for way size:
	 * 512KB -> 6, 256KB -> 5, ... 16KB -> 1
	 */
	way_size_bits = ilog2(way_size >> 10) - 3;
	if (way_size_bits < 1 || way_size_bits > 6) {
		pr_err("L2C OF: cache way size illegal: %dKB is not mapped\n",
		       way_size);
		return;
	}

	mask |= L2C_AUX_CTRL_WAY_SIZE_MASK;
	val |= (way_size_bits << L2C_AUX_CTRL_WAY_SIZE_SHIFT);

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static void __init l2x0_of_parse(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;
	u32 assoc;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
	if (assoc > 8) {
		pr_err("l2x0 of: cache setting yields too high associativity\n");
		pr_err("l2x0 of: %d calculated, max 8\n", assoc);
	} else {
		mask |= L2X0_AUX_CTRL_ASSOC_MASK;
		val |= (assoc << L2X0_AUX_CTRL_ASSOC_SHIFT);
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c_enable,
	.save = l2c_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c210_resume,
	},
};

static const struct l2c_init_data of_l2c220_data __initconst = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c220_enable,
	.save = l2c_save,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c210_resume,
	},
};

static void __init l2c310_of_parse(const struct device_node *np,
	u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };
	u32 assoc;

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			L310_LATENCY_CTRL_RD(tag[0] - 1) |
			L310_LATENCY_CTRL_WR(tag[1] - 1) |
			L310_LATENCY_CTRL_SETUP(tag[2] - 1),
			l2x0_base + L310_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			L310_LATENCY_CTRL_RD(data[0] - 1) |
			L310_LATENCY_CTRL_WR(data[1] - 1) |
			L310_LATENCY_CTRL_SETUP(data[2] - 1),
			l2x0_base + L310_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L310_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L310_ADDR_FILTER_EN,
			       l2x0_base + L310_ADDR_FILTER_START);
	}

	l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
	switch (assoc) {
	case 16:
		*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
		*aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16;
		*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
		break;
	case 8:
		*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
		*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
		break;
	default:
		pr_err("PL310 OF: cache setting yields illegal associativity\n");
		pr_err("PL310 OF: %d calculated, only 8 and 16 legal\n", assoc);
		break;
	}
}

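/*
 * A device tree node consumed by the OF parsing above might look like
 * this (values purely illustrative):
 *
 *	L2: cache-controller@fff12000 {
 *		compatible = "arm,pl310-cache";
 *		reg = <0xfff12000 0x1000>;
 *		arm,tag-latency = <1 1 1>;
 *		arm,data-latency = <2 2 1>;
 *		cache-unified;
 *		cache-level = <2>;
 *	};
 */
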
static const struct l2c_init_data of_l2c310_data __initconst = {
	.type = "L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};

/*
 * This is a variant of the of_l2c310_data with .sync set to
 * NULL.  Outer sync operations are not needed when the system is I/O
 * coherent, and potentially harmful in certain situations (PCIe/PL310
 * deadlock on Armada 375/38x due to hardware I/O coherency).  The
 * other operations are kept because they are infrequent (therefore do
 * not cause the deadlock in practice) and needed for secondary CPU
 * boot and other power management activities.
 */
static const struct l2c_init_data of_l2c310_coherent_data __initconst = {
	.type = "L2C-310 Coherent",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.resume = l2c310_resume,
	},
};

/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (end > PAGE_ALIGN(start+1))
		end = PAGE_ALIGN(start+1);

	return end;
}

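/*
 * For example (illustrative, 4K pages, and assuming MAX_RANGE_SIZE is
 * larger than the 96-byte span): calc_range_end(0x100fe0, 0x101040)
 * returns 0x101000, stopping at the page boundary; the caller then
 * issues a second operation starting from there.
 */
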
/*
 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
 * and range operations only do a TLB lookup on the start address.
 */
static void aurora_pa_range(unsigned long start, unsigned long end,
			unsigned long offset)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
	writel_relaxed(end, l2x0_base + offset);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	cache_sync();
}

static void aurora_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * round the start address down and the end address up to the
	 * cache line size
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		unsigned long range_end = calc_range_end(start, end);
		aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
				AURORA_INVAL_RANGE_REG);
		start = range_end;
	}
}

static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = ALIGN(end, CACHE_LINE_SIZE);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_CLEAN_RANGE_REG);
			start = range_end;
		}
	}
}

static void aurora_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);
		/*
		 * If L2 is forced to WT, the L2 will always be clean and we
		 * just need to invalidate.
		 */
		if (l2_wt_override)
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_INVAL_RANGE_REG);
		else
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_FLUSH_RANGE_REG);
		start = range_end;
	}
}

static void aurora_save(void __iomem *base)
{
	l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}

static void aurora_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux_ctrl, base + L2X0_AUX_CTRL);
		writel_relaxed(l2x0_saved_regs.ctrl, base + L2X0_CTRL);
	}
}

/*
 * For Aurora cache in no outer mode, enable via the CP15 coprocessor
 * broadcasting of cache commands to L2.
 */
static void __init aurora_enable_no_outer(void __iomem *base, u32 aux,
	unsigned num_lock)
{
	u32 u;

	asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));

	isb();

	l2c_enable(base, aux, num_lock);
}

static void __init aurora_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	sync_reg_offset = AURORA_SYNC_REG;
}

static void __init aurora_of_parse(const struct device_node *np,
				u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			&cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
	.type = "Aurora",
	.way_size_0 = SZ_4K,
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = l2c_enable,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.inv_range = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = aurora_resume,
	},
};

static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
	.type = "Aurora",
	.way_size_0 = SZ_4K,
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = aurora_enable_no_outer,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.resume = aurora_resume,
	},
};

/*
 * For certain Broadcom SoCs, depending on the address range, different offsets
 * need to be added to the address before passing it to L2 for
 * invalidation/clean/flush
 *
 * Section Address Range              Offset        EMI
 *   1     0x00000000 - 0x3FFFFFFF    0x80000000    VC
 *   2     0x40000000 - 0xBFFFFFFF    0x40000000    SYS
 *   3     0xC0000000 - 0xFFFFFFFF    0x80000000    VC
 *
 * When the start and end addresses have crossed two different sections, we
 * need to break the L2 operation into two, each within its own section.
 * For example, if we need to invalidate a range that starts at 0xBFFF0000
 * and ends at 0xC0001000, we need to invalidate 1) 0xBFFF0000 - 0xBFFFFFFF
 * and 2) 0xC0000000 - 0xC0001000.
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer some
 * performance hit, but keep in mind the cross section case is very rare.
 *
 * Note 2:
 * We do not need to handle the case when the start address is in
 * Section 1 and the end address is in Section 3, since it is not a valid use
 * case.
 *
 * Note 3:
 * Section 1 in practical terms can no longer be used on rev A2.  Because of
 * that the code does not need to handle section 1 at all.
 */
#define BCM_SYS_EMI_START_ADDR		0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR	0xC0000000UL

#define BCM_SYS_EMI_OFFSET		0x40000000UL
#define BCM_VC_EMI_OFFSET		0x80000000UL

static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
	return (addr >= BCM_SYS_EMI_START_ADDR) &&
		(addr < BCM_VC_EMI_SEC3_START_ADDR);
}

static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
	if (bcm_addr_is_sys_emi(addr))
		return addr + BCM_SYS_EMI_OFFSET;
	else
		return addr + BCM_VC_EMI_OFFSET;
}

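/*
 * For example (illustrative): 0x50000000 lies in the SYS EMI section,
 * so bcm_l2_phys_addr() rebases it to 0x90000000, while 0xC0001000
 * lies in VC EMI section 3 and has the 0x80000000 offset applied
 * instead.
 */
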
static void bcm_inv_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_inv_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_inv_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_clean_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_clean_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_clean_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_flush_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		outer_cache.flush_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_flush_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_flush_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

/* Broadcom L2C-310 implementations are based on ARM's r3p2 or later,
 * and require no fixups */
static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
	.type = "BCM-L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c310_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};

static void __init tauros3_save(void __iomem *base)
{
	l2c_save(base);

	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(base + L310_PREFETCH_CTRL);
}

static void tauros3_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux2_ctrl,
			       base + TAUROS3_AUX2_CTRL);
		writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
			       base + L310_PREFETCH_CTRL);

		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
	}
}

static const struct l2c_init_data of_tauros3_data __initconst = {
	.type = "Tauros3",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.enable = l2c_enable,
	.save = tauros3_save,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume = tauros3_resume,
	},
};

#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
static const struct of_device_id l2x0_ids[] __initconst = {
	L2C_ID("arm,l210-cache", of_l2c210_data),
	L2C_ID("arm,l220-cache", of_l2c220_data),
	L2C_ID("arm,pl310-cache", of_l2c310_data),
	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
	/* Deprecated IDs */
	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	{}
};

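/*
 * Typical usage (illustrative): a platform with no auxiliary control
 * fixups of its own initialises the cache from its .init_machine hook
 * with
 *
 *	l2x0_of_init(0, ~0);
 *
 * i.e. a zero value and an all-ones mask, so the hardware and DT
 * provided settings are preserved.
 */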
int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	struct device_node *np;
	struct resource res;
	u32 cache_id, old_aux;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	if (of_device_is_compatible(np, "arm,pl310-cache") &&
	    of_property_read_bool(np, "arm,io-coherent"))
		data = &of_l2c310_coherent_data;

	old_aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
	if (old_aux != ((old_aux & aux_mask) | aux_val)) {
		pr_warn("L2C: platform modifies aux control register: 0x%08x -> 0x%08x\n",
		        old_aux, (old_aux & aux_mask) | aux_val);
	} else if (aux_mask != ~0U && aux_val != 0) {
		pr_alert("L2C: platform provided aux values match the hardware, so have no effect.  Please remove them.\n");
	}

	/* All L2 caches are unified, so this property should be specified */
	if (!of_property_read_bool(np, "cache-unified"))
		pr_err("L2C: device tree omits to specify unified cache\n");

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		if (data->of_parse)
			data->of_parse(np, &aux_val, &aux_mask);

	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

	__l2c_init(data, aux_val, aux_mask, cache_id);

	return 0;
}
#endif