/*
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 *              http://www.samsung.com
 *
 * arch/arm/mach-exynos/mcpm-exynos.c
 *
 * Based on arch/arm/mach-vexpress/dcscb.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/arm-cci.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of_address.h>

#include <asm/cputype.h>
#include <asm/cp15.h>
#include <asm/mcpm.h>

#include "regs-pmu.h"
#include "common.h"

#define EXYNOS5420_CPUS_PER_CLUSTER     4
#define EXYNOS5420_NR_CLUSTERS          2
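
/*
 * Exynos5420/5800 is a big.LITTLE SoC: one Cortex-A15 cluster and one
 * Cortex-A7 cluster of four cores each, i.e. eight logical CPUs.
 */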

#define EXYNOS5420_ENABLE_AUTOMATIC_CORE_DOWN   BIT(9)
#define EXYNOS5420_USE_ARM_CORE_DOWN_STATE      BIT(29)
#define EXYNOS5420_USE_L2_COMMON_UP_STATE       BIT(30)

/*
 * The common v7_exit_coherency_flush API could not be used because of the
 * Erratum 799270 workaround. This macro is the same as the common one (in
 * arch/arm/include/asm/cacheflush.h) except for the erratum handling.
 */
#define exynos_v7_exit_coherency_flush(level) \
        asm volatile( \
        "stmfd  sp!, {fp, ip}\n\t"\
        "mrc    p15, 0, r0, c1, c0, 0   @ get SCTLR\n\t" \
        "bic    r0, r0, #"__stringify(CR_C)"\n\t" \
        "mcr    p15, 0, r0, c1, c0, 0   @ set SCTLR\n\t" \
        "isb\n\t"\
        "bl     v7_flush_dcache_"__stringify(level)"\n\t" \
        "clrex\n\t"\
        "mrc    p15, 0, r0, c1, c0, 1   @ get ACTLR\n\t" \
        "bic    r0, r0, #(1 << 6)       @ disable local coherency\n\t" \
        /* Dummy Load of a device register to avoid Erratum 799270 */ \
        "ldr    r4, [%0]\n\t" \
        "and    r4, r4, #0\n\t" \
        "orr    r0, r0, r4\n\t" \
        "mcr    p15, 0, r0, c1, c0, 1   @ set ACTLR\n\t" \
        "isb\n\t" \
        "dsb\n\t" \
        "ldmfd  sp!, {fp, ip}" \
        : \
        : "Ir" (pmu_base_addr + S5P_INFORM0) \
        : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
          "r9", "r10", "lr", "memory")
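
/*
 * The dummy load above is what defeats erratum 799270: the value read
 * back from S5P_INFORM0 is ANDed to zero and ORed into the ACTLR value,
 * so the "mcr" that clears the SMP bit carries a data dependency on a
 * completed device load while the value written stays unchanged.
 */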

/*
 * We can't use regular spinlocks. In the switcher case, it is possible
 * for an outbound CPU to call power_down() after its inbound counterpart
 * is already live using the same logical CPU number which trips lockdep
 * debugging.
 */
static arch_spinlock_t exynos_mcpm_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int
cpu_use_count[EXYNOS5420_CPUS_PER_CLUSTER][EXYNOS5420_NR_CLUSTERS];

#define exynos_cluster_usecnt(cluster) \
        (cpu_use_count[0][cluster] +   \
         cpu_use_count[1][cluster] +   \
         cpu_use_count[2][cluster] +   \
         cpu_use_count[3][cluster])

#define exynos_cluster_unused(cluster) !exynos_cluster_usecnt(cluster)
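
/*
 * Each per-CPU use count only ever holds 0 (CPU down), 1 (CPU up) or
 * 2 (CPU asked to come back up before it finished powering down).  For
 * example, with cpu_use_count[0..3][1] = {1, 0, 0, 0}, the cluster 1
 * use count is 1, so powering CPU 0 down also leaves cluster 1 unused.
 */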

static int exynos_power_up(unsigned int cpu, unsigned int cluster)
{
        unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);

        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
        if (cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
                cluster >= EXYNOS5420_NR_CLUSTERS)
                return -EINVAL;

        /*
         * Since this is called with IRQs enabled, and no arch_spin_lock_irq
         * variant exists, we need to disable IRQs manually here.
         */
        local_irq_disable();
        arch_spin_lock(&exynos_mcpm_lock);

        cpu_use_count[cpu][cluster]++;
        if (cpu_use_count[cpu][cluster] == 1) {
                bool was_cluster_down =
                        (exynos_cluster_usecnt(cluster) == 1);

                /*
                 * Turn on the cluster (L2/COMMON) and then power on the
                 * cores.
                 */
                if (was_cluster_down)
                        exynos_cluster_power_up(cluster);

                exynos_cpu_power_up(cpunr);
        } else if (cpu_use_count[cpu][cluster] != 2) {
                /*
                 * The only possible values are:
                 * 0 = CPU down
                 * 1 = CPU (still) up
                 * 2 = CPU requested to be up before it had a chance
                 *     to actually power itself down.
                 * Any other value is a bug.
                 */
                BUG();
        }

        arch_spin_unlock(&exynos_mcpm_lock);
        local_irq_enable();

        return 0;
}

/*
 * NOTE: This function requires the stack data to be visible through power down
 * and can only be executed on processors like A15 and A7 that hit the cache
 * with the C bit clear in the SCTLR register.
 */
static void exynos_power_down(void)
{
        unsigned int mpidr, cpu, cluster;
        bool last_man = false, skip_wfi = false;
        unsigned int cpunr;

        mpidr = read_cpuid_mpidr();
        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);
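        /*
         * MPIDR affinity level 0 is the core number within its cluster and
         * level 1 the cluster number: e.g. MPIDR 0x101 decodes to cpu 1 in
         * cluster 1, giving cpunr 1 + (1 * 4) = 5.
         */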

        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
        BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
                        cluster >= EXYNOS5420_NR_CLUSTERS);

        __mcpm_cpu_going_down(cpu, cluster);

        arch_spin_lock(&exynos_mcpm_lock);
        BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
        cpu_use_count[cpu][cluster]--;
        if (cpu_use_count[cpu][cluster] == 0) {
                exynos_cpu_power_down(cpunr);

                if (exynos_cluster_unused(cluster)) {
                        exynos_cluster_power_down(cluster);
                        last_man = true;
                }
        } else if (cpu_use_count[cpu][cluster] == 1) {
                /*
                 * A power_up request went ahead of us.
                 * Even if we do not want to shut this CPU down,
                 * the caller expects a certain state as if the WFI
                 * was aborted.  So let's continue with cache cleaning.
                 */
                skip_wfi = true;
        } else {
                BUG();
        }

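        /*
         * Only the last man that also wins the race against any inbound
         * CPU in __mcpm_outbound_enter_critical() may tear down the
         * cluster-level state; everyone else cleans just its own cache
         * levels.
         */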
        if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
                arch_spin_unlock(&exynos_mcpm_lock);

                if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
                        /*
                         * On the Cortex-A15 we need to disable
                         * L2 prefetching before flushing the cache.
                         */
                        asm volatile(
                        "mcr    p15, 1, %0, c15, c0, 3\n\t"
                        "isb\n\t"
                        "dsb"
                        : : "r" (0x400));
                }

                /* Flush all cache levels for this cluster. */
                exynos_v7_exit_coherency_flush(all);

                /*
                 * Disable cluster-level coherency by masking
                 * incoming snoops and DVM messages:
                 */
                cci_disable_port_by_cpu(mpidr);

                __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
        } else {
                arch_spin_unlock(&exynos_mcpm_lock);

                /* Disable and flush the local CPU cache. */
                exynos_v7_exit_coherency_flush(louis);
        }

        __mcpm_cpu_down(cpu, cluster);

        /* Now we are prepared for power-down, do it: */
        if (!skip_wfi)
                wfi();

        /* Not dead at this point?  Let our caller cope. */
}

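/*
 * Poll the PMU until the given CPU reports powered-off, for roughly
 * 100 ms (100 iterations with a 1 ms sleep each).  Callers that must be
 * certain a CPU is really down, e.g. the hotplug kill path, rely on this.
 */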
static int exynos_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
{
        unsigned int tries = 100;
        unsigned int cpunr = cpu + (cluster * EXYNOS5420_CPUS_PER_CLUSTER);

        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
        BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
                        cluster >= EXYNOS5420_NR_CLUSTERS);

        /* Wait for the core state to be OFF */
        while (tries--) {
                if (ACCESS_ONCE(cpu_use_count[cpu][cluster]) == 0) {
                        if (exynos_cpu_power_state(cpunr) == 0)
                                return 0; /* success: the CPU is halted */
                }

                /* Otherwise, wait and retry: */
                msleep(1);
        }

        return -ETIMEDOUT; /* timeout */
}

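/*
 * Runs on a CPU that has just come up through the MCPM entry point.  If
 * its use count is still 0 (e.g. it was woken without a matching
 * power_up() call), record that it is now in use.
 */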
static void exynos_powered_up(void)
{
        unsigned int mpidr, cpu, cluster;

        mpidr = read_cpuid_mpidr();
        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

        arch_spin_lock(&exynos_mcpm_lock);
        if (cpu_use_count[cpu][cluster] == 0)
                cpu_use_count[cpu][cluster] = 1;
        arch_spin_unlock(&exynos_mcpm_lock);
}

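/*
 * MCPM suspend handler.  The residency hint passed in by the core is not
 * used here: we simply attempt a full power-down.
 */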
static void exynos_suspend(u64 residency)
{
        unsigned int mpidr, cpunr;

        exynos_power_down();

        /*
         * Execution reaches here only if the CPU did not power down.
         * Hence roll back the changes done in exynos_power_down().
         *
         * CAUTION: "This function requires the stack data to be visible through
         * power down and can only be executed on processors like A15 and A7
         * that hit the cache with the C bit clear in the SCTLR register."
         */
        mpidr = read_cpuid_mpidr();
        cpunr = exynos_pmu_cpunr(mpidr);

        exynos_cpu_power_up(cpunr);
}

static const struct mcpm_platform_ops exynos_power_ops = {
        .power_up               = exynos_power_up,
        .power_down             = exynos_power_down,
        .wait_for_powerdown     = exynos_wait_for_powerdown,
        .suspend                = exynos_suspend,
        .powered_up             = exynos_powered_up,
};

static void __init exynos_mcpm_usage_count_init(void)
{
        unsigned int mpidr, cpu, cluster;

        mpidr = read_cpuid_mpidr();
        cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
        BUG_ON(cpu >= EXYNOS5420_CPUS_PER_CLUSTER ||
                        cluster >= EXYNOS5420_NR_CLUSTERS);

        cpu_use_count[cpu][cluster] = 1;
}

/*
 * Enable cluster-level coherency, in preparation for turning on the MMU.
 */
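/*
 * This runs straight out of the MCPM entry point with the MMU and caches
 * still off, hence __naked assembly with no stack use.  r0 holds the
 * affinity level being brought up; only at level 1 (the cluster) do we
 * tail-call cci_enable_port_for_self to re-enable snoops for the cluster.
 */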
static void __naked exynos_pm_power_up_setup(unsigned int affinity_level)
{
        asm volatile ("\n"
        "cmp    r0, #1\n"
        "bxne   lr\n"
        "b      cci_enable_port_for_self");
}

static void __init exynos_cache_off(void)
{
        if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
                /* disable L2 prefetching on the Cortex-A15 */
                asm volatile(
                "mcr    p15, 1, %0, c15, c0, 3\n\t"
                "isb\n\t"
                "dsb"
                : : "r" (0x400));
        }
        exynos_v7_exit_coherency_flush(all);
}

static const struct of_device_id exynos_dt_mcpm_match[] = {
        { .compatible = "samsung,exynos5420" },
        { .compatible = "samsung,exynos5800" },
        {},
};

static int __init exynos_mcpm_init(void)
{
        struct device_node *node;
        void __iomem *ns_sram_base_addr;
        unsigned int value, i;
        int ret;

        node = of_find_matching_node(NULL, exynos_dt_mcpm_match);
        if (!node)
                return -ENODEV;
        of_node_put(node);

        if (!cci_probed())
                return -ENODEV;

        node = of_find_compatible_node(NULL, NULL,
                        "samsung,exynos4210-sysram-ns");
        if (!node)
                return -ENODEV;

        ns_sram_base_addr = of_iomap(node, 0);
        of_node_put(node);
        if (!ns_sram_base_addr) {
                pr_err("failed to map non-secure iRAM base address\n");
                return -ENOMEM;
        }

        /*
         * To increase the stability of KFC reset we need to program
         * the PMU SPARE3 register
         */
        pmu_raw_writel(EXYNOS5420_SWRESET_KFC_SEL, S5P_PMU_SPARE3);

        exynos_mcpm_usage_count_init();

        ret = mcpm_platform_register(&exynos_power_ops);
        if (!ret)
                ret = mcpm_sync_init(exynos_pm_power_up_setup);
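        /*
         * mcpm_loopback() soft-restarts this boot CPU once through the
         * low-level MCPM code, so that the CCI port of the boot cluster
         * gets enabled by exynos_pm_power_up_setup().
         */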
        if (!ret)
                ret = mcpm_loopback(exynos_cache_off); /* turn on the CCI */
        if (ret) {
                iounmap(ns_sram_base_addr);
                return ret;
        }

        mcpm_smp_set_ops();

        pr_info("Exynos MCPM support installed\n");

        /*
         * On Exynos5420/5800 for the A15 and A7 clusters:
         *
         * EXYNOS5420_ENABLE_AUTOMATIC_CORE_DOWN ensures that all the cores
         * in a cluster are turned off before turning off the cluster L2.
         *
         * EXYNOS5420_USE_ARM_CORE_DOWN_STATE ensures that a core is powered
         * off before waking it up.
         *
         * EXYNOS5420_USE_L2_COMMON_UP_STATE ensures that the cluster L2 will
         * be turned on before the first man is powered up.
         */
        for (i = 0; i < EXYNOS5420_NR_CLUSTERS; i++) {
                value = pmu_raw_readl(EXYNOS_COMMON_OPTION(i));
                value |= EXYNOS5420_ENABLE_AUTOMATIC_CORE_DOWN |
                         EXYNOS5420_USE_ARM_CORE_DOWN_STATE    |
                         EXYNOS5420_USE_L2_COMMON_UP_STATE;
                pmu_raw_writel(value, EXYNOS_COMMON_OPTION(i));
        }

        /*
         * U-Boot SPL is hardcoded to jump to the start of ns_sram_base_addr
         * as part of secondary_cpu_start().  Let's redirect it to the
         * mcpm_entry_point().
         */
        __raw_writel(0xe59f0000, ns_sram_base_addr);     /* ldr r0, [pc, #0] */
        __raw_writel(0xe12fff10, ns_sram_base_addr + 4); /* bx  r0 */
        __raw_writel(virt_to_phys(mcpm_entry_point), ns_sram_base_addr + 8);
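
        /*
         * The trampoline above exploits the ARM pc reading as the current
         * instruction plus 8: the "ldr r0, [pc, #0]" at offset 0 loads the
         * literal stored at offset 8 (the physical address of
         * mcpm_entry_point), and "bx r0" then jumps there.
         */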

        iounmap(ns_sram_base_addr);

        return ret;
}

early_initcall(exynos_mcpm_init);