ARM: OMAP4+: CPUidle: Consolidate idle driver for OMAP5 support
[cascardo/linux.git] / arch / arm / mach-omap2 / cpuidle44xx.c
1 /*
2  * OMAP4+ CPU idle Routines
3  *
4  * Copyright (C) 2011-2013 Texas Instruments, Inc.
5  * Santosh Shilimkar <santosh.shilimkar@ti.com>
6  * Rajendra Nayak <rnayak@ti.com>
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  */
12
13 #include <linux/sched.h>
14 #include <linux/cpuidle.h>
15 #include <linux/cpu_pm.h>
16 #include <linux/export.h>
17
18 #include <asm/proc-fns.h>
19
20 #include "common.h"
21 #include "pm.h"
22 #include "prm.h"
23 #include "clockdomain.h"
24
/*
 * Machine specific information: the power-domain targets programmed
 * for one C-state.  One instance per supported idle state.
 */
struct idle_statedata {
	u32 cpu_state;		/* target state for the per-CPU power domain */
	u32 mpu_logic_state;	/* MPU subsystem logic retention state */
	u32 mpu_state;		/* target state for the MPU power domain */
};
31
/*
 * OMAP4 C-state table, indexed by cpuidle state index:
 *   C1: CPUx ON,  MPU ON   (WFI only)
 *   C2: CPUx OFF, MPU RET with logic retained  (CSWR)
 *   C3: CPUx OFF, MPU RET with logic off       (OSWR)
 */
static struct idle_statedata omap4_idle_data[] = {
	{
		/* C1 - everything stays powered, plain WFI */
		.cpu_state = PWRDM_POWER_ON,
		.mpu_state = PWRDM_POWER_ON,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
	{
		/* C2 - CPUs off, MPU closed switched retention */
		.cpu_state = PWRDM_POWER_OFF,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
	{
		/* C3 - CPUs off, MPU open switched retention (logic off) */
		.cpu_state = PWRDM_POWER_OFF,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_OFF,
	},
};
49
/* Power/clock domain handles, resolved once in omap4_idle_init() */
static struct powerdomain *mpu_pd, *cpu_pd[NR_CPUS];
static struct clockdomain *cpu_clkdm[NR_CPUS];

/* Rendezvous point for both CPUs on the coupled-idle exit/abort path */
static atomic_t abort_barrier;
/* Set once a CPU has returned from omap4_enter_lowpower(); cleared at exit */
static bool cpu_done[NR_CPUS];
/* Active C-state table; points at the OMAP4 table above */
static struct idle_statedata *state_ptr = &omap4_idle_data[0];
56
57 /* Private functions */
58
59 /**
60  * omap_enter_idle_[simple/coupled] - OMAP4PLUS cpuidle entry functions
61  * @dev: cpuidle device
62  * @drv: cpuidle driver
63  * @index: the index of state to be entered
64  *
65  * Called from the CPUidle framework to program the device to the
66  * specified low power state selected by the governor.
67  * Returns the amount of time spent in the low power state.
68  */
static int omap_enter_idle_simple(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	/*
	 * C1: plain WFI with FIQs masked; no power-domain programming
	 * and no PM notifiers needed since nothing loses context.
	 */
	local_fiq_disable();
	omap_do_wfi();
	local_fiq_enable();

	return index;
}
79
static int omap_enter_idle_coupled(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	/* Look up the power-domain targets for the requested C-state */
	struct idle_statedata *cx = state_ptr + index;

	local_fiq_disable();

	/*
	 * CPU0 has to wait and stay ON until CPU1 is OFF state.
	 * This is necessary to honour hardware recommondation
	 * of triggeing all the possible low power modes once CPU1 is
	 * out of coherency and in OFF mode.
	 */
	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
		while (pwrdm_read_pwrst(cpu_pd[1]) != PWRDM_POWER_OFF) {
			cpu_relax();

			/*
			 * CPU1 could have already entered & exited idle
			 * without hitting off because of a wakeup
			 * or a failed attempt to hit off mode.  Check for
			 * that here, otherwise we could spin forever
			 * waiting for CPU1 off.
			 */
			if (cpu_done[1])
			    goto fail;

		}
	}

	/*
	 * Call idle CPU PM enter notifier chain so that
	 * VFP and per CPU interrupt context is saved.
	 */
	cpu_pm_enter();

	/* Only CPU0 programs the cluster-wide (MPU) power domain */
	if (dev->cpu == 0) {
		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
		omap_set_pwrdm_state(mpu_pd, cx->mpu_state);

		/*
		 * Call idle CPU cluster PM enter notifier chain
		 * to save GIC and wakeupgen context.
		 * Only needed for OSWR (MPU RET + logic OFF), where
		 * that context is actually lost.
		 */
		if ((cx->mpu_state == PWRDM_POWER_RET) &&
			(cx->mpu_logic_state == PWRDM_POWER_OFF))
				cpu_cluster_pm_enter();
	}

	/* Program this CPU's low-power state and execute WFI */
	omap4_enter_lowpower(dev->cpu, cx->cpu_state);
	/* Tell the other CPU we made it through the low-power attempt */
	cpu_done[dev->cpu] = true;

	/* Wakeup CPU1 only if it is not offlined */
	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
		clkdm_wakeup(cpu_clkdm[1]);
		clkdm_allow_idle(cpu_clkdm[1]);
	}

	/*
	 * Call idle CPU PM exit notifier chain to restore
	 * VFP and per CPU IRQ context.
	 */
	cpu_pm_exit();

	/*
	 * Call idle CPU cluster PM exit notifier chain
	 * to restore GIC and wakeupgen context.
	 * Mirrors the cpu_cluster_pm_enter() call above.
	 */
	if ((cx->mpu_state == PWRDM_POWER_RET) &&
		(cx->mpu_logic_state == PWRDM_POWER_OFF))
		cpu_cluster_pm_exit();

fail:
	/* Both CPUs resynchronize here, on success and on abort alike */
	cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
	cpu_done[dev->cpu] = false;

	local_fiq_enable();

	return index;
}
161
/* Per-CPU cpuidle device instances, registered in omap4_idle_init() */
static DEFINE_PER_CPU(struct cpuidle_device, omap_idle_dev);

static struct cpuidle_driver omap4_idle_driver = {
	.name				= "omap4_idle",
	.owner				= THIS_MODULE,
	/* Core toolkit handles irq enable/disable around ->enter() */
	.en_core_tk_irqen		= 1,
	.states = {
		{
			/* C1 - CPU0 ON + CPU1 ON + MPU ON */
			/* latencies in us; presumably entry + exit costs — unverified here */
			.exit_latency = 2 + 2,
			.target_residency = 5,
			.flags = CPUIDLE_FLAG_TIME_VALID,
			.enter = omap_enter_idle_simple,
			.name = "C1",
			.desc = "CPUx ON, MPUSS ON"
		},
		{
			/* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
			.exit_latency = 328 + 440,
			.target_residency = 960,
			/* coupled: both CPUs must enter together; local timer stops */
			.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
				 CPUIDLE_FLAG_TIMER_STOP,
			.enter = omap_enter_idle_coupled,
			.name = "C2",
			.desc = "CPUx OFF, MPUSS CSWR",
		},
		{
			/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
			.exit_latency = 460 + 518,
			.target_residency = 1100,
			.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
				 CPUIDLE_FLAG_TIMER_STOP,
			.enter = omap_enter_idle_coupled,
			.name = "C3",
			.desc = "CPUx OFF, MPUSS OSWR",
		},
	},
	/* Must stay in sync with the omap4_idle_data[] table */
	.state_count = ARRAY_SIZE(omap4_idle_data),
	/* C1 is always safe: no coupling, no context loss */
	.safe_state_index = 0,
};
202
203 /* Public functions */
204
205 /**
206  * omap4_idle_init - Init routine for OMAP4+ idle
207  *
208  * Registers the OMAP4+ specific cpuidle driver to the cpuidle
209  * framework with the valid set of states.
210  */
211 int __init omap4_idle_init(void)
212 {
213         struct cpuidle_device *dev;
214         unsigned int cpu_id = 0;
215
216         mpu_pd = pwrdm_lookup("mpu_pwrdm");
217         cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm");
218         cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm");
219         if ((!mpu_pd) || (!cpu_pd[0]) || (!cpu_pd[1]))
220                 return -ENODEV;
221
222         cpu_clkdm[0] = clkdm_lookup("mpu0_clkdm");
223         cpu_clkdm[1] = clkdm_lookup("mpu1_clkdm");
224         if (!cpu_clkdm[0] || !cpu_clkdm[1])
225                 return -ENODEV;
226
227         if (cpuidle_register_driver(&omap4_idle_driver)) {
228                 pr_err("%s: CPUidle driver register failed\n", __func__);
229                 return -EIO;
230         }
231
232         for_each_cpu(cpu_id, cpu_online_mask) {
233                 dev = &per_cpu(omap_idle_dev, cpu_id);
234                 dev->cpu = cpu_id;
235 #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
236                 dev->coupled_cpus = *cpu_online_mask;
237 #endif
238                 if (cpuidle_register_device(dev)) {
239                         pr_err("%s: CPUidle register failed\n", __func__);
240                         cpuidle_unregister_driver(&omap4_idle_driver);
241                         return -EIO;
242                 }
243         }
244
245         return 0;
246 }