/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)          \
({                                                              \
        type (*__routine)(struct device *__d);                  \
        type __ret = (type)0;                                   \
                                                                \
        __routine = genpd->dev_ops.callback;                    \
        if (__routine) {                                        \
                __ret = __routine(dev);                         \
        }                                                       \
        __ret;                                                  \
})

#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name)       \
({                                                                              \
        ktime_t __start = ktime_get();                                          \
        type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev);         \
        s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start));           \
        struct gpd_timing_data *__td = &dev_gpd_data(dev)->td;                  \
        if (!__retval && __elapsed > __td->field) {                             \
                __td->field = __elapsed;                                        \
                dev_dbg(dev, name " latency exceeded, new value %lld ns\n",     \
                        __elapsed);                                             \
                genpd->max_off_time_changed = true;                             \
                __td->constraint_changed = true;                                \
        }                                                                       \
        __retval;                                                               \
})
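
/*
 * Illustration (hypothetical expansion, for readability only): with
 * callback == stop, GENPD_DEV_CALLBACK() behaves roughly like
 *
 *	int genpd_call_stop(struct generic_pm_domain *genpd, struct device *dev)
 *	{
 *		int (*routine)(struct device *) = genpd->dev_ops.stop;
 *
 *		return routine ? routine(dev) : 0;
 *	}
 *
 * and the timed variant additionally brackets the call with ktime_get()
 * and records a new worst-case latency in the device's gpd_timing_data.
 */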

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
{
        struct generic_pm_domain *genpd = NULL, *gpd;

        if (IS_ERR_OR_NULL(domain_name))
                return NULL;

        mutex_lock(&gpd_list_lock);
        list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
                if (!strcmp(gpd->name, domain_name)) {
                        genpd = gpd;
                        break;
                }
        }
        mutex_unlock(&gpd_list_lock);
        return genpd;
}

struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
        if (IS_ERR_OR_NULL(dev->pm_domain))
                return ERR_PTR(-EINVAL);

        return pd_to_genpd(dev->pm_domain);
}
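
/*
 * Usage sketch (hypothetical caller, not tied to any particular driver):
 * subsystem code holding a device whose ->pm_domain is known to be a
 * generic PM domain can recover the genpd like this:
 *
 *	struct generic_pm_domain *genpd = dev_to_genpd(dev);
 *
 *	if (IS_ERR(genpd))
 *		return -EINVAL;		(dev is not in a generic PM domain)
 *
 * The result is only meaningful while the device stays bound to the domain.
 */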

static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
                                        stop_latency_ns, "stop");
}

static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
                                        start_latency_ns, "start");
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
        bool ret = false;

        if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
                ret = !!atomic_dec_and_test(&genpd->sd_count);

        return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
        atomic_inc(&genpd->sd_count);
        smp_mb__after_atomic();
}

static void genpd_acquire_lock(struct generic_pm_domain *genpd)
{
        DEFINE_WAIT(wait);

        mutex_lock(&genpd->lock);
        /*
         * Wait for the domain to transition into either the active
         * or the power off state.
         */
        for (;;) {
                prepare_to_wait(&genpd->status_wait_queue, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (genpd->status == GPD_STATE_ACTIVE
                    || genpd->status == GPD_STATE_POWER_OFF)
                        break;
                mutex_unlock(&genpd->lock);

                schedule();

                mutex_lock(&genpd->lock);
        }
        finish_wait(&genpd->status_wait_queue, &wait);
}

static void genpd_release_lock(struct generic_pm_domain *genpd)
{
        mutex_unlock(&genpd->lock);
}

static void genpd_set_active(struct generic_pm_domain *genpd)
{
        if (genpd->resume_count == 0)
                genpd->status = GPD_STATE_ACTIVE;
}

static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
{
        s64 usecs64;

        if (!genpd->cpuidle_data)
                return;

        usecs64 = genpd->power_on_latency_ns;
        do_div(usecs64, NSEC_PER_USEC);
        usecs64 += genpd->cpuidle_data->saved_exit_latency;
        genpd->cpuidle_data->idle_state->exit_latency = usecs64;
}

static int genpd_power_on(struct generic_pm_domain *genpd)
{
        ktime_t time_start;
        s64 elapsed_ns;
        int ret;

        if (!genpd->power_on)
                return 0;

        time_start = ktime_get();
        ret = genpd->power_on(genpd);
        if (ret)
                return ret;

        elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
        if (elapsed_ns <= genpd->power_on_latency_ns)
                return ret;

        genpd->power_on_latency_ns = elapsed_ns;
        genpd->max_off_time_changed = true;
        genpd_recalc_cpu_exit_latency(genpd);
        pr_warn("%s: Power-%s latency exceeded, new value %lld ns\n",
                genpd->name, "on", elapsed_ns);

        return ret;
}

static int genpd_power_off(struct generic_pm_domain *genpd)
{
        ktime_t time_start;
        s64 elapsed_ns;
        int ret;

        if (!genpd->power_off)
                return 0;

        time_start = ktime_get();
        ret = genpd->power_off(genpd);
        if (ret == -EBUSY)
                return ret;

        elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
        if (elapsed_ns <= genpd->power_off_latency_ns)
                return ret;

        genpd->power_off_latency_ns = elapsed_ns;
        genpd->max_off_time_changed = true;
        pr_warn("%s: Power-%s latency exceeded, new value %lld ns\n",
                genpd->name, "off", elapsed_ns);

        return ret;
}
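
/*
 * Provider sketch (hypothetical platform code; the register names and
 * foo_pmu_base are made up): a platform that registers a domain supplies the
 * ->power_on/->power_off callbacks that genpd_power_on()/genpd_power_off()
 * time above, e.g.:
 *
 *	static int foo_pd_power_on(struct generic_pm_domain *domain)
 *	{
 *		writel(FOO_PWR_UP, foo_pmu_base + FOO_PWRCTL);
 *		return 0;
 *	}
 *
 *	static int foo_pd_power_off(struct generic_pm_domain *domain)
 *	{
 *		writel(FOO_PWR_DOWN, foo_pmu_base + FOO_PWRCTL);
 *		return 0;
 *	}
 *
 * The measured worst-case latencies feed the domain governor via
 * power_on_latency_ns/power_off_latency_ns.
 */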

/**
 * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct gpd_link *link;
        DEFINE_WAIT(wait);
        int ret = 0;

        /* If the domain's master is being waited for, we have to wait too. */
        for (;;) {
                prepare_to_wait(&genpd->status_wait_queue, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (genpd->status != GPD_STATE_WAIT_MASTER)
                        break;
                mutex_unlock(&genpd->lock);

                schedule();

                mutex_lock(&genpd->lock);
        }
        finish_wait(&genpd->status_wait_queue, &wait);

        if (genpd->status == GPD_STATE_ACTIVE
            || (genpd->prepared_count > 0 && genpd->suspend_power_off))
                return 0;

        if (genpd->status != GPD_STATE_POWER_OFF) {
                genpd_set_active(genpd);
                return 0;
        }

        if (genpd->cpuidle_data) {
                cpuidle_pause_and_lock();
                genpd->cpuidle_data->idle_state->disabled = true;
                cpuidle_resume_and_unlock();
                goto out;
        }

        /*
         * The list is guaranteed not to change while the loop below is being
         * executed, unless one of the masters' .power_on() callbacks fiddles
         * with it.
         */
        list_for_each_entry(link, &genpd->slave_links, slave_node) {
                genpd_sd_counter_inc(link->master);
                genpd->status = GPD_STATE_WAIT_MASTER;

                mutex_unlock(&genpd->lock);

                ret = pm_genpd_poweron(link->master);

                mutex_lock(&genpd->lock);
                /*
                 * The "wait for master" status is guaranteed not to change
                 * while the master is powering on.
                 */
                genpd->status = GPD_STATE_POWER_OFF;
                wake_up_all(&genpd->status_wait_queue);
                if (ret) {
                        genpd_sd_counter_dec(link->master);
                        goto err;
                }
        }

        ret = genpd_power_on(genpd);
        if (ret)
                goto err;

 out:
        genpd_set_active(genpd);

        return 0;

 err:
        list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
                genpd_sd_counter_dec(link->master);

        return ret;
}

/**
 * pm_genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 */
int pm_genpd_poweron(struct generic_pm_domain *genpd)
{
        int ret;

        mutex_lock(&genpd->lock);
        ret = __pm_genpd_poweron(genpd);
        mutex_unlock(&genpd->lock);
        return ret;
}
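
/*
 * Usage sketch (hypothetical caller): platform setup code that must touch
 * hardware in a domain before any of the domain's devices resume can do:
 *
 *	ret = pm_genpd_poweron(genpd);
 *	if (ret)
 *		return ret;
 *
 * The call takes the domain mutex and may sleep, so it must not be made in
 * atomic context.
 */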

/**
 * pm_genpd_name_poweron - Restore power to a given PM domain and its masters.
 * @domain_name: Name of the PM domain to power up.
 */
int pm_genpd_name_poweron(const char *domain_name)
{
        struct generic_pm_domain *genpd;

        genpd = pm_genpd_lookup_name(domain_name);
        return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
}
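
/*
 * Usage sketch (the domain name "a3sp" is only an example): boards that know
 * their domains by name can bring one up without holding a genpd pointer:
 *
 *	if (pm_genpd_name_poweron("a3sp"))
 *		pr_err("failed to power on a3sp\n");
 */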

#ifdef CONFIG_PM_RUNTIME

static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
                                     struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
                                        save_state_latency_ns, "state save");
}

static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
{
        return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
                                        restore_state_latency_ns,
                                        "state restore");
}

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
                                     unsigned long val, void *ptr)
{
        struct generic_pm_domain_data *gpd_data;
        struct device *dev;

        gpd_data = container_of(nb, struct generic_pm_domain_data, nb);

        mutex_lock(&gpd_data->lock);
        dev = gpd_data->base.dev;
        if (!dev) {
                mutex_unlock(&gpd_data->lock);
                return NOTIFY_DONE;
        }
        mutex_unlock(&gpd_data->lock);

        for (;;) {
                struct generic_pm_domain *genpd;
                struct pm_domain_data *pdd;

                spin_lock_irq(&dev->power.lock);

                pdd = dev->power.subsys_data ?
                                dev->power.subsys_data->domain_data : NULL;
                if (pdd && pdd->dev) {
                        to_gpd_data(pdd)->td.constraint_changed = true;
                        genpd = dev_to_genpd(dev);
                } else {
                        genpd = ERR_PTR(-ENODATA);
                }

                spin_unlock_irq(&dev->power.lock);

                if (!IS_ERR(genpd)) {
                        mutex_lock(&genpd->lock);
                        genpd->max_off_time_changed = true;
                        mutex_unlock(&genpd->lock);
                }

                dev = dev->parent;
                if (!dev || dev->power.ignore_children)
                        break;
        }

        return NOTIFY_DONE;
}
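
/*
 * Trigger sketch (hypothetical driver code): the notifier above runs when a
 * device PM QoS request changes, e.g. after
 *
 *	struct dev_pm_qos_request req;
 *
 *	dev_pm_qos_add_request(dev, &req, DEV_PM_QOS_RESUME_LATENCY, 2000);
 *
 * which marks the per-device timing constraints stale, up the parent chain,
 * so that the governor re-evaluates whether the domain may be powered off.
 */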

/**
 * __pm_genpd_save_device - Save the pre-suspend state of a device.
 * @pdd: Domain data of the device to save the state of.
 * @genpd: PM domain the device belongs to.
 */
static int __pm_genpd_save_device(struct pm_domain_data *pdd,
                                  struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
        struct device *dev = pdd->dev;
        int ret = 0;

        if (gpd_data->need_restore > 0)
                return 0;

        /*
         * If the value of the need_restore flag is still unknown at this point,
         * we trust that pm_genpd_poweroff() has verified that the device is
         * already runtime PM suspended.
         */
        if (gpd_data->need_restore < 0) {
                gpd_data->need_restore = 1;
                return 0;
        }

        mutex_unlock(&genpd->lock);

        genpd_start_dev(genpd, dev);
        ret = genpd_save_dev(genpd, dev);
        genpd_stop_dev(genpd, dev);

        mutex_lock(&genpd->lock);

        if (!ret)
                gpd_data->need_restore = 1;

        return ret;
}

/**
 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
 * @pdd: Domain data of the device to restore the state of.
 * @genpd: PM domain the device belongs to.
 */
static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
                                      struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
        struct device *dev = pdd->dev;
        int need_restore = gpd_data->need_restore;

        gpd_data->need_restore = 0;
        mutex_unlock(&genpd->lock);

        genpd_start_dev(genpd, dev);

        /*
         * Call genpd_restore_dev() for recently added devices too (need_restore
         * is negative then).
         */
        if (need_restore)
                genpd_restore_dev(genpd, dev);

        mutex_lock(&genpd->lock);
}

/**
 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
 * @genpd: PM domain to check.
 *
 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
 * a "power off" operation, which means that a "power on" has occurred in the
 * meantime, or if its resume_count field is different from zero, which means
 * that one of its devices has been resumed in the meantime.
 */
static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
{
        return genpd->status == GPD_STATE_WAIT_MASTER
                || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of pm_genpd_poweroff() unless it is already queued.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
        queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * pm_genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, run the runtime suspend callbacks provided by all of
 * the @genpd's devices' drivers and remove power from @genpd.
 */
static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
        __releases(&genpd->lock) __acquires(&genpd->lock)
{
        struct pm_domain_data *pdd;
        struct gpd_link *link;
        unsigned int not_suspended;
        int ret = 0;

 start:
        /*
         * Do not try to power off the domain in the following situations:
         * (1) The domain is already in the "power off" state.
         * (2) The domain is waiting for its master to power up.
         * (3) One of the domain's devices is being resumed right now.
         * (4) System suspend is in progress.
         */
        if (genpd->status == GPD_STATE_POWER_OFF
            || genpd->status == GPD_STATE_WAIT_MASTER
            || genpd->resume_count > 0 || genpd->prepared_count > 0)
                return 0;

        if (atomic_read(&genpd->sd_count) > 0)
                return -EBUSY;

        not_suspended = 0;
        list_for_each_entry(pdd, &genpd->dev_list, list_node) {
                enum pm_qos_flags_status stat;

                stat = dev_pm_qos_flags(pdd->dev,
                                        PM_QOS_FLAG_NO_POWER_OFF
                                                | PM_QOS_FLAG_REMOTE_WAKEUP);
                if (stat > PM_QOS_FLAGS_NONE)
                        return -EBUSY;

                if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
                    || pdd->dev->power.irq_safe))
                        not_suspended++;
        }

        if (not_suspended > genpd->in_progress)
                return -EBUSY;

        if (genpd->poweroff_task) {
                /*
                 * Another instance of pm_genpd_poweroff() is executing
                 * callbacks, so tell it to start over and return.
                 */
                genpd->status = GPD_STATE_REPEAT;
                return 0;
        }

        if (genpd->gov && genpd->gov->power_down_ok) {
                if (!genpd->gov->power_down_ok(&genpd->domain))
                        return -EAGAIN;
        }

        genpd->status = GPD_STATE_BUSY;
        genpd->poweroff_task = current;

        list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
                ret = atomic_read(&genpd->sd_count) == 0 ?
                        __pm_genpd_save_device(pdd, genpd) : -EBUSY;

                if (genpd_abort_poweroff(genpd))
                        goto out;

                if (ret) {
                        genpd_set_active(genpd);
                        goto out;
                }

                if (genpd->status == GPD_STATE_REPEAT) {
                        genpd->poweroff_task = NULL;
                        goto start;
                }
        }

        if (genpd->cpuidle_data) {
                /*
                 * If cpuidle_data is set, cpuidle should turn the domain off
                 * when the CPU in it is idle.  In that case we don't decrement
                 * the subdomain counts of the master domains, so that power is
                 * not removed from the current domain prematurely as a result
                 * of cutting off the masters' power.
                 */
                genpd->status = GPD_STATE_POWER_OFF;
                cpuidle_pause_and_lock();
                genpd->cpuidle_data->idle_state->disabled = false;
                cpuidle_resume_and_unlock();
                goto out;
        }

        if (genpd->power_off) {
                if (atomic_read(&genpd->sd_count) > 0) {
                        ret = -EBUSY;
                        goto out;
                }

                /*
                 * If sd_count > 0 at this point, one of the subdomains hasn't
                 * managed to call pm_genpd_poweron() for the master yet after
                 * incrementing it.  In that case pm_genpd_poweron() will wait
                 * for us to drop the lock, so we can call .power_off() and let
                 * the pm_genpd_poweron() restore power for us (this shouldn't
                 * happen very often).
                 */
                ret = genpd_power_off(genpd);
                if (ret == -EBUSY) {
                        genpd_set_active(genpd);
                        goto out;
                }
        }

        genpd->status = GPD_STATE_POWER_OFF;

        list_for_each_entry(link, &genpd->slave_links, slave_node) {
                genpd_sd_counter_dec(link->master);
                genpd_queue_power_off_work(link->master);
        }

 out:
        genpd->poweroff_task = NULL;
        wake_up_all(&genpd->status_wait_queue);
        return ret;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
        struct generic_pm_domain *genpd;

        genpd = container_of(work, struct generic_pm_domain, power_off_work);

        genpd_acquire_lock(genpd);
        pm_genpd_poweroff(genpd);
        genpd_release_lock(genpd);
}

/**
 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_suspend(struct device *dev)
{
        struct generic_pm_domain *genpd;
        struct generic_pm_domain_data *gpd_data;
        bool (*stop_ok)(struct device *__dev);
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
        if (stop_ok && !stop_ok(dev))
                return -EBUSY;

        ret = genpd_stop_dev(genpd, dev);
        if (ret)
                return ret;

        /*
         * If power.irq_safe is set, this routine will be run with interrupts
         * off, so it can't use mutexes.
         */
        if (dev->power.irq_safe)
                return 0;

        mutex_lock(&genpd->lock);

        /*
         * If we have an unknown state of the need_restore flag, it means none
         * of the runtime PM callbacks has been invoked yet. Let's update the
         * flag to reflect that the current state is active.
         */
        gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
        if (gpd_data->need_restore < 0)
                gpd_data->need_restore = 0;

        genpd->in_progress++;
        pm_genpd_poweroff(genpd);
        genpd->in_progress--;
        mutex_unlock(&genpd->lock);

        return 0;
}

/**
 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_runtime_resume(struct device *dev)
{
        struct generic_pm_domain *genpd;
        DEFINE_WAIT(wait);
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        /* If power.irq_safe, the PM domain is never powered off. */
        if (dev->power.irq_safe)
                return genpd_start_dev_no_timing(genpd, dev);

        mutex_lock(&genpd->lock);
        ret = __pm_genpd_poweron(genpd);
        if (ret) {
                mutex_unlock(&genpd->lock);
                return ret;
        }
        genpd->status = GPD_STATE_BUSY;
        genpd->resume_count++;
        for (;;) {
                prepare_to_wait(&genpd->status_wait_queue, &wait,
                                TASK_UNINTERRUPTIBLE);
                /*
                 * If current is the powering off task, we have been called
                 * reentrantly from one of the device callbacks, so we should
                 * not wait.
                 */
                if (!genpd->poweroff_task || genpd->poweroff_task == current)
                        break;
                mutex_unlock(&genpd->lock);

                schedule();

                mutex_lock(&genpd->lock);
        }
        finish_wait(&genpd->status_wait_queue, &wait);
        __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
        genpd->resume_count--;
        genpd_set_active(genpd);
        wake_up_all(&genpd->status_wait_queue);
        mutex_unlock(&genpd->lock);

        return 0;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
        pd_ignore_unused = true;
        return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);
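
/*
 * Usage note: booting with "pd_ignore_unused" on the kernel command line
 * sets the flag above, so pm_genpd_poweroff_unused() below leaves otherwise
 * unused domains powered on (useful when debugging power-domain bringup).
 */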

/**
 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
 */
void pm_genpd_poweroff_unused(void)
{
        struct generic_pm_domain *genpd;

        if (pd_ignore_unused) {
                pr_warn("genpd: Not disabling unused power domains\n");
                return;
        }

        mutex_lock(&gpd_list_lock);

        list_for_each_entry(genpd, &gpd_list, gpd_list_node)
                genpd_queue_power_off_work(genpd);

        mutex_unlock(&gpd_list_lock);
}

static int __init genpd_poweroff_unused(void)
{
        pm_genpd_poweroff_unused();
        return 0;
}
late_initcall(genpd_poweroff_unused);

#else

static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
                                            unsigned long val, void *ptr)
{
        return NOTIFY_DONE;
}

static inline void
genpd_queue_power_off_work(struct generic_pm_domain *genpd) {}

static inline void genpd_power_off_work_fn(struct work_struct *work) {}

#define pm_genpd_runtime_suspend        NULL
#define pm_genpd_runtime_resume         NULL

#endif /* CONFIG_PM_RUNTIME */

#ifdef CONFIG_PM_SLEEP

/**
 * pm_genpd_present - Check if the given PM domain has been initialized.
 * @genpd: PM domain to check.
 */
static bool pm_genpd_present(const struct generic_pm_domain *genpd)
{
        const struct generic_pm_domain *gpd;

        if (IS_ERR_OR_NULL(genpd))
                return false;

        list_for_each_entry(gpd, &gpd_list, gpd_list_node)
                if (gpd == genpd)
                        return true;

        return false;
}

static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
                                    struct device *dev)
{
        return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}

/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so.  Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
{
        struct gpd_link *link;

        if (genpd->status == GPD_STATE_POWER_OFF)
                return;

        if (genpd->suspended_count != genpd->device_count
            || atomic_read(&genpd->sd_count) > 0)
                return;

        genpd_power_off(genpd);

        genpd->status = GPD_STATE_POWER_OFF;

        list_for_each_entry(link, &genpd->slave_links, slave_node) {
                genpd_sd_counter_dec(link->master);
                pm_genpd_sync_poweroff(link->master);
        }
}

/**
 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd)
{
        struct gpd_link *link;

        if (genpd->status != GPD_STATE_POWER_OFF)
                return;

        list_for_each_entry(link, &genpd->slave_links, slave_node) {
                pm_genpd_sync_poweron(link->master);
                genpd_sd_counter_inc(link->master);
        }

        genpd_power_on(genpd);

        genpd->status = GPD_STATE_ACTIVE;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system).  In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
{
        bool active_wakeup;

        if (!device_can_wakeup(dev))
                return false;

        active_wakeup = genpd_dev_active_wakeup(genpd, dev);
        return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}

/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
        struct generic_pm_domain *genpd;
        int ret;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        /*
         * If a wakeup request is pending for the device, it should be woken up
         * at this point and a system wakeup event should be reported if it's
         * set up to wake up the system from sleep states.
         */
        pm_runtime_get_noresume(dev);
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);

        if (pm_wakeup_pending()) {
                pm_runtime_put(dev);
                return -EBUSY;
        }

        if (resume_needed(dev, genpd))
                pm_runtime_resume(dev);

        genpd_acquire_lock(genpd);

        if (genpd->prepared_count++ == 0) {
                genpd->suspended_count = 0;
                genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
        }

        genpd_release_lock(genpd);

        if (genpd->suspend_power_off) {
                pm_runtime_put_noidle(dev);
                return 0;
        }

        /*
         * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
         * so pm_genpd_poweron() will return immediately, but if the device
         * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
         * to make it operational.
         */
        pm_runtime_resume(dev);
        __pm_runtime_disable(dev, false);

        ret = pm_generic_prepare(dev);
        if (ret) {
                mutex_lock(&genpd->lock);

                if (--genpd->prepared_count == 0)
                        genpd->suspend_power_off = false;

                mutex_unlock(&genpd->lock);
                pm_runtime_enable(dev);
        }

        pm_runtime_put(dev);
        return ret;
}

/**
 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Suspend a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
}

/**
 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a late suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int pm_genpd_suspend_late(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_suspend_late(dev);
}

/**
 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        if (genpd->suspend_power_off
            || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
                return 0;

        genpd_stop_dev(genpd, dev);

        /*
         * Since all of the "noirq" callbacks are executed sequentially, it is
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         */
        genpd->suspended_count++;
        pm_genpd_sync_poweroff(genpd);

        return 0;
}

/**
 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        if (genpd->suspend_power_off
            || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
                return 0;

        /*
         * Since all of the "noirq" callbacks are executed sequentially, it is
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         */
        pm_genpd_sync_poweron(genpd);
        genpd->suspended_count--;

        return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out an early resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_resume_early(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_resume_early(dev);
}

/**
 * pm_genpd_resume - Resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Resume a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_resume(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
}

/**
 * pm_genpd_freeze - Freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Freeze a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_freeze(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
}

/**
 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_late(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_freeze_late(dev);
}

/**
 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Complete the freezing of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_stop_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Carry out an early thaw of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_thaw_early(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_thaw_early(dev);
}

/**
 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
 * @dev: Device to thaw.
 *
 * Thaw a device under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static int pm_genpd_thaw(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
}

/**
 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
        struct generic_pm_domain *genpd;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return -EINVAL;

        /*
         * Since all of the "noirq" callbacks are executed sequentially, it is
         * guaranteed that this function will never run twice in parallel for
         * the same PM domain, so it is not necessary to use locking here.
         *
         * At this point suspended_count == 0 means we are being run for the
         * first time for the given domain in the present cycle.
         */
        if (genpd->suspended_count++ == 0) {
                /*
                 * The boot kernel might put the domain into arbitrary state,
                 * so make it appear as powered off to pm_genpd_sync_poweron(),
                 * so that it tries to power it on in case it was really off.
                 */
                genpd->status = GPD_STATE_POWER_OFF;
                if (genpd->suspend_power_off) {
                        /*
                         * If the domain was off before the hibernation, make
                         * sure it will be off going forward.
                         */
                        genpd_power_off(genpd);

                        return 0;
                }
        }

        if (genpd->suspend_power_off)
                return 0;

        pm_genpd_sync_poweron(genpd);

        return genpd_start_dev(genpd, dev);
}

/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
        struct generic_pm_domain *genpd;
        bool run_complete;

        dev_dbg(dev, "%s()\n", __func__);

        genpd = dev_to_genpd(dev);
        if (IS_ERR(genpd))
                return;

        mutex_lock(&genpd->lock);

        run_complete = !genpd->suspend_power_off;
        if (--genpd->prepared_count == 0)
                genpd->suspend_power_off = false;

        mutex_unlock(&genpd->lock);

        if (run_complete) {
                pm_generic_complete(dev);
                pm_runtime_set_active(dev);
                pm_runtime_enable(dev);
                pm_request_idle(dev);
        }
}

/**
 * genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 * @suspend: If true, power the domain off; if false, power it on.
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
static void genpd_syscore_switch(struct device *dev, bool suspend)
{
        struct generic_pm_domain *genpd;

        genpd = dev_to_genpd(dev);
        if (!pm_genpd_present(genpd))
                return;

        if (suspend) {
                genpd->suspended_count++;
                pm_genpd_sync_poweroff(genpd);
        } else {
                pm_genpd_sync_poweron(genpd);
                genpd->suspended_count--;
        }
}

void pm_genpd_syscore_poweroff(struct device *dev)
{
        genpd_syscore_switch(dev, true);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);

void pm_genpd_syscore_poweron(struct device *dev)
{
        genpd_syscore_switch(dev, false);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
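
/*
 * Usage sketch (hypothetical timer driver; foo_timer_dev is an assumption):
 * an "always on" device used as a clock source can drop its domain around
 * the syscore phase like this:
 *
 *	static void foo_timer_suspend(struct clocksource *cs)
 *	{
 *		pm_genpd_syscore_poweroff(foo_timer_dev);
 *	}
 *
 *	static void foo_timer_resume(struct clocksource *cs)
 *	{
 *		pm_genpd_syscore_poweron(foo_timer_dev);
 *	}
 *
 * These calls must only be made while "noirq"/syscore callbacks are running,
 * since the helpers above rely on that for locking.
 */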

#else

#define pm_genpd_prepare                NULL
#define pm_genpd_suspend                NULL
#define pm_genpd_suspend_late           NULL
#define pm_genpd_suspend_noirq          NULL
#define pm_genpd_resume_early           NULL
#define pm_genpd_resume_noirq           NULL
#define pm_genpd_resume                 NULL
#define pm_genpd_freeze                 NULL
#define pm_genpd_freeze_late            NULL
#define pm_genpd_freeze_noirq           NULL
#define pm_genpd_thaw_early             NULL
#define pm_genpd_thaw_noirq             NULL
#define pm_genpd_thaw                   NULL
#define pm_genpd_restore_noirq          NULL
#define pm_genpd_complete               NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *__pm_genpd_alloc_dev_data(struct device *dev)
{
        struct generic_pm_domain_data *gpd_data;

        gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
        if (!gpd_data)
                return NULL;

        mutex_init(&gpd_data->lock);
        gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;
        dev_pm_qos_add_notifier(dev, &gpd_data->nb);
        return gpd_data;
}

static void __pm_genpd_free_dev_data(struct device *dev,
                                     struct generic_pm_domain_data *gpd_data)
{
        dev_pm_qos_remove_notifier(dev, &gpd_data->nb);
        kfree(gpd_data);
}

/**
 * __pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
                          struct gpd_timing_data *td)
{
        struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
        struct pm_domain_data *pdd;
        int ret = 0;

        dev_dbg(dev, "%s()\n", __func__);

        if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
                return -EINVAL;

        gpd_data_new = __pm_genpd_alloc_dev_data(dev);
        if (!gpd_data_new)
                return -ENOMEM;

        genpd_acquire_lock(genpd);

        if (genpd->prepared_count > 0) {
                ret = -EAGAIN;
                goto out;
        }

        list_for_each_entry(pdd, &genpd->dev_list, list_node)
                if (pdd->dev == dev) {
                        ret = -EINVAL;
                        goto out;
                }

        ret = dev_pm_get_subsys_data(dev);
        if (ret)
                goto out;

        genpd->device_count++;
        genpd->max_off_time_changed = true;

        spin_lock_irq(&dev->power.lock);

        dev->pm_domain = &genpd->domain;
        if (dev->power.subsys_data->domain_data) {
                gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
        } else {
                gpd_data = gpd_data_new;
                dev->power.subsys_data->domain_data = &gpd_data->base;
        }
        gpd_data->refcount++;
        if (td)
                gpd_data->td = *td;

        spin_unlock_irq(&dev->power.lock);

        if (genpd->attach_dev)
                genpd->attach_dev(genpd, dev);

        mutex_lock(&gpd_data->lock);
        gpd_data->base.dev = dev;
        list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
        gpd_data->need_restore = -1;
        gpd_data->td.constraint_changed = true;
        gpd_data->td.effective_constraint_ns = -1;
        mutex_unlock(&gpd_data->lock);

 out:
        genpd_release_lock(genpd);

        if (gpd_data != gpd_data_new)
                __pm_genpd_free_dev_data(dev, gpd_data_new);

        return ret;
}
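
/*
 * Usage sketch (hypothetical platform code; "foo_domain" and pdev are
 * assumptions): a board file typically binds a platform device to a domain
 * right after registering it:
 *
 *	ret = __pm_genpd_add_device(&foo_domain, &pdev->dev, NULL);
 *	if (ret)
 *		dev_err(&pdev->dev, "failed to join PM domain: %d\n", ret);
 *
 * Passing a non-NULL gpd_timing_data seeds the per-device latency values
 * instead of letting them be learned at run time.
 */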

/**
 * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
 * @domain_name: Name of the PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_name_add_device(const char *domain_name, struct device *dev,
                               struct gpd_timing_data *td)
{
        return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td);
}

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
                           struct device *dev)
{
        struct generic_pm_domain_data *gpd_data;
        struct pm_domain_data *pdd;
        bool remove = false;
        int ret = 0;

        dev_dbg(dev, "%s()\n", __func__);

        if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev)
            ||  IS_ERR_OR_NULL(dev->pm_domain)
            ||  pd_to_genpd(dev->pm_domain) != genpd)
                return -EINVAL;

        genpd_acquire_lock(genpd);

        if (genpd->prepared_count > 0) {
                ret = -EAGAIN;
                goto out;
        }

        genpd->device_count--;
        genpd->max_off_time_changed = true;

        if (genpd->detach_dev)
                genpd->detach_dev(genpd, dev);

        spin_lock_irq(&dev->power.lock);

        dev->pm_domain = NULL;
        pdd = dev->power.subsys_data->domain_data;
        list_del_init(&pdd->list_node);
        gpd_data = to_gpd_data(pdd);
        if (--gpd_data->refcount == 0) {
                dev->power.subsys_data->domain_data = NULL;
                remove = true;
        }

        spin_unlock_irq(&dev->power.lock);

        mutex_lock(&gpd_data->lock);
        pdd->dev = NULL;
        mutex_unlock(&gpd_data->lock);

        genpd_release_lock(genpd);

        dev_pm_put_subsys_data(dev);
        if (remove)
                __pm_genpd_free_dev_data(dev, gpd_data);

        return 0;

 out:
        genpd_release_lock(genpd);

        return ret;
}

/**
 * pm_genpd_dev_need_restore - Set/unset the device's "need restore" flag.
 * @dev: Device to set/unset the flag for.
 * @val: The new value of the device's "need restore" flag.
 */
void pm_genpd_dev_need_restore(struct device *dev, bool val)
{
        struct pm_subsys_data *psd;
        unsigned long flags;

        spin_lock_irqsave(&dev->power.lock, flags);

        psd = dev_to_psd(dev);
        if (psd && psd->domain_data)
                to_gpd_data(psd->domain_data)->need_restore = val ? 1 : 0;

        spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_genpd_dev_need_restore);
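
/*
 * Usage sketch (hypothetical driver): a driver that knows it just reset its
 * hardware can ask for the saved state to be replayed on the next start:
 *
 *	pm_genpd_dev_need_restore(dev, true);
 *
 * Passing false instead records that the current hardware state is already
 * valid, so the next resume can skip the restore_state callback.
 */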
1600
1601 /**
1602  * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1603  * @genpd: Master PM domain to add the subdomain to.
1604  * @subdomain: Subdomain to be added.
1605  */
1606 int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1607                            struct generic_pm_domain *subdomain)
1608 {
1609         struct gpd_link *link;
1610         int ret = 0;
1611
1612         if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
1613             || genpd == subdomain)
1614                 return -EINVAL;
1615
1616  start:
1617         genpd_acquire_lock(genpd);
1618         mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1619
1620         if (subdomain->status != GPD_STATE_POWER_OFF
1621             && subdomain->status != GPD_STATE_ACTIVE) {
1622                 mutex_unlock(&subdomain->lock);
1623                 genpd_release_lock(genpd);
1624                 goto start;
1625         }
1626
1627         if (genpd->status == GPD_STATE_POWER_OFF
1628             && subdomain->status != GPD_STATE_POWER_OFF) {
1629                 ret = -EINVAL;
1630                 goto out;
1631         }
1632
1633         list_for_each_entry(link, &genpd->master_links, master_node) {
1634                 if (link->slave == subdomain && link->master == genpd) {
1635                         ret = -EINVAL;
1636                         goto out;
1637                 }
1638         }
1639
1640         link = kzalloc(sizeof(*link), GFP_KERNEL);
1641         if (!link) {
1642                 ret = -ENOMEM;
1643                 goto out;
1644         }
1645         link->master = genpd;
1646         list_add_tail(&link->master_node, &genpd->master_links);
1647         link->slave = subdomain;
1648         list_add_tail(&link->slave_node, &subdomain->slave_links);
1649         if (subdomain->status != GPD_STATE_POWER_OFF)
1650                 genpd_sd_counter_inc(genpd);
1651
1652  out:
1653         mutex_unlock(&subdomain->lock);
1654         genpd_release_lock(genpd);
1655
1656         return ret;
1657 }
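/*
 * Example (editor's sketch, hypothetical domains): nesting a "gpu" domain
 * inside a "top" domain, so that "top" can only be powered off once "gpu"
 * is off:
 *
 *	ret = pm_genpd_add_subdomain(&top_pd, &gpu_pd);
 *	if (ret)
 *		pr_err("cannot link gpu below top: %d\n", ret);
 */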
1658
1659 /**
1660  * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain.
1661  * @master_name: Name of the master PM domain to add the subdomain to.
1662  * @subdomain_name: Name of the subdomain to be added.
1663  */
1664 int pm_genpd_add_subdomain_names(const char *master_name,
1665                                  const char *subdomain_name)
1666 {
1667         struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd;
1668
1669         if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name))
1670                 return -EINVAL;
1671
1672         mutex_lock(&gpd_list_lock);
1673         list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
1674                 if (!master && !strcmp(gpd->name, master_name))
1675                         master = gpd;
1676
1677                 if (!subdomain && !strcmp(gpd->name, subdomain_name))
1678                         subdomain = gpd;
1679
1680                 if (master && subdomain)
1681                         break;
1682         }
1683         mutex_unlock(&gpd_list_lock);
1684
1685         return pm_genpd_add_subdomain(master, subdomain);
1686 }
1687
1688 /**
1689  * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1690  * @genpd: Master PM domain to remove the subdomain from.
1691  * @subdomain: Subdomain to be removed.
1692  */
1693 int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1694                               struct generic_pm_domain *subdomain)
1695 {
1696         struct gpd_link *link;
1697         int ret = -EINVAL;
1698
1699         if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
1700                 return -EINVAL;
1701
1702  start:
1703         genpd_acquire_lock(genpd);
1704
1705         list_for_each_entry(link, &genpd->master_links, master_node) {
1706                 if (link->slave != subdomain)
1707                         continue;
1708
1709                 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1710
1711                 if (subdomain->status != GPD_STATE_POWER_OFF
1712                     && subdomain->status != GPD_STATE_ACTIVE) {
1713                         mutex_unlock(&subdomain->lock);
1714                         genpd_release_lock(genpd);
1715                         goto start;
1716                 }
1717
1718                 list_del(&link->master_node);
1719                 list_del(&link->slave_node);
1720                 kfree(link);
1721                 if (subdomain->status != GPD_STATE_POWER_OFF)
1722                         genpd_sd_counter_dec(genpd);
1723
1724                 mutex_unlock(&subdomain->lock);
1725
1726                 ret = 0;
1727                 break;
1728         }
1729
1730         genpd_release_lock(genpd);
1731
1732         return ret;
1733 }
1734
1735 /**
1736  * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
1737  * @genpd: PM domain to be connected with cpuidle.
1738  * @state: cpuidle state this domain can disable/enable.
1739  *
1740  * Make a PM domain behave as though it contained a CPU core.  That is, instead
1741  * of calling its power-down routine, it will enable the given cpuidle state so
1742  * that the cpuidle subsystem can power it down (if possible and desirable).
1743  */
1744 int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
1745 {
1746         struct cpuidle_driver *cpuidle_drv;
1747         struct gpd_cpuidle_data *cpuidle_data;
1748         struct cpuidle_state *idle_state;
1749         int ret = 0;
1750
1751         if (IS_ERR_OR_NULL(genpd) || state < 0)
1752                 return -EINVAL;
1753
1754         genpd_acquire_lock(genpd);
1755
1756         if (genpd->cpuidle_data) {
1757                 ret = -EEXIST;
1758                 goto out;
1759         }
1760         cpuidle_data = kzalloc(sizeof(*cpuidle_data), GFP_KERNEL);
1761         if (!cpuidle_data) {
1762                 ret = -ENOMEM;
1763                 goto out;
1764         }
1765         cpuidle_drv = cpuidle_driver_ref();
1766         if (!cpuidle_drv) {
1767                 ret = -ENODEV;
1768                 goto err_drv;
1769         }
1770         if (cpuidle_drv->state_count <= state) {
1771                 ret = -EINVAL;
1772                 goto err;
1773         }
1774         idle_state = &cpuidle_drv->states[state];
1775         if (!idle_state->disabled) {
1776                 ret = -EAGAIN;
1777                 goto err;
1778         }
1779         cpuidle_data->idle_state = idle_state;
1780         cpuidle_data->saved_exit_latency = idle_state->exit_latency;
1781         genpd->cpuidle_data = cpuidle_data;
1782         genpd_recalc_cpu_exit_latency(genpd);
1783
1784  out:
1785         genpd_release_lock(genpd);
1786         return ret;
1787
1788  err:
1789         cpuidle_driver_unref();
1790
1791  err_drv:
1792         kfree(cpuidle_data);
1793         goto out;
1794 }
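/*
 * Example (editor's sketch): let a hypothetical CPU domain be powered down
 * through cpuidle state index 1 rather than its own power-off routine; the
 * state must currently be disabled, otherwise -EAGAIN is returned:
 *
 *	ret = pm_genpd_attach_cpuidle(&cpu_pd, 1);
 */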
1795
1796 /**
1797  * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it.
1798  * @name: Name of the domain to connect to cpuidle.
1799  * @state: cpuidle state this domain can manipulate.
1800  */
1801 int pm_genpd_name_attach_cpuidle(const char *name, int state)
1802 {
1803         return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state);
1804 }
1805
1806 /**
1807  * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain.
1808  * @genpd: PM domain to remove the cpuidle connection from.
1809  *
1810  * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the
1811  * given PM domain.
1812  */
1813 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
1814 {
1815         struct gpd_cpuidle_data *cpuidle_data;
1816         struct cpuidle_state *idle_state;
1817         int ret = 0;
1818
1819         if (IS_ERR_OR_NULL(genpd))
1820                 return -EINVAL;
1821
1822         genpd_acquire_lock(genpd);
1823
1824         cpuidle_data = genpd->cpuidle_data;
1825         if (!cpuidle_data) {
1826                 ret = -ENODEV;
1827                 goto out;
1828         }
1829         idle_state = cpuidle_data->idle_state;
1830         if (!idle_state->disabled) {
1831                 ret = -EAGAIN;
1832                 goto out;
1833         }
1834         idle_state->exit_latency = cpuidle_data->saved_exit_latency;
1835         cpuidle_driver_unref();
1836         genpd->cpuidle_data = NULL;
1837         kfree(cpuidle_data);
1838
1839  out:
1840         genpd_release_lock(genpd);
1841         return ret;
1842 }
1843
1844 /**
1845  * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it.
1846  * @name: Name of the domain to disconnect cpuidle from.
1847  */
1848 int pm_genpd_name_detach_cpuidle(const char *name)
1849 {
1850         return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name));
1851 }
1852
1853 /* Default device callbacks for generic PM domains. */
1854
1855 /**
1856  * pm_genpd_default_save_state - Default "save device state" for PM domains.
1857  * @dev: Device to handle.
1858  */
1859 static int pm_genpd_default_save_state(struct device *dev)
1860 {
1861         int (*cb)(struct device *__dev);
1862
1863         if (dev->type && dev->type->pm)
1864                 cb = dev->type->pm->runtime_suspend;
1865         else if (dev->class && dev->class->pm)
1866                 cb = dev->class->pm->runtime_suspend;
1867         else if (dev->bus && dev->bus->pm)
1868                 cb = dev->bus->pm->runtime_suspend;
1869         else
1870                 cb = NULL;
1871
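        /* Fall back to the driver's callback only if the subsystem has none. */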
1872         if (!cb && dev->driver && dev->driver->pm)
1873                 cb = dev->driver->pm->runtime_suspend;
1874
1875         return cb ? cb(dev) : 0;
1876 }
1877
1878 /**
1879  * pm_genpd_default_restore_state - Default PM domains "restore device state".
1880  * @dev: Device to handle.
1881  */
1882 static int pm_genpd_default_restore_state(struct device *dev)
1883 {
1884         int (*cb)(struct device *__dev);
1885
1886         if (dev->type && dev->type->pm)
1887                 cb = dev->type->pm->runtime_resume;
1888         else if (dev->class && dev->class->pm)
1889                 cb = dev->class->pm->runtime_resume;
1890         else if (dev->bus && dev->bus->pm)
1891                 cb = dev->bus->pm->runtime_resume;
1892         else
1893                 cb = NULL;
1894
1895         if (!cb && dev->driver && dev->driver->pm)
1896                 cb = dev->driver->pm->runtime_resume;
1897
1898         return cb ? cb(dev) : 0;
1899 }
1900
1901 /**
1902  * pm_genpd_init - Initialize a generic I/O PM domain object.
1903  * @genpd: PM domain object to initialize.
1904  * @gov: PM domain governor to associate with the domain (may be NULL).
1905  * @is_off: Initial state of the domain: true if it starts powered off.
1906  */
1907 void pm_genpd_init(struct generic_pm_domain *genpd,
1908                    struct dev_power_governor *gov, bool is_off)
1909 {
1910         if (IS_ERR_OR_NULL(genpd))
1911                 return;
1912
1913         INIT_LIST_HEAD(&genpd->master_links);
1914         INIT_LIST_HEAD(&genpd->slave_links);
1915         INIT_LIST_HEAD(&genpd->dev_list);
1916         mutex_init(&genpd->lock);
1917         genpd->gov = gov;
1918         INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1919         genpd->in_progress = 0;
1920         atomic_set(&genpd->sd_count, 0);
1921         genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1922         init_waitqueue_head(&genpd->status_wait_queue);
1923         genpd->poweroff_task = NULL;
1924         genpd->resume_count = 0;
1925         genpd->device_count = 0;
1926         genpd->max_off_time_ns = -1;
1927         genpd->max_off_time_changed = true;
1928         genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
1929         genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
1930         genpd->domain.ops.prepare = pm_genpd_prepare;
1931         genpd->domain.ops.suspend = pm_genpd_suspend;
1932         genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
1933         genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
1934         genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
1935         genpd->domain.ops.resume_early = pm_genpd_resume_early;
1936         genpd->domain.ops.resume = pm_genpd_resume;
1937         genpd->domain.ops.freeze = pm_genpd_freeze;
1938         genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
1939         genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
1940         genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
1941         genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
1942         genpd->domain.ops.thaw = pm_genpd_thaw;
1943         genpd->domain.ops.poweroff = pm_genpd_suspend;
1944         genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
1945         genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
1946         genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
1947         genpd->domain.ops.restore_early = pm_genpd_resume_early;
1948         genpd->domain.ops.restore = pm_genpd_resume;
1949         genpd->domain.ops.complete = pm_genpd_complete;
1950         genpd->dev_ops.save_state = pm_genpd_default_save_state;
1951         genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
1952
1953         if (genpd->flags & GENPD_FLAG_PM_CLK) {
1954                 genpd->dev_ops.stop = pm_clk_suspend;
1955                 genpd->dev_ops.start = pm_clk_resume;
1956         }
1957
1958         mutex_lock(&gpd_list_lock);
1959         list_add(&genpd->gpd_list_node, &gpd_list);
1960         mutex_unlock(&gpd_list_lock);
1961 }
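/*
 * Example (editor's sketch, all names hypothetical): minimal registration of
 * a domain that starts powered off and gates device clocks through the PM
 * clock framework; foo_pd_power_on() would flip the SoC-specific power
 * switch, and pm_genpd_init() accepts a NULL governor:
 *
 *	static struct generic_pm_domain foo_pd = {
 *		.name = "foo",
 *		.flags = GENPD_FLAG_PM_CLK,
 *		.power_on = foo_pd_power_on,
 *	};
 *
 *	pm_genpd_init(&foo_pd, NULL, true);
 */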
1962
1963 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
1964 /*
1965  * Device Tree based PM domain providers.
1966  *
1967  * The code below implements generic device tree based PM domain providers that
1968  * bind device tree nodes with generic PM domains registered in the system.
1969  *
1970  * Any driver that registers generic PM domains and needs to support binding of
1971  * devices to these domains is supposed to register a PM domain provider, which
1972  * maps a PM domain specifier retrieved from the device tree to a PM domain.
1973  *
1974  * Two simple mapping functions have been provided for convenience:
1975  *  - __of_genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
1976  *  - __of_genpd_xlate_onecell() for mapping of multiple PM domains per node by
1977  *    index.
1978  */
1979
1980 /**
1981  * struct of_genpd_provider - PM domain provider registration structure
1982  * @link: Entry in global list of PM domain providers
1983  * @node: Pointer to device tree node of PM domain provider
1984  * @xlate: Provider-specific xlate callback mapping a set of specifier cells
1985  *         into a PM domain.
1986  * @data: context pointer to be passed into @xlate callback
1987  */
1988 struct of_genpd_provider {
1989         struct list_head link;
1990         struct device_node *node;
1991         genpd_xlate_t xlate;
1992         void *data;
1993 };
1994
1995 /* List of registered PM domain providers. */
1996 static LIST_HEAD(of_genpd_providers);
1997 /* Mutex to protect the list above. */
1998 static DEFINE_MUTEX(of_genpd_mutex);
1999
2000 /**
2001  * __of_genpd_xlate_simple() - Xlate function for direct node-domain mapping
2002  * @genpdspec: OF phandle args to map into a PM domain
2003  * @data: xlate function private data - pointer to struct generic_pm_domain
2004  *
2005  * This is a generic xlate function that can be used to model PM domains that
2006  * have their own device tree nodes.  The xlate function's private data must
2007  * be a valid pointer to a struct generic_pm_domain.
2008  */
2009 struct generic_pm_domain *__of_genpd_xlate_simple(
2010                                         struct of_phandle_args *genpdspec,
2011                                         void *data)
2012 {
2013         if (genpdspec->args_count != 0)
2014                 return ERR_PTR(-EINVAL);
2015         return data;
2016 }
2017 EXPORT_SYMBOL_GPL(__of_genpd_xlate_simple);
2018
2019 /**
2020  * __of_genpd_xlate_onecell() - Xlate function using a single index.
2021  * @genpdspec: OF phandle args to map into a PM domain
2022  * @data: xlate function private data - pointer to struct genpd_onecell_data
2023  *
2024  * This is a generic xlate function that can be used to model simple PM domain
2025  * controllers that have one device tree node and provide multiple PM domains.
2026  * A single cell is used as an index into an array of PM domains specified in
2027  * the genpd_onecell_data struct when registering the provider.
2028  */
2029 struct generic_pm_domain *__of_genpd_xlate_onecell(
2030                                         struct of_phandle_args *genpdspec,
2031                                         void *data)
2032 {
2033         struct genpd_onecell_data *genpd_data = data;
2034         unsigned int idx = genpdspec->args[0];
2035
2036         if (genpdspec->args_count != 1)
2037                 return ERR_PTR(-EINVAL);
2038
2039         if (idx >= genpd_data->num_domains) {
2040                 pr_err("%s: invalid domain index %u\n", __func__, idx);
2041                 return ERR_PTR(-EINVAL);
2042         }
2043
2044         if (!genpd_data->domains[idx])
2045                 return ERR_PTR(-ENOENT);
2046
2047         return genpd_data->domains[idx];
2048 }
2049 EXPORT_SYMBOL_GPL(__of_genpd_xlate_onecell);
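/*
 * Note (editor's addition): with the onecell mapping, the provider's device
 * tree node is expected to carry "#power-domain-cells = <1>", and a consumer
 * selects one of its domains with "power-domains = <&provider index>", as
 * described by the generic power-domain bindings.
 */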
2050
2051 /**
2052  * __of_genpd_add_provider() - Register a PM domain provider for a node
2053  * @np: Device node pointer associated with the PM domain provider.
2054  * @xlate: Callback for decoding PM domain from phandle arguments.
2055  * @data: Context pointer for @xlate callback.
2056  */
2057 int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
2058                         void *data)
2059 {
2060         struct of_genpd_provider *cp;
2061
2062         cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2063         if (!cp)
2064                 return -ENOMEM;
2065
2066         cp->node = of_node_get(np);
2067         cp->data = data;
2068         cp->xlate = xlate;
2069
2070         mutex_lock(&of_genpd_mutex);
2071         list_add(&cp->link, &of_genpd_providers);
2072         mutex_unlock(&of_genpd_mutex);
2073         pr_debug("Added domain provider from %s\n", np->full_name);
2074
2075         return 0;
2076 }
2077 EXPORT_SYMBOL_GPL(__of_genpd_add_provider);
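/*
 * Example (editor's sketch, hypothetical driver data): registering a provider
 * that exposes two domains through a single device tree node, using the
 * onecell mapping defined above:
 *
 *	static struct generic_pm_domain *foo_domains[] = { &foo_pd0, &foo_pd1 };
 *	static struct genpd_onecell_data foo_onecell_data = {
 *		.domains = foo_domains,
 *		.num_domains = ARRAY_SIZE(foo_domains),
 *	};
 *
 *	ret = __of_genpd_add_provider(pdev->dev.of_node,
 *				      __of_genpd_xlate_onecell,
 *				      &foo_onecell_data);
 */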
2078
2079 /**
2080  * of_genpd_del_provider() - Remove a previously registered PM domain provider
2081  * @np: Device node pointer associated with the PM domain provider
2082  */
2083 void of_genpd_del_provider(struct device_node *np)
2084 {
2085         struct of_genpd_provider *cp;
2086
2087         mutex_lock(&of_genpd_mutex);
2088         list_for_each_entry(cp, &of_genpd_providers, link) {
2089                 if (cp->node == np) {
2090                         list_del(&cp->link);
2091                         of_node_put(cp->node);
2092                         kfree(cp);
2093                         break;
2094                 }
2095         }
2096         mutex_unlock(&of_genpd_mutex);
2097 }
2098 EXPORT_SYMBOL_GPL(of_genpd_del_provider);
2099
2100 /**
2101  * of_genpd_get_from_provider() - Look-up PM domain
2102  * @genpdspec: OF phandle args to use for look-up
2103  *
2104  * Looks for a PM domain provider registered for the node specified by
2105  * @genpdspec and, if one is found, uses the provider's xlate function to map
2106  * the phandle arguments to a PM domain.
2107  *
2108  * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
2109  * on failure.
2110  */
2111 static struct generic_pm_domain *of_genpd_get_from_provider(
2112                                         struct of_phandle_args *genpdspec)
2113 {
2114         struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
2115         struct of_genpd_provider *provider;
2116
2117         mutex_lock(&of_genpd_mutex);
2118
2119         /* Check if we have such a provider in our array */
2120         list_for_each_entry(provider, &of_genpd_providers, link) {
2121                 if (provider->node == genpdspec->np)
2122                         genpd = provider->xlate(genpdspec, provider->data);
2123                 if (!IS_ERR(genpd))
2124                         break;
2125         }
2126
2127         mutex_unlock(&of_genpd_mutex);
2128
2129         return genpd;
2130 }
2131
2132 /**
2133  * genpd_dev_pm_detach - Detach a device from its PM domain.
2134  * @dev: Device to detach.
2135  * @power_off: Currently not used
2136  *
2137  * Try to locate the generic PM domain that the device was previously
2138  * attached to, and if one is found, detach the device from it.
2139  */
2140 static void genpd_dev_pm_detach(struct device *dev, bool power_off)
2141 {
2142         struct generic_pm_domain *pd = NULL, *gpd;
2143         int ret = 0;
2144
2145         if (!dev->pm_domain)
2146                 return;
2147
2148         mutex_lock(&gpd_list_lock);
2149         list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2150                 if (&gpd->domain == dev->pm_domain) {
2151                         pd = gpd;
2152                         break;
2153                 }
2154         }
2155         mutex_unlock(&gpd_list_lock);
2156
2157         if (!pd)
2158                 return;
2159
2160         dev_dbg(dev, "removing from PM domain %s\n", pd->name);
2161
2162         while (1) {
2163                 ret = pm_genpd_remove_device(pd, dev);
2164                 if (ret != -EAGAIN)
2165                         break;
2166                 cond_resched();
2167         }
2168
2169         if (ret < 0) {
2170                 dev_err(dev, "failed to remove from PM domain %s: %d\n",
2171                         pd->name, ret);
2172                 return;
2173         }
2174
2175         /* Check if PM domain can be powered off after removing this device. */
2176         genpd_queue_power_off_work(pd);
2177 }
2178
2179 /**
2180  * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
2181  * @dev: Device to attach.
2182  *
2183  * Parse the device's OF node to find a PM domain specifier.  If one is found,
2184  * attach the device to the PM domain ops retrieved from it.
2185  *
2186  * Both generic and legacy Samsung-specific DT bindings are supported to keep
2187  * backwards compatibility with existing DTBs.
2188  *
2189  * Returns 0 if the device was successfully attached to a PM domain, or a negative error code.
2190  */
2191 int genpd_dev_pm_attach(struct device *dev)
2192 {
2193         struct of_phandle_args pd_args;
2194         struct generic_pm_domain *pd;
2195         int ret;
2196
2197         if (!dev->of_node)
2198                 return -ENODEV;
2199
2200         if (dev->pm_domain)
2201                 return -EEXIST;
2202
2203         ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
2204                                         "#power-domain-cells", 0, &pd_args);
2205         if (ret < 0) {
2206                 if (ret != -ENOENT)
2207                         return ret;
2208
2209                 /*
2210                  * Try legacy Samsung-specific bindings
2211                  * (for backwards compatibility of DT ABI)
2212                  */
2213                 pd_args.args_count = 0;
2214                 pd_args.np = of_parse_phandle(dev->of_node,
2215                                                 "samsung,power-domain", 0);
2216                 if (!pd_args.np)
2217                         return -ENOENT;
2218         }
2219
2220         pd = of_genpd_get_from_provider(&pd_args);
2221         of_node_put(pd_args.np);
2222         if (IS_ERR(pd)) {
2223                 dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
2224                         __func__, PTR_ERR(pd));
2225                 return PTR_ERR(pd);
2226         }
2227
2228         dev_dbg(dev, "adding to PM domain %s\n", pd->name);
2229
2230         while (1) {
2231                 ret = pm_genpd_add_device(pd, dev);
2232                 if (ret != -EAGAIN)
2233                         break;
2234                 cond_resched();
2235         }
2236
2237         if (ret < 0) {
2238                 dev_err(dev, "failed to add to PM domain %s: %d\n",
2239                         pd->name, ret);
2240                 return ret;
2241         }
2243
2244         dev->pm_domain->detach = genpd_dev_pm_detach;
2245         pm_genpd_poweron(pd);
2246
2247         return 0;
2248 }
2249 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
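/*
 * Example (editor's sketch): bus code may try to attach a device to its DT
 * PM domain right before probing it, treating "no domain described" as a
 * non-fatal condition:
 *
 *	ret = genpd_dev_pm_attach(dev);
 *	if (ret && ret != -ENOENT && ret != -ENODEV)
 *		return ret;
 */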
2250 #endif
2251
2252
2253 /***        debugfs support        ***/
2254
2255 #ifdef CONFIG_PM_ADVANCED_DEBUG
2256 #include <linux/pm.h>
2257 #include <linux/device.h>
2258 #include <linux/debugfs.h>
2259 #include <linux/seq_file.h>
2260 #include <linux/init.h>
2261 #include <linux/kobject.h>
2262 static struct dentry *pm_genpd_debugfs_dir;
2263
2264 /*
2265  * TODO: This function is a slightly modified version of rtpm_status_show
2266  * from sysfs.c, but dependencies between PM_GENERIC_DOMAINS and PM_RUNTIME
2267  * are too loose to generalize it.
2268  */
2269 #ifdef CONFIG_PM_RUNTIME
2270 static void rtpm_status_str(struct seq_file *s, struct device *dev)
2271 {
2272         static const char * const status_lookup[] = {
2273                 [RPM_ACTIVE] = "active",
2274                 [RPM_RESUMING] = "resuming",
2275                 [RPM_SUSPENDED] = "suspended",
2276                 [RPM_SUSPENDING] = "suspending"
2277         };
2278         const char *p = "";
2279
2280         if (dev->power.runtime_error)
2281                 p = "error";
2282         else if (dev->power.disable_depth)
2283                 p = "unsupported";
2284         else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
2285                 p = status_lookup[dev->power.runtime_status];
2286         else
2287                 WARN_ON(1);
2288
2289         seq_puts(s, p);
2290 }
2291 #else
2292 static void rtpm_status_str(struct seq_file *s, struct device *dev)
2293 {
2294         seq_puts(s, "active");
2295 }
2296 #endif
2297
2298 static int pm_genpd_summary_one(struct seq_file *s,
2299                 struct generic_pm_domain *gpd)
2300 {
2301         static const char * const status_lookup[] = {
2302                 [GPD_STATE_ACTIVE] = "on",
2303                 [GPD_STATE_WAIT_MASTER] = "wait-master",
2304                 [GPD_STATE_BUSY] = "busy",
2305                 [GPD_STATE_REPEAT] = "off-in-progress",
2306                 [GPD_STATE_POWER_OFF] = "off"
2307         };
2308         struct pm_domain_data *pm_data;
2309         const char *kobj_path;
2310         struct gpd_link *link;
2311         int ret;
2312
2313         ret = mutex_lock_interruptible(&gpd->lock);
2314         if (ret)
2315                 return -ERESTARTSYS;
2316
2317         if (WARN_ON(gpd->status >= ARRAY_SIZE(status_lookup)))
2318                 goto exit;
2319         seq_printf(s, "%-30s  %-15s  ", gpd->name, status_lookup[gpd->status]);
2320
2321         /*
2322          * Modifications on the list require holding locks on both
2323          * master and slave, so we are safe.
2324          * Also gpd->name is immutable.
2325          */
2326         list_for_each_entry(link, &gpd->master_links, master_node) {
2327                 seq_printf(s, "%s", link->slave->name);
2328                 if (!list_is_last(&link->master_node, &gpd->master_links))
2329                         seq_puts(s, ", ");
2330         }
2331
2332         list_for_each_entry(pm_data, &gpd->dev_list, list_node) {
2333                 kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
2334                 if (kobj_path == NULL)
2335                         continue;
2336
2337                 seq_printf(s, "\n    %-50s  ", kobj_path);
2338                 rtpm_status_str(s, pm_data->dev);
2339                 kfree(kobj_path);
2340         }
2341
2342         seq_puts(s, "\n");
2343 exit:
2344         mutex_unlock(&gpd->lock);
2345
2346         return 0;
2347 }
2348
2349 static int pm_genpd_summary_show(struct seq_file *s, void *data)
2350 {
2351         struct generic_pm_domain *gpd;
2352         int ret = 0;
2353
2354         seq_puts(s, "    domain                      status         slaves\n");
2355         seq_puts(s, "           /device                                      runtime status\n");
2356         seq_puts(s, "----------------------------------------------------------------------\n");
2357
2358         ret = mutex_lock_interruptible(&gpd_list_lock);
2359         if (ret)
2360                 return -ERESTARTSYS;
2361
2362         list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
2363                 ret = pm_genpd_summary_one(s, gpd);
2364                 if (ret)
2365                         break;
2366         }
2367         mutex_unlock(&gpd_list_lock);
2368
2369         return ret;
2370 }
2371
2372 static int pm_genpd_summary_open(struct inode *inode, struct file *file)
2373 {
2374         return single_open(file, pm_genpd_summary_show, NULL);
2375 }
2376
2377 static const struct file_operations pm_genpd_summary_fops = {
2378         .open = pm_genpd_summary_open,
2379         .read = seq_read,
2380         .llseek = seq_lseek,
2381         .release = single_release,
2382 };
2383
2384 static int __init pm_genpd_debug_init(void)
2385 {
2386         struct dentry *d;
2387
2388         pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
2389
2390         if (!pm_genpd_debugfs_dir)
2391                 return -ENOMEM;
2392
2393         d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
2394                         pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
2395         if (!d)
2396                 return -ENOMEM;
2397
2398         return 0;
2399 }
2400 late_initcall(pm_genpd_debug_init);
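/*
 * Note (editor's addition): with CONFIG_PM_ADVANCED_DEBUG enabled, the table
 * produced above can be read through debugfs, typically mounted at
 * /sys/kernel/debug:
 *
 *	# cat /sys/kernel/debug/pm_genpd/pm_genpd_summary
 */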
2401
2402 static void __exit pm_genpd_debug_exit(void)
2403 {
2404         debugfs_remove_recursive(pm_genpd_debugfs_dir);
2405 }
2406 __exitcall(pm_genpd_debug_exit);
2407 #endif /* CONFIG_PM_ADVANCED_DEBUG */