/*
 * drivers/base/power/domain.c - Common code related to device power domains.
 *
 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
 *
 * This file is released under the GPLv2.
 */

#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
#include <linux/pm_clock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/export.h>

#define GENPD_RETRY_MAX_MS	250		/* Approximate */

#define GENPD_DEV_CALLBACK(genpd, type, callback, dev)		\
({								\
	type (*__routine)(struct device *__d);			\
	type __ret = (type)0;					\
								\
	__routine = genpd->dev_ops.callback;			\
	if (__routine) {					\
		__ret = __routine(dev);				\
	}							\
	__ret;							\
})
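
/*
 * Illustrative note (not in the original file): for example,
 * GENPD_DEV_CALLBACK(genpd, int, stop, dev) expands roughly to
 *
 *	int (*__routine)(struct device *__d) = genpd->dev_ops.stop;
 *	int __ret = 0;
 *
 *	if (__routine)
 *		__ret = __routine(dev);
 *
 * i.e. the statement-expression invokes the optional per-domain device
 * callback when one was provided and otherwise evaluates to a zero of the
 * requested type, so callers need not check dev_ops themselves.
 */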

static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);

/*
 * Get the generic PM domain for a particular struct device.
 * This validates the struct device pointer, the PM domain pointer,
 * and checks that the PM domain pointer is a real generic PM domain.
 * Any failure results in NULL being returned.
 */
static struct generic_pm_domain *genpd_lookup_dev(struct device *dev)
{
	struct generic_pm_domain *genpd = NULL, *gpd;

	if (IS_ERR_OR_NULL(dev) || IS_ERR_OR_NULL(dev->pm_domain))
		return NULL;

	mutex_lock(&gpd_list_lock);
	list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
		if (&gpd->domain == dev->pm_domain) {
			genpd = gpd;
			break;
		}
	}
	mutex_unlock(&gpd_list_lock);

	return genpd;
}

/*
 * This should only be used where we are certain that the pm_domain
 * attached to the device is a genpd domain.
 */
static struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev->pm_domain))
		return ERR_PTR(-EINVAL);

	return pd_to_genpd(dev->pm_domain);
}

static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}

static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}

static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
	bool ret = false;

	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
		ret = !!atomic_dec_and_test(&genpd->sd_count);

	return ret;
}

static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
{
	atomic_inc(&genpd->sd_count);
	smp_mb__after_atomic();
}

static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_on)
		return 0;

	if (!timed)
		return genpd->power_on(genpd);

	time_start = ktime_get();
	ret = genpd->power_on(genpd);
	if (ret)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_on_latency_ns)
		return ret;

	genpd->states[state_idx].power_on_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "on", elapsed_ns);

	return ret;
}

static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
{
	unsigned int state_idx = genpd->state_idx;
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	if (!genpd->power_off)
		return 0;

	if (!timed)
		return genpd->power_off(genpd);

	time_start = ktime_get();
	ret = genpd->power_off(genpd);
	if (ret)
		return ret;

	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
	if (elapsed_ns <= genpd->states[state_idx].power_off_latency_ns)
		return ret;

	genpd->states[state_idx].power_off_latency_ns = elapsed_ns;
	genpd->max_off_time_changed = true;
	pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
		 genpd->name, "off", elapsed_ns);

	return ret;
}

/**
 * genpd_queue_power_off_work - Queue up the execution of genpd_poweroff().
 * @genpd: PM domain to power off.
 *
 * Queue up the execution of genpd_poweroff() unless it's already been done
 * before.
 */
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
	queue_work(pm_wq, &genpd->power_off_work);
}

/**
 * genpd_poweron - Restore power to a given PM domain and its masters.
 * @genpd: PM domain to power up.
 * @depth: nesting count for lockdep.
 *
 * Restore power to @genpd and all of its masters so that it is possible to
 * resume a device belonging to it.
 */
static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
{
	struct gpd_link *link;
	int ret = 0;

	if (genpd->status == GPD_STATE_ACTIVE)
		return 0;

	/*
	 * The list is guaranteed not to change while the loop below is being
	 * executed, unless one of the masters' .power_on() callbacks fiddles
	 * with it.
	 */
	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		struct generic_pm_domain *master = link->master;

		genpd_sd_counter_inc(master);

		mutex_lock_nested(&master->lock, depth + 1);
		ret = genpd_poweron(master, depth + 1);
		mutex_unlock(&master->lock);

		if (ret) {
			genpd_sd_counter_dec(master);
			goto err;
		}
	}

	ret = genpd_power_on(genpd, true);
	if (ret)
		goto err;

	genpd->status = GPD_STATE_ACTIVE;
	return 0;

 err:
	list_for_each_entry_continue_reverse(link,
					&genpd->slave_links,
					slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

	return ret;
}
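
/*
 * Illustrative note (not in the original file): with a hierarchy such as
 *
 *	master_domain <-- sub_domain <-- device
 *
 * genpd_poweron(sub_domain, 0) first locks and powers on master_domain
 * recursively (with lock nesting level depth + 1) and only then powers on
 * sub_domain itself, so a domain is never active before its masters. The
 * @depth argument exists solely so that mutex_lock_nested() can tell
 * lockdep that this recursive master-lock ordering is intentional.
 */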

static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
				     unsigned long val, void *ptr)
{
	struct generic_pm_domain_data *gpd_data;
	struct device *dev;

	gpd_data = container_of(nb, struct generic_pm_domain_data, nb);
	dev = gpd_data->base.dev;

	for (;;) {
		struct generic_pm_domain *genpd;
		struct pm_domain_data *pdd;

		spin_lock_irq(&dev->power.lock);

		pdd = dev->power.subsys_data ?
				dev->power.subsys_data->domain_data : NULL;
		if (pdd && pdd->dev) {
			to_gpd_data(pdd)->td.constraint_changed = true;
			genpd = dev_to_genpd(dev);
		} else {
			genpd = ERR_PTR(-ENODATA);
		}

		spin_unlock_irq(&dev->power.lock);

		if (!IS_ERR(genpd)) {
			mutex_lock(&genpd->lock);
			genpd->max_off_time_changed = true;
			mutex_unlock(&genpd->lock);
		}

		dev = dev->parent;
		if (!dev || dev->power.ignore_children)
			break;
	}

	return NOTIFY_DONE;
}

/**
 * genpd_poweroff - Remove power from a given PM domain.
 * @genpd: PM domain to power down.
 * @is_async: PM domain is powered down from a scheduled work
 *
 * If all of the @genpd's devices have been suspended and all of its subdomains
 * have been powered down, remove power from @genpd.
 */
static int genpd_poweroff(struct generic_pm_domain *genpd, bool is_async)
{
	struct pm_domain_data *pdd;
	struct gpd_link *link;
	unsigned int not_suspended = 0;

	/*
	 * Do not try to power off the domain in the following situations:
	 * (1) The domain is already in the "power off" state.
	 * (2) System suspend is in progress.
	 */
	if (genpd->status == GPD_STATE_POWER_OFF
	    || genpd->prepared_count > 0)
		return 0;

	if (atomic_read(&genpd->sd_count) > 0)
		return -EBUSY;

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		enum pm_qos_flags_status stat;

		stat = dev_pm_qos_flags(pdd->dev,
					PM_QOS_FLAG_NO_POWER_OFF
						| PM_QOS_FLAG_REMOTE_WAKEUP);
		if (stat > PM_QOS_FLAGS_NONE)
			return -EBUSY;

		if (!pm_runtime_suspended(pdd->dev) || pdd->dev->power.irq_safe)
			not_suspended++;
	}

	if (not_suspended > 1 || (not_suspended == 1 && is_async))
		return -EBUSY;

	if (genpd->gov && genpd->gov->power_down_ok) {
		if (!genpd->gov->power_down_ok(&genpd->domain))
			return -EAGAIN;
	}

	if (genpd->power_off) {
		int ret;

		if (atomic_read(&genpd->sd_count) > 0)
			return -EBUSY;

		/*
		 * If sd_count > 0 at this point, one of the subdomains hasn't
		 * managed to call genpd_poweron() for the master yet after
		 * incrementing it.  In that case genpd_poweron() will wait
		 * for us to drop the lock, so we can call .power_off() and let
		 * the genpd_poweron() restore power for us (this shouldn't
		 * happen very often).
		 */
		ret = genpd_power_off(genpd, true);
		if (ret)
			return ret;
	}

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		genpd_queue_power_off_work(link->master);
	}

	return 0;
}

/**
 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
 * @work: Work structure used for scheduling the execution of this function.
 */
static void genpd_power_off_work_fn(struct work_struct *work)
{
	struct generic_pm_domain *genpd;

	genpd = container_of(work, struct generic_pm_domain, power_off_work);

	mutex_lock(&genpd->lock);
	genpd_poweroff(genpd, true);
	mutex_unlock(&genpd->lock);
}

/**
 * __genpd_runtime_suspend - walk the hierarchy of ->runtime_suspend() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_suspend(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_suspend;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_suspend;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_suspend;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_suspend;

	return cb ? cb(dev) : 0;
}

/**
 * __genpd_runtime_resume - walk the hierarchy of ->runtime_resume() callbacks
 * @dev: Device to handle.
 */
static int __genpd_runtime_resume(struct device *dev)
{
	int (*cb)(struct device *__dev);

	if (dev->type && dev->type->pm)
		cb = dev->type->pm->runtime_resume;
	else if (dev->class && dev->class->pm)
		cb = dev->class->pm->runtime_resume;
	else if (dev->bus && dev->bus->pm)
		cb = dev->bus->pm->runtime_resume;
	else
		cb = NULL;

	if (!cb && dev->driver && dev->driver->pm)
		cb = dev->driver->pm->runtime_resume;

	return cb ? cb(dev) : 0;
}

/**
 * genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
 * @dev: Device to suspend.
 *
 * Carry out a runtime suspend of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_suspend(struct device *dev)
{
	struct generic_pm_domain *genpd;
	bool (*suspend_ok)(struct device *__dev);
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * A runtime PM centric subsystem/driver may re-use the runtime PM
	 * callbacks for other purposes than runtime PM. In those scenarios
	 * runtime PM is disabled. Under these circumstances, we shall skip
	 * validating/measuring the PM QoS latency.
	 */
	suspend_ok = genpd->gov ? genpd->gov->suspend_ok : NULL;
	if (runtime_pm && suspend_ok && !suspend_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
	if (runtime_pm)
		time_start = ktime_get();

	ret = __genpd_runtime_suspend(dev);
	if (ret)
		return ret;

	ret = genpd_stop_dev(genpd, dev);
	if (ret) {
		__genpd_runtime_resume(dev);
		return ret;
	}

	/* Update suspend latency value if the measured time exceeds it. */
	if (runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->suspend_latency_ns) {
			td->suspend_latency_ns = elapsed_ns;
			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	/*
	 * If power.irq_safe is set, this routine will be run with interrupts
	 * off, so it can't use mutexes.
	 */
	if (dev->power.irq_safe)
		return 0;

	mutex_lock(&genpd->lock);
	genpd_poweroff(genpd, false);
	mutex_unlock(&genpd->lock);

	return 0;
}

/**
 * genpd_runtime_resume - Resume a device belonging to I/O PM domain.
 * @dev: Device to resume.
 *
 * Carry out a runtime resume of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
 */
static int genpd_runtime_resume(struct device *dev)
{
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;
	bool timed = true;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/* If power.irq_safe, the PM domain is never powered off. */
	if (dev->power.irq_safe) {
		timed = false;
		goto out;
	}

	mutex_lock(&genpd->lock);
	ret = genpd_poweron(genpd, 0);
	mutex_unlock(&genpd->lock);

	if (ret)
		return ret;

 out:
	/* Measure resume latency. */
	if (timed && runtime_pm)
		time_start = ktime_get();

	ret = genpd_start_dev(genpd, dev);
	if (ret)
		goto err_poweroff;

	ret = __genpd_runtime_resume(dev);
	if (ret)
		goto err_stop;

	/* Update resume latency value if the measured time exceeds it. */
	if (timed && runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
			dev_dbg(dev, "resume latency exceeded, %lld ns\n",
				elapsed_ns);
			genpd->max_off_time_changed = true;
			td->constraint_changed = true;
		}
	}

	return 0;

 err_stop:
	genpd_stop_dev(genpd, dev);
 err_poweroff:
	if (!dev->power.irq_safe) {
		mutex_lock(&genpd->lock);
		genpd_poweroff(genpd, 0);
		mutex_unlock(&genpd->lock);
	}

	return ret;
}

static bool pd_ignore_unused;
static int __init pd_ignore_unused_setup(char *__unused)
{
	pd_ignore_unused = true;
	return 1;
}
__setup("pd_ignore_unused", pd_ignore_unused_setup);
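
/*
 * Usage note (illustrative, not in the original file): booting with
 * "pd_ignore_unused" on the kernel command line sets the flag above, which
 * makes genpd_poweroff_unused() below leave otherwise-unused PM domains
 * powered on at late_initcall time - useful when debugging a platform
 * where powering off an "unused" domain hides a missing device binding.
 */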

/**
 * genpd_poweroff_unused - Power off all PM domains with no devices in use.
 */
static int __init genpd_poweroff_unused(void)
{
	struct generic_pm_domain *genpd;

	if (pd_ignore_unused) {
		pr_warn("genpd: Not disabling unused power domains\n");
		return 0;
	}

	mutex_lock(&gpd_list_lock);

	list_for_each_entry(genpd, &gpd_list, gpd_list_node)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&gpd_list_lock);

	return 0;
}
late_initcall(genpd_poweroff_unused);

#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_GENERIC_DOMAINS_OF)

/**
 * pm_genpd_present - Check if the given PM domain has been initialized.
 * @genpd: PM domain to check.
 */
static bool pm_genpd_present(const struct generic_pm_domain *genpd)
{
	const struct generic_pm_domain *gpd;

	if (IS_ERR_OR_NULL(genpd))
		return false;

	list_for_each_entry(gpd, &gpd_list, gpd_list_node)
		if (gpd == genpd)
			return true;

	return false;
}

#endif

#ifdef CONFIG_PM_SLEEP

static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
				    struct device *dev)
{
	return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}

/**
 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
 * @genpd: PM domain to power off, if possible.
 * @timed: True if latency measurements are allowed.
 *
 * Check if the given PM domain can be powered off (during system suspend or
 * hibernation) and do that if so. Also, in that case propagate to its masters.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd,
				   bool timed)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_POWER_OFF)
		return;

	if (genpd->suspended_count != genpd->device_count
	    || atomic_read(&genpd->sd_count) > 0)
		return;

	/* Choose the deepest state when suspending */
	genpd->state_idx = genpd->state_count - 1;
	genpd_power_off(genpd, timed);

	genpd->status = GPD_STATE_POWER_OFF;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		genpd_sd_counter_dec(link->master);
		pm_genpd_sync_poweroff(link->master, timed);
	}
}

/**
 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
 * @genpd: PM domain to power on.
 * @timed: True if latency measurements are allowed.
 *
 * This function is only called in "noirq" and "syscore" stages of system power
 * transitions, so it need not acquire locks (all of the "noirq" callbacks are
 * executed sequentially, so it is guaranteed that it will never run twice in
 * parallel).
 */
static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd,
				  bool timed)
{
	struct gpd_link *link;

	if (genpd->status == GPD_STATE_ACTIVE)
		return;

	list_for_each_entry(link, &genpd->slave_links, slave_node) {
		pm_genpd_sync_poweron(link->master, timed);
		genpd_sd_counter_inc(link->master);
	}

	genpd_power_on(genpd, timed);

	genpd->status = GPD_STATE_ACTIVE;
}

/**
 * resume_needed - Check whether to resume a device before system suspend.
 * @dev: Device to check.
 * @genpd: PM domain the device belongs to.
 *
 * There are two cases in which a device that can wake up the system from sleep
 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
 * to wake up the system and it has to remain active for this purpose while the
 * system is in the sleep state and (2) if the device is not enabled to wake up
 * the system from sleep states and it generally doesn't generate wakeup signals
 * by itself (those signals are generated on its behalf by other parts of the
 * system).  In the latter case it may be necessary to reconfigure the device's
 * wakeup settings during system suspend, because it may have been set up to
 * signal remote wakeup from the system's working state as needed by runtime PM.
 * Return 'true' in either of the above cases.
 */
static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
{
	bool active_wakeup;

	if (!device_can_wakeup(dev))
		return false;

	active_wakeup = genpd_dev_active_wakeup(genpd, dev);
	return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}
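
/*
 * Illustrative summary (not in the original file) of resume_needed() for a
 * device that can wake up the system:
 *
 *	device_may_wakeup()	active_wakeup	resume before suspend?
 *	true			true		yes (must stay powered)
 *	true			false		no
 *	false			true		no
 *	false			false		yes (rearm wakeup settings)
 */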

/**
 * pm_genpd_prepare - Start power transition of a device in a PM domain.
 * @dev: Device to start the transition of.
 *
 * Start a power transition of a device (during a system-wide power transition)
 * under the assumption that its pm_domain field points to the domain member of
 * an object of type struct generic_pm_domain representing a PM domain
 * consisting of I/O devices.
 */
static int pm_genpd_prepare(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * If a wakeup request is pending for the device, it should be woken up
	 * at this point and a system wakeup event should be reported if it's
	 * set up to wake up the system from sleep states.
	 */
	if (resume_needed(dev, genpd))
		pm_runtime_resume(dev);

	mutex_lock(&genpd->lock);

	if (genpd->prepared_count++ == 0)
		genpd->suspended_count = 0;

	mutex_unlock(&genpd->lock);

	ret = pm_generic_prepare(dev);
	if (ret) {
		mutex_lock(&genpd->lock);

		genpd->prepared_count--;

		mutex_unlock(&genpd->lock);
	}

	return ret;
}

/**
 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
 * @dev: Device to suspend.
 *
 * Stop the device and remove power from the domain if all devices in it have
 * been stopped.
 */
static int pm_genpd_suspend_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
		return 0;

	if (genpd->dev_ops.stop && genpd->dev_ops.start) {
		ret = pm_runtime_force_suspend(dev);
		if (ret)
			return ret;
	}

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	genpd->suspended_count++;
	pm_genpd_sync_poweroff(genpd, true);

	return 0;
}

/**
 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Restore power to the device's PM domain, if necessary, and start the device.
 */
static int pm_genpd_resume_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
		return 0;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 */
	pm_genpd_sync_poweron(genpd, true);
	genpd->suspended_count--;

	if (genpd->dev_ops.stop && genpd->dev_ops.start)
		ret = pm_runtime_force_resume(dev);

	return ret;
}

/**
 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
 * @dev: Device to freeze.
 *
 * Carry out a late freeze of a device under the assumption that its
 * pm_domain field points to the domain member of an object of type
 * struct generic_pm_domain representing a power domain consisting of I/O
 * devices.
 */
static int pm_genpd_freeze_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->dev_ops.stop && genpd->dev_ops.start)
		ret = pm_runtime_force_suspend(dev);

	return ret;
}

/**
 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
 * @dev: Device to thaw.
 *
 * Start the device, unless power has been removed from the domain already
 * before the system transition.
 */
static int pm_genpd_thaw_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	if (genpd->dev_ops.stop && genpd->dev_ops.start)
		ret = pm_runtime_force_resume(dev);

	return ret;
}

/**
 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
 * @dev: Device to resume.
 *
 * Make sure the domain will be in the same power state as before the
 * hibernation the system is resuming from and start the device if necessary.
 */
static int pm_genpd_restore_noirq(struct device *dev)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return -EINVAL;

	/*
	 * Since all of the "noirq" callbacks are executed sequentially, it is
	 * guaranteed that this function will never run twice in parallel for
	 * the same PM domain, so it is not necessary to use locking here.
	 *
	 * At this point suspended_count == 0 means we are being run for the
	 * first time for the given domain in the present cycle.
	 */
	if (genpd->suspended_count++ == 0)
		/*
		 * The boot kernel might put the domain into arbitrary state,
		 * so make it appear as powered off to pm_genpd_sync_poweron(),
		 * so that it tries to power it on in case it was really off.
		 */
		genpd->status = GPD_STATE_POWER_OFF;

	pm_genpd_sync_poweron(genpd, true);

	if (genpd->dev_ops.stop && genpd->dev_ops.start)
		ret = pm_runtime_force_resume(dev);

	return ret;
}

/**
 * pm_genpd_complete - Complete power transition of a device in a power domain.
 * @dev: Device to complete the transition of.
 *
 * Complete a power transition of a device (during a system-wide power
 * transition) under the assumption that its pm_domain field points to the
 * domain member of an object of type struct generic_pm_domain representing
 * a power domain consisting of I/O devices.
 */
static void pm_genpd_complete(struct device *dev)
{
	struct generic_pm_domain *genpd;

	dev_dbg(dev, "%s()\n", __func__);

	genpd = dev_to_genpd(dev);
	if (IS_ERR(genpd))
		return;

	pm_generic_complete(dev);

	mutex_lock(&genpd->lock);

	genpd->prepared_count--;
	if (!genpd->prepared_count)
		genpd_queue_power_off_work(genpd);

	mutex_unlock(&genpd->lock);
}

/**
 * genpd_syscore_switch - Switch power during system core suspend or resume.
 * @dev: Device that normally is marked as "always on" to switch power for.
 * @suspend: True if powering off, false if powering back on.
 *
 * This routine may only be called during the system core (syscore) suspend or
 * resume phase for devices whose "always on" flags are set.
 */
static void genpd_syscore_switch(struct device *dev, bool suspend)
{
	struct generic_pm_domain *genpd;

	genpd = dev_to_genpd(dev);
	if (!pm_genpd_present(genpd))
		return;

	if (suspend) {
		genpd->suspended_count++;
		pm_genpd_sync_poweroff(genpd, false);
	} else {
		pm_genpd_sync_poweron(genpd, false);
		genpd->suspended_count--;
	}
}

void pm_genpd_syscore_poweroff(struct device *dev)
{
	genpd_syscore_switch(dev, true);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);

void pm_genpd_syscore_poweron(struct device *dev)
{
	genpd_syscore_switch(dev, false);
}
EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);

#else /* !CONFIG_PM_SLEEP */

#define pm_genpd_prepare		NULL
#define pm_genpd_suspend_noirq		NULL
#define pm_genpd_resume_noirq		NULL
#define pm_genpd_freeze_noirq		NULL
#define pm_genpd_thaw_noirq		NULL
#define pm_genpd_restore_noirq		NULL
#define pm_genpd_complete		NULL

#endif /* CONFIG_PM_SLEEP */

static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev,
					struct generic_pm_domain *genpd,
					struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret;

	ret = dev_pm_get_subsys_data(dev);
	if (ret)
		return ERR_PTR(ret);

	gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL);
	if (!gpd_data) {
		ret = -ENOMEM;
		goto err_put;
	}

	if (td)
		gpd_data->td = *td;

	gpd_data->base.dev = dev;
	gpd_data->td.constraint_changed = true;
	gpd_data->td.effective_constraint_ns = -1;
	gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier;

	spin_lock_irq(&dev->power.lock);

	if (dev->power.subsys_data->domain_data) {
		ret = -EINVAL;
		goto err_free;
	}

	dev->power.subsys_data->domain_data = &gpd_data->base;

	spin_unlock_irq(&dev->power.lock);

	dev_pm_domain_set(dev, &genpd->domain);

	return gpd_data;

 err_free:
	spin_unlock_irq(&dev->power.lock);
	kfree(gpd_data);
 err_put:
	dev_pm_put_subsys_data(dev);
	return ERR_PTR(ret);
}

static void genpd_free_dev_data(struct device *dev,
				struct generic_pm_domain_data *gpd_data)
{
	dev_pm_domain_set(dev, NULL);

	spin_lock_irq(&dev->power.lock);

	dev->power.subsys_data->domain_data = NULL;

	spin_unlock_irq(&dev->power.lock);

	kfree(gpd_data);
	dev_pm_put_subsys_data(dev);
}

/**
 * __pm_genpd_add_device - Add a device to an I/O PM domain.
 * @genpd: PM domain to add the device to.
 * @dev: Device to be added.
 * @td: Set of PM QoS timing parameters to attach to the device.
 */
int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
			  struct gpd_timing_data *td)
{
	struct generic_pm_domain_data *gpd_data;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
		return -EINVAL;

	gpd_data = genpd_alloc_dev_data(dev, genpd, td);
	if (IS_ERR(gpd_data))
		return PTR_ERR(gpd_data);

	mutex_lock(&genpd->lock);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
	if (ret)
		goto out;

	genpd->device_count++;
	genpd->max_off_time_changed = true;

	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);

 out:
	mutex_unlock(&genpd->lock);

	if (ret)
		genpd_free_dev_data(dev, gpd_data);
	else
		dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}
EXPORT_SYMBOL_GPL(__pm_genpd_add_device);
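
/*
 * Illustrative sketch (not in the original file): binding a device to a
 * domain from platform code; "my_genpd" and "pdev" are hypothetical, and
 * pm_genpd_add_device() is assumed (per the pm_domain.h header of this
 * era) to be a wrapper that calls __pm_genpd_add_device() with a NULL @td,
 * so the timing data starts out zeroed:
 *
 *	ret = pm_genpd_add_device(&my_genpd, &pdev->dev);
 *	if (ret)
 *		dev_err(&pdev->dev, "failed to join PM domain: %d\n", ret);
 *
 * Note the -EAGAIN case above: the call fails while a system suspend
 * transition is in progress (prepared_count > 0), which is why the DT
 * attach path further below retries with a delay.
 */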

/**
 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
 * @genpd: PM domain to remove the device from.
 * @dev: Device to be removed.
 */
int pm_genpd_remove_device(struct generic_pm_domain *genpd,
			   struct device *dev)
{
	struct generic_pm_domain_data *gpd_data;
	struct pm_domain_data *pdd;
	int ret = 0;

	dev_dbg(dev, "%s()\n", __func__);

	if (!genpd || genpd != genpd_lookup_dev(dev))
		return -EINVAL;

	/* The above validation also means we have existing domain_data. */
	pdd = dev->power.subsys_data->domain_data;
	gpd_data = to_gpd_data(pdd);
	dev_pm_qos_remove_notifier(dev, &gpd_data->nb);

	mutex_lock(&genpd->lock);

	if (genpd->prepared_count > 0) {
		ret = -EAGAIN;
		goto out;
	}

	genpd->device_count--;
	genpd->max_off_time_changed = true;

	if (genpd->detach_dev)
		genpd->detach_dev(genpd, dev);

	list_del_init(&pdd->list_node);

	mutex_unlock(&genpd->lock);

	genpd_free_dev_data(dev, gpd_data);

	return 0;

 out:
	mutex_unlock(&genpd->lock);
	dev_pm_qos_add_notifier(dev, &gpd_data->nb);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_device);

/**
 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @genpd: Master PM domain to add the subdomain to.
 * @subdomain: Subdomain to be added.
 */
int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
			   struct generic_pm_domain *subdomain)
{
	struct gpd_link *link, *itr;
	int ret = 0;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)
	    || genpd == subdomain)
		return -EINVAL;

	link = kzalloc(sizeof(*link), GFP_KERNEL);
	if (!link)
		return -ENOMEM;

	mutex_lock(&subdomain->lock);
	mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);

	if (genpd->status == GPD_STATE_POWER_OFF
	    && subdomain->status != GPD_STATE_POWER_OFF) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(itr, &genpd->master_links, master_node) {
		if (itr->slave == subdomain && itr->master == genpd) {
			ret = -EINVAL;
			goto out;
		}
	}

	link->master = genpd;
	list_add_tail(&link->master_node, &genpd->master_links);
	link->slave = subdomain;
	list_add_tail(&link->slave_node, &subdomain->slave_links);
	if (subdomain->status != GPD_STATE_POWER_OFF)
		genpd_sd_counter_inc(genpd);

 out:
	mutex_unlock(&genpd->lock);
	mutex_unlock(&subdomain->lock);
	if (ret)
		kfree(link);
	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_add_subdomain);

/**
 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
 * @genpd: Master PM domain to remove the subdomain from.
 * @subdomain: Subdomain to be removed.
 */
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
			      struct generic_pm_domain *subdomain)
{
	struct gpd_link *link;
	int ret = -EINVAL;

	if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
		return -EINVAL;

	mutex_lock(&subdomain->lock);
	mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);

	if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
		pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
			subdomain->name);
		ret = -EBUSY;
		goto out;
	}

	list_for_each_entry(link, &genpd->master_links, master_node) {
		if (link->slave != subdomain)
			continue;

		list_del(&link->master_node);
		list_del(&link->slave_node);
		kfree(link);
		if (subdomain->status != GPD_STATE_POWER_OFF)
			genpd_sd_counter_dec(genpd);

		ret = 0;
		break;
	}

 out:
	mutex_unlock(&genpd->lock);
	mutex_unlock(&subdomain->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pm_genpd_remove_subdomain);

/**
 * pm_genpd_init - Initialize a generic I/O PM domain object.
 * @genpd: PM domain object to initialize.
 * @gov: PM domain governor to associate with the domain (may be NULL).
 * @is_off: Initial value of the domain's power_is_off field.
 *
 * Returns 0 on successful initialization, else a negative error code.
 */
int pm_genpd_init(struct generic_pm_domain *genpd,
		  struct dev_power_governor *gov, bool is_off)
{
	if (IS_ERR_OR_NULL(genpd))
		return -EINVAL;

	INIT_LIST_HEAD(&genpd->master_links);
	INIT_LIST_HEAD(&genpd->slave_links);
	INIT_LIST_HEAD(&genpd->dev_list);
	mutex_init(&genpd->lock);
	genpd->gov = gov;
	INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
	atomic_set(&genpd->sd_count, 0);
	genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
	genpd->device_count = 0;
	genpd->max_off_time_ns = -1;
	genpd->max_off_time_changed = true;
	genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
	genpd->domain.ops.runtime_resume = genpd_runtime_resume;
	genpd->domain.ops.prepare = pm_genpd_prepare;
	genpd->domain.ops.suspend = pm_generic_suspend;
	genpd->domain.ops.suspend_late = pm_generic_suspend_late;
	genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
	genpd->domain.ops.resume_early = pm_generic_resume_early;
	genpd->domain.ops.resume = pm_generic_resume;
	genpd->domain.ops.freeze = pm_generic_freeze;
	genpd->domain.ops.freeze_late = pm_generic_freeze_late;
	genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
	genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
	genpd->domain.ops.thaw_early = pm_generic_thaw_early;
	genpd->domain.ops.thaw = pm_generic_thaw;
	genpd->domain.ops.poweroff = pm_generic_poweroff;
	genpd->domain.ops.poweroff_late = pm_generic_poweroff_late;
	genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
	genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
	genpd->domain.ops.restore_early = pm_generic_restore_early;
	genpd->domain.ops.restore = pm_generic_restore;
	genpd->domain.ops.complete = pm_genpd_complete;

	if (genpd->flags & GENPD_FLAG_PM_CLK) {
		genpd->dev_ops.stop = pm_clk_suspend;
		genpd->dev_ops.start = pm_clk_resume;
	}

	if (genpd->state_idx >= GENPD_MAX_NUM_STATES) {
		pr_warn("Initial state index out of bounds.\n");
		genpd->state_idx = GENPD_MAX_NUM_STATES - 1;
	}

	if (genpd->state_count > GENPD_MAX_NUM_STATES) {
		pr_warn("Limiting states to %d\n", GENPD_MAX_NUM_STATES);
		genpd->state_count = GENPD_MAX_NUM_STATES;
	}

	/* Use only one "off" state if there were no states declared */
	if (genpd->state_count == 0)
		genpd->state_count = 1;

	mutex_lock(&gpd_list_lock);
	list_add(&genpd->gpd_list_node, &gpd_list);
	mutex_unlock(&gpd_list_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(pm_genpd_init);
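
/*
 * Illustrative sketch (not in the original file): a minimal provider could
 * initialize its domain like this; all names are hypothetical.
 *
 *	static struct generic_pm_domain my_pd = {
 *		.name = "my-power-domain",
 *		.power_on = my_pd_power_on,
 *		.power_off = my_pd_power_off,
 *	};
 *
 *	ret = pm_genpd_init(&my_pd, NULL, true);  // register, initially off
 *
 * Passing NULL for @gov means no governor is consulted before power-off;
 * passing a governor such as &simple_qos_governor makes genpd honour the
 * devices' PM QoS latency constraints in genpd_poweroff().
 */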

#ifdef CONFIG_PM_GENERIC_DOMAINS_OF

typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args,
						   void *data);

/*
 * Device Tree based PM domain providers.
 *
 * The code below implements generic device tree based PM domain providers that
 * bind device tree nodes with generic PM domains registered in the system.
 *
 * Any driver that registers generic PM domains and needs to support binding of
 * devices to these domains is supposed to register a PM domain provider, which
 * maps a PM domain specifier retrieved from the device tree to a PM domain.
 *
 * Two simple mapping functions have been provided for convenience:
 *  - genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
 *  - genpd_xlate_onecell() for mapping of multiple PM domains per node by
 *    index.
 */
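
/*
 * Illustrative device-tree sketch (not in the original file): a provider
 * node and a consumer referencing it; node names and compatibles are
 * hypothetical.
 *
 *	power: power-controller@12340000 {
 *		compatible = "foo,power-controller";
 *		reg = <0x12340000 0x1000>;
 *		#power-domain-cells = <0>;
 *	};
 *
 *	usb-phy@12350000 {
 *		compatible = "foo,usb-phy";
 *		reg = <0x12350000 0x1000>;
 *		power-domains = <&power>;
 *	};
 *
 * With #power-domain-cells = <0> the specifier carries no extra cells, so
 * genpd_xlate_simple() below is the natural mapping; a one-cell specifier
 * calls for genpd_xlate_onecell() instead.
 */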

/**
 * struct of_genpd_provider - PM domain provider registration structure
 * @link: Entry in global list of PM domain providers
 * @node: Pointer to device tree node of PM domain provider
 * @xlate: Provider-specific xlate callback mapping a set of specifier cells
 *         into a PM domain.
 * @data: context pointer to be passed into @xlate callback
 */
struct of_genpd_provider {
	struct list_head link;
	struct device_node *node;
	genpd_xlate_t xlate;
	void *data;
};

/* List of registered PM domain providers. */
static LIST_HEAD(of_genpd_providers);
/* Mutex to protect the list above. */
static DEFINE_MUTEX(of_genpd_mutex);

/**
 * genpd_xlate_simple() - Xlate function for direct node-domain mapping
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct generic_pm_domain
 *
 * This is a generic xlate function that can be used to model PM domains that
 * have their own device tree nodes. The private data of xlate function needs
 * to be a valid pointer to struct generic_pm_domain.
 */
static struct generic_pm_domain *genpd_xlate_simple(
					struct of_phandle_args *genpdspec,
					void *data)
{
	if (genpdspec->args_count != 0)
		return ERR_PTR(-EINVAL);
	return data;
}

/**
 * genpd_xlate_onecell() - Xlate function using a single index.
 * @genpdspec: OF phandle args to map into a PM domain
 * @data: xlate function private data - pointer to struct genpd_onecell_data
 *
 * This is a generic xlate function that can be used to model simple PM domain
 * controllers that have one device tree node and provide multiple PM domains.
 * A single cell is used as an index into an array of PM domains specified in
 * the genpd_onecell_data struct when registering the provider.
 */
static struct generic_pm_domain *genpd_xlate_onecell(
					struct of_phandle_args *genpdspec,
					void *data)
{
	struct genpd_onecell_data *genpd_data = data;
	unsigned int idx = genpdspec->args[0];

	if (genpdspec->args_count != 1)
		return ERR_PTR(-EINVAL);

	if (idx >= genpd_data->num_domains) {
		pr_err("%s: invalid domain index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	if (!genpd_data->domains[idx])
		return ERR_PTR(-ENOENT);

	return genpd_data->domains[idx];
}
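
/*
 * Illustrative note (not in the original file): with a hypothetical
 * provider registered via of_genpd_add_provider_onecell() and a consumer
 * specifier such as "power-domains = <&power 2>;", the single cell (2) is
 * passed here in genpdspec->args[0] and selects genpd_data->domains[2].
 */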

/**
 * genpd_add_provider() - Register a PM domain provider for a node
 * @np: Device node pointer associated with the PM domain provider.
 * @xlate: Callback for decoding PM domain from phandle arguments.
 * @data: Context pointer for @xlate callback.
 */
static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
			      void *data)
{
	struct of_genpd_provider *cp;

	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
	if (!cp)
		return -ENOMEM;

	cp->node = of_node_get(np);
	cp->data = data;
	cp->xlate = xlate;

	mutex_lock(&of_genpd_mutex);
	list_add(&cp->link, &of_genpd_providers);
	mutex_unlock(&of_genpd_mutex);
	pr_debug("Added domain provider from %s\n", np->full_name);

	return 0;
}

/**
 * of_genpd_add_provider_simple() - Register a simple PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @genpd: Pointer to PM domain associated with the PM domain provider.
 */
int of_genpd_add_provider_simple(struct device_node *np,
				 struct generic_pm_domain *genpd)
{
	int ret = -EINVAL;

	if (!np || !genpd)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);

	if (pm_genpd_present(genpd))
		ret = genpd_add_provider(np, genpd_xlate_simple, genpd);

	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_simple);
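
/*
 * Illustrative sketch (not in the original file): typical provider probe
 * code, with hypothetical names:
 *
 *	ret = pm_genpd_init(&my_pd, NULL, true);
 *	if (ret)
 *		return ret;
 *
 *	ret = of_genpd_add_provider_simple(pdev->dev.of_node, &my_pd);
 *
 * The domain must already be on the internal gpd_list (i.e. initialized
 * with pm_genpd_init()), otherwise this function returns -EINVAL.
 */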

/**
 * of_genpd_add_provider_onecell() - Register a onecell PM domain provider
 * @np: Device node pointer associated with the PM domain provider.
 * @data: Pointer to the data associated with the PM domain provider.
 */
int of_genpd_add_provider_onecell(struct device_node *np,
				  struct genpd_onecell_data *data)
{
	unsigned int i;
	int ret;

	if (!np || !data)
		return -EINVAL;

	mutex_lock(&gpd_list_lock);

	for (i = 0; i < data->num_domains; i++) {
		if (!pm_genpd_present(data->domains[i])) {
			mutex_unlock(&gpd_list_lock);
			return -EINVAL;
		}
	}

	ret = genpd_add_provider(np, genpd_xlate_onecell, data);

	mutex_unlock(&gpd_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(of_genpd_add_provider_onecell);

/**
 * of_genpd_del_provider() - Remove a previously registered PM domain provider
 * @np: Device node pointer associated with the PM domain provider
 */
void of_genpd_del_provider(struct device_node *np)
{
	struct of_genpd_provider *cp;

	mutex_lock(&of_genpd_mutex);
	list_for_each_entry(cp, &of_genpd_providers, link) {
		if (cp->node == np) {
			list_del(&cp->link);
			of_node_put(cp->node);
			kfree(cp);
			break;
		}
	}
	mutex_unlock(&of_genpd_mutex);
}
EXPORT_SYMBOL_GPL(of_genpd_del_provider);

/**
 * genpd_get_from_provider() - Look-up PM domain
 * @genpdspec: OF phandle args to use for look-up
 *
 * Looks for a PM domain provider under the node specified by @genpdspec and if
 * found, uses xlate function of the provider to map phandle args to a PM
 * domain.
 *
 * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
 * on failure.
 */
static struct generic_pm_domain *genpd_get_from_provider(
					struct of_phandle_args *genpdspec)
{
	struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
	struct of_genpd_provider *provider;

	if (!genpdspec)
		return ERR_PTR(-EINVAL);

	mutex_lock(&of_genpd_mutex);

	/* Check if we have such a provider in our array */
	list_for_each_entry(provider, &of_genpd_providers, link) {
		if (provider->node == genpdspec->np)
			genpd = provider->xlate(genpdspec, provider->data);
		if (!IS_ERR(genpd))
			break;
	}

	mutex_unlock(&of_genpd_mutex);

	return genpd;
}

/**
 * of_genpd_add_device() - Add a device to an I/O PM domain
 * @genpdspec: OF phandle args to use for look-up PM domain
 * @dev: Device to be added.
 *
 * Looks-up an I/O PM domain based upon phandle args provided and adds
 * the device to the PM domain. Returns a negative error code on failure.
 */
int of_genpd_add_device(struct of_phandle_args *genpdspec, struct device *dev)
{
	struct generic_pm_domain *genpd;

	genpd = genpd_get_from_provider(genpdspec);
	if (IS_ERR(genpd))
		return PTR_ERR(genpd);

	return pm_genpd_add_device(genpd, dev);
}
EXPORT_SYMBOL_GPL(of_genpd_add_device);

/**
 * of_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
 * @parent_spec: OF phandle args to use for parent PM domain look-up
 * @subdomain_spec: OF phandle args to use for subdomain look-up
 *
 * Looks-up a parent PM domain and subdomain based upon phandle args
 * provided and adds the subdomain to the parent PM domain. Returns a
 * negative error code on failure.
 */
int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
			   struct of_phandle_args *subdomain_spec)
{
	struct generic_pm_domain *parent, *subdomain;

	parent = genpd_get_from_provider(parent_spec);
	if (IS_ERR(parent))
		return PTR_ERR(parent);

	subdomain = genpd_get_from_provider(subdomain_spec);
	if (IS_ERR(subdomain))
		return PTR_ERR(subdomain);

	return pm_genpd_add_subdomain(parent, subdomain);
}
EXPORT_SYMBOL_GPL(of_genpd_add_subdomain);

/**
 * genpd_dev_pm_detach - Detach a device from its PM domain.
 * @dev: Device to detach.
 * @power_off: Currently not used
 *
 * Try to locate a corresponding generic PM domain, which the device was
 * attached to previously. If such is found, the device is detached from it.
 */
static void genpd_dev_pm_detach(struct device *dev, bool power_off)
{
	struct generic_pm_domain *pd;
	unsigned int i;
	int ret = 0;

	pd = genpd_lookup_dev(dev);
	if (!pd)
		return;

	dev_dbg(dev, "removing from PM domain %s\n", pd->name);

	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		ret = pm_genpd_remove_device(pd, dev);
		if (ret != -EAGAIN)
			break;

		mdelay(i);
		cond_resched();
	}

	if (ret < 0) {
		dev_err(dev, "failed to remove from PM domain %s: %d",
			pd->name, ret);
		return;
	}

	/* Check if PM domain can be powered off after removing this device. */
	genpd_queue_power_off_work(pd);
}

static void genpd_dev_pm_sync(struct device *dev)
{
	struct generic_pm_domain *pd;

	pd = dev_to_genpd(dev);
	if (IS_ERR(pd))
		return;

	genpd_queue_power_off_work(pd);
}

/**
 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
 * @dev: Device to attach.
 *
 * Parse device's OF node to find a PM domain specifier. If such is found,
 * attaches the device to retrieved pm_domain ops.
 *
 * Both generic and legacy Samsung-specific DT bindings are supported to keep
 * backwards compatibility with existing DTBs.
 *
 * Returns 0 on successfully attached PM domain or negative error code. Note
 * that if a power-domain exists for the device, but it cannot be found or
 * turned on, then return -EPROBE_DEFER to ensure that the device is not
 * probed and to re-try again later.
 */
int genpd_dev_pm_attach(struct device *dev)
{
	struct of_phandle_args pd_args;
	struct generic_pm_domain *pd;
	unsigned int i;
	int ret;

	if (!dev->of_node)
		return -ENODEV;

	if (dev->pm_domain)
		return -EEXIST;

	ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
					"#power-domain-cells", 0, &pd_args);
	if (ret < 0) {
		if (ret != -ENOENT)
			return ret;

		/*
		 * Try legacy Samsung-specific bindings
		 * (for backwards compatibility of DT ABI)
		 */
		pd_args.args_count = 0;
		pd_args.np = of_parse_phandle(dev->of_node,
						"samsung,power-domain", 0);
		if (!pd_args.np)
			return -ENOENT;
	}

	pd = genpd_get_from_provider(&pd_args);
	of_node_put(pd_args.np);
	if (IS_ERR(pd)) {
		dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
			__func__, PTR_ERR(pd));
		return -EPROBE_DEFER;
	}

	dev_dbg(dev, "adding to PM domain %s\n", pd->name);

	for (i = 1; i < GENPD_RETRY_MAX_MS; i <<= 1) {
		ret = pm_genpd_add_device(pd, dev);
		if (ret != -EAGAIN)
			break;

		mdelay(i);
		cond_resched();
	}

	if (ret < 0) {
		dev_err(dev, "failed to add to PM domain %s: %d",
			pd->name, ret);
		goto out;
	}

	dev->pm_domain->detach = genpd_dev_pm_detach;
	dev->pm_domain->sync = genpd_dev_pm_sync;

	mutex_lock(&pd->lock);
	ret = genpd_poweron(pd, 0);
	mutex_unlock(&pd->lock);
out:
	return ret ? -EPROBE_DEFER : 0;
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
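
/*
 * Illustrative note (not in the original file): for a consumer node using
 * "power-domains = <&power>;", the driver core reaches the function above
 * via dev_pm_domain_attach() at probe time. The specifier is resolved
 * through the registered provider, pm_genpd_add_device() is retried with
 * exponential backoff (1, 2, 4, ... ms, bounded by GENPD_RETRY_MAX_MS) to
 * ride out a transient -EAGAIN from a concurrent system-suspend prepare
 * phase, and finally the domain is powered on so probing can proceed.
 */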

#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */

/*** debugfs support ***/

#ifdef CONFIG_DEBUG_FS
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/kobject.h>
static struct dentry *pm_genpd_debugfs_dir;

/*
 * TODO: This function is a slightly modified version of rtpm_status_show
 * from sysfs.c, so generalize it.
 */
static void rtpm_status_str(struct seq_file *s, struct device *dev)
{
	static const char * const status_lookup[] = {
		[RPM_ACTIVE] = "active",
		[RPM_RESUMING] = "resuming",
		[RPM_SUSPENDED] = "suspended",
		[RPM_SUSPENDING] = "suspending"
	};
	const char *p = "";

	if (dev->power.runtime_error)
		p = "error";
	else if (dev->power.disable_depth)
		p = "unsupported";
	else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
		p = status_lookup[dev->power.runtime_status];
	else
		WARN_ON(1);

	seq_puts(s, p);
}

static int pm_genpd_summary_one(struct seq_file *s,
				struct generic_pm_domain *genpd)
{
	static const char * const status_lookup[] = {
		[GPD_STATE_ACTIVE] = "on",
		[GPD_STATE_POWER_OFF] = "off"
	};
	struct pm_domain_data *pm_data;
	const char *kobj_path;
	struct gpd_link *link;
	char state[16];
	int ret;

	ret = mutex_lock_interruptible(&genpd->lock);
	if (ret)
		return -ERESTARTSYS;

	if (WARN_ON(genpd->status >= ARRAY_SIZE(status_lookup)))
		goto exit;
	if (genpd->status == GPD_STATE_POWER_OFF)
		snprintf(state, sizeof(state), "%s-%u",
			 status_lookup[genpd->status], genpd->state_idx);
	else
		snprintf(state, sizeof(state), "%s",
			 status_lookup[genpd->status]);
	seq_printf(s, "%-30s  %-15s ", genpd->name, state);

	/*
	 * Modifications on the list require holding locks on both
	 * master and slave, so we are safe.
	 * Also genpd->name is immutable.
	 */
	list_for_each_entry(link, &genpd->master_links, master_node) {
		seq_printf(s, "%s", link->slave->name);
		if (!list_is_last(&link->master_node, &genpd->master_links))
			seq_puts(s, ", ");
	}

	list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
		kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
		if (kobj_path == NULL)
			continue;

		seq_printf(s, "\n    %-50s  ", kobj_path);
		rtpm_status_str(s, pm_data->dev);
		kfree(kobj_path);
	}

	seq_puts(s, "\n");
exit:
	mutex_unlock(&genpd->lock);

	return 0;
}

static int pm_genpd_summary_show(struct seq_file *s, void *data)
{
	struct generic_pm_domain *genpd;
	int ret = 0;

	seq_puts(s, "domain                          status          slaves\n");
	seq_puts(s, "    /device                                             runtime status\n");
	seq_puts(s, "----------------------------------------------------------------------\n");

	ret = mutex_lock_interruptible(&gpd_list_lock);
	if (ret)
		return -ERESTARTSYS;

	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
		ret = pm_genpd_summary_one(s, genpd);
		if (ret)
			break;
	}
	mutex_unlock(&gpd_list_lock);

	return ret;
}
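
/*
 * Illustrative sample (not in the original file) of what the debugfs
 * summary produced above might look like; domain and device names are
 * hypothetical:
 *
 *	domain                          status          slaves
 *	    /device                                             runtime status
 *	----------------------------------------------------------------------
 *	a3sp                            on              a2us
 *	    /devices/platform/e60b0000.i2c                      suspended
 *	a2us                            off-0
 */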

static int pm_genpd_summary_open(struct inode *inode, struct file *file)
{
	return single_open(file, pm_genpd_summary_show, NULL);
}

static const struct file_operations pm_genpd_summary_fops = {
	.open = pm_genpd_summary_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init pm_genpd_debug_init(void)
{
	struct dentry *d;

	pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);

	if (!pm_genpd_debugfs_dir)
		return -ENOMEM;

	d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
			pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
	if (!d)
		return -ENOMEM;

	return 0;
}
late_initcall(pm_genpd_debug_init);

static void __exit pm_genpd_debug_exit(void)
{
	debugfs_remove_recursive(pm_genpd_debugfs_dir);
}
__exitcall(pm_genpd_debug_exit);
#endif /* CONFIG_DEBUG_FS */