/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);
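/*
 * During suspend, devices flow from dpm_list through dpm_prepared_list,
 * dpm_suspended_list and dpm_late_early_list to dpm_noirq_list as each
 * phase completes; the resume phases walk them back the opposite way.
 */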

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static char *pm_verb(int event)
{
        switch (event) {
        case PM_EVENT_SUSPEND:
                return "suspend";
        case PM_EVENT_RESUME:
                return "resume";
        case PM_EVENT_FREEZE:
                return "freeze";
        case PM_EVENT_QUIESCE:
                return "quiesce";
        case PM_EVENT_HIBERNATE:
                return "hibernate";
        case PM_EVENT_THAW:
                return "thaw";
        case PM_EVENT_RESTORE:
                return "restore";
        case PM_EVENT_RECOVER:
                return "recover";
        default:
                return "(unknown PM event)";
        }
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
        dev->power.is_prepared = false;
        dev->power.is_suspended = false;
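        /*
         * Start with the completion set, so that dpm_wait() does not block
         * on this device before any PM transition has touched it.
         */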
        init_completion(&dev->power.completion);
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
        INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
        mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
        pr_debug("PM: Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        mutex_lock(&dpm_list_mtx);
        if (dev->parent && dev->parent->power.is_prepared)
                dev_warn(dev, "parent %s should not be sleeping\n",
                        dev_name(dev->parent));
        list_add_tail(&dev->power.entry, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
        pr_debug("PM: Removing info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        complete_all(&dev->power.completion);
        mutex_lock(&dpm_list_mtx);
        list_del_init(&dev->power.entry);
        mutex_unlock(&dpm_list_mtx);
        device_wakeup_disable(dev);
        pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s before %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert before devb. */
        list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s after %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert after devb. */
        list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
        pr_debug("PM: Moving %s:%s to end of list\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
        ktime_t calltime = ktime_set(0, 0);

        if (pm_print_times_enabled) {
                pr_info("calling  %s+ @ %i, parent: %s\n",
                        dev_name(dev), task_pid_nr(current),
                        dev->parent ? dev_name(dev->parent) : "none");
                calltime = ktime_get();
        }

        return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
                                  int error, pm_message_t state, char *info)
{
        ktime_t rettime;
        s64 nsecs;

        rettime = ktime_get();
        nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

        if (pm_print_times_enabled) {
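                /* nsecs >> 10 cheaply approximates division by NSEC_PER_USEC */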
                pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
                        error, (unsigned long long)nsecs >> 10);
        }

        trace_device_pm_report_time(dev, info, nsecs, pm_verb(state.event),
                                    error);
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
        if (!dev)
                return;

        if (async || (pm_async_enabled && dev->power.async_suspend))
                wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
        dpm_wait(dev, *((bool *)async_ptr));
        return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
        device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend;
        case PM_EVENT_RESUME:
                return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw;
        case PM_EVENT_RESTORE:
                return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while the returned callback is running.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
                                      pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_late;
        case PM_EVENT_RESUME:
                return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_late;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_late;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_early;
        case PM_EVENT_RESTORE:
                return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The device's driver will not receive interrupts while the returned
 * callback is running.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_noirq;
        case PM_EVENT_RESUME:
                return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_noirq;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_noirq;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_noirq;
        case PM_EVENT_RESTORE:
                return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
        dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
                ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
                ", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
                        int error)
{
        printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
                dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
        ktime_t calltime;
        u64 usecs64;
        int usecs;

        calltime = ktime_get();
        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
        do_div(usecs64, NSEC_PER_USEC);
        usecs = usecs64;
        if (usecs == 0)
                usecs = 1;
        pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
                info ?: "", info ? " " : "", pm_verb(state.event),
                usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
                            pm_message_t state, char *info)
{
        ktime_t calltime;
        int error;

        if (!cb)
                return 0;

        calltime = initcall_debug_start(dev);

        pm_dev_dbg(dev, state, info);
        error = cb(dev);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error, state, info);

        return error;
}

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Out;

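        /*
         * Look for a callback in the usual order of precedence: PM domain
         * first, then device type, class, bus, and finally the driver itself.
         */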
        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 Out:
        TRACE_RESUME(error);
        return error;
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_noirq_list)) {
                struct device *dev = to_device(dpm_noirq_list.next);
                int error;

                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_late_early_list);
                mutex_unlock(&dpm_list_mtx);

                error = device_resume_noirq(dev, state);
                if (error) {
                        suspend_stats.failed_resume_noirq++;
                        dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        pm_dev_err(dev, state, " noirq", error);
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "noirq");
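        /*
         * Device interrupts, cpuidle and cpufreq were stopped in
         * dpm_suspend_noirq(); bring them back in the reverse order.
         */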
        resume_device_irqs();
        cpuidle_resume();
        cpufreq_resume();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Out;

        if (dev->pm_domain) {
                info = "early power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "early type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "early class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "early bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "early driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 Out:
        TRACE_RESUME(error);

        pm_runtime_enable(dev);
        return error;
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
        ktime_t starttime = ktime_get();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.next);
                int error;

                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_suspended_list);
                mutex_unlock(&dpm_list_mtx);

                error = device_resume_early(dev, state);
                if (error) {
                        suspend_stats.failed_resume_early++;
                        dpm_save_failed_step(SUSPEND_RESUME_EARLY);
                        dpm_save_failed_dev(dev_name(dev));
                        pm_dev_err(dev, state, " early", error);
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
        dpm_resume_noirq(state);
        dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Complete;

        dpm_wait(dev->parent, async);
        device_lock(dev);

        /*
         * This is a fib.  But we'll allow new children to be added below
         * a resumed device, even if the device hasn't been completed yet.
         */
        dev->power.is_prepared = false;

        if (!dev->power.is_suspended)
                goto Unlock;

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Driver;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Driver;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Driver;
                } else if (dev->class->resume) {
                        info = "legacy class ";
                        callback = dev->class->resume;
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->resume) {
                        info = "legacy bus ";
                        callback = dev->bus->resume;
                        goto End;
                }
        }

 Driver:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

 End:
        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_suspended = false;

 Unlock:
        device_unlock(dev);

 Complete:
        complete_all(&dev->power.completion);

        TRACE_RESUME(error);

        return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);
        put_device(dev);
}

static bool is_async(struct device *dev)
{
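        /*
         * Use async only if it is enabled globally, requested by the device
         * and not excluded by an active pm_trace.
         */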
        return dev->power.async_suspend && pm_async_enabled
                && !pm_trace_is_enabled();
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

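        /*
         * Kick off all async-capable devices up front, so that they can
         * resume in parallel with the synchronous pass below.
         */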
        list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
                INIT_COMPLETION(dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume, dev);
                }
        }

        while (!list_empty(&dpm_suspended_list)) {
                dev = to_device(dpm_suspended_list.next);
                get_device(dev);
                if (!is_async(dev)) {
                        int error;

                        mutex_unlock(&dpm_list_mtx);

                        error = device_resume(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume++;
                                dpm_save_failed_step(SUSPEND_RESUME);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, "", error);
                        }

                        mutex_lock(&dpm_list_mtx);
                }
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
        void (*callback)(struct device *) = NULL;
        char *info = NULL;

        if (dev->power.syscore)
                return;

        device_lock(dev);

        if (dev->pm_domain) {
                info = "completing power domain ";
                callback = dev->pm_domain->ops.complete;
        } else if (dev->type && dev->type->pm) {
                info = "completing type ";
                callback = dev->type->pm->complete;
        } else if (dev->class && dev->class->pm) {
                info = "completing class ";
                callback = dev->class->pm->complete;
        } else if (dev->bus && dev->bus->pm) {
                info = "completing bus ";
                callback = dev->bus->pm->complete;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "completing driver ";
                callback = dev->driver->pm->complete;
        }

        if (callback) {
                pm_dev_dbg(dev, state, info);
                callback(dev);
        }

        device_unlock(dev);

        pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
        struct list_head list;

        might_sleep();

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                dev->power.is_prepared = false;
                list_move(&dev->power.entry, &list);
                mutex_unlock(&dpm_list_mtx);

                device_complete(dev, state);

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
        dpm_resume(state);
        dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
        switch (sleep_state.event) {
        case PM_EVENT_SUSPEND:
                return PMSG_RESUME;
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return PMSG_RECOVER;
        case PM_EVENT_HIBERNATE:
                return PMSG_RESTORE;
        }
        return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;

        if (dev->power.syscore)
                return 0;

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

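        /*
         * Suspending cpufreq here keeps it balanced with cpufreq_resume() in
         * dpm_resume_noirq(), which also runs in the error path below, so an
         * error during suspend does not leave cpufreq half suspended.
         */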
        cpufreq_suspend();
        cpuidle_pause();
        suspend_device_irqs();
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_noirq(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " noirq", error);
                        suspend_stats.failed_suspend_noirq++;
                        dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_noirq_list);
                put_device(dev);

                if (pm_wakeup_pending()) {
                        error = -EBUSY;
                        break;
                }
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_noirq(resume_event(state));
        else
                dpm_show_time(starttime, state, "noirq");
        return error;
}

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_suspend_late(struct device *dev, pm_message_t state)
{
        pm_callback_t callback = NULL;
        char *info = NULL;

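        /*
         * Runtime PM stays disabled from here until device_resume_early()
         * re-enables it on the way back up.
         */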
        __pm_runtime_disable(dev, false);

        if (dev->power.syscore)
                return 0;

        if (dev->pm_domain) {
                info = "late power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "late type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "late class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "late bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "late driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_suspended_list)) {
                struct device *dev = to_device(dpm_suspended_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_late(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " late", error);
                        suspend_stats.failed_suspend_late++;
                        dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_late_early_list);
                put_device(dev);

                if (pm_wakeup_pending()) {
                        error = -EBUSY;
                        break;
                }
        }
        mutex_unlock(&dpm_list_mtx);
        if (error)
                dpm_resume_early(resume_event(state));
        else
                dpm_show_time(starttime, state, "late");

        return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
        int error = dpm_suspend_late(state);
        if (error)
                return error;

        error = dpm_suspend_noirq(state);
        if (error) {
                dpm_resume_early(resume_event(state));
                return error;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
                          int (*cb)(struct device *dev, pm_message_t state),
                          char *info)
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        error = cb(dev, state);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error, state, info);

        return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        dpm_wait_for_children(dev, async);

        if (async_error)
                goto Complete;

        /*
         * If a device configured to wake up the system from sleep states
         * has been suspended at run time and there's a resume request pending
         * for it, this is equivalent to the device signaling wakeup, so the
         * system suspend operation should be aborted.
         */
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);

        if (pm_wakeup_pending()) {
                async_error = -EBUSY;
                goto Complete;
        }

        if (dev->power.syscore)
                goto Complete;

        device_lock(dev);

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Run;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Run;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Run;
                } else if (dev->class->suspend) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = legacy_suspend(dev, state, dev->class->suspend,
                                                "legacy class ");
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->suspend) {
                        pm_dev_dbg(dev, state, "legacy bus ");
                        error = legacy_suspend(dev, state, dev->bus->suspend,
                                                "legacy bus ");
                        goto End;
                }
        }

 Run:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 End:
        if (!error) {
                dev->power.is_suspended = true;
                if (dev->power.wakeup_path
                    && dev->parent && !dev->parent->power.ignore_children)
                        dev->parent->power.wakeup_path = true;
        }

        device_unlock(dev);

 Complete:
        complete_all(&dev->power.completion);
        if (error)
                async_error = error;

        return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }

        put_device(dev);
}

static int device_suspend(struct device *dev)
{
        INIT_COMPLETION(dev->power.completion);

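        /*
         * Async-capable devices are handed off to the async framework; the
         * rest are suspended synchronously right here.
         */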
        if (pm_async_enabled && dev->power.async_suspend) {
                get_device(dev);
                async_schedule(async_suspend, dev);
                return 0;
        }

        return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, "", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_suspended_list);
                put_device(dev);
                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (error) {
                suspend_stats.failed_suspend++;
                dpm_save_failed_step(SUSPEND_SUSPEND);
        } else
                dpm_show_time(starttime, state, NULL);
        return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
        int (*callback)(struct device *) = NULL;
        char *info = NULL;
        int error = 0;

        if (dev->power.syscore)
                return 0;

        /*
         * If a device's parent goes into runtime suspend at the wrong time,
         * it won't be possible to resume the device.  To prevent this we
         * block runtime suspend here, during the prepare phase, and allow
         * it again during the complete phase.
         */
        pm_runtime_get_noresume(dev);

        device_lock(dev);

        dev->power.wakeup_path = device_may_wakeup(dev);

        if (dev->pm_domain) {
                info = "preparing power domain ";
                callback = dev->pm_domain->ops.prepare;
        } else if (dev->type && dev->type->pm) {
                info = "preparing type ";
                callback = dev->type->pm->prepare;
        } else if (dev->class && dev->class->pm) {
                info = "preparing class ";
                callback = dev->class->pm->prepare;
        } else if (dev->bus && dev->bus->pm) {
                info = "preparing bus ";
                callback = dev->bus->pm->prepare;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "preparing driver ";
                callback = dev->driver->pm->prepare;
        }

        if (callback) {
                error = callback(dev);
                suspend_report_result(callback, error);
        }

        device_unlock(dev);

        return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
        int error = 0;

        might_sleep();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.next);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_prepare(dev, state);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        if (error == -EAGAIN) {
                                put_device(dev);
                                error = 0;
                                continue;
                        }
                        printk(KERN_INFO "PM: Device %s not prepared "
                                "for power transition: code %d\n",
                                dev_name(dev), error);
                        put_device(dev);
                        break;
                }
                dev->power.is_prepared = true;
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
        int error;

        error = dpm_prepare(state);
        if (error) {
                suspend_stats.failed_prepare++;
                dpm_save_failed_step(SUSPEND_PREPARE);
        } else
                error = dpm_suspend(state);
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
        if (ret)
                printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
        dpm_wait(dev, subordinate->power.async_suspend);
        return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
        struct device *dev;

        if (!fn)
                return;

        device_pm_lock();
        list_for_each_entry(dev, &dpm_list, power.entry)
                fn(dev, data);
        device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);