/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in dpm_list are in a depth-first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

static int async_error;

static const char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	device_pm_check_callbacks(dev);
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev->power.in_dpm_list = true;
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	dev->power.in_dpm_list = false;
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
	device_pm_check_callbacks(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

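/*
 * initcall_debug_start()/initcall_debug_report() bracket a PM callback
 * invocation when pm_print_times is enabled: the first logs the callback
 * about to be called and returns a timestamp, the second logs the result
 * and the elapsed time.  Note that the report approximates the nanosecond
 * delta as microseconds with a cheap ">> 10" rather than dividing by 1000.
 */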
static ktime_t initcall_debug_start(struct device *dev, void *cb)
{
	if (!pm_print_times_enabled)
		return 0;

	dev_info(dev, "calling %pF @ %i, parent: %s\n", cb,
		 task_pid_nr(current),
		 dev->parent ? dev_name(dev->parent) : "none");
	return ktime_get();
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  void *cb, int error)
{
	ktime_t rettime;
	s64 nsecs;

	if (!pm_print_times_enabled)
		return;

	rettime = ktime_get();
	nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

	dev_info(dev, "%pF returned %d after %Ld usecs\n", cb, error,
		 (unsigned long long)nsecs >> 10);
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

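/*
 * dpm_wait_fn() adapts dpm_wait() to the device_for_each_child() callback
 * signature, letting dpm_wait_for_children() wait for every child of @dev
 * that is being processed asynchronously.
 */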
static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * If the supplier goes away right after we've checked the link to it,
	 * we'll wait for its completion to change the state, but that's fine,
	 * because the only things that will block as a result are the SRCU
	 * callbacks freeing the link objects for the links in the list we're
	 * walking.
	 */
	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->supplier, async);

	device_links_read_unlock(idx);
}

static bool dpm_wait_for_superior(struct device *dev, bool async)
{
	struct device *parent;

	/*
	 * If the device is resumed asynchronously and the parent's callback
	 * deletes both the device and the parent itself, the parent object may
	 * be freed while this function is running, so avoid that by reference
	 * counting the parent once more unless the device has been deleted
	 * already (in which case return right away).
	 */
	mutex_lock(&dpm_list_mtx);

	if (!device_pm_initialized(dev)) {
		mutex_unlock(&dpm_list_mtx);
		return false;
	}

	parent = get_device(dev->parent);

	mutex_unlock(&dpm_list_mtx);

	dpm_wait(parent, async);
	put_device(parent);

	dpm_wait_for_suppliers(dev, async);

	/*
	 * If the parent's callback has deleted the device, attempting to resume
	 * it would be invalid, so avoid doing that then.
	 */
	return device_pm_initialized(dev);
}

static void dpm_wait_for_consumers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * The status of a device link can only be changed from "dormant" by a
	 * probe, but that cannot happen during system suspend/resume.  In
	 * theory it can change to "dormant" at that time, but then it is
	 * reasonable to wait for the target device anyway (eg. if it goes
	 * away, it's better to wait for it to go away completely and then
	 * continue instead of trying to continue in parallel with its
	 * unregistration).
	 */
	list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->consumer, async);

	device_links_read_unlock(idx);
}

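/*
 * Wait for everything that depends on @dev: its children in dpm_list and
 * the consumers of any device links for which @dev is the supplier.
 */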
static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
	dpm_wait_for_children(dev, async);
	dpm_wait_for_consumers(dev, async);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
			  const char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;

	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
		  info ?: "", info ? " " : "", pm_verb(state.event),
		  error ? "aborted" : "complete",
		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

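/*
 * dpm_run_callback() is the single choke point through which the PM core
 * invokes device callbacks: it is a no-op for a NULL callback and wraps
 * the call with debug output, tracepoints, initcall-style timing and
 * suspend error reporting.
 */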
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, const char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev, cb);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
	struct device		*dev;
	struct task_struct	*tsk;
	struct timer_list	timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(struct timer_list *t)
{
	struct dpm_watchdog *wd = from_timer(wd, t, timer);

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif

/*------------------------- Resume routines -------------------------*/

/**
 * dev_pm_skip_next_resume_phases - Skip next system resume phases for device.
 * @dev: Target device.
 *
 * Make the core skip the "early resume" and "resume" phases for @dev.
 *
 * This function can be called by middle-layer code during the "noirq" phase of
 * system resume if necessary, but not by device drivers.
 */
void dev_pm_skip_next_resume_phases(struct device *dev)
{
	dev->power.is_late_suspended = false;
	dev->power.is_suspended = false;
}

/**
 * suspend_event - Return a "suspend" message for given "resume" one.
 * @resume_msg: PM message representing a system-wide resume transition.
 */
static pm_message_t suspend_event(pm_message_t resume_msg)
{
	switch (resume_msg.event) {
	case PM_EVENT_RESUME:
		return PMSG_SUSPEND;
	case PM_EVENT_THAW:
	case PM_EVENT_RESTORE:
		return PMSG_FREEZE;
	case PM_EVENT_RECOVER:
		return PMSG_HIBERNATE;
	}
	return PMSG_ON;
}

/**
 * dev_pm_may_skip_resume - System-wide device resume optimization check.
 * @dev: Target device.
 *
 * Checks whether or not the device may be left in suspend after a system-wide
 * transition to the working state.
 */
bool dev_pm_may_skip_resume(struct device *dev)
{
	return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
}

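/*
 * dpm_subsys_resume_noirq_cb() and the other dpm_subsys_*_cb() helpers in
 * this file all use the same lookup order for a subsystem-level callback:
 * power domain first, then device type, then class, then bus.  Only the
 * first layer providing a dev_pm_ops table is consulted; NULL is returned
 * when none of them does, so the caller may fall back to the driver.
 */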
static pm_callback_t dpm_subsys_resume_noirq_cb(struct device *dev,
						pm_message_t state,
						const char **info_p)
{
	pm_callback_t callback;
	const char *info;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	} else {
		return NULL;
	}

	if (info_p)
		*info_p = info;

	return callback;
}

static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
						 pm_message_t state,
						 const char **info_p);

static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
						pm_message_t state,
						const char **info_p);

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback;
	const char *info;
	bool skip_resume;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	if (!dpm_wait_for_superior(dev, async))
		goto Out;

	skip_resume = dev_pm_may_skip_resume(dev);

	callback = dpm_subsys_resume_noirq_cb(dev, state, &info);
	if (callback)
		goto Run;

	if (skip_resume)
		goto Skip;

	if (dev_pm_smart_suspend_and_suspended(dev)) {
		pm_message_t suspend_msg = suspend_event(state);

		/*
		 * If "freeze" callbacks have been skipped during a transition
		 * related to hibernation, the subsequent "thaw" callbacks must
		 * be skipped too or bad things may happen.  Otherwise, resume
		 * callbacks are going to be run for the device, so its runtime
		 * PM status must be changed to reflect the new state after the
		 * transition under way.
		 */
		if (!dpm_subsys_suspend_late_cb(dev, suspend_msg, NULL) &&
		    !dpm_subsys_suspend_noirq_cb(dev, suspend_msg, NULL)) {
			if (state.event == PM_EVENT_THAW) {
				skip_resume = true;
				goto Skip;
			} else {
				pm_runtime_set_active(dev);
			}
		}
	}

	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);

Skip:
	dev->power.is_noirq_suspended = false;

	if (skip_resume) {
		/*
		 * The device is going to be left in suspend, but it might not
		 * have been in runtime suspend before the system suspended, so
		 * its runtime PM status needs to be updated to avoid confusing
		 * the runtime PM framework when runtime PM is enabled for the
		 * device again.
		 */
		pm_runtime_set_suspended(dev);
		dev_pm_skip_next_resume_phases(dev);
	}

Out:
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}

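/*
 * A device is processed asynchronously only if that was requested via its
 * power.async_suspend flag, the pm_async knob is set, and PM trace is not
 * active (PM trace needs strictly ordered, synchronous processing to
 * identify the device a hang occurred in).
 */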
static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

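/**
 * dpm_noirq_resume_devices - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */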
void dpm_noirq_resume_devices(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront, in case the starting of async
	 * threads is delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_noirq, dev);
		}
	}

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "noirq");
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}

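/*
 * dpm_noirq_end() undoes the preparations made by dpm_noirq_begin():
 * device interrupt handlers are re-enabled, wakeup IRQs are disarmed and
 * cpuidle is allowed to run again.
 */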
void dpm_noirq_end(void)
{
	resume_device_irqs();
	device_wakeup_disarm_wake_irqs();
	cpuidle_resume();
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
 * allow device drivers' interrupt handlers to be called.
 */
void dpm_resume_noirq(pm_message_t state)
{
	dpm_noirq_resume_devices(state);
	dpm_noirq_end();
}

static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
						pm_message_t state,
						const char **info_p)
{
	pm_callback_t callback;
	const char *info;

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	} else {
		return NULL;
	}

	if (info_p)
		*info_p = info;

	return callback;
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback;
	const char *info;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	if (!dpm_wait_for_superior(dev, async))
		goto Out;

	callback = dpm_subsys_resume_early_cb(dev, state, &info);

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_late_suspended = false;

Out:
	TRACE_RESUME(error);

	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront, in case the starting of async
	 * threads is delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume_early, dev);
		}
	}

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "early");
	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	if (!dpm_wait_for_superior(dev, async))
		goto Complete;

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib.  But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Driver;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

Complete:
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, NULL);

	cpufreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	const char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);

	/* Allow device probing and trigger re-probing of deferred devices */
	device_unblock_probing();
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

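/*
 * If @dev must be resumed, everything it depends on has to come back too:
 * mark its parent and all of its suppliers as must_resume so that the
 * "leave in suspend" optimization is not applied to them either.
 */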
static void dpm_superior_set_must_resume(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent)
		dev->parent->power.must_resume = true;

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		link->supplier->power.must_resume = true;

	device_links_read_unlock(idx);
}

static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
						 pm_message_t state,
						 const char **info_p)
{
	pm_callback_t callback;
	const char *info;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	} else {
		return NULL;
	}

	if (info_p)
		*info_p = info;

	return callback;
}

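/*
 * device_must_resume() decides whether @dev really has to be resumed.  If
 * no middle-layer callbacks are involved in any of the relevant phases,
 * the decision can be based on the device's runtime PM status and wakeup
 * settings; otherwise the only safe choice is to honor the driver's
 * may_skip_resume request.
 */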
static bool device_must_resume(struct device *dev, pm_message_t state,
			       bool no_subsys_suspend_noirq)
{
	pm_message_t resume_msg = resume_event(state);

	/*
	 * If all of the device driver's "noirq", "late" and "early" callbacks
	 * are invoked directly by the core, the decision to allow the device to
	 * stay in suspend can be based on its current runtime PM status and its
	 * wakeup settings.
	 */
	if (no_subsys_suspend_noirq &&
	    !dpm_subsys_suspend_late_cb(dev, state, NULL) &&
	    !dpm_subsys_resume_early_cb(dev, resume_msg, NULL) &&
	    !dpm_subsys_resume_noirq_cb(dev, resume_msg, NULL))
		return !pm_runtime_status_suspended(dev) &&
			(resume_msg.event != PM_EVENT_RESUME ||
			 (device_can_wakeup(dev) && !device_may_wakeup(dev)));

	/*
	 * The only safe strategy here is to require that if the device may not
	 * be left in suspend, resume callbacks must be invoked for it.
	 */
	return !dev->power.may_skip_resume;
}

/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback;
	const char *info;
	bool no_subsys_cb = false;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	callback = dpm_subsys_suspend_noirq_cb(dev, state, &info);
	if (callback)
		goto Run;

	no_subsys_cb = !dpm_subsys_suspend_late_cb(dev, state, NULL);

	if (dev_pm_smart_suspend_and_suspended(dev) && no_subsys_cb)
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		async_error = error;
		goto Complete;
	}

Skip:
	dev->power.is_noirq_suspended = true;

	if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
		dev->power.must_resume = dev->power.must_resume ||
				atomic_read(&dev->power.usage_count) > 1 ||
				device_must_resume(dev, state, no_subsys_cb);
	} else {
		dev->power.must_resume = true;
	}

	if (dev->power.must_resume)
		dpm_superior_set_must_resume(dev);

Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_noirq(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

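/*
 * device_suspend_noirq() either hands the device off to an async thread
 * (the reference taken here is dropped by async_suspend_noirq()) or runs
 * the "noirq" suspend callback synchronously.
 */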
static int device_suspend_noirq(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_noirq, dev);
		return 0;
	}
	return __device_suspend_noirq(dev, pm_transition, false);
}

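/*
 * dpm_noirq_begin() pauses cpuidle, arms the wakeup IRQs and disables
 * device interrupt handlers before the "noirq" suspend callbacks are run;
 * dpm_noirq_end() reverses these steps during resume.
 */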
void dpm_noirq_begin(void)
{
	cpuidle_pause();
	device_wakeup_arm_wake_irqs();
	suspend_device_irqs();
}

int dpm_noirq_suspend_devices(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
	}
	dpm_show_time(starttime, state, error, "noirq");
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers' interrupt handlers from being called and invoke
 * "noirq" suspend callbacks for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
	int ret;

	dpm_noirq_begin();
	ret = dpm_noirq_suspend_devices(state);
	if (ret)
		dpm_resume_noirq(resume_event(state));

	return ret;
}

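/*
 * Propagate a device's wakeup_path flag up to its parent, unless the
 * parent ignores its children, so that every device along the path to a
 * wakeup source is known to the PM core.
 */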
static void dpm_propagate_wakeup_to_parent(struct device *dev)
{
	struct device *parent = dev->parent;

	if (!parent)
		return;

	spin_lock_irq(&parent->power.lock);

	if (dev->power.wakeup_path && !parent->power.ignore_children)
		parent->power.wakeup_path = true;

	spin_unlock_irq(&parent->power.lock);
}

static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
						pm_message_t state,
						const char **info_p)
{
	pm_callback_t callback;
	const char *info;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	} else {
		return NULL;
	}

	if (info_p)
		*info_p = info;

	return callback;
}

/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback;
	const char *info;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	__pm_runtime_disable(dev, false);

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
		goto Complete;

	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	callback = dpm_subsys_suspend_late_cb(dev, state, &info);
	if (callback)
		goto Run;

	if (dev_pm_smart_suspend_and_suspended(dev) &&
	    !dpm_subsys_suspend_noirq_cb(dev, state, NULL))
		goto Skip;

	if (dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

Run:
	error = dpm_run_callback(callback, dev, state, info);
	if (error) {
		async_error = error;
		goto Complete;
	}
	dpm_propagate_wakeup_to_parent(dev);

Skip:
	dev->power.is_late_suspended = true;

Complete:
	TRACE_SUSPEND(error);
	complete_all(&dev->power.completion);
	return error;
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend_late(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}
	put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
	reinit_completion(&dev->power.completion);

	if (is_async(dev)) {
		get_device(dev);
		async_schedule(async_suspend_late, dev);
		return 0;
	}

	return __device_suspend_late(dev, pm_transition, false);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev);

		mutex_lock(&dpm_list_mtx);
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);

		if (error) {
			pm_dev_err(dev, state, " late", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		put_device(dev);

		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend_late++;
		dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
		dpm_resume_early(resume_event(state));
	}
	dpm_show_time(starttime, state, error, "late");
	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state),
			  const char *info)
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev, cb);

	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev, state);
	trace_device_pm_callback_end(dev, error);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, cb, error);

	return error;
}

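/*
 * After @dev has been suspended the normal way, its parent and suppliers
 * may need their own suspend callbacks run as well, so clear their
 * direct_complete flags to prevent the "direct complete" optimization
 * from being applied to them.
 */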
static void dpm_clear_superiors_direct_complete(struct device *dev)
{
	struct device_link *link;
	int idx;

	if (dev->parent) {
		spin_lock_irq(&dev->parent->power.lock);
		dev->parent->power.direct_complete = false;
		spin_unlock_irq(&dev->parent->power.lock);
	}

	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
		spin_lock_irq(&link->supplier->power.lock);
		link->supplier->power.direct_complete = false;
		spin_unlock_irq(&link->supplier->power.lock);
	}

	device_links_read_unlock(idx);
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	dpm_wait_for_subordinate(dev, async);

	if (async_error) {
		dev->power.direct_complete = false;
		goto Complete;
	}

	/*
	 * Wait for possible runtime PM transitions of the device in progress
	 * to complete and if there's a runtime resume request pending for it,
	 * resume it before proceeding with invoking the system-wide suspend
	 * callbacks for it.
	 *
	 * If the system-wide suspend callbacks below change the configuration
	 * of the device, they must disable runtime PM for it or otherwise
	 * ensure that its runtime-resume callbacks will not be confused by that
	 * change in case they are invoked going forward.
	 */
	pm_runtime_barrier(dev);

	if (pm_wakeup_pending()) {
		dev->power.direct_complete = false;
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore)
		goto Complete;

	/* Avoid direct_complete to let wakeup_path propagate. */
	if (device_may_wakeup(dev) || dev->power.wakeup_path)
		dev->power.direct_complete = false;

	if (dev->power.direct_complete) {
		if (pm_runtime_status_suspended(dev)) {
			pm_runtime_disable(dev);
			if (pm_runtime_status_suspended(dev))
				goto Complete;

			pm_runtime_enable(dev);
		}
		dev->power.direct_complete = false;
	}

	dev->power.may_skip_resume = false;
	dev->power.must_resume = false;

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class && dev->class->pm) {
		info = "class ";
		callback = pm_op(dev->class->pm, state);
		goto Run;
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend,
						"legacy bus ");
			goto End;
		}
	}

Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

End:
	if (!error) {
		dev->power.is_suspended = true;
		if (device_may_wakeup(dev))
			dev->power.wakeup_path = true;

		dpm_propagate_wakeup_to_parent(dev);
		dpm_clear_superiors_direct_complete(dev);
	}

	device_unlock(dev);
	dpm_watchdog_clear(&wd);

Complete:
	if (error)
		async_error = error;

	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

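/*
 * device_suspend() mirrors device_suspend_noirq() and device_suspend_late():
 * async-capable devices are handed off to an async thread (the reference
 * taken here is dropped by async_suspend()), everything else is suspended
 * synchronously in list order.
 */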
device_suspend(struct device * dev)1870 static int device_suspend(struct device *dev)
1871 {
1872 reinit_completion(&dev->power.completion);
1873
1874 if (is_async(dev)) {
1875 get_device(dev);
1876 async_schedule(async_suspend, dev);
1877 return 0;
1878 }
1879
1880 return __device_suspend(dev, pm_transition, false);
1881 }
1882
/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
	might_sleep();

	cpufreq_suspend();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	}
	dpm_show_time(starttime, state, error, NULL);
	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
	return error;
}

/**
 * device_prepare - Prepare a device for a system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for the given device. No new children
 * of the device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	int ret = 0;

	if (dev->power.syscore)
		return 0;

	WARN_ON(!pm_runtime_enabled(dev) &&
		dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
					      DPM_FLAG_LEAVE_SUSPENDED));

	/*
	 * If a device's parent goes into runtime suspend at the wrong time,
	 * it won't be possible to resume the device. To prevent this we
	 * block runtime suspend here, during the prepare phase, and allow
	 * it again during the complete phase.
	 */
	pm_runtime_get_noresume(dev);

	device_lock(dev);

	dev->power.wakeup_path = false;

	if (dev->power.no_pm_callbacks)
		goto unlock;

	if (dev->pm_domain)
		callback = dev->pm_domain->ops.prepare;
	else if (dev->type && dev->type->pm)
		callback = dev->type->pm->prepare;
	else if (dev->class && dev->class->pm)
		callback = dev->class->pm->prepare;
	else if (dev->bus && dev->bus->pm)
		callback = dev->bus->pm->prepare;

	if (!callback && dev->driver && dev->driver->pm)
		callback = dev->driver->pm->prepare;

	if (callback)
		ret = callback(dev);

unlock:
	device_unlock(dev);

	if (ret < 0) {
		suspend_report_result(callback, ret);
		pm_runtime_put(dev);
		return ret;
	}
	/*
	 * A positive return value from ->prepare() means "this device appears
	 * to be runtime-suspended and its state is fine, so if it really is
	 * runtime-suspended, you can leave it in that state provided that you
	 * will do the same thing with all of its descendants". This only
	 * applies to suspend transitions, however.
	 */
	spin_lock_irq(&dev->power.lock);
	dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
		((pm_runtime_suspended(dev) && ret > 0) ||
		 dev->power.no_pm_callbacks) &&
		!dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
	spin_unlock_irq(&dev->power.lock);
	return 0;
}
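
/*
 * Illustrative sketch (not part of this file): the positive-return
 * convention documented above lets a driver opt in to the "direct
 * complete" shortcut, in which a runtime-suspended device is left alone
 * for the whole system transition. A hypothetical ->prepare() might be:
 *
 *	static int foo_prepare(struct device *dev)
 *	{
 *		// Runtime suspend already quiesced everything, so allow
 *		// the core to skip the remaining suspend/resume callbacks
 *		// if the device is (and stays) runtime-suspended.
 *		return pm_runtime_suspended(dev);
 *	}
 *
 * Drivers that must always see their ->suspend()/->resume() callbacks can
 * defeat the shortcut instead by setting DPM_FLAG_NEVER_SKIP.
 */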

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
	might_sleep();

	/*
	 * Give the known devices a chance to complete their probes before
	 * we disable further probing. This synchronization point is
	 * important at least at boot time and during hibernation restore.
	 */
	wait_for_device_probe();
	/*
	 * Probing a device during suspend or hibernation is unsafe and
	 * would make system behavior unpredictable, so prohibit device
	 * probing here and defer any probes instead. The normal behavior
	 * is restored in dpm_complete().
	 */
	device_block_probing();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		error = device_prepare(dev, state);
		trace_device_pm_callback_end(dev, error);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared for power transition: code %d\n",
			       dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute
 * "suspend" callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else {
		error = dpm_suspend(state);
	}
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
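
/*
 * Illustrative sketch (not part of this file): the system sleep core is
 * the expected caller of dpm_suspend_start(), and on failure it must
 * unwind with the matching resume-side calls, roughly along these lines
 * (simplified from the pattern used by the suspend core):
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);
 *	if (error) {
 *		pr_err("Some devices failed to suspend\n");
 *		dpm_resume_end(PMSG_RESUME);	// dpm_resume() + dpm_complete()
 *		return error;
 *	}
 */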

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);
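
/*
 * Illustrative sketch (not part of this file): __suspend_report_result()
 * is normally reached through the suspend_report_result() wrapper macro,
 * which supplies the calling function's name automatically, e.g. in a
 * hypothetical legacy suspend path:
 *
 *	error = drv->suspend(dev, state);	// hypothetical callback
 *	suspend_report_result(drv->suspend, error);
 */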

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
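
/*
 * Illustrative sketch (not part of this file): a driver whose ordering
 * depends on a device outside its parent/child chain can serialize
 * against it explicitly, e.g. in a hypothetical suspend callback, where
 * "companion" is some other struct device * the driver holds a
 * reference to:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		int error = device_pm_wait_for_dev(dev, companion);
 *
 *		if (error)
 *			return error;
 *		// companion has finished the current phase; proceed
 *		return 0;
 *	}
 *
 * Note that the return value is the PM core's async_error, so a failure
 * of any asynchronously suspending device aborts this one as well.
 */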

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
	struct device *dev;

	if (!fn)
		return;

	device_pm_lock();
	list_for_each_entry(dev, &dpm_list, power.entry)
		fn(dev, data);
	device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);
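
/*
 * Illustrative sketch (not part of this file): dpm_for_each_dev() runs
 * the callback with dpm_list_mtx held, so the callback must not try to
 * acquire a device lock or re-enter the PM core. A simple counting walk
 * might look like this (names are made up for illustration):
 *
 *	static void foo_count_one(struct device *dev, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *	}
 *
 *	unsigned int count = 0;
 *	dpm_for_each_dev(&count, foo_count_one);
 */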

static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
	if (!ops)
		return true;

	return !ops->prepare &&
	       !ops->suspend &&
	       !ops->suspend_late &&
	       !ops->suspend_noirq &&
	       !ops->resume_noirq &&
	       !ops->resume_early &&
	       !ops->resume &&
	       !ops->complete;
}

void device_pm_check_callbacks(struct device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->power.lock, flags);
	dev->power.no_pm_callbacks =
		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
		 !dev->bus->suspend && !dev->bus->resume)) &&
		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
		 !dev->driver->suspend && !dev->driver->resume));
	spin_unlock_irqrestore(&dev->power.lock, flags);
}

bool dev_pm_smart_suspend_and_suspended(struct device *dev)
{
	return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
		pm_runtime_status_suspended(dev);
}

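/*
 * Illustrative sketch (not part of this file): bus-level code (PCI, for
 * example) uses this predicate in its late/noirq suspend paths to skip
 * further callbacks when the driver set DPM_FLAG_SMART_SUSPEND and the
 * device is already runtime-suspended, roughly:
 *
 *	if (dev_pm_smart_suspend_and_suspended(dev))
 *		return 0;	// already suspended; nothing more to do
 */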