Lines matching refs:dev in drivers/base/power/main.c (the Linux kernel's system sleep PM core)

Numbers are line positions within main.c; gaps are lines that do not reference dev. Excerpts are grouped by the function that contains them.

92 void device_pm_sleep_init(struct device *dev)
94 dev->power.is_prepared = false;
95 dev->power.is_suspended = false;
96 dev->power.is_noirq_suspended = false;
97 dev->power.is_late_suspended = false;
98 init_completion(&dev->power.completion);
99 complete_all(&dev->power.completion);
100 dev->power.wakeup = NULL;
101 INIT_LIST_HEAD(&dev->power.entry);

124 void device_pm_add(struct device *dev)
127 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
128 device_pm_check_callbacks(dev);
130 if (dev->parent && dev->parent->power.is_prepared)
131 dev_warn(dev, "parent %s should not be sleeping\n",
132 dev_name(dev->parent));
133 list_add_tail(&dev->power.entry, &dpm_list);
134 dev->power.in_dpm_list = true;

142 void device_pm_remove(struct device *dev)
145 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
146 complete_all(&dev->power.completion);
148 list_del_init(&dev->power.entry);
149 dev->power.in_dpm_list = false;
151 device_wakeup_disable(dev);
152 pm_runtime_remove(dev);
153 device_pm_check_callbacks(dev);

188 void device_pm_move_last(struct device *dev)
191 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
192 list_move_tail(&dev->power.entry, &dpm_list);

195 static ktime_t initcall_debug_start(struct device *dev, void *cb)
200 dev_info(dev, "calling %pF @ %i, parent: %s\n", cb,
202 dev->parent ? dev_name(dev->parent) : "none");

206 static void initcall_debug_report(struct device *dev, ktime_t calltime,
218 dev_info(dev, "%pF returned %d after %Ld usecs\n", cb, error,

227 static void dpm_wait(struct device *dev, bool async)
229 if (!dev)
232 if (async || (pm_async_enabled && dev->power.async_suspend))
233 wait_for_completion(&dev->power.completion);

236 static int dpm_wait_fn(struct device *dev, void *async_ptr)
238 dpm_wait(dev, *((bool *)async_ptr));

242 static void dpm_wait_for_children(struct device *dev, bool async)
244 device_for_each_child(dev, &async, dpm_wait_fn);

247 static void dpm_wait_for_suppliers(struct device *dev, bool async)
261 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)

268 static bool dpm_wait_for_superior(struct device *dev, bool async)
281 if (!device_pm_initialized(dev)) {
286 parent = get_device(dev->parent);
293 dpm_wait_for_suppliers(dev, async);
299 return device_pm_initialized(dev);

302 static void dpm_wait_for_consumers(struct device *dev, bool async)
318 list_for_each_entry_rcu(link, &dev->links.consumers, s_node)

325 static void dpm_wait_for_subordinate(struct device *dev, bool async)
327 dpm_wait_for_children(dev, async);
328 dpm_wait_for_consumers(dev, async);
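
The block above is the heart of the PM core's ordering: every device carries a struct completion that dpm_wait() blocks on and complete_all() signals, so parents, suppliers, children and consumers can be sequenced without a global lock. A minimal sketch of the same pattern (illustrative only; struct node and node_phase are not from main.c, and the completion is assumed to have been set up with init_completion() and re-armed before each phase):

	#include <linux/completion.h>

	struct node {
		struct completion done;		/* re-armed before each phase */
		struct node *parent;
	};

	static void node_phase(struct node *n, void (*op)(struct node *))
	{
		if (n->parent)			/* like dpm_wait() */
			wait_for_completion(&n->parent->done);
		op(n);				/* run this phase's callback */
		complete_all(&n->done);		/* release every waiter */
	}
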
431 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
433 dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
434 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?

438 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
442 dev_name(dev), pm_verb(state.event), info, error);

465 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
474 calltime = initcall_debug_start(dev, cb);
476 pm_dev_dbg(dev, state, info);
477 trace_device_pm_callback_start(dev, info, state.event);
478 error = cb(dev);
479 trace_device_pm_callback_end(dev, error);
482 initcall_debug_report(dev, calltime, cb, error);

in struct dpm_watchdog:
489 struct device *dev;

in dpm_watchdog_handler():
509 dev_emerg(wd->dev, "**** DPM device timeout ****\n");
512 dev_driver_string(wd->dev), dev_name(wd->dev));

520 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
524 wd->dev = dev;

561 void dev_pm_skip_next_resume_phases(struct device *dev)
563 dev->power.is_late_suspended = false;
564 dev->power.is_suspended = false;

592 bool dev_pm_may_skip_resume(struct device *dev)
594 return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;

597 static pm_callback_t dpm_subsys_resume_noirq_cb(struct device *dev,
604 if (dev->pm_domain) {
606 callback = pm_noirq_op(&dev->pm_domain->ops, state);
607 } else if (dev->type && dev->type->pm) {
609 callback = pm_noirq_op(dev->type->pm, state);
610 } else if (dev->class && dev->class->pm) {
612 callback = pm_noirq_op(dev->class->pm, state);
613 } else if (dev->bus && dev->bus->pm) {
615 callback = pm_noirq_op(dev->bus->pm, state);
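
dpm_subsys_resume_noirq_cb() encodes the fixed callback precedence: PM domain first, then device type, then class, then bus, with the driver's own ops used only as a fallback (line 693 below). What the lookup ultimately finds is a struct dev_pm_ops that a driver published; a hedged example (the foo_* names are illustrative):

	#include <linux/pm.h>

	static int foo_suspend_noirq(struct device *dev) { return 0; }
	static int foo_resume_noirq(struct device *dev)  { return 0; }

	static const struct dev_pm_ops foo_pm_ops = {
		/*
		 * Expands to .suspend_noirq/.resume_noirq (plus the
		 * hibernation counterparts) when CONFIG_PM_SLEEP is set.
		 */
		SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_suspend_noirq, foo_resume_noirq)
	};
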
626 static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
630 static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,

643 static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
650 TRACE_DEVICE(dev);
653 if (dev->power.syscore || dev->power.direct_complete)
656 if (!dev->power.is_noirq_suspended)
659 if (!dpm_wait_for_superior(dev, async))
662 skip_resume = dev_pm_may_skip_resume(dev);
664 callback = dpm_subsys_resume_noirq_cb(dev, state, &info);
671 if (dev_pm_smart_suspend_and_suspended(dev)) {
682 if (!dpm_subsys_suspend_late_cb(dev, suspend_msg, NULL) &&
683 !dpm_subsys_suspend_noirq_cb(dev, suspend_msg, NULL)) {
688 pm_runtime_set_active(dev);
693 if (dev->driver && dev->driver->pm) {
695 callback = pm_noirq_op(dev->driver->pm, state);
699 error = dpm_run_callback(callback, dev, state, info);
702 dev->power.is_noirq_suspended = false;
712 pm_runtime_set_suspended(dev);
713 dev_pm_skip_next_resume_phases(dev);
717 complete_all(&dev->power.completion);

722 static bool is_async(struct device *dev)
724 return dev->power.async_suspend && pm_async_enabled

in async_resume_noirq():
730 struct device *dev = (struct device *)data;
733 error = device_resume_noirq(dev, pm_transition, true);
735 pm_dev_err(dev, pm_transition, " async", error);
737 put_device(dev);

in dpm_noirq_resume_devices():
742 struct device *dev;
754 list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
755 reinit_completion(&dev->power.completion);
756 if (is_async(dev)) {
757 get_device(dev);
758 async_schedule(async_resume_noirq, dev);
763 dev = to_device(dpm_noirq_list.next);
764 get_device(dev);
765 list_move_tail(&dev->power.entry, &dpm_late_early_list);
768 if (!is_async(dev)) {
771 error = device_resume_noirq(dev, state, false);
775 dpm_save_failed_dev(dev_name(dev));
776 pm_dev_err(dev, state, " noirq", error);
781 put_device(dev);
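
Asynchronous handling is opt-in per device: is_async() (line 722) checks dev->power.async_suspend together with the global pm_async_enabled switch. Drivers or buses typically opt in at probe time; a hedged sketch (foo_probe is illustrative):

	#include <linux/device.h>

	static int foo_probe(struct device *dev)
	{
		/*
		 * Let the PM core run this device's phases from an
		 * async_schedule() worker; the per-device completions
		 * still order it against parents and children.
		 */
		device_enable_async_suspend(dev);
		return 0;
	}
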
809 static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
816 if (dev->pm_domain) {
818 callback = pm_late_early_op(&dev->pm_domain->ops, state);
819 } else if (dev->type && dev->type->pm) {
821 callback = pm_late_early_op(dev->type->pm, state);
822 } else if (dev->class && dev->class->pm) {
824 callback = pm_late_early_op(dev->class->pm, state);
825 } else if (dev->bus && dev->bus->pm) {
827 callback = pm_late_early_op(dev->bus->pm, state);

846 static int device_resume_early(struct device *dev, pm_message_t state, bool async)
852 TRACE_DEVICE(dev);
855 if (dev->power.syscore || dev->power.direct_complete)
858 if (!dev->power.is_late_suspended)
861 if (!dpm_wait_for_superior(dev, async))
864 callback = dpm_subsys_resume_early_cb(dev, state, &info);
866 if (!callback && dev->driver && dev->driver->pm) {
868 callback = pm_late_early_op(dev->driver->pm, state);
871 error = dpm_run_callback(callback, dev, state, info);
872 dev->power.is_late_suspended = false;
877 pm_runtime_enable(dev);
878 complete_all(&dev->power.completion);

in async_resume_early():
884 struct device *dev = (struct device *)data;
887 error = device_resume_early(dev, pm_transition, true);
889 pm_dev_err(dev, pm_transition, " async", error);
891 put_device(dev);

in dpm_resume_early():
900 struct device *dev;
912 list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
913 reinit_completion(&dev->power.completion);
914 if (is_async(dev)) {
915 get_device(dev);
916 async_schedule(async_resume_early, dev);
921 dev = to_device(dpm_late_early_list.next);
922 get_device(dev);
923 list_move_tail(&dev->power.entry, &dpm_suspended_list);
926 if (!is_async(dev)) {
929 error = device_resume_early(dev, state, false);
933 dpm_save_failed_dev(dev_name(dev));
934 pm_dev_err(dev, state, " early", error);
938 put_device(dev);

963 static int device_resume(struct device *dev, pm_message_t state, bool async)
970 TRACE_DEVICE(dev);
973 if (dev->power.syscore)
976 if (dev->power.direct_complete) {
978 pm_runtime_enable(dev);
982 if (!dpm_wait_for_superior(dev, async))
985 dpm_watchdog_set(&wd, dev);
986 device_lock(dev);
992 dev->power.is_prepared = false;
994 if (!dev->power.is_suspended)
997 if (dev->pm_domain) {
999 callback = pm_op(&dev->pm_domain->ops, state);
1003 if (dev->type && dev->type->pm) {
1005 callback = pm_op(dev->type->pm, state);
1009 if (dev->class && dev->class->pm) {
1011 callback = pm_op(dev->class->pm, state);
1015 if (dev->bus) {
1016 if (dev->bus->pm) {
1018 callback = pm_op(dev->bus->pm, state);
1019 } else if (dev->bus->resume) {
1021 callback = dev->bus->resume;
1027 if (!callback && dev->driver && dev->driver->pm) {
1029 callback = pm_op(dev->driver->pm, state);
1033 error = dpm_run_callback(callback, dev, state, info);
1034 dev->power.is_suspended = false;
1037 device_unlock(dev);
1041 complete_all(&dev->power.completion);

in async_resume():
1050 struct device *dev = (struct device *)data;
1053 error = device_resume(dev, pm_transition, true);
1055 pm_dev_err(dev, pm_transition, " async", error);
1056 put_device(dev);

in dpm_resume():
1068 struct device *dev;
1078 list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
1079 reinit_completion(&dev->power.completion);
1080 if (is_async(dev)) {
1081 get_device(dev);
1082 async_schedule(async_resume, dev);
1087 dev = to_device(dpm_suspended_list.next);
1088 get_device(dev);
1089 if (!is_async(dev)) {
1094 error = device_resume(dev, state, false);
1098 dpm_save_failed_dev(dev_name(dev));
1099 pm_dev_err(dev, state, "", error);
1104 if (!list_empty(&dev->power.entry))
1105 list_move_tail(&dev->power.entry, &dpm_prepared_list);
1106 put_device(dev);

1121 static void device_complete(struct device *dev, pm_message_t state)
1126 if (dev->power.syscore)
1129 device_lock(dev);
1131 if (dev->pm_domain) {
1133 callback = dev->pm_domain->ops.complete;
1134 } else if (dev->type && dev->type->pm) {
1136 callback = dev->type->pm->complete;
1137 } else if (dev->class && dev->class->pm) {
1139 callback = dev->class->pm->complete;
1140 } else if (dev->bus && dev->bus->pm) {
1142 callback = dev->bus->pm->complete;
1145 if (!callback && dev->driver && dev->driver->pm) {
1147 callback = dev->driver->pm->complete;
1151 pm_dev_dbg(dev, state, info);
1152 callback(dev);
1155 device_unlock(dev);
1157 pm_runtime_put(dev);
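
The pm_runtime_put() at line 1157 balances the pm_runtime_get_noresume() taken in device_prepare() (line 1960 below), so the usage count stays elevated for the whole sleep transition. The same bracketing works in driver code to keep runtime suspend from being initiated across a region; a minimal sketch (foo_do_work is illustrative):

	#include <linux/pm_runtime.h>

	static void foo_do_work(struct device *dev)
	{
		pm_runtime_get_noresume(dev);	/* raise usage count, no resume */
		/* ... runtime suspend will not be started in here ... */
		pm_runtime_put(dev);		/* drop the count, allow idle */
	}
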
in dpm_complete():
1177 struct device *dev = to_device(dpm_prepared_list.prev);
1179 get_device(dev);
1180 dev->power.is_prepared = false;
1181 list_move(&dev->power.entry, &list);
1184 trace_device_pm_callback_start(dev, "", state.event);
1185 device_complete(dev, state);
1186 trace_device_pm_callback_end(dev, 0);
1189 put_device(dev);

1237 static void dpm_superior_set_must_resume(struct device *dev)
1242 if (dev->parent)
1243 dev->parent->power.must_resume = true;
1247 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)

1253 static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
1260 if (dev->pm_domain) {
1262 callback = pm_noirq_op(&dev->pm_domain->ops, state);
1263 } else if (dev->type && dev->type->pm) {
1265 callback = pm_noirq_op(dev->type->pm, state);
1266 } else if (dev->class && dev->class->pm) {
1268 callback = pm_noirq_op(dev->class->pm, state);
1269 } else if (dev->bus && dev->bus->pm) {
1271 callback = pm_noirq_op(dev->bus->pm, state);

1282 static bool device_must_resume(struct device *dev, pm_message_t state,
1294 !dpm_subsys_suspend_late_cb(dev, state, NULL) &&
1295 !dpm_subsys_resume_early_cb(dev, resume_msg, NULL) &&
1296 !dpm_subsys_resume_noirq_cb(dev, resume_msg, NULL))
1297 return !pm_runtime_status_suspended(dev) &&
1299 (device_can_wakeup(dev) && !device_may_wakeup(dev)));
1305 return !dev->power.may_skip_resume;

1317 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1324 TRACE_DEVICE(dev);
1327 dpm_wait_for_subordinate(dev, async);
1337 if (dev->power.syscore || dev->power.direct_complete)
1340 callback = dpm_subsys_suspend_noirq_cb(dev, state, &info);
1344 no_subsys_cb = !dpm_subsys_suspend_late_cb(dev, state, NULL);
1346 if (dev_pm_smart_suspend_and_suspended(dev) && no_subsys_cb)
1349 if (dev->driver && dev->driver->pm) {
1351 callback = pm_noirq_op(dev->driver->pm, state);
1355 error = dpm_run_callback(callback, dev, state, info);
1362 dev->power.is_noirq_suspended = true;
1364 if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
1365 dev->power.must_resume = dev->power.must_resume ||
1366 atomic_read(&dev->power.usage_count) > 1 ||
1367 device_must_resume(dev, state, no_subsys_cb);
1369 dev->power.must_resume = true;
1372 if (dev->power.must_resume)
1373 dpm_superior_set_must_resume(dev);
1376 complete_all(&dev->power.completion);
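
DPM_FLAG_LEAVE_SUSPENDED (line 1364) is the driver's request to leave the device suspended after the system transition when nothing forces it back up; the must_resume bookkeeping above decides whether the request can be honored. Opting in is a one-liner, usually at probe; a hedged sketch (foo_probe is illustrative):

	#include <linux/pm_runtime.h>

	static int foo_probe(struct device *dev)
	{
		/* Ask the core to skip this device's resume when possible. */
		dev_pm_set_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED);
		return 0;
	}
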
in async_suspend_noirq():
1383 struct device *dev = (struct device *)data;
1386 error = __device_suspend_noirq(dev, pm_transition, true);
1388 dpm_save_failed_dev(dev_name(dev));
1389 pm_dev_err(dev, pm_transition, " async", error);
1392 put_device(dev);

1395 static int device_suspend_noirq(struct device *dev)
1397 reinit_completion(&dev->power.completion);
1399 if (is_async(dev)) {
1400 get_device(dev);
1401 async_schedule(async_suspend_noirq, dev);
1404 return __device_suspend_noirq(dev, pm_transition, false);

in dpm_noirq_suspend_devices():
1425 struct device *dev = to_device(dpm_late_early_list.prev);
1427 get_device(dev);
1430 error = device_suspend_noirq(dev);
1434 pm_dev_err(dev, state, " noirq", error);
1435 dpm_save_failed_dev(dev_name(dev));
1436 put_device(dev);
1439 if (!list_empty(&dev->power.entry))
1440 list_move(&dev->power.entry, &dpm_noirq_list);
1441 put_device(dev);

1479 static void dpm_propagate_wakeup_to_parent(struct device *dev)
1481 struct device *parent = dev->parent;
1488 if (dev->power.wakeup_path && !parent->power.ignore_children)

1494 static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
1501 if (dev->pm_domain) {
1503 callback = pm_late_early_op(&dev->pm_domain->ops, state);
1504 } else if (dev->type && dev->type->pm) {
1506 callback = pm_late_early_op(dev->type->pm, state);
1507 } else if (dev->class && dev->class->pm) {
1509 callback = pm_late_early_op(dev->class->pm, state);
1510 } else if (dev->bus && dev->bus->pm) {
1512 callback = pm_late_early_op(dev->bus->pm, state);

1531 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1537 TRACE_DEVICE(dev);
1540 __pm_runtime_disable(dev, false);
1542 dpm_wait_for_subordinate(dev, async);
1552 if (dev->power.syscore || dev->power.direct_complete)
1555 callback = dpm_subsys_suspend_late_cb(dev, state, &info);
1559 if (dev_pm_smart_suspend_and_suspended(dev) &&
1560 !dpm_subsys_suspend_noirq_cb(dev, state, NULL))
1563 if (dev->driver && dev->driver->pm) {
1565 callback = pm_late_early_op(dev->driver->pm, state);
1569 error = dpm_run_callback(callback, dev, state, info);
1574 dpm_propagate_wakeup_to_parent(dev);
1577 dev->power.is_late_suspended = true;
1581 complete_all(&dev->power.completion);

in async_suspend_late():
1587 struct device *dev = (struct device *)data;
1590 error = __device_suspend_late(dev, pm_transition, true);
1592 dpm_save_failed_dev(dev_name(dev));
1593 pm_dev_err(dev, pm_transition, " async", error);
1595 put_device(dev);

1598 static int device_suspend_late(struct device *dev)
1600 reinit_completion(&dev->power.completion);
1602 if (is_async(dev)) {
1603 get_device(dev);
1604 async_schedule(async_suspend_late, dev);
1608 return __device_suspend_late(dev, pm_transition, false);

in dpm_suspend_late():
1626 struct device *dev = to_device(dpm_suspended_list.prev);
1628 get_device(dev);
1631 error = device_suspend_late(dev);
1634 if (!list_empty(&dev->power.entry))
1635 list_move(&dev->power.entry, &dpm_late_early_list);
1638 pm_dev_err(dev, state, " late", error);
1639 dpm_save_failed_dev(dev_name(dev));
1640 put_device(dev);
1643 put_device(dev);

1689 static int legacy_suspend(struct device *dev, pm_message_t state,
1690 int (*cb)(struct device *dev, pm_message_t state),
1696 calltime = initcall_debug_start(dev, cb);
1698 trace_device_pm_callback_start(dev, info, state.event);
1699 error = cb(dev, state);
1700 trace_device_pm_callback_end(dev, error);
1703 initcall_debug_report(dev, calltime, cb, error);
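
legacy_suspend() wraps the pre-dev_pm_ops convention in which a bus ->suspend callback received the pm_message_t directly (used at line 1820 below for dev->bus->suspend). For contrast, the shape of such a legacy callback (foo_legacy_suspend is illustrative):

	#include <linux/device.h>

	/* Old-style callback: the target state is an explicit argument. */
	static int foo_legacy_suspend(struct device *dev, pm_message_t state)
	{
		dev_dbg(dev, "legacy suspend, event %d\n", state.event);
		return 0;
	}
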
1708 static void dpm_clear_superiors_direct_complete(struct device *dev)
1713 if (dev->parent) {
1714 spin_lock_irq(&dev->parent->power.lock);
1715 dev->parent->power.direct_complete = false;
1716 spin_unlock_irq(&dev->parent->power.lock);
1721 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {

1736 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1743 TRACE_DEVICE(dev);
1746 dpm_wait_for_subordinate(dev, async);
1749 dev->power.direct_complete = false;
1764 pm_runtime_barrier(dev);
1767 dev->power.direct_complete = false;
1772 if (dev->power.syscore)
1776 if (device_may_wakeup(dev) || dev->power.wakeup_path)
1777 dev->power.direct_complete = false;
1779 if (dev->power.direct_complete) {
1780 if (pm_runtime_status_suspended(dev)) {
1781 pm_runtime_disable(dev);
1782 if (pm_runtime_status_suspended(dev))
1785 pm_runtime_enable(dev);
1787 dev->power.direct_complete = false;
1790 dev->power.may_skip_resume = false;
1791 dev->power.must_resume = false;
1793 dpm_watchdog_set(&wd, dev);
1794 device_lock(dev);
1796 if (dev->pm_domain) {
1798 callback = pm_op(&dev->pm_domain->ops, state);
1802 if (dev->type && dev->type->pm) {
1804 callback = pm_op(dev->type->pm, state);
1808 if (dev->class && dev->class->pm) {
1810 callback = pm_op(dev->class->pm, state);
1814 if (dev->bus) {
1815 if (dev->bus->pm) {
1817 callback = pm_op(dev->bus->pm, state);
1818 } else if (dev->bus->suspend) {
1819 pm_dev_dbg(dev, state, "legacy bus ");
1820 error = legacy_suspend(dev, state, dev->bus->suspend,
1827 if (!callback && dev->driver && dev->driver->pm) {
1829 callback = pm_op(dev->driver->pm, state);
1832 error = dpm_run_callback(callback, dev, state, info);
1836 dev->power.is_suspended = true;
1837 if (device_may_wakeup(dev))
1838 dev->power.wakeup_path = true;
1840 dpm_propagate_wakeup_to_parent(dev);
1841 dpm_clear_superiors_direct_complete(dev);
1844 device_unlock(dev);
1851 complete_all(&dev->power.completion);
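
device_may_wakeup() (lines 1776 and 1837) returns true only for devices whose wakeup capability has been declared and enabled, which drivers normally do once at probe time; a hedged sketch (foo_probe is illustrative):

	#include <linux/pm_wakeup.h>

	static int foo_probe(struct device *dev)
	{
		/* Declare wakeup capability and enable it by default. */
		device_init_wakeup(dev, true);
		return 0;
	}
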
in async_suspend():
1858 struct device *dev = (struct device *)data;
1861 error = __device_suspend(dev, pm_transition, true);
1863 dpm_save_failed_dev(dev_name(dev));
1864 pm_dev_err(dev, pm_transition, " async", error);
1867 put_device(dev);

1870 static int device_suspend(struct device *dev)
1872 reinit_completion(&dev->power.completion);
1874 if (is_async(dev)) {
1875 get_device(dev);
1876 async_schedule(async_suspend, dev);
1880 return __device_suspend(dev, pm_transition, false);

in dpm_suspend():
1901 struct device *dev = to_device(dpm_prepared_list.prev);
1903 get_device(dev);
1906 error = device_suspend(dev);
1910 pm_dev_err(dev, state, "", error);
1911 dpm_save_failed_dev(dev_name(dev));
1912 put_device(dev);
1915 if (!list_empty(&dev->power.entry))
1916 list_move(&dev->power.entry, &dpm_suspended_list);
1917 put_device(dev);

1942 static int device_prepare(struct device *dev, pm_message_t state)
1947 if (dev->power.syscore)
1950 WARN_ON(!pm_runtime_enabled(dev) &&
1951 dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
1960 pm_runtime_get_noresume(dev);
1962 device_lock(dev);
1964 dev->power.wakeup_path = false;
1966 if (dev->power.no_pm_callbacks)
1969 if (dev->pm_domain)
1970 callback = dev->pm_domain->ops.prepare;
1971 else if (dev->type && dev->type->pm)
1972 callback = dev->type->pm->prepare;
1973 else if (dev->class && dev->class->pm)
1974 callback = dev->class->pm->prepare;
1975 else if (dev->bus && dev->bus->pm)
1976 callback = dev->bus->pm->prepare;
1978 if (!callback && dev->driver && dev->driver->pm)
1979 callback = dev->driver->pm->prepare;
1982 ret = callback(dev);
1985 device_unlock(dev);
1989 pm_runtime_put(dev);
1999 spin_lock_irq(&dev->power.lock);
2000 dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
2001 ((pm_runtime_suspended(dev) && ret > 0) ||
2002 dev->power.no_pm_callbacks) &&
2003 !dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
2004 spin_unlock_irq(&dev->power.lock);
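
Lines 2000-2003 give the direct-complete condition in full: a suspend transition, and either a runtime-suspended device whose ->prepare() returned a positive value or one with no PM callbacks at all, and no DPM_FLAG_NEVER_SKIP veto. The positive ->prepare() return is how a driver opts in; a minimal sketch (foo_prepare is illustrative):

	#include <linux/pm_runtime.h>

	static int foo_prepare(struct device *dev)
	{
		/*
		 * Returning > 0 tells the core the runtime-suspended state
		 * is adequate for the system transition, so the remaining
		 * suspend/resume phases may be skipped ("direct complete").
		 */
		return pm_runtime_suspended(dev);
	}
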
in dpm_prepare():
2037 struct device *dev = to_device(dpm_list.next);
2039 get_device(dev);
2042 trace_device_pm_callback_start(dev, "", state.event);
2043 error = device_prepare(dev, state);
2044 trace_device_pm_callback_end(dev, error);
2049 put_device(dev);
2055 dev_name(dev), error);
2056 put_device(dev);
2059 dev->power.is_prepared = true;
2060 if (!list_empty(&dev->power.entry))
2061 list_move_tail(&dev->power.entry, &dpm_prepared_list);
2062 put_device(dev);

2102 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
2104 dpm_wait(dev, subordinate->power.async_suspend);
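
device_pm_wait_for_dev() exports the completion machinery to drivers with ordering constraints the core cannot deduce: the subordinate blocks until the other device's current phase completes. A hedged sketch of a resume-side caller (foo_resume and foo_get_supplier are illustrative; the latter is a hypothetical helper):

	#include <linux/pm.h>

	static int foo_resume(struct device *dev)
	{
		struct device *supplier = foo_get_supplier(dev); /* hypothetical */

		/* Do not proceed until the supplier has finished resuming. */
		device_pm_wait_for_dev(dev, supplier);
		return 0;
	}
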
in dpm_for_each_dev():
2119 struct device *dev;
2125 list_for_each_entry(dev, &dpm_list, power.entry)
2126 fn(dev, data);

2146 void device_pm_check_callbacks(struct device *dev)
2150 spin_lock_irqsave(&dev->power.lock, flags);
2151 dev->power.no_pm_callbacks =
2152 (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2153 !dev->bus->suspend && !dev->bus->resume)) &&
2154 (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2155 (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2156 (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2157 (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2158 !dev->driver->suspend && !dev->driver->resume));
2159 spin_unlock_irqrestore(&dev->power.lock, flags);

2162 bool dev_pm_smart_suspend_and_suspended(struct device *dev)
2164 return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
2165 pm_runtime_status_suspended(dev);
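
dev_pm_smart_suspend_and_suspended() is the test behind the skips at lines 1346 and 1559: with DPM_FLAG_SMART_SUSPEND set, an already runtime-suspended device need not run redundant late/noirq suspend callbacks. Drivers opt in at probe; a hedged sketch (foo_probe is illustrative):

	#include <linux/pm_runtime.h>

	static int foo_probe(struct device *dev)
	{
		/*
		 * Runtime suspend leaves the device fit for system sleep,
		 * so the core may skip redundant suspend callbacks.
		 */
		dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
		pm_runtime_enable(dev);
		return 0;
	}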