1 /*
2 * drivers/base/power/main.c - Where the driver meets power management.
3 *
4 * Copyright (c) 2003 Patrick Mochel
5 * Copyright (c) 2003 Open Source Development Lab
6 *
7 * This file is released under the GPLv2
8 *
9 *
10 * The driver model core calls device_pm_add() when a device is registered.
11 * This will initialize the embedded device_pm_info object in the device
12 * and add it to the list of power-controlled devices. sysfs entries for
13 * controlling device power management will also be added.
14 *
15 * A separate list is used for keeping track of power info, because the power
16 * domain dependencies may differ from the ancestral dependencies that the
17 * subsystem list maintains.
18 */
19
20 #include <linux/device.h>
21 #include <linux/export.h>
22 #include <linux/mutex.h>
23 #include <linux/pm.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/pm-trace.h>
26 #include <linux/pm_wakeirq.h>
27 #include <linux/interrupt.h>
28 #include <linux/sched.h>
29 #include <linux/sched/debug.h>
30 #include <linux/async.h>
31 #include <linux/suspend.h>
32 #include <trace/events/power.h>
33 #include <linux/cpufreq.h>
34 #include <linux/cpuidle.h>
35 #include <linux/timer.h>
36
37 #include "../base.h"
38 #include "power.h"
39
40 typedef int (*pm_callback_t)(struct device *);
41
42 /*
43 * The entries in dpm_list are kept in depth-first order, simply
44 * because children are guaranteed to be discovered after their parents and
45 * are inserted at the back of the list on discovery.
46 *
47 * Since device_pm_add() may be called with a device lock held,
48 * we must never try to acquire a device lock while holding
49 * dpm_list_mutex.
50 */
51
52 LIST_HEAD(dpm_list);
53 static LIST_HEAD(dpm_prepared_list);
54 static LIST_HEAD(dpm_suspended_list);
55 static LIST_HEAD(dpm_late_early_list);
56 static LIST_HEAD(dpm_noirq_list);
57
58 struct suspend_stats suspend_stats;
59 static DEFINE_MUTEX(dpm_list_mtx);
60 static pm_message_t pm_transition;
61
62 static int async_error;
63
64 static const char *pm_verb(int event)
65 {
66 switch (event) {
67 case PM_EVENT_SUSPEND:
68 return "suspend";
69 case PM_EVENT_RESUME:
70 return "resume";
71 case PM_EVENT_FREEZE:
72 return "freeze";
73 case PM_EVENT_QUIESCE:
74 return "quiesce";
75 case PM_EVENT_HIBERNATE:
76 return "hibernate";
77 case PM_EVENT_THAW:
78 return "thaw";
79 case PM_EVENT_RESTORE:
80 return "restore";
81 case PM_EVENT_RECOVER:
82 return "recover";
83 default:
84 return "(unknown PM event)";
85 }
86 }
87
88 /**
89 * device_pm_sleep_init - Initialize system suspend-related device fields.
90 * @dev: Device object being initialized.
91 */
92 void device_pm_sleep_init(struct device *dev)
93 {
94 dev->power.is_prepared = false;
95 dev->power.is_suspended = false;
96 dev->power.is_noirq_suspended = false;
97 dev->power.is_late_suspended = false;
98 init_completion(&dev->power.completion);
99 complete_all(&dev->power.completion);
100 dev->power.wakeup = NULL;
101 INIT_LIST_HEAD(&dev->power.entry);
102 }
103
104 /**
105 * device_pm_lock - Lock the list of active devices used by the PM core.
106 */
107 void device_pm_lock(void)
108 {
109 mutex_lock(&dpm_list_mtx);
110 }
111
112 /**
113 * device_pm_unlock - Unlock the list of active devices used by the PM core.
114 */
115 void device_pm_unlock(void)
116 {
117 mutex_unlock(&dpm_list_mtx);
118 }
119
120 /**
121 * device_pm_add - Add a device to the PM core's list of active devices.
122 * @dev: Device to add to the list.
123 */
124 void device_pm_add(struct device *dev)
125 {
126 pr_debug("PM: Adding info for %s:%s\n",
127 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
128 device_pm_check_callbacks(dev);
129 mutex_lock(&dpm_list_mtx);
130 if (dev->parent && dev->parent->power.is_prepared)
131 dev_warn(dev, "parent %s should not be sleeping\n",
132 dev_name(dev->parent));
133 list_add_tail(&dev->power.entry, &dpm_list);
134 dev->power.in_dpm_list = true;
135 mutex_unlock(&dpm_list_mtx);
136 }
137
138 /**
139 * device_pm_remove - Remove a device from the PM core's list of active devices.
140 * @dev: Device to be removed from the list.
141 */
142 void device_pm_remove(struct device *dev)
143 {
144 pr_debug("PM: Removing info for %s:%s\n",
145 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
146 complete_all(&dev->power.completion);
147 mutex_lock(&dpm_list_mtx);
148 list_del_init(&dev->power.entry);
149 dev->power.in_dpm_list = false;
150 mutex_unlock(&dpm_list_mtx);
151 device_wakeup_disable(dev);
152 pm_runtime_remove(dev);
153 device_pm_check_callbacks(dev);
154 }
155
156 /**
157 * device_pm_move_before - Move device in the PM core's list of active devices.
158 * @deva: Device to move in dpm_list.
159 * @devb: Device @deva should come before.
160 */
161 void device_pm_move_before(struct device *deva, struct device *devb)
162 {
163 pr_debug("PM: Moving %s:%s before %s:%s\n",
164 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
165 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
166 /* Delete deva from dpm_list and reinsert before devb. */
167 list_move_tail(&deva->power.entry, &devb->power.entry);
168 }
169
170 /**
171 * device_pm_move_after - Move device in the PM core's list of active devices.
172 * @deva: Device to move in dpm_list.
173 * @devb: Device @deva should come after.
174 */
175 void device_pm_move_after(struct device *deva, struct device *devb)
176 {
177 pr_debug("PM: Moving %s:%s after %s:%s\n",
178 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
179 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
180 /* Delete deva from dpm_list and reinsert after devb. */
181 list_move(&deva->power.entry, &devb->power.entry);
182 }
183
184 /**
185 * device_pm_move_last - Move device to end of the PM core's list of devices.
186 * @dev: Device to move in dpm_list.
187 */
188 void device_pm_move_last(struct device *dev)
189 {
190 pr_debug("PM: Moving %s:%s to end of list\n",
191 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
192 list_move_tail(&dev->power.entry, &dpm_list);
193 }
194
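/*
 * When initcall debugging (pm_print_times) is enabled, log which callback is
 * about to run and return a start timestamp for initcall_debug_report().
 */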
195 static ktime_t initcall_debug_start(struct device *dev, void *cb)
196 {
197 if (!pm_print_times_enabled)
198 return 0;
199
200 dev_info(dev, "calling %pF @ %i, parent: %s\n", cb,
201 task_pid_nr(current),
202 dev->parent ? dev_name(dev->parent) : "none");
203 return ktime_get();
204 }
205
206 static void initcall_debug_report(struct device *dev, ktime_t calltime,
207 void *cb, int error)
208 {
209 ktime_t rettime;
210 s64 nsecs;
211
212 if (!pm_print_times_enabled)
213 return;
214
215 rettime = ktime_get();
216 nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
217
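/* The >> 10 below approximates a ns -> us conversion (divides by 1024). */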
218 dev_info(dev, "%pF returned %d after %Ld usecs\n", cb, error,
219 (unsigned long long)nsecs >> 10);
220 }
221
222 /**
223 * dpm_wait - Wait for a PM operation to complete.
224 * @dev: Device to wait for.
225 * @async: If unset, wait only if the device's power.async_suspend flag is set.
226 */
227 static void dpm_wait(struct device *dev, bool async)
228 {
229 if (!dev)
230 return;
231
232 if (async || (pm_async_enabled && dev->power.async_suspend))
233 wait_for_completion(&dev->power.completion);
234 }
235
236 static int dpm_wait_fn(struct device *dev, void *async_ptr)
237 {
238 dpm_wait(dev, *((bool *)async_ptr));
239 return 0;
240 }
241
242 static void dpm_wait_for_children(struct device *dev, bool async)
243 {
244 device_for_each_child(dev, &async, dpm_wait_fn);
245 }
246
247 static void dpm_wait_for_suppliers(struct device *dev, bool async)
248 {
249 struct device_link *link;
250 int idx;
251
252 idx = device_links_read_lock();
253
254 /*
255 * If the supplier goes away right after we've checked the link to it,
256 * we'll wait for its completion to change the state, but that's fine,
257 * because the only things that will block as a result are the SRCU
258 * callbacks freeing the link objects for the links in the list we're
259 * walking.
260 */
261 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
262 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
263 dpm_wait(link->supplier, async);
264
265 device_links_read_unlock(idx);
266 }
267
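/* Wait for everything @dev depends on: its parent and its suppliers. */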
268 static void dpm_wait_for_superior(struct device *dev, bool async)
269 {
270 dpm_wait(dev->parent, async);
271 dpm_wait_for_suppliers(dev, async);
272 }
273
274 static void dpm_wait_for_consumers(struct device *dev, bool async)
275 {
276 struct device_link *link;
277 int idx;
278
279 idx = device_links_read_lock();
280
281 /*
282 * The status of a device link can only be changed from "dormant" by a
283 * probe, but that cannot happen during system suspend/resume. In
284 * theory it can change to "dormant" at that time, but then it is
285 * reasonable to wait for the target device anyway (e.g. if it goes
286 * away, it's better to wait for it to go away completely and then
287 * continue instead of trying to continue in parallel with its
288 * unregistration).
289 */
290 list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
291 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
292 dpm_wait(link->consumer, async);
293
294 device_links_read_unlock(idx);
295 }
296
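/* Wait for everything that depends on @dev: its children and its consumers. */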
297 static void dpm_wait_for_subordinate(struct device *dev, bool async)
298 {
299 dpm_wait_for_children(dev, async);
300 dpm_wait_for_consumers(dev, async);
301 }
302
303 /**
304 * pm_op - Return the PM operation appropriate for given PM event.
305 * @ops: PM operations to choose from.
306 * @state: PM transition of the system being carried out.
307 */
308 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
309 {
310 switch (state.event) {
311 #ifdef CONFIG_SUSPEND
312 case PM_EVENT_SUSPEND:
313 return ops->suspend;
314 case PM_EVENT_RESUME:
315 return ops->resume;
316 #endif /* CONFIG_SUSPEND */
317 #ifdef CONFIG_HIBERNATE_CALLBACKS
318 case PM_EVENT_FREEZE:
319 case PM_EVENT_QUIESCE:
320 return ops->freeze;
321 case PM_EVENT_HIBERNATE:
322 return ops->poweroff;
323 case PM_EVENT_THAW:
324 case PM_EVENT_RECOVER:
325 return ops->thaw;
327 case PM_EVENT_RESTORE:
328 return ops->restore;
329 #endif /* CONFIG_HIBERNATE_CALLBACKS */
330 }
331
332 return NULL;
333 }
334
335 /**
336 * pm_late_early_op - Return the PM operation appropriate for given PM event.
337 * @ops: PM operations to choose from.
338 * @state: PM transition of the system being carried out.
339 *
340 * Runtime PM is disabled for @dev while this function is being executed.
341 */
342 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
343 pm_message_t state)
344 {
345 switch (state.event) {
346 #ifdef CONFIG_SUSPEND
347 case PM_EVENT_SUSPEND:
348 return ops->suspend_late;
349 case PM_EVENT_RESUME:
350 return ops->resume_early;
351 #endif /* CONFIG_SUSPEND */
352 #ifdef CONFIG_HIBERNATE_CALLBACKS
353 case PM_EVENT_FREEZE:
354 case PM_EVENT_QUIESCE:
355 return ops->freeze_late;
356 case PM_EVENT_HIBERNATE:
357 return ops->poweroff_late;
358 case PM_EVENT_THAW:
359 case PM_EVENT_RECOVER:
360 return ops->thaw_early;
361 case PM_EVENT_RESTORE:
362 return ops->restore_early;
363 #endif /* CONFIG_HIBERNATE_CALLBACKS */
364 }
365
366 return NULL;
367 }
368
369 /**
370 * pm_noirq_op - Return the PM operation appropriate for given PM event.
371 * @ops: PM operations to choose from.
372 * @state: PM transition of the system being carried out.
373 *
374 * The driver of @dev will not receive interrupts while this function is being
375 * executed.
376 */
377 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
378 {
379 switch (state.event) {
380 #ifdef CONFIG_SUSPEND
381 case PM_EVENT_SUSPEND:
382 return ops->suspend_noirq;
383 case PM_EVENT_RESUME:
384 return ops->resume_noirq;
385 #endif /* CONFIG_SUSPEND */
386 #ifdef CONFIG_HIBERNATE_CALLBACKS
387 case PM_EVENT_FREEZE:
388 case PM_EVENT_QUIESCE:
389 return ops->freeze_noirq;
390 case PM_EVENT_HIBERNATE:
391 return ops->poweroff_noirq;
392 case PM_EVENT_THAW:
393 case PM_EVENT_RECOVER:
394 return ops->thaw_noirq;
395 case PM_EVENT_RESTORE:
396 return ops->restore_noirq;
397 #endif /* CONFIG_HIBERNATE_CALLBACKS */
398 }
399
400 return NULL;
401 }
402
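/*
 * Debug/error print helpers: pm_dev_dbg() logs the transition applied to a
 * device, pm_dev_err() logs a failed callback.
 */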
403 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
404 {
405 dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
406 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
407 ", may wakeup" : "");
408 }
409
410 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
411 int error)
412 {
413 printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
414 dev_name(dev), pm_verb(state.event), info, error);
415 }
416
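/* Log how long the given phase of the transition took. */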
417 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
418 const char *info)
419 {
420 ktime_t calltime;
421 u64 usecs64;
422 int usecs;
423
424 calltime = ktime_get();
425 usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
426 do_div(usecs64, NSEC_PER_USEC);
427 usecs = usecs64;
428 if (usecs == 0)
429 usecs = 1;
430
431 pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
432 info ?: "", info ? " " : "", pm_verb(state.event),
433 error ? "aborted" : "complete",
434 usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
435 }
436
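/*
 * Invoke a PM callback for @dev with tracing and initcall-debug timing.
 * A NULL callback is treated as success.
 */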
437 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
438 pm_message_t state, const char *info)
439 {
440 ktime_t calltime;
441 int error;
442
443 if (!cb)
444 return 0;
445
446 calltime = initcall_debug_start(dev, cb);
447
448 pm_dev_dbg(dev, state, info);
449 trace_device_pm_callback_start(dev, info, state.event);
450 error = cb(dev);
451 trace_device_pm_callback_end(dev, error);
452 suspend_report_result(cb, error);
453
454 initcall_debug_report(dev, calltime, cb, error);
455
456 return error;
457 }
458
459 #ifdef CONFIG_DPM_WATCHDOG
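/*
 * struct dpm_watchdog - Watchdog armed around a single suspend/resume callback.
 * @dev: Device whose callback is being watched.
 * @tsk: Task executing the callback, whose stack is dumped on timeout.
 * @timer: Timer that fires if the callback takes too long.
 */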
460 struct dpm_watchdog {
461 struct device *dev;
462 struct task_struct *tsk;
463 struct timer_list timer;
464 };
465
466 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
467 struct dpm_watchdog wd
468
469 /**
470 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
471 * @data: Watchdog object address.
472 *
473 * Called when a driver has timed out suspending or resuming.
474 * There's not much we can do here to recover so panic() to
475 * capture a crash-dump in pstore.
476 */
477 static void dpm_watchdog_handler(struct timer_list *t)
478 {
479 struct dpm_watchdog *wd = from_timer(wd, t, timer);
480
481 dev_emerg(wd->dev, "**** DPM device timeout ****\n");
482 show_stack(wd->tsk, NULL);
483 panic("%s %s: unrecoverable failure\n",
484 dev_driver_string(wd->dev), dev_name(wd->dev));
485 }
486
487 /**
488 * dpm_watchdog_set - Enable pm watchdog for given device.
489 * @wd: Watchdog. Must be allocated on the stack.
490 * @dev: Device to handle.
491 */
492 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
493 {
494 struct timer_list *timer = &wd->timer;
495
496 wd->dev = dev;
497 wd->tsk = current;
498
499 timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
500 /* use same timeout value for both suspend and resume */
501 timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
502 add_timer(timer);
503 }
504
505 /**
506 * dpm_watchdog_clear - Disable suspend/resume watchdog.
507 * @wd: Watchdog to disable.
508 */
509 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
510 {
511 struct timer_list *timer = &wd->timer;
512
513 del_timer_sync(timer);
514 destroy_timer_on_stack(timer);
515 }
516 #else
517 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
518 #define dpm_watchdog_set(x, y)
519 #define dpm_watchdog_clear(x)
520 #endif
521
522 /*------------------------- Resume routines -------------------------*/
523
524 /**
525 * dev_pm_skip_next_resume_phases - Skip next system resume phases for device.
526 * @dev: Target device.
527 *
528 * Make the core skip the "early resume" and "resume" phases for @dev.
529 *
530 * This function can be called by middle-layer code during the "noirq" phase of
531 * system resume if necessary, but not by device drivers.
532 */
533 void dev_pm_skip_next_resume_phases(struct device *dev)
534 {
535 dev->power.is_late_suspended = false;
536 dev->power.is_suspended = false;
537 }
538
539 /**
540 * suspend_event - Return a "suspend" message for given "resume" one.
541 * @resume_msg: PM message representing a system-wide resume transition.
542 */
543 static pm_message_t suspend_event(pm_message_t resume_msg)
544 {
545 switch (resume_msg.event) {
546 case PM_EVENT_RESUME:
547 return PMSG_SUSPEND;
548 case PM_EVENT_THAW:
549 case PM_EVENT_RESTORE:
550 return PMSG_FREEZE;
551 case PM_EVENT_RECOVER:
552 return PMSG_HIBERNATE;
553 }
554 return PMSG_ON;
555 }
556
557 /**
558 * dev_pm_may_skip_resume - System-wide device resume optimization check.
559 * @dev: Target device.
560 *
561 * Checks whether or not the device may be left in suspend after a system-wide
562 * transition to the working state.
563 */
564 bool dev_pm_may_skip_resume(struct device *dev)
565 {
566 return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
567 }
568
569 static pm_callback_t dpm_subsys_resume_noirq_cb(struct device *dev,
570 pm_message_t state,
571 const char **info_p)
572 {
573 pm_callback_t callback;
574 const char *info;
575
576 if (dev->pm_domain) {
577 info = "noirq power domain ";
578 callback = pm_noirq_op(&dev->pm_domain->ops, state);
579 } else if (dev->type && dev->type->pm) {
580 info = "noirq type ";
581 callback = pm_noirq_op(dev->type->pm, state);
582 } else if (dev->class && dev->class->pm) {
583 info = "noirq class ";
584 callback = pm_noirq_op(dev->class->pm, state);
585 } else if (dev->bus && dev->bus->pm) {
586 info = "noirq bus ";
587 callback = pm_noirq_op(dev->bus->pm, state);
588 } else {
589 return NULL;
590 }
591
592 if (info_p)
593 *info_p = info;
594
595 return callback;
596 }
597
598 static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
599 pm_message_t state,
600 const char **info_p);
601
602 static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
603 pm_message_t state,
604 const char **info_p);
605
606 /**
607 * device_resume_noirq - Execute a "noirq resume" callback for given device.
608 * @dev: Device to handle.
609 * @state: PM transition of the system being carried out.
610 * @async: If true, the device is being resumed asynchronously.
611 *
612 * The driver of @dev will not receive interrupts while this function is being
613 * executed.
614 */
615 static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
616 {
617 pm_callback_t callback;
618 const char *info;
619 bool skip_resume;
620 int error = 0;
621
622 TRACE_DEVICE(dev);
623 TRACE_RESUME(0);
624
625 if (dev->power.syscore || dev->power.direct_complete)
626 goto Out;
627
628 if (!dev->power.is_noirq_suspended)
629 goto Out;
630
631 dpm_wait_for_superior(dev, async);
632
633 skip_resume = dev_pm_may_skip_resume(dev);
634
635 callback = dpm_subsys_resume_noirq_cb(dev, state, &info);
636 if (callback)
637 goto Run;
638
639 if (skip_resume)
640 goto Skip;
641
642 if (dev_pm_smart_suspend_and_suspended(dev)) {
643 pm_message_t suspend_msg = suspend_event(state);
644
645 /*
646 * If "freeze" callbacks have been skipped during a transition
647 * related to hibernation, the subsequent "thaw" callbacks must
648 * be skipped too or bad things may happen. Otherwise, resume
649 * callbacks are going to be run for the device, so its runtime
650 * PM status must be changed to reflect the new state after the
651 * transition under way.
652 */
653 if (!dpm_subsys_suspend_late_cb(dev, suspend_msg, NULL) &&
654 !dpm_subsys_suspend_noirq_cb(dev, suspend_msg, NULL)) {
655 if (state.event == PM_EVENT_THAW) {
656 skip_resume = true;
657 goto Skip;
658 } else {
659 pm_runtime_set_active(dev);
660 }
661 }
662 }
663
664 if (dev->driver && dev->driver->pm) {
665 info = "noirq driver ";
666 callback = pm_noirq_op(dev->driver->pm, state);
667 }
668
669 Run:
670 error = dpm_run_callback(callback, dev, state, info);
671
672 Skip:
673 dev->power.is_noirq_suspended = false;
674
675 if (skip_resume) {
676 /*
677 * The device is going to be left in suspend, but it might not
678 * have been in runtime suspend before the system suspended, so
679 * its runtime PM status needs to be updated to avoid confusing
680 * the runtime PM framework when runtime PM is enabled for the
681 * device again.
682 */
683 pm_runtime_set_suspended(dev);
684 dev_pm_skip_next_resume_phases(dev);
685 }
686
687 Out:
688 complete_all(&dev->power.completion);
689 TRACE_RESUME(error);
690 return error;
691 }
692
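/* Whether @dev may be suspended or resumed asynchronously in this transition. */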
693 static bool is_async(struct device *dev)
694 {
695 return dev->power.async_suspend && pm_async_enabled
696 && !pm_trace_is_enabled();
697 }
698
699 static void async_resume_noirq(void *data, async_cookie_t cookie)
700 {
701 struct device *dev = (struct device *)data;
702 int error;
703
704 error = device_resume_noirq(dev, pm_transition, true);
705 if (error)
706 pm_dev_err(dev, pm_transition, " async", error);
707
708 put_device(dev);
709 }
710
711 void dpm_noirq_resume_devices(pm_message_t state)
712 {
713 struct device *dev;
714 ktime_t starttime = ktime_get();
715
716 trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
717 mutex_lock(&dpm_list_mtx);
718 pm_transition = state;
719
720 /*
721 * Advance the async threads upfront,
722 * in case the starting of async threads is
723 * delayed by non-async resuming devices.
724 */
725 list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
726 reinit_completion(&dev->power.completion);
727 if (is_async(dev)) {
728 get_device(dev);
729 async_schedule(async_resume_noirq, dev);
730 }
731 }
732
733 while (!list_empty(&dpm_noirq_list)) {
734 dev = to_device(dpm_noirq_list.next);
735 get_device(dev);
736 list_move_tail(&dev->power.entry, &dpm_late_early_list);
737 mutex_unlock(&dpm_list_mtx);
738
739 if (!is_async(dev)) {
740 int error;
741
742 error = device_resume_noirq(dev, state, false);
743 if (error) {
744 suspend_stats.failed_resume_noirq++;
745 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
746 dpm_save_failed_dev(dev_name(dev));
747 pm_dev_err(dev, state, " noirq", error);
748 }
749 }
750
751 mutex_lock(&dpm_list_mtx);
752 put_device(dev);
753 }
754 mutex_unlock(&dpm_list_mtx);
755 async_synchronize_full();
756 dpm_show_time(starttime, state, 0, "noirq");
757 trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
758 }
759
760 void dpm_noirq_end(void)
761 {
762 resume_device_irqs();
763 device_wakeup_disarm_wake_irqs();
764 cpuidle_resume();
765 }
766
767 /**
768 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
769 * @state: PM transition of the system being carried out.
770 *
771 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
772 * allow device drivers' interrupt handlers to be called.
773 */
774 void dpm_resume_noirq(pm_message_t state)
775 {
776 dpm_noirq_resume_devices(state);
777 dpm_noirq_end();
778 }
779
780 static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
781 pm_message_t state,
782 const char **info_p)
783 {
784 pm_callback_t callback;
785 const char *info;
786
787 if (dev->pm_domain) {
788 info = "early power domain ";
789 callback = pm_late_early_op(&dev->pm_domain->ops, state);
790 } else if (dev->type && dev->type->pm) {
791 info = "early type ";
792 callback = pm_late_early_op(dev->type->pm, state);
793 } else if (dev->class && dev->class->pm) {
794 info = "early class ";
795 callback = pm_late_early_op(dev->class->pm, state);
796 } else if (dev->bus && dev->bus->pm) {
797 info = "early bus ";
798 callback = pm_late_early_op(dev->bus->pm, state);
799 } else {
800 return NULL;
801 }
802
803 if (info_p)
804 *info_p = info;
805
806 return callback;
807 }
808
809 /**
810 * device_resume_early - Execute an "early resume" callback for given device.
811 * @dev: Device to handle.
812 * @state: PM transition of the system being carried out.
813 * @async: If true, the device is being resumed asynchronously.
814 *
815 * Runtime PM is disabled for @dev while this function is being executed.
816 */
817 static int device_resume_early(struct device *dev, pm_message_t state, bool async)
818 {
819 pm_callback_t callback;
820 const char *info;
821 int error = 0;
822
823 TRACE_DEVICE(dev);
824 TRACE_RESUME(0);
825
826 if (dev->power.syscore || dev->power.direct_complete)
827 goto Out;
828
829 if (!dev->power.is_late_suspended)
830 goto Out;
831
832 dpm_wait_for_superior(dev, async);
833
834 callback = dpm_subsys_resume_early_cb(dev, state, &info);
835
836 if (!callback && dev->driver && dev->driver->pm) {
837 info = "early driver ";
838 callback = pm_late_early_op(dev->driver->pm, state);
839 }
840
841 error = dpm_run_callback(callback, dev, state, info);
842 dev->power.is_late_suspended = false;
843
844 Out:
845 TRACE_RESUME(error);
846
847 pm_runtime_enable(dev);
848 complete_all(&dev->power.completion);
849 return error;
850 }
851
852 static void async_resume_early(void *data, async_cookie_t cookie)
853 {
854 struct device *dev = (struct device *)data;
855 int error;
856
857 error = device_resume_early(dev, pm_transition, true);
858 if (error)
859 pm_dev_err(dev, pm_transition, " async", error);
860
861 put_device(dev);
862 }
863
864 /**
865 * dpm_resume_early - Execute "early resume" callbacks for all devices.
866 * @state: PM transition of the system being carried out.
867 */
868 void dpm_resume_early(pm_message_t state)
869 {
870 struct device *dev;
871 ktime_t starttime = ktime_get();
872
873 trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
874 mutex_lock(&dpm_list_mtx);
875 pm_transition = state;
876
877 /*
878 * Advance the async threads upfront,
879 * in case the starting of async threads is
880 * delayed by non-async resuming devices.
881 */
882 list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
883 reinit_completion(&dev->power.completion);
884 if (is_async(dev)) {
885 get_device(dev);
886 async_schedule(async_resume_early, dev);
887 }
888 }
889
890 while (!list_empty(&dpm_late_early_list)) {
891 dev = to_device(dpm_late_early_list.next);
892 get_device(dev);
893 list_move_tail(&dev->power.entry, &dpm_suspended_list);
894 mutex_unlock(&dpm_list_mtx);
895
896 if (!is_async(dev)) {
897 int error;
898
899 error = device_resume_early(dev, state, false);
900 if (error) {
901 suspend_stats.failed_resume_early++;
902 dpm_save_failed_step(SUSPEND_RESUME_EARLY);
903 dpm_save_failed_dev(dev_name(dev));
904 pm_dev_err(dev, state, " early", error);
905 }
906 }
907 mutex_lock(&dpm_list_mtx);
908 put_device(dev);
909 }
910 mutex_unlock(&dpm_list_mtx);
911 async_synchronize_full();
912 dpm_show_time(starttime, state, 0, "early");
913 trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
914 }
915
916 /**
917 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
918 * @state: PM transition of the system being carried out.
919 */
920 void dpm_resume_start(pm_message_t state)
921 {
922 dpm_resume_noirq(state);
923 dpm_resume_early(state);
924 }
925 EXPORT_SYMBOL_GPL(dpm_resume_start);
926
927 /**
928 * device_resume - Execute "resume" callbacks for given device.
929 * @dev: Device to handle.
930 * @state: PM transition of the system being carried out.
931 * @async: If true, the device is being resumed asynchronously.
932 */
933 static int device_resume(struct device *dev, pm_message_t state, bool async)
934 {
935 pm_callback_t callback = NULL;
936 const char *info = NULL;
937 int error = 0;
938 DECLARE_DPM_WATCHDOG_ON_STACK(wd);
939
940 TRACE_DEVICE(dev);
941 TRACE_RESUME(0);
942
943 if (dev->power.syscore)
944 goto Complete;
945
946 if (dev->power.direct_complete) {
947 /* Match the pm_runtime_disable() in __device_suspend(). */
948 pm_runtime_enable(dev);
949 goto Complete;
950 }
951
952 dpm_wait_for_superior(dev, async);
953 dpm_watchdog_set(&wd, dev);
954 device_lock(dev);
955
956 /*
957 * This is a fib. But we'll allow new children to be added below
958 * a resumed device, even if the device hasn't been completed yet.
959 */
960 dev->power.is_prepared = false;
961
962 if (!dev->power.is_suspended)
963 goto Unlock;
964
965 if (dev->pm_domain) {
966 info = "power domain ";
967 callback = pm_op(&dev->pm_domain->ops, state);
968 goto Driver;
969 }
970
971 if (dev->type && dev->type->pm) {
972 info = "type ";
973 callback = pm_op(dev->type->pm, state);
974 goto Driver;
975 }
976
977 if (dev->class && dev->class->pm) {
978 info = "class ";
979 callback = pm_op(dev->class->pm, state);
980 goto Driver;
981 }
982
983 if (dev->bus) {
984 if (dev->bus->pm) {
985 info = "bus ";
986 callback = pm_op(dev->bus->pm, state);
987 } else if (dev->bus->resume) {
988 info = "legacy bus ";
989 callback = dev->bus->resume;
990 goto End;
991 }
992 }
993
994 Driver:
995 if (!callback && dev->driver && dev->driver->pm) {
996 info = "driver ";
997 callback = pm_op(dev->driver->pm, state);
998 }
999
1000 End:
1001 error = dpm_run_callback(callback, dev, state, info);
1002 dev->power.is_suspended = false;
1003
1004 Unlock:
1005 device_unlock(dev);
1006 dpm_watchdog_clear(&wd);
1007
1008 Complete:
1009 complete_all(&dev->power.completion);
1010
1011 TRACE_RESUME(error);
1012
1013 return error;
1014 }
1015
1016 static void async_resume(void *data, async_cookie_t cookie)
1017 {
1018 struct device *dev = (struct device *)data;
1019 int error;
1020
1021 error = device_resume(dev, pm_transition, true);
1022 if (error)
1023 pm_dev_err(dev, pm_transition, " async", error);
1024 put_device(dev);
1025 }
1026
1027 /**
1028 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
1029 * @state: PM transition of the system being carried out.
1030 *
1031 * Execute the appropriate "resume" callback for all devices whose status
1032 * indicates that they are suspended.
1033 */
1034 void dpm_resume(pm_message_t state)
1035 {
1036 struct device *dev;
1037 ktime_t starttime = ktime_get();
1038
1039 trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1040 might_sleep();
1041
1042 mutex_lock(&dpm_list_mtx);
1043 pm_transition = state;
1044 async_error = 0;
1045
1046 list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
1047 reinit_completion(&dev->power.completion);
1048 if (is_async(dev)) {
1049 get_device(dev);
1050 async_schedule(async_resume, dev);
1051 }
1052 }
1053
1054 while (!list_empty(&dpm_suspended_list)) {
1055 dev = to_device(dpm_suspended_list.next);
1056 get_device(dev);
1057 if (!is_async(dev)) {
1058 int error;
1059
1060 mutex_unlock(&dpm_list_mtx);
1061
1062 error = device_resume(dev, state, false);
1063 if (error) {
1064 suspend_stats.failed_resume++;
1065 dpm_save_failed_step(SUSPEND_RESUME);
1066 dpm_save_failed_dev(dev_name(dev));
1067 pm_dev_err(dev, state, "", error);
1068 }
1069
1070 mutex_lock(&dpm_list_mtx);
1071 }
1072 if (!list_empty(&dev->power.entry))
1073 list_move_tail(&dev->power.entry, &dpm_prepared_list);
1074 put_device(dev);
1075 }
1076 mutex_unlock(&dpm_list_mtx);
1077 async_synchronize_full();
1078 dpm_show_time(starttime, state, 0, NULL);
1079
1080 cpufreq_resume();
1081 trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1082 }
1083
1084 /**
1085 * device_complete - Complete a PM transition for given device.
1086 * @dev: Device to handle.
1087 * @state: PM transition of the system being carried out.
1088 */
1089 static void device_complete(struct device *dev, pm_message_t state)
1090 {
1091 void (*callback)(struct device *) = NULL;
1092 const char *info = NULL;
1093
1094 if (dev->power.syscore)
1095 return;
1096
1097 device_lock(dev);
1098
1099 if (dev->pm_domain) {
1100 info = "completing power domain ";
1101 callback = dev->pm_domain->ops.complete;
1102 } else if (dev->type && dev->type->pm) {
1103 info = "completing type ";
1104 callback = dev->type->pm->complete;
1105 } else if (dev->class && dev->class->pm) {
1106 info = "completing class ";
1107 callback = dev->class->pm->complete;
1108 } else if (dev->bus && dev->bus->pm) {
1109 info = "completing bus ";
1110 callback = dev->bus->pm->complete;
1111 }
1112
1113 if (!callback && dev->driver && dev->driver->pm) {
1114 info = "completing driver ";
1115 callback = dev->driver->pm->complete;
1116 }
1117
1118 if (callback) {
1119 pm_dev_dbg(dev, state, info);
1120 callback(dev);
1121 }
1122
1123 device_unlock(dev);
1124
1125 pm_runtime_put(dev);
1126 }
1127
1128 /**
1129 * dpm_complete - Complete a PM transition for all non-sysdev devices.
1130 * @state: PM transition of the system being carried out.
1131 *
1132 * Execute the ->complete() callbacks for all devices whose PM status is not
1133 * DPM_ON (this allows new devices to be registered).
1134 */
1135 void dpm_complete(pm_message_t state)
1136 {
1137 struct list_head list;
1138
1139 trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1140 might_sleep();
1141
1142 INIT_LIST_HEAD(&list);
1143 mutex_lock(&dpm_list_mtx);
1144 while (!list_empty(&dpm_prepared_list)) {
1145 struct device *dev = to_device(dpm_prepared_list.prev);
1146
1147 get_device(dev);
1148 dev->power.is_prepared = false;
1149 list_move(&dev->power.entry, &list);
1150 mutex_unlock(&dpm_list_mtx);
1151
1152 trace_device_pm_callback_start(dev, "", state.event);
1153 device_complete(dev, state);
1154 trace_device_pm_callback_end(dev, 0);
1155
1156 mutex_lock(&dpm_list_mtx);
1157 put_device(dev);
1158 }
1159 list_splice(&list, &dpm_list);
1160 mutex_unlock(&dpm_list_mtx);
1161
1162 /* Allow device probing and trigger re-probing of deferred devices */
1163 device_unblock_probing();
1164 trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1165 }
1166
1167 /**
1168 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1169 * @state: PM transition of the system being carried out.
1170 *
1171 * Execute "resume" callbacks for all devices and complete the PM transition of
1172 * the system.
1173 */
1174 void dpm_resume_end(pm_message_t state)
1175 {
1176 dpm_resume(state);
1177 dpm_complete(state);
1178 }
1179 EXPORT_SYMBOL_GPL(dpm_resume_end);
1180
1181
1182 /*------------------------- Suspend routines -------------------------*/
1183
1184 /**
1185 * resume_event - Return a "resume" message for given "suspend" sleep state.
1186 * @sleep_state: PM message representing a sleep state.
1187 *
1188 * Return a PM message representing the resume event corresponding to given
1189 * sleep state.
1190 */
1191 static pm_message_t resume_event(pm_message_t sleep_state)
1192 {
1193 switch (sleep_state.event) {
1194 case PM_EVENT_SUSPEND:
1195 return PMSG_RESUME;
1196 case PM_EVENT_FREEZE:
1197 case PM_EVENT_QUIESCE:
1198 return PMSG_RECOVER;
1199 case PM_EVENT_HIBERNATE:
1200 return PMSG_RESTORE;
1201 }
1202 return PMSG_ON;
1203 }
1204
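/* Mark the parent and the suppliers of @dev as needing to be resumed. */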
1205 static void dpm_superior_set_must_resume(struct device *dev)
1206 {
1207 struct device_link *link;
1208 int idx;
1209
1210 if (dev->parent)
1211 dev->parent->power.must_resume = true;
1212
1213 idx = device_links_read_lock();
1214
1215 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
1216 link->supplier->power.must_resume = true;
1217
1218 device_links_read_unlock(idx);
1219 }
1220
1221 static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
1222 pm_message_t state,
1223 const char **info_p)
1224 {
1225 pm_callback_t callback;
1226 const char *info;
1227
1228 if (dev->pm_domain) {
1229 info = "noirq power domain ";
1230 callback = pm_noirq_op(&dev->pm_domain->ops, state);
1231 } else if (dev->type && dev->type->pm) {
1232 info = "noirq type ";
1233 callback = pm_noirq_op(dev->type->pm, state);
1234 } else if (dev->class && dev->class->pm) {
1235 info = "noirq class ";
1236 callback = pm_noirq_op(dev->class->pm, state);
1237 } else if (dev->bus && dev->bus->pm) {
1238 info = "noirq bus ";
1239 callback = pm_noirq_op(dev->bus->pm, state);
1240 } else {
1241 return NULL;
1242 }
1243
1244 if (info_p)
1245 *info_p = info;
1246
1247 return callback;
1248 }
1249
1250 static bool device_must_resume(struct device *dev, pm_message_t state,
1251 bool no_subsys_suspend_noirq)
1252 {
1253 pm_message_t resume_msg = resume_event(state);
1254
1255 /*
1256 * If all of the device driver's "noirq", "late" and "early" callbacks
1257 * are invoked directly by the core, the decision to allow the device to
1258 * stay in suspend can be based on its current runtime PM status and its
1259 * wakeup settings.
1260 */
1261 if (no_subsys_suspend_noirq &&
1262 !dpm_subsys_suspend_late_cb(dev, state, NULL) &&
1263 !dpm_subsys_resume_early_cb(dev, resume_msg, NULL) &&
1264 !dpm_subsys_resume_noirq_cb(dev, resume_msg, NULL))
1265 return !pm_runtime_status_suspended(dev) &&
1266 (resume_msg.event != PM_EVENT_RESUME ||
1267 (device_can_wakeup(dev) && !device_may_wakeup(dev)));
1268
1269 /*
1270 * The only safe strategy here is to require that if the device may not
1271 * be left in suspend, resume callbacks must be invoked for it.
1272 */
1273 return !dev->power.may_skip_resume;
1274 }
1275
1276 /**
1277 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1278 * @dev: Device to handle.
1279 * @state: PM transition of the system being carried out.
1280 * @async: If true, the device is being suspended asynchronously.
1281 *
1282 * The driver of @dev will not receive interrupts while this function is being
1283 * executed.
1284 */
1285 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1286 {
1287 pm_callback_t callback;
1288 const char *info;
1289 bool no_subsys_cb = false;
1290 int error = 0;
1291
1292 TRACE_DEVICE(dev);
1293 TRACE_SUSPEND(0);
1294
1295 dpm_wait_for_subordinate(dev, async);
1296
1297 if (async_error)
1298 goto Complete;
1299
1300 if (pm_wakeup_pending()) {
1301 async_error = -EBUSY;
1302 goto Complete;
1303 }
1304
1305 if (dev->power.syscore || dev->power.direct_complete)
1306 goto Complete;
1307
1308 callback = dpm_subsys_suspend_noirq_cb(dev, state, &info);
1309 if (callback)
1310 goto Run;
1311
1312 no_subsys_cb = !dpm_subsys_suspend_late_cb(dev, state, NULL);
1313
1314 if (dev_pm_smart_suspend_and_suspended(dev) && no_subsys_cb)
1315 goto Skip;
1316
1317 if (dev->driver && dev->driver->pm) {
1318 info = "noirq driver ";
1319 callback = pm_noirq_op(dev->driver->pm, state);
1320 }
1321
1322 Run:
1323 error = dpm_run_callback(callback, dev, state, info);
1324 if (error) {
1325 async_error = error;
1326 goto Complete;
1327 }
1328
1329 Skip:
1330 dev->power.is_noirq_suspended = true;
1331
1332 if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
1333 dev->power.must_resume = dev->power.must_resume ||
1334 atomic_read(&dev->power.usage_count) > 1 ||
1335 device_must_resume(dev, state, no_subsys_cb);
1336 } else {
1337 dev->power.must_resume = true;
1338 }
1339
1340 if (dev->power.must_resume)
1341 dpm_superior_set_must_resume(dev);
1342
1343 Complete:
1344 complete_all(&dev->power.completion);
1345 TRACE_SUSPEND(error);
1346 return error;
1347 }
1348
1349 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1350 {
1351 struct device *dev = (struct device *)data;
1352 int error;
1353
1354 error = __device_suspend_noirq(dev, pm_transition, true);
1355 if (error) {
1356 dpm_save_failed_dev(dev_name(dev));
1357 pm_dev_err(dev, pm_transition, " async", error);
1358 }
1359
1360 put_device(dev);
1361 }
1362
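/* Start a "noirq suspend" of @dev, asynchronously if that is allowed. */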
1363 static int device_suspend_noirq(struct device *dev)
1364 {
1365 reinit_completion(&dev->power.completion);
1366
1367 if (is_async(dev)) {
1368 get_device(dev);
1369 async_schedule(async_suspend_noirq, dev);
1370 return 0;
1371 }
1372 return __device_suspend_noirq(dev, pm_transition, false);
1373 }
1374
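/* Enter the "noirq" phase: pause cpuidle, arm wake IRQs, disable device IRQs. */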
1375 void dpm_noirq_begin(void)
1376 {
1377 cpuidle_pause();
1378 device_wakeup_arm_wake_irqs();
1379 suspend_device_irqs();
1380 }
1381
1382 int dpm_noirq_suspend_devices(pm_message_t state)
1383 {
1384 ktime_t starttime = ktime_get();
1385 int error = 0;
1386
1387 trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1388 mutex_lock(&dpm_list_mtx);
1389 pm_transition = state;
1390 async_error = 0;
1391
1392 while (!list_empty(&dpm_late_early_list)) {
1393 struct device *dev = to_device(dpm_late_early_list.prev);
1394
1395 get_device(dev);
1396 mutex_unlock(&dpm_list_mtx);
1397
1398 error = device_suspend_noirq(dev);
1399
1400 mutex_lock(&dpm_list_mtx);
1401 if (error) {
1402 pm_dev_err(dev, state, " noirq", error);
1403 dpm_save_failed_dev(dev_name(dev));
1404 put_device(dev);
1405 break;
1406 }
1407 if (!list_empty(&dev->power.entry))
1408 list_move(&dev->power.entry, &dpm_noirq_list);
1409 put_device(dev);
1410
1411 if (async_error)
1412 break;
1413 }
1414 mutex_unlock(&dpm_list_mtx);
1415 async_synchronize_full();
1416 if (!error)
1417 error = async_error;
1418
1419 if (error) {
1420 suspend_stats.failed_suspend_noirq++;
1421 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1422 }
1423 dpm_show_time(starttime, state, error, "noirq");
1424 trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1425 return error;
1426 }
1427
1428 /**
1429 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1430 * @state: PM transition of the system being carried out.
1431 *
1432 * Prevent device drivers' interrupt handlers from being called and invoke
1433 * "noirq" suspend callbacks for all non-sysdev devices.
1434 */
1435 int dpm_suspend_noirq(pm_message_t state)
1436 {
1437 int ret;
1438
1439 dpm_noirq_begin();
1440 ret = dpm_noirq_suspend_devices(state);
1441 if (ret)
1442 dpm_resume_noirq(resume_event(state));
1443
1444 return ret;
1445 }
1446
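/*
 * If @dev is in the wakeup path, propagate that to its parent unless the
 * parent ignores its children.
 */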
1447 static void dpm_propagate_wakeup_to_parent(struct device *dev)
1448 {
1449 struct device *parent = dev->parent;
1450
1451 if (!parent)
1452 return;
1453
1454 spin_lock_irq(&parent->power.lock);
1455
1456 if (dev->power.wakeup_path && !parent->power.ignore_children)
1457 parent->power.wakeup_path = true;
1458
1459 spin_unlock_irq(&parent->power.lock);
1460 }
1461
1462 static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
1463 pm_message_t state,
1464 const char **info_p)
1465 {
1466 pm_callback_t callback;
1467 const char *info;
1468
1469 if (dev->pm_domain) {
1470 info = "late power domain ";
1471 callback = pm_late_early_op(&dev->pm_domain->ops, state);
1472 } else if (dev->type && dev->type->pm) {
1473 info = "late type ";
1474 callback = pm_late_early_op(dev->type->pm, state);
1475 } else if (dev->class && dev->class->pm) {
1476 info = "late class ";
1477 callback = pm_late_early_op(dev->class->pm, state);
1478 } else if (dev->bus && dev->bus->pm) {
1479 info = "late bus ";
1480 callback = pm_late_early_op(dev->bus->pm, state);
1481 } else {
1482 return NULL;
1483 }
1484
1485 if (info_p)
1486 *info_p = info;
1487
1488 return callback;
1489 }
1490
1491 /**
1492 * __device_suspend_late - Execute a "late suspend" callback for given device.
1493 * @dev: Device to handle.
1494 * @state: PM transition of the system being carried out.
1495 * @async: If true, the device is being suspended asynchronously.
1496 *
1497 * Runtime PM is disabled for @dev while this function is being executed.
1498 */
1499 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1500 {
1501 pm_callback_t callback;
1502 const char *info;
1503 int error = 0;
1504
1505 TRACE_DEVICE(dev);
1506 TRACE_SUSPEND(0);
1507
1508 __pm_runtime_disable(dev, false);
1509
1510 dpm_wait_for_subordinate(dev, async);
1511
1512 if (async_error)
1513 goto Complete;
1514
1515 if (pm_wakeup_pending()) {
1516 async_error = -EBUSY;
1517 goto Complete;
1518 }
1519
1520 if (dev->power.syscore || dev->power.direct_complete)
1521 goto Complete;
1522
1523 callback = dpm_subsys_suspend_late_cb(dev, state, &info);
1524 if (callback)
1525 goto Run;
1526
1527 if (dev_pm_smart_suspend_and_suspended(dev) &&
1528 !dpm_subsys_suspend_noirq_cb(dev, state, NULL))
1529 goto Skip;
1530
1531 if (dev->driver && dev->driver->pm) {
1532 info = "late driver ";
1533 callback = pm_late_early_op(dev->driver->pm, state);
1534 }
1535
1536 Run:
1537 error = dpm_run_callback(callback, dev, state, info);
1538 if (error) {
1539 async_error = error;
1540 goto Complete;
1541 }
1542 dpm_propagate_wakeup_to_parent(dev);
1543
1544 Skip:
1545 dev->power.is_late_suspended = true;
1546
1547 Complete:
1548 TRACE_SUSPEND(error);
1549 complete_all(&dev->power.completion);
1550 return error;
1551 }
1552
1553 static void async_suspend_late(void *data, async_cookie_t cookie)
1554 {
1555 struct device *dev = (struct device *)data;
1556 int error;
1557
1558 error = __device_suspend_late(dev, pm_transition, true);
1559 if (error) {
1560 dpm_save_failed_dev(dev_name(dev));
1561 pm_dev_err(dev, pm_transition, " async", error);
1562 }
1563 put_device(dev);
1564 }
1565
1566 static int device_suspend_late(struct device *dev)
1567 {
1568 reinit_completion(&dev->power.completion);
1569
1570 if (is_async(dev)) {
1571 get_device(dev);
1572 async_schedule(async_suspend_late, dev);
1573 return 0;
1574 }
1575
1576 return __device_suspend_late(dev, pm_transition, false);
1577 }
1578
1579 /**
1580 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1581 * @state: PM transition of the system being carried out.
1582 */
1583 int dpm_suspend_late(pm_message_t state)
1584 {
1585 ktime_t starttime = ktime_get();
1586 int error = 0;
1587
1588 trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1589 mutex_lock(&dpm_list_mtx);
1590 pm_transition = state;
1591 async_error = 0;
1592
1593 while (!list_empty(&dpm_suspended_list)) {
1594 struct device *dev = to_device(dpm_suspended_list.prev);
1595
1596 get_device(dev);
1597 mutex_unlock(&dpm_list_mtx);
1598
1599 error = device_suspend_late(dev);
1600
1601 mutex_lock(&dpm_list_mtx);
1602 if (!list_empty(&dev->power.entry))
1603 list_move(&dev->power.entry, &dpm_late_early_list);
1604
1605 if (error) {
1606 pm_dev_err(dev, state, " late", error);
1607 dpm_save_failed_dev(dev_name(dev));
1608 put_device(dev);
1609 break;
1610 }
1611 put_device(dev);
1612
1613 if (async_error)
1614 break;
1615 }
1616 mutex_unlock(&dpm_list_mtx);
1617 async_synchronize_full();
1618 if (!error)
1619 error = async_error;
1620 if (error) {
1621 suspend_stats.failed_suspend_late++;
1622 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1623 dpm_resume_early(resume_event(state));
1624 }
1625 dpm_show_time(starttime, state, error, "late");
1626 trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1627 return error;
1628 }
1629
1630 /**
1631 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1632 * @state: PM transition of the system being carried out.
1633 */
1634 int dpm_suspend_end(pm_message_t state)
1635 {
1636 int error = dpm_suspend_late(state);
1637 if (error)
1638 return error;
1639
1640 error = dpm_suspend_noirq(state);
1641 if (error) {
1642 dpm_resume_early(resume_event(state));
1643 return error;
1644 }
1645
1646 return 0;
1647 }
1648 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1649
1650 /**
1651 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1652 * @dev: Device to suspend.
1653 * @state: PM transition of the system being carried out.
1654 * @cb: Suspend callback to execute.
1655 * @info: string description of caller.
1656 */
1657 static int legacy_suspend(struct device *dev, pm_message_t state,
1658 int (*cb)(struct device *dev, pm_message_t state),
1659 const char *info)
1660 {
1661 int error;
1662 ktime_t calltime;
1663
1664 calltime = initcall_debug_start(dev, cb);
1665
1666 trace_device_pm_callback_start(dev, info, state.event);
1667 error = cb(dev, state);
1668 trace_device_pm_callback_end(dev, error);
1669 suspend_report_result(cb, error);
1670
1671 initcall_debug_report(dev, calltime, cb, error);
1672
1673 return error;
1674 }
1675
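/*
 * Prevent the parent and the suppliers of @dev from taking the direct_complete
 * shortcut.
 */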
1676 static void dpm_clear_superiors_direct_complete(struct device *dev)
1677 {
1678 struct device_link *link;
1679 int idx;
1680
1681 if (dev->parent) {
1682 spin_lock_irq(&dev->parent->power.lock);
1683 dev->parent->power.direct_complete = false;
1684 spin_unlock_irq(&dev->parent->power.lock);
1685 }
1686
1687 idx = device_links_read_lock();
1688
1689 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
1690 spin_lock_irq(&link->supplier->power.lock);
1691 link->supplier->power.direct_complete = false;
1692 spin_unlock_irq(&link->supplier->power.lock);
1693 }
1694
1695 device_links_read_unlock(idx);
1696 }
1697
1698 /**
1699 * __device_suspend - Execute "suspend" callbacks for given device.
1700 * @dev: Device to handle.
1701 * @state: PM transition of the system being carried out.
1702 * @async: If true, the device is being suspended asynchronously.
1703 */
1704 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1705 {
1706 pm_callback_t callback = NULL;
1707 const char *info = NULL;
1708 int error = 0;
1709 DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1710
1711 TRACE_DEVICE(dev);
1712 TRACE_SUSPEND(0);
1713
1714 dpm_wait_for_subordinate(dev, async);
1715
1716 if (async_error) {
1717 dev->power.direct_complete = false;
1718 goto Complete;
1719 }
1720
1721 /*
1722 * If a device configured to wake up the system from sleep states
1723 * has been suspended at run time and there's a resume request pending
1724 * for it, this is equivalent to the device signaling wakeup, so the
1725 * system suspend operation should be aborted.
1726 */
1727 if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
1728 pm_wakeup_event(dev, 0);
1729
1730 if (pm_wakeup_pending()) {
1731 dev->power.direct_complete = false;
1732 async_error = -EBUSY;
1733 goto Complete;
1734 }
1735
1736 if (dev->power.syscore)
1737 goto Complete;
1738
1739 if (dev->power.direct_complete) {
1740 if (pm_runtime_status_suspended(dev)) {
1741 pm_runtime_disable(dev);
1742 if (pm_runtime_status_suspended(dev))
1743 goto Complete;
1744
1745 pm_runtime_enable(dev);
1746 }
1747 dev->power.direct_complete = false;
1748 }
1749
1750 dev->power.may_skip_resume = false;
1751 dev->power.must_resume = false;
1752
1753 dpm_watchdog_set(&wd, dev);
1754 device_lock(dev);
1755
1756 if (dev->pm_domain) {
1757 info = "power domain ";
1758 callback = pm_op(&dev->pm_domain->ops, state);
1759 goto Run;
1760 }
1761
1762 if (dev->type && dev->type->pm) {
1763 info = "type ";
1764 callback = pm_op(dev->type->pm, state);
1765 goto Run;
1766 }
1767
1768 if (dev->class && dev->class->pm) {
1769 info = "class ";
1770 callback = pm_op(dev->class->pm, state);
1771 goto Run;
1772 }
1773
1774 if (dev->bus) {
1775 if (dev->bus->pm) {
1776 info = "bus ";
1777 callback = pm_op(dev->bus->pm, state);
1778 } else if (dev->bus->suspend) {
1779 pm_dev_dbg(dev, state, "legacy bus ");
1780 error = legacy_suspend(dev, state, dev->bus->suspend,
1781 "legacy bus ");
1782 goto End;
1783 }
1784 }
1785
1786 Run:
1787 if (!callback && dev->driver && dev->driver->pm) {
1788 info = "driver ";
1789 callback = pm_op(dev->driver->pm, state);
1790 }
1791
1792 error = dpm_run_callback(callback, dev, state, info);
1793
1794 End:
1795 if (!error) {
1796 dev->power.is_suspended = true;
1797 if (device_may_wakeup(dev))
1798 dev->power.wakeup_path = true;
1799
1800 dpm_propagate_wakeup_to_parent(dev);
1801 dpm_clear_superiors_direct_complete(dev);
1802 }
1803
1804 device_unlock(dev);
1805 dpm_watchdog_clear(&wd);
1806
1807 Complete:
1808 if (error)
1809 async_error = error;
1810
1811 complete_all(&dev->power.completion);
1812 TRACE_SUSPEND(error);
1813 return error;
1814 }
1815
1816 static void async_suspend(void *data, async_cookie_t cookie)
1817 {
1818 struct device *dev = (struct device *)data;
1819 int error;
1820
1821 error = __device_suspend(dev, pm_transition, true);
1822 if (error) {
1823 dpm_save_failed_dev(dev_name(dev));
1824 pm_dev_err(dev, pm_transition, " async", error);
1825 }
1826
1827 put_device(dev);
1828 }
1829
1830 static int device_suspend(struct device *dev)
1831 {
1832 reinit_completion(&dev->power.completion);
1833
1834 if (is_async(dev)) {
1835 get_device(dev);
1836 async_schedule(async_suspend, dev);
1837 return 0;
1838 }
1839
1840 return __device_suspend(dev, pm_transition, false);
1841 }
1842
1843 /**
1844 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1845 * @state: PM transition of the system being carried out.
1846 */
1847 int dpm_suspend(pm_message_t state)
1848 {
1849 ktime_t starttime = ktime_get();
1850 int error = 0;
1851
1852 trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1853 might_sleep();
1854
1855 cpufreq_suspend();
1856
1857 mutex_lock(&dpm_list_mtx);
1858 pm_transition = state;
1859 async_error = 0;
1860 while (!list_empty(&dpm_prepared_list)) {
1861 struct device *dev = to_device(dpm_prepared_list.prev);
1862
1863 get_device(dev);
1864 mutex_unlock(&dpm_list_mtx);
1865
1866 error = device_suspend(dev);
1867
1868 mutex_lock(&dpm_list_mtx);
1869 if (error) {
1870 pm_dev_err(dev, state, "", error);
1871 dpm_save_failed_dev(dev_name(dev));
1872 put_device(dev);
1873 break;
1874 }
1875 if (!list_empty(&dev->power.entry))
1876 list_move(&dev->power.entry, &dpm_suspended_list);
1877 put_device(dev);
1878 if (async_error)
1879 break;
1880 }
1881 mutex_unlock(&dpm_list_mtx);
1882 async_synchronize_full();
1883 if (!error)
1884 error = async_error;
1885 if (error) {
1886 suspend_stats.failed_suspend++;
1887 dpm_save_failed_step(SUSPEND_SUSPEND);
1888 }
1889 dpm_show_time(starttime, state, error, NULL);
1890 trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1891 return error;
1892 }
1893
1894 /**
1895 * device_prepare - Prepare a device for system power transition.
1896 * @dev: Device to handle.
1897 * @state: PM transition of the system being carried out.
1898 *
1899  * Execute the ->prepare() callback(s) for the given device. No new children of the
1900 * device may be registered after this function has returned.
1901 */
1902 static int device_prepare(struct device *dev, pm_message_t state)
1903 {
1904 int (*callback)(struct device *) = NULL;
1905 int ret = 0;
1906
1907 if (dev->power.syscore)
1908 return 0;
1909
1910 WARN_ON(!pm_runtime_enabled(dev) &&
1911 dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
1912 DPM_FLAG_LEAVE_SUSPENDED));
1913
1914 /*
1915 * If a device's parent goes into runtime suspend at the wrong time,
1916 * it won't be possible to resume the device. To prevent this we
1917 * block runtime suspend here, during the prepare phase, and allow
1918 * it again during the complete phase.
1919 */
1920 pm_runtime_get_noresume(dev);
1921
1922 device_lock(dev);
1923
1924 dev->power.wakeup_path = false;
1925
1926 if (dev->power.no_pm_callbacks)
1927 goto unlock;
1928
1929 if (dev->pm_domain)
1930 callback = dev->pm_domain->ops.prepare;
1931 else if (dev->type && dev->type->pm)
1932 callback = dev->type->pm->prepare;
1933 else if (dev->class && dev->class->pm)
1934 callback = dev->class->pm->prepare;
1935 else if (dev->bus && dev->bus->pm)
1936 callback = dev->bus->pm->prepare;
1937
1938 if (!callback && dev->driver && dev->driver->pm)
1939 callback = dev->driver->pm->prepare;
1940
1941 if (callback)
1942 ret = callback(dev);
1943
1944 unlock:
1945 device_unlock(dev);
1946
1947 if (ret < 0) {
1948 suspend_report_result(callback, ret);
1949 pm_runtime_put(dev);
1950 return ret;
1951 }
1952 /*
1953 * A positive return value from ->prepare() means "this device appears
1954 * to be runtime-suspended and its state is fine, so if it really is
1955 * runtime-suspended, you can leave it in that state provided that you
1956 * will do the same thing with all of its descendants". This only
1957 * applies to suspend transitions, however.
1958 */
1959 spin_lock_irq(&dev->power.lock);
1960 dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1961 ((pm_runtime_suspended(dev) && ret > 0) ||
1962 dev->power.no_pm_callbacks) &&
1963 !dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
1964 spin_unlock_irq(&dev->power.lock);
1965 return 0;
1966 }
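
/*
 * Illustrative sketch (not part of this file): a driver that must always be
 * suspended and resumed through its callbacks, regardless of a positive
 * ->prepare() return value, can opt out of the direct-complete optimization
 * set up above.  foo_probe() is hypothetical:
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		dev_pm_set_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
 *		return 0;
 *	}
 */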
1967
1968 /**
1969 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1970 * @state: PM transition of the system being carried out.
1971 *
1972 * Execute the ->prepare() callback(s) for all devices.
1973 */
1974 int dpm_prepare(pm_message_t state)
1975 {
1976 int error = 0;
1977
1978 trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1979 might_sleep();
1980
1981 /*
1982 	 * Give the known devices a chance to complete their probes before we
1983 	 * disable further probing.  This synchronization point is important at
1984 	 * least at boot time and during hibernation restore.
1985 */
1986 wait_for_device_probe();
1987 /*
1988 	 * It is unsafe to probe devices while a suspend or hibernation
1989 	 * transition is in progress, since system behavior would then be
1990 	 * unpredictable.  Prohibit device probing here and defer the probes
1991 	 * instead.  Normal behavior is restored in dpm_complete().
1992 */
1993 device_block_probing();
1994
1995 mutex_lock(&dpm_list_mtx);
1996 while (!list_empty(&dpm_list)) {
1997 struct device *dev = to_device(dpm_list.next);
1998
1999 get_device(dev);
2000 mutex_unlock(&dpm_list_mtx);
2001
2002 trace_device_pm_callback_start(dev, "", state.event);
2003 error = device_prepare(dev, state);
2004 trace_device_pm_callback_end(dev, error);
2005
2006 mutex_lock(&dpm_list_mtx);
2007 if (error) {
2008 if (error == -EAGAIN) {
2009 put_device(dev);
2010 error = 0;
2011 continue;
2012 }
2013 			printk(KERN_INFO
2014 			       "PM: Device %s not prepared for power transition: code %d\n",
2015 			       dev_name(dev), error);
2016 put_device(dev);
2017 break;
2018 }
2019 dev->power.is_prepared = true;
2020 if (!list_empty(&dev->power.entry))
2021 list_move_tail(&dev->power.entry, &dpm_prepared_list);
2022 put_device(dev);
2023 }
2024 mutex_unlock(&dpm_list_mtx);
2025 trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
2026 return error;
2027 }
2028
2029 /**
2030 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
2031 * @state: PM transition of the system being carried out.
2032 *
2033 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
2034 * callbacks for them.
2035 */
2036 int dpm_suspend_start(pm_message_t state)
2037 {
2038 int error;
2039
2040 error = dpm_prepare(state);
2041 if (error) {
2042 suspend_stats.failed_prepare++;
2043 dpm_save_failed_step(SUSPEND_PREPARE);
2044 } else
2045 error = dpm_suspend(state);
2046 return error;
2047 }
2048 EXPORT_SYMBOL_GPL(dpm_suspend_start);
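
/*
 * Illustrative sketch (not part of this file): callers such as the system
 * suspend core pair dpm_suspend_start() with dpm_resume_end() on the error
 * path, roughly as follows:
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);
 *	if (error) {
 *		...
 *		dpm_resume_end(PMSG_RESUME);
 *	}
 */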
2049
2050 void __suspend_report_result(const char *function, void *fn, int ret)
2051 {
2052 if (ret)
2053 printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
2054 }
2055 EXPORT_SYMBOL_GPL(__suspend_report_result);
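
/*
 * Illustrative sketch (not part of this file): drivers normally reach
 * __suspend_report_result() through the suspend_report_result() macro, which
 * supplies the calling function's name.  foo_hw_off() is hypothetical:
 *
 *	error = foo_hw_off(dev);
 *	suspend_report_result(foo_hw_off, error);
 */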
2056
2057 /**
2058 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
2059  * @subordinate: Device that needs to wait for @dev.
2060  * @dev: Device to wait for.
2061 */
2062 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
2063 {
2064 dpm_wait(dev, subordinate->power.async_suspend);
2065 return async_error;
2066 }
2067 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
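
/*
 * Illustrative sketch (not part of this file): a driver whose device depends
 * on another device that is neither its parent nor linked to it may call this
 * from its own PM callback to wait until the PM core has finished handling
 * the other device.  consumer_resume() and the supplier pointer below are
 * hypothetical; with device links the core usually handles this ordering:
 *
 *	static int consumer_resume(struct device *dev)
 *	{
 *		int error = device_pm_wait_for_dev(dev, supplier);
 *
 *		if (error)
 *			return error;
 *		...
 *	}
 */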
2068
2069 /**
2070 * dpm_for_each_dev - device iterator.
2071 * @data: data for the callback.
2072 * @fn: function to be called for each device.
2073 *
2074 * Iterate over devices in dpm_list, and call @fn for each device,
2075 * passing it @data.
2076 */
2077 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
2078 {
2079 struct device *dev;
2080
2081 if (!fn)
2082 return;
2083
2084 device_pm_lock();
2085 list_for_each_entry(dev, &dpm_list, power.entry)
2086 fn(dev, data);
2087 device_pm_unlock();
2088 }
2089 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
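
/*
 * Illustrative sketch (not part of this file): a hypothetical caller that
 * counts the devices currently on dpm_list:
 *
 *	static void count_dev(struct device *dev, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *	}
 *
 *	unsigned int count = 0;
 *
 *	dpm_for_each_dev(&count, count_dev);
 */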
2090
2091 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
2092 {
2093 if (!ops)
2094 return true;
2095
2096 return !ops->prepare &&
2097 !ops->suspend &&
2098 !ops->suspend_late &&
2099 !ops->suspend_noirq &&
2100 !ops->resume_noirq &&
2101 !ops->resume_early &&
2102 !ops->resume &&
2103 !ops->complete;
2104 }
2105
2106 void device_pm_check_callbacks(struct device *dev)
2107 {
2108 spin_lock_irq(&dev->power.lock);
2109 dev->power.no_pm_callbacks =
2110 (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2111 !dev->bus->suspend && !dev->bus->resume)) &&
2112 (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2113 (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2114 (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2115 (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2116 !dev->driver->suspend && !dev->driver->resume));
2117 spin_unlock_irq(&dev->power.lock);
2118 }
2119
2120 bool dev_pm_smart_suspend_and_suspended(struct device *dev)
2121 {
2122 return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
2123 pm_runtime_status_suspended(dev);
2124 }
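
/*
 * Illustrative sketch (not part of this file): a bus type's late or noirq
 * suspend callback may use this helper to skip callbacks for devices that
 * set DPM_FLAG_SMART_SUSPEND and are already runtime-suspended.
 * foo_bus_suspend_late() is hypothetical:
 *
 *	static int foo_bus_suspend_late(struct device *dev)
 *	{
 *		if (dev_pm_smart_suspend_and_suspended(dev))
 *			return 0;
 *		...
 *	}
 */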
2125