Lines Matching refs:dev — cross-reference search hits for the identifier `dev`, drawn (to judge by the function names) from the Linux kernel's runtime PM core, drivers/base/power/runtime.c. Each hit reads: <line number in the source file> <source line> in <enclosing function>, and lines that define `dev` are tagged "argument" or "local".

21 static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)  in __rpm_get_callback()  argument
26 if (dev->pm_domain) in __rpm_get_callback()
27 ops = &dev->pm_domain->ops; in __rpm_get_callback()
28 else if (dev->type && dev->type->pm) in __rpm_get_callback()
29 ops = dev->type->pm; in __rpm_get_callback()
30 else if (dev->class && dev->class->pm) in __rpm_get_callback()
31 ops = dev->class->pm; in __rpm_get_callback()
32 else if (dev->bus && dev->bus->pm) in __rpm_get_callback()
33 ops = dev->bus->pm; in __rpm_get_callback()
42 if (!cb && dev->driver && dev->driver->pm) in __rpm_get_callback()
43 cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset); in __rpm_get_callback()
48 #define RPM_GET_CALLBACK(dev, callback) \ argument
49 __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
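The lookup above resolves a runtime PM callback in a fixed precedence order: PM domain, then device type, then class, then bus; the driver's own dev_pm_ops is consulted only as a fallback when none of those layers supplies one. A minimal sketch of the driver side, assuming a platform device (all foo_* names are hypothetical):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

/* Hypothetical callbacks; reached through RPM_GET_CALLBACK() only when
 * no PM domain/type/class/bus dev_pm_ops overrides them. */
static int foo_runtime_suspend(struct device *dev)
{
	/* quiesce the hardware, gate clocks, save context */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* ungate clocks, restore context */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		.pm = &foo_pm_ops,
	},
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL");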
51 static int rpm_resume(struct device *dev, int rpmflags);
52 static int rpm_suspend(struct device *dev, int rpmflags);
65 static void update_pm_runtime_accounting(struct device *dev) in update_pm_runtime_accounting() argument
69 if (dev->power.disable_depth > 0) in update_pm_runtime_accounting()
72 last = dev->power.accounting_timestamp; in update_pm_runtime_accounting()
75 dev->power.accounting_timestamp = now; in update_pm_runtime_accounting()
87 if (dev->power.runtime_status == RPM_SUSPENDED) in update_pm_runtime_accounting()
88 dev->power.suspended_time += delta; in update_pm_runtime_accounting()
90 dev->power.active_time += delta; in update_pm_runtime_accounting()
93 static void __update_runtime_status(struct device *dev, enum rpm_status status) in __update_runtime_status() argument
95 update_pm_runtime_accounting(dev); in __update_runtime_status()
96 dev->power.runtime_status = status; in __update_runtime_status()
99 static u64 rpm_get_accounted_time(struct device *dev, bool suspended) in rpm_get_accounted_time() argument
104 spin_lock_irqsave(&dev->power.lock, flags); in rpm_get_accounted_time()
106 update_pm_runtime_accounting(dev); in rpm_get_accounted_time()
107 time = suspended ? dev->power.suspended_time : dev->power.active_time; in rpm_get_accounted_time()
109 spin_unlock_irqrestore(&dev->power.lock, flags); in rpm_get_accounted_time()
114 u64 pm_runtime_active_time(struct device *dev) in pm_runtime_active_time() argument
116 return rpm_get_accounted_time(dev, false); in pm_runtime_active_time()
119 u64 pm_runtime_suspended_time(struct device *dev) in pm_runtime_suspended_time() argument
121 return rpm_get_accounted_time(dev, true); in pm_runtime_suspended_time()
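update_pm_runtime_accounting() charges the time elapsed since the previous accounting timestamp to suspended_time or active_time according to runtime_status, and the two helpers above read those counters under power.lock. The same counters are exposed per device in sysfs as runtime_active_time / runtime_suspended_time (milliseconds); a minimal user-space reader, assuming a placeholder device path:

#include <stdio.h>

int main(void)
{
	/* Placeholder path; substitute a real device directory. */
	const char *path =
		"/sys/devices/platform/foo/power/runtime_active_time";
	unsigned long long ms;
	FILE *f = fopen(path, "r");

	if (!f)
		return 1;
	if (fscanf(f, "%llu", &ms) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("runtime-active for %llu ms\n", ms);
	return 0;
}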
129 static void pm_runtime_deactivate_timer(struct device *dev) in pm_runtime_deactivate_timer() argument
131 if (dev->power.timer_expires > 0) { in pm_runtime_deactivate_timer()
132 hrtimer_try_to_cancel(&dev->power.suspend_timer); in pm_runtime_deactivate_timer()
133 dev->power.timer_expires = 0; in pm_runtime_deactivate_timer()
141 static void pm_runtime_cancel_pending(struct device *dev) in pm_runtime_cancel_pending() argument
143 pm_runtime_deactivate_timer(dev); in pm_runtime_cancel_pending()
148 dev->power.request = RPM_REQ_NONE; in pm_runtime_cancel_pending()
163 u64 pm_runtime_autosuspend_expiration(struct device *dev) in pm_runtime_autosuspend_expiration() argument
168 if (!dev->power.use_autosuspend) in pm_runtime_autosuspend_expiration()
171 autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay); in pm_runtime_autosuspend_expiration()
175 expires = READ_ONCE(dev->power.last_busy); in pm_runtime_autosuspend_expiration()
184 static int dev_memalloc_noio(struct device *dev, void *data) in dev_memalloc_noio() argument
186 return dev->power.memalloc_noio; in dev_memalloc_noio()
217 void pm_runtime_set_memalloc_noio(struct device *dev, bool enable) in pm_runtime_set_memalloc_noio() argument
226 spin_lock_irq(&dev->power.lock); in pm_runtime_set_memalloc_noio()
227 enabled = dev->power.memalloc_noio; in pm_runtime_set_memalloc_noio()
228 dev->power.memalloc_noio = enable; in pm_runtime_set_memalloc_noio()
229 spin_unlock_irq(&dev->power.lock); in pm_runtime_set_memalloc_noio()
238 dev = dev->parent; in pm_runtime_set_memalloc_noio()
245 if (!dev || (!enable && in pm_runtime_set_memalloc_noio()
246 device_for_each_child(dev, NULL, in pm_runtime_set_memalloc_noio()
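pm_runtime_set_memalloc_noio() propagates the memalloc_noio flag up the ancestor chain so that resuming any device on the path to a block device avoids I/O-triggering allocations (memory reclaim could otherwise depend on the very device being resumed). It is normally called by storage stacks rather than leaf drivers; a hedged sketch of the intent (foo_* names are hypothetical):

#include <linux/pm_runtime.h>

/* Hypothetical registration path of a device in the block I/O path:
 * make its (and, internally, its ancestors') runtime PM callbacks run
 * with GFP_NOIO allocation semantics. */
static void foo_blk_register_pm(struct device *dev)
{
	pm_runtime_set_memalloc_noio(dev, true);
}

/* Corresponding teardown. */
static void foo_blk_unregister_pm(struct device *dev)
{
	pm_runtime_set_memalloc_noio(dev, false);
}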
258 static int rpm_check_suspend_allowed(struct device *dev) in rpm_check_suspend_allowed() argument
262 if (dev->power.runtime_error) in rpm_check_suspend_allowed()
264 else if (dev->power.disable_depth > 0) in rpm_check_suspend_allowed()
266 else if (atomic_read(&dev->power.usage_count) > 0) in rpm_check_suspend_allowed()
268 else if (!dev->power.ignore_children && in rpm_check_suspend_allowed()
269 atomic_read(&dev->power.child_count)) in rpm_check_suspend_allowed()
273 else if ((dev->power.deferred_resume in rpm_check_suspend_allowed()
274 && dev->power.runtime_status == RPM_SUSPENDING) in rpm_check_suspend_allowed()
275 || (dev->power.request_pending in rpm_check_suspend_allowed()
276 && dev->power.request == RPM_REQ_RESUME)) in rpm_check_suspend_allowed()
278 else if (__dev_pm_qos_resume_latency(dev) == 0) in rpm_check_suspend_allowed()
280 else if (dev->power.runtime_status == RPM_SUSPENDED) in rpm_check_suspend_allowed()
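Note the __dev_pm_qos_resume_latency(dev) == 0 branch: a zero resume-latency QoS constraint means no wakeup latency is tolerated, so rpm_check_suspend_allowed() reports suspend as not permitted and the device stays active. Such a constraint can be imposed from kernel code via the device PM QoS API; a hedged sketch (request and function names are illustrative):

#include <linux/pm_qos.h>

static struct dev_pm_qos_request foo_latency_req;

/* Demand zero resume latency: rpm_check_suspend_allowed() will then
 * refuse runtime suspend and the device is kept runtime-active. */
static int foo_forbid_runtime_suspend(struct device *dev)
{
	return dev_pm_qos_add_request(dev, &foo_latency_req,
				      DEV_PM_QOS_RESUME_LATENCY, 0);
}

/* Drop the constraint to let runtime suspend happen again. */
static void foo_allow_runtime_suspend(void)
{
	dev_pm_qos_remove_request(&foo_latency_req);
}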
286 static int rpm_get_suppliers(struct device *dev) in rpm_get_suppliers() argument
290 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, in rpm_get_suppliers()
308 static void rpm_put_suppliers(struct device *dev) in rpm_put_suppliers() argument
312 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, in rpm_put_suppliers()
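rpm_get_suppliers()/rpm_put_suppliers() iterate the device's supplier links: resuming a consumer first takes runtime PM references on every supplier linked with DL_FLAG_PM_RUNTIME, and drops them again on the opposite transition. Establishing such a link is a single device_link_add() call; a hedged sketch:

#include <linux/device.h>

/* Tie the consumer's runtime PM to the supplier: with
 * DL_FLAG_PM_RUNTIME the core resumes the supplier before the
 * consumer; DL_FLAG_RPM_ACTIVE additionally takes an initial
 * runtime PM reference on the supplier. */
static int foo_bind_supplier(struct device *consumer,
			     struct device *supplier)
{
	struct device_link *link;

	link = device_link_add(consumer, supplier,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
	return link ? 0 : -ENODEV;
}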
325 static int __rpm_callback(int (*cb)(struct device *), struct device *dev) in __rpm_callback() argument
326 __releases(&dev->power.lock) __acquires(&dev->power.lock) in __rpm_callback()
329 bool use_links = dev->power.links_count > 0; in __rpm_callback()
331 if (dev->power.irq_safe) { in __rpm_callback()
332 spin_unlock(&dev->power.lock); in __rpm_callback()
334 spin_unlock_irq(&dev->power.lock); in __rpm_callback()
343 if (use_links && dev->power.runtime_status == RPM_RESUMING) { in __rpm_callback()
346 retval = rpm_get_suppliers(dev); in __rpm_callback()
354 retval = cb(dev); in __rpm_callback()
356 if (dev->power.irq_safe) { in __rpm_callback()
357 spin_lock(&dev->power.lock); in __rpm_callback()
367 && ((dev->power.runtime_status == RPM_SUSPENDING && !retval) in __rpm_callback()
368 || (dev->power.runtime_status == RPM_RESUMING && retval))) { in __rpm_callback()
372 rpm_put_suppliers(dev); in __rpm_callback()
377 spin_lock_irq(&dev->power.lock); in __rpm_callback()
396 static int rpm_idle(struct device *dev, int rpmflags) in rpm_idle() argument
401 trace_rpm_idle_rcuidle(dev, rpmflags); in rpm_idle()
402 retval = rpm_check_suspend_allowed(dev); in rpm_idle()
407 else if (dev->power.runtime_status != RPM_ACTIVE) in rpm_idle()
414 else if (dev->power.request_pending && in rpm_idle()
415 dev->power.request > RPM_REQ_IDLE) in rpm_idle()
419 else if (dev->power.idle_notification) in rpm_idle()
425 dev->power.request = RPM_REQ_NONE; in rpm_idle()
427 if (dev->power.no_callbacks) in rpm_idle()
432 dev->power.request = RPM_REQ_IDLE; in rpm_idle()
433 if (!dev->power.request_pending) { in rpm_idle()
434 dev->power.request_pending = true; in rpm_idle()
435 queue_work(pm_wq, &dev->power.work); in rpm_idle()
437 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0); in rpm_idle()
441 dev->power.idle_notification = true; in rpm_idle()
443 callback = RPM_GET_CALLBACK(dev, runtime_idle); in rpm_idle()
446 retval = __rpm_callback(callback, dev); in rpm_idle()
448 dev->power.idle_notification = false; in rpm_idle()
449 wake_up_all(&dev->power.wait_queue); in rpm_idle()
452 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval); in rpm_idle()
453 return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO); in rpm_idle()
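Note the tail of rpm_idle(): if the ->runtime_idle() callback is absent or returns 0, the core falls through to rpm_suspend(dev, rpmflags | RPM_AUTO), so the idle callback is effectively a veto point. A hedged sketch (foo_hw_busy() is a hypothetical helper):

#include <linux/pm_runtime.h>

static bool foo_hw_busy(struct device *dev);	/* hypothetical */

/* ->runtime_idle(): return 0 to let the core go on and suspend the
 * device; return a negative value (commonly -EBUSY) to veto it. */
static int foo_runtime_idle(struct device *dev)
{
	return foo_hw_busy(dev) ? -EBUSY : 0;
}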
461 static int rpm_callback(int (*cb)(struct device *), struct device *dev) in rpm_callback() argument
468 if (dev->power.memalloc_noio) { in rpm_callback()
481 retval = __rpm_callback(cb, dev); in rpm_callback()
484 retval = __rpm_callback(cb, dev); in rpm_callback()
487 dev->power.runtime_error = retval; in rpm_callback()
512 static int rpm_suspend(struct device *dev, int rpmflags) in rpm_suspend() argument
513 __releases(&dev->power.lock) __acquires(&dev->power.lock) in rpm_suspend()
519 trace_rpm_suspend_rcuidle(dev, rpmflags); in rpm_suspend()
522 retval = rpm_check_suspend_allowed(dev); in rpm_suspend()
527 if (dev->power.runtime_status == RPM_RESUMING && !(rpmflags & RPM_ASYNC)) in rpm_suspend()
534 && dev->power.runtime_status != RPM_SUSPENDING) { in rpm_suspend()
535 u64 expires = pm_runtime_autosuspend_expiration(dev); in rpm_suspend()
539 dev->power.request = RPM_REQ_NONE; in rpm_suspend()
548 if (!(dev->power.timer_expires && in rpm_suspend()
549 dev->power.timer_expires <= expires)) { in rpm_suspend()
554 u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) * in rpm_suspend()
557 dev->power.timer_expires = expires; in rpm_suspend()
558 hrtimer_start_range_ns(&dev->power.suspend_timer, in rpm_suspend()
563 dev->power.timer_autosuspends = 1; in rpm_suspend()
569 pm_runtime_cancel_pending(dev); in rpm_suspend()
571 if (dev->power.runtime_status == RPM_SUSPENDING) { in rpm_suspend()
579 if (dev->power.irq_safe) { in rpm_suspend()
580 spin_unlock(&dev->power.lock); in rpm_suspend()
584 spin_lock(&dev->power.lock); in rpm_suspend()
590 prepare_to_wait(&dev->power.wait_queue, &wait, in rpm_suspend()
592 if (dev->power.runtime_status != RPM_SUSPENDING) in rpm_suspend()
595 spin_unlock_irq(&dev->power.lock); in rpm_suspend()
599 spin_lock_irq(&dev->power.lock); in rpm_suspend()
601 finish_wait(&dev->power.wait_queue, &wait); in rpm_suspend()
605 if (dev->power.no_callbacks) in rpm_suspend()
610 dev->power.request = (rpmflags & RPM_AUTO) ? in rpm_suspend()
612 if (!dev->power.request_pending) { in rpm_suspend()
613 dev->power.request_pending = true; in rpm_suspend()
614 queue_work(pm_wq, &dev->power.work); in rpm_suspend()
619 __update_runtime_status(dev, RPM_SUSPENDING); in rpm_suspend()
621 callback = RPM_GET_CALLBACK(dev, runtime_suspend); in rpm_suspend()
623 dev_pm_enable_wake_irq_check(dev, true); in rpm_suspend()
624 retval = rpm_callback(callback, dev); in rpm_suspend()
629 __update_runtime_status(dev, RPM_SUSPENDED); in rpm_suspend()
630 pm_runtime_deactivate_timer(dev); in rpm_suspend()
632 if (dev->parent) { in rpm_suspend()
633 parent = dev->parent; in rpm_suspend()
636 wake_up_all(&dev->power.wait_queue); in rpm_suspend()
638 if (dev->power.deferred_resume) { in rpm_suspend()
639 dev->power.deferred_resume = false; in rpm_suspend()
640 rpm_resume(dev, 0); in rpm_suspend()
646 if (parent && !parent->power.ignore_children && !dev->power.irq_safe) { in rpm_suspend()
647 spin_unlock(&dev->power.lock); in rpm_suspend()
653 spin_lock(&dev->power.lock); in rpm_suspend()
657 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval); in rpm_suspend()
662 dev_pm_disable_wake_irq_check(dev); in rpm_suspend()
663 __update_runtime_status(dev, RPM_ACTIVE); in rpm_suspend()
664 dev->power.deferred_resume = false; in rpm_suspend()
665 wake_up_all(&dev->power.wait_queue); in rpm_suspend()
668 dev->power.runtime_error = 0; in rpm_suspend()
677 pm_runtime_autosuspend_expiration(dev) != 0) in rpm_suspend()
680 pm_runtime_cancel_pending(dev); in rpm_suspend()
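Two error-handling details of rpm_suspend() matter to callback authors: -EAGAIN or -EBUSY from the callback leaves the device RPM_ACTIVE with runtime_error cleared back to 0 (and, with autosuspend in use and a pending expiration, retries), whereas any other error stays latched in power.runtime_error and blocks runtime PM until the status is explicitly reset. A hedged elaboration of the earlier foo_runtime_suspend() stub (helpers are hypothetical):

static bool foo_dma_in_flight(struct device *dev);	/* hypothetical */
static void foo_save_context(struct device *dev);	/* hypothetical */

/* ->runtime_suspend(): -EBUSY/-EAGAIN mean "try again later" and do
 * not latch power.runtime_error; any other error does. */
static int foo_runtime_suspend(struct device *dev)
{
	if (foo_dma_in_flight(dev))
		return -EBUSY;

	foo_save_context(dev);
	return 0;
}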
702 static int rpm_resume(struct device *dev, int rpmflags) in rpm_resume() argument
703 __releases(&dev->power.lock) __acquires(&dev->power.lock) in rpm_resume()
709 trace_rpm_resume_rcuidle(dev, rpmflags); in rpm_resume()
712 if (dev->power.runtime_error) in rpm_resume()
714 else if (dev->power.disable_depth == 1 && dev->power.is_suspended in rpm_resume()
715 && dev->power.runtime_status == RPM_ACTIVE) in rpm_resume()
717 else if (dev->power.disable_depth > 0) in rpm_resume()
728 dev->power.request = RPM_REQ_NONE; in rpm_resume()
729 if (!dev->power.timer_autosuspends) in rpm_resume()
730 pm_runtime_deactivate_timer(dev); in rpm_resume()
732 if (dev->power.runtime_status == RPM_ACTIVE) { in rpm_resume()
737 if (dev->power.runtime_status == RPM_RESUMING in rpm_resume()
738 || dev->power.runtime_status == RPM_SUSPENDING) { in rpm_resume()
742 if (dev->power.runtime_status == RPM_SUSPENDING) in rpm_resume()
743 dev->power.deferred_resume = true; in rpm_resume()
749 if (dev->power.irq_safe) { in rpm_resume()
750 spin_unlock(&dev->power.lock); in rpm_resume()
754 spin_lock(&dev->power.lock); in rpm_resume()
760 prepare_to_wait(&dev->power.wait_queue, &wait, in rpm_resume()
762 if (dev->power.runtime_status != RPM_RESUMING in rpm_resume()
763 && dev->power.runtime_status != RPM_SUSPENDING) in rpm_resume()
766 spin_unlock_irq(&dev->power.lock); in rpm_resume()
770 spin_lock_irq(&dev->power.lock); in rpm_resume()
772 finish_wait(&dev->power.wait_queue, &wait); in rpm_resume()
781 if (dev->power.no_callbacks && !parent && dev->parent) { in rpm_resume()
782 spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING); in rpm_resume()
783 if (dev->parent->power.disable_depth > 0 in rpm_resume()
784 || dev->parent->power.ignore_children in rpm_resume()
785 || dev->parent->power.runtime_status == RPM_ACTIVE) { in rpm_resume()
786 atomic_inc(&dev->parent->power.child_count); in rpm_resume()
787 spin_unlock(&dev->parent->power.lock); in rpm_resume()
791 spin_unlock(&dev->parent->power.lock); in rpm_resume()
796 dev->power.request = RPM_REQ_RESUME; in rpm_resume()
797 if (!dev->power.request_pending) { in rpm_resume()
798 dev->power.request_pending = true; in rpm_resume()
799 queue_work(pm_wq, &dev->power.work); in rpm_resume()
805 if (!parent && dev->parent) { in rpm_resume()
811 parent = dev->parent; in rpm_resume()
812 if (dev->power.irq_safe) in rpm_resume()
814 spin_unlock(&dev->power.lock); in rpm_resume()
831 spin_lock(&dev->power.lock); in rpm_resume()
838 if (dev->power.no_callbacks) in rpm_resume()
841 __update_runtime_status(dev, RPM_RESUMING); in rpm_resume()
843 callback = RPM_GET_CALLBACK(dev, runtime_resume); in rpm_resume()
845 dev_pm_disable_wake_irq_check(dev); in rpm_resume()
846 retval = rpm_callback(callback, dev); in rpm_resume()
848 __update_runtime_status(dev, RPM_SUSPENDED); in rpm_resume()
849 pm_runtime_cancel_pending(dev); in rpm_resume()
850 dev_pm_enable_wake_irq_check(dev, false); in rpm_resume()
853 __update_runtime_status(dev, RPM_ACTIVE); in rpm_resume()
854 pm_runtime_mark_last_busy(dev); in rpm_resume()
858 wake_up_all(&dev->power.wait_queue); in rpm_resume()
861 rpm_idle(dev, RPM_ASYNC); in rpm_resume()
864 if (parent && !dev->power.irq_safe) { in rpm_resume()
865 spin_unlock_irq(&dev->power.lock); in rpm_resume()
869 spin_lock_irq(&dev->power.lock); in rpm_resume()
872 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval); in rpm_resume()
886 struct device *dev = container_of(work, struct device, power.work); in pm_runtime_work() local
889 spin_lock_irq(&dev->power.lock); in pm_runtime_work()
891 if (!dev->power.request_pending) in pm_runtime_work()
894 req = dev->power.request; in pm_runtime_work()
895 dev->power.request = RPM_REQ_NONE; in pm_runtime_work()
896 dev->power.request_pending = false; in pm_runtime_work()
902 rpm_idle(dev, RPM_NOWAIT); in pm_runtime_work()
905 rpm_suspend(dev, RPM_NOWAIT); in pm_runtime_work()
908 rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO); in pm_runtime_work()
911 rpm_resume(dev, RPM_NOWAIT); in pm_runtime_work()
916 spin_unlock_irq(&dev->power.lock); in pm_runtime_work()
927 struct device *dev = container_of(timer, struct device, power.suspend_timer); in pm_suspend_timer_fn() local
931 spin_lock_irqsave(&dev->power.lock, flags); in pm_suspend_timer_fn()
933 expires = dev->power.timer_expires; in pm_suspend_timer_fn()
939 dev->power.timer_expires = 0; in pm_suspend_timer_fn()
940 rpm_suspend(dev, dev->power.timer_autosuspends ? in pm_suspend_timer_fn()
944 spin_unlock_irqrestore(&dev->power.lock, flags); in pm_suspend_timer_fn()
954 int pm_schedule_suspend(struct device *dev, unsigned int delay) in pm_schedule_suspend() argument
960 spin_lock_irqsave(&dev->power.lock, flags); in pm_schedule_suspend()
963 retval = rpm_suspend(dev, RPM_ASYNC); in pm_schedule_suspend()
967 retval = rpm_check_suspend_allowed(dev); in pm_schedule_suspend()
972 pm_runtime_cancel_pending(dev); in pm_schedule_suspend()
975 dev->power.timer_expires = expires; in pm_schedule_suspend()
976 dev->power.timer_autosuspends = 0; in pm_schedule_suspend()
977 hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS); in pm_schedule_suspend()
980 spin_unlock_irqrestore(&dev->power.lock, flags); in pm_schedule_suspend()
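pm_schedule_suspend() arms the suspend hrtimer without autosuspend semantics (timer_autosuspends is set to 0), and a zero delay degenerates into an immediate asynchronous rpm_suspend(). A hedged caller:

#include <linux/pm_runtime.h>

/* Ask the core to attempt a suspend ~500 ms from now. May return a
 * negative errno right away, e.g. -EAGAIN while usage_count > 0
 * (see rpm_check_suspend_allowed() above). */
static int foo_schedule_suspend(struct device *dev)
{
	return pm_schedule_suspend(dev, 500);
}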
998 int __pm_runtime_idle(struct device *dev, int rpmflags) in __pm_runtime_idle() argument
1004 if (!atomic_dec_and_test(&dev->power.usage_count)) { in __pm_runtime_idle()
1005 trace_rpm_usage_rcuidle(dev, rpmflags); in __pm_runtime_idle()
1010 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); in __pm_runtime_idle()
1012 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_idle()
1013 retval = rpm_idle(dev, rpmflags); in __pm_runtime_idle()
1014 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_idle()
1032 int __pm_runtime_suspend(struct device *dev, int rpmflags) in __pm_runtime_suspend() argument
1038 if (!atomic_dec_and_test(&dev->power.usage_count)) { in __pm_runtime_suspend()
1039 trace_rpm_usage_rcuidle(dev, rpmflags); in __pm_runtime_suspend()
1044 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); in __pm_runtime_suspend()
1046 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_suspend()
1047 retval = rpm_suspend(dev, rpmflags); in __pm_runtime_suspend()
1048 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_suspend()
1065 int __pm_runtime_resume(struct device *dev, int rpmflags) in __pm_runtime_resume() argument
1070 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe && in __pm_runtime_resume()
1071 dev->power.runtime_status != RPM_ACTIVE); in __pm_runtime_resume()
1074 atomic_inc(&dev->power.usage_count); in __pm_runtime_resume()
1076 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_resume()
1077 retval = rpm_resume(dev, rpmflags); in __pm_runtime_resume()
1078 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_resume()
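These three workers sit behind the familiar pm_runtime_get*/pm_runtime_put* helpers: the put-style entry points (__pm_runtime_idle and __pm_runtime_suspend with RPM_GET_PUT) decrement usage_count and only act when it reaches zero, while __pm_runtime_resume increments it first. The canonical get/put bracket around hardware access, as a hedged sketch:

#include <linux/pm_runtime.h>

static int foo_do_io(struct device *dev)
{
	int ret;

	/* Bump usage_count and resume synchronously. */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* Drop the reference without triggering idle. */
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... program the hardware ... */

	/* Drop usage_count; hitting zero queues an idle request. */
	pm_runtime_put(dev);
	return 0;
}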
1106 int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count) in pm_runtime_get_if_active() argument
1111 spin_lock_irqsave(&dev->power.lock, flags); in pm_runtime_get_if_active()
1112 if (dev->power.disable_depth > 0) { in pm_runtime_get_if_active()
1114 } else if (dev->power.runtime_status != RPM_ACTIVE) { in pm_runtime_get_if_active()
1118 atomic_inc(&dev->power.usage_count); in pm_runtime_get_if_active()
1120 retval = atomic_inc_not_zero(&dev->power.usage_count); in pm_runtime_get_if_active()
1122 trace_rpm_usage_rcuidle(dev, 0); in pm_runtime_get_if_active()
1123 spin_unlock_irqrestore(&dev->power.lock, flags); in pm_runtime_get_if_active()
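pm_runtime_get_if_active() takes a usage-count reference only when the device is already RPM_ACTIVE and never wakes it, which makes it suitable for hot paths and interrupt context; the ign_usage_count == false flavour is also available through the pm_runtime_get_if_in_use() wrapper. A hedged sketch:

#include <linux/pm_runtime.h>

/* Sample hardware state only if the device is powered and in use:
 * returns -EINVAL if runtime PM is disabled, 0 when no reference
 * could be taken, and 1 with a reference held. */
static void foo_sample(struct device *dev)
{
	if (pm_runtime_get_if_in_use(dev) <= 0)
		return;

	/* ... read registers ... */

	pm_runtime_put(dev);
}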
1153 int __pm_runtime_set_status(struct device *dev, unsigned int status) in __pm_runtime_set_status() argument
1155 struct device *parent = dev->parent; in __pm_runtime_set_status()
1162 spin_lock_irq(&dev->power.lock); in __pm_runtime_set_status()
1168 if (dev->power.runtime_error || dev->power.disable_depth) in __pm_runtime_set_status()
1169 dev->power.disable_depth++; in __pm_runtime_set_status()
1173 spin_unlock_irq(&dev->power.lock); in __pm_runtime_set_status()
1187 error = rpm_get_suppliers(dev); in __pm_runtime_set_status()
1194 spin_lock_irq(&dev->power.lock); in __pm_runtime_set_status()
1196 if (dev->power.runtime_status == status || !parent) in __pm_runtime_set_status()
1213 dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n", in __pm_runtime_set_status()
1214 dev_name(dev), in __pm_runtime_set_status()
1217 } else if (dev->power.runtime_status == RPM_SUSPENDED) { in __pm_runtime_set_status()
1230 __update_runtime_status(dev, status); in __pm_runtime_set_status()
1232 dev->power.runtime_error = 0; in __pm_runtime_set_status()
1235 spin_unlock_irq(&dev->power.lock); in __pm_runtime_set_status()
1243 rpm_put_suppliers(dev); in __pm_runtime_set_status()
1248 pm_runtime_enable(dev); in __pm_runtime_set_status()
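__pm_runtime_set_status() implements pm_runtime_set_active()/pm_runtime_set_suspended(): it tells the core what state the hardware is actually in, adjusting the parent's child_count as needed, and temporarily bumps disable_depth around the change (hence the pm_runtime_enable() on the exit path above). The usual probe-time sequence for hardware that starts powered, hedged:

#include <linux/pm_runtime.h>

/* Hypothetical probe tail: the device powers up active, so record
 * that before handing control to the runtime PM core. */
static int foo_pm_attach(struct device *dev)
{
	int ret;

	ret = pm_runtime_set_active(dev);	/* status := RPM_ACTIVE */
	if (ret)
		return ret;

	pm_runtime_enable(dev);			/* disable_depth-- */
	return 0;
}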
1263 static void __pm_runtime_barrier(struct device *dev) in __pm_runtime_barrier() argument
1265 pm_runtime_deactivate_timer(dev); in __pm_runtime_barrier()
1267 if (dev->power.request_pending) { in __pm_runtime_barrier()
1268 dev->power.request = RPM_REQ_NONE; in __pm_runtime_barrier()
1269 spin_unlock_irq(&dev->power.lock); in __pm_runtime_barrier()
1271 cancel_work_sync(&dev->power.work); in __pm_runtime_barrier()
1273 spin_lock_irq(&dev->power.lock); in __pm_runtime_barrier()
1274 dev->power.request_pending = false; in __pm_runtime_barrier()
1277 if (dev->power.runtime_status == RPM_SUSPENDING in __pm_runtime_barrier()
1278 || dev->power.runtime_status == RPM_RESUMING in __pm_runtime_barrier()
1279 || dev->power.idle_notification) { in __pm_runtime_barrier()
1284 prepare_to_wait(&dev->power.wait_queue, &wait, in __pm_runtime_barrier()
1286 if (dev->power.runtime_status != RPM_SUSPENDING in __pm_runtime_barrier()
1287 && dev->power.runtime_status != RPM_RESUMING in __pm_runtime_barrier()
1288 && !dev->power.idle_notification) in __pm_runtime_barrier()
1290 spin_unlock_irq(&dev->power.lock); in __pm_runtime_barrier()
1294 spin_lock_irq(&dev->power.lock); in __pm_runtime_barrier()
1296 finish_wait(&dev->power.wait_queue, &wait); in __pm_runtime_barrier()
1314 int pm_runtime_barrier(struct device *dev) in pm_runtime_barrier() argument
1318 pm_runtime_get_noresume(dev); in pm_runtime_barrier()
1319 spin_lock_irq(&dev->power.lock); in pm_runtime_barrier()
1321 if (dev->power.request_pending in pm_runtime_barrier()
1322 && dev->power.request == RPM_REQ_RESUME) { in pm_runtime_barrier()
1323 rpm_resume(dev, 0); in pm_runtime_barrier()
1327 __pm_runtime_barrier(dev); in pm_runtime_barrier()
1329 spin_unlock_irq(&dev->power.lock); in pm_runtime_barrier()
1330 pm_runtime_put_noidle(dev); in pm_runtime_barrier()
1350 void __pm_runtime_disable(struct device *dev, bool check_resume) in __pm_runtime_disable() argument
1352 spin_lock_irq(&dev->power.lock); in __pm_runtime_disable()
1354 if (dev->power.disable_depth > 0) { in __pm_runtime_disable()
1355 dev->power.disable_depth++; in __pm_runtime_disable()
1364 if (check_resume && dev->power.request_pending in __pm_runtime_disable()
1365 && dev->power.request == RPM_REQ_RESUME) { in __pm_runtime_disable()
1370 pm_runtime_get_noresume(dev); in __pm_runtime_disable()
1372 rpm_resume(dev, 0); in __pm_runtime_disable()
1374 pm_runtime_put_noidle(dev); in __pm_runtime_disable()
1378 update_pm_runtime_accounting(dev); in __pm_runtime_disable()
1380 if (!dev->power.disable_depth++) in __pm_runtime_disable()
1381 __pm_runtime_barrier(dev); in __pm_runtime_disable()
1384 spin_unlock_irq(&dev->power.lock); in __pm_runtime_disable()
1392 void pm_runtime_enable(struct device *dev) in pm_runtime_enable() argument
1396 spin_lock_irqsave(&dev->power.lock, flags); in pm_runtime_enable()
1398 if (dev->power.disable_depth > 0) { in pm_runtime_enable()
1399 dev->power.disable_depth--; in pm_runtime_enable()
1402 if (!dev->power.disable_depth) in pm_runtime_enable()
1403 dev->power.accounting_timestamp = ktime_get_mono_fast_ns(); in pm_runtime_enable()
1405 dev_warn(dev, "Unbalanced %s!\n", __func__); in pm_runtime_enable()
1408 WARN(!dev->power.disable_depth && in pm_runtime_enable()
1409 dev->power.runtime_status == RPM_SUSPENDED && in pm_runtime_enable()
1410 !dev->power.ignore_children && in pm_runtime_enable()
1411 atomic_read(&dev->power.child_count) > 0, in pm_runtime_enable()
1413 dev_name(dev)); in pm_runtime_enable()
1415 spin_unlock_irqrestore(&dev->power.lock, flags); in pm_runtime_enable()
1427 void pm_runtime_forbid(struct device *dev) in pm_runtime_forbid() argument
1429 spin_lock_irq(&dev->power.lock); in pm_runtime_forbid()
1430 if (!dev->power.runtime_auto) in pm_runtime_forbid()
1433 dev->power.runtime_auto = false; in pm_runtime_forbid()
1434 atomic_inc(&dev->power.usage_count); in pm_runtime_forbid()
1435 rpm_resume(dev, 0); in pm_runtime_forbid()
1438 spin_unlock_irq(&dev->power.lock); in pm_runtime_forbid()
1448 void pm_runtime_allow(struct device *dev) in pm_runtime_allow() argument
1450 spin_lock_irq(&dev->power.lock); in pm_runtime_allow()
1451 if (dev->power.runtime_auto) in pm_runtime_allow()
1454 dev->power.runtime_auto = true; in pm_runtime_allow()
1455 if (atomic_dec_and_test(&dev->power.usage_count)) in pm_runtime_allow()
1456 rpm_idle(dev, RPM_AUTO | RPM_ASYNC); in pm_runtime_allow()
1458 trace_rpm_usage_rcuidle(dev, RPM_AUTO | RPM_ASYNC); in pm_runtime_allow()
1461 spin_unlock_irq(&dev->power.lock); in pm_runtime_allow()
1473 void pm_runtime_no_callbacks(struct device *dev) in pm_runtime_no_callbacks() argument
1475 spin_lock_irq(&dev->power.lock); in pm_runtime_no_callbacks()
1476 dev->power.no_callbacks = 1; in pm_runtime_no_callbacks()
1477 spin_unlock_irq(&dev->power.lock); in pm_runtime_no_callbacks()
1478 if (device_is_registered(dev)) in pm_runtime_no_callbacks()
1479 rpm_sysfs_remove(dev); in pm_runtime_no_callbacks()
1494 void pm_runtime_irq_safe(struct device *dev) in pm_runtime_irq_safe() argument
1496 if (dev->parent) in pm_runtime_irq_safe()
1497 pm_runtime_get_sync(dev->parent); in pm_runtime_irq_safe()
1498 spin_lock_irq(&dev->power.lock); in pm_runtime_irq_safe()
1499 dev->power.irq_safe = 1; in pm_runtime_irq_safe()
1500 spin_unlock_irq(&dev->power.lock); in pm_runtime_irq_safe()
1515 static void update_autosuspend(struct device *dev, int old_delay, int old_use) in update_autosuspend() argument
1517 int delay = dev->power.autosuspend_delay; in update_autosuspend()
1520 if (dev->power.use_autosuspend && delay < 0) { in update_autosuspend()
1524 atomic_inc(&dev->power.usage_count); in update_autosuspend()
1525 rpm_resume(dev, 0); in update_autosuspend()
1527 trace_rpm_usage_rcuidle(dev, 0); in update_autosuspend()
1536 atomic_dec(&dev->power.usage_count); in update_autosuspend()
1539 rpm_idle(dev, RPM_AUTO); in update_autosuspend()
1552 void pm_runtime_set_autosuspend_delay(struct device *dev, int delay) in pm_runtime_set_autosuspend_delay() argument
1556 spin_lock_irq(&dev->power.lock); in pm_runtime_set_autosuspend_delay()
1557 old_delay = dev->power.autosuspend_delay; in pm_runtime_set_autosuspend_delay()
1558 old_use = dev->power.use_autosuspend; in pm_runtime_set_autosuspend_delay()
1559 dev->power.autosuspend_delay = delay; in pm_runtime_set_autosuspend_delay()
1560 update_autosuspend(dev, old_delay, old_use); in pm_runtime_set_autosuspend_delay()
1561 spin_unlock_irq(&dev->power.lock); in pm_runtime_set_autosuspend_delay()
1573 void __pm_runtime_use_autosuspend(struct device *dev, bool use) in __pm_runtime_use_autosuspend() argument
1577 spin_lock_irq(&dev->power.lock); in __pm_runtime_use_autosuspend()
1578 old_delay = dev->power.autosuspend_delay; in __pm_runtime_use_autosuspend()
1579 old_use = dev->power.use_autosuspend; in __pm_runtime_use_autosuspend()
1580 dev->power.use_autosuspend = use; in __pm_runtime_use_autosuspend()
1581 update_autosuspend(dev, old_delay, old_use); in __pm_runtime_use_autosuspend()
1582 spin_unlock_irq(&dev->power.lock); in __pm_runtime_use_autosuspend()
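update_autosuspend() reconciles a new delay or use flag with the device's state: a negative delay while autosuspend is enabled acts like a resume-and-forbid (usage_count is bumped and the device resumed), and relaxing the settings may kick off an immediate RPM_AUTO idle request. The standard driver-side autosuspend pattern, hedged:

#include <linux/pm_runtime.h>

/* Probe-time setup: suspend automatically 100 ms after last use. */
static void foo_autosuspend_setup(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 100);	/* ms */
	pm_runtime_use_autosuspend(dev);
}

/* End-of-I/O path: restart the 100 ms window, then drop our
 * reference with the autosuspend-aware put. */
static void foo_io_done(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}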
1590 void pm_runtime_init(struct device *dev) in pm_runtime_init() argument
1592 dev->power.runtime_status = RPM_SUSPENDED; in pm_runtime_init()
1593 dev->power.idle_notification = false; in pm_runtime_init()
1595 dev->power.disable_depth = 1; in pm_runtime_init()
1596 atomic_set(&dev->power.usage_count, 0); in pm_runtime_init()
1598 dev->power.runtime_error = 0; in pm_runtime_init()
1600 atomic_set(&dev->power.child_count, 0); in pm_runtime_init()
1601 pm_suspend_ignore_children(dev, false); in pm_runtime_init()
1602 dev->power.runtime_auto = true; in pm_runtime_init()
1604 dev->power.request_pending = false; in pm_runtime_init()
1605 dev->power.request = RPM_REQ_NONE; in pm_runtime_init()
1606 dev->power.deferred_resume = false; in pm_runtime_init()
1607 INIT_WORK(&dev->power.work, pm_runtime_work); in pm_runtime_init()
1609 dev->power.timer_expires = 0; in pm_runtime_init()
1610 hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); in pm_runtime_init()
1611 dev->power.suspend_timer.function = pm_suspend_timer_fn; in pm_runtime_init()
1613 init_waitqueue_head(&dev->power.wait_queue); in pm_runtime_init()
1620 void pm_runtime_reinit(struct device *dev) in pm_runtime_reinit() argument
1622 if (!pm_runtime_enabled(dev)) { in pm_runtime_reinit()
1623 if (dev->power.runtime_status == RPM_ACTIVE) in pm_runtime_reinit()
1624 pm_runtime_set_suspended(dev); in pm_runtime_reinit()
1625 if (dev->power.irq_safe) { in pm_runtime_reinit()
1626 spin_lock_irq(&dev->power.lock); in pm_runtime_reinit()
1627 dev->power.irq_safe = 0; in pm_runtime_reinit()
1628 spin_unlock_irq(&dev->power.lock); in pm_runtime_reinit()
1629 if (dev->parent) in pm_runtime_reinit()
1630 pm_runtime_put(dev->parent); in pm_runtime_reinit()
1639 void pm_runtime_remove(struct device *dev) in pm_runtime_remove() argument
1641 __pm_runtime_disable(dev, false); in pm_runtime_remove()
1642 pm_runtime_reinit(dev); in pm_runtime_remove()
1649 void pm_runtime_get_suppliers(struct device *dev) in pm_runtime_get_suppliers() argument
1656 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, in pm_runtime_get_suppliers()
1671 void pm_runtime_put_suppliers(struct device *dev) in pm_runtime_put_suppliers() argument
1678 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node, in pm_runtime_put_suppliers()
1689 void pm_runtime_new_link(struct device *dev) in pm_runtime_new_link() argument
1691 spin_lock_irq(&dev->power.lock); in pm_runtime_new_link()
1692 dev->power.links_count++; in pm_runtime_new_link()
1693 spin_unlock_irq(&dev->power.lock); in pm_runtime_new_link()
1696 static void pm_runtime_drop_link_count(struct device *dev) in pm_runtime_drop_link_count() argument
1698 spin_lock_irq(&dev->power.lock); in pm_runtime_drop_link_count()
1699 WARN_ON(dev->power.links_count == 0); in pm_runtime_drop_link_count()
1700 dev->power.links_count--; in pm_runtime_drop_link_count()
1701 spin_unlock_irq(&dev->power.lock); in pm_runtime_drop_link_count()
1723 static bool pm_runtime_need_not_resume(struct device *dev) in pm_runtime_need_not_resume() argument
1725 return atomic_read(&dev->power.usage_count) <= 1 && in pm_runtime_need_not_resume()
1726 (atomic_read(&dev->power.child_count) == 0 || in pm_runtime_need_not_resume()
1727 dev->power.ignore_children); in pm_runtime_need_not_resume()
1747 int pm_runtime_force_suspend(struct device *dev) in pm_runtime_force_suspend() argument
1752 pm_runtime_disable(dev); in pm_runtime_force_suspend()
1753 if (pm_runtime_status_suspended(dev)) in pm_runtime_force_suspend()
1756 callback = RPM_GET_CALLBACK(dev, runtime_suspend); in pm_runtime_force_suspend()
1758 ret = callback ? callback(dev) : 0; in pm_runtime_force_suspend()
1768 if (pm_runtime_need_not_resume(dev)) in pm_runtime_force_suspend()
1769 pm_runtime_set_suspended(dev); in pm_runtime_force_suspend()
1771 __update_runtime_status(dev, RPM_SUSPENDED); in pm_runtime_force_suspend()
1776 pm_runtime_enable(dev); in pm_runtime_force_suspend()
1793 int pm_runtime_force_resume(struct device *dev) in pm_runtime_force_resume() argument
1798 if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev)) in pm_runtime_force_resume()
1805 __update_runtime_status(dev, RPM_ACTIVE); in pm_runtime_force_resume()
1807 callback = RPM_GET_CALLBACK(dev, runtime_resume); in pm_runtime_force_resume()
1809 ret = callback ? callback(dev) : 0; in pm_runtime_force_resume()
1811 pm_runtime_set_suspended(dev); in pm_runtime_force_resume()
1815 pm_runtime_mark_last_busy(dev); in pm_runtime_force_resume()
1817 pm_runtime_enable(dev); in pm_runtime_force_resume()
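pm_runtime_force_suspend() and pm_runtime_force_resume() let drivers without dedicated system-sleep handling reuse their runtime PM callbacks for system-wide transitions; note that force_resume deliberately leaves a device suspended when pm_runtime_need_not_resume() says nothing requires it. Wiring them up, revising the earlier foo_pm_ops sketch (the foo_runtime_* callbacks are the hypothetical ones from above):

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume,
			   foo_runtime_idle)
};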