Lines matching refs:dev in drivers/base/power/runtime.c. Each entry gives the line number in that file, the matching source line, and the enclosing function; "argument" and "local" mark declaration sites.
21 static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset) in __rpm_get_callback() argument
26 if (dev->pm_domain) in __rpm_get_callback()
27 ops = &dev->pm_domain->ops; in __rpm_get_callback()
28 else if (dev->type && dev->type->pm) in __rpm_get_callback()
29 ops = dev->type->pm; in __rpm_get_callback()
30 else if (dev->class && dev->class->pm) in __rpm_get_callback()
31 ops = dev->class->pm; in __rpm_get_callback()
32 else if (dev->bus && dev->bus->pm) in __rpm_get_callback()
33 ops = dev->bus->pm; in __rpm_get_callback()
42 if (!cb && dev->driver && dev->driver->pm) in __rpm_get_callback()
43 cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset); in __rpm_get_callback()
48 #define RPM_GET_CALLBACK(dev, callback) \ argument
49 __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
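Lines 21-49 above resolve a runtime PM callback: the ops table is picked in the fixed order pm_domain, device type, class, then bus, and the driver's own dev_pm_ops is consulted only when the chosen table lacks the callback at that offset. A minimal driver-side sketch of how such callbacks are normally published (the my_* names are hypothetical):

    static int my_runtime_suspend(struct device *dev)
    {
            /* Hypothetical: put the hardware into its low-power state. */
            return 0;
    }

    static int my_runtime_resume(struct device *dev)
    {
            /* Hypothetical: restore power and reprogram the registers. */
            return 0;
    }

    /* RPM_GET_CALLBACK(dev, runtime_suspend) lands here whenever no
     * pm_domain/type/class/bus ops table overrides the driver's ops. */
    static const struct dev_pm_ops my_pm_ops = {
            SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
    };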
51 static int rpm_resume(struct device *dev, int rpmflags);
52 static int rpm_suspend(struct device *dev, int rpmflags);
65 void update_pm_runtime_accounting(struct device *dev) in update_pm_runtime_accounting() argument
70 delta = now - dev->power.accounting_timestamp; in update_pm_runtime_accounting()
72 dev->power.accounting_timestamp = now; in update_pm_runtime_accounting()
74 if (dev->power.disable_depth > 0) in update_pm_runtime_accounting()
77 if (dev->power.runtime_status == RPM_SUSPENDED) in update_pm_runtime_accounting()
78 dev->power.suspended_jiffies += delta; in update_pm_runtime_accounting()
80 dev->power.active_jiffies += delta; in update_pm_runtime_accounting()
83 static void __update_runtime_status(struct device *dev, enum rpm_status status) in __update_runtime_status() argument
85 update_pm_runtime_accounting(dev); in __update_runtime_status()
86 dev->power.runtime_status = status; in __update_runtime_status()
93 static void pm_runtime_deactivate_timer(struct device *dev) in pm_runtime_deactivate_timer() argument
95 if (dev->power.timer_expires > 0) { in pm_runtime_deactivate_timer()
96 del_timer(&dev->power.suspend_timer); in pm_runtime_deactivate_timer()
97 dev->power.timer_expires = 0; in pm_runtime_deactivate_timer()
105 static void pm_runtime_cancel_pending(struct device *dev) in pm_runtime_cancel_pending() argument
107 pm_runtime_deactivate_timer(dev); in pm_runtime_cancel_pending()
112 dev->power.request = RPM_REQ_NONE; in pm_runtime_cancel_pending()
127 unsigned long pm_runtime_autosuspend_expiration(struct device *dev) in pm_runtime_autosuspend_expiration() argument
134 if (!dev->power.use_autosuspend) in pm_runtime_autosuspend_expiration()
137 autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay); in pm_runtime_autosuspend_expiration()
141 last_busy = READ_ONCE(dev->power.last_busy); in pm_runtime_autosuspend_expiration()
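The deadline computed here is derived from power.last_busy, which drivers refresh with pm_runtime_mark_last_busy(); a return value of 0 means autosuspend is unused, the delay is negative, or the deadline has already passed. The heart of the computation is roughly this (a simplified reconstruction, not the verbatim body, which also rounds deadlines for delays of a second or more):

    /* Deadline in jiffies: last recorded activity plus the delay. */
    unsigned long expires = last_busy + msecs_to_jiffies(autosuspend_delay);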
162 static int dev_memalloc_noio(struct device *dev, void *data) in dev_memalloc_noio() argument
164 return dev->power.memalloc_noio; in dev_memalloc_noio()
195 void pm_runtime_set_memalloc_noio(struct device *dev, bool enable) in pm_runtime_set_memalloc_noio() argument
204 spin_lock_irq(&dev->power.lock); in pm_runtime_set_memalloc_noio()
205 enabled = dev->power.memalloc_noio; in pm_runtime_set_memalloc_noio()
206 dev->power.memalloc_noio = enable; in pm_runtime_set_memalloc_noio()
207 spin_unlock_irq(&dev->power.lock); in pm_runtime_set_memalloc_noio()
216 dev = dev->parent; in pm_runtime_set_memalloc_noio()
223 if (!dev || (!enable && in pm_runtime_set_memalloc_noio()
224 device_for_each_child(dev, NULL, in pm_runtime_set_memalloc_noio()
236 static int rpm_check_suspend_allowed(struct device *dev) in rpm_check_suspend_allowed() argument
240 if (dev->power.runtime_error) in rpm_check_suspend_allowed()
242 else if (dev->power.disable_depth > 0) in rpm_check_suspend_allowed()
244 else if (atomic_read(&dev->power.usage_count) > 0) in rpm_check_suspend_allowed()
246 else if (!dev->power.ignore_children && in rpm_check_suspend_allowed()
247 atomic_read(&dev->power.child_count)) in rpm_check_suspend_allowed()
251 else if ((dev->power.deferred_resume in rpm_check_suspend_allowed()
252 && dev->power.runtime_status == RPM_SUSPENDING) in rpm_check_suspend_allowed()
253 || (dev->power.request_pending in rpm_check_suspend_allowed()
254 && dev->power.request == RPM_REQ_RESUME)) in rpm_check_suspend_allowed()
256 else if (__dev_pm_qos_read_value(dev) == 0) in rpm_check_suspend_allowed()
258 else if (dev->power.runtime_status == RPM_SUSPENDED) in rpm_check_suspend_allowed()
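The matched lines show only the conditions, not the values assigned to retval; for orientation, the codes this function returns in mainline are:

    /*
     * -EINVAL  power.runtime_error is set (an earlier callback failed)
     * -EACCES  runtime PM is disabled (disable_depth > 0)
     * -EAGAIN  usage_count > 0, or a resume is deferred/pending
     * -EBUSY   active children while !ignore_children
     * -EPERM   PM QoS demands zero resume latency
     *  1       already RPM_SUSPENDED, nothing to do
     *  0       suspend may proceed
     */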
264 static int rpm_get_suppliers(struct device *dev) in rpm_get_suppliers() argument
268 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) { in rpm_get_suppliers()
289 static void rpm_put_suppliers(struct device *dev) in rpm_put_suppliers() argument
293 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) in rpm_put_suppliers()
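rpm_get_suppliers() and rpm_put_suppliers() walk the device-link supplier list so suppliers are runtime-resumed before a consumer's own callbacks run (see their use in __rpm_callback() below). A consumer opts in with a PM-runtime-managed link; sketch, with my_link_supplier and its parameters as hypothetical placeholders:

    static int my_link_supplier(struct device *consumer_dev,
                                struct device *supplier_dev)
    {
            struct device_link *link;

            /* With DL_FLAG_PM_RUNTIME, the walks above take and drop
             * runtime PM references on the supplier around the
             * consumer's transitions; DL_FLAG_RPM_ACTIVE starts the
             * supplier in the resumed state. */
            link = device_link_add(consumer_dev, supplier_dev,
                                   DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
            return link ? 0 : -ENODEV;
    }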
306 static int __rpm_callback(int (*cb)(struct device *), struct device *dev) in __rpm_callback() argument
307 __releases(&dev->power.lock) __acquires(&dev->power.lock) in __rpm_callback()
310 bool use_links = dev->power.links_count > 0; in __rpm_callback()
312 if (dev->power.irq_safe) { in __rpm_callback()
313 spin_unlock(&dev->power.lock); in __rpm_callback()
315 spin_unlock_irq(&dev->power.lock); in __rpm_callback()
324 if (use_links && dev->power.runtime_status == RPM_RESUMING) { in __rpm_callback()
327 retval = rpm_get_suppliers(dev); in __rpm_callback()
335 retval = cb(dev); in __rpm_callback()
337 if (dev->power.irq_safe) { in __rpm_callback()
338 spin_lock(&dev->power.lock); in __rpm_callback()
348 && ((dev->power.runtime_status == RPM_SUSPENDING && !retval) in __rpm_callback()
349 || (dev->power.runtime_status == RPM_RESUMING && retval))) { in __rpm_callback()
353 rpm_put_suppliers(dev); in __rpm_callback()
358 spin_lock_irq(&dev->power.lock); in __rpm_callback()
377 static int rpm_idle(struct device *dev, int rpmflags) in rpm_idle() argument
382 trace_rpm_idle_rcuidle(dev, rpmflags); in rpm_idle()
383 retval = rpm_check_suspend_allowed(dev); in rpm_idle()
388 else if (dev->power.runtime_status != RPM_ACTIVE) in rpm_idle()
395 else if (dev->power.request_pending && in rpm_idle()
396 dev->power.request > RPM_REQ_IDLE) in rpm_idle()
400 else if (dev->power.idle_notification) in rpm_idle()
406 dev->power.request = RPM_REQ_NONE; in rpm_idle()
408 if (dev->power.no_callbacks) in rpm_idle()
413 dev->power.request = RPM_REQ_IDLE; in rpm_idle()
414 if (!dev->power.request_pending) { in rpm_idle()
415 dev->power.request_pending = true; in rpm_idle()
416 queue_work(pm_wq, &dev->power.work); in rpm_idle()
418 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0); in rpm_idle()
422 dev->power.idle_notification = true; in rpm_idle()
424 callback = RPM_GET_CALLBACK(dev, runtime_idle); in rpm_idle()
427 retval = __rpm_callback(callback, dev); in rpm_idle()
429 dev->power.idle_notification = false; in rpm_idle()
430 wake_up_all(&dev->power.wait_queue); in rpm_idle()
433 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval); in rpm_idle()
434 return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO); in rpm_idle()
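Line 434 is the contract of rpm_idle(): if the ->runtime_idle callback is absent or returns 0, the core immediately attempts an autosuspend; any nonzero return vetoes it. A sketch of a callback written against that contract (my_* names hypothetical):

    static bool my_hw_busy(struct device *dev)
    {
            return false;           /* hypothetical hardware-busy check */
    }

    static int my_runtime_idle(struct device *dev)
    {
            if (my_hw_busy(dev))
                    return -EBUSY;  /* veto: stay active for now */

            return 0;   /* core proceeds to rpm_suspend(RPM_AUTO) */
    }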
442 static int rpm_callback(int (*cb)(struct device *), struct device *dev) in rpm_callback() argument
449 if (dev->power.memalloc_noio) { in rpm_callback()
462 retval = __rpm_callback(cb, dev); in rpm_callback()
465 retval = __rpm_callback(cb, dev); in rpm_callback()
468 dev->power.runtime_error = retval; in rpm_callback()
493 static int rpm_suspend(struct device *dev, int rpmflags) in rpm_suspend() argument
494 __releases(&dev->power.lock) __acquires(&dev->power.lock) in rpm_suspend()
500 trace_rpm_suspend_rcuidle(dev, rpmflags); in rpm_suspend()
503 retval = rpm_check_suspend_allowed(dev); in rpm_suspend()
509 else if (dev->power.runtime_status == RPM_RESUMING && in rpm_suspend()
517 && dev->power.runtime_status != RPM_SUSPENDING) { in rpm_suspend()
518 unsigned long expires = pm_runtime_autosuspend_expiration(dev); in rpm_suspend()
522 dev->power.request = RPM_REQ_NONE; in rpm_suspend()
531 if (!(dev->power.timer_expires && time_before_eq( in rpm_suspend()
532 dev->power.timer_expires, expires))) { in rpm_suspend()
533 dev->power.timer_expires = expires; in rpm_suspend()
534 mod_timer(&dev->power.suspend_timer, expires); in rpm_suspend()
536 dev->power.timer_autosuspends = 1; in rpm_suspend()
542 pm_runtime_cancel_pending(dev); in rpm_suspend()
544 if (dev->power.runtime_status == RPM_SUSPENDING) { in rpm_suspend()
552 if (dev->power.irq_safe) { in rpm_suspend()
553 spin_unlock(&dev->power.lock); in rpm_suspend()
557 spin_lock(&dev->power.lock); in rpm_suspend()
563 prepare_to_wait(&dev->power.wait_queue, &wait, in rpm_suspend()
565 if (dev->power.runtime_status != RPM_SUSPENDING) in rpm_suspend()
568 spin_unlock_irq(&dev->power.lock); in rpm_suspend()
572 spin_lock_irq(&dev->power.lock); in rpm_suspend()
574 finish_wait(&dev->power.wait_queue, &wait); in rpm_suspend()
578 if (dev->power.no_callbacks) in rpm_suspend()
583 dev->power.request = (rpmflags & RPM_AUTO) ? in rpm_suspend()
585 if (!dev->power.request_pending) { in rpm_suspend()
586 dev->power.request_pending = true; in rpm_suspend()
587 queue_work(pm_wq, &dev->power.work); in rpm_suspend()
592 __update_runtime_status(dev, RPM_SUSPENDING); in rpm_suspend()
594 callback = RPM_GET_CALLBACK(dev, runtime_suspend); in rpm_suspend()
596 dev_pm_enable_wake_irq_check(dev, true); in rpm_suspend()
597 retval = rpm_callback(callback, dev); in rpm_suspend()
602 __update_runtime_status(dev, RPM_SUSPENDED); in rpm_suspend()
603 pm_runtime_deactivate_timer(dev); in rpm_suspend()
605 if (dev->parent) { in rpm_suspend()
606 parent = dev->parent; in rpm_suspend()
609 wake_up_all(&dev->power.wait_queue); in rpm_suspend()
611 if (dev->power.deferred_resume) { in rpm_suspend()
612 dev->power.deferred_resume = false; in rpm_suspend()
613 rpm_resume(dev, 0); in rpm_suspend()
619 if (parent && !parent->power.ignore_children && !dev->power.irq_safe) { in rpm_suspend()
620 spin_unlock(&dev->power.lock); in rpm_suspend()
626 spin_lock(&dev->power.lock); in rpm_suspend()
630 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval); in rpm_suspend()
635 dev_pm_disable_wake_irq_check(dev); in rpm_suspend()
636 __update_runtime_status(dev, RPM_ACTIVE); in rpm_suspend()
637 dev->power.deferred_resume = false; in rpm_suspend()
638 wake_up_all(&dev->power.wait_queue); in rpm_suspend()
641 dev->power.runtime_error = 0; in rpm_suspend()
650 pm_runtime_autosuspend_expiration(dev) != 0) in rpm_suspend()
653 pm_runtime_cancel_pending(dev); in rpm_suspend()
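The failure path (lines 635 onward) separates transient from fatal errors: for -EAGAIN and -EBUSY the runtime_error latched by rpm_callback() is cleared again (line 641) and, if the device was marked busy in the meantime, another autosuspend is rescheduled (line 650); any other negative return leaves runtime_error set, blocking runtime PM until the status is explicitly reset. Extending the earlier my_runtime_suspend sketch to use that distinction (my_hw_power_down is another hypothetical helper):

    static int my_runtime_suspend(struct device *dev)
    {
            /* Transient: -EBUSY/-EAGAIN keep runtime_error == 0 and
             * let the core retry the autosuspend later. */
            if (my_hw_busy(dev))            /* hypothetical */
                    return -EBUSY;

            my_hw_power_down(dev);          /* hypothetical */
            return 0;
    }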
675 static int rpm_resume(struct device *dev, int rpmflags) in rpm_resume() argument
676 __releases(&dev->power.lock) __acquires(&dev->power.lock) in rpm_resume()
682 trace_rpm_resume_rcuidle(dev, rpmflags); in rpm_resume()
685 if (dev->power.runtime_error) in rpm_resume()
687 else if (dev->power.disable_depth == 1 && dev->power.is_suspended in rpm_resume()
688 && dev->power.runtime_status == RPM_ACTIVE) in rpm_resume()
690 else if (dev->power.disable_depth > 0) in rpm_resume()
701 dev->power.request = RPM_REQ_NONE; in rpm_resume()
702 if (!dev->power.timer_autosuspends) in rpm_resume()
703 pm_runtime_deactivate_timer(dev); in rpm_resume()
705 if (dev->power.runtime_status == RPM_ACTIVE) { in rpm_resume()
710 if (dev->power.runtime_status == RPM_RESUMING in rpm_resume()
711 || dev->power.runtime_status == RPM_SUSPENDING) { in rpm_resume()
715 if (dev->power.runtime_status == RPM_SUSPENDING) in rpm_resume()
716 dev->power.deferred_resume = true; in rpm_resume()
722 if (dev->power.irq_safe) { in rpm_resume()
723 spin_unlock(&dev->power.lock); in rpm_resume()
727 spin_lock(&dev->power.lock); in rpm_resume()
733 prepare_to_wait(&dev->power.wait_queue, &wait, in rpm_resume()
735 if (dev->power.runtime_status != RPM_RESUMING in rpm_resume()
736 && dev->power.runtime_status != RPM_SUSPENDING) in rpm_resume()
739 spin_unlock_irq(&dev->power.lock); in rpm_resume()
743 spin_lock_irq(&dev->power.lock); in rpm_resume()
745 finish_wait(&dev->power.wait_queue, &wait); in rpm_resume()
754 if (dev->power.no_callbacks && !parent && dev->parent) { in rpm_resume()
755 spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING); in rpm_resume()
756 if (dev->parent->power.disable_depth > 0 in rpm_resume()
757 || dev->parent->power.ignore_children in rpm_resume()
758 || dev->parent->power.runtime_status == RPM_ACTIVE) { in rpm_resume()
759 atomic_inc(&dev->parent->power.child_count); in rpm_resume()
760 spin_unlock(&dev->parent->power.lock); in rpm_resume()
764 spin_unlock(&dev->parent->power.lock); in rpm_resume()
769 dev->power.request = RPM_REQ_RESUME; in rpm_resume()
770 if (!dev->power.request_pending) { in rpm_resume()
771 dev->power.request_pending = true; in rpm_resume()
772 queue_work(pm_wq, &dev->power.work); in rpm_resume()
778 if (!parent && dev->parent) { in rpm_resume()
784 parent = dev->parent; in rpm_resume()
785 if (dev->power.irq_safe) in rpm_resume()
787 spin_unlock(&dev->power.lock); in rpm_resume()
804 spin_lock(&dev->power.lock); in rpm_resume()
811 if (dev->power.no_callbacks) in rpm_resume()
814 __update_runtime_status(dev, RPM_RESUMING); in rpm_resume()
816 callback = RPM_GET_CALLBACK(dev, runtime_resume); in rpm_resume()
818 dev_pm_disable_wake_irq_check(dev); in rpm_resume()
819 retval = rpm_callback(callback, dev); in rpm_resume()
821 __update_runtime_status(dev, RPM_SUSPENDED); in rpm_resume()
822 pm_runtime_cancel_pending(dev); in rpm_resume()
823 dev_pm_enable_wake_irq_check(dev, false); in rpm_resume()
826 __update_runtime_status(dev, RPM_ACTIVE); in rpm_resume()
827 pm_runtime_mark_last_busy(dev); in rpm_resume()
831 wake_up_all(&dev->power.wait_queue); in rpm_resume()
834 rpm_idle(dev, RPM_ASYNC); in rpm_resume()
837 if (parent && !dev->power.irq_safe) { in rpm_resume()
838 spin_unlock_irq(&dev->power.lock); in rpm_resume()
842 spin_lock_irq(&dev->power.lock); in rpm_resume()
845 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval); in rpm_resume()
859 struct device *dev = container_of(work, struct device, power.work); in pm_runtime_work() local
862 spin_lock_irq(&dev->power.lock); in pm_runtime_work()
864 if (!dev->power.request_pending) in pm_runtime_work()
867 req = dev->power.request; in pm_runtime_work()
868 dev->power.request = RPM_REQ_NONE; in pm_runtime_work()
869 dev->power.request_pending = false; in pm_runtime_work()
875 rpm_idle(dev, RPM_NOWAIT); in pm_runtime_work()
878 rpm_suspend(dev, RPM_NOWAIT); in pm_runtime_work()
881 rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO); in pm_runtime_work()
884 rpm_resume(dev, RPM_NOWAIT); in pm_runtime_work()
889 spin_unlock_irq(&dev->power.lock); in pm_runtime_work()
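pm_runtime_work() is the consumer side of every asynchronous (RPM_ASYNC) request: the submit paths set power.request, flag request_pending and queue power.work on pm_wq, and the switch above replays the request with RPM_NOWAIT. The usual producers are the pm_request_*() helpers, which are safe in atomic context; sketch:

    static irqreturn_t my_irq_handler(int irq, void *data)  /* hypothetical */
    {
            struct device *dev = data;

            pm_request_resume(dev);   /* queues RPM_REQ_RESUME on pm_wq */
            return IRQ_HANDLED;
    }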
900 struct device *dev = from_timer(dev, t, power.suspend_timer); in pm_suspend_timer_fn() local
904 spin_lock_irqsave(&dev->power.lock, flags); in pm_suspend_timer_fn()
906 expires = dev->power.timer_expires; in pm_suspend_timer_fn()
909 dev->power.timer_expires = 0; in pm_suspend_timer_fn()
910 rpm_suspend(dev, dev->power.timer_autosuspends ? in pm_suspend_timer_fn()
914 spin_unlock_irqrestore(&dev->power.lock, flags); in pm_suspend_timer_fn()
922 int pm_schedule_suspend(struct device *dev, unsigned int delay) in pm_schedule_suspend() argument
927 spin_lock_irqsave(&dev->power.lock, flags); in pm_schedule_suspend()
930 retval = rpm_suspend(dev, RPM_ASYNC); in pm_schedule_suspend()
934 retval = rpm_check_suspend_allowed(dev); in pm_schedule_suspend()
939 pm_runtime_cancel_pending(dev); in pm_schedule_suspend()
941 dev->power.timer_expires = jiffies + msecs_to_jiffies(delay); in pm_schedule_suspend()
942 dev->power.timer_expires += !dev->power.timer_expires; in pm_schedule_suspend()
943 dev->power.timer_autosuspends = 0; in pm_schedule_suspend()
944 mod_timer(&dev->power.suspend_timer, dev->power.timer_expires); in pm_schedule_suspend()
947 spin_unlock_irqrestore(&dev->power.lock, flags); in pm_schedule_suspend()
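pm_schedule_suspend() arms the same suspend_timer used for autosuspend, but with timer_autosuspends = 0, so when pm_suspend_timer_fn() fires it issues a plain RPM_ASYNC suspend rather than an RPM_AUTO one; a delay of 0 queues an async suspend immediately (line 930). Sketch:

    static int my_mark_inactive(struct device *dev)         /* hypothetical */
    {
            /* Attempt a suspend roughly 500 ms from now; each call
             * re-arms the timer and cancels older pending requests. */
            return pm_schedule_suspend(dev, 500);
    }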
965 int __pm_runtime_idle(struct device *dev, int rpmflags) in __pm_runtime_idle() argument
971 if (!atomic_dec_and_test(&dev->power.usage_count)) in __pm_runtime_idle()
975 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); in __pm_runtime_idle()
977 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_idle()
978 retval = rpm_idle(dev, rpmflags); in __pm_runtime_idle()
979 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_idle()
997 int __pm_runtime_suspend(struct device *dev, int rpmflags) in __pm_runtime_suspend() argument
1003 if (!atomic_dec_and_test(&dev->power.usage_count)) in __pm_runtime_suspend()
1007 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); in __pm_runtime_suspend()
1009 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_suspend()
1010 retval = rpm_suspend(dev, rpmflags); in __pm_runtime_suspend()
1011 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_suspend()
1028 int __pm_runtime_resume(struct device *dev, int rpmflags) in __pm_runtime_resume() argument
1033 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe && in __pm_runtime_resume()
1034 dev->power.runtime_status != RPM_ACTIVE); in __pm_runtime_resume()
1037 atomic_inc(&dev->power.usage_count); in __pm_runtime_resume()
1039 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_resume()
1040 retval = rpm_resume(dev, rpmflags); in __pm_runtime_resume()
1041 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_resume()
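These three wrappers carry the reference counting behind the public helpers: with RPM_GET_PUT, __pm_runtime_idle() and __pm_runtime_suspend() act only when the usage count drops to zero, while __pm_runtime_resume() increments it first. The familiar pairings in linux/pm_runtime.h are thin wrappers over them; the canonical pattern is (sketch):

    static int my_do_io(struct device *dev)         /* hypothetical */
    {
            int ret;

            ret = pm_runtime_get_sync(dev);         /* usage_count++, resume */
            if (ret < 0) {
                    pm_runtime_put_noidle(dev);     /* undo the get */
                    return ret;
            }

            /* ... talk to the hardware ... */

            pm_runtime_put(dev);    /* usage_count--, async idle check */
            return 0;
    }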
1057 int pm_runtime_get_if_in_use(struct device *dev) in pm_runtime_get_if_in_use() argument
1062 spin_lock_irqsave(&dev->power.lock, flags); in pm_runtime_get_if_in_use()
1063 retval = dev->power.disable_depth > 0 ? -EINVAL : in pm_runtime_get_if_in_use()
1064 dev->power.runtime_status == RPM_ACTIVE in pm_runtime_get_if_in_use()
1065 && atomic_inc_not_zero(&dev->power.usage_count); in pm_runtime_get_if_in_use()
1066 spin_unlock_irqrestore(&dev->power.lock, flags); in pm_runtime_get_if_in_use()
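pm_runtime_get_if_in_use() takes a reference only if the device is already RPM_ACTIVE with a nonzero usage count: it returns 1 on success, 0 when the device is not in use, and -EINVAL when runtime PM is disabled. A successful grab must be balanced by a put; sketch:

    static void my_opportunistic_poke(struct device *dev)   /* hypothetical */
    {
            if (pm_runtime_get_if_in_use(dev) <= 0)
                    return;         /* not active (0) or disabled (-EINVAL) */

            /* ... touch registers without forcing a resume ... */

            pm_runtime_put(dev);    /* balance the successful grab */
    }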
1088 int __pm_runtime_set_status(struct device *dev, unsigned int status) in __pm_runtime_set_status() argument
1090 struct device *parent = dev->parent; in __pm_runtime_set_status()
1098 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_set_status()
1100 if (!dev->power.runtime_error && !dev->power.disable_depth) { in __pm_runtime_set_status()
1105 if (dev->power.runtime_status == status || !parent) in __pm_runtime_set_status()
1122 dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n", in __pm_runtime_set_status()
1123 dev_name(dev), in __pm_runtime_set_status()
1126 } else if (dev->power.runtime_status == RPM_SUSPENDED) { in __pm_runtime_set_status()
1137 __update_runtime_status(dev, status); in __pm_runtime_set_status()
1138 dev->power.runtime_error = 0; in __pm_runtime_set_status()
1140 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_set_status()
1158 static void __pm_runtime_barrier(struct device *dev) in __pm_runtime_barrier() argument
1160 pm_runtime_deactivate_timer(dev); in __pm_runtime_barrier()
1162 if (dev->power.request_pending) { in __pm_runtime_barrier()
1163 dev->power.request = RPM_REQ_NONE; in __pm_runtime_barrier()
1164 spin_unlock_irq(&dev->power.lock); in __pm_runtime_barrier()
1166 cancel_work_sync(&dev->power.work); in __pm_runtime_barrier()
1168 spin_lock_irq(&dev->power.lock); in __pm_runtime_barrier()
1169 dev->power.request_pending = false; in __pm_runtime_barrier()
1172 if (dev->power.runtime_status == RPM_SUSPENDING in __pm_runtime_barrier()
1173 || dev->power.runtime_status == RPM_RESUMING in __pm_runtime_barrier()
1174 || dev->power.idle_notification) { in __pm_runtime_barrier()
1179 prepare_to_wait(&dev->power.wait_queue, &wait, in __pm_runtime_barrier()
1181 if (dev->power.runtime_status != RPM_SUSPENDING in __pm_runtime_barrier()
1182 && dev->power.runtime_status != RPM_RESUMING in __pm_runtime_barrier()
1183 && !dev->power.idle_notification) in __pm_runtime_barrier()
1185 spin_unlock_irq(&dev->power.lock); in __pm_runtime_barrier()
1189 spin_lock_irq(&dev->power.lock); in __pm_runtime_barrier()
1191 finish_wait(&dev->power.wait_queue, &wait); in __pm_runtime_barrier()
1209 int pm_runtime_barrier(struct device *dev) in pm_runtime_barrier() argument
1213 pm_runtime_get_noresume(dev); in pm_runtime_barrier()
1214 spin_lock_irq(&dev->power.lock); in pm_runtime_barrier()
1216 if (dev->power.request_pending in pm_runtime_barrier()
1217 && dev->power.request == RPM_REQ_RESUME) { in pm_runtime_barrier()
1218 rpm_resume(dev, 0); in pm_runtime_barrier()
1222 __pm_runtime_barrier(dev); in pm_runtime_barrier()
1224 spin_unlock_irq(&dev->power.lock); in pm_runtime_barrier()
1225 pm_runtime_put_noidle(dev); in pm_runtime_barrier()
1245 void __pm_runtime_disable(struct device *dev, bool check_resume) in __pm_runtime_disable() argument
1247 spin_lock_irq(&dev->power.lock); in __pm_runtime_disable()
1249 if (dev->power.disable_depth > 0) { in __pm_runtime_disable()
1250 dev->power.disable_depth++; in __pm_runtime_disable()
1259 if (check_resume && dev->power.request_pending in __pm_runtime_disable()
1260 && dev->power.request == RPM_REQ_RESUME) { in __pm_runtime_disable()
1265 pm_runtime_get_noresume(dev); in __pm_runtime_disable()
1267 rpm_resume(dev, 0); in __pm_runtime_disable()
1269 pm_runtime_put_noidle(dev); in __pm_runtime_disable()
1272 if (!dev->power.disable_depth++) in __pm_runtime_disable()
1273 __pm_runtime_barrier(dev); in __pm_runtime_disable()
1276 spin_unlock_irq(&dev->power.lock); in __pm_runtime_disable()
1284 void pm_runtime_enable(struct device *dev) in pm_runtime_enable() argument
1288 spin_lock_irqsave(&dev->power.lock, flags); in pm_runtime_enable()
1290 if (dev->power.disable_depth > 0) in pm_runtime_enable()
1291 dev->power.disable_depth--; in pm_runtime_enable()
1293 dev_warn(dev, "Unbalanced %s!\n", __func__); in pm_runtime_enable()
1295 WARN(!dev->power.disable_depth && in pm_runtime_enable()
1296 dev->power.runtime_status == RPM_SUSPENDED && in pm_runtime_enable()
1297 !dev->power.ignore_children && in pm_runtime_enable()
1298 atomic_read(&dev->power.child_count) > 0, in pm_runtime_enable()
1300 dev_name(dev)); in pm_runtime_enable()
1302 spin_unlock_irqrestore(&dev->power.lock, flags); in pm_runtime_enable()
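pm_runtime_enable() decrements the disable_depth that pm_runtime_init() starts at 1, warning on underflow and on exposing a suspended device that still has active children. The standard probe sequence pairs it with a status declaration so the core's bookkeeping matches the hardware state; sketch:

    static int my_probe(struct device *dev)         /* hypothetical */
    {
            pm_runtime_set_active(dev);     /* declare the status first... */
            pm_runtime_enable(dev);         /* ...then disable_depth 1 -> 0 */
            return 0;
    }

The matching ->remove() path calls pm_runtime_disable() before tearing the device down.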
1314 void pm_runtime_forbid(struct device *dev) in pm_runtime_forbid() argument
1316 spin_lock_irq(&dev->power.lock); in pm_runtime_forbid()
1317 if (!dev->power.runtime_auto) in pm_runtime_forbid()
1320 dev->power.runtime_auto = false; in pm_runtime_forbid()
1321 atomic_inc(&dev->power.usage_count); in pm_runtime_forbid()
1322 rpm_resume(dev, 0); in pm_runtime_forbid()
1325 spin_unlock_irq(&dev->power.lock); in pm_runtime_forbid()
1335 void pm_runtime_allow(struct device *dev) in pm_runtime_allow() argument
1337 spin_lock_irq(&dev->power.lock); in pm_runtime_allow()
1338 if (dev->power.runtime_auto) in pm_runtime_allow()
1341 dev->power.runtime_auto = true; in pm_runtime_allow()
1342 if (atomic_dec_and_test(&dev->power.usage_count)) in pm_runtime_allow()
1343 rpm_idle(dev, RPM_AUTO | RPM_ASYNC); in pm_runtime_allow()
1346 spin_unlock_irq(&dev->power.lock); in pm_runtime_allow()
1358 void pm_runtime_no_callbacks(struct device *dev) in pm_runtime_no_callbacks() argument
1360 spin_lock_irq(&dev->power.lock); in pm_runtime_no_callbacks()
1361 dev->power.no_callbacks = 1; in pm_runtime_no_callbacks()
1362 spin_unlock_irq(&dev->power.lock); in pm_runtime_no_callbacks()
1363 if (device_is_registered(dev)) in pm_runtime_no_callbacks()
1364 rpm_sysfs_remove(dev); in pm_runtime_no_callbacks()
1379 void pm_runtime_irq_safe(struct device *dev) in pm_runtime_irq_safe() argument
1381 if (dev->parent) in pm_runtime_irq_safe()
1382 pm_runtime_get_sync(dev->parent); in pm_runtime_irq_safe()
1383 spin_lock_irq(&dev->power.lock); in pm_runtime_irq_safe()
1384 dev->power.irq_safe = 1; in pm_runtime_irq_safe()
1385 spin_unlock_irq(&dev->power.lock); in pm_runtime_irq_safe()
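pm_runtime_irq_safe() is effectively one-way: it takes a reference on the parent up front (so rpm_resume() never has to walk up the tree with interrupts off), and thereafter the core drops only the spinlock, not interrupts, around the callbacks. That makes the synchronous helpers legal in atomic context, at the cost of callbacks that must never sleep and a parent pinned active until pm_runtime_reinit() drops the reference (lines 1509-1513). Sketch:

    static int my_probe_irqsafe(struct device *dev)         /* hypothetical */
    {
            pm_runtime_irq_safe(dev);       /* callbacks must not sleep */
            pm_runtime_set_active(dev);
            pm_runtime_enable(dev);
            return 0;
    }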
1400 static void update_autosuspend(struct device *dev, int old_delay, int old_use) in update_autosuspend() argument
1402 int delay = dev->power.autosuspend_delay; in update_autosuspend()
1405 if (dev->power.use_autosuspend && delay < 0) { in update_autosuspend()
1409 atomic_inc(&dev->power.usage_count); in update_autosuspend()
1410 rpm_resume(dev, 0); in update_autosuspend()
1419 atomic_dec(&dev->power.usage_count); in update_autosuspend()
1422 rpm_idle(dev, RPM_AUTO); in update_autosuspend()
1435 void pm_runtime_set_autosuspend_delay(struct device *dev, int delay) in pm_runtime_set_autosuspend_delay() argument
1439 spin_lock_irq(&dev->power.lock); in pm_runtime_set_autosuspend_delay()
1440 old_delay = dev->power.autosuspend_delay; in pm_runtime_set_autosuspend_delay()
1441 old_use = dev->power.use_autosuspend; in pm_runtime_set_autosuspend_delay()
1442 dev->power.autosuspend_delay = delay; in pm_runtime_set_autosuspend_delay()
1443 update_autosuspend(dev, old_delay, old_use); in pm_runtime_set_autosuspend_delay()
1444 spin_unlock_irq(&dev->power.lock); in pm_runtime_set_autosuspend_delay()
1456 void __pm_runtime_use_autosuspend(struct device *dev, bool use) in __pm_runtime_use_autosuspend() argument
1460 spin_lock_irq(&dev->power.lock); in __pm_runtime_use_autosuspend()
1461 old_delay = dev->power.autosuspend_delay; in __pm_runtime_use_autosuspend()
1462 old_use = dev->power.use_autosuspend; in __pm_runtime_use_autosuspend()
1463 dev->power.use_autosuspend = use; in __pm_runtime_use_autosuspend()
1464 update_autosuspend(dev, old_delay, old_use); in __pm_runtime_use_autosuspend()
1465 spin_unlock_irq(&dev->power.lock); in __pm_runtime_use_autosuspend()
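update_autosuspend() keeps the usage count consistent when the effective policy changes: enabling autosuspend with a negative delay pins the device active (usage_count++, resume), and leaving that state drops the reference and retries an idle with RPM_AUTO. The usual driver-side idiom built on these setters (sketch, my_* names hypothetical):

    static int my_probe_autosuspend(struct device *dev)
    {
            pm_runtime_set_autosuspend_delay(dev, 2000);    /* 2 s idle window */
            pm_runtime_use_autosuspend(dev);
            pm_runtime_set_active(dev);
            pm_runtime_enable(dev);
            return 0;
    }

    static void my_xfer_done(struct device *dev)
    {
            pm_runtime_mark_last_busy(dev);         /* restart the window */
            pm_runtime_put_autosuspend(dev);        /* may arm suspend_timer */
    }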
1473 void pm_runtime_init(struct device *dev) in pm_runtime_init() argument
1475 dev->power.runtime_status = RPM_SUSPENDED; in pm_runtime_init()
1476 dev->power.idle_notification = false; in pm_runtime_init()
1478 dev->power.disable_depth = 1; in pm_runtime_init()
1479 atomic_set(&dev->power.usage_count, 0); in pm_runtime_init()
1481 dev->power.runtime_error = 0; in pm_runtime_init()
1483 atomic_set(&dev->power.child_count, 0); in pm_runtime_init()
1484 pm_suspend_ignore_children(dev, false); in pm_runtime_init()
1485 dev->power.runtime_auto = true; in pm_runtime_init()
1487 dev->power.request_pending = false; in pm_runtime_init()
1488 dev->power.request = RPM_REQ_NONE; in pm_runtime_init()
1489 dev->power.deferred_resume = false; in pm_runtime_init()
1490 dev->power.accounting_timestamp = jiffies; in pm_runtime_init()
1491 INIT_WORK(&dev->power.work, pm_runtime_work); in pm_runtime_init()
1493 dev->power.timer_expires = 0; in pm_runtime_init()
1494 timer_setup(&dev->power.suspend_timer, pm_suspend_timer_fn, 0); in pm_runtime_init()
1496 init_waitqueue_head(&dev->power.wait_queue); in pm_runtime_init()
1503 void pm_runtime_reinit(struct device *dev) in pm_runtime_reinit() argument
1505 if (!pm_runtime_enabled(dev)) { in pm_runtime_reinit()
1506 if (dev->power.runtime_status == RPM_ACTIVE) in pm_runtime_reinit()
1507 pm_runtime_set_suspended(dev); in pm_runtime_reinit()
1508 if (dev->power.irq_safe) { in pm_runtime_reinit()
1509 spin_lock_irq(&dev->power.lock); in pm_runtime_reinit()
1510 dev->power.irq_safe = 0; in pm_runtime_reinit()
1511 spin_unlock_irq(&dev->power.lock); in pm_runtime_reinit()
1512 if (dev->parent) in pm_runtime_reinit()
1513 pm_runtime_put(dev->parent); in pm_runtime_reinit()
1522 void pm_runtime_remove(struct device *dev) in pm_runtime_remove() argument
1524 __pm_runtime_disable(dev, false); in pm_runtime_remove()
1525 pm_runtime_reinit(dev); in pm_runtime_remove()
1545 void pm_runtime_clean_up_links(struct device *dev) in pm_runtime_clean_up_links() argument
1552 list_for_each_entry_rcu(link, &dev->links.consumers, s_node) { in pm_runtime_clean_up_links()
1557 pm_runtime_put_noidle(dev); in pm_runtime_clean_up_links()
1569 void pm_runtime_get_suppliers(struct device *dev) in pm_runtime_get_suppliers() argument
1576 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) in pm_runtime_get_suppliers()
1587 void pm_runtime_put_suppliers(struct device *dev) in pm_runtime_put_suppliers() argument
1594 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) in pm_runtime_put_suppliers()
1601 void pm_runtime_new_link(struct device *dev) in pm_runtime_new_link() argument
1603 spin_lock_irq(&dev->power.lock); in pm_runtime_new_link()
1604 dev->power.links_count++; in pm_runtime_new_link()
1605 spin_unlock_irq(&dev->power.lock); in pm_runtime_new_link()
1608 void pm_runtime_drop_link(struct device *dev) in pm_runtime_drop_link() argument
1610 rpm_put_suppliers(dev); in pm_runtime_drop_link()
1612 spin_lock_irq(&dev->power.lock); in pm_runtime_drop_link()
1613 WARN_ON(dev->power.links_count == 0); in pm_runtime_drop_link()
1614 dev->power.links_count--; in pm_runtime_drop_link()
1615 spin_unlock_irq(&dev->power.lock); in pm_runtime_drop_link()
1618 static bool pm_runtime_need_not_resume(struct device *dev) in pm_runtime_need_not_resume() argument
1620 return atomic_read(&dev->power.usage_count) <= 1 && in pm_runtime_need_not_resume()
1621 (atomic_read(&dev->power.child_count) == 0 || in pm_runtime_need_not_resume()
1622 dev->power.ignore_children); in pm_runtime_need_not_resume()
1642 int pm_runtime_force_suspend(struct device *dev) in pm_runtime_force_suspend() argument
1647 pm_runtime_disable(dev); in pm_runtime_force_suspend()
1648 if (pm_runtime_status_suspended(dev)) in pm_runtime_force_suspend()
1651 callback = RPM_GET_CALLBACK(dev, runtime_suspend); in pm_runtime_force_suspend()
1653 ret = callback ? callback(dev) : 0; in pm_runtime_force_suspend()
1663 if (pm_runtime_need_not_resume(dev)) in pm_runtime_force_suspend()
1664 pm_runtime_set_suspended(dev); in pm_runtime_force_suspend()
1666 __update_runtime_status(dev, RPM_SUSPENDED); in pm_runtime_force_suspend()
1671 pm_runtime_enable(dev); in pm_runtime_force_suspend()
1688 int pm_runtime_force_resume(struct device *dev) in pm_runtime_force_resume() argument
1693 if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev)) in pm_runtime_force_resume()
1700 __update_runtime_status(dev, RPM_ACTIVE); in pm_runtime_force_resume()
1702 callback = RPM_GET_CALLBACK(dev, runtime_resume); in pm_runtime_force_resume()
1704 ret = callback ? callback(dev) : 0; in pm_runtime_force_resume()
1706 pm_runtime_set_suspended(dev); in pm_runtime_force_resume()
1710 pm_runtime_mark_last_busy(dev); in pm_runtime_force_resume()
1712 pm_runtime_enable(dev); in pm_runtime_force_resume()
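pm_runtime_force_suspend() and pm_runtime_force_resume() let a driver without separate system-sleep handling reuse its runtime PM callbacks for system-wide transitions; force_resume deliberately leaves devices suspended when pm_runtime_need_not_resume() says nobody needs them. They are typically wired directly into dev_pm_ops; revisiting the earlier my_pm_ops sketch:

    static const struct dev_pm_ops my_pm_ops = {
            SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                    pm_runtime_force_resume)
            SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
    };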