/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * pm_runtime.h - Device run-time power management helper functions.
 *
 * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>
 */

#ifndef _LINUX_PM_RUNTIME_H
#define _LINUX_PM_RUNTIME_H

#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pm.h>

#include <linux/jiffies.h>

/* Runtime PM flag argument bits */
#define RPM_ASYNC		0x01	/* Request is asynchronous */
#define RPM_NOWAIT		0x02	/* Don't wait for concurrent
					    state change */
#define RPM_GET_PUT		0x04	/* Increment/decrement the
					    usage_count */
#define RPM_AUTO		0x08	/* Use autosuspend_delay */
#ifdef CONFIG_PM
extern struct workqueue_struct *pm_wq;

static inline bool queue_pm_work(struct work_struct *work)
{
	return queue_work(pm_wq, work);
}

extern int pm_generic_runtime_suspend(struct device *dev);
extern int pm_generic_runtime_resume(struct device *dev);
extern int pm_runtime_force_suspend(struct device *dev);
extern int pm_runtime_force_resume(struct device *dev);

extern int __pm_runtime_idle(struct device *dev, int rpmflags);
extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
extern int __pm_runtime_resume(struct device *dev, int rpmflags);
extern int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count);
extern int pm_schedule_suspend(struct device *dev, unsigned int delay);
extern int __pm_runtime_set_status(struct device *dev, unsigned int status);
extern int pm_runtime_barrier(struct device *dev);
extern void pm_runtime_enable(struct device *dev);
extern void __pm_runtime_disable(struct device *dev, bool check_resume);
extern void pm_runtime_allow(struct device *dev);
extern void pm_runtime_forbid(struct device *dev);
extern void pm_runtime_no_callbacks(struct device *dev);
extern void pm_runtime_irq_safe(struct device *dev);
extern void __pm_runtime_use_autosuspend(struct device *dev, bool use);
extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay);
extern u64 pm_runtime_autosuspend_expiration(struct device *dev);
extern void pm_runtime_update_max_time_suspended(struct device *dev,
						 s64 delta_ns);
extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable);
extern void pm_runtime_get_suppliers(struct device *dev);
extern void pm_runtime_put_suppliers(struct device *dev);
extern void pm_runtime_new_link(struct device *dev);
extern void pm_runtime_drop_link(struct device_link *link);

extern int devm_pm_runtime_enable(struct device *dev);

/**
 * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter.
 * @dev: Target device.
 *
 * Increment the runtime PM usage counter of @dev if its runtime PM status is
 * %RPM_ACTIVE and its runtime PM usage counter is greater than 0.
 */
static inline int pm_runtime_get_if_in_use(struct device *dev)
{
	return pm_runtime_get_if_active(dev, false);
}
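
/*
 * A minimal usage sketch (illustrative, not taken from any particular driver):
 * callers that only want to touch the hardware while it is already powered up
 * and in use can pair this helper with pm_runtime_put(), treating a positive
 * return value as "usage counter incremented, device may be accessed":
 *
 *	if (pm_runtime_get_if_in_use(dev) > 0) {
 *		... access the hardware ...
 *		pm_runtime_put(dev);
 *	}
 */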

/**
 * pm_suspend_ignore_children - Set runtime PM behavior regarding children.
 * @dev: Target device.
 * @enable: Whether or not to ignore possible dependencies on children.
 *
 * The dependencies of @dev on its children will not be taken into account by
 * the runtime PM framework going forward if @enable is %true, or they will
 * be taken into account otherwise.
 */
static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
{
	dev->power.ignore_children = enable;
}

/**
 * pm_runtime_get_noresume - Bump up runtime PM usage counter of a device.
 * @dev: Target device.
 */
static inline void pm_runtime_get_noresume(struct device *dev)
{
	atomic_inc(&dev->power.usage_count);
}

/**
 * pm_runtime_put_noidle - Drop runtime PM usage counter of a device.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev unless it is 0 already.
 */
static inline void pm_runtime_put_noidle(struct device *dev)
{
	atomic_add_unless(&dev->power.usage_count, -1, 0);
}
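
/*
 * Illustrative pairing (a typical-use assumption, not mandated by this header):
 * the two helpers above only adjust the usage counter, so they can be used to
 * hold off runtime suspend without triggering a resume or an "idle check":
 *
 *	pm_runtime_get_noresume(dev);
 *	... the device cannot be runtime-suspended because of this reference,
 *	    but it is not resumed either ...
 *	pm_runtime_put_noidle(dev);
 */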

/**
 * pm_runtime_suspended - Check whether or not a device is runtime-suspended.
 * @dev: Target device.
 *
 * Return %true if runtime PM is enabled for @dev and its runtime PM status is
 * %RPM_SUSPENDED, or %false otherwise.
 *
 * Note that the return value of this function can only be trusted if it is
 * called under the runtime PM lock of @dev or under conditions in which
 * runtime PM cannot be either disabled or enabled for @dev and its runtime PM
 * status cannot change.
 */
static inline bool pm_runtime_suspended(struct device *dev)
{
	return dev->power.runtime_status == RPM_SUSPENDED
		&& !dev->power.disable_depth;
}

/**
 * pm_runtime_active - Check whether or not a device is runtime-active.
 * @dev: Target device.
 *
 * Return %true if runtime PM is enabled for @dev and its runtime PM status is
 * %RPM_ACTIVE, or %false otherwise.
 *
 * Note that the return value of this function can only be trusted if it is
 * called under the runtime PM lock of @dev or under conditions in which
 * runtime PM cannot be either disabled or enabled for @dev and its runtime PM
 * status cannot change.
 */
static inline bool pm_runtime_active(struct device *dev)
{
	return dev->power.runtime_status == RPM_ACTIVE
		|| dev->power.disable_depth;
}

/**
 * pm_runtime_status_suspended - Check if runtime PM status is "suspended".
 * @dev: Target device.
 *
 * Return %true if the runtime PM status of @dev is %RPM_SUSPENDED, or %false
 * otherwise, regardless of whether or not runtime PM has been enabled for @dev.
 *
 * Note that the return value of this function can only be trusted if it is
 * called under the runtime PM lock of @dev or under conditions in which the
 * runtime PM status of @dev cannot change.
 */
static inline bool pm_runtime_status_suspended(struct device *dev)
{
	return dev->power.runtime_status == RPM_SUSPENDED;
}

/**
 * pm_runtime_enabled - Check if runtime PM is enabled.
 * @dev: Target device.
 *
 * Return %true if runtime PM is enabled for @dev or %false otherwise.
 *
 * Note that the return value of this function can only be trusted if it is
 * called under the runtime PM lock of @dev or under conditions in which
 * runtime PM cannot be either disabled or enabled for @dev.
 */
static inline bool pm_runtime_enabled(struct device *dev)
{
	return !dev->power.disable_depth;
}

/**
 * pm_runtime_has_no_callbacks - Check if runtime PM callbacks may be present.
 * @dev: Target device.
 *
 * Return %true if @dev is a special device without runtime PM callbacks or
 * %false otherwise.
 */
static inline bool pm_runtime_has_no_callbacks(struct device *dev)
{
	return dev->power.no_callbacks;
}

/**
 * pm_runtime_mark_last_busy - Update the last access time of a device.
 * @dev: Target device.
 *
 * Update the last access time of @dev used by the runtime PM autosuspend
 * mechanism to the current time as returned by ktime_get_mono_fast_ns().
 */
static inline void pm_runtime_mark_last_busy(struct device *dev)
{
	WRITE_ONCE(dev->power.last_busy, ktime_get_mono_fast_ns());
}
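
/*
 * A common pattern (illustrative sketch): drivers using autosuspend typically
 * refresh the last access time right before dropping their usage count, so
 * that the autosuspend delay is measured from the last I/O:
 *
 *	pm_runtime_mark_last_busy(dev);
 *	pm_runtime_put_autosuspend(dev);
 */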

/**
 * pm_runtime_is_irq_safe - Check if runtime PM can work in interrupt context.
 * @dev: Target device.
 *
 * Return %true if @dev has been marked as an "IRQ-safe" device (with respect
 * to runtime PM), in which case its runtime PM callbacks can be expected to
 * work correctly when invoked from interrupt handlers.
 */
static inline bool pm_runtime_is_irq_safe(struct device *dev)
{
	return dev->power.irq_safe;
}

extern u64 pm_runtime_suspended_time(struct device *dev);

#else /* !CONFIG_PM */

static inline bool queue_pm_work(struct work_struct *work) { return false; }

static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; }
static inline int pm_generic_runtime_resume(struct device *dev) { return 0; }
static inline int pm_runtime_force_suspend(struct device *dev) { return 0; }
static inline int pm_runtime_force_resume(struct device *dev) { return 0; }

static inline int __pm_runtime_idle(struct device *dev, int rpmflags)
{
	return -ENOSYS;
}
static inline int __pm_runtime_suspend(struct device *dev, int rpmflags)
{
	return -ENOSYS;
}
static inline int __pm_runtime_resume(struct device *dev, int rpmflags)
{
	return 1;
}
static inline int pm_schedule_suspend(struct device *dev, unsigned int delay)
{
	return -ENOSYS;
}
static inline int pm_runtime_get_if_in_use(struct device *dev)
{
	return -EINVAL;
}
static inline int pm_runtime_get_if_active(struct device *dev,
					   bool ign_usage_count)
{
	return -EINVAL;
}
static inline int __pm_runtime_set_status(struct device *dev,
					    unsigned int status) { return 0; }
static inline int pm_runtime_barrier(struct device *dev) { return 0; }
static inline void pm_runtime_enable(struct device *dev) {}
static inline void __pm_runtime_disable(struct device *dev, bool c) {}
static inline void pm_runtime_allow(struct device *dev) {}
static inline void pm_runtime_forbid(struct device *dev) {}

static inline int devm_pm_runtime_enable(struct device *dev) { return 0; }

static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {}
static inline void pm_runtime_get_noresume(struct device *dev) {}
static inline void pm_runtime_put_noidle(struct device *dev) {}
static inline bool pm_runtime_suspended(struct device *dev) { return false; }
static inline bool pm_runtime_active(struct device *dev) { return true; }
static inline bool pm_runtime_status_suspended(struct device *dev) { return false; }
static inline bool pm_runtime_enabled(struct device *dev) { return false; }

static inline void pm_runtime_no_callbacks(struct device *dev) {}
static inline void pm_runtime_irq_safe(struct device *dev) {}
static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; }

static inline bool pm_runtime_has_no_callbacks(struct device *dev) { return false; }
static inline void pm_runtime_mark_last_busy(struct device *dev) {}
static inline void __pm_runtime_use_autosuspend(struct device *dev,
						bool use) {}
static inline void pm_runtime_set_autosuspend_delay(struct device *dev,
						int delay) {}
static inline u64 pm_runtime_autosuspend_expiration(
				struct device *dev) { return 0; }
static inline void pm_runtime_set_memalloc_noio(struct device *dev,
						bool enable) {}
static inline void pm_runtime_get_suppliers(struct device *dev) {}
static inline void pm_runtime_put_suppliers(struct device *dev) {}
static inline void pm_runtime_new_link(struct device *dev) {}
static inline void pm_runtime_drop_link(struct device_link *link) {}

#endif /* !CONFIG_PM */

/**
 * pm_runtime_idle - Conditionally set up autosuspend of a device or suspend it.
 * @dev: Target device.
 *
 * Invoke the "idle check" callback of @dev and, depending on its return value,
 * set up autosuspend of @dev or suspend it (depending on whether or not
 * autosuspend has been enabled for it).
 */
static inline int pm_runtime_idle(struct device *dev)
{
	return __pm_runtime_idle(dev, 0);
}

/**
 * pm_runtime_suspend - Suspend a device synchronously.
 * @dev: Target device.
 */
static inline int pm_runtime_suspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, 0);
}

/**
 * pm_runtime_autosuspend - Set up autosuspend of a device or suspend it.
 * @dev: Target device.
 *
 * Set up autosuspend of @dev or suspend it (depending on whether or not
 * autosuspend is enabled for it) without engaging its "idle check" callback.
 */
static inline int pm_runtime_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_AUTO);
}

/**
 * pm_runtime_resume - Resume a device synchronously.
 * @dev: Target device.
 */
static inline int pm_runtime_resume(struct device *dev)
{
	return __pm_runtime_resume(dev, 0);
}

/**
 * pm_request_idle - Queue up "idle check" execution for a device.
 * @dev: Target device.
 *
 * Queue up a work item to run an equivalent of pm_runtime_idle() for @dev
 * asynchronously.
 */
static inline int pm_request_idle(struct device *dev)
{
	return __pm_runtime_idle(dev, RPM_ASYNC);
}

/**
 * pm_request_resume - Queue up runtime-resume of a device.
 * @dev: Target device.
 */
static inline int pm_request_resume(struct device *dev)
{
	return __pm_runtime_resume(dev, RPM_ASYNC);
}

/**
 * pm_request_autosuspend - Queue up autosuspend of a device.
 * @dev: Target device.
 *
 * Queue up a work item to run an equivalent of pm_runtime_autosuspend() for
 * @dev asynchronously.
 */
static inline int pm_request_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO);
}

/**
 * pm_runtime_get - Bump up usage counter and queue up resume of a device.
 * @dev: Target device.
 *
 * Bump up the runtime PM usage counter of @dev and queue up a work item to
 * carry out runtime-resume of it.
 */
static inline int pm_runtime_get(struct device *dev)
{
	return __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC);
}

/**
 * pm_runtime_get_sync - Bump up usage counter of a device and resume it.
 * @dev: Target device.
 *
 * Bump up the runtime PM usage counter of @dev and carry out runtime-resume of
 * it synchronously.
 *
 * The possible return values of this function are the same as for
 * pm_runtime_resume() and the runtime PM usage counter of @dev remains
 * incremented in all cases, even if it returns an error code.
 * Consider using pm_runtime_resume_and_get() instead of it, especially
 * if its return value is checked by the caller, as this is likely to result
 * in cleaner code.
 */
static inline int pm_runtime_get_sync(struct device *dev)
{
	return __pm_runtime_resume(dev, RPM_GET_PUT);
}

/**
 * pm_runtime_resume_and_get - Bump up usage counter of a device and resume it.
 * @dev: Target device.
 *
 * Resume @dev synchronously and if that is successful, increment its runtime
 * PM usage counter. Return 0 if the runtime PM usage counter of @dev has been
 * incremented or a negative error code otherwise.
 */
static inline int pm_runtime_resume_and_get(struct device *dev)
{
	int ret;

	ret = __pm_runtime_resume(dev, RPM_GET_PUT);
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	return 0;
}
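
/*
 * A minimal error-handling sketch (illustrative): unlike pm_runtime_get_sync(),
 * a failing pm_runtime_resume_and_get() leaves the usage counter balanced, so
 * the caller only needs to undo it on success:
 *
 *	int ret = pm_runtime_resume_and_get(dev);
 *	if (ret < 0)
 *		return ret;
 *	... access the hardware ...
 *	pm_runtime_put(dev);
 */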

/**
 * pm_runtime_put - Drop device usage counter and queue up "idle check" if 0.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev and if it turns out to be
 * equal to 0, queue up a work item for @dev like in pm_request_idle().
 */
static inline int pm_runtime_put(struct device *dev)
{
	return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
}

/**
 * pm_runtime_put_autosuspend - Drop device usage counter and queue autosuspend if 0.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev and if it turns out to be
 * equal to 0, queue up a work item for @dev like in pm_request_autosuspend().
 */
static inline int pm_runtime_put_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev,
	    RPM_GET_PUT | RPM_ASYNC | RPM_AUTO);
}

/**
 * pm_runtime_put_sync - Drop device usage counter and run "idle check" if 0.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev and if it turns out to be
 * equal to 0, invoke the "idle check" callback of @dev and, depending on its
 * return value, set up autosuspend of @dev or suspend it (depending on whether
 * or not autosuspend has been enabled for it).
 *
 * The possible return values of this function are the same as for
 * pm_runtime_idle() and the runtime PM usage counter of @dev remains
 * decremented in all cases, even if it returns an error code.
 */
static inline int pm_runtime_put_sync(struct device *dev)
{
	return __pm_runtime_idle(dev, RPM_GET_PUT);
}

/**
 * pm_runtime_put_sync_suspend - Drop device usage counter and suspend if 0.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev and if it turns out to be
 * equal to 0, carry out runtime-suspend of @dev synchronously.
 *
 * The possible return values of this function are the same as for
 * pm_runtime_suspend() and the runtime PM usage counter of @dev remains
 * decremented in all cases, even if it returns an error code.
 */
static inline int pm_runtime_put_sync_suspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_GET_PUT);
}

/**
 * pm_runtime_put_sync_autosuspend - Drop device usage counter and autosuspend if 0.
 * @dev: Target device.
 *
 * Decrement the runtime PM usage counter of @dev and if it turns out to be
 * equal to 0, set up autosuspend of @dev or suspend it synchronously (depending
 * on whether or not autosuspend has been enabled for it).
 *
 * The possible return values of this function are the same as for
 * pm_runtime_autosuspend() and the runtime PM usage counter of @dev remains
 * decremented in all cases, even if it returns an error code.
 */
static inline int pm_runtime_put_sync_autosuspend(struct device *dev)
{
	return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_AUTO);
}

/**
 * pm_runtime_set_active - Set runtime PM status to "active".
 * @dev: Target device.
 *
 * Set the runtime PM status of @dev to %RPM_ACTIVE and ensure that dependencies
 * of it will be taken into account.
 *
 * It is not valid to call this function for devices with runtime PM enabled.
 */
static inline int pm_runtime_set_active(struct device *dev)
{
	return __pm_runtime_set_status(dev, RPM_ACTIVE);
}

/**
 * pm_runtime_set_suspended - Set runtime PM status to "suspended".
 * @dev: Target device.
 *
 * Set the runtime PM status of @dev to %RPM_SUSPENDED and ensure that
 * dependencies of it will be taken into account.
 *
 * It is not valid to call this function for devices with runtime PM enabled.
 */
static inline int pm_runtime_set_suspended(struct device *dev)
{
	return __pm_runtime_set_status(dev, RPM_SUSPENDED);
}

/**
 * pm_runtime_disable - Disable runtime PM for a device.
 * @dev: Target device.
 *
 * Prevent the runtime PM framework from working with @dev (by incrementing its
 * "blocking" counter).
 *
 * For each invocation of this function for @dev there must be a matching
 * pm_runtime_enable() call in order for runtime PM to be enabled for it.
 */
static inline void pm_runtime_disable(struct device *dev)
{
	__pm_runtime_disable(dev, true);
}
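
/*
 * A typical bring-up/tear-down sequence (illustrative sketch, assuming the
 * device is already powered up at probe time): the status may only be changed
 * while runtime PM is disabled, and each pm_runtime_disable() must be balanced
 * by a matching pm_runtime_enable():
 *
 *	probe:
 *		pm_runtime_set_active(dev);
 *		pm_runtime_enable(dev);
 *	remove:
 *		pm_runtime_disable(dev);
 */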

/**
 * pm_runtime_use_autosuspend - Allow autosuspend to be used for a device.
 * @dev: Target device.
 *
 * Allow the runtime PM autosuspend mechanism to be used for @dev whenever
 * requested (or "autosuspend" will be handled as direct runtime-suspend for
 * it).
 */
static inline void pm_runtime_use_autosuspend(struct device *dev)
{
	__pm_runtime_use_autosuspend(dev, true);
}

/**
 * pm_runtime_dont_use_autosuspend - Prevent autosuspend from being used.
 * @dev: Target device.
 *
 * Prevent the runtime PM autosuspend mechanism from being used for @dev which
 * means that "autosuspend" will be handled as direct runtime-suspend for it
 * going forward.
 */
static inline void pm_runtime_dont_use_autosuspend(struct device *dev)
{
	__pm_runtime_use_autosuspend(dev, false);
}
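
/*
 * Illustrative setup sketch (the 1000 ms delay is an arbitrary example value):
 * a driver typically opts in to autosuspend once at probe time and revokes it
 * on removal, combining it with the mark_last_busy()/put_autosuspend() pattern
 * shown earlier for its I/O paths:
 *
 *	pm_runtime_set_autosuspend_delay(dev, 1000);
 *	pm_runtime_use_autosuspend(dev);
 *	...
 *	pm_runtime_dont_use_autosuspend(dev);
 */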

#endif