// SPDX-License-Identifier: GPL-2.0
/* sysfs entries for device PM */
#include <linux/device.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include "power.h"

/*
 * control - Report/change current runtime PM setting of the device
 *
 * Runtime power management of a device can be blocked with the help of
 * this attribute. All devices have one of the following two values for
 * the power/control file:
 *
 * + "auto\n" to allow the device to be power managed at run time;
 * + "on\n" to prevent the device from being power managed at run time;
 *
 * The default for all devices is "auto", which means that devices may be
 * subject to automatic power management, depending on their drivers.
 * Changing this attribute to "on" prevents the driver from power managing
 * the device at run time. Doing that while the device is suspended causes
 * it to be woken up.
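 *
 * For example, from user space (the device path here is illustrative):
 *
 *	# cat /sys/devices/.../power/control
 *	auto
 *	# echo on > /sys/devices/.../power/control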
 *
 * wakeup - Report/change current wakeup option for device
 *
 * Some devices support "wakeup" events, which are hardware signals
 * used to activate devices from suspended or low power states. Such
 * devices have one of three values for the sysfs power/wakeup file:
 *
 * + "enabled\n" to issue the events;
 * + "disabled\n" not to do so; or
 * + "\n" for temporary or permanent inability to issue wakeup.
 *
 * (For example, unconfigured USB devices can't issue wakeups.)
 *
 * Familiar examples of devices that can issue wakeup events include
 * keyboards and mice (both PS2 and USB styles), power buttons, modems,
 * "Wake-On-LAN" Ethernet links, GPIO lines, and more. Some events
 * will wake the entire system from a suspend state; others may just
 * wake up the device (if the system as a whole is already active).
 * Some wakeup events use normal IRQ lines; others use special out-of-band
 * signaling.
 *
 * It is the responsibility of device drivers to enable (or disable)
 * wakeup signaling as part of changing device power states, respecting
 * the policy choices provided through the driver model.
 *
 * Devices may not be able to generate wakeup events from all power
 * states. Also, the events may be ignored in some configurations;
 * for example, they might need help from other devices that aren't
 * active, or which may have wakeup disabled. Some drivers rely on
 * wakeup events internally (unless they are disabled), keeping
 * their hardware in low power modes whenever they're unused. This
 * saves runtime power, without requiring system-wide sleep states.
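 *
 * For example, to let an (illustrative) device wake the system up from
 * suspend:
 *
 *	# echo enabled > /sys/devices/.../power/wakeup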
 *
 * async - Report/change current async suspend setting for the device
 *
 * Asynchronous suspend and resume of the device during system-wide power
 * state transitions can be enabled by writing "enabled" to this file.
 * Analogously, if "disabled" is written to this file, the device will be
 * suspended and resumed synchronously.
 *
 * All devices have one of the following two values for power/async:
 *
 * + "enabled\n" to permit the asynchronous suspend/resume of the device;
 * + "disabled\n" to forbid it;
 *
 * NOTE: It generally is unsafe to permit the asynchronous suspend/resume
 * of a device unless it is certain that all of the PM dependencies of the
 * device are known to the PM core. However, for some devices this
 * attribute is set to "enabled" by bus type code or device drivers and in
 * those cases it should be safe to leave the default value.
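 *
 * For example (illustrative path):
 *
 *	# echo disabled > /sys/devices/.../power/async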
 *
 * autosuspend_delay_ms - Report/change a device's autosuspend_delay value
 *
 * Some drivers don't want to carry out a runtime suspend as soon as a
 * device becomes idle; they want it always to remain idle for some period
 * of time before suspending it. This period is the autosuspend_delay
 * value (expressed in milliseconds) and it can be controlled by the user.
 * If the value is negative then the device will never be runtime
 * suspended.
 *
 * NOTE: The autosuspend_delay_ms attribute and the autosuspend_delay
 * value are used only if the driver calls pm_runtime_use_autosuspend().
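 *
 * For example, to wait two seconds of idleness before runtime suspend, or
 * to prevent runtime suspend entirely (illustrative path):
 *
 *	# echo 2000 > /sys/devices/.../power/autosuspend_delay_ms
 *	# echo -1 > /sys/devices/.../power/autosuspend_delay_ms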
 *
 * wakeup_count - Report the number of wakeup events related to the device
 */

const char power_group_name[] = "power";
EXPORT_SYMBOL_GPL(power_group_name);

static const char ctrl_auto[] = "auto";
static const char ctrl_on[] = "on";

static ssize_t control_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%s\n",
		       dev->power.runtime_auto ? ctrl_auto : ctrl_on);
}

static ssize_t control_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t n)
{
	device_lock(dev);
	if (sysfs_streq(buf, ctrl_auto))
		pm_runtime_allow(dev);
	else if (sysfs_streq(buf, ctrl_on))
		pm_runtime_forbid(dev);
	else
		n = -EINVAL;
	device_unlock(dev);
	return n;
}

static DEVICE_ATTR_RW(control);

static ssize_t runtime_active_time_show(struct device *dev,
					struct device_attribute *attr, char *buf)
{
	int ret;
	u64 tmp = pm_runtime_active_time(dev);
	do_div(tmp, NSEC_PER_MSEC);
	ret = sprintf(buf, "%llu\n", tmp);
	return ret;
}

static DEVICE_ATTR_RO(runtime_active_time);

static ssize_t runtime_suspended_time_show(struct device *dev,
					   struct device_attribute *attr, char *buf)
{
	int ret;
	u64 tmp = pm_runtime_suspended_time(dev);
	do_div(tmp, NSEC_PER_MSEC);
	ret = sprintf(buf, "%llu\n", tmp);
	return ret;
}

static DEVICE_ATTR_RO(runtime_suspended_time);

static ssize_t runtime_status_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	const char *p;

	if (dev->power.runtime_error) {
		p = "error\n";
	} else if (dev->power.disable_depth) {
		p = "unsupported\n";
	} else {
		switch (dev->power.runtime_status) {
		case RPM_SUSPENDED:
			p = "suspended\n";
			break;
		case RPM_SUSPENDING:
			p = "suspending\n";
			break;
		case RPM_RESUMING:
			p = "resuming\n";
			break;
		case RPM_ACTIVE:
			p = "active\n";
			break;
		default:
			return -EIO;
		}
	}
	return sprintf(buf, p);
}

static DEVICE_ATTR_RO(runtime_status);

static ssize_t autosuspend_delay_ms_show(struct device *dev,
					 struct device_attribute *attr, char *buf)
{
	if (!dev->power.use_autosuspend)
		return -EIO;
	return sprintf(buf, "%d\n", dev->power.autosuspend_delay);
}

static ssize_t autosuspend_delay_ms_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t n)
{
	long delay;

	if (!dev->power.use_autosuspend)
		return -EIO;

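	/* Reject values that will not fit in an int. */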
	if (kstrtol(buf, 10, &delay) != 0 || delay != (int) delay)
		return -EINVAL;

	device_lock(dev);
	pm_runtime_set_autosuspend_delay(dev, delay);
	device_unlock(dev);
	return n;
}

static DEVICE_ATTR_RW(autosuspend_delay_ms);

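/*
 * Note the mapping used by the two functions below: the user-visible
 * string "n/a" corresponds to the internal resume-latency value 0, while
 * a user-visible 0 is stored as PM_QOS_RESUME_LATENCY_NO_CONSTRAINT.
 */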
static ssize_t pm_qos_resume_latency_us_show(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	s32 value = dev_pm_qos_requested_resume_latency(dev);

	if (value == 0)
		return sprintf(buf, "n/a\n");
	if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
		value = 0;

	return sprintf(buf, "%d\n", value);
}

static ssize_t pm_qos_resume_latency_us_store(struct device *dev,
					      struct device_attribute *attr,
					      const char *buf, size_t n)
{
	s32 value;
	int ret;

	if (!kstrtos32(buf, 0, &value)) {
		/*
		 * Prevent users from writing negative or "no constraint"
		 * values directly.
		 */
		if (value < 0 || value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
			return -EINVAL;

		if (value == 0)
			value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
	} else if (sysfs_streq(buf, "n/a")) {
		value = 0;
	} else {
		return -EINVAL;
	}

	ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req,
					value);
	return ret < 0 ? ret : n;
}

static DEVICE_ATTR_RW(pm_qos_resume_latency_us);

static ssize_t pm_qos_latency_tolerance_us_show(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	s32 value = dev_pm_qos_get_user_latency_tolerance(dev);

	if (value < 0)
		return sprintf(buf, "auto\n");
	if (value == PM_QOS_LATENCY_ANY)
		return sprintf(buf, "any\n");

	return sprintf(buf, "%d\n", value);
}

static ssize_t pm_qos_latency_tolerance_us_store(struct device *dev,
						 struct device_attribute *attr,
						 const char *buf, size_t n)
{
	s32 value;
	int ret;

	if (kstrtos32(buf, 0, &value) == 0) {
		/* Users can't write negative values directly */
		if (value < 0)
			return -EINVAL;
	} else {
		if (sysfs_streq(buf, "auto"))
			value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
		else if (sysfs_streq(buf, "any"))
			value = PM_QOS_LATENCY_ANY;
		else
			return -EINVAL;
	}
	ret = dev_pm_qos_update_user_latency_tolerance(dev, value);
	return ret < 0 ? ret : n;
}

static DEVICE_ATTR_RW(pm_qos_latency_tolerance_us);

static ssize_t pm_qos_no_power_off_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return sprintf(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev)
					& PM_QOS_FLAG_NO_POWER_OFF));
}

static ssize_t pm_qos_no_power_off_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t n)
{
	int ret;

	if (kstrtoint(buf, 0, &ret))
		return -EINVAL;

	if (ret != 0 && ret != 1)
		return -EINVAL;

	ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, ret);
	return ret < 0 ? ret : n;
}

static DEVICE_ATTR_RW(pm_qos_no_power_off);

#ifdef CONFIG_PM_SLEEP
static const char _enabled[] = "enabled";
static const char _disabled[] = "disabled";

static ssize_t wakeup_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	return sprintf(buf, "%s\n", device_can_wakeup(dev)
		? (device_may_wakeup(dev) ? _enabled : _disabled)
		: "");
}

static ssize_t wakeup_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t n)
{
	if (!device_can_wakeup(dev))
		return -EINVAL;

	if (sysfs_streq(buf, _enabled))
		device_set_wakeup_enable(dev, 1);
	else if (sysfs_streq(buf, _disabled))
		device_set_wakeup_enable(dev, 0);
	else
		return -EINVAL;
	return n;
}

static DEVICE_ATTR_RW(wakeup);

static ssize_t wakeup_count_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	unsigned long count = 0;
	bool enabled = false;

	spin_lock_irq(&dev->power.lock);
	if (dev->power.wakeup) {
		count = dev->power.wakeup->wakeup_count;
		enabled = true;
	}
	spin_unlock_irq(&dev->power.lock);
	return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(wakeup_count);

static ssize_t wakeup_active_count_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	unsigned long count = 0;
	bool enabled = false;

	spin_lock_irq(&dev->power.lock);
	if (dev->power.wakeup) {
		count = dev->power.wakeup->active_count;
		enabled = true;
	}
	spin_unlock_irq(&dev->power.lock);
	return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(wakeup_active_count);

static ssize_t wakeup_abort_count_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	unsigned long count = 0;
	bool enabled = false;

	spin_lock_irq(&dev->power.lock);
	if (dev->power.wakeup) {
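		/*
		 * The wakeup_count field counts events that might have
		 * aborted a system suspend in progress.
		 */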
		count = dev->power.wakeup->wakeup_count;
		enabled = true;
	}
	spin_unlock_irq(&dev->power.lock);
	return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(wakeup_abort_count);

static ssize_t wakeup_expire_count_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	unsigned long count = 0;
	bool enabled = false;

	spin_lock_irq(&dev->power.lock);
	if (dev->power.wakeup) {
		count = dev->power.wakeup->expire_count;
		enabled = true;
	}
	spin_unlock_irq(&dev->power.lock);
	return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(wakeup_expire_count);

static ssize_t wakeup_active_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	unsigned int active = 0;
	bool enabled = false;

	spin_lock_irq(&dev->power.lock);
	if (dev->power.wakeup) {
		active = dev->power.wakeup->active;
		enabled = true;
	}
	spin_unlock_irq(&dev->power.lock);
	return enabled ? sprintf(buf, "%u\n", active) : sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(wakeup_active);

static ssize_t wakeup_total_time_ms_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	s64 msec = 0;
	bool enabled = false;

	spin_lock_irq(&dev->power.lock);
	if (dev->power.wakeup) {
		msec = ktime_to_ms(dev->power.wakeup->total_time);
		enabled = true;
	}
	spin_unlock_irq(&dev->power.lock);
	return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(wakeup_total_time_ms);

static ssize_t wakeup_max_time_ms_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	s64 msec = 0;
	bool enabled = false;

	spin_lock_irq(&dev->power.lock);
	if (dev->power.wakeup) {
		msec = ktime_to_ms(dev->power.wakeup->max_time);
		enabled = true;
	}
	spin_unlock_irq(&dev->power.lock);
	return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(wakeup_max_time_ms);

static ssize_t wakeup_last_time_ms_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	s64 msec = 0;
	bool enabled = false;

	spin_lock_irq(&dev->power.lock);
	if (dev->power.wakeup) {
		msec = ktime_to_ms(dev->power.wakeup->last_time);
		enabled = true;
	}
	spin_unlock_irq(&dev->power.lock);
	return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(wakeup_last_time_ms);

#ifdef CONFIG_PM_AUTOSLEEP
static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev,
						 struct device_attribute *attr,
						 char *buf)
{
	s64 msec = 0;
	bool enabled = false;

	spin_lock_irq(&dev->power.lock);
	if (dev->power.wakeup) {
		msec = ktime_to_ms(dev->power.wakeup->prevent_sleep_time);
		enabled = true;
	}
	spin_unlock_irq(&dev->power.lock);
	return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms);
#endif /* CONFIG_PM_AUTOSLEEP */
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM_ADVANCED_DEBUG
static ssize_t runtime_usage_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", atomic_read(&dev->power.usage_count));
}
static DEVICE_ATTR_RO(runtime_usage);

static ssize_t runtime_active_kids_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return sprintf(buf, "%d\n", dev->power.ignore_children ?
		0 : atomic_read(&dev->power.child_count));
}
static DEVICE_ATTR_RO(runtime_active_kids);

static ssize_t runtime_enabled_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	if (dev->power.disable_depth && (dev->power.runtime_auto == false))
		return sprintf(buf, "disabled & forbidden\n");
	if (dev->power.disable_depth)
		return sprintf(buf, "disabled\n");
	if (dev->power.runtime_auto == false)
		return sprintf(buf, "forbidden\n");
	return sprintf(buf, "enabled\n");
}
static DEVICE_ATTR_RO(runtime_enabled);

#ifdef CONFIG_PM_SLEEP
static ssize_t async_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%s\n",
		       device_async_suspend_enabled(dev) ?
		       _enabled : _disabled);
}

static ssize_t async_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t n)
{
	if (sysfs_streq(buf, _enabled))
		device_enable_async_suspend(dev);
	else if (sysfs_streq(buf, _disabled))
		device_disable_async_suspend(dev);
	else
		return -EINVAL;
	return n;
}

static DEVICE_ATTR_RW(async);

#endif /* CONFIG_PM_SLEEP */
#endif /* CONFIG_PM_ADVANCED_DEBUG */

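/*
 * All of the attribute groups below share the group name "power", so
 * sysfs_merge_group() folds the optional attributes into the single
 * power/ directory created from pm_attr_group in dpm_sysfs_add().
 */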
static struct attribute *power_attrs[] = {
#ifdef CONFIG_PM_ADVANCED_DEBUG
#ifdef CONFIG_PM_SLEEP
	&dev_attr_async.attr,
#endif
	&dev_attr_runtime_status.attr,
	&dev_attr_runtime_usage.attr,
	&dev_attr_runtime_active_kids.attr,
	&dev_attr_runtime_enabled.attr,
#endif /* CONFIG_PM_ADVANCED_DEBUG */
	NULL,
};
static const struct attribute_group pm_attr_group = {
	.name	= power_group_name,
	.attrs	= power_attrs,
};

static struct attribute *wakeup_attrs[] = {
#ifdef CONFIG_PM_SLEEP
	&dev_attr_wakeup.attr,
	&dev_attr_wakeup_count.attr,
	&dev_attr_wakeup_active_count.attr,
	&dev_attr_wakeup_abort_count.attr,
	&dev_attr_wakeup_expire_count.attr,
	&dev_attr_wakeup_active.attr,
	&dev_attr_wakeup_total_time_ms.attr,
	&dev_attr_wakeup_max_time_ms.attr,
	&dev_attr_wakeup_last_time_ms.attr,
#ifdef CONFIG_PM_AUTOSLEEP
	&dev_attr_wakeup_prevent_sleep_time_ms.attr,
#endif
#endif
	NULL,
};
static const struct attribute_group pm_wakeup_attr_group = {
	.name	= power_group_name,
	.attrs	= wakeup_attrs,
};

static struct attribute *runtime_attrs[] = {
#ifndef CONFIG_PM_ADVANCED_DEBUG
	&dev_attr_runtime_status.attr,
#endif
	&dev_attr_control.attr,
	&dev_attr_runtime_suspended_time.attr,
	&dev_attr_runtime_active_time.attr,
	&dev_attr_autosuspend_delay_ms.attr,
	NULL,
};
static const struct attribute_group pm_runtime_attr_group = {
	.name	= power_group_name,
	.attrs	= runtime_attrs,
};

static struct attribute *pm_qos_resume_latency_attrs[] = {
	&dev_attr_pm_qos_resume_latency_us.attr,
	NULL,
};
static const struct attribute_group pm_qos_resume_latency_attr_group = {
	.name	= power_group_name,
	.attrs	= pm_qos_resume_latency_attrs,
};

static struct attribute *pm_qos_latency_tolerance_attrs[] = {
	&dev_attr_pm_qos_latency_tolerance_us.attr,
	NULL,
};
static const struct attribute_group pm_qos_latency_tolerance_attr_group = {
	.name	= power_group_name,
	.attrs	= pm_qos_latency_tolerance_attrs,
};

static struct attribute *pm_qos_flags_attrs[] = {
	&dev_attr_pm_qos_no_power_off.attr,
	NULL,
};
static const struct attribute_group pm_qos_flags_attr_group = {
	.name	= power_group_name,
	.attrs	= pm_qos_flags_attrs,
};

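/*
 * Create the power/ group for @dev and merge in the optional runtime PM,
 * wakeup and latency-tolerance sub-groups that apply to it.
 */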
int dpm_sysfs_add(struct device *dev)
{
	int rc;

	/* No need to create PM sysfs if explicitly disabled. */
	if (device_pm_not_required(dev))
		return 0;

	rc = sysfs_create_group(&dev->kobj, &pm_attr_group);
	if (rc)
		return rc;

	if (pm_runtime_callbacks_present(dev)) {
		rc = sysfs_merge_group(&dev->kobj, &pm_runtime_attr_group);
		if (rc)
			goto err_out;
	}
	if (device_can_wakeup(dev)) {
		rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
		if (rc)
			goto err_runtime;
	}
	if (dev->power.set_latency_tolerance) {
		rc = sysfs_merge_group(&dev->kobj,
				       &pm_qos_latency_tolerance_attr_group);
		if (rc)
			goto err_wakeup;
	}
	rc = pm_wakeup_source_sysfs_add(dev);
	if (rc)
		goto err_latency;
	return 0;

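	/* Error unwind: undo the group merges above in reverse order. */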
err_latency:
	sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
err_wakeup:
	sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
err_runtime:
	sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
err_out:
	sysfs_remove_group(&dev->kobj, &pm_attr_group);
	return rc;
}

int wakeup_sysfs_add(struct device *dev)
{
	return sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
}

void wakeup_sysfs_remove(struct device *dev)
{
	sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
}

int pm_qos_sysfs_add_resume_latency(struct device *dev)
{
	return sysfs_merge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
}

void pm_qos_sysfs_remove_resume_latency(struct device *dev)
{
	sysfs_unmerge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
}

int pm_qos_sysfs_add_flags(struct device *dev)
{
	return sysfs_merge_group(&dev->kobj, &pm_qos_flags_attr_group);
}

void pm_qos_sysfs_remove_flags(struct device *dev)
{
	sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group);
}

int pm_qos_sysfs_add_latency_tolerance(struct device *dev)
{
	return sysfs_merge_group(&dev->kobj,
				 &pm_qos_latency_tolerance_attr_group);
}

void pm_qos_sysfs_remove_latency_tolerance(struct device *dev)
{
	sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
}

void rpm_sysfs_remove(struct device *dev)
{
	sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
}

void dpm_sysfs_remove(struct device *dev)
{
	if (device_pm_not_required(dev))
		return;
	sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
	dev_pm_qos_constraints_destroy(dev);
	rpm_sysfs_remove(dev);
	sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
	sysfs_remove_group(&dev->kobj, &pm_attr_group);
}