// SPDX-License-Identifier: GPL-2.0
/*
 * Devices PM QoS constraints management
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * This module exposes the interface to kernel space for specifying
 * per-device PM QoS dependencies. It provides infrastructure for registration
 * of:
 *
 * Dependents on a QoS value : register requests
 * Watchers of QoS value : get notified when target QoS value changes
 *
 * This QoS design is best effort based. Dependents register their QoS needs.
 * Watchers register to keep track of the current QoS needs of the system.
 * Watchers can register a per-device notification callback using the
 * dev_pm_qos_*_notifier API. The notification chain data is stored in the
 * per-device constraint data struct.
 *
 * Note about the per-device constraint data struct allocation:
 * . The per-device constraints data struct ptr is stored into the device
 *   dev_pm_info.
 * . To minimize the data usage by the per-device constraints, the data struct
 *   is only allocated at the first call to dev_pm_qos_add_request.
 * . The data is later free'd when the device is removed from the system.
 * . A global mutex protects the constraints users from the data being
 *   allocated and free'd.
 */
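
/*
 * For illustration only: a minimal, hypothetical driver-side sketch of the
 * "dependent" flow described above, using the request API implemented in this
 * file.  The request handle must remain allocated for as long as the request
 * is registered; error handling is elided.
 *
 *	static struct dev_pm_qos_request my_req;
 *
 *	static int my_driver_constrain(struct device *dev)
 *	{
 *		int ret;
 *
 *		// Ask for a worst-case resume latency of 100 us.
 *		ret = dev_pm_qos_add_request(dev, &my_req,
 *					     DEV_PM_QOS_RESUME_LATENCY, 100);
 *		if (ret < 0)
 *			return ret;
 *
 *		// Relax the constraint later on ...
 *		dev_pm_qos_update_request(&my_req, 500);
 *		// ... and drop it when it is no longer needed.
 *		dev_pm_qos_remove_request(&my_req);
 *		return 0;
 *	}
 */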

#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <trace/events/power.h>

#include "power.h"

static DEFINE_MUTEX(dev_pm_qos_mtx);
static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);

/**
 * __dev_pm_qos_flags - Check PM QoS flags for a given device.
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 *
 * This routine must be called with dev->power.lock held.
 */
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
{
	struct dev_pm_qos *qos = dev->power.qos;
	struct pm_qos_flags *pqf;
	s32 val;

	lockdep_assert_held(&dev->power.lock);

	if (IS_ERR_OR_NULL(qos))
		return PM_QOS_FLAGS_UNDEFINED;

	pqf = &qos->flags;
	if (list_empty(&pqf->list))
		return PM_QOS_FLAGS_UNDEFINED;

	val = pqf->effective_flags & mask;
	if (val)
		return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;

	return PM_QOS_FLAGS_NONE;
}

/**
 * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 */
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
{
	unsigned long irqflags;
	enum pm_qos_flags_status ret;

	spin_lock_irqsave(&dev->power.lock, irqflags);
	ret = __dev_pm_qos_flags(dev, mask);
	spin_unlock_irqrestore(&dev->power.lock, irqflags);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_flags);

/**
 * __dev_pm_qos_resume_latency - Get resume latency constraint for a given device.
 * @dev: Device to get the PM QoS constraint value for.
 *
 * This routine must be called with dev->power.lock held.
 */
s32 __dev_pm_qos_resume_latency(struct device *dev)
{
	lockdep_assert_held(&dev->power.lock);

	return dev_pm_qos_raw_resume_latency(dev);
}

/**
 * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
 * @dev: Device to get the PM QoS constraint value for.
 * @type: QoS request type.
 */
s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type)
{
	struct dev_pm_qos *qos = dev->power.qos;
	unsigned long flags;
	s32 ret;

	spin_lock_irqsave(&dev->power.lock, flags);

	if (type == DEV_PM_QOS_RESUME_LATENCY) {
		ret = IS_ERR_OR_NULL(qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT
			: pm_qos_read_value(&qos->resume_latency);
	} else {
		WARN_ON(1);
		ret = 0;
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return ret;
}
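
/*
 * Frameworks that need the aggregate constraint (for example when deciding
 * whether a given low-power state is acceptable) simply read it back, e.g.:
 *
 *	s32 limit = dev_pm_qos_read_value(dev, DEV_PM_QOS_RESUME_LATENCY);
 *
 * which, as implemented above, yields PM_QOS_RESUME_LATENCY_NO_CONSTRAINT
 * when no constraints object exists for the device.
 */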

/**
 * apply_constraint - Add/modify/remove device PM QoS request.
 * @req: Constraint request to apply
 * @action: Action to perform (add/update/remove).
 * @value: Value to assign to the QoS request.
 *
 * Internal function to update the constraints list using the PM QoS core
 * code and if needed call the per-device callbacks.
 */
static int apply_constraint(struct dev_pm_qos_request *req,
			    enum pm_qos_req_action action, s32 value)
{
	struct dev_pm_qos *qos = req->dev->power.qos;
	int ret;

	switch (req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		if (WARN_ON(action != PM_QOS_REMOVE_REQ && value < 0))
			value = 0;

		ret = pm_qos_update_target(&qos->resume_latency,
					   &req->data.pnode, action, value);
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		ret = pm_qos_update_target(&qos->latency_tolerance,
					   &req->data.pnode, action, value);
		if (ret) {
			value = pm_qos_read_value(&qos->latency_tolerance);
			req->dev->power.set_latency_tolerance(req->dev, value);
		}
		break;
	case DEV_PM_QOS_FLAGS:
		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
					  action, value);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*
 * dev_pm_qos_constraints_allocate
 * @dev: device to allocate data for
 *
 * Called at the first call to add_request, for constraint data allocation
 * Must be called with the dev_pm_qos_mtx mutex held
 */
static int dev_pm_qos_constraints_allocate(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct pm_qos_constraints *c;
	struct blocking_notifier_head *n;

	qos = kzalloc(sizeof(*qos), GFP_KERNEL);
	if (!qos)
		return -ENOMEM;

	n = kzalloc(3 * sizeof(*n), GFP_KERNEL);
	if (!n) {
		kfree(qos);
		return -ENOMEM;
	}

	c = &qos->resume_latency;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;
	c->notifiers = n;
	BLOCKING_INIT_NOTIFIER_HEAD(n);

	c = &qos->latency_tolerance;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;

	INIT_LIST_HEAD(&qos->flags.list);

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = qos;
	spin_unlock_irq(&dev->power.lock);

	return 0;
}

static void __dev_pm_qos_hide_latency_limit(struct device *dev);
static void __dev_pm_qos_hide_flags(struct device *dev);

/**
 * dev_pm_qos_constraints_destroy
 * @dev: target device
 *
 * Called from the device PM subsystem on device removal under device_pm_lock().
 */
void dev_pm_qos_constraints_destroy(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct dev_pm_qos_request *req, *tmp;
	struct pm_qos_constraints *c;
	struct pm_qos_flags *f;

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	/*
	 * If the device's PM QoS resume latency limit or PM QoS flags have been
	 * exposed to user space, they have to be hidden at this point.
	 */
	pm_qos_sysfs_remove_resume_latency(dev);
	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);

	__dev_pm_qos_hide_latency_limit(dev);
	__dev_pm_qos_hide_flags(dev);

	qos = dev->power.qos;
	if (!qos)
		goto out;

	/* Flush the constraints lists for the device. */
	c = &qos->resume_latency;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		/*
		 * Update constraints list and call the notification
		 * callbacks if needed
		 */
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	c = &qos->latency_tolerance;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	f = &qos->flags;
	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = ERR_PTR(-ENODEV);
	spin_unlock_irq(&dev->power.lock);

	kfree(qos->resume_latency.notifiers);
	kfree(qos);

 out:
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}

static bool dev_pm_qos_invalid_req_type(struct device *dev,
					enum dev_pm_qos_req_type type)
{
	return type == DEV_PM_QOS_LATENCY_TOLERANCE &&
	       !dev->power.set_latency_tolerance;
}

static int __dev_pm_qos_add_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	int ret = 0;

	if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type))
		return -EINVAL;

	if (WARN(dev_pm_qos_request_active(req),
		 "%s() called for already added request\n", __func__))
		return -EINVAL;

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	trace_dev_pm_qos_add_request(dev_name(dev), type, value);
	if (!ret) {
		req->dev = dev;
		req->type = type;
		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
	}
	return ret;
}

/**
 * dev_pm_qos_add_request - inserts new qos request into the list
 * @dev: target device for the constraint
 * @req: pointer to a preallocated handle
 * @type: type of the request
 * @value: defines the qos request
 *
 * This function inserts a new entry in the device constraints list of
 * requested qos performance characteristics. It recomputes the aggregate
 * QoS expectations of parameters and initializes the dev_pm_qos_request
 * handle.  Caller needs to save this handle for later use in updates and
 * removal.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
 * to allocate for data structures, -ENODEV if the device has just been removed
 * from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
			   enum dev_pm_qos_req_type type, s32 value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_add_request(dev, req, type, value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);

/**
 * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
 * @req : PM QoS request to modify.
 * @new_value: New value to request.
 */
static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
				       s32 new_value)
{
	s32 curr_value;
	int ret = 0;

	if (!req) /* guard against callers passing in null */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	switch (req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		curr_value = req->data.pnode.prio;
		break;
	case DEV_PM_QOS_FLAGS:
		curr_value = req->data.flr.flags;
		break;
	default:
		return -EINVAL;
	}

	trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
					new_value);
	if (curr_value != new_value)
		ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);

	return ret;
}

/**
 * dev_pm_qos_update_request - modifies an existing qos request
 * @req : handle to list element holding a dev_pm_qos request to use
 * @new_value: defines the qos request
 *
 * Updates an existing dev PM qos request along with updating the
 * target value.
 *
 * Attempts are made to make this code callable on hot code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_update_request(req, new_value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);

static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	if (!req) /* guard against callers passing in null */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
					PM_QOS_DEFAULT_VALUE);
	ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
	memset(req, 0, sizeof(*req));
	return ret;
}

/**
 * dev_pm_qos_remove_request - removes an existing qos request
 * @req: handle to request list element
 *
 * Will remove pm qos request from the list of constraints and
 * recompute the current target value. Call this on slow code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_remove_request(req);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);

/**
 * dev_pm_qos_add_notifier - sets notification entry for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block managed by caller.
 * @type: request type.
 *
 * Will register the notifier into a notification chain that gets called
 * upon changes to the target value for the device.
 *
 * If the device's constraints object doesn't exist when this routine is called,
 * it will be created (or an error code will be returned if that fails).
 */
int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier,
			    enum dev_pm_qos_req_type type)
{
	int ret = 0;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	if (ret)
		goto unlock;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
						       notifier);
		break;
	default:
		WARN_ON(1);
		ret = -EINVAL;
	}

unlock:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
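
/*
 * A hypothetical "watcher" sketch: the notifier registered above is called
 * whenever the aggregate resume latency target for the device changes, with
 * the new target value passed as the notifier 'action' argument.
 *
 *	static int my_qos_notify(struct notifier_block *nb,
 *				 unsigned long new_value, void *data)
 *	{
 *		pr_info("resume latency target is now %lu us\n", new_value);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_qos_nb = {
 *		.notifier_call = my_qos_notify,
 *	};
 *
 *	// somewhere in driver or framework code:
 *	dev_pm_qos_add_notifier(dev, &my_qos_nb, DEV_PM_QOS_RESUME_LATENCY);
 *	...
 *	dev_pm_qos_remove_notifier(dev, &my_qos_nb, DEV_PM_QOS_RESUME_LATENCY);
 */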

/**
 * dev_pm_qos_remove_notifier - deletes notification for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block to be removed.
 * @type: request type.
 *
 * Will remove the notifier from the notification chain that gets called
 * upon changes to the target value.
 */
int dev_pm_qos_remove_notifier(struct device *dev,
			       struct notifier_block *notifier,
			       enum dev_pm_qos_req_type type)
{
	int ret = 0;

	mutex_lock(&dev_pm_qos_mtx);

	/* Silently return if the constraints object is not present. */
	if (IS_ERR_OR_NULL(dev->power.qos))
		goto unlock;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		ret = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
							 notifier);
		break;
	default:
		WARN_ON(1);
		ret = -EINVAL;
	}

unlock:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);

/**
 * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
 * @dev: Device whose ancestor to add the request for.
 * @req: Pointer to the preallocated handle.
 * @type: Type of the request.
 * @value: Constraint latency value.
 */
int dev_pm_qos_add_ancestor_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	struct device *ancestor = dev->parent;
	int ret = -ENODEV;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		while (ancestor && !ancestor->power.ignore_children)
			ancestor = ancestor->parent;

		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		while (ancestor && !ancestor->power.set_latency_tolerance)
			ancestor = ancestor->parent;

		break;
	default:
		ancestor = NULL;
	}
	if (ancestor)
		ret = dev_pm_qos_add_request(ancestor, req, type, value);

	if (ret < 0)
		req->dev = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);

static void __dev_pm_qos_drop_user_request(struct device *dev,
					   enum dev_pm_qos_req_type type)
{
	struct dev_pm_qos_request *req = NULL;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		req = dev->power.qos->resume_latency_req;
		dev->power.qos->resume_latency_req = NULL;
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		req = dev->power.qos->latency_tolerance_req;
		dev->power.qos->latency_tolerance_req = NULL;
		break;
	case DEV_PM_QOS_FLAGS:
		req = dev->power.qos->flags_req;
		dev->power.qos->flags_req = NULL;
		break;
	default:
		WARN_ON(1);
		return;
	}
	__dev_pm_qos_remove_request(req);
	kfree(req);
}

static void dev_pm_qos_drop_user_request(struct device *dev,
					 enum dev_pm_qos_req_type type)
{
	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_drop_user_request(dev, type);
	mutex_unlock(&dev_pm_qos_mtx);
}

/**
 * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
 * @dev: Device whose PM QoS latency limit is to be exposed to user space.
 * @value: Initial value of the latency limit.
 */
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev) || value < 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->resume_latency_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->resume_latency_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_resume_latency(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
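
/*
 * Typical (hypothetical) use: a driver that knows a sensible initial limit,
 * in microseconds, calls
 *
 *	dev_pm_qos_expose_latency_limit(dev, 100);
 *
 * after which user space can adjust the limit through the device's
 * power/pm_qos_resume_latency_us sysfs attribute.
 */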

static void __dev_pm_qos_hide_latency_limit(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
}

/**
 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
 * @dev: Device whose PM QoS latency limit is to be hidden from user space.
 */
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_resume_latency(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_latency_limit(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);

/**
 * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
 * @dev: Device whose PM QoS flags are to be exposed to user space.
 * @val: Initial values of the flags.
 */
int dev_pm_qos_expose_flags(struct device *dev, s32 val)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev))
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->flags_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->flags_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_flags(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);

static void __dev_pm_qos_hide_flags(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
}

/**
 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
 * @dev: Device whose PM QoS flags are to be hidden from user space.
 */
void dev_pm_qos_hide_flags(struct device *dev)
{
	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_flags(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);

/**
 * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
 * @dev: Device to update the PM QoS flags request for.
 * @mask: Flags to set/clear.
 * @set: Whether to set or clear the flags (true means set).
 */
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
{
	s32 value;
	int ret;

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
		ret = -EINVAL;
		goto out;
	}

	value = dev_pm_qos_requested_flags(dev);
	if (set)
		value |= mask;
	else
		value &= ~mask;

	ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	pm_runtime_put(dev);
	return ret;
}
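
/*
 * For illustration, a (hypothetical) caller managing the user-space-owned
 * flags request might do:
 *
 *	dev_pm_qos_expose_flags(dev, PM_QOS_FLAG_NO_POWER_OFF);
 *	...
 *	// clear the flag again on behalf of user space
 *	dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, false);
 */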

/**
 * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
 * @dev: Device to obtain the user space latency tolerance for.
 */
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
{
	s32 ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = IS_ERR_OR_NULL(dev->power.qos)
		|| !dev->power.qos->latency_tolerance_req ?
			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
			dev->power.qos->latency_tolerance_req->data.pnode.prio;
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}

/**
 * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
 * @dev: Device to update the user space latency tolerance for.
 * @val: New user space latency tolerance for @dev (negative values disable).
 */
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos)
	    || !dev->power.qos->latency_tolerance_req) {
		struct dev_pm_qos_request *req;

		if (val < 0) {
			if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
				ret = 0;
			else
				ret = -EINVAL;
			goto out;
		}
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out;
		}
		ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
		if (ret < 0) {
			kfree(req);
			goto out;
		}
		dev->power.qos->latency_tolerance_req = req;
	} else {
		if (val < 0) {
			__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
			ret = 0;
		} else {
			ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
		}
	}

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance);
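
/*
 * A minimal (hypothetical) sketch of the latency tolerance plumbing, assuming
 * the bus type or PM domain has filled in dev->power.set_latency_tolerance:
 *
 *	static void my_set_latency_tolerance(struct device *dev, s32 val)
 *	{
 *		// program the hardware tolerance setting from 'val';
 *		// negative values mean no tolerance constraint is set
 *	}
 *
 *	dev->power.set_latency_tolerance = my_set_latency_tolerance;
 *	dev_pm_qos_expose_latency_tolerance(dev);
 *
 * Writes from user space to the corresponding sysfs attribute then end up in
 * dev_pm_qos_update_user_latency_tolerance() above.
 */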

/**
 * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
 * @dev: Device whose latency tolerance to expose
 */
int dev_pm_qos_expose_latency_tolerance(struct device *dev)
{
	int ret;

	if (!dev->power.set_latency_tolerance)
		return -EINVAL;

	mutex_lock(&dev_pm_qos_sysfs_mtx);
	ret = pm_qos_sysfs_add_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);

/**
 * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace
 * @dev: Device whose latency tolerance to hide
 */
void dev_pm_qos_hide_latency_tolerance(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);
	pm_qos_sysfs_remove_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	/* Remove the request from user space now */
	pm_runtime_get_sync(dev);
	dev_pm_qos_update_user_latency_tolerance(dev,
		PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);