/*
 * Copyright (c) 2018 Intel Corporation.
 * Copyright (c) 2022 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <stdint.h>

#include <zephyr/pm/policy.h>
#include <zephyr/spinlock.h>
#include <zephyr/sys/time_units.h>

/** Lock to synchronize access to the latency request list. */
static struct k_spinlock latency_lock;
/** List of maximum latency requests. */
static sys_slist_t latency_reqs;
/** Maximum CPU latency in us. */
static int32_t max_latency_us = SYS_FOREVER_US;
/** Maximum CPU latency in cycles. */
int32_t max_latency_cyc = -1;
/** List of latency change subscribers. */
static sys_slist_t latency_subs;

/** @brief Update maximum allowed latency. */
static void update_max_latency(void)
{
	int32_t new_max_latency_us = SYS_FOREVER_US;
	struct pm_policy_latency_request *req;

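	/* Find the strictest (lowest) latency requirement among all registered requests. */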
	SYS_SLIST_FOR_EACH_CONTAINER(&latency_reqs, req, node) {
		if ((new_max_latency_us == SYS_FOREVER_US) ||
		    ((int32_t)req->value_us < new_max_latency_us)) {
			new_max_latency_us = (int32_t)req->value_us;
		}
	}

	if (max_latency_us != new_max_latency_us) {
		struct pm_policy_latency_subscription *sreq;
		int32_t new_max_latency_cyc = -1;

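		/* Notify subscribers of the new maximum latency. */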
		SYS_SLIST_FOR_EACH_CONTAINER(&latency_subs, sreq, node) {
			sreq->cb(new_max_latency_us);
		}

		if (new_max_latency_us != SYS_FOREVER_US) {
			new_max_latency_cyc = (int32_t)k_us_to_cyc_ceil32(new_max_latency_us);
		}

		max_latency_us = new_max_latency_us;
		max_latency_cyc = new_max_latency_cyc;
	}
}

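/** @brief Register a new maximum latency requirement, in microseconds. */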
void pm_policy_latency_request_add(struct pm_policy_latency_request *req,
				   uint32_t value_us)
{
	req->value_us = value_us;

	k_spinlock_key_t key = k_spin_lock(&latency_lock);

	sys_slist_append(&latency_reqs, &req->node);
	update_max_latency();

	k_spin_unlock(&latency_lock, key);
}

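/** @brief Update the latency value of an existing requirement. */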
void pm_policy_latency_request_update(struct pm_policy_latency_request *req,
				      uint32_t value_us)
{
	k_spinlock_key_t key = k_spin_lock(&latency_lock);

	req->value_us = value_us;
	update_max_latency();

	k_spin_unlock(&latency_lock, key);
}

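/** @brief Remove a previously registered latency requirement. */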
void pm_policy_latency_request_remove(struct pm_policy_latency_request *req)
{
	k_spinlock_key_t key = k_spin_lock(&latency_lock);

	(void)sys_slist_find_and_remove(&latency_reqs, &req->node);
	update_max_latency();

	k_spin_unlock(&latency_lock, key);
}

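/** @brief Subscribe to notifications when the maximum latency changes. */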
void pm_policy_latency_changed_subscribe(struct pm_policy_latency_subscription *req,
					 pm_policy_latency_changed_cb_t cb)
{
	k_spinlock_key_t key = k_spin_lock(&latency_lock);

	req->cb = cb;
	sys_slist_append(&latency_subs, &req->node);

	k_spin_unlock(&latency_lock, key);
}

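/** @brief Unsubscribe from maximum latency change notifications. */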
void pm_policy_latency_changed_unsubscribe(struct pm_policy_latency_subscription *req)
{
	k_spinlock_key_t key = k_spin_lock(&latency_lock);

	(void)sys_slist_find_and_remove(&latency_subs, &req->node);

	k_spin_unlock(&latency_lock, key);
}