1 /*
2 * Copyright (c) 2023, Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 #include <zephyr/kernel.h>
8 #include <zephyr/kernel/obj_core.h>
9
10 static struct k_spinlock lock;
11
12 sys_slist_t z_obj_type_list = SYS_SLIST_STATIC_INIT(&z_obj_type_list);
13
/*
 * Initialize an object type and register it on the global list of
 * object types (@ref z_obj_type_list).
 *
 * @param type Object type to initialize
 * @param id   Unique identifier for this object type
 * @param off  Offset of the object core within the containing object
 *
 * @return @a type, for call-chaining convenience
 */
struct k_obj_type *z_obj_type_init(struct k_obj_type *type,
				   uint32_t id, size_t off)
{
	/* Record the type's identity before publishing it */
	type->id = id;
	type->obj_core_offset = off;

	/* No objects of this type are linked yet */
	sys_slist_init(&type->list);

	/*
	 * NOTE(review): the global list is appended to without taking
	 * the spinlock -- presumably this only runs during early
	 * single-threaded init; confirm with callers.
	 */
	sys_slist_append(&z_obj_type_list, &type->node);

	return type;
}
24
k_obj_core_init(struct k_obj_core * obj_core,struct k_obj_type * type)25 void k_obj_core_init(struct k_obj_core *obj_core, struct k_obj_type *type)
26 {
27 obj_core->node.next = NULL;
28 obj_core->type = type;
29 #ifdef CONFIG_OBJ_CORE_STATS
30 obj_core->stats = NULL;
31 #endif /* CONFIG_OBJ_CORE_STATS */
32 }
33
k_obj_core_link(struct k_obj_core * obj_core)34 void k_obj_core_link(struct k_obj_core *obj_core)
35 {
36 k_spinlock_key_t key = k_spin_lock(&lock);
37
38 sys_slist_append(&obj_core->type->list, &obj_core->node);
39
40 k_spin_unlock(&lock, key);
41 }
42
/*
 * Convenience wrapper: initialize an object core, then immediately
 * link it into its type's list of objects.
 */
void k_obj_core_init_and_link(struct k_obj_core *obj_core,
			      struct k_obj_type *type)
{
	k_obj_core_init(obj_core, type);
	k_obj_core_link(obj_core);
}
49
k_obj_core_unlink(struct k_obj_core * obj_core)50 void k_obj_core_unlink(struct k_obj_core *obj_core)
51 {
52 k_spinlock_key_t key = k_spin_lock(&lock);
53
54 sys_slist_find_and_remove(&obj_core->type->list, &obj_core->node);
55
56 k_spin_unlock(&lock, key);
57 }
58
/*
 * Look up an object type by its identifier.
 *
 * @param type_id Identifier to search for
 *
 * @return Pointer to the matching object type, or NULL if none exists
 */
struct k_obj_type *k_obj_type_find(uint32_t type_id)
{
	struct k_obj_type *match = NULL;
	sys_snode_t *iter;
	k_spinlock_key_t lock_key = k_spin_lock(&lock);

	SYS_SLIST_FOR_EACH_NODE(&z_obj_type_list, iter) {
		struct k_obj_type *candidate =
			CONTAINER_OF(iter, struct k_obj_type, node);

		if (candidate->id == type_id) {
			match = candidate;
			break;
		}
	}

	k_spin_unlock(&lock, lock_key);

	return match;
}
79
k_obj_type_walk_locked(struct k_obj_type * type,int (* func)(struct k_obj_core *,void *),void * data)80 int k_obj_type_walk_locked(struct k_obj_type *type,
81 int (*func)(struct k_obj_core *, void *),
82 void *data)
83 {
84 k_spinlock_key_t key;
85 struct k_obj_core *obj_core;
86 sys_snode_t *node;
87 int status = 0;
88
89 key = k_spin_lock(&lock);
90
91 SYS_SLIST_FOR_EACH_NODE(&type->list, node) {
92 obj_core = CONTAINER_OF(node, struct k_obj_core, node);
93 status = func(obj_core, data);
94 if (status != 0) {
95 break;
96 }
97 }
98
99 k_spin_unlock(&lock, key);
100
101 return status;
102 }
103
k_obj_type_walk_unlocked(struct k_obj_type * type,int (* func)(struct k_obj_core *,void *),void * data)104 int k_obj_type_walk_unlocked(struct k_obj_type *type,
105 int (*func)(struct k_obj_core *, void *),
106 void *data)
107 {
108 struct k_obj_core *obj_core;
109 sys_snode_t *node;
110 sys_snode_t *next;
111 int status = 0;
112
113 SYS_SLIST_FOR_EACH_NODE_SAFE(&type->list, node, next) {
114 obj_core = CONTAINER_OF(node, struct k_obj_core, node);
115 status = func(obj_core, data);
116 if (status != 0) {
117 break;
118 }
119 }
120
121 return status;
122 }
123
124 #ifdef CONFIG_OBJ_CORE_STATS
k_obj_core_stats_register(struct k_obj_core * obj_core,void * stats,size_t stats_len)125 int k_obj_core_stats_register(struct k_obj_core *obj_core, void *stats,
126 size_t stats_len)
127 {
128 int rv;
129 k_spinlock_key_t key = k_spin_lock(&lock);
130
131 if (obj_core->type->stats_desc == NULL) {
132 /* Object type not configured for statistics. */
133 rv = -ENOTSUP;
134 } else if (obj_core->type->stats_desc->raw_size != stats_len) {
135 /* Buffer size mismatch */
136 rv = -EINVAL;
137 } else {
138 obj_core->stats = stats;
139 rv = 0;
140 }
141
142 k_spin_unlock(&lock, key);
143
144 return rv;
145 }
146
k_obj_core_stats_deregister(struct k_obj_core * obj_core)147 int k_obj_core_stats_deregister(struct k_obj_core *obj_core)
148 {
149 int rv;
150 k_spinlock_key_t key = k_spin_lock(&lock);
151
152 if (obj_core->type->stats_desc == NULL) {
153 /* Object type not configured for statistics. */
154 rv = -ENOTSUP;
155 } else {
156 obj_core->stats = NULL;
157 rv = 0;
158 }
159
160 k_spin_unlock(&lock, key);
161
162 return rv;
163 }
164
k_obj_core_stats_raw(struct k_obj_core * obj_core,void * stats,size_t stats_len)165 int k_obj_core_stats_raw(struct k_obj_core *obj_core, void *stats,
166 size_t stats_len)
167 {
168 int rv;
169 struct k_obj_core_stats_desc *desc;
170
171 k_spinlock_key_t key = k_spin_lock(&lock);
172
173 desc = obj_core->type->stats_desc;
174 if ((desc == NULL) || (desc->raw == NULL)) {
175 /* The object type is not configured for this operation */
176 rv = -ENOTSUP;
177 } else if ((desc->raw_size != stats_len) || (obj_core->stats == NULL)) {
178 /*
179 * Either the size of the stats buffer is wrong or
180 * the kernel object was not registered for statistics.
181 */
182 rv = -EINVAL;
183 } else {
184 rv = desc->raw(obj_core, stats);
185 }
186
187 k_spin_unlock(&lock, key);
188
189 return rv;
190 }
191
k_obj_core_stats_query(struct k_obj_core * obj_core,void * stats,size_t stats_len)192 int k_obj_core_stats_query(struct k_obj_core *obj_core, void *stats,
193 size_t stats_len)
194 {
195 int rv;
196 struct k_obj_core_stats_desc *desc;
197
198 k_spinlock_key_t key = k_spin_lock(&lock);
199
200 desc = obj_core->type->stats_desc;
201 if ((desc == NULL) || (desc->query == NULL)) {
202 /* The object type is not configured for this operation */
203 rv = -ENOTSUP;
204 } else if ((desc->query_size != stats_len) || (obj_core->stats == NULL)) {
205 /*
206 * Either the size of the stats buffer is wrong or
207 * the kernel object was not registered for statistics.
208 */
209 rv = -EINVAL;
210 } else {
211 rv = desc->query(obj_core, stats);
212 }
213
214 k_spin_unlock(&lock, key);
215
216 return rv;
217 }
218
k_obj_core_stats_reset(struct k_obj_core * obj_core)219 int k_obj_core_stats_reset(struct k_obj_core *obj_core)
220 {
221 int rv;
222 struct k_obj_core_stats_desc *desc;
223
224 k_spinlock_key_t key = k_spin_lock(&lock);
225
226 desc = obj_core->type->stats_desc;
227 if ((desc == NULL) || (desc->reset == NULL)) {
228 /* The object type is not configured for this operation */
229 rv = -ENOTSUP;
230 } else if (obj_core->stats == NULL) {
231 /* This kernel object is not configured for statistics */
232 rv = -EINVAL;
233 } else {
234 rv = desc->reset(obj_core);
235 }
236
237 k_spin_unlock(&lock, key);
238
239 return rv;
240 }
241
k_obj_core_stats_disable(struct k_obj_core * obj_core)242 int k_obj_core_stats_disable(struct k_obj_core *obj_core)
243 {
244 int rv;
245 struct k_obj_core_stats_desc *desc;
246
247 k_spinlock_key_t key = k_spin_lock(&lock);
248
249 desc = obj_core->type->stats_desc;
250 if ((desc == NULL) || (desc->disable == NULL)) {
251 /* The object type is not configured for this operation */
252 rv = -ENOTSUP;
253 } else if (obj_core->stats == NULL) {
254 /* This kernel object is not configured for statistics */
255 rv = -EINVAL;
256 } else {
257 rv = desc->disable(obj_core);
258 }
259
260 k_spin_unlock(&lock, key);
261
262 return rv;
263 }
264
k_obj_core_stats_enable(struct k_obj_core * obj_core)265 int k_obj_core_stats_enable(struct k_obj_core *obj_core)
266 {
267 int rv;
268 struct k_obj_core_stats_desc *desc;
269
270 k_spinlock_key_t key = k_spin_lock(&lock);
271
272 desc = obj_core->type->stats_desc;
273 if ((desc == NULL) || (desc->enable == NULL)) {
274 /* The object type is not configured for this operation */
275 rv = -ENOTSUP;
276 } else if (obj_core->stats == NULL) {
277 /* This kernel object is not configured for statistics */
278 rv = -EINVAL;
279 } else {
280 rv = desc->enable(obj_core);
281 }
282
283 k_spin_unlock(&lock, key);
284
285 return rv;
286 }
287 #endif /* CONFIG_OBJ_CORE_STATS */
288