/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>

#include <zephyr/timing/timing.h>
#include <ksched.h>
#include <zephyr/spinlock.h>
#include <zephyr/sys/check.h>
#include <string.h>

/* Need one of these for this to work */
#if !defined(CONFIG_USE_SWITCH) && !defined(CONFIG_INSTRUMENT_THREAD_SWITCHING)
#error "No data backend configured for CONFIG_SCHED_THREAD_USAGE"
#endif

static struct k_spinlock usage_lock;

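/*
 * Read the cycle counter used for usage accounting. A reading of zero
 * is mapped to 1 so that zero can serve as the "window closed" marker
 * stored in the per-CPU [usage0] field.
 */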
static uint32_t usage_now(void)
{
	uint32_t now;

#ifdef CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS
	now = (uint32_t)timing_counter_get();
#else
	now = k_cycle_get_32();
#endif

	/* Edge case: we use a zero as a null ("stop() already called") */
	return (now == 0) ? 1 : now;
}

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
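/*
 * Credit [cycles] to the CPU's usage counters. Time spent in the idle
 * thread is not added to the totals; with
 * CONFIG_SCHED_THREAD_USAGE_ANALYSIS the per-window counters are
 * maintained as well.
 */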
static void sched_cpu_update_usage(struct _cpu *cpu, uint32_t cycles)
{
	if (!cpu->usage->track_usage) {
		return;
	}

	if (cpu->current != cpu->idle_thread) {
		cpu->usage->total += cycles;

#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
		cpu->usage->current += cycles;

		if (cpu->usage->longest < cpu->usage->current) {
			cpu->usage->longest = cpu->usage->current;
		}
	} else {
		cpu->usage->current = 0;
		cpu->usage->num_windows++;
#endif
	}
}
#else
#define sched_cpu_update_usage(cpu, cycles)   do { } while (0)
#endif

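/* Credit [cycles] to a thread's total (and, if enabled, per-window) usage */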
static void sched_thread_update_usage(struct k_thread *thread, uint32_t cycles)
{
	thread->base.usage.total += cycles;

#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
	thread->base.usage.current += cycles;

	if (thread->base.usage.longest < thread->base.usage.current) {
		thread->base.usage.longest = thread->base.usage.current;
	}
#endif
}

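/*
 * Called when [thread] is switched in: record the start of its
 * execution window in the current CPU's [usage0] field.
 */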
void z_sched_usage_start(struct k_thread *thread)
{
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
	k_spinlock_key_t  key;

	key = k_spin_lock(&usage_lock);

	_current_cpu->usage0 = usage_now();   /* Always update */

	if (thread->base.usage.track_usage) {
		thread->base.usage.num_windows++;
		thread->base.usage.current = 0;
	}

	k_spin_unlock(&usage_lock, key);
#else
	/* One write through a volatile pointer doesn't require
	 * synchronization as long as _usage() treats it as volatile
	 * (we can't race with _stop() by design).
	 */

	_current_cpu->usage0 = usage_now();
#endif
}

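/*
 * Called when the current thread is switched out: credit the cycles
 * accumulated since z_sched_usage_start() to the thread and CPU, then
 * mark the execution window as closed.
 */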
void z_sched_usage_stop(void)
{
	k_spinlock_key_t k   = k_spin_lock(&usage_lock);

	struct _cpu     *cpu = _current_cpu;

	uint32_t u0 = cpu->usage0;

	if (u0 != 0) {
		uint32_t cycles = usage_now() - u0;

		if (cpu->current->base.usage.track_usage) {
			sched_thread_update_usage(cpu->current, cycles);
		}

		sched_cpu_update_usage(cpu, cycles);
	}

	cpu->usage0 = 0;
	k_spin_unlock(&usage_lock, k);
}

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
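/*
 * Report the runtime stats gathered for the CPU identified by [cpu_id]
 * into [stats]. If that CPU is the one executing this call, its
 * counters are refreshed first.
 */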
void z_sched_cpu_usage(uint8_t cpu_id, struct k_thread_runtime_stats *stats)
{
	k_spinlock_key_t  key;
	struct _cpu *cpu;

	key = k_spin_lock(&usage_lock);
	cpu = &_kernel.cpus[cpu_id];

	if (cpu == _current_cpu) {
		uint32_t  now = usage_now();
		uint32_t cycles = now - cpu->usage0;

		/*
		 * Getting stats for the current CPU. Update both its
		 * current thread stats and the CPU stats as the CPU's
		 * [usage0] field will also get updated. This keeps all
		 * that information up-to-date.
		 */

		if (cpu->current->base.usage.track_usage) {
			sched_thread_update_usage(cpu->current, cycles);
		}

		sched_cpu_update_usage(cpu, cycles);

		cpu->usage0 = now;
	}

	stats->total_cycles     = cpu->usage->total;
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
	stats->current_cycles   = cpu->usage->current;
	stats->peak_cycles      = cpu->usage->longest;

	if (cpu->usage->num_windows == 0) {
		stats->average_cycles = 0;
	} else {
		stats->average_cycles = stats->total_cycles /
					cpu->usage->num_windows;
	}
#endif

	stats->idle_cycles =
		_kernel.cpus[cpu_id].idle_thread->base.usage.total;

	stats->execution_cycles = stats->total_cycles + stats->idle_cycles;

	k_spin_unlock(&usage_lock, key);
}
#endif

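/*
 * Report the runtime stats gathered for [thread] into [stats],
 * refreshing the counters first if the thread is currently running on
 * this CPU.
 */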
void z_sched_thread_usage(struct k_thread *thread,
			  struct k_thread_runtime_stats *stats)
{
	struct _cpu *cpu;
	k_spinlock_key_t  key;

	key = k_spin_lock(&usage_lock);
	cpu = _current_cpu;

	if (thread == cpu->current) {
		uint32_t now = usage_now();
		uint32_t cycles = now - cpu->usage0;

		/*
		 * Getting stats for the current thread. Update both the
		 * current thread stats and its CPU stats as the CPU's
		 * [usage0] field will also get updated. This keeps all
		 * that information up-to-date.
		 */

		if (thread->base.usage.track_usage) {
			sched_thread_update_usage(thread, cycles);
		}

		sched_cpu_update_usage(cpu, cycles);

		cpu->usage0 = now;
	}

	/* Copy-out the thread's usage stats */

	stats->execution_cycles = thread->base.usage.total;
	stats->total_cycles     = thread->base.usage.total;

#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
	stats->current_cycles = thread->base.usage.current;
	stats->peak_cycles    = thread->base.usage.longest;

	if (thread->base.usage.num_windows == 0) {
		stats->average_cycles = 0;
	} else {
		stats->average_cycles = stats->total_cycles /
					thread->base.usage.num_windows;
	}
#endif

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
	stats->idle_cycles = 0;
#endif

	k_spin_unlock(&usage_lock, key);
}

#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
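/*
 * Enable runtime stats gathering for a single thread and open a new
 * measurement window. An illustrative call sequence from application
 * code, using the public k_thread_runtime_stats API, might be:
 *
 *   k_thread_runtime_stats_t stats;
 *
 *   k_thread_runtime_stats_enable(tid);
 *   ...                                    (let the thread do some work)
 *   k_thread_runtime_stats_get(tid, &stats);
 *   k_thread_runtime_stats_disable(tid);
 */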
int k_thread_runtime_stats_enable(k_tid_t  thread)
{
	k_spinlock_key_t  key;

	CHECKIF(thread == NULL) {
		return -EINVAL;
	}

	key = k_spin_lock(&usage_lock);

	if (!thread->base.usage.track_usage) {
		thread->base.usage.track_usage = true;
		thread->base.usage.num_windows++;
		thread->base.usage.current = 0;
	}

	k_spin_unlock(&usage_lock, key);

	return 0;
}

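/*
 * Disable runtime stats gathering for a single thread. If the thread
 * is running on this CPU, the cycles accumulated so far in its window
 * are credited before tracking stops.
 */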
int k_thread_runtime_stats_disable(k_tid_t  thread)
{
	k_spinlock_key_t key;

	CHECKIF(thread == NULL) {
		return -EINVAL;
	}

	key = k_spin_lock(&usage_lock);
	struct _cpu *cpu = _current_cpu;

	if (thread->base.usage.track_usage) {
		thread->base.usage.track_usage = false;

		if (thread == cpu->current) {
			uint32_t cycles = usage_now() - cpu->usage0;

			sched_thread_update_usage(thread, cycles);
			sched_cpu_update_usage(cpu, cycles);
		}
	}

	k_spin_unlock(&usage_lock, key);

	return 0;
}
#endif

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
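/*
 * Enable CPU-level usage tracking on all CPUs. System-wide totals can
 * then be read back; an illustrative sequence using the public API:
 *
 *   k_thread_runtime_stats_t stats;
 *
 *   k_sys_runtime_stats_enable();
 *   ...                                    (run the workload of interest)
 *   k_thread_runtime_stats_all_get(&stats);
 *   k_sys_runtime_stats_disable();
 */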
void k_sys_runtime_stats_enable(void)
{
	k_spinlock_key_t  key;

	key = k_spin_lock(&usage_lock);

	if (_current_cpu->usage->track_usage) {

		/*
		 * Usage tracking is already enabled on the current CPU
		 * and thus on all other CPUs (if applicable). There is
		 * nothing left to do.
		 */

		k_spin_unlock(&usage_lock, key);
		return;
	}

	/* Enable gathering of runtime stats on each CPU */

	unsigned int num_cpus = arch_num_cpus();

	for (uint8_t i = 0; i < num_cpus; i++) {
		_kernel.cpus[i].usage->track_usage = true;
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
		_kernel.cpus[i].usage->num_windows++;
		_kernel.cpus[i].usage->current = 0;
#endif
	}

	k_spin_unlock(&usage_lock, key);
}

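/*
 * Disable CPU-level usage tracking on all CPUs, crediting each CPU's
 * open execution window before clearing its track_usage flag.
 */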
void k_sys_runtime_stats_disable(void)
{
	struct _cpu *cpu;
	k_spinlock_key_t key;

	key = k_spin_lock(&usage_lock);

	if (!_current_cpu->usage->track_usage) {

		/*
		 * Usage tracking is already disabled on the current CPU
		 * and thus on all other CPUs (if applicable). There is
		 * nothing left to do.
		 */

		k_spin_unlock(&usage_lock, key);
		return;
	}

	uint32_t now = usage_now();

	unsigned int num_cpus = arch_num_cpus();

	for (uint8_t i = 0; i < num_cpus; i++) {
		cpu = &_kernel.cpus[i];
		if (cpu->usage0 != 0) {
			sched_cpu_update_usage(cpu, now - cpu->usage0);
		}
		cpu->usage->track_usage = false;
	}

	k_spin_unlock(&usage_lock, key);
}
#endif

#ifdef CONFIG_OBJ_CORE_STATS_THREAD
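/* Object core handler: copy a thread's raw k_cycle_stats as-is */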
int z_thread_stats_raw(struct k_obj_core *obj_core, void *stats)
{
	k_spinlock_key_t  key;

	key = k_spin_lock(&usage_lock);
	memcpy(stats, obj_core->stats, sizeof(struct k_cycle_stats));
	k_spin_unlock(&usage_lock, key);

	return 0;
}

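/* Object core handler: return a thread's refreshed runtime stats */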
int z_thread_stats_query(struct k_obj_core *obj_core, void *stats)
{
	struct k_thread *thread;

	thread = CONTAINER_OF(obj_core, struct k_thread, obj_core);

	z_sched_thread_usage(thread, stats);

	return 0;
}

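/*
 * Object core handler: zero a thread's usage counters. If the thread
 * is running on this CPU, its execution window is restarted from the
 * current time.
 */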
int z_thread_stats_reset(struct k_obj_core *obj_core)
{
	k_spinlock_key_t  key;
	struct k_cycle_stats  *stats;
	struct k_thread *thread;

	thread = CONTAINER_OF(obj_core, struct k_thread, obj_core);
	key = k_spin_lock(&usage_lock);
	stats = obj_core->stats;

	stats->total = 0ULL;
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
	stats->current = 0ULL;
	stats->longest = 0ULL;
	stats->num_windows = (thread->base.usage.track_usage) ? 1U : 0U;
#endif

	if (thread != _current_cpu->current) {

		/*
		 * If the thread is not running, there is nothing else to do.
		 * If the thread is running on another core, then it is not
		 * safe to do anything else but unlock and return (and pretend
		 * that its stats were reset at the start of its execution
		 * window).
		 */

		k_spin_unlock(&usage_lock, key);

		return 0;
	}

	/* Update the current CPU stats. */

	uint32_t now = usage_now();
	uint32_t cycles = now - _current_cpu->usage0;

	sched_cpu_update_usage(_current_cpu, cycles);

	_current_cpu->usage0 = now;

	k_spin_unlock(&usage_lock, key);

	return 0;
}

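/* Object core handler: wrapper around k_thread_runtime_stats_disable() */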
int z_thread_stats_disable(struct k_obj_core *obj_core)
{
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
	struct k_thread *thread;

	thread = CONTAINER_OF(obj_core, struct k_thread, obj_core);

	return k_thread_runtime_stats_disable(thread);
#else
	return -ENOTSUP;
#endif
}

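/* Object core handler: wrapper around k_thread_runtime_stats_enable() */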
int z_thread_stats_enable(struct k_obj_core *obj_core)
{
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
	struct k_thread *thread;

	thread = CONTAINER_OF(obj_core, struct k_thread, obj_core);

	return k_thread_runtime_stats_enable(thread);
#else
	return -ENOTSUP;
#endif
}
#endif

#ifdef CONFIG_OBJ_CORE_STATS_SYSTEM
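/* Object core handler: copy a CPU's raw k_cycle_stats as-is */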
int z_cpu_stats_raw(struct k_obj_core *obj_core, void *stats)
{
	k_spinlock_key_t  key;

	key = k_spin_lock(&usage_lock);
	memcpy(stats, obj_core->stats, sizeof(struct k_cycle_stats));
	k_spin_unlock(&usage_lock, key);

	return 0;
}

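/* Object core handler: return a CPU's refreshed runtime stats */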
int z_cpu_stats_query(struct k_obj_core *obj_core, void *stats)
{
	struct _cpu  *cpu;

	cpu = CONTAINER_OF(obj_core, struct _cpu, obj_core);

	z_sched_cpu_usage(cpu->id, stats);

	return 0;
}
#endif

#ifdef CONFIG_OBJ_CORE_STATS_SYSTEM
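/* Object core handler: copy the raw k_cycle_stats for all CPUs */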
int z_kernel_stats_raw(struct k_obj_core *obj_core, void *stats)
{
	k_spinlock_key_t  key;

	key = k_spin_lock(&usage_lock);
	memcpy(stats, obj_core->stats,
	       CONFIG_MP_MAX_NUM_CPUS * sizeof(struct k_cycle_stats));
	k_spin_unlock(&usage_lock, key);

	return 0;
}

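/* Object core handler: return system-wide aggregate runtime stats */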
int z_kernel_stats_query(struct k_obj_core *obj_core, void *stats)
{
	ARG_UNUSED(obj_core);

	return k_thread_runtime_stats_all_get(stats);
}
#endif