/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>

#include <zephyr/timing/timing.h>
#include <ksched.h>
#include <zephyr/spinlock.h>
#include <zephyr/sys/check.h>

/* Need one of these for this to work */
#if !defined(CONFIG_USE_SWITCH) && !defined(CONFIG_INSTRUMENT_THREAD_SWITCHING)
#error "No data backend configured for CONFIG_SCHED_THREAD_USAGE"
#endif /* !CONFIG_USE_SWITCH && !CONFIG_INSTRUMENT_THREAD_SWITCHING */

static struct k_spinlock usage_lock;

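/*
 * Read the cycle counter used for usage accounting. A raw value of zero is
 * remapped to one so that zero keeps its meaning of "no measurement in
 * progress" in the per-CPU [usage0] field.
 */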
static uint32_t usage_now(void)
{
	uint32_t now;

#ifdef CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS
	now = (uint32_t)timing_counter_get();
#else
	now = k_cycle_get_32();
#endif /* CONFIG_THREAD_RUNTIME_STATS_USE_TIMING_FUNCTIONS */

	/* Edge case: we use a zero as a null ("stop() already called") */
	return (now == 0) ? 1 : now;
}

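/*
 * Charge [cycles] to the current CPU's usage statistics. Only non-idle
 * cycles are accumulated; with CONFIG_SCHED_THREAD_USAGE_ANALYSIS, time
 * spent in the idle thread instead closes the current measurement window
 * and starts a new one.
 */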
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
static void sched_cpu_update_usage(struct _cpu *cpu, uint32_t cycles)
{
	if (!cpu->usage->track_usage) {
		return;
	}

	if (cpu->current != cpu->idle_thread) {
		cpu->usage->total += cycles;

#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
		cpu->usage->current += cycles;

		if (cpu->usage->longest < cpu->usage->current) {
			cpu->usage->longest = cpu->usage->current;
		}
	} else {
		cpu->usage->current = 0;
		cpu->usage->num_windows++;
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
	}
}
#else
#define sched_cpu_update_usage(cpu, cycles)   do { } while (0)
#endif /* CONFIG_SCHED_THREAD_USAGE_ALL */

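/*
 * Charge [cycles] to the usage statistics of [thread]. Callers hold
 * usage_lock and have already verified that usage tracking is enabled
 * for the thread.
 */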
static void sched_thread_update_usage(struct k_thread *thread, uint32_t cycles)
{
	thread->base.usage.total += cycles;

#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
	thread->base.usage.current += cycles;

	if (thread->base.usage.longest < thread->base.usage.current) {
		thread->base.usage.longest = thread->base.usage.current;
	}
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
}

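/*
 * Called from the context switch path when [thread] is switched in: record
 * in the per-CPU [usage0] field the cycle count at which its execution
 * window starts, and open a new analysis window if tracking is enabled.
 */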
void z_sched_usage_start(struct k_thread *thread)
{
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
	k_spinlock_key_t  key;

	key = k_spin_lock(&usage_lock);

	_current_cpu->usage0 = usage_now();   /* Always update */

	if (thread->base.usage.track_usage) {
		thread->base.usage.num_windows++;
		thread->base.usage.current = 0;
	}

	k_spin_unlock(&usage_lock, key);
#else
	/* One write through a volatile pointer doesn't require
	 * synchronization as long as _usage() treats it as volatile
	 * (we can't race with _stop() by design).
	 */

	_current_cpu->usage0 = usage_now();
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
}

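/*
 * Called from the context switch path when the current thread is switched
 * out: charge the cycles elapsed since [usage0] to the outgoing thread and
 * to the CPU, then zero [usage0] to mark that no measurement is in progress.
 */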
void z_sched_usage_stop(void)
{
	k_spinlock_key_t k   = k_spin_lock(&usage_lock);

	struct _cpu     *cpu = _current_cpu;

	uint32_t u0 = cpu->usage0;

	if (u0 != 0) {
		uint32_t cycles = usage_now() - u0;

		if (cpu->current->base.usage.track_usage) {
			sched_thread_update_usage(cpu->current, cycles);
		}

		sched_cpu_update_usage(cpu, cycles);
	}

	cpu->usage0 = 0;
	k_spin_unlock(&usage_lock, k);
}

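/*
 * Copy the runtime statistics of the CPU identified by [cpu_id] into
 * [stats]. When the caller queries its own CPU, the in-progress execution
 * window is folded into the counters first so that the snapshot is current.
 */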
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
void z_sched_cpu_usage(uint8_t cpu_id, struct k_thread_runtime_stats *stats)
{
	k_spinlock_key_t  key;
	struct _cpu *cpu;

	key = k_spin_lock(&usage_lock);
	cpu = &_kernel.cpus[cpu_id];

	if (cpu == _current_cpu) {
		uint32_t  now = usage_now();
		uint32_t cycles = now - cpu->usage0;

		/*
		 * Getting stats for the current CPU. Update both its
		 * current thread stats and the CPU stats as the CPU's
		 * [usage0] field will also get updated. This keeps all
		 * that information up-to-date.
		 */

		if (cpu->current->base.usage.track_usage) {
			sched_thread_update_usage(cpu->current, cycles);
		}

		sched_cpu_update_usage(cpu, cycles);

		cpu->usage0 = now;
	}

	stats->total_cycles     = cpu->usage->total;
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
	stats->current_cycles   = cpu->usage->current;
	stats->peak_cycles      = cpu->usage->longest;

	if (cpu->usage->num_windows == 0) {
		stats->average_cycles = 0;
	} else {
		stats->average_cycles = stats->total_cycles /
					cpu->usage->num_windows;
	}
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */

	stats->idle_cycles =
		_kernel.cpus[cpu_id].idle_thread->base.usage.total;

	stats->execution_cycles = stats->total_cycles + stats->idle_cycles;

	k_spin_unlock(&usage_lock, key);
}
#endif /* CONFIG_SCHED_THREAD_USAGE_ALL */

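/*
 * Copy the runtime statistics of [thread] into [stats]. If the thread is
 * running on the caller's CPU, the in-progress execution window is folded
 * into its counters first so that the snapshot is current.
 */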
void z_sched_thread_usage(struct k_thread *thread,
			  struct k_thread_runtime_stats *stats)
{
	struct _cpu *cpu;
	k_spinlock_key_t  key;

	key = k_spin_lock(&usage_lock);
	cpu = _current_cpu;

	if (thread == cpu->current) {
		uint32_t now = usage_now();
		uint32_t cycles = now - cpu->usage0;

		/*
		 * Getting stats for the current thread. Update both the
		 * current thread stats and its CPU stats as the CPU's
		 * [usage0] field will also get updated. This keeps all
		 * that information up-to-date.
		 */

		if (thread->base.usage.track_usage) {
			sched_thread_update_usage(thread, cycles);
		}

		sched_cpu_update_usage(cpu, cycles);

		cpu->usage0 = now;
	}

	stats->execution_cycles = thread->base.usage.total;
	stats->total_cycles     = thread->base.usage.total;

	/* Copy-out the thread's usage stats */

#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
	stats->current_cycles = thread->base.usage.current;
	stats->peak_cycles    = thread->base.usage.longest;

	if (thread->base.usage.num_windows == 0) {
		stats->average_cycles = 0;
	} else {
		stats->average_cycles = stats->total_cycles /
					thread->base.usage.num_windows;
	}
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */

#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
	stats->idle_cycles = 0;
#endif /* CONFIG_SCHED_THREAD_USAGE_ALL */

	k_spin_unlock(&usage_lock, key);
}

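/*
 * Runtime control of per-thread statistics gathering. Enabling tracking
 * opens a new measurement window for the thread; disabling it first charges
 * any cycles accumulated in the current window when the thread is running
 * on the caller's CPU.
 */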
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
int k_thread_runtime_stats_enable(k_tid_t  thread)
{
	k_spinlock_key_t  key;

	CHECKIF(thread == NULL) {
		return -EINVAL;
	}

	key = k_spin_lock(&usage_lock);

	if (!thread->base.usage.track_usage) {
		thread->base.usage.track_usage = true;
		thread->base.usage.num_windows++;
		thread->base.usage.current = 0;
	}

	k_spin_unlock(&usage_lock, key);

	return 0;
}

int k_thread_runtime_stats_disable(k_tid_t  thread)
{
	k_spinlock_key_t key;

	CHECKIF(thread == NULL) {
		return -EINVAL;
	}

	key = k_spin_lock(&usage_lock);
	struct _cpu *cpu = _current_cpu;

	if (thread->base.usage.track_usage) {
		thread->base.usage.track_usage = false;

		if (thread == cpu->current) {
			uint32_t cycles = usage_now() - cpu->usage0;

			sched_thread_update_usage(thread, cycles);
			sched_cpu_update_usage(cpu, cycles);
		}
	}

	k_spin_unlock(&usage_lock, key);

	return 0;
}
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
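
/*
 * Minimal usage sketch (illustration only, not compiled here): with
 * CONFIG_SCHED_THREAD_USAGE_ANALYSIS enabled, an application can scope a
 * measurement window around a region of interest and read the result back
 * through the public runtime stats API:
 *
 *	k_thread_runtime_stats_t stats;
 *
 *	k_thread_runtime_stats_enable(my_tid);
 *	do_work();
 *	k_thread_runtime_stats_disable(my_tid);
 *
 *	k_thread_runtime_stats_get(my_tid, &stats);
 *	printk("execution cycles: %llu\n",
 *	       (unsigned long long)stats.execution_cycles);
 *
 * Here my_tid and do_work() are placeholders for an application-supplied
 * thread ID and workload.
 */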
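/*
 * Runtime control of system-wide (per-CPU) statistics gathering. The
 * current CPU's setting is taken as representative of all CPUs, so a
 * redundant enable or disable returns without touching any counters.
 */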
#ifdef CONFIG_SCHED_THREAD_USAGE_ALL
void k_sys_runtime_stats_enable(void)
{
	k_spinlock_key_t  key;

	key = k_spin_lock(&usage_lock);

	if (_current_cpu->usage->track_usage) {

		/*
		 * Usage tracking is already enabled on the current CPU
		 * and thus on all other CPUs (if applicable). There is
		 * nothing left to do.
		 */

		k_spin_unlock(&usage_lock, key);
		return;
	}

	/* Enable gathering of runtime stats on each CPU */

	unsigned int num_cpus = arch_num_cpus();

	for (uint8_t i = 0; i < num_cpus; i++) {
		_kernel.cpus[i].usage->track_usage = true;
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
		_kernel.cpus[i].usage->num_windows++;
		_kernel.cpus[i].usage->current = 0;
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
	}

	k_spin_unlock(&usage_lock, key);
}

void k_sys_runtime_stats_disable(void)
{
	struct _cpu *cpu;
	k_spinlock_key_t key;

	key = k_spin_lock(&usage_lock);

	if (!_current_cpu->usage->track_usage) {

		/*
		 * Usage tracking is already disabled on the current CPU
		 * and thus on all other CPUs (if applicable). There is
		 * nothing left to do.
		 */

		k_spin_unlock(&usage_lock, key);
		return;
	}

	uint32_t now = usage_now();

	unsigned int num_cpus = arch_num_cpus();

	for (uint8_t i = 0; i < num_cpus; i++) {
		cpu = &_kernel.cpus[i];
		if (cpu->usage0 != 0) {
			sched_cpu_update_usage(cpu, now - cpu->usage0);
		}
		cpu->usage->track_usage = false;
	}

	k_spin_unlock(&usage_lock, key);
}
#endif /* CONFIG_SCHED_THREAD_USAGE_ALL */
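
/*
 * Minimal usage sketch (illustration only, not compiled here): with
 * CONFIG_SCHED_THREAD_USAGE_ALL enabled, system-wide gathering can be
 * toggled at run time and the aggregate counters read back with
 * k_thread_runtime_stats_all_get():
 *
 *	k_thread_runtime_stats_t stats;
 *
 *	k_sys_runtime_stats_enable();
 *	...
 *	k_sys_runtime_stats_disable();
 *
 *	k_thread_runtime_stats_all_get(&stats);
 *	printk("busy: %llu idle: %llu\n",
 *	       (unsigned long long)stats.total_cycles,
 *	       (unsigned long long)stats.idle_cycles);
 *
 * The "..." stands for whatever period of activity is being measured.
 */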
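/*
 * Statistics hooks for thread objects, used by the object core statistics
 * framework (CONFIG_OBJ_CORE_STATS): raw copy-out, query, reset, and
 * runtime enable/disable of a thread's k_cycle_stats.
 */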
#ifdef CONFIG_OBJ_CORE_STATS_THREAD
int z_thread_stats_raw(struct k_obj_core *obj_core, void *stats)
{
	k_spinlock_key_t  key;

	key = k_spin_lock(&usage_lock);
	memcpy(stats, obj_core->stats, sizeof(struct k_cycle_stats));
	k_spin_unlock(&usage_lock, key);

	return 0;
}

int z_thread_stats_query(struct k_obj_core *obj_core, void *stats)
{
	struct k_thread *thread;

	thread = CONTAINER_OF(obj_core, struct k_thread, obj_core);

	z_sched_thread_usage(thread, stats);

	return 0;
}

int z_thread_stats_reset(struct k_obj_core *obj_core)
{
	k_spinlock_key_t  key;
	struct k_cycle_stats  *stats;
	struct k_thread *thread;

	thread = CONTAINER_OF(obj_core, struct k_thread, obj_core);
	key = k_spin_lock(&usage_lock);
	stats = obj_core->stats;

	stats->total = 0ULL;
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
	stats->current = 0ULL;
	stats->longest = 0ULL;
	stats->num_windows = (thread->base.usage.track_usage) ?  1U : 0U;
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */

	if (thread != _current_cpu->current) {

		/*
		 * If the thread is not running, there is nothing else to do.
		 * If the thread is running on another core, then it is not
		 * safe to do anything else but unlock and return (and pretend
		 * that its stats were reset at the start of its execution
		 * window).
		 */

		k_spin_unlock(&usage_lock, key);

		return 0;
	}

	/* Update the current CPU stats. */

	uint32_t now = usage_now();
	uint32_t cycles = now - _current_cpu->usage0;

	sched_cpu_update_usage(_current_cpu, cycles);

	_current_cpu->usage0 = now;

	k_spin_unlock(&usage_lock, key);

	return 0;
}

int z_thread_stats_disable(struct k_obj_core *obj_core)
{
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
	struct k_thread *thread;

	thread = CONTAINER_OF(obj_core, struct k_thread, obj_core);

	return k_thread_runtime_stats_disable(thread);
#else
	return -ENOTSUP;
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
}

int z_thread_stats_enable(struct k_obj_core *obj_core)
{
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
	struct k_thread *thread;

	thread = CONTAINER_OF(obj_core, struct k_thread, obj_core);

	return k_thread_runtime_stats_enable(thread);
#else
	return -ENOTSUP;
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
}
#endif /* CONFIG_OBJ_CORE_STATS_THREAD */

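/*
 * Statistics hooks for the per-CPU and kernel objects, used by the object
 * core statistics framework: raw copy-out of the k_cycle_stats backing
 * store and query of the aggregated runtime statistics.
 */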
#ifdef CONFIG_OBJ_CORE_STATS_SYSTEM
int z_cpu_stats_raw(struct k_obj_core *obj_core, void *stats)
{
	k_spinlock_key_t  key;

	key = k_spin_lock(&usage_lock);
	memcpy(stats, obj_core->stats, sizeof(struct k_cycle_stats));
	k_spin_unlock(&usage_lock, key);

	return 0;
}

int z_cpu_stats_query(struct k_obj_core *obj_core, void *stats)
{
	struct _cpu  *cpu;

	cpu = CONTAINER_OF(obj_core, struct _cpu, obj_core);

	z_sched_cpu_usage(cpu->id, stats);

	return 0;
}
#endif /* CONFIG_OBJ_CORE_STATS_SYSTEM */

#ifdef CONFIG_OBJ_CORE_STATS_SYSTEM
int z_kernel_stats_raw(struct k_obj_core *obj_core, void *stats)
{
	k_spinlock_key_t  key;

	key = k_spin_lock(&usage_lock);
	memcpy(stats, obj_core->stats,
	       CONFIG_MP_MAX_NUM_CPUS * sizeof(struct k_cycle_stats));
	k_spin_unlock(&usage_lock, key);

	return 0;
}

int z_kernel_stats_query(struct k_obj_core *obj_core, void *stats)
{
	ARG_UNUSED(obj_core);

	return k_thread_runtime_stats_all_get(stats);
}
#endif /* CONFIG_OBJ_CORE_STATS_SYSTEM */