/*
 * Copyright (c) 2018 Nordic Semiconductor ASA
 * Copyright (c) 2016 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "kernel_shell.h"

#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/kernel.h>

#ifdef CONFIG_THREAD_RUNTIME_STATS
static void rt_stats_dump(const struct shell *sh, struct k_thread *thread)
{
	k_thread_runtime_stats_t rt_stats_thread;
	k_thread_runtime_stats_t rt_stats_all;
	int ret = 0;
	unsigned int pcnt;

	/* Collect per-thread and system-wide stats; report '?' below if
	 * either query fails.
	 */
	if (k_thread_runtime_stats_get(thread, &rt_stats_thread) != 0) {
		ret++;
	}

	if (k_thread_runtime_stats_all_get(&rt_stats_all) != 0) {
		ret++;
	}

	if (ret == 0) {
		pcnt = (rt_stats_thread.execution_cycles * 100U) /
		       rt_stats_all.execution_cycles;

		/*
		 * z_prf() does not support %llu unless
		 * CONFIG_MINIMAL_LIBC_LL_PRINTF=y, so truncate the
		 * counters to 32 bits rather than blindly enabling
		 * that Kconfig option, which would increase RAM/ROM
		 * usage on 32-bit targets (see the sketch after this
		 * function).
		 */
		shell_print(sh, "\tTotal execution cycles: %u (%u %%)",
			    (uint32_t)rt_stats_thread.execution_cycles,
			    pcnt);
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
		shell_print(sh, "\tCurrent execution cycles: %u",
			    (uint32_t)rt_stats_thread.current_cycles);
		shell_print(sh, "\tPeak execution cycles: %u",
			    (uint32_t)rt_stats_thread.peak_cycles);
		shell_print(sh, "\tAverage execution cycles: %u",
			    (uint32_t)rt_stats_thread.average_cycles);
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
	} else {
		shell_print(sh, "\tTotal execution cycles: ? (? %%)");
#ifdef CONFIG_SCHED_THREAD_USAGE_ANALYSIS
		shell_print(sh, "\tCurrent execution cycles: ?");
		shell_print(sh, "\tPeak execution cycles: ?");
		shell_print(sh, "\tAverage execution cycles: ?");
#endif /* CONFIG_SCHED_THREAD_USAGE_ANALYSIS */
	}
}
#endif /* CONFIG_THREAD_RUNTIME_STATS */
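
/*
 * For reference, a minimal sketch (not compiled here): with
 * CONFIG_MINIMAL_LIBC_LL_PRINTF=y, or a libc whose printf handles
 * %llu, the full 64-bit counter could be printed without truncation:
 *
 *	shell_print(sh, "\tTotal execution cycles: %llu (%u %%)",
 *		    (unsigned long long)rt_stats_thread.execution_cycles,
 *		    pcnt);
 */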
static void shell_tdata_dump(const struct k_thread *cthread, void *user_data)
{
	struct k_thread *thread = (struct k_thread *)cthread;
	const struct shell *sh = (const struct shell *)user_data;
	unsigned int pcnt;
	size_t unused;
	size_t size = thread->stack_info.size;
	const char *tname;
	int ret;
	char state_str[32];

	tname = k_thread_name_get(thread);

	/* Mark the currently running thread with a leading '*'. */
	shell_print(sh, "%s%p %-10s",
		    (thread == k_current_get()) ? "*" : " ",
		    thread,
		    tname ? tname : "NA");
	/* Cast to int64_t and use PRId64; %lld is less portable. */
	shell_print(sh, "\toptions: 0x%x, priority: %d timeout: %" PRId64,
		    thread->base.user_options,
		    thread->base.prio,
		    (int64_t)thread->base.timeout.dticks);
	shell_print(sh, "\tstate: %s, entry: %p",
		    k_thread_state_str(thread, state_str, sizeof(state_str)),
		    thread->entry.pEntry);

#ifdef CONFIG_SCHED_CPU_MASK
	shell_print(sh, "\tcpu_mask: 0x%x", thread->base.cpu_mask);
#endif /* CONFIG_SCHED_CPU_MASK */

	IF_ENABLED(CONFIG_THREAD_RUNTIME_STATS, (rt_stats_dump(sh, thread)));

	ret = k_thread_stack_space_get(thread, &unused);
	if (ret) {
		shell_print(sh,
			    "Unable to determine unused stack size (%d)\n",
			    ret);
	} else {
		/* Calculate the percentage of the stack actually used */
		pcnt = ((size - unused) * 100U) / size;

		shell_print(sh,
			    "\tstack size %zu, unused %zu, usage %zu / %zu (%u %%)\n",
			    size, unused, size - unused, size, pcnt);
	}
}

static int cmd_kernel_thread_list(const struct shell *sh, size_t argc, char **argv)
{
	ARG_UNUSED(argc);
	ARG_UNUSED(argv);

	shell_print(sh, "Scheduler: %u since last call", sys_clock_elapsed());
	shell_print(sh, "Threads:");

	/*
	 * Use the unlocked version as the callback itself might call
	 * arch_irq_unlock.
	 */
	k_thread_foreach_unlocked(shell_tdata_dump, (void *)sh);

	return 0;
}
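
/*
 * For contrast, a minimal sketch of the locked variant (assumption: it
 * is unsuitable here because, as the comment above notes, the callback
 * might call arch_irq_unlock while iterating):
 *
 *	k_thread_foreach(shell_tdata_dump, (void *)sh);
 */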
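/*
 * Illustrative shell session (a hypothetical sketch; addresses and
 * values are made up, only the output shape is meaningful):
 *
 *   uart:~$ kernel thread list
 *   Scheduler: 123 since last call
 *   Threads:
 *   *0x20000b20 shell_uart
 *           options: 0x0, priority: 14 timeout: 0
 *           state: queued, entry: 0x80012a5
 *           stack size 2048, unused 1192, usage 856 / 2048 (41 %)
 */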
KERNEL_THREAD_CMD_ADD(list, NULL, "List kernel threads.", cmd_kernel_thread_list);