/*
 * Copyright (c) 2024, Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/debug/cpu_load.h>
#include <zephyr/kernel.h>
#include <zephyr/drivers/counter.h>
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(cpu_load);

BUILD_ASSERT(!IS_ENABLED(CONFIG_CPU_LOAD_USE_COUNTER) || DT_HAS_CHOSEN(zephyr_cpu_load_counter));
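
/*
 * With CONFIG_CPU_LOAD_USE_COUNTER enabled, the devicetree must name the
 * counter device via the chosen node checked above. Illustrative overlay
 * (the &timer0 label is board specific and shown only as an example):
 *
 *     / {
 *             chosen {
 *                     zephyr,cpu-load-counter = &timer0;
 *             };
 *     };
 */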

#ifndef CONFIG_CPU_LOAD_LOG_PERIODICALLY
#define CONFIG_CPU_LOAD_LOG_PERIODICALLY 0
#endif
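
/*
 * Example configuration (prj.conf, illustrative values; the period is in
 * milliseconds, see the K_MSEC() uses below):
 *
 *     CONFIG_CPU_LOAD_LOG_PERIODICALLY=1000
 *     CONFIG_CPU_LOAD_USE_COUNTER=y
 */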

const struct device *counter = COND_CODE_1(CONFIG_CPU_LOAD_USE_COUNTER,
				(DEVICE_DT_GET(DT_CHOSEN(zephyr_cpu_load_counter))), (NULL));
static uint32_t enter_ts;
static uint32_t cyc_start;
static uint32_t ticks_idle;

static struct k_work_delayable cpu_load_log;

/*
 * Enable or disable the periodic load log. Enabling resets the measurement
 * window (cpu_load_get(true)) and schedules the first log after
 * CONFIG_CPU_LOAD_LOG_PERIODICALLY milliseconds; with a zero period this is
 * a no-op.
 */
void cpu_load_log_control(bool enable)
{
	if (CONFIG_CPU_LOAD_LOG_PERIODICALLY == 0) {
		return;
	}

	if (enable) {
		(void)cpu_load_get(true);
		k_work_schedule(&cpu_load_log, K_MSEC(CONFIG_CPU_LOAD_LOG_PERIODICALLY));
	} else {
		k_work_cancel_delayable(&cpu_load_log);
	}
}
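
/*
 * Usage sketch (application code, illustrative): enable periodic logging and
 * later disable it again:
 *
 *     cpu_load_log_control(true);
 *     ...
 *     cpu_load_log_control(false);
 */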

#if CONFIG_CPU_LOAD_USE_COUNTER || CONFIG_CPU_LOAD_LOG_PERIODICALLY
static void cpu_load_log_fn(struct k_work *work)
{
	int load = cpu_load_get(true);
	uint32_t percent = load / 10;   /* whole percent */
	uint32_t fraction = load % 10;  /* single tenths digit (0-9) */

	ARG_UNUSED(work);

	LOG_INF("Load:%d.%d%%", percent, fraction);
	cpu_load_log_control(true);
}
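
/*
 * Worked example: cpu_load_get() returning 567 per mille is logged as
 * "Load:56.7%" (567 / 10 = 56, 567 % 10 = 7).
 */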

static int cpu_load_init(void)
{
	if (IS_ENABLED(CONFIG_CPU_LOAD_USE_COUNTER)) {
		int err = counter_start(counter);

		(void)err;
		__ASSERT_NO_MSG(err == 0);
	}

	if (CONFIG_CPU_LOAD_LOG_PERIODICALLY > 0) {
		int ret;

		k_work_init_delayable(&cpu_load_log, cpu_load_log_fn);
		ret = k_work_schedule(&cpu_load_log, K_MSEC(CONFIG_CPU_LOAD_LOG_PERIODICALLY));

		/* k_work_schedule() returns 1 when the work item was newly
		 * scheduled; only propagate negative (error) values to
		 * SYS_INIT.
		 */
		return ret < 0 ? ret : 0;
	}

	return 0;
}

SYS_INIT(cpu_load_init, POST_KERNEL, 0);
#endif

/* Called on idle entry: latch a timestamp so idle time can be measured. */
void cpu_load_on_enter_idle(void)
{
	if (IS_ENABLED(CONFIG_CPU_LOAD_USE_COUNTER)) {
		counter_get_value(counter, &enter_ts);
		return;
	}

	enter_ts = k_cycle_get_32();
}

/*
 * Called on idle exit: accumulate the time spent idle since the matching
 * enter call. With k_cycle_get_32(), the unsigned subtraction stays correct
 * across a single 32-bit wrap.
 */
void cpu_load_on_exit_idle(void)
{
	uint32_t now;

	if (IS_ENABLED(CONFIG_CPU_LOAD_USE_COUNTER)) {
		counter_get_value(counter, &now);
	} else {
		now = k_cycle_get_32();
	}

	ticks_idle += now - enter_ts;
}

/*
 * Return the CPU load since the last reset in per mille (0..1000), e.g. 567
 * means 56.7 % of the elapsed time was spent outside idle. When reset is
 * true, the measurement window restarts from now.
 */
int cpu_load_get(bool reset)
{
	uint32_t idle_us;
	uint32_t total = k_cycle_get_32() - cyc_start;
	uint32_t total_us = (uint32_t)k_cyc_to_us_floor32(total);
	uint32_t res;
	uint32_t active_us;

	if (IS_ENABLED(CONFIG_CPU_LOAD_USE_COUNTER)) {
		idle_us = counter_ticks_to_us(counter, ticks_idle);
	} else {
		idle_us = k_cyc_to_us_floor32(ticks_idle);
	}

	idle_us = MIN(idle_us, total_us);
	active_us = total_us - idle_us;

	/* Scale to per mille. The 64-bit intermediate keeps 1000 * active_us
	 * from overflowing for windows longer than ~4.3 s of active time,
	 * and the guard avoids dividing by zero right after a reset.
	 */
	res = (total_us == 0) ? 0 : (uint32_t)((1000ULL * active_us) / total_us);

	if (reset) {
		cyc_start = k_cycle_get_32();
		ticks_idle = 0;
	}

	return res;
}
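
/*
 * Usage sketch (application code, illustrative): sample and reset the load,
 * then print it with one decimal place.
 *
 *     int load = cpu_load_get(true);
 *     printk("CPU load: %d.%d%%\n", load / 10, load % 10);
 */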