/*
 * Copyright (c) 2020 Libre Solar Technologies GmbH
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/task_wdt/task_wdt.h>

#include <zephyr/drivers/watchdog.h>
#include <zephyr/sys/reboot.h>
#include <zephyr/device.h>
#include <errno.h>

#define LOG_LEVEL CONFIG_WDT_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(task_wdt);

/*
 * This dummy channel is used to continue feeding the hardware watchdog if the
 * task watchdog timeouts are too long for regular updates
 */
#define TASK_WDT_BACKGROUND_CHANNEL UINTPTR_MAX

/*
 * Task watchdog channel data
 */
struct task_wdt_channel {
	/* period in milliseconds used to reset the timeout, set to 0 to
	 * indicate that the channel is available
	 */
	uint32_t reload_period;
	/* abs. ticks when this channel expires (updated by task_wdt_feed) */
	int64_t timeout_abs_ticks;
	/* user data passed to the callback function */
	void *user_data;
	/* function to be called when the watchdog timer expires */
	task_wdt_callback_t callback;
};

/* array of all task watchdog channels */
static struct task_wdt_channel channels[CONFIG_TASK_WDT_CHANNELS];
static struct k_spinlock channels_lock;

/* timer used for watchdog handling */
static struct k_timer timer;

#ifdef CONFIG_TASK_WDT_HW_FALLBACK
/* pointer to the hardware watchdog used as a fallback */
static const struct device *hw_wdt_dev;
static int hw_wdt_channel;
static bool hw_wdt_started;
#endif

static void schedule_next_timeout(int64_t current_ticks)
{
	uintptr_t next_channel_id;	/* channel which will time out next */
	int64_t next_timeout;		/* timeout in absolute ticks of this channel */

#ifdef CONFIG_TASK_WDT_HW_FALLBACK
	next_channel_id = TASK_WDT_BACKGROUND_CHANNEL;
	next_timeout = current_ticks +
		k_ms_to_ticks_ceil64(CONFIG_TASK_WDT_MIN_TIMEOUT);
#else
	next_channel_id = 0;
	next_timeout = INT64_MAX;
#endif

	/* find minimum timeout of all channels */
	for (int id = 0; id < ARRAY_SIZE(channels); id++) {
		if (channels[id].reload_period != 0 &&
		    channels[id].timeout_abs_ticks < next_timeout) {
			next_channel_id = id;
			next_timeout = channels[id].timeout_abs_ticks;
		}
	}

	/* update task wdt kernel timer */
	k_timer_user_data_set(&timer, (void *)next_channel_id);
	k_timer_start(&timer, K_TIMEOUT_ABS_TICKS(next_timeout), K_FOREVER);

#ifdef CONFIG_TASK_WDT_HW_FALLBACK
	if (hw_wdt_started) {
		wdt_feed(hw_wdt_dev, hw_wdt_channel);
	}
#endif
}

/**
 * @brief Task watchdog timer callback.
 *
 * If the device operates as intended, this function will never be called,
 * as the timer is continuously restarted with the next due timeout in the
 * task_wdt_feed() function.
 *
 * If all task watchdogs have longer timeouts than the hardware watchdog,
 * this function is called regularly (via the background channel). This
 * should be avoided by setting CONFIG_TASK_WDT_MIN_TIMEOUT to the minimum
 * task watchdog timeout used in the application.
 *
 * @param timer_id Pointer to the timer which called the function
 */
static void task_wdt_trigger(struct k_timer *timer_id)
{
	uintptr_t channel_id = (uintptr_t)k_timer_user_data_get(timer_id);
	bool bg_channel = IS_ENABLED(CONFIG_TASK_WDT_HW_FALLBACK) &&
			  (channel_id == TASK_WDT_BACKGROUND_CHANNEL);

	/* If the timeout expired for the background channel (so the hardware
	 * watchdog needs to be fed) or for a channel that has been deleted,
	 * only schedule a new timeout (the hardware watchdog, if used, will be
	 * fed right after that new timeout is scheduled).
	 */
	if (bg_channel || channels[channel_id].reload_period == 0) {
		schedule_next_timeout(sys_clock_tick_get());
		return;
	}

	if (channels[channel_id].callback) {
		channels[channel_id].callback(channel_id,
			channels[channel_id].user_data);
	} else {
		sys_reboot(SYS_REBOOT_COLD);
	}
}
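
/*
 * Example of a user-provided callback (illustrative sketch, not part of this
 * module): it logs the expired channel and then performs the same cold reboot
 * as the default path above. The name wdt_callback and the log message are
 * assumptions made for the example; the parameter list mirrors how the
 * callback is invoked in task_wdt_trigger() above.
 *
 *	static void wdt_callback(int channel_id, void *user_data)
 *	{
 *		LOG_ERR("task watchdog channel %d expired", channel_id);
 *		sys_reboot(SYS_REBOOT_COLD);
 *	}
 *
 * Such a callback is registered via task_wdt_add() together with optional
 * user data, which is passed back as the second argument on expiry.
 */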

int task_wdt_init(const struct device *hw_wdt)
{
	if (hw_wdt) {
#ifdef CONFIG_TASK_WDT_HW_FALLBACK
		struct wdt_timeout_cfg wdt_config;

		wdt_config.flags = WDT_FLAG_RESET_SOC;
		wdt_config.window.min = 0U;
		wdt_config.window.max = CONFIG_TASK_WDT_MIN_TIMEOUT +
			CONFIG_TASK_WDT_HW_FALLBACK_DELAY;
		wdt_config.callback = NULL;

		hw_wdt_dev = hw_wdt;
		hw_wdt_channel = wdt_install_timeout(hw_wdt_dev, &wdt_config);
		if (hw_wdt_channel < 0) {
			LOG_ERR("hw_wdt install timeout failed: %d", hw_wdt_channel);
			return hw_wdt_channel;
		}
#else
		return -ENOTSUP;
#endif
	}

	k_timer_init(&timer, task_wdt_trigger, NULL);
	schedule_next_timeout(sys_clock_tick_get());

	return 0;
}

int task_wdt_add(uint32_t reload_period, task_wdt_callback_t callback,
		 void *user_data)
{
	k_spinlock_key_t key;

	if (reload_period == 0) {
		return -EINVAL;
	}

	/*
	 * k_spin_lock instead of k_sched_lock is required here to avoid being
	 * interrupted by the expiry of another task watchdog channel, which is
	 * handled in ISR context.
	 */
	key = k_spin_lock(&channels_lock);

	/* look for unused channel (reload_period set to 0) */
	for (int id = 0; id < ARRAY_SIZE(channels); id++) {
		if (channels[id].reload_period == 0) {
			channels[id].reload_period = reload_period;
			channels[id].user_data = user_data;
			channels[id].timeout_abs_ticks = K_TICKS_FOREVER;
			channels[id].callback = callback;

#ifdef CONFIG_TASK_WDT_HW_FALLBACK
			if (!hw_wdt_started && hw_wdt_dev) {
				/* also start fallback hw wdt */
				wdt_setup(hw_wdt_dev,
					WDT_OPT_PAUSE_HALTED_BY_DBG);
				hw_wdt_started = true;
			}
#endif
			/* must be called after hw wdt has been started */
			task_wdt_feed(id);

			k_spin_unlock(&channels_lock, key);

			return id;
		}
	}

	k_spin_unlock(&channels_lock, key);

	return -ENOMEM;
}

int task_wdt_delete(int channel_id)
{
	k_spinlock_key_t key;

	if (channel_id < 0 || channel_id >= ARRAY_SIZE(channels)) {
		return -EINVAL;
	}

	key = k_spin_lock(&channels_lock);

	channels[channel_id].reload_period = 0;

	k_spin_unlock(&channels_lock, key);

	return 0;
}

int task_wdt_feed(int channel_id)
{
	int64_t current_ticks;

	if (channel_id < 0 || channel_id >= ARRAY_SIZE(channels)) {
		return -EINVAL;
	}

	/*
	 * We need a critical section instead of a mutex while updating the
	 * channels array in order to prevent priority inversion. Otherwise,
	 * a low priority thread could be preempted before releasing the mutex
	 * and block a high priority thread that wants to feed its task wdt.
	 */
	k_sched_lock();

	current_ticks = sys_clock_tick_get();

	/* feed the specified channel */
	channels[channel_id].timeout_abs_ticks = current_ticks +
		k_ms_to_ticks_ceil64(channels[channel_id].reload_period);

	schedule_next_timeout(current_ticks);

	k_sched_unlock();

	return 0;
}
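
/*
 * Minimal usage sketch (illustrative, not part of this module): initialize
 * the subsystem, register one channel without a callback (so expiry triggers
 * the default cold reboot) and feed it from a thread loop. The devicetree
 * alias "watchdog0" and the 1000 ms reload period are assumptions made for
 * the example.
 *
 *	const struct device *hw_wdt = DEVICE_DT_GET_OR_NULL(DT_ALIAS(watchdog0));
 *	int channel;
 *
 *	task_wdt_init(hw_wdt);
 *
 *	// reboot if this thread hangs for more than 1000 ms
 *	channel = task_wdt_add(1000U, NULL, NULL);
 *
 *	while (true) {
 *		task_wdt_feed(channel);
 *		k_sleep(K_MSEC(500));
 *	}
 */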