/*
 * Copyright (c) 2022 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */
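
/* Log frontend which sends log messages in the dictionary-based format over the
 * UART selected by the zephyr,console chosen node. Messages are stored in an
 * MPSC packet buffer and transmitted using the asynchronous or interrupt-driven
 * UART API; in panic mode they are polled out synchronously.
 */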
#include <zephyr/logging/log_frontend.h>
#include <zephyr/logging/log_internal.h>
#include <zephyr/logging/log_output_dict.h>
#include <zephyr/sys/mpsc_pbuf.h>
#include <zephyr/device.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/init.h>

static uint32_t dbuf[CONFIG_LOG_FRONTEND_DICT_UART_BUFFER_SIZE / sizeof(uint32_t)];

struct log_frontend_uart_pkt_hdr {
	MPSC_PBUF_HDR;
	uint16_t len: 12; /* Total packet length in 32-bit words. */
	uint32_t noff: 2; /* Number of unused (padding) bytes in the last word. */
} __packed;

BUILD_ASSERT(sizeof(struct log_frontend_uart_pkt_hdr) == sizeof(uint16_t));

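/* Each buffered packet starts with the frontend header above, which is not
 * transmitted, followed by the bytes that go out over the UART: a dictionary
 * message header and the message payload, padded up to a 32-bit word boundary.
 */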
struct log_frontend_uart_generic_pkt {
	struct log_frontend_uart_pkt_hdr hdr;
	uint8_t data[0];
} __packed;

struct log_frontend_uart_dropped_pkt {
	struct log_frontend_uart_pkt_hdr hdr;
	struct log_dict_output_dropped_msg_t data;
} __packed;

struct log_frontend_uart_pkt {
	struct log_frontend_uart_pkt_hdr hdr;
	struct log_dict_output_normal_msg_hdr_t data_hdr;
	uint8_t data[0];
} __packed;

/* Union needed to avoid warning when casting to packed structure. */
union log_frontend_pkt {
	struct log_frontend_uart_generic_pkt *generic;
	struct log_frontend_uart_dropped_pkt *dropped;
	struct log_frontend_uart_pkt *pkt;
	const union mpsc_pbuf_generic *ro_pkt;
	union mpsc_pbuf_generic *rw_pkt;
};

static uint32_t get_wlen(const union mpsc_pbuf_generic *packet)
{
	struct log_frontend_uart_generic_pkt *hdr = (struct log_frontend_uart_generic_pkt *)packet;

	return (uint32_t)hdr->hdr.len;
}

static void notify_drop(const struct mpsc_pbuf_buffer *buffer,
			const union mpsc_pbuf_generic *packet)
{
	/* Nothing to do. The buffer is not configured in overwrite mode, so
	 * packets are not expected to be dropped by mpsc_pbuf itself.
	 */
}

static const struct mpsc_pbuf_buffer_config config = {
	.buf = dbuf,
	.size = ARRAY_SIZE(dbuf),
	.notify_drop = notify_drop,
	.get_wlen = get_wlen,
	.flags = 0
};
static const struct device *const uart_dev = DEVICE_DT_GET(DT_CHOSEN(zephyr_console));

static struct mpsc_pbuf_buffer buf;
static atomic_t active_cnt; /* Counts number of buffered messages. */
static atomic_t dropped; /* Counter storing number of dropped messages to be reported. */
static atomic_t adding_drop; /* Indicates that drop report message is being added. */
static volatile bool in_panic; /* Indicates that logging is in panic state. */
static bool dropped_notify; /* Indicates that drop report message should be added. */

static void tx(void);
static atomic_val_t add_drop_msg(void);

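/* When a message cannot be stored it is only counted in the dropped counter.
 * A drop report (MSG_DROPPED_MSG) carrying that count is injected into the
 * stream either periodically from the timer below or from the UART TX-done
 * callback.
 */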
static void timeout(struct k_timer *timer)
{
	if (dropped != 0) {
		dropped_notify = true;
		if (add_drop_msg() == 0) {
			tx();
		}
	}
}

K_TIMER_DEFINE(dropped_timer, timeout, NULL);

/* Attempts to get pending message and initiate UART transfer. In panic it polls
 * out the message in the blocking mode.
 */
static void tx(void)
{
	union log_frontend_pkt generic_pkt;
	struct log_frontend_uart_generic_pkt *pkt;

	if (!IS_ENABLED(CONFIG_UART_ASYNC_API) && !in_panic) {
		uart_irq_tx_enable(uart_dev);
		return;
	}

	generic_pkt.ro_pkt = mpsc_pbuf_claim(&buf);
	pkt = generic_pkt.generic;
	__ASSERT_NO_MSG(pkt != NULL);

	size_t len = sizeof(uint32_t) * pkt->hdr.len - pkt->hdr.noff -
			sizeof(struct log_frontend_uart_pkt_hdr);

	if (in_panic) {
		for (int i = 0; i < len; i++) {
			uart_poll_out(uart_dev, pkt->data[i]);
		}
		atomic_dec(&active_cnt);
	} else {
		int err = uart_tx(uart_dev, pkt->data, len, SYS_FOREVER_US);

		(void)err;
		__ASSERT_NO_MSG(err == 0);
	}
}

/* Add drop message and reset drop message count. */
static atomic_val_t add_drop_msg(void)
{
	union log_frontend_pkt generic_pkt;
	struct log_frontend_uart_dropped_pkt *pkt;
	size_t len = sizeof(struct log_frontend_uart_dropped_pkt);
	size_t wlen = DIV_ROUND_UP(len, sizeof(uint32_t));

	if (atomic_cas(&adding_drop, 0, 1) == false) {
		return 1;
	}

	generic_pkt.rw_pkt = mpsc_pbuf_alloc(&buf, wlen, K_NO_WAIT);
	pkt = generic_pkt.dropped;
	if (pkt == NULL) {
		return 1;
	}

	dropped_notify = false;
	pkt->hdr.len = wlen;
	pkt->hdr.noff = sizeof(uint32_t) * wlen - len;
	pkt->data.type = MSG_DROPPED_MSG;
	pkt->data.num_dropped_messages = atomic_set(&dropped, 0);
	mpsc_pbuf_commit(&buf, generic_pkt.rw_pkt);

	return atomic_inc(&active_cnt);
}

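/* Async UART API callback. On TX completion the transmitted packet is freed,
 * a pending drop report is queued if needed and the next transfer is started
 * when more packets are buffered.
 */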
static void uart_callback(const struct device *dev,
			  struct uart_event *evt,
			  void *user_data)
{
	switch (evt->type) {
	case UART_TX_DONE:
	{
		union log_frontend_pkt generic_pkt;
		struct log_frontend_uart_pkt_hdr *hdr;

		/* Transmission starts right after the frontend header. */
		hdr = (struct log_frontend_uart_pkt_hdr *)(evt->data.tx.buf -
				sizeof(struct log_frontend_uart_pkt_hdr));
		generic_pkt.generic = CONTAINER_OF(hdr, struct log_frontend_uart_generic_pkt, hdr);

		mpsc_pbuf_free(&buf, generic_pkt.ro_pkt);
		atomic_val_t rem_pkts = atomic_dec(&active_cnt);

		if (dropped_notify) {
			rem_pkts = add_drop_msg();
		}

		if (rem_pkts > 1) {
			tx();
		}
	}
	break;

	case UART_TX_ABORTED:
		break;
	default:
		break;
	}
}

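/* Interrupt-driven UART API callback. Keeps filling the TX FIFO from the
 * currently claimed packet; once a packet is fully sent it is freed and the TX
 * interrupt is disabled when no more packets are buffered.
 */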
static void uart_isr_callback(const struct device *dev, void *user_data)
{
	static size_t curr_offset;
	static union log_frontend_pkt isr_pkt;

	if (uart_irq_update(dev) && uart_irq_tx_ready(dev)) {
		if (isr_pkt.ro_pkt == NULL) {
			isr_pkt.ro_pkt = mpsc_pbuf_claim(&buf);
			__ASSERT_NO_MSG(isr_pkt.ro_pkt != NULL);

			curr_offset = 0;
		}

		if (isr_pkt.ro_pkt != NULL) {
			struct log_frontend_uart_generic_pkt *pkt = isr_pkt.generic;
			size_t len = sizeof(uint32_t) * pkt->hdr.len - pkt->hdr.noff -
				sizeof(struct log_frontend_uart_pkt_hdr);
			size_t rem = len - curr_offset;

			if (rem > 0) {
				uint8_t *d = &pkt->data[curr_offset];

				curr_offset += uart_fifo_fill(dev, d, rem);
			} else {
				mpsc_pbuf_free(&buf, isr_pkt.ro_pkt);
				isr_pkt.ro_pkt = NULL;

				static struct k_spinlock lock;
				k_spinlock_key_t key = k_spin_lock(&lock);

				active_cnt--;
				if (active_cnt == 0) {
					uart_irq_tx_disable(dev);
				}
				k_spin_unlock(&lock, key);
			}
		}
	}
}

static inline void hdr_fill(struct log_dict_output_normal_msg_hdr_t *hdr,
			    const void *source,
			    const struct log_msg_desc desc)
{
	hdr->type = MSG_NORMAL;
	hdr->domain = desc.domain;
	hdr->level = desc.level;
	hdr->package_len = desc.package_len;
	hdr->data_len = desc.data_len;
	hdr->timestamp = z_log_timestamp();
	hdr->source = (source != NULL) ?
			(IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING) ?
				log_dynamic_source_id((void *)source) :
				log_const_source_id((void *)source)) :
			0U;
}

/* Handle logging message in synchronous manner, in panic mode. */
static void sync_msg(const void *source,
		     const struct log_msg_desc desc,
		     uint8_t *package, const void *data)
{
	struct log_dict_output_normal_msg_hdr_t hdr;
	uint8_t *datas[3] = {(uint8_t *)&hdr, package, (uint8_t *)data};
	size_t len[3] = {sizeof(hdr), desc.package_len, desc.data_len};

	hdr_fill(&hdr, source, desc);

	for (int i = 0; i < ARRAY_SIZE(datas); i++) {
		for (int j = 0; j < len[i]; j++) {
			uart_poll_out(uart_dev, datas[i][j]);
		}
	}
}

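/* Frontend hook for standard log messages. The package is converted so that any
 * read-write strings are copied into it, prepended with a dictionary message
 * header and stored in the packet buffer; transmission is started if none is in
 * progress. The message is counted as dropped when the buffer has no room.
 */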
void log_frontend_msg(const void *source,
		      const struct log_msg_desc desc,
		      uint8_t *package, const void *data)
{
	uint16_t strl[4];
	struct log_msg_desc outdesc = desc;
	/* First call only calculates the size of the package with read-write
	 * strings appended.
	 */
	int plen = cbprintf_package_copy(package, desc.package_len, NULL, 0,
					 CBPRINTF_PACKAGE_CONVERT_RW_STR,
					 strl, ARRAY_SIZE(strl));
	size_t dlen = desc.data_len;
	bool dev_ready = device_is_ready(uart_dev);
	size_t total_len = plen + dlen + sizeof(struct log_frontend_uart_pkt);
	size_t total_wlen = DIV_ROUND_UP(total_len, sizeof(uint32_t));

	if (in_panic) {
		sync_msg(source, desc, package, data);
		return;
	}

	union log_frontend_pkt generic_pkt;
	struct log_frontend_uart_pkt *pkt;

	generic_pkt.rw_pkt = mpsc_pbuf_alloc(&buf, total_wlen, K_NO_WAIT);

	pkt = generic_pkt.pkt;
	if (pkt == NULL) {
		/* Dropping */
		atomic_inc(&dropped);
		return;
	}

	pkt->hdr.len = total_wlen;
	pkt->hdr.noff = sizeof(uint32_t) * total_wlen - total_len;
	outdesc.package_len = plen;
	hdr_fill(&pkt->data_hdr, source, outdesc);

	plen = cbprintf_package_copy(package, desc.package_len,
				     pkt->data, plen,
				     CBPRINTF_PACKAGE_CONVERT_RW_STR,
				     strl, ARRAY_SIZE(strl));
	if (plen < 0) {
		/* error */
	}

	if (dlen != 0) {
		memcpy(&pkt->data[plen], data, dlen);
	}

	mpsc_pbuf_commit(&buf, generic_pkt.rw_pkt);

	if (dev_ready && (atomic_inc(&active_cnt) == 0)) {
		tx();
	}
}

void log_frontend_panic(void)
{
	in_panic = true;

	/* Flush all data */
	while (atomic_get(&active_cnt) > 0) {
		tx();
		/* empty */
	}
}

void log_frontend_init(void)
{
	int err;

	if (IS_ENABLED(CONFIG_UART_ASYNC_API)) {
		err = uart_callback_set(uart_dev, uart_callback, NULL);
	} else if (IS_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN)) {
		uart_irq_callback_user_data_set(uart_dev, uart_isr_callback, NULL);
		err = 0;
	} else {
		/* No supported UART API enabled; leave the frontend inactive. */
		err = -ENOTSUP;
	}

	__ASSERT(err == 0, "Failed to set callback");
	if (err < 0) {
		return;
	}

	mpsc_pbuf_init(&buf, &config);
}

/* Cannot be started in log_frontend_init because it is called before kernel is ready. */
static int log_frontend_uart_start_timer(void)
{
	k_timeout_t t = K_MSEC(CONFIG_LOG_FRONTEND_DICT_UART_DROPPED_NOTIFY_PERIOD);

	k_timer_start(&dropped_timer, t, t);

	return 0;
}

SYS_INIT(log_frontend_uart_start_timer, POST_KERNEL,
	 CONFIG_KERNEL_INIT_PRIORITY_DEFAULT);