/*
 * Copyright (c) 2018 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/logging/log.h>
#include <zephyr/logging/log_backend.h>
#include <zephyr/logging/log_ctrl.h>
#include <zephyr/logging/log_output.h>
#include <zephyr/logging/log_internal.h>
#include <zephyr/sys/mpsc_pbuf.h>
#include <zephyr/logging/log_link.h>
#include <zephyr/sys/printk.h>
#include <zephyr/sys_clock.h>
#include <zephyr/init.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/iterable_sections.h>
#include <ctype.h>
#include <zephyr/logging/log_frontend.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/logging/log_output_dict.h>
#include <zephyr/logging/log_output_custom.h>
#include <zephyr/linker/utils.h>

#ifdef CONFIG_LOG_TIMESTAMP_USE_REALTIME
#include <zephyr/posix/time.h>
#endif

LOG_MODULE_REGISTER(log);

#ifndef CONFIG_LOG_PROCESS_THREAD_SLEEP_MS
#define CONFIG_LOG_PROCESS_THREAD_SLEEP_MS 0
#endif

#ifndef CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD
#define CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD 0
#endif

#ifndef CONFIG_LOG_PROCESS_THREAD_STACK_SIZE
#define CONFIG_LOG_PROCESS_THREAD_STACK_SIZE 1
#endif

#ifndef CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS
#define CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS 0
#endif

#ifndef CONFIG_LOG_PROCESSING_LATENCY_US
#define CONFIG_LOG_PROCESSING_LATENCY_US 0
#endif

#ifndef CONFIG_LOG_BUFFER_SIZE
#define CONFIG_LOG_BUFFER_SIZE 4
#endif

#ifdef CONFIG_LOG_PROCESS_THREAD_CUSTOM_PRIORITY
#define LOG_PROCESS_THREAD_PRIORITY CONFIG_LOG_PROCESS_THREAD_PRIORITY
#else
#define LOG_PROCESS_THREAD_PRIORITY K_LOWEST_APPLICATION_THREAD_PRIO
#endif

#ifndef CONFIG_LOG_TAG_MAX_LEN
#define CONFIG_LOG_TAG_MAX_LEN 0
#endif

#ifndef CONFIG_LOG_FAILURE_REPORT_PERIOD
#define CONFIG_LOG_FAILURE_REPORT_PERIOD 0
#endif

#ifndef CONFIG_LOG_ALWAYS_RUNTIME
BUILD_ASSERT(!IS_ENABLED(CONFIG_NO_OPTIMIZATIONS),
	     "CONFIG_LOG_ALWAYS_RUNTIME must be enabled when "
	     "CONFIG_NO_OPTIMIZATIONS is set");
BUILD_ASSERT(!IS_ENABLED(CONFIG_LOG_MODE_IMMEDIATE),
	     "CONFIG_LOG_ALWAYS_RUNTIME must be enabled when "
	     "CONFIG_LOG_MODE_IMMEDIATE is set");
#endif

static const log_format_func_t format_table[] = {
	[LOG_OUTPUT_TEXT] = IS_ENABLED(CONFIG_LOG_OUTPUT) ?
						log_output_msg_process : NULL,
	[LOG_OUTPUT_SYST] = IS_ENABLED(CONFIG_LOG_MIPI_SYST_ENABLE) ?
						log_output_msg_syst_process : NULL,
	[LOG_OUTPUT_DICT] = IS_ENABLED(CONFIG_LOG_DICTIONARY_SUPPORT) ?
						log_dict_output_msg_process : NULL,
	[LOG_OUTPUT_CUSTOM] = IS_ENABLED(CONFIG_LOG_CUSTOM_FORMAT_SUPPORT) ?
						log_custom_output_msg_process : NULL,
};

log_format_func_t log_format_func_t_get(uint32_t log_type)
{
	return format_table[log_type];
}

size_t log_format_table_size(void)
{
	return ARRAY_SIZE(format_table);
}

K_SEM_DEFINE(log_process_thread_sem, 0, 1);

static atomic_t initialized;
static bool panic_mode;
static bool backend_attached;
static atomic_t buffered_cnt;
static atomic_t dropped_cnt;
static k_tid_t proc_tid;
static struct k_timer log_process_thread_timer;

static log_timestamp_t dummy_timestamp(void);
static log_timestamp_get_t timestamp_func = dummy_timestamp;
static uint32_t timestamp_freq;
static log_timestamp_t proc_latency;
static log_timestamp_t prev_timestamp;
static atomic_t unordered_cnt;
static uint64_t last_failure_report;
static struct k_spinlock process_lock;

static STRUCT_SECTION_ITERABLE(log_msg_ptr, log_msg_ptr);
static STRUCT_SECTION_ITERABLE_ALTERNATE(log_mpsc_pbuf, mpsc_pbuf_buffer, log_buffer);
static struct mpsc_pbuf_buffer *curr_log_buffer;

#ifdef CONFIG_MPSC_PBUF
static uint32_t __aligned(Z_LOG_MSG_ALIGNMENT)
	buf32[CONFIG_LOG_BUFFER_SIZE / sizeof(int)];

static void z_log_notify_drop(const struct mpsc_pbuf_buffer *buffer,
			      const union mpsc_pbuf_generic *item);

static const struct mpsc_pbuf_buffer_config mpsc_config = {
	.buf = (uint32_t *)buf32,
	.size = ARRAY_SIZE(buf32),
	.notify_drop = z_log_notify_drop,
	.get_wlen = log_msg_generic_get_wlen,
	.flags = (IS_ENABLED(CONFIG_LOG_MODE_OVERFLOW) ?
		  MPSC_PBUF_MODE_OVERWRITE : 0) |
		 (IS_ENABLED(CONFIG_LOG_MEM_UTILIZATION) ?
		  MPSC_PBUF_MAX_UTILIZATION : 0)
};
#endif

/* Check that default tag can fit in tag buffer. */
COND_CODE_0(CONFIG_LOG_TAG_MAX_LEN, (),
	(BUILD_ASSERT(sizeof(CONFIG_LOG_TAG_DEFAULT) <= CONFIG_LOG_TAG_MAX_LEN + 1,
		      "Default string longer than tag capacity")));

static char tag[CONFIG_LOG_TAG_MAX_LEN + 1] =
	COND_CODE_0(CONFIG_LOG_TAG_MAX_LEN, ({}), (CONFIG_LOG_TAG_DEFAULT));

static void msg_process(union log_msg_generic *msg);

static log_timestamp_t dummy_timestamp(void)
{
	return 0;
}

log_timestamp_t z_log_timestamp(void)
{
	return timestamp_func();
}

static void z_log_msg_post_finalize(void)
{
	atomic_val_t cnt = atomic_inc(&buffered_cnt);

	if (panic_mode) {
		k_spinlock_key_t key = k_spin_lock(&process_lock);
		(void)log_process();

		k_spin_unlock(&process_lock, key);
	} else if (proc_tid != NULL) {
		/*
		 * If CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD == 1, the timer is
		 * never needed: the processing thread is released after every
		 * posted message.
		 */
		if (CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD == 1) {
			if (cnt == 0) {
				k_sem_give(&log_process_thread_sem);
			}
		} else {
			if (cnt == 0) {
				k_timer_start(&log_process_thread_timer,
					      K_MSEC(CONFIG_LOG_PROCESS_THREAD_SLEEP_MS),
					      K_NO_WAIT);
			} else if (CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD &&
				   (cnt + 1) == CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD) {
				k_timer_stop(&log_process_thread_timer);
				k_sem_give(&log_process_thread_sem);
			} else {
				/* No action needed. Message processing will be
				 * triggered by the timeout or once the number of
				 * buffered messages reaches the threshold.
				 */
			}
		}
	}
}

const struct log_backend *log_format_set_all_active_backends(size_t log_type)
{
	const struct log_backend *failed_backend = NULL;

	STRUCT_SECTION_FOREACH(log_backend, backend) {
		if (log_backend_is_active(backend)) {
			int ret = log_backend_format_set(backend, log_type);

			if (ret != 0) {
				failed_backend = backend;
			}
		}
	}
	return failed_backend;
}
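
/* Example (illustrative sketch, not part of this file): switching every
 * active backend to dictionary output at runtime. Assumes
 * CONFIG_LOG_DICTIONARY_SUPPORT is enabled so the LOG_OUTPUT_DICT slot in
 * format_table is populated:
 *
 *	const struct log_backend *failed;
 *
 *	failed = log_format_set_all_active_backends(LOG_OUTPUT_DICT);
 *	if (failed != NULL) {
 *		printk("a backend rejected the format change\n");
 *	}
 */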

void z_log_vprintk(const char *fmt, va_list ap)
{
	if (!IS_ENABLED(CONFIG_LOG_PRINTK)) {
		return;
	}

	z_log_msg_runtime_vcreate(Z_LOG_LOCAL_DOMAIN_ID, NULL,
				   LOG_LEVEL_INTERNAL_RAW_STRING, NULL, 0,
				   Z_LOG_MSG_CBPRINTF_FLAGS(0),
				   fmt, ap);
}

#ifndef CONFIG_LOG_TIMESTAMP_USE_REALTIME
static log_timestamp_t default_get_timestamp(void)
{
	return IS_ENABLED(CONFIG_LOG_TIMESTAMP_64BIT) ?
		sys_clock_tick_get() : k_cycle_get_32();
}

static log_timestamp_t default_lf_get_timestamp(void)
{
	return IS_ENABLED(CONFIG_LOG_TIMESTAMP_64BIT) ?
		k_uptime_get() : k_uptime_get_32();
}
#else
static log_timestamp_t default_rt_get_timestamp(void)
{
	struct timespec tspec;

	clock_gettime(CLOCK_REALTIME, &tspec);

	return ((uint64_t)tspec.tv_sec * MSEC_PER_SEC) + (tspec.tv_nsec / NSEC_PER_MSEC);
}
#endif /* CONFIG_LOG_TIMESTAMP_USE_REALTIME */

void log_core_init(void)
{
	panic_mode = false;
	dropped_cnt = 0;
	buffered_cnt = 0;

	if (IS_ENABLED(CONFIG_LOG_FRONTEND)) {
		log_frontend_init();

		if (IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING)) {
			for (uint16_t s = 0; s < log_src_cnt_get(0); s++) {
				log_frontend_filter_set(s, CONFIG_LOG_MAX_LEVEL);
			}
		}

		if (IS_ENABLED(CONFIG_LOG_FRONTEND_ONLY)) {
			return;
		}
	}

	/* Set the default timestamp. */
#ifdef CONFIG_LOG_TIMESTAMP_USE_REALTIME
	log_set_timestamp_func(default_rt_get_timestamp, 1000U);
#else
	if (sys_clock_hw_cycles_per_sec() > 1000000) {
		log_set_timestamp_func(default_lf_get_timestamp, 1000U);
	} else {
		uint32_t freq = IS_ENABLED(CONFIG_LOG_TIMESTAMP_64BIT) ?
			CONFIG_SYS_CLOCK_TICKS_PER_SEC : sys_clock_hw_cycles_per_sec();
		log_set_timestamp_func(default_get_timestamp, freq);
	}
#endif /* CONFIG_LOG_TIMESTAMP_USE_REALTIME */

	if (IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		z_log_msg_init();
	}

	if (IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING)) {
		z_log_runtime_filters_init();
	}

	STRUCT_SECTION_FOREACH(log_backend, backend) {
		uint32_t id;
		/* Backend IDs are offset because the first slot in the
		 * filtering mask is reserved.
		 */
		id = LOG_FILTER_FIRST_BACKEND_SLOT_IDX;
		id += backend - log_backend_get(0);
		log_backend_id_set(backend, id);
	}
}

static uint32_t activate_foreach_backend(uint32_t mask)
{
	uint32_t mask_cpy = mask;

	while (mask_cpy) {
		uint32_t i = __builtin_ctz(mask_cpy);
		const struct log_backend *backend = log_backend_get(i);

		mask_cpy &= ~BIT(i);
		if (backend->autostart && (log_backend_is_ready(backend) == 0)) {
			mask &= ~BIT(i);
			log_backend_enable(backend,
					   backend->cb->ctx,
					   CONFIG_LOG_MAX_LEVEL);
		}
	}

	return mask;
}

static uint32_t z_log_init(bool blocking, bool can_sleep)
{
	uint32_t mask = 0;

	if (IS_ENABLED(CONFIG_LOG_FRONTEND_ONLY)) {
		return 0;
	}

	__ASSERT_NO_MSG(log_backend_count_get() < LOG_FILTERS_MAX_BACKENDS);

	if (atomic_inc(&initialized) != 0) {
		return 0;
	}

	if (IS_ENABLED(CONFIG_LOG_MULTIDOMAIN)) {
		z_log_links_initiate();
	}

	int backend_index = 0;

	STRUCT_SECTION_FOREACH(log_backend, backend) {
		/* Activate autostart backends */
		if (backend->autostart) {
			log_backend_init(backend);

			/* If the backend has an activation function, it is
			 * not ready until activated.
			 */
			if (log_backend_is_ready(backend) == 0) {
				log_backend_enable(backend,
						   backend->cb->ctx,
						   CONFIG_LOG_MAX_LEVEL);
			} else {
				mask |= BIT(backend_index);
			}
		}

		++backend_index;
	}

	/* If init is blocking, wait until all backends are activated. */
	if (blocking) {
		while (mask) {
			mask = activate_foreach_backend(mask);
			if (IS_ENABLED(CONFIG_MULTITHREADING) && can_sleep) {
				k_msleep(10);
			}
		}
	}

	return mask;
}

void log_init(void)
{
	(void)z_log_init(true, true);
}

void log_thread_trigger(void)
{
	if (IS_ENABLED(CONFIG_LOG_MODE_IMMEDIATE)) {
		return;
	}

	k_timer_stop(&log_process_thread_timer);
	k_sem_give(&log_process_thread_sem);
}

static void thread_set(k_tid_t process_tid)
{
	proc_tid = process_tid;

	if (IS_ENABLED(CONFIG_LOG_MODE_IMMEDIATE)) {
		return;
	}

	if (CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD &&
	    process_tid &&
	    buffered_cnt >= CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD) {
		k_sem_give(&log_process_thread_sem);
	}
}

void log_thread_set(k_tid_t process_tid)
{
	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD)) {
		__ASSERT_NO_MSG(0);
	} else {
		thread_set(process_tid);
	}
}
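
/* Example (illustrative sketch, not part of this file): draining log messages
 * from an application thread. This only applies when CONFIG_LOG_PROCESS_THREAD
 * is disabled, since log_thread_set() asserts otherwise. The thread registers
 * itself so the core can account for it, then drains messages with
 * log_process():
 *
 *	static void my_log_thread(void *a, void *b, void *c)
 *	{
 *		log_thread_set(k_current_get());
 *
 *		while (1) {
 *			while (log_process()) {
 *				;	// keep going while messages are pending
 *			}
 *			k_sleep(K_MSEC(100));	// arbitrary polling period
 *		}
 *	}
 */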

int log_set_timestamp_func(log_timestamp_get_t timestamp_getter, uint32_t freq)
{
	if (timestamp_getter == NULL) {
		return -EINVAL;
	}

	timestamp_func = timestamp_getter;
	timestamp_freq = freq;
	if (CONFIG_LOG_PROCESSING_LATENCY_US) {
		proc_latency = (freq * CONFIG_LOG_PROCESSING_LATENCY_US) / 1000000;
	}

	if (IS_ENABLED(CONFIG_LOG_OUTPUT)) {
		log_output_timestamp_freq_set(freq);
	}

	return 0;
}
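
/* Example (illustrative sketch, not part of this file): installing a custom
 * timestamp source. The getter here wraps a hypothetical 32768 Hz RTC counter
 * read; the frequency argument must match the getter's tick rate so output
 * formatting and the processing latency are scaled correctly:
 *
 *	static log_timestamp_t rtc_timestamp(void)
 *	{
 *		return (log_timestamp_t)my_rtc_counter_read();	// hypothetical helper
 *	}
 *
 *	int err = log_set_timestamp_func(rtc_timestamp, 32768U);
 *
 *	if (err < 0) {
 *		// -EINVAL is returned only for a NULL getter
 *	}
 */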

void z_impl_log_panic(void)
{
	if (panic_mode) {
		return;
	}

	/* If the panic happened early, the logger may not be initialized yet.
	 * Force initialization of the logger and auto-start backends.
	 */
	(void)z_log_init(true, false);

	if (IS_ENABLED(CONFIG_LOG_FRONTEND)) {
		log_frontend_panic();
		if (IS_ENABLED(CONFIG_LOG_FRONTEND_ONLY)) {
			goto out;
		}
	}

	STRUCT_SECTION_FOREACH(log_backend, backend) {
		if (log_backend_is_active(backend)) {
			log_backend_panic(backend);
		}
	}

	if (!IS_ENABLED(CONFIG_LOG_MODE_IMMEDIATE)) {
		/* Flush */
		while (log_process() == true) {
		}
	}

out:
	panic_mode = true;
}

#ifdef CONFIG_USERSPACE
void z_vrfy_log_panic(void)
{
	z_impl_log_panic();
}
#include <zephyr/syscalls/log_panic_mrsh.c>
#endif
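
/* Example (illustrative sketch, not part of this file): a fatal-error hook
 * that switches logging to panic mode so buffered messages are flushed
 * synchronously before the system halts:
 *
 *	void my_fatal_handler(void)
 *	{
 *		LOG_ERR("fatal condition, halting");
 *		log_panic();	// flush deferred messages; backends go synchronous
 *	}
 */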

static bool msg_filter_check(struct log_backend const *backend,
			     union log_msg_generic *msg)
{
	if (!z_log_item_is_msg(msg)) {
		return true;
	}

	if (!IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING)) {
		return true;
	}

	uint32_t backend_level;
	uint8_t level;
	uint8_t domain_id;
	int16_t source_id;

	level = log_msg_get_level(&msg->log);
	domain_id = log_msg_get_domain(&msg->log);
	source_id = log_msg_get_source_id(&msg->log);

	/* Accept all non-logging messages. */
	if (level == LOG_LEVEL_NONE) {
		return true;
	}
	if (source_id >= 0) {
		backend_level = log_filter_get(backend, domain_id, source_id, true);

		return (level <= backend_level);
	} else {
		return true;
	}
}

static void msg_process(union log_msg_generic *msg)
{
	STRUCT_SECTION_FOREACH(log_backend, backend) {
		if (log_backend_is_active(backend) &&
		    msg_filter_check(backend, msg)) {
			log_backend_msg_process(backend, msg);
		}
	}
}
static void dropped_notify(void)
{
	uint32_t dropped = z_log_dropped_read_and_clear();

	STRUCT_SECTION_FOREACH(log_backend, backend) {
		if (log_backend_is_active(backend)) {
			log_backend_dropped(backend, dropped);
		}
	}
}

static void unordered_notify(void)
{
	uint32_t unordered = atomic_set(&unordered_cnt, 0);

	LOG_WRN("%d unordered messages since last report", unordered);
}

void z_log_notify_backend_enabled(void)
{
	/* Wake up the logger thread after the first backend is attached.
	 * It might be blocked with log messages pending.
	 */
	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD) && !backend_attached) {
		k_sem_give(&log_process_thread_sem);
	}

	backend_attached = true;
}

static inline bool z_log_unordered_pending(void)
{
	return IS_ENABLED(CONFIG_LOG_MULTIDOMAIN) && unordered_cnt;
}

bool z_impl_log_process(void)
{
	if (!IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		return false;
	}

	k_timeout_t backoff = K_NO_WAIT;
	union log_msg_generic *msg;

	if (!backend_attached) {
		return false;
	}

	msg = z_log_msg_claim(&backoff);

	if (msg) {
		msg_process(msg);
		z_log_msg_free(msg);
		atomic_dec(&buffered_cnt);
	} else if (CONFIG_LOG_PROCESSING_LATENCY_US > 0 && !K_TIMEOUT_EQ(backoff, K_NO_WAIT)) {
		/* If backoff is requested, it means that there are pending
		 * messages but they are too new and processing shall back off
		 * to allow arrival of newer messages from remote domains.
		 */
		k_timer_start(&log_process_thread_timer, backoff, K_NO_WAIT);

		return false;
	}

	if (IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		bool dropped_pend = z_log_dropped_pending();
		bool unordered_pend = z_log_unordered_pending();

		if ((dropped_pend || unordered_pend) &&
		   (k_uptime_get() - last_failure_report) > CONFIG_LOG_FAILURE_REPORT_PERIOD) {
			if (dropped_pend) {
				dropped_notify();
			}

			if (unordered_pend) {
				unordered_notify();
			}

			/* Advance the report timestamp only when a report was
			 * actually emitted, so reports stay rate limited.
			 */
			last_failure_report += CONFIG_LOG_FAILURE_REPORT_PERIOD;
		}
	}

	return z_log_msg_pending();
}

#ifdef CONFIG_USERSPACE
bool z_vrfy_log_process(void)
{
	return z_impl_log_process();
}
#include <zephyr/syscalls/log_process_mrsh.c>
#endif

uint32_t z_impl_log_buffered_cnt(void)
{
	return buffered_cnt;
}

#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_log_buffered_cnt(void)
{
	return z_impl_log_buffered_cnt();
}
#include <zephyr/syscalls/log_buffered_cnt_mrsh.c>
#endif

void z_log_dropped(bool buffered)
{
	atomic_inc(&dropped_cnt);
	if (buffered) {
		atomic_dec(&buffered_cnt);
	}

	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD)) {
		k_timer_stop(&log_process_thread_timer);
		k_sem_give(&log_process_thread_sem);
	}
}

uint32_t z_log_dropped_read_and_clear(void)
{
	return atomic_set(&dropped_cnt, 0);
}

bool z_log_dropped_pending(void)
{
	return dropped_cnt > 0;
}

void z_log_msg_init(void)
{
#ifdef CONFIG_MPSC_PBUF
	mpsc_pbuf_init(&log_buffer, &mpsc_config);
	curr_log_buffer = &log_buffer;
#endif
}

static struct log_msg *msg_alloc(struct mpsc_pbuf_buffer *buffer, uint32_t wlen)
{
	if (!IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		return NULL;
	}

	return (struct log_msg *)mpsc_pbuf_alloc(
		buffer, wlen,
		(CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS == -1)
			? K_FOREVER
			: K_MSEC(CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS));
}

struct log_msg *z_log_msg_alloc(uint32_t wlen)
{
	return msg_alloc(&log_buffer, wlen);
}

static void msg_commit(struct mpsc_pbuf_buffer *buffer, struct log_msg *msg)
{
	union log_msg_generic *m = (union log_msg_generic *)msg;

	if (IS_ENABLED(CONFIG_LOG_MODE_IMMEDIATE)) {
		k_spinlock_key_t key;

		if (IS_ENABLED(CONFIG_LOG_IMMEDIATE_CLEAN_OUTPUT)) {
			key = k_spin_lock(&process_lock);
		}

		msg_process(m);

		if (IS_ENABLED(CONFIG_LOG_IMMEDIATE_CLEAN_OUTPUT)) {
			k_spin_unlock(&process_lock, key);
		}

		return;
	}

#ifdef CONFIG_MPSC_PBUF
	mpsc_pbuf_commit(buffer, &m->buf);
#endif
	z_log_msg_post_finalize();
}

void z_log_msg_commit(struct log_msg *msg)
{
	msg->hdr.timestamp = timestamp_func();
	msg_commit(&log_buffer, msg);
}
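
/* Illustrative sketch of the deferred-mode message lifecycle implemented
 * above (internal API, not a stable interface). Producers allocate a message
 * of a given word length, fill it in, and commit it; the commit path
 * timestamps the message and wakes the processing thread:
 *
 *	uint32_t wlen = ...;	// message length in 32-bit words
 *	struct log_msg *msg = z_log_msg_alloc(wlen);
 *
 *	if (msg == NULL) {
 *		z_log_dropped(false);	// buffer full; no space freed in time
 *	} else {
 *		// ... fill in msg->hdr and payload ...
 *		z_log_msg_commit(msg);	// timestamps and publishes the message
 *	}
 */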

union log_msg_generic *z_log_msg_local_claim(void)
{
#ifdef CONFIG_MPSC_PBUF
	return (union log_msg_generic *)mpsc_pbuf_claim(&log_buffer);
#else
	return NULL;
#endif
}

/* If there are buffers dedicated to each link, claim the oldest message (lowest timestamp). */
union log_msg_generic *z_log_msg_claim_oldest(k_timeout_t *backoff)
{
	union log_msg_generic *msg = NULL;
	struct log_msg_ptr *chosen = NULL;
	log_timestamp_t t_min = sizeof(log_timestamp_t) > sizeof(uint32_t) ?
				UINT64_MAX : UINT32_MAX;
	int i = 0;

	/* Iterate over all available buffers and pick the oldest message. */
	STRUCT_SECTION_FOREACH(log_msg_ptr, msg_ptr) {
		struct log_mpsc_pbuf *buf;

		STRUCT_SECTION_GET(log_mpsc_pbuf, i, &buf);

#ifdef CONFIG_MPSC_PBUF
		if (msg_ptr->msg == NULL) {
			msg_ptr->msg = (union log_msg_generic *)mpsc_pbuf_claim(&buf->buf);
		}
#endif

		if (msg_ptr->msg) {
			log_timestamp_t t = log_msg_get_timestamp(&msg_ptr->msg->log);

			if (t < t_min) {
				t_min = t;
				msg = msg_ptr->msg;
				chosen = msg_ptr;
				curr_log_buffer = &buf->buf;
			}
		}
		i++;
	}

	if (msg) {
		if (CONFIG_LOG_PROCESSING_LATENCY_US > 0) {
			int32_t diff = t_min - (timestamp_func() - proc_latency);

			if (diff > 0) {
				/* Entry is too new. Back off for some time to
				 * allow remote messages to arrive which may
				 * have been captured earlier (but on another
				 * platform). Calculate for how long processing
				 * shall back off.
				 */
				if (timestamp_freq == sys_clock_hw_cycles_per_sec()) {
					*backoff = K_TICKS(diff);
				} else {
					*backoff = K_TICKS((diff * sys_clock_hw_cycles_per_sec()) /
							timestamp_freq);
				}

				return NULL;
			}
		}

		chosen->msg = NULL;
	}

	if (t_min < prev_timestamp) {
		atomic_inc(&unordered_cnt);
	}

	prev_timestamp = t_min;

	return msg;
}

union log_msg_generic *z_log_msg_claim(k_timeout_t *backoff)
{
	size_t len;

	STRUCT_SECTION_COUNT(log_mpsc_pbuf, &len);

	/* Use only one buffer if others are not registered. */
	if (IS_ENABLED(CONFIG_LOG_MULTIDOMAIN) && len > 1) {
		return z_log_msg_claim_oldest(backoff);
	}

	return z_log_msg_local_claim();
}

static void msg_free(struct mpsc_pbuf_buffer *buffer, const union log_msg_generic *msg)
{
#ifdef CONFIG_MPSC_PBUF
	mpsc_pbuf_free(buffer, &msg->buf);
#endif
}

void z_log_msg_free(union log_msg_generic *msg)
{
	msg_free(curr_log_buffer, msg);
}

static bool msg_pending(struct mpsc_pbuf_buffer *buffer)
{
#ifdef CONFIG_MPSC_PBUF
	return mpsc_pbuf_is_pending(buffer);
#else
	return false;
#endif
}

bool z_log_msg_pending(void)
{
	size_t len;
	int i = 0;

	STRUCT_SECTION_COUNT(log_mpsc_pbuf, &len);

	if (!IS_ENABLED(CONFIG_LOG_MULTIDOMAIN) || (len == 1)) {
		return msg_pending(&log_buffer);
	}

	STRUCT_SECTION_FOREACH(log_msg_ptr, msg_ptr) {
		struct log_mpsc_pbuf *buf;

		if (msg_ptr->msg) {
			return true;
		}

		STRUCT_SECTION_GET(log_mpsc_pbuf, i, &buf);

		if (msg_pending(&buf->buf)) {
			return true;
		}

		i++;
	}

	return false;
}

void z_log_msg_enqueue(const struct log_link *link, const void *data, size_t len)
{
	struct log_msg *log_msg = (struct log_msg *)data;
	size_t wlen = DIV_ROUND_UP(ROUND_UP(len, Z_LOG_MSG_ALIGNMENT), sizeof(int));
	struct mpsc_pbuf_buffer *mpsc_pbuffer = link->mpsc_pbuf ? link->mpsc_pbuf : &log_buffer;
	struct log_msg *local_msg = msg_alloc(mpsc_pbuffer, wlen);

	if (!local_msg) {
		z_log_dropped(false);
		return;
	}

	log_msg->hdr.desc.valid = 0;
	log_msg->hdr.desc.busy = 0;
	log_msg->hdr.desc.domain += link->ctrl_blk->domain_offset;
	memcpy((void *)local_msg, data, len);
	msg_commit(mpsc_pbuffer, local_msg);
}

const char *z_log_get_tag(void)
{
	return CONFIG_LOG_TAG_MAX_LEN > 0 ? tag : NULL;
}

int log_set_tag(const char *str)
{
#if CONFIG_LOG_TAG_MAX_LEN > 0
	if (str == NULL) {
		return -EINVAL;
	}

	size_t len = strlen(str);
	size_t cpy_len = MIN(len, CONFIG_LOG_TAG_MAX_LEN);

	memcpy(tag, str, cpy_len);
	tag[cpy_len] = '\0';

	if (cpy_len < len) {
		tag[cpy_len - 1] = '~';
		return -ENOMEM;
	}

	return 0;
#else
	return -ENOTSUP;
#endif
}
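
/* Example (illustrative sketch, not part of this file): setting the global
 * tag that is prepended to log output. Requires CONFIG_LOG_TAG_MAX_LEN > 0;
 * a tag longer than the capacity is truncated, marked with '~' and reported
 * with -ENOMEM:
 *
 *	int err = log_set_tag("dev-A");
 *
 *	if (err == -ENOMEM) {
 *		// tag was truncated to CONFIG_LOG_TAG_MAX_LEN characters
 *	} else if (err == -ENOTSUP) {
 *		// tagging disabled (CONFIG_LOG_TAG_MAX_LEN == 0)
 *	}
 */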

int log_mem_get_usage(uint32_t *buf_size, uint32_t *usage)
{
	__ASSERT_NO_MSG(buf_size != NULL);
	__ASSERT_NO_MSG(usage != NULL);

	if (!IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		return -EINVAL;
	}

	mpsc_pbuf_get_utilization(&log_buffer, buf_size, usage);

	return 0;
}

int log_mem_get_max_usage(uint32_t *max)
{
	__ASSERT_NO_MSG(max != NULL);

	if (!IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		return -EINVAL;
	}

	return mpsc_pbuf_get_max_utilization(&log_buffer, max);
}
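
/* Example (illustrative sketch, not part of this file): monitoring log buffer
 * utilization in deferred mode. Maximum-usage tracking additionally requires
 * CONFIG_LOG_MEM_UTILIZATION, which sets MPSC_PBUF_MAX_UTILIZATION in the
 * buffer flags above:
 *
 *	uint32_t size, usage, max;
 *
 *	if (log_mem_get_usage(&size, &usage) == 0) {
 *		printk("log buffer: %u/%u bytes used\n", usage, size);
 *	}
 *	if (log_mem_get_max_usage(&max) == 0) {
 *		printk("log buffer high watermark: %u bytes\n", max);
 *	}
 */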

static void log_backend_notify_all(enum log_backend_evt event,
				   union log_backend_evt_arg *arg)
{
	STRUCT_SECTION_FOREACH(log_backend, backend) {
		log_backend_notify(backend, event, arg);
	}
}

static void log_process_thread_timer_expiry_fn(struct k_timer *timer)
{
	k_sem_give(&log_process_thread_sem);
}

static void log_process_thread_func(void *dummy1, void *dummy2, void *dummy3)
{
	__ASSERT_NO_MSG(log_backend_count_get() > 0);
	uint32_t links_active_mask = 0xFFFFFFFF;
	uint8_t domain_offset = 0;
	uint32_t activate_mask = z_log_init(false, false);
	/* If some backends are not activated yet, set up a periodic thread
	 * wake-up to poll backends for readiness. The period is arbitrary.
	 * If all backends are ready, the periodic wake-up is not needed.
	 */
	k_timeout_t timeout = (activate_mask != 0) ? K_MSEC(50) : K_FOREVER;
	bool processed_any = false;
	thread_set(k_current_get());

	/* The logging thread is woken up periodically until all backends that
	 * should be autostarted are ready.
	 */
	while (true) {
		if (activate_mask) {
			activate_mask = activate_foreach_backend(activate_mask);
			if (!activate_mask) {
				/* Periodic wake-up is no longer needed since
				 * all backends are ready.
				 */
				timeout = K_FOREVER;
			}
		}

		/* Keep trying to activate links until all links are active. */
		if (IS_ENABLED(CONFIG_LOG_MULTIDOMAIN) && links_active_mask) {
			links_active_mask =
				z_log_links_activate(links_active_mask, &domain_offset);
		}

		if (log_process() == false) {
			if (processed_any) {
				processed_any = false;
				log_backend_notify_all(LOG_BACKEND_EVT_PROCESS_THREAD_DONE, NULL);
			}
			(void)k_sem_take(&log_process_thread_sem, timeout);
		} else {
			processed_any = true;
		}
	}
}

K_KERNEL_STACK_DEFINE(logging_stack, CONFIG_LOG_PROCESS_THREAD_STACK_SIZE);
struct k_thread logging_thread;

static int enable_logger(void)
{
	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD)) {
		k_timer_init(&log_process_thread_timer,
				log_process_thread_timer_expiry_fn, NULL);
		/* start logging thread */
		k_thread_create(&logging_thread, logging_stack,
				K_KERNEL_STACK_SIZEOF(logging_stack),
				log_process_thread_func, NULL, NULL, NULL,
				LOG_PROCESS_THREAD_PRIORITY, 0,
				COND_CODE_1(CONFIG_LOG_PROCESS_THREAD,
					K_MSEC(CONFIG_LOG_PROCESS_THREAD_STARTUP_DELAY_MS),
					K_NO_WAIT));
		k_thread_name_set(&logging_thread, "logging");
	} else {
		(void)z_log_init(false, false);
	}

	return 0;
}

#ifdef CONFIG_LOG_MODE_DEFERRED
void log_flush(void)
{
	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD)) {
		while (atomic_get(&buffered_cnt)) {
			log_thread_trigger();
			k_sleep(K_USEC(CONFIG_LOG_FLUSH_SLEEP_US));
		}
	} else {
		while (LOG_PROCESS()) {
		}
	}
}
#endif
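
/* Example (illustrative sketch, not part of this file): flushing buffered
 * messages before an intentional reboot so nothing queued in deferred mode
 * is lost:
 *
 *	LOG_INF("rebooting");
 *	log_flush();	// returns once the buffered count drops to zero
 *	sys_reboot(SYS_REBOOT_COLD);	// from <zephyr/sys/reboot.h>
 */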

SYS_INIT(enable_logger, POST_KERNEL, CONFIG_LOG_CORE_INIT_PRIORITY);