/*
 * Copyright (c) 2018 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/logging/log.h>
#include <zephyr/logging/log_backend.h>
#include <zephyr/logging/log_ctrl.h>
#include <zephyr/logging/log_output.h>
#include <zephyr/logging/log_internal.h>
#include <zephyr/sys/mpsc_pbuf.h>
#include <zephyr/logging/log_link.h>
#include <zephyr/sys/printk.h>
#include <zephyr/sys_clock.h>
#include <zephyr/init.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/iterable_sections.h>
#include <ctype.h>
#include <zephyr/logging/log_frontend.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/logging/log_output_dict.h>
#include <zephyr/logging/log_output_custom.h>
#include <zephyr/linker/utils.h>

#ifdef CONFIG_LOG_TIMESTAMP_USE_REALTIME
#include <zephyr/posix/time.h>
#endif

LOG_MODULE_REGISTER(log);

#ifndef CONFIG_LOG_PROCESS_THREAD_SLEEP_MS
#define CONFIG_LOG_PROCESS_THREAD_SLEEP_MS 0
#endif

#ifndef CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD
#define CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD 0
#endif

#ifndef CONFIG_LOG_PROCESS_THREAD_STACK_SIZE
#define CONFIG_LOG_PROCESS_THREAD_STACK_SIZE 1
#endif

#ifndef CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS
#define CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS 0
#endif

#ifndef CONFIG_LOG_PROCESSING_LATENCY_US
#define CONFIG_LOG_PROCESSING_LATENCY_US 0
#endif

#ifndef CONFIG_LOG_BUFFER_SIZE
#define CONFIG_LOG_BUFFER_SIZE 4
#endif

#ifdef CONFIG_LOG_PROCESS_THREAD_CUSTOM_PRIORITY
#define LOG_PROCESS_THREAD_PRIORITY CONFIG_LOG_PROCESS_THREAD_PRIORITY
#else
#define LOG_PROCESS_THREAD_PRIORITY K_LOWEST_APPLICATION_THREAD_PRIO
#endif

#ifndef CONFIG_LOG_TAG_MAX_LEN
#define CONFIG_LOG_TAG_MAX_LEN 0
#endif

#ifndef CONFIG_LOG_FAILURE_REPORT_PERIOD
#define CONFIG_LOG_FAILURE_REPORT_PERIOD 0
#endif

#ifndef CONFIG_LOG_ALWAYS_RUNTIME
BUILD_ASSERT(!IS_ENABLED(CONFIG_NO_OPTIMIZATIONS),
	     "CONFIG_LOG_ALWAYS_RUNTIME must be enabled when "
	     "CONFIG_NO_OPTIMIZATIONS is set");
BUILD_ASSERT(!IS_ENABLED(CONFIG_LOG_MODE_IMMEDIATE),
	     "CONFIG_LOG_ALWAYS_RUNTIME must be enabled when "
	     "CONFIG_LOG_MODE_IMMEDIATE is set");
#endif

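/* Map each message output format to its processing function. An entry is
 * NULL when the corresponding formatter is not compiled in.
 */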
static const log_format_func_t format_table[] = {
	[LOG_OUTPUT_TEXT] = IS_ENABLED(CONFIG_LOG_OUTPUT) ?
						log_output_msg_process : NULL,
	[LOG_OUTPUT_SYST] = IS_ENABLED(CONFIG_LOG_MIPI_SYST_ENABLE) ?
						log_output_msg_syst_process : NULL,
	[LOG_OUTPUT_DICT] = IS_ENABLED(CONFIG_LOG_DICTIONARY_SUPPORT) ?
						log_dict_output_msg_process : NULL,
	[LOG_OUTPUT_CUSTOM] = IS_ENABLED(CONFIG_LOG_CUSTOM_FORMAT_SUPPORT) ?
						log_custom_output_msg_process : NULL,
};

log_format_func_t log_format_func_t_get(uint32_t log_type)
{
	return format_table[log_type];
}

size_t log_format_table_size(void)
{
	return ARRAY_SIZE(format_table);
}
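
/* Example (illustrative sketch, not used by this file): look up a formatter
 * and fall back to plain text when the requested one is not compiled in.
 *
 *	log_format_func_t fn = log_format_func_t_get(LOG_OUTPUT_DICT);
 *
 *	if (fn == NULL) {
 *		fn = log_format_func_t_get(LOG_OUTPUT_TEXT);
 *	}
 */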

K_SEM_DEFINE(log_process_thread_sem, 0, 1);

static atomic_t initialized;
static bool panic_mode;
static bool backend_attached;
static atomic_t buffered_cnt;
static atomic_t dropped_cnt;
static k_tid_t proc_tid;
static struct k_timer log_process_thread_timer;

static log_timestamp_t dummy_timestamp(void);
static log_timestamp_get_t timestamp_func = dummy_timestamp;
static uint32_t timestamp_freq;
static log_timestamp_t proc_latency;
static log_timestamp_t prev_timestamp;
static atomic_t unordered_cnt;
static uint64_t last_failure_report;
static struct k_spinlock process_lock;

static STRUCT_SECTION_ITERABLE(log_msg_ptr, log_msg_ptr);
static STRUCT_SECTION_ITERABLE_ALTERNATE(log_mpsc_pbuf, mpsc_pbuf_buffer, log_buffer);
static struct mpsc_pbuf_buffer *curr_log_buffer;

#ifdef CONFIG_MPSC_PBUF
static uint32_t __aligned(Z_LOG_MSG_ALIGNMENT)
	buf32[CONFIG_LOG_BUFFER_SIZE / sizeof(int)];

static void z_log_notify_drop(const struct mpsc_pbuf_buffer *buffer,
			      const union mpsc_pbuf_generic *item);

static const struct mpsc_pbuf_buffer_config mpsc_config = {
	.buf = (uint32_t *)buf32,
	.size = ARRAY_SIZE(buf32),
	.notify_drop = z_log_notify_drop,
	.get_wlen = log_msg_generic_get_wlen,
	.flags = (IS_ENABLED(CONFIG_LOG_MODE_OVERFLOW) ?
		  MPSC_PBUF_MODE_OVERWRITE : 0) |
		 (IS_ENABLED(CONFIG_LOG_MEM_UTILIZATION) ?
		  MPSC_PBUF_MAX_UTILIZATION : 0)
};
#endif

/* Check that default tag can fit in tag buffer. */
COND_CODE_0(CONFIG_LOG_TAG_MAX_LEN, (),
	(BUILD_ASSERT(sizeof(CONFIG_LOG_TAG_DEFAULT) <= CONFIG_LOG_TAG_MAX_LEN + 1,
		      "Default string longer than tag capacity")));

static char tag[CONFIG_LOG_TAG_MAX_LEN + 1] =
	COND_CODE_0(CONFIG_LOG_TAG_MAX_LEN, ({}), (CONFIG_LOG_TAG_DEFAULT));

static void msg_process(union log_msg_generic *msg);

static log_timestamp_t dummy_timestamp(void)
{
	return 0;
}

log_timestamp_t z_log_timestamp(void)
{
	return timestamp_func();
}

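/* Account for a newly committed message: process it in place when in panic
 * mode, otherwise release the processing thread directly or arm the
 * processing timer, depending on the trigger threshold configuration.
 */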
static void z_log_msg_post_finalize(void)
{
	atomic_val_t cnt = atomic_inc(&buffered_cnt);

	if (panic_mode) {
		k_spinlock_key_t key = k_spin_lock(&process_lock);
		(void)log_process();

		k_spin_unlock(&process_lock, key);
	} else if (proc_tid != NULL) {
		/*
		 * If CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD == 1, the timer
		 * is never needed; the processing thread is released after
		 * every posted message.
		 */
		if (CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD == 1) {
			if (cnt == 0) {
				k_sem_give(&log_process_thread_sem);
			}
		} else {
			if (cnt == 0) {
				k_timer_start(&log_process_thread_timer,
					      K_MSEC(CONFIG_LOG_PROCESS_THREAD_SLEEP_MS),
					      K_NO_WAIT);
			} else if (CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD &&
				   (cnt + 1) == CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD) {
				k_timer_stop(&log_process_thread_timer);
				k_sem_give(&log_process_thread_sem);
			} else {
				/* No action needed. Message processing will be triggered by the
				 * timeout or when the number of buffered messages reaches the
				 * threshold.
				 */
			}
		}
	}
}

const struct log_backend *log_format_set_all_active_backends(size_t log_type)
{
	const struct log_backend *failed_backend = NULL;

	STRUCT_SECTION_FOREACH(log_backend, backend) {
		if (log_backend_is_active(backend)) {
			int ret = log_backend_format_set(backend, log_type);

			if (ret != 0) {
				failed_backend = backend;
			}
		}
	}
	return failed_backend;
}
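
/* Example (illustrative sketch): switch every active backend to the
 * dictionary format; `name` is the standard identifier field of
 * struct log_backend.
 *
 *	const struct log_backend *failed =
 *		log_format_set_all_active_backends(LOG_OUTPUT_DICT);
 *
 *	if (failed != NULL) {
 *		printk("backend %s rejected the format\n", failed->name);
 *	}
 */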

void z_log_vprintk(const char *fmt, va_list ap)
{
	if (!IS_ENABLED(CONFIG_LOG_PRINTK)) {
		return;
	}

	z_log_msg_runtime_vcreate(Z_LOG_LOCAL_DOMAIN_ID, NULL,
				   LOG_LEVEL_INTERNAL_RAW_STRING, NULL, 0,
				   Z_LOG_MSG_CBPRINTF_FLAGS(0),
				   fmt, ap);
}

#ifndef CONFIG_LOG_TIMESTAMP_USE_REALTIME
static log_timestamp_t default_get_timestamp(void)
{
	return IS_ENABLED(CONFIG_LOG_TIMESTAMP_64BIT) ?
		sys_clock_tick_get() : k_cycle_get_32();
}

static log_timestamp_t default_lf_get_timestamp(void)
{
	return IS_ENABLED(CONFIG_LOG_TIMESTAMP_64BIT) ?
		k_uptime_get() : k_uptime_get_32();
}
#else
static log_timestamp_t default_rt_get_timestamp(void)
{
	struct timespec tspec;

	clock_gettime(CLOCK_REALTIME, &tspec);

	return ((uint64_t)tspec.tv_sec * MSEC_PER_SEC) + (tspec.tv_nsec / NSEC_PER_MSEC);
}
#endif /* CONFIG_LOG_TIMESTAMP_USE_REALTIME */

void log_core_init(void)
{
	panic_mode = false;
	dropped_cnt = 0;
	buffered_cnt = 0;

	if (IS_ENABLED(CONFIG_LOG_FRONTEND)) {
		log_frontend_init();

		if (IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING)) {
			for (uint16_t s = 0; s < log_src_cnt_get(0); s++) {
				log_frontend_filter_set(s, CONFIG_LOG_MAX_LEVEL);
			}
		}

		if (IS_ENABLED(CONFIG_LOG_FRONTEND_ONLY)) {
			return;
		}
	}

	/* Set default timestamp. */
#ifdef CONFIG_LOG_TIMESTAMP_USE_REALTIME
	log_set_timestamp_func(default_rt_get_timestamp, 1000U);
#else
	if (sys_clock_hw_cycles_per_sec() > 1000000) {
		log_set_timestamp_func(default_lf_get_timestamp, 1000U);
	} else {
		uint32_t freq = IS_ENABLED(CONFIG_LOG_TIMESTAMP_64BIT) ?
			CONFIG_SYS_CLOCK_TICKS_PER_SEC : sys_clock_hw_cycles_per_sec();
		log_set_timestamp_func(default_get_timestamp, freq);
	}
#endif /* CONFIG_LOG_TIMESTAMP_USE_REALTIME */

	if (IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		z_log_msg_init();
	}

	if (IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING)) {
		z_log_runtime_filters_init();
	}
}

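/* Try to enable every autostart backend whose bit is set in @p mask. Bits of
 * backends that became ready and were enabled are cleared; the remaining
 * mask is returned so the caller can retry later.
 */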
static uint32_t activate_foreach_backend(uint32_t mask)
{
	uint32_t mask_cpy = mask;

	while (mask_cpy) {
		uint32_t i = __builtin_ctz(mask_cpy);
		const struct log_backend *backend = log_backend_get(i);

		mask_cpy &= ~BIT(i);
		if (backend->autostart && (log_backend_is_ready(backend) == 0)) {
			mask &= ~BIT(i);
			log_backend_enable(backend,
					   backend->cb->ctx,
					   CONFIG_LOG_MAX_LEVEL);
		}
	}

	return mask;
}

static uint32_t z_log_init(bool blocking, bool can_sleep)
{
	uint32_t mask = 0;

	if (IS_ENABLED(CONFIG_LOG_FRONTEND_ONLY)) {
		return 0;
	}

	__ASSERT_NO_MSG(log_backend_count_get() < LOG_FILTERS_MAX_BACKENDS);

	if (atomic_inc(&initialized) != 0) {
		return 0;
	}

	if (IS_ENABLED(CONFIG_LOG_MULTIDOMAIN)) {
		z_log_links_initiate();
	}

	int backend_index = 0;

	/* Activate autostart backends */
	STRUCT_SECTION_FOREACH(log_backend, backend) {
		if (backend->autostart) {
			log_backend_init(backend);

			/* If the backend has an activation function, it is
			 * not ready until activated.
			 */
			if (log_backend_is_ready(backend) == 0) {
				log_backend_enable(backend,
						   backend->cb->ctx,
						   CONFIG_LOG_MAX_LEVEL);
			} else {
				mask |= BIT(backend_index);
			}
		}

		++backend_index;
	}

	/* If blocking init, wait until all backends are activated. */
	if (blocking) {
		while (mask) {
			mask = activate_foreach_backend(mask);
			if (IS_ENABLED(CONFIG_MULTITHREADING) && can_sleep) {
				k_msleep(10);
			}
		}
	}

	return mask;
}

void log_init(void)
{
	(void)z_log_init(true, true);
}

void log_thread_trigger(void)
{
	if (IS_ENABLED(CONFIG_LOG_MODE_IMMEDIATE)) {
		return;
	}

	k_timer_stop(&log_process_thread_timer);
	k_sem_give(&log_process_thread_sem);
}

static void thread_set(k_tid_t process_tid)
{
	proc_tid = process_tid;

	if (IS_ENABLED(CONFIG_LOG_MODE_IMMEDIATE)) {
		return;
	}

	if (CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD &&
	    process_tid &&
	    buffered_cnt >= CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD) {
		k_sem_give(&log_process_thread_sem);
	}
}

void log_thread_set(k_tid_t process_tid)
{
	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD)) {
		__ASSERT_NO_MSG(0);
	} else {
		thread_set(process_tid);
	}
}

int log_set_timestamp_func(log_timestamp_get_t timestamp_getter, uint32_t freq)
{
	if (timestamp_getter == NULL) {
		return -EINVAL;
	}

	timestamp_func = timestamp_getter;
	timestamp_freq = freq;
	if (CONFIG_LOG_PROCESSING_LATENCY_US) {
		proc_latency = (freq * CONFIG_LOG_PROCESSING_LATENCY_US) / 1000000;
	}

	if (IS_ENABLED(CONFIG_LOG_OUTPUT)) {
		log_output_timestamp_freq_set(freq);
	}

	return 0;
}
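
/* Example (illustrative sketch): register a custom timestamp source, e.g. a
 * 32768 Hz RTC counter. rtc_counter_get() is a hypothetical application
 * helper, not an API of this subsystem.
 *
 *	static log_timestamp_t rtc_timestamp(void)
 *	{
 *		return (log_timestamp_t)rtc_counter_get();
 *	}
 *
 *	(void)log_set_timestamp_func(rtc_timestamp, 32768U);
 */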

void z_impl_log_panic(void)
{
	if (panic_mode) {
		return;
	}

	/* If the panic happened early, the logger might not be initialized.
	 * Force initialization of the logger and auto-start backends.
	 */
	(void)z_log_init(true, false);

	if (IS_ENABLED(CONFIG_LOG_FRONTEND)) {
		log_frontend_panic();
		if (IS_ENABLED(CONFIG_LOG_FRONTEND_ONLY)) {
			goto out;
		}
	}

	STRUCT_SECTION_FOREACH(log_backend, backend) {
		if (log_backend_is_active(backend)) {
			log_backend_panic(backend);
		}
	}

	if (!IS_ENABLED(CONFIG_LOG_MODE_IMMEDIATE)) {
		/* Flush */
		while (log_process() == true) {
		}
	}

out:
	panic_mode = true;
}
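
/* Example (illustrative sketch): flush pending log messages from a fatal
 * error path before halting; the halt loop is application policy, not part
 * of this API.
 *
 *	void on_fatal_error(void)
 *	{
 *		LOG_ERR("fatal error, halting");
 *		log_panic();
 *		for (;;) {
 *		}
 *	}
 */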

#ifdef CONFIG_USERSPACE
void z_vrfy_log_panic(void)
{
	z_impl_log_panic();
}
#include <zephyr/syscalls/log_panic_mrsh.c>
#endif

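/* Return true if @p msg should be handed to @p backend: non-message items
 * and non-logging levels always pass; otherwise the message level is
 * compared against the backend's runtime filter for that source.
 */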
static bool msg_filter_check(struct log_backend const *backend,
			     union log_msg_generic *msg)
{
	if (!z_log_item_is_msg(msg)) {
		return true;
	}

	if (!IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING)) {
		return true;
	}

	uint32_t backend_level;
	uint8_t level;
	uint8_t domain_id;
	int16_t source_id;

	level = log_msg_get_level(&msg->log);
	domain_id = log_msg_get_domain(&msg->log);
	source_id = log_msg_get_source_id(&msg->log);

	/* Accept all non-logging messages. */
	if (level == LOG_LEVEL_NONE) {
		return true;
	}
	if (source_id >= 0) {
		backend_level = log_filter_get(backend, domain_id, source_id, true);

		return (level <= backend_level);
	} else {
		return true;
	}
}

static void msg_process(union log_msg_generic *msg)
{
	STRUCT_SECTION_FOREACH(log_backend, backend) {
		if (log_backend_is_active(backend) &&
		    msg_filter_check(backend, msg)) {
			log_backend_msg_process(backend, msg);
		}
	}
}

void dropped_notify(void)
{
	uint32_t dropped = z_log_dropped_read_and_clear();

	STRUCT_SECTION_FOREACH(log_backend, backend) {
		if (log_backend_is_active(backend)) {
			log_backend_dropped(backend, dropped);
		}
	}
}

void unordered_notify(void)
{
	uint32_t unordered = atomic_set(&unordered_cnt, 0);

	LOG_WRN("%u unordered messages since last report", unordered);
}

void z_log_notify_backend_enabled(void)
{
	/* Wake up the logger thread after attaching the first backend. The
	 * thread might be blocked with log messages pending.
	 */
	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD) && !backend_attached) {
		k_sem_give(&log_process_thread_sem);
	}

	backend_attached = true;
}

static inline bool z_log_unordered_pending(void)
{
	return IS_ENABLED(CONFIG_LOG_MULTIDOMAIN) && unordered_cnt;
}

bool z_impl_log_process(void)
{
	if (!IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		return false;
	}

	k_timeout_t backoff = K_NO_WAIT;
	union log_msg_generic *msg;

	if (!backend_attached) {
		return false;
	}

	msg = z_log_msg_claim(&backoff);

	if (msg) {
		atomic_dec(&buffered_cnt);
		msg_process(msg);
		z_log_msg_free(msg);
	} else if (CONFIG_LOG_PROCESSING_LATENCY_US > 0 && !K_TIMEOUT_EQ(backoff, K_NO_WAIT)) {
		/* If backoff is requested, it means that there are pending
		 * messages but they are too new and processing shall back off
		 * to allow arrival of newer messages from remote domains.
		 */
		k_timer_start(&log_process_thread_timer, backoff, K_NO_WAIT);

		return false;
	}

	if (IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		bool dropped_pend = z_log_dropped_pending();
		bool unordered_pend = z_log_unordered_pending();

		if ((dropped_pend || unordered_pend) &&
		   (k_uptime_get() - last_failure_report) > CONFIG_LOG_FAILURE_REPORT_PERIOD) {
			if (dropped_pend) {
				dropped_notify();
			}

			if (unordered_pend) {
				unordered_notify();
			}

			last_failure_report += CONFIG_LOG_FAILURE_REPORT_PERIOD;
		}
	}

	return z_log_msg_pending();
}
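
/* Example (illustrative sketch): with CONFIG_LOG_PROCESS_THREAD disabled, an
 * application can drain buffered messages itself; log_process() returns true
 * while more messages are pending.
 *
 *	while (log_process()) {
 *		;
 *	}
 */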

#ifdef CONFIG_USERSPACE
bool z_vrfy_log_process(void)
{
	return z_impl_log_process();
}
#include <zephyr/syscalls/log_process_mrsh.c>
#endif

uint32_t z_impl_log_buffered_cnt(void)
{
	return buffered_cnt;
}

#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_log_buffered_cnt(void)
{
	return z_impl_log_buffered_cnt();
}
#include <zephyr/syscalls/log_buffered_cnt_mrsh.c>
#endif

void z_log_dropped(bool buffered)
{
	atomic_inc(&dropped_cnt);
	if (buffered) {
		atomic_dec(&buffered_cnt);
	}

	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD)) {
		k_timer_stop(&log_process_thread_timer);
		k_sem_give(&log_process_thread_sem);
	}
}

uint32_t z_log_dropped_read_and_clear(void)
{
	return atomic_set(&dropped_cnt, 0);
}

bool z_log_dropped_pending(void)
{
	return dropped_cnt > 0;
}

void z_log_msg_init(void)
{
#ifdef CONFIG_MPSC_PBUF
	mpsc_pbuf_init(&log_buffer, &mpsc_config);
	curr_log_buffer = &log_buffer;
#endif
}

static struct log_msg *msg_alloc(struct mpsc_pbuf_buffer *buffer, uint32_t wlen)
{
	if (!IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		return NULL;
	}

	return (struct log_msg *)mpsc_pbuf_alloc(
		buffer, wlen,
		(CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS == -1)
			? K_FOREVER
			: K_MSEC(CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS));
}

struct log_msg *z_log_msg_alloc(uint32_t wlen)
{
	return msg_alloc(&log_buffer, wlen);
}

static void msg_commit(struct mpsc_pbuf_buffer *buffer, struct log_msg *msg)
{
	union log_msg_generic *m = (union log_msg_generic *)msg;

	if (IS_ENABLED(CONFIG_LOG_MODE_IMMEDIATE)) {
		k_spinlock_key_t key;

		if (IS_ENABLED(CONFIG_LOG_IMMEDIATE_CLEAN_OUTPUT)) {
			key = k_spin_lock(&process_lock);
		}

		msg_process(m);

		if (IS_ENABLED(CONFIG_LOG_IMMEDIATE_CLEAN_OUTPUT)) {
			k_spin_unlock(&process_lock, key);
		}

		return;
	}

#ifdef CONFIG_MPSC_PBUF
	mpsc_pbuf_commit(buffer, &m->buf);
#endif
	z_log_msg_post_finalize();
}

void z_log_msg_commit(struct log_msg *msg)
{
	msg->hdr.timestamp = timestamp_func();
	msg_commit(&log_buffer, msg);
}

union log_msg_generic *z_log_msg_local_claim(void)
{
#ifdef CONFIG_MPSC_PBUF
	return (union log_msg_generic *)mpsc_pbuf_claim(&log_buffer);
#else
	return NULL;
#endif
}

/* If there are buffers dedicated for each link, claim the oldest message (lowest timestamp). */
union log_msg_generic *z_log_msg_claim_oldest(k_timeout_t *backoff)
{
	union log_msg_generic *msg = NULL;
	struct log_msg_ptr *chosen = NULL;
	log_timestamp_t t_min = sizeof(log_timestamp_t) > sizeof(uint32_t) ?
				UINT64_MAX : UINT32_MAX;
	int i = 0;

	/* Iterate over all available buffers and pick the oldest message. */
	STRUCT_SECTION_FOREACH(log_msg_ptr, msg_ptr) {
		struct log_mpsc_pbuf *buf;

		STRUCT_SECTION_GET(log_mpsc_pbuf, i, &buf);

#ifdef CONFIG_MPSC_PBUF
		if (msg_ptr->msg == NULL) {
			msg_ptr->msg = (union log_msg_generic *)mpsc_pbuf_claim(&buf->buf);
		}
#endif

		if (msg_ptr->msg) {
			log_timestamp_t t = log_msg_get_timestamp(&msg_ptr->msg->log);

			if (t < t_min) {
				t_min = t;
				msg = msg_ptr->msg;
				chosen = msg_ptr;
				curr_log_buffer = &buf->buf;
			}
		}
		i++;
	}

	if (msg) {
		if (CONFIG_LOG_PROCESSING_LATENCY_US > 0) {
			int32_t diff = t_min - (timestamp_func() - proc_latency);

			if (diff > 0) {
				/* Entry is too new. Back off for some time to
				 * allow new remote messages to arrive which
				 * may have been captured earlier (but on
				 * another platform). Calculate for how long
				 * processing shall back off.
				 */
				if (timestamp_freq == sys_clock_hw_cycles_per_sec()) {
					*backoff = K_TICKS(diff);
				} else {
					*backoff = K_TICKS((diff * sys_clock_hw_cycles_per_sec()) /
							timestamp_freq);
				}

				return NULL;
			}
		}

		(*chosen).msg = NULL;
	}

	if (t_min < prev_timestamp) {
		atomic_inc(&unordered_cnt);
	}

	prev_timestamp = t_min;

	return msg;
}

union log_msg_generic *z_log_msg_claim(k_timeout_t *backoff)
{
	size_t len;

	STRUCT_SECTION_COUNT(log_mpsc_pbuf, &len);

	/* Use only one buffer if others are not registered. */
	if (IS_ENABLED(CONFIG_LOG_MULTIDOMAIN) && len > 1) {
		return z_log_msg_claim_oldest(backoff);
	}

	return z_log_msg_local_claim();
}

static void msg_free(struct mpsc_pbuf_buffer *buffer, const union log_msg_generic *msg)
{
#ifdef CONFIG_MPSC_PBUF
	mpsc_pbuf_free(buffer, &msg->buf);
#endif
}

void z_log_msg_free(union log_msg_generic *msg)
{
	msg_free(curr_log_buffer, msg);
}

static bool msg_pending(struct mpsc_pbuf_buffer *buffer)
{
#ifdef CONFIG_MPSC_PBUF
	return mpsc_pbuf_is_pending(buffer);
#else
	return false;
#endif
}

bool z_log_msg_pending(void)
{
	size_t len;
	int i = 0;

	STRUCT_SECTION_COUNT(log_mpsc_pbuf, &len);

	if (!IS_ENABLED(CONFIG_LOG_MULTIDOMAIN) || (len == 1)) {
		return msg_pending(&log_buffer);
	}

	STRUCT_SECTION_FOREACH(log_msg_ptr, msg_ptr) {
		struct log_mpsc_pbuf *buf;

		if (msg_ptr->msg) {
			return true;
		}

		STRUCT_SECTION_GET(log_mpsc_pbuf, i, &buf);

		if (msg_pending(&buf->buf)) {
			return true;
		}

		i++;
	}

	return false;
}

void z_log_msg_enqueue(const struct log_link *link, const void *data, size_t len)
{
	struct log_msg *log_msg = (struct log_msg *)data;
	size_t wlen = DIV_ROUND_UP(ROUND_UP(len, Z_LOG_MSG_ALIGNMENT), sizeof(int));
	struct mpsc_pbuf_buffer *mpsc_pbuffer = link->mpsc_pbuf ? link->mpsc_pbuf : &log_buffer;
	struct log_msg *local_msg = msg_alloc(mpsc_pbuffer, wlen);

	if (!local_msg) {
		z_log_dropped(false);
		return;
	}

	log_msg->hdr.desc.valid = 0;
	log_msg->hdr.desc.busy = 0;
	log_msg->hdr.desc.domain += link->ctrl_blk->domain_offset;
	memcpy((void *)local_msg, data, len);
	msg_commit(mpsc_pbuffer, local_msg);
}

const char *z_log_get_tag(void)
{
	return CONFIG_LOG_TAG_MAX_LEN > 0 ? tag : NULL;
}

int log_set_tag(const char *str)
{
#if CONFIG_LOG_TAG_MAX_LEN > 0
	if (str == NULL) {
		return -EINVAL;
	}

	size_t len = strlen(str);
	size_t cpy_len = MIN(len, CONFIG_LOG_TAG_MAX_LEN);

	memcpy(tag, str, cpy_len);
	tag[cpy_len] = '\0';

	if (cpy_len < len) {
		tag[cpy_len - 1] = '~';
		return -ENOMEM;
	}

	return 0;
#else
	return -ENOTSUP;
#endif
}
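
/* Example (illustrative sketch): set a tag that prefixes all log output;
 * -ENOMEM indicates the tag was truncated to CONFIG_LOG_TAG_MAX_LEN.
 *
 *	if (log_set_tag("app") == -ENOMEM) {
 *		LOG_WRN("log tag truncated");
 *	}
 */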

int log_mem_get_usage(uint32_t *buf_size, uint32_t *usage)
{
	__ASSERT_NO_MSG(buf_size != NULL);
	__ASSERT_NO_MSG(usage != NULL);

	if (!IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		return -EINVAL;
	}

	mpsc_pbuf_get_utilization(&log_buffer, buf_size, usage);

	return 0;
}

int log_mem_get_max_usage(uint32_t *max)
{
	__ASSERT_NO_MSG(max != NULL);

	if (!IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		return -EINVAL;
	}

	return mpsc_pbuf_get_max_utilization(&log_buffer, max);
}
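
/* Example (illustrative sketch): report log buffer utilization.
 *
 *	uint32_t buf_size;
 *	uint32_t usage;
 *
 *	if (log_mem_get_usage(&buf_size, &usage) == 0) {
 *		LOG_INF("log buffer: %u/%u bytes used", usage, buf_size);
 *	}
 */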

static void log_backend_notify_all(enum log_backend_evt event,
				   union log_backend_evt_arg *arg)
{
	STRUCT_SECTION_FOREACH(log_backend, backend) {
		log_backend_notify(backend, event, arg);
	}
}

static void log_process_thread_timer_expiry_fn(struct k_timer *timer)
{
	k_sem_give(&log_process_thread_sem);
}

static void log_process_thread_func(void *dummy1, void *dummy2, void *dummy3)
{
	__ASSERT_NO_MSG(log_backend_count_get() > 0);
	uint32_t links_active_mask = 0xFFFFFFFF;
	uint8_t domain_offset = 0;
	uint32_t activate_mask = z_log_init(false, false);
	/* If some backends are not activated yet, set a periodic thread wake
	 * up to poll backends for readiness. The period is arbitrary.
	 * If all backends are ready, the periodic wake up is not needed.
	 */
	k_timeout_t timeout = (activate_mask != 0) ? K_MSEC(50) : K_FOREVER;
	bool processed_any = false;
	thread_set(k_current_get());

	/* The logging thread is woken up periodically until all backends that
	 * should be autostarted are ready.
	 */
	while (true) {
		if (activate_mask) {
			activate_mask = activate_foreach_backend(activate_mask);
			if (!activate_mask) {
				/* Periodic wake up no longer needed since all
				 * backends are ready.
				 */
				timeout = K_FOREVER;
			}
		}

		/* Keep trying to activate links until all links are active. */
		if (IS_ENABLED(CONFIG_LOG_MULTIDOMAIN) && links_active_mask) {
			links_active_mask =
				z_log_links_activate(links_active_mask, &domain_offset);
		}

		if (log_process() == false) {
			if (processed_any) {
				processed_any = false;
				log_backend_notify_all(LOG_BACKEND_EVT_PROCESS_THREAD_DONE, NULL);
			}
			(void)k_sem_take(&log_process_thread_sem, timeout);
		} else {
			processed_any = true;
		}
	}
}

K_KERNEL_STACK_DEFINE(logging_stack, CONFIG_LOG_PROCESS_THREAD_STACK_SIZE);
struct k_thread logging_thread;

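/* SYS_INIT hook: start the log processing thread, or perform a non-blocking
 * logger init when the processing thread is disabled.
 */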
static int enable_logger(void)
{
	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD)) {
		k_timer_init(&log_process_thread_timer,
				log_process_thread_timer_expiry_fn, NULL);
		/* start logging thread */
		k_thread_create(&logging_thread, logging_stack,
				K_KERNEL_STACK_SIZEOF(logging_stack),
				log_process_thread_func, NULL, NULL, NULL,
				LOG_PROCESS_THREAD_PRIORITY, 0,
				COND_CODE_1(CONFIG_LOG_PROCESS_THREAD,
					K_MSEC(CONFIG_LOG_PROCESS_THREAD_STARTUP_DELAY_MS),
					K_NO_WAIT));
		k_thread_name_set(&logging_thread, "logging");
	} else {
		(void)z_log_init(false, false);
	}

	return 0;
}

SYS_INIT(enable_logger, POST_KERNEL, CONFIG_LOG_CORE_INIT_PRIORITY);