/*
 * Copyright (c) 2018 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <zephyr/logging/log.h>
#include <zephyr/logging/log_backend.h>
#include <zephyr/logging/log_ctrl.h>
#include <zephyr/logging/log_output.h>
#include <zephyr/logging/log_internal.h>
#include <zephyr/sys/mpsc_pbuf.h>
#include <zephyr/logging/log_link.h>
#include <zephyr/sys/printk.h>
#include <zephyr/sys_clock.h>
#include <zephyr/init.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/sys/iterable_sections.h>
#include <ctype.h>
#include <zephyr/logging/log_frontend.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/logging/log_output_dict.h>
#include <zephyr/logging/log_output_custom.h>
#include <zephyr/linker/utils.h>

LOG_MODULE_REGISTER(log);

#ifndef CONFIG_LOG_PROCESS_THREAD_SLEEP_MS
#define CONFIG_LOG_PROCESS_THREAD_SLEEP_MS 0
#endif

#ifndef CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD
#define CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD 0
#endif

#ifndef CONFIG_LOG_PROCESS_THREAD_STACK_SIZE
#define CONFIG_LOG_PROCESS_THREAD_STACK_SIZE 1
#endif

#ifndef CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS
#define CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS 0
#endif

#ifndef CONFIG_LOG_PROCESSING_LATENCY_US
#define CONFIG_LOG_PROCESSING_LATENCY_US 0
#endif

#ifndef CONFIG_LOG_BUFFER_SIZE
#define CONFIG_LOG_BUFFER_SIZE 4
#endif

#ifdef CONFIG_LOG_PROCESS_THREAD_CUSTOM_PRIORITY
#define LOG_PROCESS_THREAD_PRIORITY CONFIG_LOG_PROCESS_THREAD_PRIORITY
#else
#define LOG_PROCESS_THREAD_PRIORITY K_LOWEST_APPLICATION_THREAD_PRIO
#endif

#ifndef CONFIG_LOG_TAG_MAX_LEN
#define CONFIG_LOG_TAG_MAX_LEN 0
#endif

#ifndef CONFIG_LOG_FAILURE_REPORT_PERIOD
#define CONFIG_LOG_FAILURE_REPORT_PERIOD 0
#endif

#ifndef CONFIG_LOG_ALWAYS_RUNTIME
BUILD_ASSERT(!IS_ENABLED(CONFIG_NO_OPTIMIZATIONS),
	     "CONFIG_LOG_ALWAYS_RUNTIME must be enabled when "
	     "CONFIG_NO_OPTIMIZATIONS is set");
BUILD_ASSERT(!IS_ENABLED(CONFIG_LOG_MODE_IMMEDIATE),
	     "CONFIG_LOG_ALWAYS_RUNTIME must be enabled when "
	     "CONFIG_LOG_MODE_IMMEDIATE is set");
#endif

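/* Table mapping each LOG_OUTPUT_* format to its message processing function.
 * Entries whose Kconfig option is disabled are NULL, so callers of
 * log_format_func_t_get() must be prepared to handle a NULL result.
 */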
static const log_format_func_t format_table[] = {
	[LOG_OUTPUT_TEXT] = IS_ENABLED(CONFIG_LOG_OUTPUT) ?
						log_output_msg_process : NULL,
	[LOG_OUTPUT_SYST] = IS_ENABLED(CONFIG_LOG_MIPI_SYST_ENABLE) ?
						log_output_msg_syst_process : NULL,
	[LOG_OUTPUT_DICT] = IS_ENABLED(CONFIG_LOG_DICTIONARY_SUPPORT) ?
						log_dict_output_msg_process : NULL,
	[LOG_OUTPUT_CUSTOM] = IS_ENABLED(CONFIG_LOG_CUSTOM_FORMAT_SUPPORT) ?
						log_custom_output_msg_process : NULL,
};

log_format_func_t log_format_func_t_get(uint32_t log_type)
{
	return format_table[log_type];
}

size_t log_format_table_size(void)
{
	return ARRAY_SIZE(format_table);
}

K_SEM_DEFINE(log_process_thread_sem, 0, 1);

static atomic_t initialized;
static bool panic_mode;
static bool backend_attached;
static atomic_t buffered_cnt;
static atomic_t dropped_cnt;
static k_tid_t proc_tid;
static struct k_timer log_process_thread_timer;

static log_timestamp_t dummy_timestamp(void);
static log_timestamp_get_t timestamp_func = dummy_timestamp;
static uint32_t timestamp_freq;
static log_timestamp_t proc_latency;
static log_timestamp_t prev_timestamp;
static atomic_t unordered_cnt;
static uint64_t last_failure_report;

static STRUCT_SECTION_ITERABLE(log_msg_ptr, log_msg_ptr);
static STRUCT_SECTION_ITERABLE_ALTERNATE(log_mpsc_pbuf, mpsc_pbuf_buffer, log_buffer);
static struct mpsc_pbuf_buffer *curr_log_buffer;

#ifdef CONFIG_MPSC_PBUF
static uint32_t __aligned(Z_LOG_MSG_ALIGNMENT)
	buf32[CONFIG_LOG_BUFFER_SIZE / sizeof(int)];

static void z_log_notify_drop(const struct mpsc_pbuf_buffer *buffer,
			      const union mpsc_pbuf_generic *item);

static const struct mpsc_pbuf_buffer_config mpsc_config = {
	.buf = (uint32_t *)buf32,
	.size = ARRAY_SIZE(buf32),
	.notify_drop = z_log_notify_drop,
	.get_wlen = log_msg_generic_get_wlen,
	.flags = (IS_ENABLED(CONFIG_LOG_MODE_OVERFLOW) ?
		  MPSC_PBUF_MODE_OVERWRITE : 0) |
		 (IS_ENABLED(CONFIG_LOG_MEM_UTILIZATION) ?
		  MPSC_PBUF_MAX_UTILIZATION : 0)
};
#endif

/* Check that the default tag can fit in the tag buffer. */
COND_CODE_0(CONFIG_LOG_TAG_MAX_LEN, (),
	(BUILD_ASSERT(sizeof(CONFIG_LOG_TAG_DEFAULT) <= CONFIG_LOG_TAG_MAX_LEN + 1,
		      "Default string longer than tag capacity")));

static char tag[CONFIG_LOG_TAG_MAX_LEN + 1] =
	COND_CODE_0(CONFIG_LOG_TAG_MAX_LEN, ({}), (CONFIG_LOG_TAG_DEFAULT));

static void msg_process(union log_msg_generic *msg);

static log_timestamp_t dummy_timestamp(void)
{
	return 0;
}

log_timestamp_t z_log_timestamp(void)
{
	return timestamp_func();
}

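/* Called after a message is committed in deferred mode. With, for example,
 * CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD=10 and
 * CONFIG_LOG_PROCESS_THREAD_SLEEP_MS=1000 (hypothetical values), the first
 * buffered message starts the timer and the tenth wakes the processing
 * thread immediately; whichever happens first triggers processing.
 */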
static void z_log_msg_post_finalize(void)
{
	atomic_val_t cnt = atomic_inc(&buffered_cnt);

	if (panic_mode) {
		static struct k_spinlock process_lock;
		k_spinlock_key_t key = k_spin_lock(&process_lock);

		(void)log_process();

		k_spin_unlock(&process_lock, key);
	} else if (proc_tid != NULL) {
		/*
		 * If CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD == 1, the timer is
		 * never needed: the processing thread is released after every
		 * posted message.
		 */
		if (CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD == 1) {
			if (cnt == 0) {
				k_sem_give(&log_process_thread_sem);
			}
		} else {
			if (cnt == 0) {
				k_timer_start(&log_process_thread_timer,
					      K_MSEC(CONFIG_LOG_PROCESS_THREAD_SLEEP_MS),
					      K_NO_WAIT);
			} else if (CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD &&
				   (cnt + 1) == CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD) {
				k_timer_stop(&log_process_thread_timer);
				k_sem_give(&log_process_thread_sem);
			} else {
				/* No action needed. Message processing will be
				 * triggered by the timeout or once the number
				 * of buffered messages reaches the threshold.
				 */
			}
		}
	}
}

const struct log_backend *log_format_set_all_active_backends(size_t log_type)
{
	const struct log_backend *failed_backend = NULL;

	STRUCT_SECTION_FOREACH(log_backend, backend) {
		if (log_backend_is_active(backend)) {
			int retCode = log_backend_format_set(backend, log_type);

			if (retCode != 0) {
				failed_backend = backend;
			}
		}
	}

	return failed_backend;
}

void z_log_vprintk(const char *fmt, va_list ap)
{
	if (!IS_ENABLED(CONFIG_LOG_PRINTK)) {
		return;
	}

	z_log_msg_runtime_vcreate(Z_LOG_LOCAL_DOMAIN_ID, NULL,
				  LOG_LEVEL_INTERNAL_RAW_STRING, NULL, 0,
				  Z_LOG_MSG_CBPRINTF_FLAGS(0),
				  fmt, ap);
}

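/* Default timestamp sources. log_core_init() installs the millisecond uptime
 * clock (default_lf_get_timestamp) when the hardware cycle counter runs
 * faster than 1 MHz, presumably to keep timestamps from growing or wrapping
 * too quickly, and the tick/cycle based source otherwise.
 */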
static log_timestamp_t default_get_timestamp(void)
{
	return IS_ENABLED(CONFIG_LOG_TIMESTAMP_64BIT) ?
		sys_clock_tick_get() : k_cycle_get_32();
}

static log_timestamp_t default_lf_get_timestamp(void)
{
	return IS_ENABLED(CONFIG_LOG_TIMESTAMP_64BIT) ?
		k_uptime_get() : k_uptime_get_32();
}

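/* One-time core initialization: frontend, default timestamp source, the
 * deferred message buffer and runtime filters. Backends are handled
 * separately in z_log_init().
 */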
void log_core_init(void)
{
	panic_mode = false;
	dropped_cnt = 0;
	buffered_cnt = 0;

	if (IS_ENABLED(CONFIG_LOG_FRONTEND)) {
		log_frontend_init();

		for (uint16_t s = 0; s < log_src_cnt_get(0); s++) {
			log_frontend_filter_set(s, CONFIG_LOG_MAX_LEVEL);
		}

		if (IS_ENABLED(CONFIG_LOG_FRONTEND_ONLY)) {
			return;
		}
	}

	/* Set the default timestamp source. */
	if (sys_clock_hw_cycles_per_sec() > 1000000) {
		log_set_timestamp_func(default_lf_get_timestamp, 1000U);
	} else {
		uint32_t freq = IS_ENABLED(CONFIG_LOG_TIMESTAMP_64BIT) ?
			CONFIG_SYS_CLOCK_TICKS_PER_SEC : sys_clock_hw_cycles_per_sec();

		log_set_timestamp_func(default_get_timestamp, freq);
	}

	if (IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		z_log_msg_init();
	}

	if (IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING)) {
		z_log_runtime_filters_init();
	}
}

static uint32_t activate_foreach_backend(uint32_t mask)
{
	uint32_t mask_cpy = mask;

	while (mask_cpy) {
		uint32_t i = __builtin_ctz(mask_cpy);
		const struct log_backend *backend = log_backend_get(i);

		mask_cpy &= ~BIT(i);
		if (backend->autostart && (log_backend_is_ready(backend) == 0)) {
			mask &= ~BIT(i);
			log_backend_enable(backend,
					   backend->cb->ctx,
					   CONFIG_LOG_MAX_LEVEL);
		}
	}

	return mask;
}

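/* Initialize the logger and enable autostart backends. Returns a bitmask of
 * autostart backends that are not yet ready; the caller (log_init() or the
 * processing thread) keeps polling that mask via activate_foreach_backend()
 * until it is empty.
 */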
static uint32_t z_log_init(bool blocking, bool can_sleep)
{
	uint32_t mask = 0;

	if (IS_ENABLED(CONFIG_LOG_FRONTEND_ONLY)) {
		return 0;
	}

	__ASSERT_NO_MSG(log_backend_count_get() < LOG_FILTERS_MAX_BACKENDS);

	if (atomic_inc(&initialized) != 0) {
		return 0;
	}

	if (IS_ENABLED(CONFIG_LOG_MULTIDOMAIN)) {
		z_log_links_initiate();
	}

	int i = 0;

	/* Assign ids to backends. */
	STRUCT_SECTION_FOREACH(log_backend, backend) {
		if (backend->autostart) {
			log_backend_init(backend);

			/* If the backend has an activation function, it is not
			 * ready until activated.
			 */
			if (log_backend_is_ready(backend) == 0) {
				log_backend_enable(backend,
						   backend->cb->ctx,
						   CONFIG_LOG_MAX_LEVEL);
			} else {
				mask |= BIT(i);
			}

			i++;
		}
	}

	/* If init is blocking, wait until all backends are activated. */
	if (blocking) {
		while (mask) {
			mask = activate_foreach_backend(mask);
			if (IS_ENABLED(CONFIG_MULTITHREADING) && can_sleep) {
				k_msleep(10);
			}
		}
	}

	return mask;
}

void log_init(void)
{
	(void)z_log_init(true, true);
}

static void thread_set(k_tid_t process_tid)
{
	proc_tid = process_tid;

	if (IS_ENABLED(CONFIG_LOG_MODE_IMMEDIATE)) {
		return;
	}

	if (CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD &&
	    process_tid &&
	    buffered_cnt >= CONFIG_LOG_PROCESS_TRIGGER_THRESHOLD) {
		k_sem_give(&log_process_thread_sem);
	}
}

void log_thread_set(k_tid_t process_tid)
{
	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD)) {
		__ASSERT_NO_MSG(0);
	} else {
		thread_set(process_tid);
	}
}

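/* A custom timestamp source can be installed at runtime. A minimal sketch,
 * assuming a hypothetical 32 kHz RTC read function rtc_read32():
 *
 *	static log_timestamp_t rtc_timestamp_get(void)
 *	{
 *		return rtc_read32();
 *	}
 *
 *	(void)log_set_timestamp_func(rtc_timestamp_get, 32768U);
 *
 * rtc_read32() is illustrative only; any monotonic counter works, as long as
 * the reported frequency matches it.
 */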
int log_set_timestamp_func(log_timestamp_get_t timestamp_getter, uint32_t freq)
{
	if (timestamp_getter == NULL) {
		return -EINVAL;
	}

	timestamp_func = timestamp_getter;
	timestamp_freq = freq;
	if (CONFIG_LOG_PROCESSING_LATENCY_US) {
		proc_latency = (freq * CONFIG_LOG_PROCESSING_LATENCY_US) / 1000000;
	}

	if (IS_ENABLED(CONFIG_LOG_OUTPUT)) {
		log_output_timestamp_freq_set(freq);
	}

	return 0;
}

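/* Switch to panic mode: force initialization if the panic happened early,
 * put the frontend and all active backends in panic (synchronous) mode and
 * flush everything that is already buffered.
 */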
void z_impl_log_panic(void)
{
	if (panic_mode) {
		return;
	}

	/* If the panic happened early, the logger might not be initialized
	 * yet. Force initialization of the logger and auto-start backends.
	 */
	(void)z_log_init(true, false);

	if (IS_ENABLED(CONFIG_LOG_FRONTEND)) {
		log_frontend_panic();
		if (IS_ENABLED(CONFIG_LOG_FRONTEND_ONLY)) {
			goto out;
		}
	}

	STRUCT_SECTION_FOREACH(log_backend, backend) {
		if (log_backend_is_active(backend)) {
			log_backend_panic(backend);
		}
	}

	if (!IS_ENABLED(CONFIG_LOG_MODE_IMMEDIATE)) {
		/* Flush everything that is already buffered. */
		while (log_process() == true) {
		}
	}

out:
	panic_mode = true;
}

#ifdef CONFIG_USERSPACE
void z_vrfy_log_panic(void)
{
	z_impl_log_panic();
}
#include <syscalls/log_panic_mrsh.c>
#endif

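/* Per-backend, per-message runtime filter check. Compile-time filtering has
 * already happened at the log macro call site; this only compares the
 * message level with the backend's runtime filter for the message's source.
 */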
static bool msg_filter_check(struct log_backend const *backend,
			     union log_msg_generic *msg)
{
	if (!z_log_item_is_msg(msg)) {
		return true;
	}

	if (!IS_ENABLED(CONFIG_LOG_RUNTIME_FILTERING)) {
		return true;
	}

	uint32_t backend_level;
	uint8_t level;
	uint8_t domain_id;
	int16_t source_id;
	struct log_source_dynamic_data *source;

	source = (struct log_source_dynamic_data *)log_msg_get_source(&msg->log);
	level = log_msg_get_level(&msg->log);
	domain_id = log_msg_get_domain(&msg->log);

	/* Accept all non-logging messages. */
	if (level == LOG_LEVEL_NONE) {
		return true;
	}

	if (source) {
		source_id = log_dynamic_source_id(source);
		backend_level = log_filter_get(backend, domain_id, source_id, true);

		return (level <= backend_level);
	} else {
		return true;
	}
}

static void msg_process(union log_msg_generic *msg)
{
	STRUCT_SECTION_FOREACH(log_backend, backend) {
		if (log_backend_is_active(backend) &&
		    msg_filter_check(backend, msg)) {
			log_backend_msg_process(backend, msg);
		}
	}
}

void dropped_notify(void)
{
	uint32_t dropped = z_log_dropped_read_and_clear();

	STRUCT_SECTION_FOREACH(log_backend, backend) {
		if (log_backend_is_active(backend)) {
			log_backend_dropped(backend, dropped);
		}
	}
}

void unordered_notify(void)
{
	uint32_t unordered = atomic_set(&unordered_cnt, 0);

	LOG_WRN("%d unordered messages since last report", unordered);
}

void z_log_notify_backend_enabled(void)
{
	/* Wake up the logger thread after the first backend is attached. The
	 * thread might be blocked with log messages pending.
	 */
	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD) && !backend_attached) {
		k_sem_give(&log_process_thread_sem);
	}

	backend_attached = true;
}

static inline bool z_log_unordered_pending(void)
{
	return IS_ENABLED(CONFIG_LOG_MULTIDOMAIN) && unordered_cnt;
}

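/* Process a single buffered message. When CONFIG_LOG_PROCESS_THREAD is
 * disabled, an application thread is expected to pump this itself. A minimal
 * sketch of such a loop (illustrative only):
 *
 *	while (log_process()) {
 *		;
 *	}
 *
 * The return value indicates whether more messages are pending.
 */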
bool z_impl_log_process(void)
{
	if (!IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		return false;
	}

	k_timeout_t backoff = K_NO_WAIT;
	union log_msg_generic *msg;

	if (!backend_attached) {
		return false;
	}

	msg = z_log_msg_claim(&backoff);

	if (msg) {
		atomic_dec(&buffered_cnt);
		msg_process(msg);
		z_log_msg_free(msg);
	} else if (CONFIG_LOG_PROCESSING_LATENCY_US > 0 && !K_TIMEOUT_EQ(backoff, K_NO_WAIT)) {
		/* If a backoff is requested, there are pending messages but
		 * they are too new; processing shall back off to allow the
		 * arrival of older messages from remote domains.
		 */
		k_timer_start(&log_process_thread_timer, backoff, K_NO_WAIT);

		return false;
	}

	if (IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		bool dropped_pend = z_log_dropped_pending();
		bool unordered_pend = z_log_unordered_pending();

		if ((dropped_pend || unordered_pend) &&
		   (k_uptime_get() - last_failure_report) > CONFIG_LOG_FAILURE_REPORT_PERIOD) {
			if (dropped_pend) {
				dropped_notify();
			}

			if (unordered_pend) {
				unordered_notify();
			}

			/* Update inside the conditional so that reports stay
			 * rate limited to one per period.
			 */
			last_failure_report += CONFIG_LOG_FAILURE_REPORT_PERIOD;
		}
	}

	return z_log_msg_pending();
}

#ifdef CONFIG_USERSPACE
bool z_vrfy_log_process(void)
{
	return z_impl_log_process();
}
#include <syscalls/log_process_mrsh.c>
#endif

uint32_t z_impl_log_buffered_cnt(void)
{
	return buffered_cnt;
}

#ifdef CONFIG_USERSPACE
uint32_t z_vrfy_log_buffered_cnt(void)
{
	return z_impl_log_buffered_cnt();
}
#include <syscalls/log_buffered_cnt_mrsh.c>
#endif

void z_log_dropped(bool buffered)
{
	atomic_inc(&dropped_cnt);
	if (buffered) {
		atomic_dec(&buffered_cnt);
	}
}

uint32_t z_log_dropped_read_and_clear(void)
{
	return atomic_set(&dropped_cnt, 0);
}

bool z_log_dropped_pending(void)
{
	return dropped_cnt > 0;
}

void z_log_msg_init(void)
{
#ifdef CONFIG_MPSC_PBUF
	mpsc_pbuf_init(&log_buffer, &mpsc_config);
	curr_log_buffer = &log_buffer;
#endif
}

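/* Allocate space for a message in the given buffer. In thread context the
 * allocation may wait, depending on CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS:
 * forever (-1), not at all (0) or up to the configured number of
 * milliseconds. On failure the caller counts the message as dropped.
 */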
static struct log_msg *msg_alloc(struct mpsc_pbuf_buffer *buffer, uint32_t wlen)
{
	if (!IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		return NULL;
	}

	return (struct log_msg *)mpsc_pbuf_alloc(
		buffer, wlen,
		(CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS == -1)
			? K_FOREVER
			: K_MSEC(CONFIG_LOG_BLOCK_IN_THREAD_TIMEOUT_MS));
}

struct log_msg *z_log_msg_alloc(uint32_t wlen)
{
	return msg_alloc(&log_buffer, wlen);
}

static void msg_commit(struct mpsc_pbuf_buffer *buffer, struct log_msg *msg)
{
	union log_msg_generic *m = (union log_msg_generic *)msg;

	if (IS_ENABLED(CONFIG_LOG_MODE_IMMEDIATE)) {
		msg_process(m);

		return;
	}

#ifdef CONFIG_MPSC_PBUF
	mpsc_pbuf_commit(buffer, &m->buf);
#endif
	z_log_msg_post_finalize();
}

void z_log_msg_commit(struct log_msg *msg)
{
	msg->hdr.timestamp = timestamp_func();
	msg_commit(&log_buffer, msg);
}

union log_msg_generic *z_log_msg_local_claim(void)
{
#ifdef CONFIG_MPSC_PBUF
	return (union log_msg_generic *)mpsc_pbuf_claim(&log_buffer);
#else
	return NULL;
#endif
}

/* If there are buffers dedicated to each link, claim the oldest message
 * (lowest timestamp).
 */
union log_msg_generic *z_log_msg_claim_oldest(k_timeout_t *backoff)
{
	union log_msg_generic *msg = NULL;
	struct log_msg_ptr *chosen = NULL;
	log_timestamp_t t_min = sizeof(log_timestamp_t) > sizeof(uint32_t) ?
				UINT64_MAX : UINT32_MAX;
	int i = 0;

	/* Iterate over all available buffers and pick the oldest message. */
	STRUCT_SECTION_FOREACH(log_msg_ptr, msg_ptr) {
		struct log_mpsc_pbuf *buf;

		STRUCT_SECTION_GET(log_mpsc_pbuf, i, &buf);

#ifdef CONFIG_MPSC_PBUF
		if (msg_ptr->msg == NULL) {
			msg_ptr->msg = (union log_msg_generic *)mpsc_pbuf_claim(&buf->buf);
		}
#endif

		if (msg_ptr->msg) {
			log_timestamp_t t = log_msg_get_timestamp(&msg_ptr->msg->log);

			if (t < t_min) {
				t_min = t;
				msg = msg_ptr->msg;
				chosen = msg_ptr;
				curr_log_buffer = &buf->buf;
			}
		}

		i++;
	}

	if (msg) {
		if (CONFIG_LOG_PROCESSING_LATENCY_US > 0) {
			int32_t diff = t_min - (timestamp_func() - proc_latency);

			if (diff > 0) {
				/* The entry is too new. Back off for some time
				 * to allow remote messages to arrive which may
				 * have been captured earlier (but on another
				 * platform). Calculate for how long processing
				 * shall back off.
				 */
				if (timestamp_freq == sys_clock_hw_cycles_per_sec()) {
					*backoff = K_TICKS(diff);
				} else {
					*backoff = K_TICKS((diff * sys_clock_hw_cycles_per_sec()) /
							timestamp_freq);
				}

				return NULL;
			}
		}

		chosen->msg = NULL;
	}

	if (t_min < prev_timestamp) {
		atomic_inc(&unordered_cnt);
	}

	prev_timestamp = t_min;

	return msg;
}

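/* Claim the next message to process. With a single buffer this is a plain
 * local claim; with multiple domains the oldest message across all link
 * buffers is chosen so that output stays chronologically ordered.
 */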
union log_msg_generic *z_log_msg_claim(k_timeout_t *backoff)
{
	size_t len;

	STRUCT_SECTION_COUNT(log_mpsc_pbuf, &len);

	/* Use only the local buffer if no other buffers are registered. */
	if (IS_ENABLED(CONFIG_LOG_MULTIDOMAIN) && len > 1) {
		return z_log_msg_claim_oldest(backoff);
	}

	return z_log_msg_local_claim();
}

static void msg_free(struct mpsc_pbuf_buffer *buffer, const union log_msg_generic *msg)
{
#ifdef CONFIG_MPSC_PBUF
	mpsc_pbuf_free(buffer, &msg->buf);
#endif
}

void z_log_msg_free(union log_msg_generic *msg)
{
	msg_free(curr_log_buffer, msg);
}

static bool msg_pending(struct mpsc_pbuf_buffer *buffer)
{
#ifdef CONFIG_MPSC_PBUF
	return mpsc_pbuf_is_pending(buffer);
#else
	return false;
#endif
}

bool z_log_msg_pending(void)
{
	size_t len;
	int i = 0;

	STRUCT_SECTION_COUNT(log_mpsc_pbuf, &len);

	if (!IS_ENABLED(CONFIG_LOG_MULTIDOMAIN) || (len == 1)) {
		return msg_pending(&log_buffer);
	}

	STRUCT_SECTION_FOREACH(log_msg_ptr, msg_ptr) {
		struct log_mpsc_pbuf *buf;

		if (msg_ptr->msg) {
			return true;
		}

		STRUCT_SECTION_GET(log_mpsc_pbuf, i, &buf);

		if (msg_pending(&buf->buf)) {
			return true;
		}

		i++;
	}

	return false;
}

void z_log_msg_enqueue(const struct log_link *link, const void *data, size_t len)
{
	struct log_msg *log_msg = (struct log_msg *)data;
	size_t wlen = DIV_ROUND_UP(ROUND_UP(len, Z_LOG_MSG_ALIGNMENT), sizeof(int));
	struct mpsc_pbuf_buffer *mpsc_pbuffer = link->mpsc_pbuf ? link->mpsc_pbuf : &log_buffer;
	struct log_msg *local_msg = msg_alloc(mpsc_pbuffer, wlen);

	if (!local_msg) {
		z_log_dropped(false);
		return;
	}

	/* Fix up the header before copying: the message is not yet valid
	 * locally and its domain id must be remapped by the link's offset.
	 */
	log_msg->hdr.desc.valid = 0;
	log_msg->hdr.desc.busy = 0;
	log_msg->hdr.desc.domain += link->ctrl_blk->domain_offset;
	memcpy((void *)local_msg, data, len);
	msg_commit(mpsc_pbuffer, local_msg);
}

const char *z_log_get_tag(void)
{
	return CONFIG_LOG_TAG_MAX_LEN > 0 ? tag : NULL;
}

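/* Set the global tag prepended to every log message. A minimal usage sketch
 * (requires CONFIG_LOG_TAG_MAX_LEN > 0):
 *
 *	int err = log_set_tag("app");
 *
 * A too-long tag is truncated, its last character replaced with '~' and
 * -ENOMEM is returned.
 */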
int log_set_tag(const char *str)
{
#if CONFIG_LOG_TAG_MAX_LEN > 0
	if (str == NULL) {
		return -EINVAL;
	}

	size_t len = strlen(str);
	size_t cpy_len = MIN(len, CONFIG_LOG_TAG_MAX_LEN);

	memcpy(tag, str, cpy_len);
	tag[cpy_len] = '\0';

	if (cpy_len < len) {
		tag[cpy_len - 1] = '~';
		return -ENOMEM;
	}

	return 0;
#else
	return -ENOTSUP;
#endif
}

int log_mem_get_usage(uint32_t *buf_size, uint32_t *usage)
{
	__ASSERT_NO_MSG(buf_size != NULL);
	__ASSERT_NO_MSG(usage != NULL);

	if (!IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		return -EINVAL;
	}

	mpsc_pbuf_get_utilization(&log_buffer, buf_size, usage);

	return 0;
}

int log_mem_get_max_usage(uint32_t *max)
{
	__ASSERT_NO_MSG(max != NULL);

	if (!IS_ENABLED(CONFIG_LOG_MODE_DEFERRED)) {
		return -EINVAL;
	}

	return mpsc_pbuf_get_max_utilization(&log_buffer, max);
}

static void log_backend_notify_all(enum log_backend_evt event,
				   union log_backend_evt_arg *arg)
{
	STRUCT_SECTION_FOREACH(log_backend, backend) {
		log_backend_notify(backend, event, arg);
	}
}

static void log_process_thread_timer_expiry_fn(struct k_timer *timer)
{
	k_sem_give(&log_process_thread_sem);
}

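/* Main loop of the optional processing thread: finish activating backends
 * and links, then drain messages, sleeping on the semaphore whenever the
 * buffer runs empty.
 */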
static void log_process_thread_func(void *dummy1, void *dummy2, void *dummy3)
{
	__ASSERT_NO_MSG(log_backend_count_get() > 0);
	uint32_t links_active_mask = 0xFFFFFFFF;
	uint8_t domain_offset = 0;
	uint32_t activate_mask = z_log_init(false, false);
	/* If some backends are not activated yet, set up a periodic thread
	 * wake-up to poll them for readiness. The period is arbitrary. If all
	 * backends are ready, no periodic wake-up is needed.
	 */
	k_timeout_t timeout = (activate_mask != 0) ? K_MSEC(50) : K_FOREVER;
	bool processed_any = false;

	thread_set(k_current_get());

	/* The logging thread is woken up periodically until all backends that
	 * should be autostarted are ready.
	 */
	while (true) {
		if (activate_mask) {
			activate_mask = activate_foreach_backend(activate_mask);
			if (!activate_mask) {
				/* The periodic wake-up is no longer needed
				 * since all backends are ready.
				 */
				timeout = K_FOREVER;
			}
		}

		/* Keep trying to activate links until all links are active. */
		if (IS_ENABLED(CONFIG_LOG_MULTIDOMAIN) && links_active_mask) {
			links_active_mask =
				z_log_links_activate(links_active_mask, &domain_offset);
		}

		if (log_process() == false) {
			if (processed_any) {
				processed_any = false;
				log_backend_notify_all(LOG_BACKEND_EVT_PROCESS_THREAD_DONE, NULL);
			}
			(void)k_sem_take(&log_process_thread_sem, timeout);
		} else {
			processed_any = true;
		}
	}
}

K_KERNEL_STACK_DEFINE(logging_stack, CONFIG_LOG_PROCESS_THREAD_STACK_SIZE);
struct k_thread logging_thread;

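/* System init hook: either spawn the processing thread (deferred mode) or
 * just initialize the logger in place. Runs at POST_KERNEL with
 * CONFIG_LOG_CORE_INIT_PRIORITY.
 */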
static int enable_logger(void)
{
	if (IS_ENABLED(CONFIG_LOG_PROCESS_THREAD)) {
		k_timer_init(&log_process_thread_timer,
			     log_process_thread_timer_expiry_fn, NULL);
		/* Start the logging thread. */
		k_thread_create(&logging_thread, logging_stack,
				K_KERNEL_STACK_SIZEOF(logging_stack),
				log_process_thread_func, NULL, NULL, NULL,
				LOG_PROCESS_THREAD_PRIORITY, 0,
				COND_CODE_1(CONFIG_LOG_PROCESS_THREAD,
					K_MSEC(CONFIG_LOG_PROCESS_THREAD_STARTUP_DELAY_MS),
					K_NO_WAIT));
		k_thread_name_set(&logging_thread, "logging");
	} else {
		(void)z_log_init(false, false);
	}

	return 0;
}

SYS_INIT(enable_logger, POST_KERNEL, CONFIG_LOG_CORE_INIT_PRIORITY);