1 /*
2  * Copyright (c) 2017 Oticon A/S
3  * Copyright (c) 2023 Nordic Semiconductor ASA
4  *
5  * SPDX-License-Identifier: Apache-2.0
6  */
7 
8 /*
9  * Native simulator, CPU Thread emulation (nct)
10  */
11 
12 /**
13  * Native simulator single CPU threading emulation,
14  * an *optional* module provided by the Native simulator
15  * the hosted embedded OS / SW can use to emulate the threading
 * context switching which would be handled by an OS CPU AL
17  *
18  * Principle of operation:
19  *
20  * The embedded OS threads are run as a set of native Linux pthreads.
 * The embedded OS only sees one of these threads executing at a time.
22  *
23  * The hosted OS (or its integration into the native simulator) shall call
24  * nct_init() to initialize the state of an instance of this module, and
25  * nct_clean_up() once it desires to destroy it.
26  *
27  * For SOCs with several micro-controllers (AMP) one instance of this module
28  * would be instantiated per simulated uC and embedded OS.
29  *
30  * To create a new embedded thread, the hosted OS shall call nct_new_thread().
31  * To swap to a thread nct_swap_threads(), and to terminate a thread
32  * nct_abort_thread().
33  * The hosted OS can optionally use nct_first_thread_start() to swap
34  * to the "first thread".
35  *
36  * Whenever a thread calls nct_swap_threads(next_thread_idx) it will be blocked,
37  * and the thread identified by next_thread_idx will continue executing.
38  *
39  *
40  * Internal design:
41  *
42  * Which thread is running is controlled using its own semaphore.
43  *
44  * The main part of the execution of each thread will occur in a fully
45  * synchronous and deterministic manner, and only when commanded by
46  * the embedded operating system kernel.
47  *
48  * The creation of a thread will spawn a new pthread whose start
49  * is asynchronous to the rest, until synchronized in nct_wait_until_allowed()
50  * below.
 * Similarly, aborting and canceling threads execute a tail in quite an
 * asynchronous manner.
53  *
54  * This implementation is meant to be portable in between fully compatible
55  * POSIX systems.
56  * A table (threads_table) is used to abstract the native pthreads.
57  * An index in this table is used to identify threads in the IF to the
58  * embedded OS.
59  */
60 
61 #define NCT_DEBUG_PRINTS 0
62 
63 /* For pthread_setname_np() */
64 #undef _GNU_SOURCE
65 #define _GNU_SOURCE
66 #include <stdbool.h>
67 #include <stdlib.h>
68 #include <string.h>
69 #include <stdint.h>
70 #include <pthread.h>
71 #include <semaphore.h>
72 #include <errno.h>
73 #include "nsi_utils.h"
74 #include "nct_if.h"
75 #include "nsi_internal.h"
76 #include "nsi_safe_call.h"
77 
78 #if NCT_DEBUG_PRINTS
79 #define NCT_DEBUG(fmt, ...) nsi_print_trace(PREFIX fmt, __VA_ARGS__)
80 #else
81 #define NCT_DEBUG(...)
82 #endif
83 
/* Prefix for all trace/error messages of this module ("Thread", not "Tread") */
#define PREFIX     "Thread Simulator: "
#define ERPREFIX   PREFIX"error on "
#define NO_MEM_ERR PREFIX"Can't allocate memory\n"
87 
88 #define NCT_ENABLE_CANCEL 1
89 #define NCT_ALLOC_CHUNK_SIZE 64 /* In how big chunks we grow the thread table */
90 #define NCT_REUSE_ABORTED_ENTRIES 0
91 /* For the Zephyr OS, tests/kernel/threads/scheduling/schedule_api fails when setting
92  * NCT_REUSE_ABORTED_ENTRIES => don't set it by now
93  */
94 
struct nct_status_t;

/* One entry of the threads table: the state of one hosted (emulated) thread */
struct threads_table_el {
	/* Pointer to the overall status of the threading emulator instance */
	struct nct_status_t *nct_status;
	struct threads_table_el *next;	/* Pointer to the next element of the table */
	sem_t sema;			/* Semaphore to hold this thread until allowed */
	pthread_t thread;		/* Actual pthread_t as returned by the native kernel */

	int thread_idx;			/* Index of this element in the threads_table */
	int thead_cnt;			/* For debugging: Unique, consecutive, thread number */

	/* NOTUSED: free slot; USED: thread alive; ABORTING: marked to be aborted;
	 * ABORTED: its pthread has exited; FAILED: the hosted thread function returned
	 * (which should never happen)
	 */
	enum {NOTUSED = 0, USED, ABORTING, ABORTED, FAILED} state;
	bool running;	/* (For debugging purposes) Is this the currently running thread */

	/*
	 * Pointer to data from the hosted OS architecture.
	 * What that is, if anything, is up to that the hosted OS
	 */
	void *payload;
};

/* Overall status of one instance of this threading emulator */
struct nct_status_t {
	struct threads_table_el *threads_table;	/* Pointer to the threads table */
	int thread_create_count;		/* (For debugging) Thread creation counter */
	int threads_table_size;			/* Size of threads_table */
	/* Pointer to the hosted OS function to be called when a thread is started */
	void (*fptr)(void *payload);

	/* Index of the thread which is currently allowed to run now
	 * (-1 if no managed thread has been let run yet, see nct_init()/nct_swap_threads())
	 */
	int currently_allowed_thread;

	bool terminate; /* Are we terminating the program == cleaning up */
	bool all_threads_released; /* During termination, have we released all hosted threads */
};
130 
131 static struct threads_table_el *ttable_get_element(struct nct_status_t *this, int index);
132 
133 /**
134  * Helper function, run by a thread which is being ended
135  */
/**
 * Terminate the calling pthread.
 *
 * The thread detaches itself first so no other thread ever needs to join it
 * to reclaim its resources.
 */
static void nct_exit_this_thread(void)
{
	pthread_t self = pthread_self();

	pthread_detach(self);
	pthread_exit(NULL);
}
142 
143 /*
144  * Wait for the semaphore, retrying if we are interrupted by a signal
145  */
nct_sem_rewait(sem_t * semaphore)146 NSI_INLINE int nct_sem_rewait(sem_t *semaphore)
147 {
148 	int ret;
149 
150 	while (((ret = sem_wait(semaphore)) == -1) && (errno == EINTR)) {
151 		/* Restart wait if we were interrupted */
152 	}
153 	return ret;
154 }
155 
156 /**
157  * Helper function, run by a thread which is being aborted
158  */
abort_tail(struct threads_table_el * tt_el)159 static void abort_tail(struct threads_table_el *tt_el)
160 {
161 	NCT_DEBUG("Thread [%i] %i: %s: Aborting (exiting) (rel mut)\n",
162 		  tt_el->thead_cnt, tt_el->thread_idx, __func__);
163 
164 	tt_el->running = false;
165 	tt_el->state = ABORTED;
166 	nct_exit_this_thread();
167 }
168 
169 /**
170  * Helper function to block this thread until it is allowed to run again
171  * (either when the hosted OS swaps to it, or aborts it)
172  */
nct_wait_until_allowed(struct threads_table_el * tt_el,int this_th_nbr)173 static void nct_wait_until_allowed(struct threads_table_el *tt_el, int this_th_nbr)
174 {
175 	tt_el->running = false;
176 
177 	NCT_DEBUG("Thread [%i] %i: %s: Waiting to be allowed to run\n",
178 		  tt_el->thead_cnt, this_th_nbr, __func__);
179 
180 	NSI_SAFE_CALL(nct_sem_rewait(&tt_el->sema));
181 
182 	if (tt_el->nct_status->terminate) {
183 		nct_exit_this_thread();
184 	}
185 
186 	if (tt_el->state == ABORTING) {
187 		abort_tail(tt_el);
188 	}
189 
190 	tt_el->running = true;
191 
192 	NCT_DEBUG("Thread [%i] %i: %s(): I'm allowed to run!\n",
193 		  tt_el->thead_cnt, this_th_nbr, __func__);
194 }
195 
196 /**
197  * Helper function to let the thread <next_allowed_th> run
198  */
nct_let_run(struct nct_status_t * this,int next_allowed_th)199 static void nct_let_run(struct nct_status_t *this, int next_allowed_th)
200 {
201 	struct threads_table_el *tt_el = ttable_get_element(this, next_allowed_th);
202 
203 	NCT_DEBUG("%s: We let thread [%i] %i run\n", __func__, tt_el->thead_cnt, next_allowed_th);
204 
205 	this->currently_allowed_thread = next_allowed_th;
206 	NSI_SAFE_CALL(sem_post(&tt_el->sema));
207 }
208 
209 /**
210  * Let the <next_allowed_thread_nbr> run and block this managed thread until it is allowed again
211  *
212  * The hosted OS shall call this when it has decided to swap in/out two of its threads,
213  * from the thread that is being swapped out.
214  *
215  * Note: If called without having ever let another managed thread run / from a thread not
216  * managed by this nct instance, it will behave like nct_first_thread_start(),
217  * and terminate the calling thread while letting the managed thread
218  * <next_allowed_thread_nbr> continue.
219  *
220  * inputs:
221  *   this_arg: Pointer to this thread emulator instance as returned by nct_init()
222  *   next_allowed_thread_nbr: Identifier of the thread the hosted OS wants to swap in
223  */
nct_swap_threads(void * this_arg,int next_allowed_thread_nbr)224 void nct_swap_threads(void *this_arg, int next_allowed_thread_nbr)
225 {
226 	struct nct_status_t *this = (struct nct_status_t *)this_arg;
227 	int this_th_nbr = this->currently_allowed_thread;
228 	struct threads_table_el *tt_el = ttable_get_element(this, this_th_nbr);
229 
230 	nct_let_run(this, next_allowed_thread_nbr);
231 
232 	if (this_th_nbr == -1) { /* This is the first time a thread was swapped in */
233 		NCT_DEBUG("%s: called from an unmanaged thread, terminating it\n", __func__);
234 		nct_exit_this_thread();
235 	}
236 
237 	if (tt_el->state == ABORTING) { /* We had set ourself as aborted => let's exit now */
238 		NCT_DEBUG("Thread [%i] %i: %s: Aborting curr.\n",
239 			  tt_el->thead_cnt, this_th_nbr, __func__);
240 		abort_tail(tt_el);
241 	} else {
242 		nct_wait_until_allowed(tt_el, this_th_nbr);
243 	}
244 }
245 
246 /**
247  * Let the very first hosted thread run, and exit the calling thread.
248  *
249  * The hosted OS shall call this when it has decided to swap into another
250  * thread, and wants to terminate the currently executing thread, which is not
251  * a thread managed by the thread emulator.
252  *
253  * This function allows to emulate a hosted OS doing its first swapping into one
254  * of its hosted threads from the init thread, abandoning/terminating that init
255  * thread.
256  */
void nct_first_thread_start(void *this_arg, int next_allowed_thread_nbr)
{
	struct nct_status_t *nct_status = (struct nct_status_t *)this_arg;

	/* Release the chosen managed thread... */
	nct_let_run(nct_status, next_allowed_thread_nbr);
	NCT_DEBUG("%s: Init thread dying now (rel mut)\n", __func__);
	/* ...and terminate the (unmanaged) calling thread */
	nct_exit_this_thread();
}
265 
266 /**
267  * Helper function to start a hosted thread as a POSIX thread:
268  *  It will block this new pthread until the embedded OS decides to "swap it in".
269  */
nct_thread_starter(void * arg_el)270 static void *nct_thread_starter(void *arg_el)
271 {
272 	struct threads_table_el *tt_el = (struct threads_table_el *)arg_el;
273 	const struct nct_status_t *this = tt_el->nct_status;
274 
275 	int thread_idx = tt_el->thread_idx;
276 
277 	NCT_DEBUG("Thread [%i] %i: %s: Starting\n", tt_el->thead_cnt, thread_idx, __func__);
278 
279 	/*
280 	 * The program may have been finished before this thread ever got to run
281 	 */
282 	/* LCOV_EXCL_START */ /* See Note1 */
283 	if (!this->threads_table || this->terminate) {
284 		nct_exit_this_thread();
285 	}
286 	/* LCOV_EXCL_STOP */
287 
288 	/* Let's wait until the thread is swapped in */
289 	nct_wait_until_allowed(tt_el, thread_idx);
290 
291 	this->fptr(tt_el->payload);
292 
293 	/*
294 	 * We only reach this point if the thread actually returns which should
295 	 * not happen. But we handle it gracefully just in case
296 	 */
297 	/* LCOV_EXCL_START */
298 	nsi_print_trace(PREFIX"Thread [%i] %i [%lu] ended!?!\n",
299 			tt_el->thead_cnt,
300 			thread_idx,
301 			pthread_self());
302 
303 	tt_el->running = false;
304 	tt_el->state = FAILED;
305 
306 	nct_exit_this_thread();
307 
308 	return NULL;
309 	/* LCOV_EXCL_STOP */
310 }
311 
312 /*
313  * Helper function to link the elements in a chunk to each other and initialize (to 0)
314  * their thread semaphores
315  */
ttable_init_elements(struct threads_table_el * chunk,int size)316 static void ttable_init_elements(struct threads_table_el *chunk, int size)
317 {
318 	for (int i = 0; i < size - 1; i++) {
319 		chunk[i].next = &chunk[i+1];
320 		NSI_SAFE_CALL(sem_init(&chunk[i].sema, 0, 0));
321 	}
322 	chunk[size - 1].next = NULL;
323 	NSI_SAFE_CALL(sem_init(&chunk[size - 1].sema, 0, 0));
324 }
325 
326 /*
327  * Get a given element in the threads table
328  */
ttable_get_element(struct nct_status_t * this,int index)329 static struct threads_table_el *ttable_get_element(struct nct_status_t *this, int index)
330 {
331 	struct threads_table_el *threads_table = this->threads_table;
332 
333 	if (index >= this->threads_table_size) { /* LCOV_EXCL_BR_LINE */
334 		nsi_print_error_and_exit("%s: Programming error, attempted out of bound access to "
335 					"thread table (%i>=%i)\n",
336 					index, this->threads_table_size); /* LCOV_EXCL_LINE */
337 	}
338 	while (index >= NCT_ALLOC_CHUNK_SIZE) {
339 		index -= NCT_ALLOC_CHUNK_SIZE;
340 		threads_table = threads_table[NCT_ALLOC_CHUNK_SIZE - 1].next;
341 	}
342 	return &threads_table[index];
343 }
344 
345 /**
346  * Return the first free entry index in the threads table
347  */
ttable_get_empty_slot(struct nct_status_t * this)348 static int ttable_get_empty_slot(struct nct_status_t *this)
349 {
350 	struct threads_table_el *tt_el = this->threads_table;
351 
352 	for (int i = 0; i < this->threads_table_size; i++, tt_el = tt_el->next) {
353 		if ((tt_el->state == NOTUSED)
354 			|| (NCT_REUSE_ABORTED_ENTRIES
355 			&& (tt_el->state == ABORTED))) {
356 			return i;
357 		}
358 	}
359 
360 	/*
361 	 * else, we run out of table without finding an index
362 	 * => we expand the table:
363 	 */
364 
365 	struct threads_table_el *new_chunk;
366 
367 	new_chunk = calloc(NCT_ALLOC_CHUNK_SIZE, sizeof(struct threads_table_el));
368 	if (new_chunk == NULL) { /* LCOV_EXCL_BR_LINE */
369 		nsi_print_error_and_exit(NO_MEM_ERR); /* LCOV_EXCL_LINE */
370 	}
371 
372 	/* Link new chunk to last element */
373 	tt_el = ttable_get_element(this, this->threads_table_size-1);
374 	tt_el->next = new_chunk;
375 
376 	this->threads_table_size += NCT_ALLOC_CHUNK_SIZE;
377 
378 	ttable_init_elements(new_chunk, NCT_ALLOC_CHUNK_SIZE);
379 
380 	/* The first newly created entry is good, we return it */
381 	return this->threads_table_size - NCT_ALLOC_CHUNK_SIZE;
382 }
383 
384 /**
385  * Create a new pthread for a new hosted OS thread and initialize its NCT status
386  *
387  * Returns a unique integer thread identifier/index, which should be used
388  * to refer to this thread in future calls to the thread emulator.
389  *
390  * It takes as parameter a pointer which will be passed to the
391  * function registered in nct_init when the thread is swapped in.
392  *
393  * Note that the thread is created but not swapped in.
394  * The new thread execution will be held until nct_swap_threads()
395  * (or nct_first_thread_start()) is called enabling this newly created
396  * thread number.
397  */
nct_new_thread(void * this_arg,void * payload)398 int nct_new_thread(void *this_arg, void *payload)
399 {
400 	struct nct_status_t *this = (struct nct_status_t *)this_arg;
401 	struct threads_table_el *tt_el;
402 	int t_slot;
403 
404 	t_slot = ttable_get_empty_slot(this);
405 	tt_el = ttable_get_element(this, t_slot);
406 
407 	tt_el->state = USED;
408 	tt_el->running = false;
409 	tt_el->thead_cnt = this->thread_create_count++;
410 	tt_el->payload = payload;
411 	tt_el->nct_status = this;
412 	tt_el->thread_idx = t_slot;
413 
414 	NSI_SAFE_CALL(pthread_create(&tt_el->thread,
415 				  NULL,
416 				  nct_thread_starter,
417 				  (void *)tt_el));
418 
419 	NCT_DEBUG("%s created thread [%i] %i [%lu]\n",
420 		  __func__, tt_el->thead_cnt, t_slot, tt_el->thread);
421 
422 	return t_slot;
423 }
424 
425 /**
426  * Initialize an instance of the threading emulator.
427  *
 * Returns a pointer to the initialized threading emulator instance.
429  * This pointer shall be passed to all subsequent calls of the
430  * threading emulator when interacting with this particular instance.
431  *
432  * The input fptr is a pointer to the hosted OS function
433  * to be called the first time a thread which is created on its request
434  * with nct_new_thread() is swapped in (from that thread context)
435  */
nct_init(void (* fptr)(void *))436 void *nct_init(void (*fptr)(void *))
437 {
438 	struct nct_status_t *this;
439 
440 	/*
441 	 * Note: This (and the calloc below) won't be free'd by this code
442 	 * but left for the OS to clear at process end.
443 	 * This is a conscious choice, see nct_clean_up() for more info.
444 	 * If you got here due to valgrind's leak report, please use the
445 	 * provided valgrind suppression file valgrind.supp
446 	 */
447 	this = calloc(1, sizeof(struct nct_status_t));
448 	if (this == NULL) { /* LCOV_EXCL_BR_LINE */
449 		nsi_print_error_and_exit(NO_MEM_ERR); /* LCOV_EXCL_LINE */
450 	}
451 
452 	this->fptr = fptr;
453 	this->thread_create_count = 0;
454 	this->currently_allowed_thread = -1;
455 
456 	this->threads_table = calloc(NCT_ALLOC_CHUNK_SIZE, sizeof(struct threads_table_el));
457 	if (this->threads_table == NULL) { /* LCOV_EXCL_BR_LINE */
458 		nsi_print_error_and_exit(NO_MEM_ERR); /* LCOV_EXCL_LINE */
459 	}
460 
461 	this->threads_table_size = NCT_ALLOC_CHUNK_SIZE;
462 
463 	ttable_init_elements(this->threads_table, NCT_ALLOC_CHUNK_SIZE);
464 
465 	return (void *)this;
466 }
467 
468 /**
469  * Free allocated memory by the threading emulator and clean up ordering all managed
470  * threads to abort.
471  * Note that this function cannot be called from a SW thread
472  * (the CPU is assumed halted. Otherwise we would cancel ourselves)
473  *
474  * Note: This function cannot guarantee the threads will be cancelled before the HW
 * thread exits. The only way to do that, would be to wait for each of them in
476  * a join without detaching them, but that could lead to locks in some
477  * convoluted cases; as a call to this function can come due to a hosted OS
478  * assert or other error termination, we better do not assume things are working fine.
479  * This also means we do not clean all memory used by this NCT instance, as those
480  * threads need to access it still.
481  * => we prefer the supposed memory leak report from valgrind, and ensure we
482  * will not hang.
483  */
nct_clean_up(void * this_arg)484 void nct_clean_up(void *this_arg)
485 {
486 	struct nct_status_t *this = (struct nct_status_t *)this_arg;
487 
488 	if (!this || !this->threads_table) { /* LCOV_EXCL_BR_LINE */
489 		return; /* LCOV_EXCL_LINE */
490 	}
491 
492 	this->terminate = true;
493 
494 #if NCT_ENABLE_CANCEL
495 	if (this->all_threads_released) {
496 		return;
497 	}
498 	this->all_threads_released = true;
499 
500 	struct threads_table_el *tt_el = this->threads_table;
501 
502 	for (int i = 0; i < this->threads_table_size; i++, tt_el = tt_el->next) {
503 		if (tt_el->state != USED) {
504 			continue;
505 		}
506 		NSI_SAFE_CALL(sem_post(&tt_el->sema));
507 	}
508 #endif
509 
510 	/*
511 	 * This is the cleanup we do not do:
512 	 * for all threads
513 	 *   sem_destroy(&tt_el->sema);
514 	 *
515 	 * free(this->threads_table);
516 	 *   Including all chunks
517 	 * this->threads_table = NULL;
518 	 *
519 	 *
520 	 * free(this);
521 	 */
522 }
523 
524 
525 /*
526  * Mark a thread as being aborted. This will result in the underlying pthread
527  * being terminated some time later:
528  *   If the thread is marking itself as aborting, as soon as it is swapped out
529  *   by the hosted (embedded) OS
530  *   If it is marking another thread, at some non-specific time soon in the future
531  *   (But note that no embedded part of the aborted thread will execute anymore)
532  *
533  * *  thread_idx : The thread identifier as provided during creation (return from nct_new_thread())
534  */
nct_abort_thread(void * this_arg,int thread_idx)535 void nct_abort_thread(void *this_arg, int thread_idx)
536 {
537 	struct nct_status_t *this = (struct nct_status_t *)this_arg;
538 	struct threads_table_el *tt_el = ttable_get_element(this, thread_idx);
539 
540 	if (thread_idx == this->currently_allowed_thread) {
541 		NCT_DEBUG("Thread [%i] %i: %s Marked myself as aborting\n",
542 			  tt_el->thead_cnt, thread_idx, __func__);
543 		tt_el->state = ABORTING;
544 	} else {
545 		if (tt_el->state != USED) { /* LCOV_EXCL_BR_LINE */
546 			/* The thread may have been already aborted before */
547 			return; /* LCOV_EXCL_LINE */
548 		}
549 
550 		NCT_DEBUG("Aborting not scheduled thread [%i] %i\n", tt_el->thead_cnt, thread_idx);
551 		tt_el->state = ABORTING;
552 		NSI_SAFE_CALL(sem_post(&tt_el->sema));
553 	}
554 }
555 
556 /*
557  * Return a unique thread identifier for this thread for this
558  * run. This identifier is only meant for debug purposes
559  *
560  * thread_idx is the value returned by nct_new_thread()
561  */
nct_get_unique_thread_id(void * this_arg,int thread_idx)562 int nct_get_unique_thread_id(void *this_arg, int thread_idx)
563 {
564 	struct nct_status_t *this = (struct nct_status_t *)this_arg;
565 	struct threads_table_el *tt_el = ttable_get_element(this, thread_idx);
566 
567 	return tt_el->thead_cnt;
568 }
569 
nct_thread_name_set(void * this_arg,int thread_idx,const char * str)570 int nct_thread_name_set(void *this_arg, int thread_idx, const char *str)
571 {
572 	struct nct_status_t *this = (struct nct_status_t *)this_arg;
573 	struct threads_table_el *tt_el = ttable_get_element(this, thread_idx);
574 
575 	return pthread_setname_np(tt_el->thread, str);
576 }
577 
578 /*
579  * Notes about coverage:
580  *
581  * Note1:
582  *
583  * This condition will only be triggered in very unlikely cases
584  * (once every few full regression runs).
585  * It is therefore excluded from the coverage report to avoid confusing
586  * developers.
587  *
588  * Background: A pthread is created as soon as the hosted kernel creates
589  * a hosted thread. A pthread creation is an asynchronous process handled by the
590  * host kernel.
591  *
592  * This emulator normally keeps only 1 thread executing at a time.
593  * But part of the pre-initialization during creation of a new thread
594  * and some cleanup at the tail of the thread termination are executed
595  * in parallel to other threads.
596  * That is, the execution of those code paths is a bit indeterministic.
597  *
598  * Only when the hosted kernel attempts to swap to a new thread does this
599  * emulator need to wait until its pthread is ready and initialized
600  * (has reached nct_wait_until_allowed())
601  *
602  * In some cases (tests) hosted threads are created which are never actually needed
603  * (typically the idle thread). That means the test may finish before that
604  * thread's underlying pthread has reached nct_wait_until_allowed().
605  *
 * In these unlikely cases the initialization or cleanup of the thread follows
607  * non-typical code paths.
 * These code paths are there to ensure things work always, no matter
609  * the load of the host. Without them, very rare & mysterious segfault crashes
610  * would occur.
611  * But as they are very atypical and only triggered with some host loads,
612  * they will be covered in the coverage reports only rarely.
613  *
614  * Note2:
615  *
616  * Some other code will never or only very rarely trigger and is therefore
617  * excluded with LCOV_EXCL_LINE
618  *
619  */
620