/*
 * Copyright (c) 2017 Oticon A/S
 * Copyright (c) 2023 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*
 * Native simulator, CPU Thread emulation (nct)
 */

/**
 * Native simulator single CPU threading emulation,
 * an *optional* module provided by the Native simulator
 * which the hosted embedded OS / SW can use to emulate the threading
 * context switching that would otherwise be handled by an OS CPU AL
 *
 * Principle of operation:
 *
 * The embedded OS threads are run as a set of native Linux pthreads.
 * The embedded OS only sees one of these threads executing at a time.
 *
 * The hosted OS shall call nct_init() to initialize the state of an
 * instance of this module, and nct_clean_up() once it desires to destroy it.
 *
 * For SOCs with several micro-controllers (AMP) one instance of this module
 * would be instantiated per simulated uC and embedded OS.
 *
 * To create a new embedded thread, the hosted OS shall call nct_new_thread().
 * To swap to a thread use nct_swap_threads(), and to terminate a thread
 * nct_abort_thread().
 * The hosted OS can optionally use nct_first_thread_start() to swap
 * to the "first thread".
 * (An illustrative usage sketch follows this comment.)
 *
 * Whenever a thread calls nct_swap_threads(next_thread_idx) it will be blocked,
 * and the thread identified by next_thread_idx will continue executing.
 *
 *
 * Internal design:
 *
 * Which thread is running is controlled using {cond|mtx}_threads and
 * currently_allowed_thread.
 *
 * The main part of the execution of each thread will occur in a fully
 * synchronous and deterministic manner, and only when commanded by
 * the embedded operating system kernel.
 *
 * The creation of a thread will spawn a new pthread whose start
 * is asynchronous to the rest, until synchronized in nct_wait_until_allowed()
 * below.
 * Similarly, aborting and canceling threads execute a tail in quite an
 * asynchronous manner.
 *
 * This implementation is meant to be portable between fully compatible
 * POSIX systems.
 * A table (threads_table) is used to abstract the native pthreads.
 * An index in this table is used to identify threads in the IF to the
 * embedded OS.
 */
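
/*
 * Illustrative usage sketch from the hosted OS side. This is not part of the
 * module; the entry function and payloads (my_arch_thread_entry, payload0/1)
 * are hypothetical placeholders:
 *
 *   static void my_arch_thread_entry(void *payload)
 *   {
 *           // Hosted OS thread body: runs only while this thread is
 *           // the currently allowed one
 *   }
 *
 *   void *te = nct_init(my_arch_thread_entry);
 *   int t0 = nct_new_thread(te, &payload0); // created, but held
 *   int t1 = nct_new_thread(te, &payload1);
 *   nct_first_thread_start(te, t0);         // t0 runs; calling thread exits
 *   // ...later, from within a managed thread (e.g. t0):
 *   nct_swap_threads(te, t1);               // blocks t0, t1 continues
 *   nct_abort_thread(te, t0);               // t0's pthread ends eventually
 */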

#define NCT_DEBUG_PRINTS 0

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include "nct_if.h"
#include "nsi_internal.h"
#include "nsi_safe_call.h"

#if NCT_DEBUG_PRINTS
#define NCT_DEBUG(fmt, ...) nsi_print_trace(PREFIX fmt, __VA_ARGS__)
#else
#define NCT_DEBUG(...)
#endif

#define PREFIX "Thread Simulator: "
#define ERPREFIX PREFIX"error on "
#define NO_MEM_ERR PREFIX"Can't allocate memory\n"

#define NCT_ENABLE_CANCEL 0 /* See Note.c1 */
#define NCT_ALLOC_CHUNK_SIZE 64 /* In how big chunks we grow the thread table */
#define NCT_REUSE_ABORTED_ENTRIES 0
/* For the Zephyr OS, tests/kernel/threads/scheduling/schedule_api fails when setting
 * NCT_REUSE_ABORTED_ENTRIES => don't set it for now
 */

struct te_status_t;

struct threads_table_el {
	/* Pointer to the overall status of the threading emulator instance */
	struct te_status_t *ts_status;
	struct threads_table_el *next; /* Pointer to the next element of the table */
	int thread_idx; /* Index of this element in the threads_table */

	enum {NOTUSED = 0, USED, ABORTING, ABORTED, FAILED} state;
	bool running; /* Is this the currently running thread */
	pthread_t thread; /* Actual pthread_t as returned by the native kernel */
	int thead_cnt; /* For debugging: Unique, consecutive, thread number */

	/*
	 * Pointer to data from the hosted OS architecture.
	 * What that is, if anything, is up to the hosted OS
	 */
	void *payload;
};
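
/*
 * Lifecycle of a threads_table_el.state entry, as driven by the code below:
 *   NOTUSED (zeroed on allocation) -> USED (nct_new_thread())
 *   USED -> ABORTING (nct_abort_thread()) -> ABORTED (abort_tail())
 *   USED -> FAILED (only if the hosted entry function ever returns)
 * With NCT_REUSE_ABORTED_ENTRIES, ABORTED entries may become USED again.
 */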

struct te_status_t {
	struct threads_table_el *threads_table; /* Pointer to the threads table */
	int thread_create_count; /* (For debugging) Thread creation counter */
	int threads_table_size; /* Size of threads_table */
	/* Pointer to the hosted OS function to be called when a thread is started */
	void (*fptr)(void *payload);
	/*
	 * Condition variable to block/awake all threads during swaps.
	 * (we only need 1 mutex and 1 cond variable for all threads)
	 */
	pthread_cond_t cond_threads;
	/* Mutex for the condition variable cond_threads */
	pthread_mutex_t mtx_threads;
	/* Token which tells which thread is allowed to run now */
	int currently_allowed_thread;
	bool terminate; /* Are we terminating the program == cleaning up */
};

static void nct_exit_and_cleanup(struct te_status_t *this);
static struct threads_table_el *ttable_get_element(struct te_status_t *this, int index);

/**
 * Helper function, run by a thread which is being aborted
 */
static void abort_tail(struct te_status_t *this, int this_th_nbr)
{
	struct threads_table_el *tt_el = ttable_get_element(this, this_th_nbr);

	NCT_DEBUG("Thread [%i] %i: %s: Aborting (exiting) (rel mut)\n",
		tt_el->thead_cnt,
		this_th_nbr,
		__func__);

	tt_el->running = false;
	tt_el->state = ABORTED;
	nct_exit_and_cleanup(this);
}

/**
 * Helper function to block this thread until it is allowed again
 *
 * Note that we go out of this function (the while loop below)
 * with the mutex locked by this particular thread.
 * In normal circumstances, the mutex is only unlocked internally in
 * pthread_cond_wait() while waiting for cond_threads to be signaled
 */
static void nct_wait_until_allowed(struct te_status_t *this, int this_th_nbr)
{
	struct threads_table_el *tt_el = ttable_get_element(this, this_th_nbr);

	tt_el->running = false;

	NCT_DEBUG("Thread [%i] %i: %s: Waiting to be allowed to run (rel mut)\n",
		tt_el->thead_cnt,
		this_th_nbr,
		__func__);

	while (this_th_nbr != this->currently_allowed_thread) {
		pthread_cond_wait(&this->cond_threads, &this->mtx_threads);

		if (tt_el->state == ABORTING) {
			abort_tail(this, this_th_nbr);
		}
	}

	tt_el->running = true;

	NCT_DEBUG("Thread [%i] %i: %s(): I'm allowed to run! (hav mut)\n",
		tt_el->thead_cnt,
		this_th_nbr,
		__func__);
}

/**
 * Helper function to let the thread <next_allowed_th> run
 *
 * Note: nct_let_run() can only be called with the mutex locked
 */
static void nct_let_run(struct te_status_t *this, int next_allowed_th)
{
#if NCT_DEBUG_PRINTS
	struct threads_table_el *tt_el = ttable_get_element(this, next_allowed_th);

	NCT_DEBUG("%s: We let thread [%i] %i run\n",
		__func__,
		tt_el->thead_cnt,
		next_allowed_th);
#endif

	this->currently_allowed_thread = next_allowed_th;

	/*
	 * We let all threads know one is able to run now (it may even be us
	 * again if fancied)
	 * Note that as we hold the mutex, they are going to be blocked until
	 * we reach our own nct_wait_until_allowed() while loop or abort_tail()
	 * mutex release
	 */
	NSI_SAFE_CALL(pthread_cond_broadcast(&this->cond_threads));
}

/**
 * Helper function, run by a thread which is being ended
 */
static void nct_exit_and_cleanup(struct te_status_t *this)
{
	/*
	 * Release the mutex so the next allowed thread can run
	 */
	NSI_SAFE_CALL(pthread_mutex_unlock(&this->mtx_threads));

	/* We detach ourselves so nobody needs to join us */
	pthread_detach(pthread_self());

	pthread_exit(NULL);
}
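
/*
 * Swap handoff sequence, as implemented by the helpers above and
 * nct_swap_threads() below, for a managed thread A swapping to thread B:
 *  1. A sets currently_allowed_thread = B and broadcasts cond_threads
 *     while still holding mtx_threads (nct_let_run()).
 *  2. A blocks in pthread_cond_wait(), atomically releasing mtx_threads
 *     (nct_wait_until_allowed()).
 *  3. B, waiting in that same loop, wakes, sees it is now the allowed
 *     thread, and returns from nct_wait_until_allowed() holding the mutex.
 */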

/**
 * Let the ready thread run and block this managed thread until it is allowed again
 *
 * The hosted OS shall call this when it has decided to swap in/out two of its threads,
 * from the thread that is being swapped out.
 *
 * Note: If called without having ever let another managed thread run / from a thread not
 * managed by this nct instance, it will behave like nct_first_thread_start(),
 * and terminate the calling thread while letting the managed thread
 * <next_allowed_thread_nbr> continue.
 *
 * inputs:
 *   this_arg: Pointer to this thread emulator instance as returned by nct_init()
 *   next_allowed_thread_nbr: Identifier of the thread the hosted OS wants to swap in
 */
void nct_swap_threads(void *this_arg, int next_allowed_thread_nbr)
{
	struct te_status_t *this = (struct te_status_t *)this_arg;
	int this_th_nbr = this->currently_allowed_thread;

	nct_let_run(this, next_allowed_thread_nbr);

	if (this_th_nbr == -1) { /* This is the first time a thread was swapped in */
		NCT_DEBUG("%s: called from an unmanaged thread, terminating it\n",
			__func__);
		nct_exit_and_cleanup(this);
	}

	struct threads_table_el *tt_el = ttable_get_element(this, this_th_nbr);

	if (tt_el->state == ABORTING) {
		NCT_DEBUG("Thread [%i] %i: %s: Aborting curr.\n",
			tt_el->thead_cnt,
			this_th_nbr,
			__func__);
		abort_tail(this, this_th_nbr);
	} else {
		nct_wait_until_allowed(this, this_th_nbr);
	}
}

/**
 * Let the very first hosted thread run, and exit this thread.
 *
 * The hosted OS shall call this when it has decided to swap into another
 * thread, and wants to terminate the currently executing thread, which is not
 * a thread managed by the thread emulator.
 *
 * This function allows emulating a hosted OS doing its first swap into one
 * of its hosted threads from the init thread, abandoning/terminating the init
 * thread.
 */
void nct_first_thread_start(void *this_arg, int next_allowed_thread_nbr)
{
	struct te_status_t *this = (struct te_status_t *)this_arg;

	nct_let_run(this, next_allowed_thread_nbr);
	NCT_DEBUG("%s: Init thread dying now (rel mut)\n",
		__func__);
	nct_exit_and_cleanup(this);
}

/**
 * Handler called when any thread is cancelled or exits
 */
static void nct_cleanup_handler(void *arg)
{
	struct threads_table_el *element = (struct threads_table_el *)arg;
	struct te_status_t *this = element->ts_status;

	/*
	 * If we are not terminating, this is just an aborted thread,
	 * and the mutex was already released.
	 * Otherwise, release the mutex so other threads which may be
	 * caught waiting for it can terminate
	 */

	if (!this->terminate) {
		return;
	}

	NCT_DEBUG("Thread %i: %s: Canceling (rel mut)\n",
		element->thread_idx,
		__func__);

	NSI_SAFE_CALL(pthread_mutex_unlock(&this->mtx_threads));

	/* We detach ourselves so nobody needs to join us */
	pthread_detach(pthread_self());
}

/**
 * Helper function to start a hosted thread as a POSIX thread:
 * It will block the pthread until the embedded OS decides to "swap in"
 * this thread.
 */
static void *nct_thread_starter(void *arg_el)
{
	struct threads_table_el *tt_el = (struct threads_table_el *)arg_el;
	struct te_status_t *this = tt_el->ts_status;

	int thread_idx = tt_el->thread_idx;

	NCT_DEBUG("Thread [%i] %i: %s: Starting\n",
		tt_el->thead_cnt,
		thread_idx,
		__func__);

	/*
	 * We block until all other running threads reach the while loop
	 * in nct_wait_until_allowed() and they release the mutex
	 */
	NSI_SAFE_CALL(pthread_mutex_lock(&this->mtx_threads));

	/*
	 * The program may have finished before this thread ever got to run
	 */
	/* LCOV_EXCL_START */ /* See Note1 */
	if (!this->threads_table || this->terminate) {
		nct_cleanup_handler(arg_el);
		pthread_exit(NULL);
	}
	/* LCOV_EXCL_STOP */

	pthread_cleanup_push(nct_cleanup_handler, arg_el);

	NCT_DEBUG("Thread [%i] %i: %s: After start mutex (hav mut)\n",
		tt_el->thead_cnt,
		thread_idx,
		__func__);

	/*
	 * The thread would try to execute immediately, so we block it
	 * until allowed
	 */
	nct_wait_until_allowed(this, thread_idx);

	this->fptr(tt_el->payload);

	/*
	 * We only reach this point if the thread actually returns, which should
	 * not happen. But we handle it gracefully just in case
	 */
	/* LCOV_EXCL_START */
	nsi_print_trace(PREFIX"Thread [%i] %i [%lu] ended!?!\n",
		tt_el->thead_cnt,
		thread_idx,
		pthread_self());

	tt_el->running = false;
	tt_el->state = FAILED;

	pthread_cleanup_pop(1);

	return NULL;
	/* LCOV_EXCL_STOP */
}

static struct threads_table_el *ttable_get_element(struct te_status_t *this, int index)
{
	struct threads_table_el *threads_table = this->threads_table;

	if (index >= this->threads_table_size) { /* LCOV_EXCL_BR_LINE */
		nsi_print_error_and_exit("%s: Programming error, attempted out of bound access to "
					 "thread table (%i>=%i)\n",
					 __func__, index, this->threads_table_size); /* LCOV_EXCL_LINE */
	}
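	/*
	 * The table is a linked list of chunks of NCT_ALLOC_CHUNK_SIZE
	 * elements each, where the last element of a chunk links to the
	 * first element of the next one. For example, with
	 * NCT_ALLOC_CHUNK_SIZE == 64, index 70 is found by hopping from
	 * element 63 of the first chunk to the second chunk, and taking
	 * element 6 there.
	 */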
	while (index >= NCT_ALLOC_CHUNK_SIZE) {
		index -= NCT_ALLOC_CHUNK_SIZE;
		threads_table = threads_table[NCT_ALLOC_CHUNK_SIZE - 1].next;
	}
	return &threads_table[index];
}

/**
 * Return the first free entry index in the threads table
 */
static int ttable_get_empty_slot(struct te_status_t *this)
{
	struct threads_table_el *tt_el = this->threads_table;

	for (int i = 0; i < this->threads_table_size; i++, tt_el = tt_el->next) {
		if ((tt_el->state == NOTUSED)
			|| (NCT_REUSE_ABORTED_ENTRIES
			&& (tt_el->state == ABORTED))) {
			return i;
		}
	}

	/*
	 * Otherwise, we ran out of table without finding a free index
	 * => we expand the table
	 */
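
	/*
	 * For example, starting from a full table of NCT_ALLOC_CHUNK_SIZE (64)
	 * entries: a new chunk of 64 is allocated and linked in,
	 * threads_table_size becomes 128, and index 64 (the first entry of
	 * the new chunk) is returned below.
	 */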

	struct threads_table_el *new_chunk;

	new_chunk = calloc(NCT_ALLOC_CHUNK_SIZE, sizeof(struct threads_table_el));
	if (new_chunk == NULL) { /* LCOV_EXCL_BR_LINE */
		nsi_print_error_and_exit(NO_MEM_ERR); /* LCOV_EXCL_LINE */
	}

	/* Link new chunk to last element */
	tt_el = ttable_get_element(this, this->threads_table_size - 1);
	tt_el->next = new_chunk;

	this->threads_table_size += NCT_ALLOC_CHUNK_SIZE;

	/* Link all new elements together */
	for (int i = 0; i < NCT_ALLOC_CHUNK_SIZE - 1; i++) {
		new_chunk[i].next = &new_chunk[i+1];
	}
	new_chunk[NCT_ALLOC_CHUNK_SIZE - 1].next = NULL;

	/* The first newly created entry is good, we return it */
	return this->threads_table_size - NCT_ALLOC_CHUNK_SIZE;
}

/**
 * Create a new pthread for the new hosted OS thread.
 *
 * Returns a unique integer thread identifier/index, which should be used
 * to refer to this thread in future calls to the thread emulator.
 *
 * It takes as parameter a pointer which will be passed to the
 * function registered in nct_init() when the thread is swapped in.
 *
 * Note that the thread is created but not swapped in.
 * The new thread execution will be held until nct_swap_threads()
 * (or nct_first_thread_start()) is called with this newly created
 * thread number.
 */
int nct_new_thread(void *this_arg, void *payload)
{
	struct te_status_t *this = (struct te_status_t *)this_arg;
	struct threads_table_el *tt_el;
	int t_slot;

	t_slot = ttable_get_empty_slot(this);
	tt_el = ttable_get_element(this, t_slot);

	tt_el->state = USED;
	tt_el->running = false;
	tt_el->thead_cnt = this->thread_create_count++;
	tt_el->payload = payload;
	tt_el->ts_status = this;
	tt_el->thread_idx = t_slot;

	NSI_SAFE_CALL(pthread_create(&tt_el->thread,
				     NULL,
				     nct_thread_starter,
				     (void *)tt_el));

	NCT_DEBUG("%s created thread [%i] %i [%lu]\n",
		__func__,
		tt_el->thead_cnt,
		t_slot,
		tt_el->thread);

	return t_slot;
}

/**
 * Initialize an instance of the threading emulator.
 *
 * Returns a pointer to the initialized threading emulator instance.
 * This pointer shall be passed to all subsequent calls of the
 * threading emulator when interacting with this particular instance.
 *
 * The input fptr is a pointer to the hosted OS function
 * to be called each time a thread which is created on its request
 * with nct_new_thread() is swapped in (from that thread context)
 */
void *nct_init(void (*fptr)(void *))
{
	struct te_status_t *this;

	/*
	 * Note: This (and the calloc below) won't be free'd by this code
	 * but left for the OS to clear at process end.
	 * This is a conscious choice, see nct_clean_up() for more info.
	 * If you got here due to valgrind's leak report, please use the
	 * provided valgrind suppression file valgrind.supp
	 */
	this = calloc(1, sizeof(struct te_status_t));
	if (this == NULL) { /* LCOV_EXCL_BR_LINE */
		nsi_print_error_and_exit(NO_MEM_ERR); /* LCOV_EXCL_LINE */
	}

	this->fptr = fptr;
	this->thread_create_count = 0;
	this->currently_allowed_thread = -1;

	NSI_SAFE_CALL(pthread_cond_init(&this->cond_threads, NULL));
	NSI_SAFE_CALL(pthread_mutex_init(&this->mtx_threads, NULL));

	this->threads_table = calloc(NCT_ALLOC_CHUNK_SIZE,
				     sizeof(struct threads_table_el));
	if (this->threads_table == NULL) { /* LCOV_EXCL_BR_LINE */
		nsi_print_error_and_exit(NO_MEM_ERR); /* LCOV_EXCL_LINE */
	}

	this->threads_table_size = NCT_ALLOC_CHUNK_SIZE;

	for (int i = 0; i < NCT_ALLOC_CHUNK_SIZE - 1; i++) {
		this->threads_table[i].next = &this->threads_table[i+1];
	}
	this->threads_table[NCT_ALLOC_CHUNK_SIZE - 1].next = NULL;

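	/*
	 * Take the mutex before returning: pthreads spawned later by
	 * nct_new_thread() will block on it in nct_thread_starter() until
	 * the hosted OS performs its first swap and the mutex is released
	 * (inside pthread_cond_wait() or at thread exit).
	 */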
	NSI_SAFE_CALL(pthread_mutex_lock(&this->mtx_threads));

	return (void *)this;
}

/**
 * Free any memory allocated by the threading emulator and clean up.
 * Note that this function cannot be called from a SW thread
 * (the CPU is assumed halted. Otherwise we would cancel ourselves)
 *
 * Note: This function cannot guarantee the threads will be cancelled before the HW
 * thread exits. The only way to do that would be to wait for each of them in
 * a join (without detaching them), but that could lead to deadlocks in some
 * convoluted cases; as a call to this function can come due to a hosted OS
 * assert or other error termination, we had better not assume things are working fine.
 * => we prefer the supposed memory leak report from valgrind, and ensure we
 * will not hang.
 */
void nct_clean_up(void *this_arg)
{
	struct te_status_t *this = (struct te_status_t *)this_arg;

	if (!this || !this->threads_table) { /* LCOV_EXCL_BR_LINE */
		return; /* LCOV_EXCL_LINE */
	}

	this->terminate = true;

#if (NCT_ENABLE_CANCEL)
	struct threads_table_el *tt_el = this->threads_table;

	for (int i = 0; i < this->threads_table_size; i++, tt_el = tt_el->next) {
		if (tt_el->state != USED) {
			continue;
		}

		/* LCOV_EXCL_START */
		if (pthread_cancel(tt_el->thread)) {
			nsi_print_warning(
				PREFIX"cleanup: could not stop thread %i\n",
				i);
		}
		/* LCOV_EXCL_STOP */
	}
#endif
	/*
	 * This is the cleanup we do not do:
	 *
	 * free(this->threads_table);
	 *   Including all chunks
	 * this->threads_table = NULL;
	 *
	 * (void)pthread_cond_destroy(&this->cond_threads);
	 * (void)pthread_mutex_destroy(&this->mtx_threads);
	 *
	 * free(this);
	 */
}

/*
 * Mark a thread as being aborted. This will result in the underlying pthread
 * being terminated some time later:
 * If the thread is marking itself as aborting, as soon as it is swapped out
 * by the hosted (embedded) OS.
 * If it is marking another thread, at some non-specific time in the future
 * (but note that no embedded part of the aborted thread will execute anymore).
 *
 * * thread_idx : The thread identifier as provided during creation (return from nct_new_thread())
 */
void nct_abort_thread(void *this_arg, int thread_idx)
{
	struct te_status_t *this = (struct te_status_t *)this_arg;
	struct threads_table_el *tt_el = ttable_get_element(this, thread_idx);

	if (thread_idx == this->currently_allowed_thread) {
		NCT_DEBUG("Thread [%i] %i: %s Marked myself "
			"as aborting\n",
			tt_el->thead_cnt,
			thread_idx,
			__func__);
	} else {
		if (tt_el->state != USED) { /* LCOV_EXCL_BR_LINE */
			/* The thread may have been aborted already */
			return; /* LCOV_EXCL_LINE */
		}

		NCT_DEBUG("Aborting non-scheduled thread [%i] %i\n",
			tt_el->thead_cnt,
			thread_idx);
	}
	tt_el->state = ABORTING;
	/*
	 * Note: the native thread will linger in RAM until it catches the
	 * mutex or awakes on the condition.
	 * Note that even if we pthread_cancel()'ed the thread here, that
	 * would still be the case, but with a pthread_cancel() the mutex
	 * state would be uncontrolled
	 */
}

/*
 * Return a unique thread identifier for this thread for this
 * run. This identifier is only meant for debug purposes.
 *
 * thread_idx is the value returned by nct_new_thread()
 */
int nct_get_unique_thread_id(void *this_arg, int thread_idx)
{
	struct te_status_t *this = (struct te_status_t *)this_arg;
	struct threads_table_el *tt_el = ttable_get_element(this, thread_idx);

	return tt_el->thead_cnt;
}

/*
 * Notes about coverage:
 *
 * Note1:
 *
 * This condition will only be triggered in very unlikely cases
 * (once every few full regression runs).
 * It is therefore excluded from the coverage report to avoid confusing
 * developers.
 *
 * Background: A pthread is created as soon as the hosted kernel creates
 * a hosted thread. A pthread creation is an asynchronous process handled by the
 * host kernel.
 *
 * This emulator normally keeps only 1 thread executing at a time.
 * But part of the pre-initialization during creation of a new thread
 * and some cleanup at the tail of the thread termination are executed
 * in parallel to other threads.
 * That is, the execution of those code paths is a bit indeterministic.
 *
 * Only when the hosted kernel attempts to swap to a new thread does this
 * emulator need to wait until its pthread is ready and initialized
 * (has reached nct_wait_until_allowed())
 *
 * In some cases (tests) hosted threads are created which are never actually needed
 * (typically the idle thread). That means the test may finish before that
 * thread's underlying pthread has reached nct_wait_until_allowed().
 *
 * In these unlikely cases the initialization or cleanup of the thread follows
 * non-typical code paths.
 * These code paths are there to ensure things always work, no matter
 * the load of the host. Without them, very rare & mysterious segfault crashes
 * would occur.
 * But as they are very atypical and only triggered with some host loads,
 * they will be covered in the coverage reports only rarely.
 *
 * Note2:
 *
 * Some other code will never or only very rarely trigger and is therefore
 * excluded with LCOV_EXCL_LINE
 *
 *
 * Notes about (memory) cleanup:
 *
 * Note.c1:
 *
 * In some very rare cases on very loaded machines, a race in the glibc pthread_cancel()
 * seems to be triggered.
 * In it, the cancelled thread's cleanup overtakes the pthread_cancel() code, and frees the
 * pthread structure before pthread_cancel() has finished, resulting in a dereference into
 * already free'd memory, and therefore a segfault.
 * Calling pthread_cancel() during cleanup is not required beyond preventing a valgrind
 * memory leak report (all threads will be cancelled immediately on exit).
 * Therefore we do not do this, to avoid these very rare crashes.
 */