/*
 * Copyright (c) 2017 Oticon A/S
 * Copyright (c) 2023 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/*
 * Native simulator, CPU Thread emulation (nct)
 */

/**
 * Native simulator single CPU threading emulation,
 * an *optional* module provided by the Native simulator
 * which the hosted embedded OS / SW can use to emulate the threading
 * context switching which would otherwise be handled by an OS CPU AL
 *
 * Principle of operation:
 *
 * The embedded OS threads are run as a set of native Linux pthreads.
 * The embedded OS only sees one of these threads executing at a time.
 *
 * The hosted OS shall call nct_init() to initialize the state of an
 * instance of this module, and nct_clean_up() once it desires to destroy it.
 *
 * For SoCs with several micro-controllers (AMP), one instance of this module
 * would be instantiated per simulated uC and embedded OS.
 *
 * To create a new embedded thread, the hosted OS shall call nct_new_thread().
 * To swap to a thread it shall call nct_swap_threads(), and to terminate a
 * thread nct_abort_thread().
 * The hosted OS can optionally use nct_first_thread_start() to swap
 * to the "first thread".
 *
 * Whenever a thread calls nct_swap_threads(next_thread_idx) it will be blocked,
 * and the thread identified by next_thread_idx will continue executing.
 *
 *
 * Internal design:
 *
 * Which thread is running is controlled using {cond|mtx}_threads and
 * currently_allowed_thread.
 *
 * The main part of the execution of each thread will occur in a fully
 * synchronous and deterministic manner, and only when commanded by
 * the embedded operating system kernel.
 *
 * The creation of a thread will spawn a new pthread whose start
 * is asynchronous to the rest, until synchronized in nct_wait_until_allowed()
 * below.
 * Similarly, aborting and canceling threads execute a tail in quite an
 * asynchronous manner.
 *
 * This implementation is meant to be portable between fully compatible
 * POSIX systems.
 * A table (threads_table) is used to abstract the native pthreads.
 * An index in this table is used to identify threads in the interface to the
 * embedded OS.
 */
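
/*
 * Usage sketch (illustrative only, not part of this module): a hosted OS
 * could drive this API roughly as follows. The names my_thread_entry,
 * my_payload0/1, te, t0 and t1 are hypothetical.
 *
 *   static void my_thread_entry(void *payload) { ... }
 *
 *   void *te = nct_init(my_thread_entry);
 *   int t0 = nct_new_thread(te, my_payload0);
 *   int t1 = nct_new_thread(te, my_payload1);
 *   nct_first_thread_start(te, t0); // Terminates the calling (init) thread
 *
 *   // Later, from inside a managed thread (e.g. within my_thread_entry):
 *   nct_swap_threads(te, t1);       // Block this thread, let t1 run
 *   nct_abort_thread(te, t0);       // Mark t0 as aborting
 *
 *   // And at the very end, once the simulated CPU is halted:
 *   nct_clean_up(te);
 */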

#define NCT_DEBUG_PRINTS 0

/* For pthread_setname_np() */
#define _GNU_SOURCE
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include "nct_if.h"
#include "nsi_internal.h"
#include "nsi_safe_call.h"

#if NCT_DEBUG_PRINTS
#define NCT_DEBUG(fmt, ...) nsi_print_trace(PREFIX fmt, __VA_ARGS__)
#else
#define NCT_DEBUG(...)
#endif

#define PREFIX     "Thread Simulator: "
#define ERPREFIX   PREFIX"error on "
#define NO_MEM_ERR PREFIX"Can't allocate memory\n"

#define NCT_ENABLE_CANCEL 0 /* See Note.c1 */
#define NCT_ALLOC_CHUNK_SIZE 64 /* Size of the chunks in which we grow the thread table */
#define NCT_REUSE_ABORTED_ENTRIES 0
/* For the Zephyr OS, tests/kernel/threads/scheduling/schedule_api fails when setting
 * NCT_REUSE_ABORTED_ENTRIES => don't set it for now
 */

struct te_status_t;

struct threads_table_el {
	/* Pointer to the overall status of the threading emulator instance */
	struct te_status_t *ts_status;
	struct threads_table_el *next; /* Pointer to the next element of the table */
	int thread_idx; /* Index of this element in the threads_table */

	enum {NOTUSED = 0, USED, ABORTING, ABORTED, FAILED} state;
	bool running;     /* Is this the currently running thread */
	pthread_t thread; /* Actual pthread_t as returned by the native kernel */
	int thread_cnt; /* For debugging: Unique, consecutive, thread number */

	/*
	 * Pointer to data from the hosted OS architecture.
	 * What that is, if anything, is up to the hosted OS
	 */
	void *payload;
};

struct te_status_t {
	struct threads_table_el *threads_table; /* Pointer to the threads table */
	int thread_create_count; /* (For debugging) Thread creation counter */
	int threads_table_size; /* Size of threads_table */
	/* Pointer to the hosted OS function to be called when a thread is started */
	void (*fptr)(void *payload);
	/*
	 * Condition variable to block/awake all threads during swaps.
	 * (we only need 1 mutex and 1 cond variable for all threads)
	 */
	pthread_cond_t cond_threads;
	/* Mutex for the condition variable cond_threads */
	pthread_mutex_t mtx_threads;
	/* Token which tells which thread is allowed to run now */
	int currently_allowed_thread;
	bool terminate; /* Are we terminating the program == cleaning up */
};

static void nct_exit_and_cleanup(struct te_status_t *this);
static struct threads_table_el *ttable_get_element(struct te_status_t *this, int index);
/**
 * Helper function, run by a thread which is being aborted
 */
static void abort_tail(struct te_status_t *this, int this_th_nbr)
{
	struct threads_table_el *tt_el = ttable_get_element(this, this_th_nbr);

	NCT_DEBUG("Thread [%i] %i: %s: Aborting (exiting) (rel mut)\n",
		tt_el->thread_cnt,
		this_th_nbr,
		__func__);

	tt_el->running = false;
	tt_el->state = ABORTED;
	nct_exit_and_cleanup(this);
}

/**
 * Helper function to block this thread until it is allowed again
 *
 * Note that we go out of this function (the while loop below)
 * with the mutex locked by this particular thread.
 * In normal circumstances, the mutex is only unlocked internally in
 * pthread_cond_wait() while waiting for cond_threads to be signaled
 */
static void nct_wait_until_allowed(struct te_status_t *this, int this_th_nbr)
{
	struct threads_table_el *tt_el = ttable_get_element(this, this_th_nbr);

	tt_el->running = false;

	NCT_DEBUG("Thread [%i] %i: %s: Waiting to be allowed to run (rel mut)\n",
		tt_el->thread_cnt,
		this_th_nbr,
		__func__);

	while (this_th_nbr != this->currently_allowed_thread) {
		pthread_cond_wait(&this->cond_threads, &this->mtx_threads);

		if (tt_el->state == ABORTING) {
			abort_tail(this, this_th_nbr);
		}
	}

	tt_el->running = true;

	NCT_DEBUG("Thread [%i] %i: %s(): I'm allowed to run! (hav mut)\n",
		tt_el->thread_cnt,
		this_th_nbr,
		__func__);
}

/**
 * Helper function to let the thread <next_allowed_th> run
 *
 * Note: nct_let_run() can only be called with the mutex locked
 */
static void nct_let_run(struct te_status_t *this, int next_allowed_th)
{
#if NCT_DEBUG_PRINTS
	struct threads_table_el *tt_el = ttable_get_element(this, next_allowed_th);

	NCT_DEBUG("%s: We let thread [%i] %i run\n",
		__func__,
		tt_el->thread_cnt,
		next_allowed_th);
#endif

	this->currently_allowed_thread = next_allowed_th;

	/*
	 * We let all threads know one is able to run now (it may even be us
	 * again if fancied).
	 * Note that as we hold the mutex, they are going to be blocked until
	 * we reach our own nct_wait_until_allowed() while loop or abort_tail()
	 * mutex release
	 */
	NSI_SAFE_CALL(pthread_cond_broadcast(&this->cond_threads));
}

/**
 * Helper function, run by a thread which is being ended
 */
static void nct_exit_and_cleanup(struct te_status_t *this)
{
	/*
	 * Release the mutex so the next allowed thread can run
	 */
	NSI_SAFE_CALL(pthread_mutex_unlock(&this->mtx_threads));

	/* We detach ourselves so nobody needs to join us */
	pthread_detach(pthread_self());

	pthread_exit(NULL);
}

/**
 * Let the ready thread run and block this managed thread until it is allowed again
 *
 * The hosted OS shall call this when it has decided to swap in/out two of its threads,
 * from the thread that is being swapped out.
 *
 * Note: If called without having ever let another managed thread run / from a thread not
 * managed by this nct instance, it will behave like nct_first_thread_start(),
 * and terminate the calling thread while letting the managed thread
 * <next_allowed_thread_nbr> continue.
 *
 * inputs:
 *   this_arg: Pointer to this thread emulator instance as returned by nct_init()
 *   next_allowed_thread_nbr: Identifier of the thread the hosted OS wants to swap in
 */
void nct_swap_threads(void *this_arg, int next_allowed_thread_nbr)
{
	struct te_status_t *this = (struct te_status_t *)this_arg;
	int this_th_nbr = this->currently_allowed_thread;

	nct_let_run(this, next_allowed_thread_nbr);

	if (this_th_nbr == -1) { /* This is the first time a thread was swapped in */
		NCT_DEBUG("%s: called from an unmanaged thread, terminating it\n",
				__func__);
		nct_exit_and_cleanup(this);
	}

	struct threads_table_el *tt_el = ttable_get_element(this, this_th_nbr);

	if (tt_el->state == ABORTING) {
		NCT_DEBUG("Thread [%i] %i: %s: Aborting curr.\n",
			tt_el->thread_cnt,
			this_th_nbr,
			__func__);
		abort_tail(this, this_th_nbr);
	} else {
		nct_wait_until_allowed(this, this_th_nbr);
	}
}

/**
 * Let the very first hosted thread run, and exit this thread.
 *
 * The hosted OS shall call this when it has decided to swap into another
 * thread, and wants to terminate the currently executing thread, which is not
 * a thread managed by the thread emulator.
 *
 * This function allows emulating a hosted OS doing its first swap into one
 * of its hosted threads from the init thread, abandoning/terminating the init
 * thread.
 */
void nct_first_thread_start(void *this_arg, int next_allowed_thread_nbr)
{
	struct te_status_t *this = (struct te_status_t *)this_arg;

	nct_let_run(this, next_allowed_thread_nbr);
	NCT_DEBUG("%s: Init thread dying now (rel mut)\n",
		__func__);
	nct_exit_and_cleanup(this);
}

/**
 * Handler called when any thread is cancelled or exits
 */
static void nct_cleanup_handler(void *arg)
{
	struct threads_table_el *element = (struct threads_table_el *)arg;
	struct te_status_t *this = element->ts_status;

	/*
	 * If we are not terminating, this is just an aborted thread,
	 * and the mutex was already released.
	 * Otherwise, release the mutex so other threads which may be
	 * caught waiting for it can terminate
	 */

	if (!this->terminate) {
		return;
	}

	NCT_DEBUG("Thread %i: %s: Canceling (rel mut)\n",
		element->thread_idx,
		__func__);

	NSI_SAFE_CALL(pthread_mutex_unlock(&this->mtx_threads));

	/* We detach ourselves so nobody needs to join us */
	pthread_detach(pthread_self());
}

/**
 * Helper function to start a hosted thread as a POSIX thread:
 *  It will block the pthread until the embedded OS decides to "swap in"
 *  this thread.
 */
static void *nct_thread_starter(void *arg_el)
{
	struct threads_table_el *tt_el = (struct threads_table_el *)arg_el;
	struct te_status_t *this = tt_el->ts_status;

	int thread_idx = tt_el->thread_idx;

	NCT_DEBUG("Thread [%i] %i: %s: Starting\n",
		tt_el->thread_cnt,
		thread_idx,
		__func__);

	/*
	 * We block until all other running threads reach the while loop
	 * in nct_wait_until_allowed() and they release the mutex
	 */
	NSI_SAFE_CALL(pthread_mutex_lock(&this->mtx_threads));

	/*
	 * The program may have finished before this thread ever got to run
	 */
	/* LCOV_EXCL_START */ /* See Note1 */
	if (!this->threads_table || this->terminate) {
		nct_cleanup_handler(arg_el);
		pthread_exit(NULL);
	}
	/* LCOV_EXCL_STOP */

	pthread_cleanup_push(nct_cleanup_handler, arg_el);

	NCT_DEBUG("Thread [%i] %i: %s: After start mutex (hav mut)\n",
		tt_el->thread_cnt,
		thread_idx,
		__func__);

	/*
	 * The thread would try to execute immediately, so we block it
	 * until allowed
	 */
	nct_wait_until_allowed(this, thread_idx);

	this->fptr(tt_el->payload);

	/*
	 * We only reach this point if the thread actually returns, which should
	 * not happen. But we handle it gracefully just in case
	 */
	/* LCOV_EXCL_START */
	nsi_print_trace(PREFIX"Thread [%i] %i [%lu] ended!?!\n",
			tt_el->thread_cnt,
			thread_idx,
			pthread_self());

	tt_el->running = false;
	tt_el->state = FAILED;

	pthread_cleanup_pop(1);

	return NULL;
	/* LCOV_EXCL_STOP */
}

static struct threads_table_el *ttable_get_element(struct te_status_t *this, int index)
{
	struct threads_table_el *threads_table = this->threads_table;

	if (index >= this->threads_table_size) { /* LCOV_EXCL_BR_LINE */
		nsi_print_error_and_exit("%s: Programming error, attempted out of bounds access to "
					"thread table (%i>=%i)\n",
					__func__, index, this->threads_table_size); /* LCOV_EXCL_LINE */
	}
	while (index >= NCT_ALLOC_CHUNK_SIZE) {
		index -= NCT_ALLOC_CHUNK_SIZE;
		threads_table = threads_table[NCT_ALLOC_CHUNK_SIZE - 1].next;
	}
	return &threads_table[index];
}
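
/*
 * Illustrative note (not from the original source): the threads table is a
 * linked list of chunks of NCT_ALLOC_CHUNK_SIZE elements, where the last
 * element of each chunk points into the next chunk. For example, with the
 * default chunk size of 64, looking up index 130 above walks two chunk links
 * (130 -> 66 -> 2) and returns element 2 of the third chunk.
 */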

/**
 * Return the first free entry index in the threads table
 */
static int ttable_get_empty_slot(struct te_status_t *this)
{
	struct threads_table_el *tt_el = this->threads_table;

	for (int i = 0; i < this->threads_table_size; i++, tt_el = tt_el->next) {
		if ((tt_el->state == NOTUSED)
			|| (NCT_REUSE_ABORTED_ENTRIES
			&& (tt_el->state == ABORTED))) {
			return i;
		}
	}

	/*
	 * Otherwise, we ran out of table entries without finding a free one
	 * => we expand the table
	 */

	struct threads_table_el *new_chunk;

	new_chunk = calloc(NCT_ALLOC_CHUNK_SIZE, sizeof(struct threads_table_el));
	if (new_chunk == NULL) { /* LCOV_EXCL_BR_LINE */
		nsi_print_error_and_exit(NO_MEM_ERR); /* LCOV_EXCL_LINE */
	}

	/* Link the new chunk to the last element */
	tt_el = ttable_get_element(this, this->threads_table_size-1);
	tt_el->next = new_chunk;

	this->threads_table_size += NCT_ALLOC_CHUNK_SIZE;

	/* Link all new elements together */
	for (int i = 0 ; i < NCT_ALLOC_CHUNK_SIZE - 1; i++) {
		new_chunk[i].next = &new_chunk[i+1];
	}
	new_chunk[NCT_ALLOC_CHUNK_SIZE - 1].next = NULL;

	/* The first newly created entry is good, we return it */
	return this->threads_table_size - NCT_ALLOC_CHUNK_SIZE;
}

/**
 * Create a new pthread for the new hosted OS thread.
 *
 * Returns a unique integer thread identifier/index, which should be used
 * to refer to this thread in future calls to the thread emulator.
 *
 * It takes as a parameter a pointer which will be passed to the
 * function registered in nct_init() when the thread is swapped in.
 *
 * Note that the thread is created but not swapped in.
 * The new thread execution will be held until nct_swap_threads()
 * (or nct_first_thread_start()) is called with this newly created
 * thread number.
 */
int nct_new_thread(void *this_arg, void *payload)
{
	struct te_status_t *this = (struct te_status_t *)this_arg;
	struct threads_table_el *tt_el;
	int t_slot;

	t_slot = ttable_get_empty_slot(this);
	tt_el = ttable_get_element(this, t_slot);

	tt_el->state = USED;
	tt_el->running = false;
	tt_el->thread_cnt = this->thread_create_count++;
	tt_el->payload = payload;
	tt_el->ts_status = this;
	tt_el->thread_idx = t_slot;

	NSI_SAFE_CALL(pthread_create(&tt_el->thread,
				  NULL,
				  nct_thread_starter,
				  (void *)tt_el));

	NCT_DEBUG("%s created thread [%i] %i [%lu]\n",
		__func__,
		tt_el->thread_cnt,
		t_slot,
		tt_el->thread);

	return t_slot;
}

/**
 * Initialize an instance of the threading emulator.
 *
 * Returns a pointer to the initialized threading emulator instance.
 * This pointer shall be passed in all subsequent calls to the
 * threading emulator when interacting with this particular instance.
 *
 * The input fptr is a pointer to the hosted OS function
 * to be called each time a thread which was created at its request
 * with nct_new_thread() is swapped in (from that thread context)
 */
void *nct_init(void (*fptr)(void *))
{
	struct te_status_t *this;

	/*
	 * Note: This (and the calloc below) won't be free'd by this code
	 * but left for the OS to clear at process end.
	 * This is a conscious choice, see nct_clean_up() for more info.
	 * If you got here due to valgrind's leak report, please use the
	 * provided valgrind suppression file valgrind.supp
	 */
	this = calloc(1, sizeof(struct te_status_t));
	if (this == NULL) { /* LCOV_EXCL_BR_LINE */
		nsi_print_error_and_exit(NO_MEM_ERR); /* LCOV_EXCL_LINE */
	}

	this->fptr = fptr;
	this->thread_create_count = 0;
	this->currently_allowed_thread = -1;

	NSI_SAFE_CALL(pthread_cond_init(&this->cond_threads, NULL));
	NSI_SAFE_CALL(pthread_mutex_init(&this->mtx_threads, NULL));

	this->threads_table = calloc(NCT_ALLOC_CHUNK_SIZE,
				sizeof(struct threads_table_el));
	if (this->threads_table == NULL) { /* LCOV_EXCL_BR_LINE */
		nsi_print_error_and_exit(NO_MEM_ERR); /* LCOV_EXCL_LINE */
	}

	this->threads_table_size = NCT_ALLOC_CHUNK_SIZE;

	for (int i = 0 ; i < NCT_ALLOC_CHUNK_SIZE - 1; i++) {
		this->threads_table[i].next = &this->threads_table[i+1];
	}
	this->threads_table[NCT_ALLOC_CHUNK_SIZE - 1].next = NULL;

	NSI_SAFE_CALL(pthread_mutex_lock(&this->mtx_threads));

	return (void *)this;
}

/**
 * Free any memory allocated by the threading emulator and clean up.
 * Note that this function cannot be called from a SW thread
 * (the CPU is assumed halted. Otherwise we would cancel ourselves)
 *
 * Note: This function cannot guarantee the threads will be cancelled before the HW
 * thread exits. The only way to do that would be to wait for each of them in
 * a join, without detaching them, but that could lead to deadlocks in some
 * convoluted cases; as a call to this function can come due to a hosted OS
 * assert or other error termination, we had better not assume things are working fine.
 * => we prefer the supposed memory leak report from valgrind, and ensure we
 * will not hang.
 */
void nct_clean_up(void *this_arg)
{
	struct te_status_t *this = (struct te_status_t *)this_arg;

	if (!this || !this->threads_table) { /* LCOV_EXCL_BR_LINE */
		return; /* LCOV_EXCL_LINE */
	}

	this->terminate = true;

#if (NCT_ENABLE_CANCEL)
	struct threads_table_el *tt_el = this->threads_table;

	for (int i = 0; i < this->threads_table_size; i++, tt_el = tt_el->next) {
		if (tt_el->state != USED) {
			continue;
		}

		/* LCOV_EXCL_START */
		if (pthread_cancel(tt_el->thread)) {
			nsi_print_warning(
				PREFIX"cleanup: could not stop thread %i\n",
				i);
		}
		/* LCOV_EXCL_STOP */
	}
#endif
	/*
	 * This is the cleanup we do not do:
	 *
	 * free(this->threads_table);
	 *   Including all chunks
	 * this->threads_table = NULL;
	 *
	 * (void)pthread_cond_destroy(&this->cond_threads);
	 * (void)pthread_mutex_destroy(&this->mtx_threads);
	 *
	 * free(this);
	 */
}

/*
 * Mark a thread as being aborted. This will result in the underlying pthread
 * being terminated some time later:
 *   If the thread is marking itself as aborting, as soon as it is swapped out
 *   by the hosted (embedded) OS.
 *   If it is marking another thread, at some non-specific time in the future
 *   (but note that no embedded part of the aborted thread will execute anymore)
 *
 * *  thread_idx : The thread identifier as provided during creation (return from nct_new_thread())
 */
void nct_abort_thread(void *this_arg, int thread_idx)
{
	struct te_status_t *this = (struct te_status_t *)this_arg;
	struct threads_table_el *tt_el = ttable_get_element(this, thread_idx);

	if (thread_idx == this->currently_allowed_thread) {
		NCT_DEBUG("Thread [%i] %i: %s Marked myself "
			"as aborting\n",
			tt_el->thread_cnt,
			thread_idx,
			__func__);
	} else {
		if (tt_el->state != USED) { /* LCOV_EXCL_BR_LINE */
			/* The thread may have been already aborted before */
			return; /* LCOV_EXCL_LINE */
		}

		NCT_DEBUG("Aborting not scheduled thread [%i] %i\n",
			tt_el->thread_cnt,
			thread_idx);
	}
	tt_el->state = ABORTING;
	/*
	 * Note: the native thread will linger in RAM until it catches the
	 * mutex or awakes on the condition.
	 * Note that even if we were to pthread_cancel() the thread here, that
	 * would still be the case, but with a pthread_cancel() the mutex state
	 * would be uncontrolled
	 */
}
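
/*
 * Illustrative sketch (hypothetical hosted OS code, not part of this module):
 * a thread marking itself as aborting keeps executing until the hosted OS
 * swaps it out; at that point its underlying pthread runs abort_tail() and
 * exits:
 *
 *   nct_abort_thread(te, current_idx); // Mark ourselves as aborting
 *   nct_swap_threads(te, next_idx);    // Does not return: pthread exits in abort_tail()
 */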

/*
 * Return a unique thread identifier for this thread for this
 * run. This identifier is only meant for debug purposes
 *
 * thread_idx is the value returned by nct_new_thread()
 */
int nct_get_unique_thread_id(void *this_arg, int thread_idx)
{
	struct te_status_t *this = (struct te_status_t *)this_arg;
	struct threads_table_el *tt_el = ttable_get_element(this, thread_idx);

	return tt_el->thread_cnt;
}

int nct_thread_name_set(void *this_arg, int thread_idx, const char *str)
{
	struct te_status_t *this = (struct te_status_t *)this_arg;
	struct threads_table_el *tt_el = ttable_get_element(this, thread_idx);

	return pthread_setname_np(tt_el->thread, str);
}

/*
 * Notes about coverage:
 *
 * Note1:
 *
 * This condition will only be triggered in very unlikely cases
 * (once every few full regression runs).
 * It is therefore excluded from the coverage report to avoid confusing
 * developers.
 *
 * Background: A pthread is created as soon as the hosted kernel creates
 * a hosted thread. A pthread creation is an asynchronous process handled by the
 * host kernel.
 *
 * This emulator normally keeps only 1 thread executing at a time.
 * But part of the pre-initialization during creation of a new thread
 * and some cleanup at the tail of the thread termination are executed
 * in parallel to other threads.
 * That is, the execution of those code paths is a bit indeterministic.
 *
 * Only when the hosted kernel attempts to swap to a new thread does this
 * emulator need to wait until its pthread is ready and initialized
 * (has reached nct_wait_until_allowed())
 *
 * In some cases (tests) hosted threads are created which are never actually needed
 * (typically the idle thread). That means the test may finish before that
 * thread's underlying pthread has reached nct_wait_until_allowed().
 *
 * In these unlikely cases the initialization or cleanup of the thread follows
 * non-typical code paths.
 * These code paths are there to ensure things work always, no matter
 * the load of the host. Without them, very rare & mysterious segfault crashes
 * would occur.
 * But as they are very atypical and only triggered with some host loads,
 * they will be covered in the coverage reports only rarely.
 *
 * Note2:
 *
 * Some other code will never or only very rarely trigger and is therefore
 * excluded with LCOV_EXCL_LINE
 *
 *
 * Notes about (memory) cleanup:
 *
 * Note.c1:
 *
 * In some very rare cases on very loaded machines, a race in the glibc
 * pthread_cancel() seems to be triggered.
 * In it, the cancelled thread's cleanup overtakes the pthread_cancel() code, and
 * frees the pthread structure before pthread_cancel() has finished, resulting in
 * a dereference into already free'd memory, and therefore a segfault.
 * Calling pthread_cancel() during cleanup is not required beyond preventing a valgrind
 * memory leak report (all threads will be cancelled immediately on exit).
 * Therefore we do not do it, to avoid these very rare crashes.
 */