1 /***************************************************************************
2 * Copyright (c) 2024 Microsoft Corporation
3 *
4 * This program and the accompanying materials are made available under the
5 * terms of the MIT License which is available at
6 * https://opensource.org/licenses/MIT.
7 *
8 * SPDX-License-Identifier: MIT
9 **************************************************************************/
10
11
12 /**************************************************************************/
13 /**************************************************************************/
14 /** */
15 /** ThreadX Component */
16 /** */
17 /** Port Specific */
18 /** */
19 /**************************************************************************/
20 /**************************************************************************/
21
22
23 /**************************************************************************/
24 /* */
25 /* PORT SPECIFIC C INFORMATION RELEASE */
26 /* */
27 /* tx_port.h Cortex-M23/IAR */
28 /* 6.1.12 */
29 /* */
30 /* AUTHOR */
31 /* */
32 /* Scott Larson, Microsoft Corporation */
33 /* */
34 /* DESCRIPTION */
35 /* */
36 /* This file contains data type definitions that make the ThreadX */
37 /* real-time kernel function identically on a variety of different */
38 /* processor architectures. For example, the size or number of bits */
39 /* in an "int" data type vary between microprocessor architectures and */
40 /* even C compilers for the same microprocessor. ThreadX does not */
41 /* directly use native C data types. Instead, ThreadX creates its */
42 /* own special types that can be mapped to actual data types by this */
43 /* file to guarantee consistency in the interface and functionality. */
44 /* */
45 /* RELEASE HISTORY */
46 /* */
47 /* DATE NAME DESCRIPTION */
48 /* */
49 /* 04-02-2021 Scott Larson Initial Version 6.1.6 */
50 /* 04-25-2022 Scott Larson Modified comments and added */
51 /* volatile to registers, */
52 /* resulting in version 6.1.11 */
53 /* 07-29-2022 Scott Larson Modified comments and changed */
54 /* secure stack initialization */
55 /* macro to port-specific, */
56 /* resulting in version 6.1.12 */
57 /* */
58 /**************************************************************************/
59
60 #ifndef TX_PORT_H
61 #define TX_PORT_H
62
63
64 /* Determine if the optional ThreadX user define file should be used. */
65
66 #ifdef TX_INCLUDE_USER_DEFINE_FILE
67
68 /* Yes, include the user defines in tx_user.h. The defines in this file may
69 alternately be defined on the command line. */
70
71 #include "tx_user.h"
72 #endif
73
74
75 /* Define compiler library include files. */
76
77 #include <stdlib.h>
78 #include <string.h>
79
80 #ifdef __ICCARM__
81 #include <intrinsics.h> /* IAR Intrinsics */
82 #define __asm__ __asm /* Define to make all inline asm look similar */
83 #ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
84 #include <yvals.h>
85 #endif
86 #endif /* __ICCARM__ */
87
88
89 /* Define ThreadX basic types for this port. */
90
#define VOID                                    void
typedef char                                    CHAR;
typedef unsigned char                           UCHAR;
typedef int                                     INT;
typedef unsigned int                            UINT;
typedef long                                    LONG;
typedef unsigned long                           ULONG;
typedef unsigned long long                      ULONG64;
typedef short                                   SHORT;
typedef unsigned short                          USHORT;
/* Tell common ThreadX code that this port supplies a 64-bit ULONG64 type. */
#define ULONG64_DEFINED
102
/* Function prototypes for this port. */

/* Forward declaration of the thread control block (defined elsewhere in ThreadX). */
struct TX_THREAD_STRUCT;

/* Secure-stack services for TrustZone: the _txe_ functions are the
   error-checking front ends and the _tx_ functions are the core
   implementations (see the tx_thread_secure_stack_* API mapping below). */
UINT    _txe_thread_secure_stack_allocate(struct TX_THREAD_STRUCT *thread_ptr, ULONG stack_size);
UINT    _txe_thread_secure_stack_free(struct TX_THREAD_STRUCT *thread_ptr);
UINT    _tx_thread_secure_stack_allocate(struct TX_THREAD_STRUCT *tx_thread, ULONG stack_size);
UINT    _tx_thread_secure_stack_free(struct TX_THREAD_STRUCT *tx_thread);
109
110 /* This port overrides tx_thread_stack_error_notify with an architecture specific version */
111 #define TX_PORT_THREAD_STACK_ERROR_NOTIFY
112
113 /* This port overrides tx_thread_stack_error_handler with an architecture specific version */
114 #define TX_PORT_THREAD_STACK_ERROR_HANDLER
115
116 /* This hardware has stack checking that we take advantage of - do NOT define. */
117 #ifdef TX_ENABLE_STACK_CHECKING
118 #error "Do not define TX_ENABLE_STACK_CHECKING"
119 #endif
120
121 /* If user does not want to terminate thread on stack overflow,
122 #define the TX_THREAD_NO_TERMINATE_STACK_ERROR symbol.
123 The thread will be rescheduled and continue to cause the exception.
124 It is suggested user code handle this by registering a notification with the
125 tx_thread_stack_error_notify function. */
126 /*#define TX_THREAD_NO_TERMINATE_STACK_ERROR */
127
128 /* Define the system API mappings based on the error checking
129 selected by the user. Note: this section is only applicable to
130 application source code, hence the conditional that turns off this
131 stuff when the include file is processed by the ThreadX source. */
132
133 #ifndef TX_SOURCE_CODE
134
135
136 /* Determine if error checking is desired. If so, map API functions
137 to the appropriate error checking front-ends. Otherwise, map API
138 functions to the core functions that actually perform the work.
139 Note: error checking is enabled by default. */
140
141 #ifdef TX_DISABLE_ERROR_CHECKING
142
143 /* Services without error checking. */
144
145 #define tx_thread_secure_stack_allocate _tx_thread_secure_stack_allocate
146 #define tx_thread_secure_stack_free _tx_thread_secure_stack_free
147
148 #else
149
150 /* Services with error checking. */
151
152 #define tx_thread_secure_stack_allocate _txe_thread_secure_stack_allocate
153 #define tx_thread_secure_stack_free _txe_thread_secure_stack_free
154
155 #endif
156 #endif
157
158
159
160 /* Define the priority levels for ThreadX. Legal values range
161 from 32 to 1024 and MUST be evenly divisible by 32. */
162
163 #ifndef TX_MAX_PRIORITIES
164 #define TX_MAX_PRIORITIES 32
165 #endif
166
167
168 /* Define the minimum stack for a ThreadX thread on this processor. If the size supplied during
169 thread creation is less than this value, the thread create call will return an error. */
170
171 #ifndef TX_MINIMUM_STACK
172 #define TX_MINIMUM_STACK 200 /* Minimum stack size for this port */
173 #endif
174
175
176 /* Define the system timer thread's default stack size and priority. These are only applicable
177 if TX_TIMER_PROCESS_IN_ISR is not defined. */
178
179 #ifndef TX_TIMER_THREAD_STACK_SIZE
180 #define TX_TIMER_THREAD_STACK_SIZE 1024 /* Default timer thread stack size */
181 #endif
182
183 #ifndef TX_TIMER_THREAD_PRIORITY
184 #define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
185 #endif
186
187
188 /* Define various constants for the ThreadX Cortex-M23 port. */
189
190 #define TX_INT_DISABLE 1 /* Disable interrupts */
191 #define TX_INT_ENABLE 0 /* Enable interrupts */
192
193
194 /* Define the clock source for trace event entry time stamp. The following two item are port specific.
195 For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
196 source constants would be:
197
198 #define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
199 #define TX_TRACE_TIME_MASK 0x0000FFFFUL
200
201 */
202
203 #ifndef TX_MISRA_ENABLE
204 #ifndef TX_TRACE_TIME_SOURCE
205 #define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
206 #endif
207 #else
208 ULONG _tx_misra_time_stamp_get(VOID);
209 #define TX_TRACE_TIME_SOURCE _tx_misra_time_stamp_get()
210 #endif
211
212 #ifndef TX_TRACE_TIME_MASK
213 #define TX_TRACE_TIME_MASK 0xFFFFFFFFUL
214 #endif
215
216
217 /* Define the port specific options for the _tx_build_options variable. This variable indicates
218 how the ThreadX library was built. */
219
220 #define TX_PORT_SPECIFIC_BUILD_OPTIONS (0)
221
222
223 /* Define the in-line initialization constant so that modules with in-line
224 initialization capabilities can prevent their initialization from being
225 a function call. */
226
227 #ifdef TX_MISRA_ENABLE
228 #define TX_DISABLE_INLINE
229 #else
230 #define TX_INLINE_INITIALIZATION
231 #endif
232
233
234 /* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
235 disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
236 checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
237 define is negated, thereby forcing the stack fill which is necessary for the stack checking
238 logic. */
239
240 #ifndef TX_MISRA_ENABLE
241 #ifdef TX_ENABLE_STACK_CHECKING
242 #undef TX_DISABLE_STACK_FILLING
243 #endif
244 #endif
245
246
247 /* Define the TX_THREAD control block extensions for this port. The main reason
248 for the multiple macros is so that backward compatibility can be maintained with
249 existing ThreadX kernel awareness modules. */
250
251 #define TX_THREAD_EXTENSION_0
252 #define TX_THREAD_EXTENSION_1
253 #ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
254 /* IAR library support */
255 #if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
256 /* ThreadX in non-secure zone with calls to secure zone. */
257 #define TX_THREAD_EXTENSION_2 VOID *tx_thread_module_instance_ptr; \
258 VOID *tx_thread_module_entry_info_ptr; \
259 ULONG tx_thread_module_current_user_mode; \
260 ULONG tx_thread_module_user_mode; \
261 ULONG tx_thread_module_saved_lr; \
262 VOID *tx_thread_module_kernel_stack_start; \
263 VOID *tx_thread_module_kernel_stack_end; \
264 ULONG tx_thread_module_kernel_stack_size; \
265 VOID *tx_thread_module_stack_ptr; \
266 VOID *tx_thread_module_stack_start; \
267 VOID *tx_thread_module_stack_end; \
268 ULONG tx_thread_module_stack_size; \
269 VOID *tx_thread_module_reserved; \
270 VOID *tx_thread_secure_stack_context; \
271 VOID *tx_thread_iar_tls_pointer;
272 #else
273 #define TX_THREAD_EXTENSION_2 VOID *tx_thread_module_instance_ptr; \
274 VOID *tx_thread_module_entry_info_ptr; \
275 ULONG tx_thread_module_current_user_mode; \
276 ULONG tx_thread_module_user_mode; \
277 ULONG tx_thread_module_saved_lr; \
278 VOID *tx_thread_module_kernel_stack_start; \
279 VOID *tx_thread_module_kernel_stack_end; \
280 ULONG tx_thread_module_kernel_stack_size; \
281 VOID *tx_thread_module_stack_ptr; \
282 VOID *tx_thread_module_stack_start; \
283 VOID *tx_thread_module_stack_end; \
284 ULONG tx_thread_module_stack_size; \
285 VOID *tx_thread_module_reserved; \
286 VOID *tx_thread_iar_tls_pointer;
287 #endif
288
289 #else
290 /* No IAR library support */
291 #if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
292 /* ThreadX in non-secure zone with calls to secure zone. */
293 #define TX_THREAD_EXTENSION_2 VOID *tx_thread_module_instance_ptr; \
294 VOID *tx_thread_module_entry_info_ptr; \
295 ULONG tx_thread_module_current_user_mode; \
296 ULONG tx_thread_module_user_mode; \
297 ULONG tx_thread_module_saved_lr; \
298 VOID *tx_thread_module_kernel_stack_start; \
299 VOID *tx_thread_module_kernel_stack_end; \
300 ULONG tx_thread_module_kernel_stack_size; \
301 VOID *tx_thread_module_stack_ptr; \
302 VOID *tx_thread_module_stack_start; \
303 VOID *tx_thread_module_stack_end; \
304 ULONG tx_thread_module_stack_size; \
305 VOID *tx_thread_module_reserved; \
306 VOID *tx_thread_secure_stack_context;
307 #else
308 /* ThreadX in only one zone. */
309 #define TX_THREAD_EXTENSION_2 VOID *tx_thread_module_instance_ptr; \
310 VOID *tx_thread_module_entry_info_ptr; \
311 ULONG tx_thread_module_current_user_mode; \
312 ULONG tx_thread_module_user_mode; \
313 ULONG tx_thread_module_saved_lr; \
314 VOID *tx_thread_module_kernel_stack_start; \
315 VOID *tx_thread_module_kernel_stack_end; \
316 ULONG tx_thread_module_kernel_stack_size; \
317 VOID *tx_thread_module_stack_ptr; \
318 VOID *tx_thread_module_stack_start; \
319 VOID *tx_thread_module_stack_end; \
320 ULONG tx_thread_module_stack_size; \
321 VOID *tx_thread_module_reserved;
322 #endif
323
324 #endif
325 #ifndef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
326 #define TX_THREAD_EXTENSION_3
327 #else
328 #define TX_THREAD_EXTENSION_3 unsigned long long tx_thread_execution_time_total; \
329 unsigned long long tx_thread_execution_time_last_start;
330 #endif
331
332
333 /* Define the port extensions of the remaining ThreadX objects. */
334
335 #define TX_BLOCK_POOL_EXTENSION
336 #define TX_BYTE_POOL_EXTENSION
337 #define TX_MUTEX_EXTENSION
338 #define TX_EVENT_FLAGS_GROUP_EXTENSION VOID *tx_event_flags_group_module_instance; \
339 VOID (*tx_event_flags_group_set_module_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *group_ptr);
340
341 #define TX_QUEUE_EXTENSION VOID *tx_queue_module_instance; \
342 VOID (*tx_queue_send_module_notify)(struct TX_QUEUE_STRUCT *queue_ptr);
343
344 #define TX_SEMAPHORE_EXTENSION VOID *tx_semaphore_module_instance; \
345 VOID (*tx_semaphore_put_module_notify)(struct TX_SEMAPHORE_STRUCT *semaphore_ptr);
346
347 #define TX_TIMER_EXTENSION VOID *tx_timer_module_instance; \
348 VOID (*tx_timer_module_expiration_function)(ULONG id);
349
350
351 /* Define the user extension field of the thread control block. Nothing
352 additional is needed for this port so it is defined as white space. */
353
354 #ifndef TX_THREAD_USER_EXTENSION
355 #define TX_THREAD_USER_EXTENSION
356 #endif
357
358
359 /* Define the macros for processing extensions in tx_thread_create, tx_thread_delete,
360 tx_thread_shell_entry, and tx_thread_terminate. */
361
362
363 #ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
364 void *_tx_iar_create_per_thread_tls_area(void);
365 void _tx_iar_destroy_per_thread_tls_area(void *tls_ptr);
366 void __iar_Initlocks(void);
367
368 #define TX_THREAD_CREATE_EXTENSION(thread_ptr) thread_ptr -> tx_thread_iar_tls_pointer = _tx_iar_create_per_thread_tls_area();
369
370 #if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
371 #define TX_THREAD_DELETE_EXTENSION(thread_ptr) do {_tx_iar_destroy_per_thread_tls_area(thread_ptr -> tx_thread_iar_tls_pointer); \
372 thread_ptr -> tx_thread_iar_tls_pointer = TX_NULL; } while(0); \
373 if(thread_ptr -> tx_thread_secure_stack_context){_tx_thread_secure_stack_free(thread_ptr);}
374 #else
375 #define TX_THREAD_DELETE_EXTENSION(thread_ptr) do {_tx_iar_destroy_per_thread_tls_area(thread_ptr -> tx_thread_iar_tls_pointer); \
376 thread_ptr -> tx_thread_iar_tls_pointer = TX_NULL; } while(0);
377 #endif
378 #define TX_PORT_SPECIFIC_PRE_SCHEDULER_INITIALIZATION do {__iar_Initlocks();} while(0);
379 #else /* No IAR library support. */
380 #define TX_THREAD_CREATE_EXTENSION(thread_ptr)
381 #if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
382 #define TX_THREAD_DELETE_EXTENSION(thread_ptr) if(thread_ptr -> tx_thread_secure_stack_context){_tx_thread_secure_stack_free(thread_ptr);}
383 #else
384 #define TX_THREAD_DELETE_EXTENSION(thread_ptr)
385 #endif
386 #endif /* TX_ENABLE_IAR_LIBRARY_SUPPORT */
387
388 #if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
389 /* Define the size of the secure stack for the timer thread and use the extension to allocate the secure stack. */
390 #define TX_TIMER_THREAD_SECURE_STACK_SIZE 256
391 #define TX_TIMER_INITIALIZE_EXTENSION(status) _tx_thread_secure_stack_allocate(&_tx_timer_thread, TX_TIMER_THREAD_SECURE_STACK_SIZE);
392 #endif
393
394 #if defined(__ARMVFP__) || defined(__ARM_PCS_VFP) || defined(__TARGET_FPU_VFP)
395
396 #ifdef TX_MISRA_ENABLE
397
398 ULONG _tx_misra_control_get(void);
399 void _tx_misra_control_set(ULONG value);
400 ULONG _tx_misra_fpccr_get(void);
401 void _tx_misra_vfp_touch(void);
402
403 #else /* TX_MISRA_ENABLE not defined */
404
405 /* Define some helper functions (these are intrinsics in some compilers). */
406 #ifdef __GNUC__
__get_CONTROL(void)407 __attribute__( ( always_inline ) ) static inline ULONG __get_CONTROL(void)
408 {
409 ULONG control_value;
410
411 __asm__ volatile (" MRS %0,CONTROL ": "=r" (control_value) );
412 return(control_value);
413 }
414
/* Write the Cortex-M CONTROL special register via MSR. The "memory" clobber
   keeps the compiler from reordering memory accesses across the write.
   NOTE(review): Arm recommends an ISB after writing CONTROL; none is issued
   here -- confirm this matches the port's intent. */
__attribute__( ( always_inline ) ) static inline void __set_CONTROL(ULONG control_value)
{
    __asm__ volatile (" MSR CONTROL,%0": : "r" (control_value): "memory" );
}
419
420 #define TX_VFP_TOUCH() __asm__ volatile ("VMOV.F32 s0, s0");
421
422 #endif /* __GNUC__ */
423
424 #ifdef __ICCARM__
425 #define TX_VFP_TOUCH() __asm__ volatile ("VMOV.F32 s0, s0");
426 #endif /* __ICCARM__ */
427
428 #endif /* TX_MISRA_ENABLE */
429
430
431 /* A completed thread falls into _thread_shell_entry and we can simply deactivate the FPU via CONTROL.FPCA
432 in order to ensure no lazy stacking will occur. */
433
434 #ifndef TX_MISRA_ENABLE
435
436 #define TX_THREAD_COMPLETED_EXTENSION(thread_ptr) { \
437 ULONG _tx_vfp_state; \
438 _tx_vfp_state = __get_CONTROL(); \
439 _tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
440 __set_CONTROL(_tx_vfp_state); \
441 }
442 #else
443
444 #define TX_THREAD_COMPLETED_EXTENSION(thread_ptr) { \
445 ULONG _tx_vfp_state; \
446 _tx_vfp_state = _tx_misra_control_get(); \
447 _tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
448 _tx_misra_control_set(_tx_vfp_state); \
449 }
450
451 #endif
452
453 /* A thread can be terminated by another thread, so we first check if it's self-terminating and not in an ISR.
454 If so, deactivate the FPU via CONTROL.FPCA. Otherwise we are in an interrupt or another thread is terminating
455 this one, so if the FPCCR.LSPACT bit is set, we need to save the CONTROL.FPCA state, touch the FPU to flush
456 the lazy FPU save, then restore the CONTROL.FPCA state. */
457
458 #ifndef TX_MISRA_ENABLE
459
460 #define TX_THREAD_TERMINATED_EXTENSION(thread_ptr) { \
461 ULONG _tx_system_state; \
462 _tx_system_state = TX_THREAD_GET_SYSTEM_STATE(); \
463 if ((_tx_system_state == ((ULONG) 0)) && ((thread_ptr) == _tx_thread_current_ptr)) \
464 { \
465 ULONG _tx_vfp_state; \
466 _tx_vfp_state = __get_CONTROL(); \
467 _tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
468 __set_CONTROL(_tx_vfp_state); \
469 } \
470 else \
471 { \
472 ULONG _tx_fpccr; \
473 _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
474 _tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
475 if (_tx_fpccr == ((ULONG) 0x01)) \
476 { \
477 ULONG _tx_vfp_state; \
478 _tx_vfp_state = __get_CONTROL(); \
479 _tx_vfp_state = _tx_vfp_state & ((ULONG) 0x4); \
480 TX_VFP_TOUCH(); \
481 if (_tx_vfp_state == ((ULONG) 0)) \
482 { \
483 _tx_vfp_state = __get_CONTROL(); \
484 _tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
485 __set_CONTROL(_tx_vfp_state); \
486 } \
487 } \
488 } \
489 }
490 #else
491
492 #define TX_THREAD_TERMINATED_EXTENSION(thread_ptr) { \
493 ULONG _tx_system_state; \
494 _tx_system_state = TX_THREAD_GET_SYSTEM_STATE(); \
495 if ((_tx_system_state == ((ULONG) 0)) && ((thread_ptr) == _tx_thread_current_ptr)) \
496 { \
497 ULONG _tx_vfp_state; \
498 _tx_vfp_state = _tx_misra_control_get(); \
499 _tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
500 _tx_misra_control_set(_tx_vfp_state); \
501 } \
502 else \
503 { \
504 ULONG _tx_fpccr; \
505 _tx_fpccr = _tx_misra_fpccr_get(); \
506 _tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
507 if (_tx_fpccr == ((ULONG) 0x01)) \
508 { \
509 ULONG _tx_vfp_state; \
510 _tx_vfp_state = _tx_misra_control_get(); \
511 _tx_vfp_state = _tx_vfp_state & ((ULONG) 0x4); \
512 _tx_misra_vfp_touch(); \
513 if (_tx_vfp_state == ((ULONG) 0)) \
514 { \
515 _tx_vfp_state = _tx_misra_control_get(); \
516 _tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
517 _tx_misra_control_set(_tx_vfp_state); \
518 } \
519 } \
520 } \
521 }
522 #endif
523
524 #else /* No VFP in use */
525
526 #define TX_THREAD_COMPLETED_EXTENSION(thread_ptr)
527 #define TX_THREAD_TERMINATED_EXTENSION(thread_ptr)
528
529 #endif /* defined(__ARMVFP__) || defined(__ARM_PCS_VFP) || defined(__TARGET_FPU_VFP) */
530
531
532 /* Define the ThreadX object creation extensions for the remaining objects. */
533
534 #define TX_BLOCK_POOL_CREATE_EXTENSION(pool_ptr)
535 #define TX_BYTE_POOL_CREATE_EXTENSION(pool_ptr)
536 #define TX_EVENT_FLAGS_GROUP_CREATE_EXTENSION(group_ptr)
537 #define TX_MUTEX_CREATE_EXTENSION(mutex_ptr)
538 #define TX_QUEUE_CREATE_EXTENSION(queue_ptr)
539 #define TX_SEMAPHORE_CREATE_EXTENSION(semaphore_ptr)
540 #define TX_TIMER_CREATE_EXTENSION(timer_ptr)
541
542
543 /* Define the ThreadX object deletion extensions for the remaining objects. */
544
545 #define TX_BLOCK_POOL_DELETE_EXTENSION(pool_ptr)
546 #define TX_BYTE_POOL_DELETE_EXTENSION(pool_ptr)
547 #define TX_EVENT_FLAGS_GROUP_DELETE_EXTENSION(group_ptr)
548 #define TX_MUTEX_DELETE_EXTENSION(mutex_ptr)
549 #define TX_QUEUE_DELETE_EXTENSION(queue_ptr)
550 #define TX_SEMAPHORE_DELETE_EXTENSION(semaphore_ptr)
551 #define TX_TIMER_DELETE_EXTENSION(timer_ptr)
552
553
554 /* Define the get system state macro. */
555
556 #ifndef TX_THREAD_GET_SYSTEM_STATE
557 #ifndef TX_MISRA_ENABLE
558
559 #ifdef __GNUC__ /* GCC and ARM Compiler 6 */
560
/* Read the IPSR special register; nonzero means an exception/interrupt is active. */
__attribute__( ( always_inline ) ) static inline unsigned int __get_IPSR(void)
{
unsigned int ipsr = 0U;

    __asm__ volatile (" MRS %0,IPSR " : "=r" (ipsr));
    return ipsr;
}
567
568 #define TX_THREAD_GET_SYSTEM_STATE() (_tx_thread_system_state | __get_IPSR())
569
570 #elif defined(__ICCARM__) /* IAR */
571
572 #define TX_THREAD_GET_SYSTEM_STATE() (_tx_thread_system_state | __get_IPSR())
573
574 #endif /* TX_THREAD_GET_SYSTEM_STATE for different compilers */
575
576 #else /* TX_MISRA_ENABLE is defined, use MISRA function. */
577 ULONG _tx_misra_ipsr_get(VOID);
578 #define TX_THREAD_GET_SYSTEM_STATE() (_tx_thread_system_state | _tx_misra_ipsr_get())
579 #endif /* TX_MISRA_ENABLE */
580 #endif /* TX_THREAD_GET_SYSTEM_STATE */
581
582
583 /* Define the check for whether or not to call the _tx_thread_system_return function. A non-zero value
584 indicates that _tx_thread_system_return should not be called. This overrides the definition in tx_thread.h
585 for Cortex-M since so we don't waste time checking the _tx_thread_system_state variable that is always
586 zero after initialization for Cortex-M ports. */
587
588 #ifndef TX_THREAD_SYSTEM_RETURN_CHECK
589 #define TX_THREAD_SYSTEM_RETURN_CHECK(c) (c) = ((ULONG) _tx_thread_preempt_disable);
590 #endif
591
592 #if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
593 /* Initialize secure stacks for threads calling secure functions. */
594 extern void _tx_thread_secure_stack_initialize(void);
595 #define TX_PORT_SPECIFIC_PRE_INITIALIZATION _tx_thread_secure_stack_initialize();
596 #endif
597
598 /* Define the macro to ensure _tx_thread_preempt_disable is set early in initialization in order to
599 prevent early scheduling on Cortex-M parts. */
600
601 #define TX_PORT_SPECIFIC_POST_INITIALIZATION _tx_thread_preempt_disable++;
602
603
604
605
606 #ifndef TX_DISABLE_INLINE
607
608 /* Define the TX_LOWEST_SET_BIT_CALCULATE macro for each compiler. */
609 #ifdef __ICCARM__ /* IAR Compiler */
610 #define TX_LOWEST_SET_BIT_CALCULATE(m, b) (b) = (UINT) __CLZ(__RBIT((m)));
611 #elif defined(__GNUC__) /* GCC and AC6 Compiler */
612 #define TX_LOWEST_SET_BIT_CALCULATE(m, b) __asm__ volatile (" RBIT %0,%1 ": "=r" (m) : "r" (m) ); \
613 __asm__ volatile (" CLZ %0,%1 ": "=r" (b) : "r" (m) );
614 #endif
615
616 /* Define the interrupt disable/restore macros for each compiler. */
617
618 #ifdef __GNUC__ /* GCC and AC6 */
619
/* Disable all configurable interrupts and return the previous PRIMASK value
   so it can later be handed to __restore_interrupt(). */
__attribute__( ( always_inline ) ) static inline unsigned int __disable_interrupt(void)
{
unsigned int old_primask = 0U;

    /* Capture the current PRIMASK first, then mask interrupts with CPSID. */
    __asm__ volatile (" MRS %0,PRIMASK " : "=r" (old_primask));
    __asm__ volatile (" CPSID i" : : : "memory");
    return old_primask;
}
628
/* Restore PRIMASK to a value previously captured by __disable_interrupt() or
   __get_primask_value(). The "memory" clobber orders memory accesses against
   the interrupt-state change. */
__attribute__( ( always_inline ) ) static inline void __restore_interrupt(unsigned int primask_value)
{
    __asm__ volatile (" MSR PRIMASK,%0": : "r" (primask_value): "memory" );
}
633
/* Read the current PRIMASK value without changing it. */
__attribute__( ( always_inline ) ) static inline unsigned int __get_primask_value(void)
{
unsigned int pm = 0U;

    __asm__ volatile (" MRS %0,PRIMASK " : "=r" (pm));
    return pm;
}
641
/* Enable all configurable interrupts (clear PRIMASK via CPSIE). */
__attribute__( ( always_inline ) ) static inline void __enable_interrupt(void)
{
    __asm__ volatile (" CPSIE i": : : "memory" );
}
646
647
_tx_thread_system_return_inline(void)648 __attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_inline(void)
649 {
650 unsigned int interrupt_save;
651
652 /* Set PendSV to invoke ThreadX scheduler. */
653 *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
654 if (__get_IPSR() == 0)
655 {
656 interrupt_save = __get_primask_value();
657 __enable_interrupt();
658 __restore_interrupt(interrupt_save);
659 }
660 }
661
662
663 #define TX_INTERRUPT_SAVE_AREA UINT interrupt_save;
664 #define TX_DISABLE interrupt_save = __disable_interrupt();
665 #define TX_RESTORE __restore_interrupt(interrupt_save);
666
667 #elif defined(__ICCARM__) /* IAR */
668
/* Request a context switch by pending PendSV. When called from thread mode
   (IPSR == 0), briefly enable interrupts so the pended PendSV is taken
   immediately, then restore the saved interrupt state. Uses IAR intrinsics
   (__get_IPSR, __get_interrupt_state, __enable_interrupt, __set_interrupt_state). */
static void _tx_thread_system_return_inline(void)
{
__istate_t interrupt_save;

    /* Set PendSV to invoke ThreadX scheduler. */
    *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
    if (__get_IPSR() == 0)
    {
        interrupt_save = __get_interrupt_state();
        __enable_interrupt();
        __set_interrupt_state(interrupt_save);
    }
}
682
683 #define TX_INTERRUPT_SAVE_AREA __istate_t interrupt_save;
684 #define TX_DISABLE {interrupt_save = __get_interrupt_state();__disable_interrupt();};
685 #define TX_RESTORE {__set_interrupt_state(interrupt_save);};
686
687 #endif /* Interrupt disable/restore macros for each compiler. */
688
689 /* Redefine _tx_thread_system_return for improved performance. */
690
691 #define _tx_thread_system_return _tx_thread_system_return_inline
692
693
694 #else /* TX_DISABLE_INLINE is defined */
695
696 UINT _tx_thread_interrupt_disable(VOID);
697 VOID _tx_thread_interrupt_restore(UINT previous_posture);
698
699 #define TX_INTERRUPT_SAVE_AREA register UINT interrupt_save;
700
701 #define TX_DISABLE interrupt_save = _tx_thread_interrupt_disable();
702 #define TX_RESTORE _tx_thread_interrupt_restore(interrupt_save);
703 #endif /* TX_DISABLE_INLINE */
704
705
706 /* Define the version ID of ThreadX. This may be utilized by the application. */
707
/* Exactly one translation unit defines TX_THREAD_INIT and instantiates the
   version string; all others see an extern declaration. MISRA builds use a
   fixed-size array declaration instead of an incomplete one. */
#ifdef TX_THREAD_INIT
CHAR                            _tx_version_id[] =
                                    "Copyright (c) 2024 Microsoft Corporation. * ThreadX Cortex-M23/IAR Version 6.4.1 *";
#else
#ifdef TX_MISRA_ENABLE
extern  CHAR                    _tx_version_id[100];
#else
extern  CHAR                    _tx_version_id[];
#endif
#endif
718
719
720 #endif
721