1 /**************************************************************************/
2 /* */
3 /* Copyright (c) Microsoft Corporation. All rights reserved. */
4 /* */
5 /* This software is licensed under the Microsoft Software License */
6 /* Terms for Microsoft Azure RTOS. Full text of the license can be */
7 /* found in the LICENSE file at https://aka.ms/AzureRTOS_EULA */
8 /* and in the root directory of this software. */
9 /* */
10 /**************************************************************************/
11
12
13 /**************************************************************************/
14 /**************************************************************************/
15 /** */
16 /** ThreadX Component */
17 /** */
18 /** Port Specific */
19 /** */
20 /**************************************************************************/
21 /**************************************************************************/
22
23
24 /**************************************************************************/
25 /* */
26 /* PORT SPECIFIC C INFORMATION RELEASE */
27 /* */
28 /* tx_port.h Cortex-M23/IAR */
/*                                                           6.3.0        */
30 /* */
31 /* AUTHOR */
32 /* */
33 /* Scott Larson, Microsoft Corporation */
34 /* */
35 /* DESCRIPTION */
36 /* */
37 /* This file contains data type definitions that make the ThreadX */
38 /* real-time kernel function identically on a variety of different */
39 /* processor architectures. For example, the size or number of bits */
40 /* in an "int" data type vary between microprocessor architectures and */
41 /* even C compilers for the same microprocessor. ThreadX does not */
42 /* directly use native C data types. Instead, ThreadX creates its */
43 /* own special types that can be mapped to actual data types by this */
44 /* file to guarantee consistency in the interface and functionality. */
45 /* */
46 /* RELEASE HISTORY */
47 /* */
48 /* DATE NAME DESCRIPTION */
49 /* */
50 /* 04-02-2021 Scott Larson Initial Version 6.1.6 */
51 /* 04-25-2022 Scott Larson Modified comments and added */
52 /* volatile to registers, */
53 /* resulting in version 6.1.11 */
54 /* 07-29-2022 Scott Larson Modified comments and changed */
55 /* secure stack initialization */
56 /* macro to port-specific, */
57 /* resulting in version 6.1.12 */
58 /* */
59 /**************************************************************************/
60
61 #ifndef TX_PORT_H
62 #define TX_PORT_H
63
64
65 /* Determine if the optional ThreadX user define file should be used. */
66
67 #ifdef TX_INCLUDE_USER_DEFINE_FILE
68
69 /* Yes, include the user defines in tx_user.h. The defines in this file may
70 alternately be defined on the command line. */
71
72 #include "tx_user.h"
73 #endif
74
75
76 /* Define compiler library include files. */
77
78 #include <stdlib.h>
79 #include <string.h>
80
81 #ifdef __ICCARM__
82 #include <intrinsics.h> /* IAR Intrinsics */
83 #define __asm__ __asm /* Define to make all inline asm look similar */
84 #ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
85 #include <yvals.h>
86 #endif
87 #endif /* __ICCARM__ */
88
89
90 /* Define ThreadX basic types for this port. */
91
/* VOID is a macro rather than a typedef, per ThreadX convention. */
#define VOID void
typedef char CHAR;
typedef unsigned char UCHAR;
typedef int INT;
typedef unsigned int UINT;
typedef long LONG;
typedef unsigned long ULONG;
typedef unsigned long long ULONG64;
typedef short SHORT;
typedef unsigned short USHORT;
/* Tell the ThreadX core that this port supplies the ULONG64 type. */
#define ULONG64_DEFINED
103
104 /* Function prototypes for this port. */
/* Forward declaration; the full thread control block is defined elsewhere in ThreadX. */
struct TX_THREAD_STRUCT;
/* Secure-stack services: the _txe_ versions are the error-checking front-ends,
   the _tx_ versions perform the actual work (see the API mapping below). */
UINT _txe_thread_secure_stack_allocate(struct TX_THREAD_STRUCT *thread_ptr, ULONG stack_size);
UINT _txe_thread_secure_stack_free(struct TX_THREAD_STRUCT *thread_ptr);
UINT _tx_thread_secure_stack_allocate(struct TX_THREAD_STRUCT *tx_thread, ULONG stack_size);
UINT _tx_thread_secure_stack_free(struct TX_THREAD_STRUCT *tx_thread);
110
111 /* This port overrides tx_thread_stack_error_notify with an architecture specific version */
112 #define TX_PORT_THREAD_STACK_ERROR_NOTIFY
113
114 /* This port overrides tx_thread_stack_error_handler with an architecture specific version */
115 #define TX_PORT_THREAD_STACK_ERROR_HANDLER
116
117 /* This hardware has stack checking that we take advantage of - do NOT define. */
118 #ifdef TX_ENABLE_STACK_CHECKING
119 #error "Do not define TX_ENABLE_STACK_CHECKING"
120 #endif
121
122 /* If user does not want to terminate thread on stack overflow,
123 #define the TX_THREAD_NO_TERMINATE_STACK_ERROR symbol.
124 The thread will be rescheduled and continue to cause the exception.
125 It is suggested user code handle this by registering a notification with the
126 tx_thread_stack_error_notify function. */
127 /*#define TX_THREAD_NO_TERMINATE_STACK_ERROR */
128
129 /* Define the system API mappings based on the error checking
130 selected by the user. Note: this section is only applicable to
131 application source code, hence the conditional that turns off this
132 stuff when the include file is processed by the ThreadX source. */
133
134 #ifndef TX_SOURCE_CODE
135
136
137 /* Determine if error checking is desired. If so, map API functions
138 to the appropriate error checking front-ends. Otherwise, map API
139 functions to the core functions that actually perform the work.
140 Note: error checking is enabled by default. */
141
142 #ifdef TX_DISABLE_ERROR_CHECKING
143
144 /* Services without error checking. */
145
146 #define tx_thread_secure_stack_allocate _tx_thread_secure_stack_allocate
147 #define tx_thread_secure_stack_free _tx_thread_secure_stack_free
148
149 #else
150
151 /* Services with error checking. */
152
153 #define tx_thread_secure_stack_allocate _txe_thread_secure_stack_allocate
154 #define tx_thread_secure_stack_free _txe_thread_secure_stack_free
155
156 #endif
157 #endif
158
159
160
161 /* Define the priority levels for ThreadX. Legal values range
162 from 32 to 1024 and MUST be evenly divisible by 32. */
163
164 #ifndef TX_MAX_PRIORITIES
165 #define TX_MAX_PRIORITIES 32
166 #endif
167
168
169 /* Define the minimum stack for a ThreadX thread on this processor. If the size supplied during
170 thread creation is less than this value, the thread create call will return an error. */
171
172 #ifndef TX_MINIMUM_STACK
173 #define TX_MINIMUM_STACK 200 /* Minimum stack size for this port */
174 #endif
175
176
177 /* Define the system timer thread's default stack size and priority. These are only applicable
178 if TX_TIMER_PROCESS_IN_ISR is not defined. */
179
180 #ifndef TX_TIMER_THREAD_STACK_SIZE
181 #define TX_TIMER_THREAD_STACK_SIZE 1024 /* Default timer thread stack size */
182 #endif
183
184 #ifndef TX_TIMER_THREAD_PRIORITY
185 #define TX_TIMER_THREAD_PRIORITY 0 /* Default timer thread priority */
186 #endif
187
188
189 /* Define various constants for the ThreadX Cortex-M23 port. */
190
191 #define TX_INT_DISABLE 1 /* Disable interrupts */
192 #define TX_INT_ENABLE 0 /* Enable interrupts */
193
194
195 /* Define the clock source for trace event entry time stamp. The following two item are port specific.
196 For example, if the time source is at the address 0x0a800024 and is 16-bits in size, the clock
197 source constants would be:
198
199 #define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0x0a800024)
200 #define TX_TRACE_TIME_MASK 0x0000FFFFUL
201
202 */
203
204 #ifndef TX_MISRA_ENABLE
205 #ifndef TX_TRACE_TIME_SOURCE
206 #define TX_TRACE_TIME_SOURCE *((volatile ULONG *) 0xE0001004)
207 #endif
208 #else
209 ULONG _tx_misra_time_stamp_get(VOID);
210 #define TX_TRACE_TIME_SOURCE _tx_misra_time_stamp_get()
211 #endif
212
213 #ifndef TX_TRACE_TIME_MASK
214 #define TX_TRACE_TIME_MASK 0xFFFFFFFFUL
215 #endif
216
217
218 /* Define the port specific options for the _tx_build_options variable. This variable indicates
219 how the ThreadX library was built. */
220
221 #define TX_PORT_SPECIFIC_BUILD_OPTIONS (0)
222
223
224 /* Define the in-line initialization constant so that modules with in-line
225 initialization capabilities can prevent their initialization from being
226 a function call. */
227
228 #ifdef TX_MISRA_ENABLE
229 #define TX_DISABLE_INLINE
230 #else
231 #define TX_INLINE_INITIALIZATION
232 #endif
233
234
235 /* Determine whether or not stack checking is enabled. By default, ThreadX stack checking is
236 disabled. When the following is defined, ThreadX thread stack checking is enabled. If stack
237 checking is enabled (TX_ENABLE_STACK_CHECKING is defined), the TX_DISABLE_STACK_FILLING
238 define is negated, thereby forcing the stack fill which is necessary for the stack checking
239 logic. */
240
241 #ifndef TX_MISRA_ENABLE
242 #ifdef TX_ENABLE_STACK_CHECKING
243 #undef TX_DISABLE_STACK_FILLING
244 #endif
245 #endif
246
247
248 /* Define the TX_THREAD control block extensions for this port. The main reason
249 for the multiple macros is so that backward compatibility can be maintained with
250 existing ThreadX kernel awareness modules. */
251
#define TX_THREAD_EXTENSION_0
#define TX_THREAD_EXTENSION_1
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
/* IAR library support */
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
/* ThreadX in non-secure zone with calls to secure zone. */
/* Extension adds module bookkeeping fields, the secure-stack context used by
   the secure-stack services above, and the per-thread IAR TLS pointer. */
#define TX_THREAD_EXTENSION_2 VOID *tx_thread_module_instance_ptr; \
                              VOID *tx_thread_module_entry_info_ptr; \
                              ULONG tx_thread_module_current_user_mode; \
                              ULONG tx_thread_module_user_mode; \
                              ULONG tx_thread_module_saved_lr; \
                              VOID *tx_thread_module_kernel_stack_start; \
                              VOID *tx_thread_module_kernel_stack_end; \
                              ULONG tx_thread_module_kernel_stack_size; \
                              VOID *tx_thread_module_stack_ptr; \
                              VOID *tx_thread_module_stack_start; \
                              VOID *tx_thread_module_stack_end; \
                              ULONG tx_thread_module_stack_size; \
                              VOID *tx_thread_module_reserved; \
                              VOID *tx_thread_secure_stack_context; \
                              VOID *tx_thread_iar_tls_pointer;
#else
/* ThreadX in a single zone: no secure-stack context, but keep the IAR TLS pointer. */
#define TX_THREAD_EXTENSION_2 VOID *tx_thread_module_instance_ptr; \
                              VOID *tx_thread_module_entry_info_ptr; \
                              ULONG tx_thread_module_current_user_mode; \
                              ULONG tx_thread_module_user_mode; \
                              ULONG tx_thread_module_saved_lr; \
                              VOID *tx_thread_module_kernel_stack_start; \
                              VOID *tx_thread_module_kernel_stack_end; \
                              ULONG tx_thread_module_kernel_stack_size; \
                              VOID *tx_thread_module_stack_ptr; \
                              VOID *tx_thread_module_stack_start; \
                              VOID *tx_thread_module_stack_end; \
                              ULONG tx_thread_module_stack_size; \
                              VOID *tx_thread_module_reserved; \
                              VOID *tx_thread_iar_tls_pointer;
#endif

#else
/* No IAR library support */
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
/* ThreadX in non-secure zone with calls to secure zone. */
#define TX_THREAD_EXTENSION_2 VOID *tx_thread_module_instance_ptr; \
                              VOID *tx_thread_module_entry_info_ptr; \
                              ULONG tx_thread_module_current_user_mode; \
                              ULONG tx_thread_module_user_mode; \
                              ULONG tx_thread_module_saved_lr; \
                              VOID *tx_thread_module_kernel_stack_start; \
                              VOID *tx_thread_module_kernel_stack_end; \
                              ULONG tx_thread_module_kernel_stack_size; \
                              VOID *tx_thread_module_stack_ptr; \
                              VOID *tx_thread_module_stack_start; \
                              VOID *tx_thread_module_stack_end; \
                              ULONG tx_thread_module_stack_size; \
                              VOID *tx_thread_module_reserved; \
                              VOID *tx_thread_secure_stack_context;
#else
/* ThreadX in only one zone. */
#define TX_THREAD_EXTENSION_2 VOID *tx_thread_module_instance_ptr; \
                              VOID *tx_thread_module_entry_info_ptr; \
                              ULONG tx_thread_module_current_user_mode; \
                              ULONG tx_thread_module_user_mode; \
                              ULONG tx_thread_module_saved_lr; \
                              VOID *tx_thread_module_kernel_stack_start; \
                              VOID *tx_thread_module_kernel_stack_end; \
                              ULONG tx_thread_module_kernel_stack_size; \
                              VOID *tx_thread_module_stack_ptr; \
                              VOID *tx_thread_module_stack_start; \
                              VOID *tx_thread_module_stack_end; \
                              ULONG tx_thread_module_stack_size; \
                              VOID *tx_thread_module_reserved;
#endif

#endif
/* Extension 3 carries the per-thread execution-time counters when execution
   change notification is enabled; otherwise it is empty. */
#ifndef TX_ENABLE_EXECUTION_CHANGE_NOTIFY
#define TX_THREAD_EXTENSION_3
#else
#define TX_THREAD_EXTENSION_3 unsigned long long tx_thread_execution_time_total; \
                              unsigned long long tx_thread_execution_time_last_start;
#endif
332
333
334 /* Define the port extensions of the remaining ThreadX objects. */
335
#define TX_BLOCK_POOL_EXTENSION
#define TX_BYTE_POOL_EXTENSION
#define TX_MUTEX_EXTENSION
/* Module-aware objects carry the owning module instance pointer and a
   module-level notification callback. */
#define TX_EVENT_FLAGS_GROUP_EXTENSION VOID *tx_event_flags_group_module_instance; \
                                       VOID (*tx_event_flags_group_set_module_notify)(struct TX_EVENT_FLAGS_GROUP_STRUCT *group_ptr);

#define TX_QUEUE_EXTENSION VOID *tx_queue_module_instance; \
                           VOID (*tx_queue_send_module_notify)(struct TX_QUEUE_STRUCT *queue_ptr);

#define TX_SEMAPHORE_EXTENSION VOID *tx_semaphore_module_instance; \
                               VOID (*tx_semaphore_put_module_notify)(struct TX_SEMAPHORE_STRUCT *semaphore_ptr);

#define TX_TIMER_EXTENSION VOID *tx_timer_module_instance; \
                           VOID (*tx_timer_module_expiration_function)(ULONG id);
350
351
352 /* Define the user extension field of the thread control block. Nothing
353 additional is needed for this port so it is defined as white space. */
354
355 #ifndef TX_THREAD_USER_EXTENSION
356 #define TX_THREAD_USER_EXTENSION
357 #endif
358
359
360 /* Define the macros for processing extensions in tx_thread_create, tx_thread_delete,
361 tx_thread_shell_entry, and tx_thread_terminate. */
362
363
#ifdef TX_ENABLE_IAR_LIBRARY_SUPPORT
/* Per-thread IAR TLS area management, provided by the ThreadX IAR support code. */
void *_tx_iar_create_per_thread_tls_area(void);
void _tx_iar_destroy_per_thread_tls_area(void *tls_ptr);
void __iar_Initlocks(void);

/* On thread create, allocate the thread's IAR TLS area. */
#define TX_THREAD_CREATE_EXTENSION(thread_ptr) thread_ptr -> tx_thread_iar_tls_pointer = _tx_iar_create_per_thread_tls_area();

#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
/* On thread delete, destroy the TLS area, then free the secure stack if one
   was allocated.  NOTE(review): this expands to more than one statement, so it
   must not be used as the body of an unbraced if/else. */
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) do {_tx_iar_destroy_per_thread_tls_area(thread_ptr -> tx_thread_iar_tls_pointer); \
                                                   thread_ptr -> tx_thread_iar_tls_pointer = TX_NULL; } while(0); \
                                               if(thread_ptr -> tx_thread_secure_stack_context){_tx_thread_secure_stack_free(thread_ptr);}
#else
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) do {_tx_iar_destroy_per_thread_tls_area(thread_ptr -> tx_thread_iar_tls_pointer); \
                                                   thread_ptr -> tx_thread_iar_tls_pointer = TX_NULL; } while(0);
#endif
/* Initialize the IAR library locks before the scheduler starts. */
#define TX_PORT_SPECIFIC_PRE_SCHEDULER_INITIALIZATION do {__iar_Initlocks();} while(0);
#else /* No IAR library support. */
#define TX_THREAD_CREATE_EXTENSION(thread_ptr)
#if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
/* On thread delete, free the thread's secure stack if one was allocated. */
#define TX_THREAD_DELETE_EXTENSION(thread_ptr) if(thread_ptr -> tx_thread_secure_stack_context){_tx_thread_secure_stack_free(thread_ptr);}
#else
#define TX_THREAD_DELETE_EXTENSION(thread_ptr)
#endif
#endif /* TX_ENABLE_IAR_LIBRARY_SUPPORT */
388
389 #if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
390 /* Define the size of the secure stack for the timer thread and use the extension to allocate the secure stack. */
391 #define TX_TIMER_THREAD_SECURE_STACK_SIZE 256
392 #define TX_TIMER_INITIALIZE_EXTENSION(status) _tx_thread_secure_stack_allocate(&_tx_timer_thread, TX_TIMER_THREAD_SECURE_STACK_SIZE);
393 #endif
394
395 #if defined(__ARMVFP__) || defined(__ARM_PCS_VFP) || defined(__TARGET_FPU_VFP)
396
397 #ifdef TX_MISRA_ENABLE
398
399 ULONG _tx_misra_control_get(void);
400 void _tx_misra_control_set(ULONG value);
401 ULONG _tx_misra_fpccr_get(void);
402 void _tx_misra_vfp_touch(void);
403
404 #else /* TX_MISRA_ENABLE not defined */
405
406 /* Define some helper functions (these are intrinsics in some compilers). */
407 #ifdef __GNUC__
/* Return the value of the Cortex-M CONTROL special register. */
__attribute__( ( always_inline ) ) static inline ULONG __get_CONTROL(void)
{
ULONG control_value;

    __asm__ volatile (" MRS %0,CONTROL ": "=r" (control_value) );
    return(control_value);
}
415
/* Write the Cortex-M CONTROL special register.  The "memory" clobber keeps
   the compiler from reordering memory accesses across the write. */
__attribute__( ( always_inline ) ) static inline void __set_CONTROL(ULONG control_value)
{
    __asm__ volatile (" MSR CONTROL,%0": : "r" (control_value): "memory" );
}
420
421 #define TX_VFP_TOUCH() __asm__ volatile ("VMOV.F32 s0, s0");
422
423 #endif /* __GNUC__ */
424
425 #ifdef __ICCARM__
426 #define TX_VFP_TOUCH() __asm__ volatile ("VMOV.F32 s0, s0");
427 #endif /* __ICCARM__ */
428
429 #endif /* TX_MISRA_ENABLE */
430
431
432 /* A completed thread falls into _thread_shell_entry and we can simply deactivate the FPU via CONTROL.FPCA
433 in order to ensure no lazy stacking will occur. */
434
#ifndef TX_MISRA_ENABLE

/* Clear CONTROL.FPCA (bit 2) so no lazy floating-point state preservation
   remains active for the completed thread. */
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr) { \
        ULONG _tx_vfp_state; \
        _tx_vfp_state = __get_CONTROL(); \
        _tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
        __set_CONTROL(_tx_vfp_state); \
    }
#else

/* MISRA build: same FPCA clear performed through the MISRA wrapper functions. */
#define TX_THREAD_COMPLETED_EXTENSION(thread_ptr) { \
        ULONG _tx_vfp_state; \
        _tx_vfp_state = _tx_misra_control_get(); \
        _tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
        _tx_misra_control_set(_tx_vfp_state); \
    }

#endif
453
454 /* A thread can be terminated by another thread, so we first check if it's self-terminating and not in an ISR.
455 If so, deactivate the FPU via CONTROL.FPCA. Otherwise we are in an interrupt or another thread is terminating
456 this one, so if the FPCCR.LSPACT bit is set, we need to save the CONTROL.FPCA state, touch the FPU to flush
457 the lazy FPU save, then restore the CONTROL.FPCA state. */
458
#ifndef TX_MISRA_ENABLE

/* Self-termination at thread level (system state 0 and current thread):
   simply clear CONTROL.FPCA (bit 2).  Otherwise, if FPCCR.LSPACT (bit 0 of
   the register at 0xE000EF34) shows a pending lazy FP state save, touch the
   FPU to flush it, then restore the prior CONTROL.FPCA state. */
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr) { \
        ULONG _tx_system_state; \
        _tx_system_state = TX_THREAD_GET_SYSTEM_STATE(); \
        if ((_tx_system_state == ((ULONG) 0)) && ((thread_ptr) == _tx_thread_current_ptr)) \
        { \
            ULONG _tx_vfp_state; \
            _tx_vfp_state = __get_CONTROL(); \
            _tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
            __set_CONTROL(_tx_vfp_state); \
        } \
        else \
        { \
            ULONG _tx_fpccr; \
            _tx_fpccr = *((volatile ULONG *) 0xE000EF34); \
            _tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
            if (_tx_fpccr == ((ULONG) 0x01)) \
            { \
                ULONG _tx_vfp_state; \
                _tx_vfp_state = __get_CONTROL(); \
                _tx_vfp_state = _tx_vfp_state & ((ULONG) 0x4); \
                TX_VFP_TOUCH(); \
                if (_tx_vfp_state == ((ULONG) 0)) \
                { \
                    _tx_vfp_state = __get_CONTROL(); \
                    _tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
                    __set_CONTROL(_tx_vfp_state); \
                } \
            } \
        } \
    }
#else

/* MISRA build: identical logic through the MISRA wrapper functions. */
#define TX_THREAD_TERMINATED_EXTENSION(thread_ptr) { \
        ULONG _tx_system_state; \
        _tx_system_state = TX_THREAD_GET_SYSTEM_STATE(); \
        if ((_tx_system_state == ((ULONG) 0)) && ((thread_ptr) == _tx_thread_current_ptr)) \
        { \
            ULONG _tx_vfp_state; \
            _tx_vfp_state = _tx_misra_control_get(); \
            _tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
            _tx_misra_control_set(_tx_vfp_state); \
        } \
        else \
        { \
            ULONG _tx_fpccr; \
            _tx_fpccr = _tx_misra_fpccr_get(); \
            _tx_fpccr = _tx_fpccr & ((ULONG) 0x01); \
            if (_tx_fpccr == ((ULONG) 0x01)) \
            { \
                ULONG _tx_vfp_state; \
                _tx_vfp_state = _tx_misra_control_get(); \
                _tx_vfp_state = _tx_vfp_state & ((ULONG) 0x4); \
                _tx_misra_vfp_touch(); \
                if (_tx_vfp_state == ((ULONG) 0)) \
                { \
                    _tx_vfp_state = _tx_misra_control_get(); \
                    _tx_vfp_state = _tx_vfp_state & ~((ULONG) 0x4); \
                    _tx_misra_control_set(_tx_vfp_state); \
                } \
            } \
        } \
    }
#endif
524
525 #else /* No VFP in use */
526
527 #define TX_THREAD_COMPLETED_EXTENSION(thread_ptr)
528 #define TX_THREAD_TERMINATED_EXTENSION(thread_ptr)
529
530 #endif /* defined(__ARMVFP__) || defined(__ARM_PCS_VFP) || defined(__TARGET_FPU_VFP) */
531
532
533 /* Define the ThreadX object creation extensions for the remaining objects. */
534
535 #define TX_BLOCK_POOL_CREATE_EXTENSION(pool_ptr)
536 #define TX_BYTE_POOL_CREATE_EXTENSION(pool_ptr)
537 #define TX_EVENT_FLAGS_GROUP_CREATE_EXTENSION(group_ptr)
538 #define TX_MUTEX_CREATE_EXTENSION(mutex_ptr)
539 #define TX_QUEUE_CREATE_EXTENSION(queue_ptr)
540 #define TX_SEMAPHORE_CREATE_EXTENSION(semaphore_ptr)
541 #define TX_TIMER_CREATE_EXTENSION(timer_ptr)
542
543
544 /* Define the ThreadX object deletion extensions for the remaining objects. */
545
546 #define TX_BLOCK_POOL_DELETE_EXTENSION(pool_ptr)
547 #define TX_BYTE_POOL_DELETE_EXTENSION(pool_ptr)
548 #define TX_EVENT_FLAGS_GROUP_DELETE_EXTENSION(group_ptr)
549 #define TX_MUTEX_DELETE_EXTENSION(mutex_ptr)
550 #define TX_QUEUE_DELETE_EXTENSION(queue_ptr)
551 #define TX_SEMAPHORE_DELETE_EXTENSION(semaphore_ptr)
552 #define TX_TIMER_DELETE_EXTENSION(timer_ptr)
553
554
555 /* Define the get system state macro. */
556
557 #ifndef TX_THREAD_GET_SYSTEM_STATE
558 #ifndef TX_MISRA_ENABLE
559
560 #ifdef __GNUC__ /* GCC and ARM Compiler 6 */
561
/* Return the IPSR register: 0 at thread level, nonzero inside an exception/ISR. */
__attribute__( ( always_inline ) ) static inline unsigned int __get_IPSR(void)
{
unsigned int ipsr_value;
    __asm__ volatile (" MRS %0,IPSR ": "=r" (ipsr_value) );
    return(ipsr_value);
}
568
569 #define TX_THREAD_GET_SYSTEM_STATE() (_tx_thread_system_state | __get_IPSR())
570
571 #elif defined(__ICCARM__) /* IAR */
572
573 #define TX_THREAD_GET_SYSTEM_STATE() (_tx_thread_system_state | __get_IPSR())
574
575 #endif /* TX_THREAD_GET_SYSTEM_STATE for different compilers */
576
577 #else /* TX_MISRA_ENABLE is defined, use MISRA function. */
578 ULONG _tx_misra_ipsr_get(VOID);
579 #define TX_THREAD_GET_SYSTEM_STATE() (_tx_thread_system_state | _tx_misra_ipsr_get())
580 #endif /* TX_MISRA_ENABLE */
581 #endif /* TX_THREAD_GET_SYSTEM_STATE */
582
583
584 /* Define the check for whether or not to call the _tx_thread_system_return function. A non-zero value
585 indicates that _tx_thread_system_return should not be called. This overrides the definition in tx_thread.h
586 for Cortex-M since so we don't waste time checking the _tx_thread_system_state variable that is always
587 zero after initialization for Cortex-M ports. */
588
589 #ifndef TX_THREAD_SYSTEM_RETURN_CHECK
590 #define TX_THREAD_SYSTEM_RETURN_CHECK(c) (c) = ((ULONG) _tx_thread_preempt_disable);
591 #endif
592
593 #if !defined(TX_SINGLE_MODE_SECURE) && !defined(TX_SINGLE_MODE_NON_SECURE)
594 /* Initialize secure stacks for threads calling secure functions. */
595 extern void _tx_thread_secure_stack_initialize(void);
596 #define TX_PORT_SPECIFIC_PRE_INITIALIZATION _tx_thread_secure_stack_initialize();
597 #endif
598
599 /* Define the macro to ensure _tx_thread_preempt_disable is set early in initialization in order to
600 prevent early scheduling on Cortex-M parts. */
601
602 #define TX_PORT_SPECIFIC_POST_INITIALIZATION _tx_thread_preempt_disable++;
603
604
605
606
607 #ifndef TX_DISABLE_INLINE
608
609 /* Define the TX_LOWEST_SET_BIT_CALCULATE macro for each compiler. */
610 #ifdef __ICCARM__ /* IAR Compiler */
611 #define TX_LOWEST_SET_BIT_CALCULATE(m, b) (b) = (UINT) __CLZ(__RBIT((m)));
612 #elif defined(__GNUC__) /* GCC and AC6 Compiler */
613 #define TX_LOWEST_SET_BIT_CALCULATE(m, b) __asm__ volatile (" RBIT %0,%1 ": "=r" (m) : "r" (m) ); \
614 __asm__ volatile (" CLZ %0,%1 ": "=r" (b) : "r" (m) );
615 #endif
616
617 /* Define the interrupt disable/restore macros for each compiler. */
618
619 #ifdef __GNUC__ /* GCC and AC6 */
620
/* Disable interrupts (CPSID i) and return the previous PRIMASK value so the
   caller can later restore it with __restore_interrupt. */
__attribute__( ( always_inline ) ) static inline unsigned int __disable_interrupt(void)
{
unsigned int primask_value;

    __asm__ volatile (" MRS %0,PRIMASK ": "=r" (primask_value) );
    __asm__ volatile (" CPSID i" : : : "memory" );
    return(primask_value);
}
629
/* Restore PRIMASK to a value previously captured by __disable_interrupt. */
__attribute__( ( always_inline ) ) static inline void __restore_interrupt(unsigned int primask_value)
{
    __asm__ volatile (" MSR PRIMASK,%0": : "r" (primask_value): "memory" );
}
634
/* Read the current PRIMASK value without modifying it. */
__attribute__( ( always_inline ) ) static inline unsigned int __get_primask_value(void)
{
unsigned int primask_value;

    __asm__ volatile (" MRS %0,PRIMASK ": "=r" (primask_value) );
    return(primask_value);
}
642
/* Enable interrupts (CPSIE i). */
__attribute__( ( always_inline ) ) static inline void __enable_interrupt(void)
{
    __asm__ volatile (" CPSIE i": : : "memory" );
}
647
648
/* Request a context switch by setting the PendSV pending bit (0x10000000) in
   the ICSR register at 0xE000ED04.  If running at thread level (IPSR == 0),
   briefly enable interrupts so the PendSV exception can be taken, then
   restore the caller's PRIMASK. */
__attribute__( ( always_inline ) ) static inline void _tx_thread_system_return_inline(void)
{
unsigned int interrupt_save;

    /* Set PendSV to invoke ThreadX scheduler. */
    *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
    if (__get_IPSR() == 0)
    {
        /* The PendSV handler runs in the window between CPSIE and the restore. */
        interrupt_save = __get_primask_value();
        __enable_interrupt();
        __restore_interrupt(interrupt_save);
    }
}
662
663
664 #define TX_INTERRUPT_SAVE_AREA UINT interrupt_save;
665 #define TX_DISABLE interrupt_save = __disable_interrupt();
666 #define TX_RESTORE __restore_interrupt(interrupt_save);
667
668 #elif defined(__ICCARM__) /* IAR */
669
/* IAR build: same PendSV request as the GNU version above, using the IAR
   interrupt-state intrinsics instead of hand-written asm. */
static void _tx_thread_system_return_inline(void)
{
__istate_t interrupt_save;

    /* Set PendSV to invoke ThreadX scheduler. */
    *((volatile ULONG *) 0xE000ED04) = ((ULONG) 0x10000000);
    if (__get_IPSR() == 0)
    {
        /* Open a brief interrupt window so the pended PendSV can be taken. */
        interrupt_save = __get_interrupt_state();
        __enable_interrupt();
        __set_interrupt_state(interrupt_save);
    }
}
683
684 #define TX_INTERRUPT_SAVE_AREA __istate_t interrupt_save;
685 #define TX_DISABLE {interrupt_save = __get_interrupt_state();__disable_interrupt();};
686 #define TX_RESTORE {__set_interrupt_state(interrupt_save);};
687
688 #endif /* Interrupt disable/restore macros for each compiler. */
689
690 /* Redefine _tx_thread_system_return for improved performance. */
691
692 #define _tx_thread_system_return _tx_thread_system_return_inline
693
694
695 #else /* TX_DISABLE_INLINE is defined */
696
697 UINT _tx_thread_interrupt_disable(VOID);
698 VOID _tx_thread_interrupt_restore(UINT previous_posture);
699
700 #define TX_INTERRUPT_SAVE_AREA register UINT interrupt_save;
701
702 #define TX_DISABLE interrupt_save = _tx_thread_interrupt_disable();
703 #define TX_RESTORE _tx_thread_interrupt_restore(interrupt_save);
704 #endif /* TX_DISABLE_INLINE */
705
706
707 /* Define the version ID of ThreadX. This may be utilized by the application. */
708
#ifdef TX_THREAD_INIT
/* Defined once, in the translation unit that sets TX_THREAD_INIT;
   all other units see only the extern declaration below. */
CHAR _tx_version_id[] =
"Copyright (c) Microsoft Corporation. All rights reserved. * ThreadX Cortex-M23/IAR Version 6.3.0 *";
#else
#ifdef TX_MISRA_ENABLE
/* MISRA build requires an explicit array bound on the extern declaration. */
extern CHAR _tx_version_id[100];
#else
extern CHAR _tx_version_id[];
#endif
#endif
719
720
721 #endif
722