1 /* This test simulator is designed to simulate ux_utility_ APIs for test.  */
2 
3 #include <stdio.h>
4 
5 #define TX_SOURCE_CODE
6 #include "tx_api.h"
7 #include "tx_thread.h"
8 #include "tx_trace.h"
9 #include "tx_mutex.h"
10 #include "tx_semaphore.h"
11 #include "tx_event_flags.h"
12 #include "tx_initialize.h"
13 
14 
15 #define NX_SOURCE_CODE
16 #include "nx_api.h"
17 #include "nx_packet.h"
18 
19 
20 #include "ux_api.h"
21 #include "ux_system.h"
22 #include "ux_utility.h"
23 #include "ux_hcd_sim_host.h"
24 #include "ux_dcd_sim_slave.h"
25 #include "ux_device_stack.h"
26 #include "ux_test_utility_sim.h"
27 #include "ux_test_hcd_sim_host.h"
28 #include "ux_test_dcd_sim_slave.h"
29 #include "ux_host_class_storage.h"
30 
31 #include "ux_test.h"
32 
33 #define FAIL_DISABLE ((ULONG)~0x00)
34 #define SYSTEM_MUTEX_ALLOC_LOG_SIZE 1024
35 
/* Singly-linked list node describing one semaphore-get "exception":
   a (semaphore, signal) pair that is exempted from injected semaphore-get
   failures (see _tx_semaphore_get below).  */
typedef struct UX_TEST_UTILITY_SIM_SEM_EXCEPT_STRUCT
{

    struct UX_TEST_UTILITY_SIM_SEM_EXCEPT_STRUCT *next;

    TX_SEMAPHORE *semaphore; /* UX_NULL to match any */
    ULONG semaphore_signal;
} UX_TEST_UTILITY_SIM_SEM_EXCEPT;
44 
/* Singly-linked list node recording one simulated memory allocation
   (used by the allocation-tracking arrays below).  */
typedef struct UX_TEST_UTILITY_SIM_MEMD_STRUCT
{

    struct UX_TEST_UTILITY_SIM_MEMD_STRUCT *next;
    VOID                                   *mem;
} UX_TEST_UTILITY_SIM_MEMD;
51 
/* One log entry tracking allocation counts observed while the USBX system
   mutex is held (first and last observed count for one lock interval).  */
typedef struct UX_TEST_UTILITY_SYSTEM_MUTEX_ALLOC_LOG_STRUCT
{

    ULONG first_count;
    ULONG last_count;
} UX_TEST_UTILITY_SYSTEM_MUTEX_ALLOC_LOG;
58 
/* Semaphore creation failure injection: creation fails once
   sem_create_count reaches sem_create_fail_after
   (FAIL_DISABLE means never fail).  */
static ULONG sem_create_count = 0;
static ULONG sem_create_fail_after = FAIL_DISABLE;

/* Semaphore get failure injection, same scheme as creation above.  */
static ULONG sem_get_count = 0;
static ULONG sem_get_fail_after = FAIL_DISABLE;

/* Head of the list of (semaphore, signal) pairs exempt from injected
   semaphore-get failures.  */
static UX_TEST_UTILITY_SIM_SEM_EXCEPT *excepts = UX_NULL;

/* Mutex creation failure injection.  */
static ULONG mutex_create_count = 0;
static ULONG mutex_fail_after = FAIL_DISABLE;

/* Mutex get ("on") failure injection.  */
static ULONG mutex_on_count = 0;
static ULONG mutex_on_fail_after = FAIL_DISABLE;

/* Event flags group creation failure injection.  */
static ULONG event_create_count = 0;
static ULONG event_fail_after = FAIL_DISABLE;

/* Callbacks wired into ux_system_mutex_hooks below.  */
static void ux_system_mutex_create_callback(UX_TEST_ACTION *action, VOID *params);
static void ux_system_mutex_get_callback(UX_TEST_ACTION *action, VOID *params);
static void ux_system_mutex_put_callback(UX_TEST_ACTION *action, VOID *params);
79 
/* Hook action table tracking the USBX system mutex ("ux_system_mutex"):
   entry [0] matches creation by name, entries [1]/[2] match get/put on the
   mutex pointer captured by the creation callback; the final zero entry
   terminates the array.  */
/* Create - 0, get - 1, put - 2 */
static UX_TEST_ACTION ux_system_mutex_hooks[4] = {
    {
        .usbx_function = UX_TEST_OVERRIDE_TX_MUTEX_CREATE,
        .name_ptr = "ux_system_mutex",
        .mutex_ptr = UX_NULL, /* Don't care. */
        .inherit = TX_NO_INHERIT,
        .do_after = UX_TRUE,
        .action_func = ux_system_mutex_create_callback,
    },
    {
        .usbx_function = UX_TEST_OVERRIDE_TX_MUTEX_GET,
        .mutex_ptr = UX_NULL, /* Replaced on creation callback. */
        .wait_option = TX_WAIT_FOREVER,
        .do_after = UX_FALSE,
        .action_func = ux_system_mutex_get_callback,
    },
    {
        .usbx_function = UX_TEST_OVERRIDE_TX_MUTEX_PUT,
        .mutex_ptr = UX_NULL, /* Replaced on creation callback. */
        .do_after = UX_TRUE,
        .action_func = ux_system_mutex_put_callback,
    },
{ 0 },
};
105 
/* USBX system mutex lock/unlock counters and allocation logging state.  */
static ULONG ux_system_mutex_on_count      = 0;
static ULONG ux_system_mutex_off_count     = 0;
UCHAR ux_system_mutex_callback_skip = UX_FALSE; /* When set, mutex hook callbacks skip their bookkeeping.  */
static ULONG rmem_free = 0; /* Presumably regular-pool free bytes snapshot — TODO confirm against callbacks.  */
static ULONG cmem_free = 0; /* Presumably cache-safe-pool free bytes snapshot — TODO confirm.  */
static UX_TEST_UTILITY_SYSTEM_MUTEX_ALLOC_LOG ux_system_mutex_alloc_logs[SYSTEM_MUTEX_ALLOC_LOG_SIZE];
static ULONG ux_system_mutex_alloc_logs_count = 0;
static ULONG ux_system_mutex_alloc_logs_match = 0;
static UCHAR ux_system_mutex_alloc_logs_area  = UX_FALSE;
static UCHAR ux_system_mutex_alloc_logs_lock  = UX_FALSE;

/* Thread creation failure injection.  */
static ULONG thread_create_count = 0;
static ULONG thread_create_fail_after = FAIL_DISABLE;

/* Allocation tracking: one list/last-pointer pair per memory area index.  */
static UX_TEST_UTILITY_SIM_MEMD  first_allocate[2];
static UX_TEST_UTILITY_SIM_MEMD *sim_allocates [2] = {UX_NULL, UX_NULL};
static VOID                     *last_allocate [2] = {UX_NULL, UX_NULL};

/* Memory allocation failure injection.  */
static ULONG mem_alloc_count = 0;
static ULONG mem_alloc_fail_after = FAIL_DISABLE;
ULONG mem_alloc_do_fail = UX_FALSE;
127 
ux_test_utility_sim_cleanup(VOID)128 VOID ux_test_utility_sim_cleanup(VOID)
129 {
130 
131     sem_create_count = 0;
132     sem_create_fail_after = FAIL_DISABLE;
133 
134     sem_get_count = 0;
135     sem_get_fail_after = FAIL_DISABLE;
136 
137     excepts = UX_NULL;
138 
139     mutex_create_count = 0;
140     mutex_fail_after = FAIL_DISABLE;
141 
142     mutex_on_count = 0;
143     mutex_on_fail_after = FAIL_DISABLE;
144 
145     event_create_count = 0;
146     event_fail_after = FAIL_DISABLE;
147 
148     thread_create_count = 0;
149     thread_create_fail_after = FAIL_DISABLE;
150 
151     sim_allocates[0] = UX_NULL;
152     sim_allocates[1] = UX_NULL;
153     last_allocate[0] = UX_NULL;
154     last_allocate[1] = UX_NULL;
155 
156     ux_test_remove_hooks_from_array(ux_system_mutex_hooks);
157 
158     mem_alloc_count = 0;
159     mem_alloc_fail_after = FAIL_DISABLE;
160     mem_alloc_do_fail = UX_FALSE;
161 
162     ux_system_mutex_on_count  = 0;
163     ux_system_mutex_off_count = 0;
164     ux_system_mutex_callback_skip      = UX_FALSE;
165     ux_system_mutex_alloc_logs_count = 0;
166     ux_system_mutex_alloc_logs_match = 0;
167     ux_system_mutex_alloc_logs_area  = UX_FALSE;
168     ux_system_mutex_alloc_logs_lock  = UX_FALSE;
169     _ux_utility_memory_set(ux_system_mutex_alloc_logs, 0, sizeof(ux_system_mutex_alloc_logs));
170 }
171 
172 /* Semaphore handling simulation */
173 
ux_test_utility_sim_sem_create_count_reset(VOID)174 VOID ux_test_utility_sim_sem_create_count_reset(VOID)
175 {
176 
177     sem_create_count = 0;
178 }
179 
ux_test_utility_sim_sem_create_count(VOID)180 ULONG ux_test_utility_sim_sem_create_count(VOID)
181 {
182 
183     return sem_create_count;
184 }
185 
ux_test_utility_sim_sem_error_generation_start(ULONG fail_after)186 VOID ux_test_utility_sim_sem_error_generation_start(ULONG fail_after)
187 {
188 
189     sem_create_count = 0;
190     sem_create_fail_after = fail_after;
191 }
192 
ux_test_utility_sim_sem_error_generation_stop(VOID)193 VOID ux_test_utility_sim_sem_error_generation_stop(VOID)
194 {
195 
196     sem_create_fail_after = FAIL_DISABLE;
197     sem_create_count = 0;
198 }
199 
/* Simulated replacement for ThreadX _tx_semaphore_create.  Optionally
   injects a failure after a configured number of creations, runs any
   registered test action, then performs the real creation: initializes
   the control block and links it into the circular created-semaphores
   list under interrupt protection.  */
UINT  _tx_semaphore_create(TX_SEMAPHORE *semaphore_ptr, CHAR *name_ptr, ULONG initial_count)
{

TX_INTERRUPT_SAVE_AREA

TX_SEMAPHORE    *next_semaphore;
TX_SEMAPHORE    *previous_semaphore;
UX_TEST_OVERRIDE_TX_SEMAPHORE_CREATE_PARAMS     action_params = { semaphore_ptr, name_ptr, initial_count };
UX_TEST_ACTION                                  action;


    /* Failure injection: once the creation count reaches the configured
       threshold, fail without touching the control block.  */
    if (sem_create_fail_after != FAIL_DISABLE)
    {

        if (sem_create_count >= sem_create_fail_after)
        {

            /* Return testing error instead of actual creation.
               NOTE(review): returns UX_MUTEX_ERROR from a semaphore path —
               looks copy-pasted from the mutex simulator; confirm no test
               depends on this exact error code before changing it.  */
            return UX_MUTEX_ERROR;
        }
    }

    /* Perform action.  */
    action = ux_test_action_handler(UX_TEST_OVERRIDE_TX_SEMAPHORE_CREATE, &action_params);
    ux_test_do_action_before(&action, &action_params);
    if (ux_test_is_expedient_on())
    {
        if (action.matched && !action.do_after)
        {
            if (!action.no_return)
            {
                return action.status;
            }
        }
    }

    /* Do actual creating. */
    sem_create_count ++;

    /* Initialize semaphore control block to all zeros.  */
    TX_MEMSET(semaphore_ptr, 0, (sizeof(TX_SEMAPHORE)));

    /* Setup the basic semaphore fields.  */
    semaphore_ptr -> tx_semaphore_name =             name_ptr;
    semaphore_ptr -> tx_semaphore_count =            initial_count;

    /* Disable interrupts to place the semaphore on the created list.  */
    TX_DISABLE

    /* Setup the semaphore ID to make it valid.  */
    semaphore_ptr -> tx_semaphore_id =  TX_SEMAPHORE_ID;

    /* Place the semaphore on the list of created semaphores.  First,
       check for an empty list.  */
    if (_tx_semaphore_created_count == TX_EMPTY)
    {

        /* The created semaphore list is empty.  Add semaphore to empty list.  */
        _tx_semaphore_created_ptr =                       semaphore_ptr;
        semaphore_ptr -> tx_semaphore_created_next =      semaphore_ptr;
        semaphore_ptr -> tx_semaphore_created_previous =  semaphore_ptr;
    }
    else
    {

        /* This list is not NULL, add to the end of the list.  */
        next_semaphore =      _tx_semaphore_created_ptr;
        previous_semaphore =  next_semaphore -> tx_semaphore_created_previous;

        /* Place the new semaphore in the list.  */
        next_semaphore -> tx_semaphore_created_previous =  semaphore_ptr;
        previous_semaphore -> tx_semaphore_created_next =  semaphore_ptr;

        /* Setup this semaphore's next and previous created links.  */
        semaphore_ptr -> tx_semaphore_created_previous =  previous_semaphore;
        semaphore_ptr -> tx_semaphore_created_next =      next_semaphore;
    }

    /* Increment the created count.  */
    _tx_semaphore_created_count++;

    /* Optional semaphore create extended processing.  */
    TX_SEMAPHORE_CREATE_EXTENSION(semaphore_ptr)

    /* If trace is enabled, register this object.  */
    TX_TRACE_OBJECT_REGISTER(TX_TRACE_OBJECT_TYPE_SEMAPHORE, semaphore_ptr, name_ptr, initial_count, 0)

    /* If trace is enabled, insert this event into the trace buffer.  */
    TX_TRACE_IN_LINE_INSERT(TX_TRACE_SEMAPHORE_CREATE, semaphore_ptr, initial_count, TX_POINTER_TO_ULONG_CONVERT(&next_semaphore), 0, TX_TRACE_SEMAPHORE_EVENTS)

    /* Log this kernel call.  */
    TX_EL_SEMAPHORE_CREATE_INSERT

    /* Restore interrupts.  */
    TX_RESTORE

    /* Return TX_SUCCESS.  */
    return(TX_SUCCESS);
}
299 
ux_test_utility_sim_sem_get_count_reset(VOID)300 VOID  ux_test_utility_sim_sem_get_count_reset    (VOID)
301 {
302 
303     sem_get_count = 0;
304 }
305 
ux_test_utility_sim_sem_get_count(VOID)306 ULONG ux_test_utility_sim_sem_get_count          (VOID)
307 {
308 
309     return sem_get_count;
310 }
311 
ux_test_utility_sim_sem_get_error_generation_start(ULONG fail_after)312 VOID  ux_test_utility_sim_sem_get_error_generation_start(ULONG fail_after)
313 {
314 
315     sem_get_count = 0;
316     sem_get_fail_after = fail_after;
317 }
318 
ux_test_utility_sim_sem_get_error_generation_stop(VOID)319 VOID  ux_test_utility_sim_sem_get_error_generation_stop (VOID)
320 {
321 
322     sem_get_fail_after = FAIL_DISABLE;
323     sem_get_count = 0;
324 }
325 
ux_test_utility_sim_sem_get_error_exception_reset(VOID)326 VOID  ux_test_utility_sim_sem_get_error_exception_reset(VOID)
327 {
328 
329     excepts = UX_NULL;
330 }
331 
ux_test_utility_sim_sem_get_error_exception_add(TX_SEMAPHORE * semaphore,ULONG semaphore_signal)332 VOID  ux_test_utility_sim_sem_get_error_exception_add(TX_SEMAPHORE *semaphore, ULONG semaphore_signal)
333 {
334 UX_TEST_UTILITY_SIM_SEM_EXCEPT* except;
335 
336     if (_ux_system -> ux_system_memory_byte_pool[UX_MEMORY_BYTE_POOL_REGULAR] -> ux_byte_pool_start == UX_NULL)
337         return;
338 
339     except = (UX_TEST_UTILITY_SIM_SEM_EXCEPT *)ux_utility_memory_allocate(UX_NO_ALIGN, UX_REGULAR_MEMORY, sizeof(UX_TEST_UTILITY_SIM_SEM_EXCEPT));
340     if (except == UX_NULL)
341         return;
342 
343     /* Save exception */
344     except->semaphore = semaphore;
345     except->semaphore_signal = semaphore_signal;
346 
347     /* Link to head */
348     except->next = excepts;
349     excepts = except;
350 }
351 
ux_test_utility_sim_sem_in_exception_list(TX_SEMAPHORE * semaphore,ULONG semaphore_signal)352 static UCHAR ux_test_utility_sim_sem_in_exception_list(TX_SEMAPHORE *semaphore, ULONG semaphore_signal)
353 {
354 UX_TEST_UTILITY_SIM_SEM_EXCEPT* except;
355 
356     except = excepts;
357     while(except)
358     {
359 
360         if (except->semaphore == UX_NULL && semaphore_signal == except->semaphore_signal)
361             return UX_TRUE;
362 
363         if (except->semaphore == semaphore && semaphore_signal == except->semaphore_signal)
364             return UX_TRUE;
365 
366         except = except->next;
367     }
368     return UX_FALSE;
369 }
370 
/* Simulated replacement for ThreadX _tx_semaphore_get.  Runs test hooks
   and actions around the real ThreadX get logic: decrement the count if
   available, otherwise suspend the calling thread (or return
   TX_NO_INSTANCE for TX_NO_WAIT).  When get-failure injection is armed
   and the (semaphore, wait_option) pair is not on the exception list,
   returns UX_SEMAPHORE_ERROR without performing the get.  */
UINT  _tx_semaphore_get(TX_SEMAPHORE *semaphore_ptr, ULONG wait_option)
{

TX_INTERRUPT_SAVE_AREA

TX_THREAD       *thread_ptr;
TX_THREAD       *next_thread;
TX_THREAD       *previous_thread;
UINT            status;
UX_TEST_OVERRIDE_TX_SEMAPHORE_GET_PARAMS    params = { semaphore_ptr, wait_option };
UX_TEST_ACTION                              action;


    /* Perform hooked callbacks.  */
    ux_test_do_hooks_before(UX_TEST_OVERRIDE_TX_SEMAPHORE_GET, &params);

    action = ux_test_action_handler(UX_TEST_OVERRIDE_TX_SEMAPHORE_GET, &params);
    ux_test_do_action_before(&action, &params);

    /* Failure injection: only active in expedient mode, and bypassed for
       pairs on the exception list.  NOTE(review): this early return skips
       ux_test_do_action_after/ux_test_do_hooks_after and does not bump
       sem_get_count — confirm that is the intended semantics.  */
    if (ux_test_is_expedient_on())
    {

        if (sem_get_fail_after != FAIL_DISABLE)

            if (sem_get_count >= sem_get_fail_after)

                /* Return testing error instead of actual creation. */
                if (!ux_test_utility_sim_sem_in_exception_list(semaphore_ptr, wait_option))

                    return UX_SEMAPHORE_ERROR;
    }

    /* Default the status to TX_SUCCESS.  */
    status =  TX_SUCCESS;

    /* Disable interrupts to get an instance from the semaphore.  */
    TX_DISABLE

#ifdef TX_SEMAPHORE_ENABLE_PERFORMANCE_INFO

    /* Increment the total semaphore get counter.  */
    _tx_semaphore_performance_get_count++;

    /* Increment the number of attempts to get this semaphore.  */
    semaphore_ptr -> tx_semaphore_performance_get_count++;
#endif

    /* If trace is enabled, insert this event into the trace buffer.  */
    TX_TRACE_IN_LINE_INSERT(TX_TRACE_SEMAPHORE_GET, semaphore_ptr, wait_option, semaphore_ptr -> tx_semaphore_count, TX_POINTER_TO_ULONG_CONVERT(&thread_ptr), TX_TRACE_SEMAPHORE_EVENTS)

    /* Log this kernel call.  */
    TX_EL_SEMAPHORE_GET_INSERT

    /* Determine if there is an instance of the semaphore.  */
    if (semaphore_ptr -> tx_semaphore_count != ((ULONG) 0))
    {

        /* Decrement the semaphore count.  */
        semaphore_ptr -> tx_semaphore_count--;

        /* Restore interrupts.  */
        TX_RESTORE
    }

    /* Determine if the request specifies suspension.  */
    else if (wait_option != TX_NO_WAIT)
    {

        /* Prepare for suspension of this thread.  */

#ifdef TX_SEMAPHORE_ENABLE_PERFORMANCE_INFO

        /* Increment the total semaphore suspensions counter.  */
        _tx_semaphore_performance_suspension_count++;

        /* Increment the number of suspensions on this semaphore.  */
        semaphore_ptr -> tx_semaphore_performance_suspension_count++;
#endif

        /* Pickup thread pointer.  */
        TX_THREAD_GET_CURRENT(thread_ptr)

        /* Setup cleanup routine pointer.  */
        thread_ptr -> tx_thread_suspend_cleanup =  &(_tx_semaphore_cleanup);

        /* Setup cleanup information, i.e. this semaphore control
           block.  */
        thread_ptr -> tx_thread_suspend_control_block =  (VOID *) semaphore_ptr;

        /* Setup suspension list.  */
        if (semaphore_ptr -> tx_semaphore_suspended_count == TX_NO_SUSPENSIONS)
        {

            /* No other threads are suspended.  Setup the head pointer and
               just setup this threads pointers to itself.  */
            semaphore_ptr -> tx_semaphore_suspension_list =         thread_ptr;
            thread_ptr -> tx_thread_suspended_next =                thread_ptr;
            thread_ptr -> tx_thread_suspended_previous =            thread_ptr;
        }
        else
        {

            /* This list is not NULL, add current thread to the end. */
            next_thread =                                   semaphore_ptr -> tx_semaphore_suspension_list;
            thread_ptr -> tx_thread_suspended_next =        next_thread;
            previous_thread =                               next_thread -> tx_thread_suspended_previous;
            thread_ptr -> tx_thread_suspended_previous =    previous_thread;
            previous_thread -> tx_thread_suspended_next =   thread_ptr;
            next_thread -> tx_thread_suspended_previous =   thread_ptr;
        }

        /* Increment the number of suspensions.  */
        semaphore_ptr -> tx_semaphore_suspended_count++;

        /* Set the state to suspended.  */
        thread_ptr -> tx_thread_state =    TX_SEMAPHORE_SUSP;

#ifdef TX_NOT_INTERRUPTABLE

        /* Call actual non-interruptable thread suspension routine.  */
        _tx_thread_system_ni_suspend(thread_ptr, wait_option);

        /* Restore interrupts.  */
        TX_RESTORE
#else

        /* Set the suspending flag.  */
        thread_ptr -> tx_thread_suspending =  TX_TRUE;

        /* Setup the timeout period.  */
        thread_ptr -> tx_thread_timer.tx_timer_internal_remaining_ticks =  wait_option;

        /* Temporarily disable preemption.  */
        _tx_thread_preempt_disable++;

        /* Restore interrupts.  */
        TX_RESTORE

        /* Call actual thread suspension routine.  */
        _tx_thread_system_suspend(thread_ptr);
#endif

        /* Return the completion status.  */
        status =  thread_ptr -> tx_thread_suspend_status;
    }
    else
    {

        /* Restore interrupts.  */
        TX_RESTORE

        /* Immediate return, return error completion.  */
        status =  TX_NO_INSTANCE;
    }

    ux_test_do_action_after(&action, &params);

    /* Perform hooked callbacks.  */
    ux_test_do_hooks_after(UX_TEST_OVERRIDE_TX_SEMAPHORE_GET, &params);

    /* Return completion status.  */
    return(status);
}
534 
535 /* Mutex handling simulation */
536 
ux_test_utility_sim_mutex_create_count_reset(VOID)537 VOID ux_test_utility_sim_mutex_create_count_reset(VOID)
538 {
539 
540     mutex_create_count = 0;
541 }
542 
ux_test_utility_sim_mutex_create_count(VOID)543 ULONG ux_test_utility_sim_mutex_create_count(VOID)
544 {
545 
546     return mutex_create_count;
547 }
548 
ux_test_utility_sim_mutex_error_generation_start(ULONG fail_after)549 VOID ux_test_utility_sim_mutex_error_generation_start(ULONG fail_after)
550 {
551 
552     mutex_create_count = 0;
553     mutex_fail_after = fail_after;
554 }
555 
ux_test_utility_sim_mutex_error_generation_stop(VOID)556 VOID ux_test_utility_sim_mutex_error_generation_stop(VOID)
557 {
558 
559     mutex_fail_after = FAIL_DISABLE;
560     mutex_create_count = 0;
561 }
562 
/* Simulated replacement for ThreadX _tx_mutex_create.  Runs test hooks
   and actions around the real creation logic (control block init plus
   insertion into the circular created-mutexes list under interrupt
   protection), and optionally injects a failure after a configured
   number of creations.  */
UINT  _tx_mutex_create(TX_MUTEX *mutex_ptr, CHAR *name_ptr, UINT inherit)
{

TX_INTERRUPT_SAVE_AREA

TX_MUTEX        *next_mutex;
TX_MUTEX        *previous_mutex;

UX_TEST_OVERRIDE_TX_MUTEX_CREATE_PARAMS action_params = { mutex_ptr, name_ptr, inherit };
UX_TEST_ACTION                          action;

    /* Perform hooked callbacks.  */
    ux_test_do_hooks_before(UX_TEST_OVERRIDE_TX_MUTEX_CREATE, &action_params);

    /* Perform action.  */
    action = ux_test_action_handler(UX_TEST_OVERRIDE_TX_MUTEX_CREATE, &action_params);
    ux_test_do_action_before(&action, &action_params);

    /* Failure injection.  NOTE(review): this early return skips
       ux_test_do_action_after/ux_test_do_hooks_after even though the
       before-hooks already ran — confirm that asymmetry is intended.  */
    if (mutex_fail_after != FAIL_DISABLE)
    {

        if (mutex_create_count >= mutex_fail_after)
        {

            /* Return testing error instead of actual creation. */
            return UX_MUTEX_ERROR;
        }
    }

    /* Do actual creating. */
    mutex_create_count ++;

    /* Initialize mutex control block to all zeros.  */
    TX_MEMSET(mutex_ptr, 0, (sizeof(TX_MUTEX)));

    /* Setup the basic mutex fields.  */
    mutex_ptr -> tx_mutex_name =             name_ptr;
    mutex_ptr -> tx_mutex_inherit =          inherit;

    /* Disable interrupts to place the mutex on the created list.  */
    TX_DISABLE

    /* Setup the mutex ID to make it valid.  */
    mutex_ptr -> tx_mutex_id =  TX_MUTEX_ID;

    /* Setup the thread mutex release function pointer.  */
    _tx_thread_mutex_release =  &(_tx_mutex_thread_release);

    /* Place the mutex on the list of created mutexes.  First,
       check for an empty list.  */
    if (_tx_mutex_created_count == TX_EMPTY)
    {

        /* The created mutex list is empty.  Add mutex to empty list.  */
        _tx_mutex_created_ptr =                   mutex_ptr;
        mutex_ptr -> tx_mutex_created_next =      mutex_ptr;
        mutex_ptr -> tx_mutex_created_previous =  mutex_ptr;
    }
    else
    {

        /* This list is not NULL, add to the end of the list.  */
        next_mutex =      _tx_mutex_created_ptr;
        previous_mutex =  next_mutex -> tx_mutex_created_previous;

        /* Place the new mutex in the list.  */
        next_mutex -> tx_mutex_created_previous =  mutex_ptr;
        previous_mutex -> tx_mutex_created_next =  mutex_ptr;

        /* Setup this mutex's next and previous created links.  */
        mutex_ptr -> tx_mutex_created_previous =  previous_mutex;
        mutex_ptr -> tx_mutex_created_next =      next_mutex;
    }

    /* Increment the ownership count.  */
    _tx_mutex_created_count++;

    /* Optional mutex create extended processing.  */
    TX_MUTEX_CREATE_EXTENSION(mutex_ptr)

    /* If trace is enabled, register this object.  */
    TX_TRACE_OBJECT_REGISTER(TX_TRACE_OBJECT_TYPE_MUTEX, mutex_ptr, name_ptr, inherit, 0)

    /* If trace is enabled, insert this event into the trace buffer.  */
    TX_TRACE_IN_LINE_INSERT(TX_TRACE_MUTEX_CREATE, mutex_ptr, inherit, TX_POINTER_TO_ULONG_CONVERT(&next_mutex), 0, TX_TRACE_MUTEX_EVENTS)

    /* Log this kernel call.  */
    TX_EL_MUTEX_CREATE_INSERT

    /* Restore interrupts.  */
    TX_RESTORE

    ux_test_do_action_after(&action, &action_params);

    /* Perform hooked callbacks.  */
    ux_test_do_hooks_after(UX_TEST_OVERRIDE_TX_MUTEX_CREATE, &action_params);

    /* Return TX_SUCCESS.  */
    return(TX_SUCCESS);
}
663 
ux_test_utility_sim_mutex_on_count_reset(VOID)664 VOID  ux_test_utility_sim_mutex_on_count_reset    (VOID)
665 {
666 
667     mutex_on_count = 0;
668 }
ux_test_utility_sim_mutex_on_count(VOID)669 ULONG ux_test_utility_sim_mutex_on_count          (VOID)
670 {
671 
672     return mutex_on_count;
673 }
674 
ux_test_utility_sim_mutex_on_error_generation_start(ULONG fail_after)675 VOID  ux_test_utility_sim_mutex_on_error_generation_start(ULONG fail_after)
676 {
677 
678     mutex_on_count = 0;
679     mutex_on_fail_after = fail_after;
680 }
ux_test_utility_sim_mutex_on_error_generation_stop(VOID)681 VOID  ux_test_utility_sim_mutex_on_error_generation_stop (VOID)
682 {
683 
684     mutex_on_fail_after = FAIL_DISABLE;
685     mutex_on_count = 0;
686 }
687 
688 /* Thread handling simulation */
689 
ux_test_utility_sim_thread_create_count_reset(VOID)690 VOID ux_test_utility_sim_thread_create_count_reset(VOID)
691 {
692 
693     thread_create_count = 0;
694 }
695 
ux_test_utility_sim_thread_create_count(VOID)696 ULONG ux_test_utility_sim_thread_create_count(VOID)
697 {
698 
699     return thread_create_count;
700 }
701 
ux_test_utility_sim_thread_error_generation_start(ULONG fail_after)702 VOID ux_test_utility_sim_thread_error_generation_start(ULONG fail_after)
703 {
704 
705     thread_create_count = 0;
706     thread_create_fail_after = fail_after;
707 }
708 
ux_test_utility_sim_thread_error_generation_stop(VOID)709 VOID ux_test_utility_sim_thread_error_generation_stop(VOID)
710 {
711 
712     thread_create_fail_after = FAIL_DISABLE;
713     thread_create_count = 0;
714 }
715 
_tx_thread_create(TX_THREAD * thread_ptr,CHAR * name_ptr,VOID (* entry_function)(ULONG id),ULONG entry_input,VOID * stack_start,ULONG stack_size,UINT priority,UINT preempt_threshold,ULONG time_slice,UINT auto_start)716 UINT  _tx_thread_create(TX_THREAD *thread_ptr, CHAR *name_ptr, VOID (*entry_function)(ULONG id), ULONG entry_input,
717                             VOID *stack_start, ULONG stack_size, UINT priority, UINT preempt_threshold,
718                             ULONG time_slice, UINT auto_start)
719 {
720 
721 TX_INTERRUPT_SAVE_AREA
722 
723 TX_THREAD               *next_thread;
724 TX_THREAD               *previous_thread;
725 TX_THREAD               *saved_thread_ptr;
726 UINT                    saved_threshold =  ((UINT) 0);
727 UCHAR                   *temp_ptr;
728 UX_TEST_OVERRIDE_TX_THREAD_CREATE_PARAMS        action_params = { name_ptr };
729 UX_TEST_ACTION                                  action;
730 
731 #ifdef TX_ENABLE_STACK_CHECKING
732 ULONG                   new_stack_start;
733 ULONG                   updated_stack_start;
734 #endif
735 
736 
737     if (thread_create_fail_after != FAIL_DISABLE)
738     {
739 
740         if (thread_create_count >= thread_create_fail_after)
741         {
742 
743             /* Return testing error instead of actual creation. */
744             return UX_MUTEX_ERROR;
745         }
746     }
747 
748     /* Perform action.  */
749     action = ux_test_action_handler(UX_TEST_OVERRIDE_TX_THREAD_CREATE, &action_params);
750     ux_test_do_action_before(&action, &action_params);
751     if (ux_test_is_expedient_on())
752     {
753         if (action.matched && !action.do_after)
754         {
755             if (!action.no_return)
756             {
757                 return action.status;
758             }
759         }
760     }
761 
762     /* Do actual creating. */
763     thread_create_count ++;
764 
765 #ifndef TX_DISABLE_STACK_FILLING
766 
767     /* Set the thread stack to a pattern prior to creating the initial
768        stack frame.  This pattern is used by the stack checking routines
769        to see how much has been used.  */
770     TX_MEMSET(stack_start, ((UCHAR) TX_STACK_FILL), stack_size);
771 #endif
772 
773 #ifdef TX_ENABLE_STACK_CHECKING
774 
775     /* Ensure that there are two ULONG of 0xEF patterns at the top and
776        bottom of the thread's stack. This will be used to check for stack
777        overflow conditions during run-time.  */
778     stack_size =  ((stack_size/(sizeof(ULONG))) * (sizeof(ULONG))) - (sizeof(ULONG));
779 
780     /* Ensure the starting stack address is evenly aligned.  */
781     new_stack_start =  TX_POINTER_TO_ULONG_CONVERT(stack_start);
782     updated_stack_start =  ((((ULONG) new_stack_start) + ((sizeof(ULONG)) - ((ULONG) 1)) ) & (~((sizeof(ULONG)) - ((ULONG) 1))));
783 
784     /* Determine if the starting stack address is different.  */
785     if (new_stack_start != updated_stack_start)
786     {
787 
788         /* Yes, subtract another ULONG from the size to avoid going past the stack area.  */
789         stack_size =  stack_size - (sizeof(ULONG));
790     }
791 
792     /* Update the starting stack pointer.  */
793     stack_start =  TX_ULONG_TO_POINTER_CONVERT(updated_stack_start);
794 #endif
795 
796     /* Prepare the thread control block prior to placing it on the created
797        list.  */
798 
799     /* Initialize thread control block to all zeros.  */
800     TX_MEMSET(thread_ptr, 0, (sizeof(TX_THREAD)));
801 
802     /* Place the supplied parameters into the thread's control block.  */
803     thread_ptr -> tx_thread_name =              name_ptr;
804     thread_ptr -> tx_thread_entry =             entry_function;
805     thread_ptr -> tx_thread_entry_parameter =   entry_input;
806     thread_ptr -> tx_thread_stack_start =       stack_start;
807     thread_ptr -> tx_thread_stack_size =        stack_size;
808     thread_ptr -> tx_thread_stack_end =         (VOID *) (TX_UCHAR_POINTER_ADD(stack_start, (stack_size - ((ULONG) 1))));
809     thread_ptr -> tx_thread_priority =          priority;
810     thread_ptr -> tx_thread_user_priority =     priority;
811     thread_ptr -> tx_thread_time_slice =        time_slice;
812     thread_ptr -> tx_thread_new_time_slice =    time_slice;
813     thread_ptr -> tx_thread_inherit_priority =  ((UINT) TX_MAX_PRIORITIES);
814 
815     /* Calculate the end of the thread's stack area.  */
816     temp_ptr =  TX_VOID_TO_UCHAR_POINTER_CONVERT(stack_start);
817     temp_ptr =  (TX_UCHAR_POINTER_ADD(temp_ptr, (stack_size - ((ULONG) 1))));
818     thread_ptr -> tx_thread_stack_end =         TX_UCHAR_TO_VOID_POINTER_CONVERT(temp_ptr);
819 
820 #ifndef TX_DISABLE_PREEMPTION_THRESHOLD
821 
822     /* Preemption-threshold is enabled, setup accordingly.  */
823     thread_ptr -> tx_thread_preempt_threshold =       preempt_threshold;
824     thread_ptr -> tx_thread_user_preempt_threshold =  preempt_threshold;
825 #else
826 
827     /* Preemption-threshold is disabled, determine if preemption-threshold was required.  */
828     if (priority != preempt_threshold)
829     {
830 
831         /* Preemption-threshold specified. Since specific preemption-threshold is not supported,
832            disable all preemption.  */
833         thread_ptr -> tx_thread_preempt_threshold =       ((UINT) 0);
834         thread_ptr -> tx_thread_user_preempt_threshold =  ((UINT) 0);
835     }
836     else
837     {
838 
839         /* Preemption-threshold is not specified, just setup with the priority.  */
840         thread_ptr -> tx_thread_preempt_threshold =       priority;
841         thread_ptr -> tx_thread_user_preempt_threshold =  priority;
842     }
843 #endif
844 
845     /* Now fill in the values that are required for thread initialization.  */
846     thread_ptr -> tx_thread_state =  TX_SUSPENDED;
847 
848     /* Setup the necessary fields in the thread timer block.  */
849     TX_THREAD_CREATE_TIMEOUT_SETUP(thread_ptr)
850 
851     /* Perform any additional thread setup activities for tool or user purpose.  */
852     TX_THREAD_CREATE_INTERNAL_EXTENSION(thread_ptr)
853 
854     /* Call the target specific stack frame building routine to build the
855        thread's initial stack and to setup the actual stack pointer in the
856        control block.  */
857     _tx_thread_stack_build(thread_ptr, _tx_thread_shell_entry);
858 
859 #ifdef TX_ENABLE_STACK_CHECKING
860 
861     /* Setup the highest usage stack pointer.  */
862     thread_ptr -> tx_thread_stack_highest_ptr =  thread_ptr -> tx_thread_stack_ptr;
863 #endif
864 
865     /* Prepare to make this thread a member of the created thread list.  */
866     TX_DISABLE
867 
868     /* Load the thread ID field in the thread control block.  */
869     thread_ptr -> tx_thread_id =  TX_THREAD_ID;
870 
871     /* Place the thread on the list of created threads.  First,
872        check for an empty list.  */
873     if (_tx_thread_created_count == TX_EMPTY)
874     {
875 
876         /* The created thread list is empty.  Add thread to empty list.  */
877         _tx_thread_created_ptr =                    thread_ptr;
878         thread_ptr -> tx_thread_created_next =      thread_ptr;
879         thread_ptr -> tx_thread_created_previous =  thread_ptr;
880     }
881     else
882     {
883 
884         /* This list is not NULL, add to the end of the list.  */
885         next_thread =  _tx_thread_created_ptr;
886         previous_thread =  next_thread -> tx_thread_created_previous;
887 
888         /* Place the new thread in the list.  */
889         next_thread -> tx_thread_created_previous =  thread_ptr;
890         previous_thread -> tx_thread_created_next =  thread_ptr;
891 
892         /* Setup this thread's created links.  */
893         thread_ptr -> tx_thread_created_previous =  previous_thread;
894         thread_ptr -> tx_thread_created_next =      next_thread;
895     }
896 
897     /* Increment the thread created count.  */
898     _tx_thread_created_count++;
899 
900     /* If trace is enabled, register this object.  */
901     TX_TRACE_OBJECT_REGISTER(TX_TRACE_OBJECT_TYPE_THREAD, thread_ptr, name_ptr, TX_POINTER_TO_ULONG_CONVERT(stack_start), stack_size)
902 
903     /* If trace is enabled, insert this event into the trace buffer.  */
904     TX_TRACE_IN_LINE_INSERT(TX_TRACE_THREAD_CREATE, thread_ptr, priority, TX_POINTER_TO_ULONG_CONVERT(stack_start), stack_size, TX_TRACE_THREAD_EVENTS)
905 
906     /* Register thread in the thread array structure.  */
907     TX_EL_THREAD_REGISTER(thread_ptr)
908 
909     /* Log this kernel call.  */
910     TX_EL_THREAD_CREATE_INSERT
911 
912 #ifndef TX_NOT_INTERRUPTABLE
913 
914     /* Temporarily disable preemption.  */
915     _tx_thread_preempt_disable++;
916 #endif
917 
918     /* Determine if an automatic start was requested.  If so, call the resume
919        thread function and then check for a preemption condition.  */
920     if (auto_start == TX_AUTO_START)
921     {
922 
923         /* Determine if the create call is being called from initialization.  */
924         if (TX_THREAD_GET_SYSTEM_STATE() >= TX_INITIALIZE_IN_PROGRESS)
925         {
926 
927             /* Yes, this create call was made from initialization.  */
928 
929             /* Pickup the current thread execute pointer, which corresponds to the
930                highest priority thread ready to execute.  Interrupt lockout is
931                not required, since interrupts are assumed to be disabled during
932                initialization.  */
933             saved_thread_ptr =  _tx_thread_execute_ptr;
934 
935             /* Determine if there is thread ready for execution.  */
936             if (saved_thread_ptr != TX_NULL)
937             {
938 
939                 /* Yes, a thread is ready for execution when initialization completes.  */
940 
941                 /* Save the current preemption-threshold.  */
942                 saved_threshold =  saved_thread_ptr -> tx_thread_preempt_threshold;
943 
944                 /* For initialization, temporarily set the preemption-threshold to the
945                    priority level to make sure the highest-priority thread runs once
946                    initialization is complete.  */
947                 saved_thread_ptr -> tx_thread_preempt_threshold =  saved_thread_ptr -> tx_thread_priority;
948             }
949         }
950         else
951         {
952 
953             /* Simply set the saved thread pointer to NULL.  */
954             saved_thread_ptr =  TX_NULL;
955         }
956 
957 #ifdef TX_NOT_INTERRUPTABLE
958 
959         /* Perform any additional activities for tool or user purpose.  */
960         TX_THREAD_CREATE_EXTENSION(thread_ptr)
961 
962         /* Resume the thread!  */
963         _tx_thread_system_ni_resume(thread_ptr);
964 
965         /* Restore previous interrupt posture.  */
966         TX_RESTORE
967 #else
968 
969         /* Restore previous interrupt posture.  */
970         TX_RESTORE
971 
972         /* Perform any additional activities for tool or user purpose.  */
973         TX_THREAD_CREATE_EXTENSION(thread_ptr)
974 
975         /* Call the resume thread function to make this thread ready.  */
976         _tx_thread_system_resume(thread_ptr);
977 #endif
978 
979         /* Determine if the thread's preemption-threshold needs to be restored.  */
980         if (saved_thread_ptr != TX_NULL)
981         {
982 
983             /* Yes, restore the previous highest-priority thread's preemption-threshold. This
984                can only happen if this routine is called from initialization.  */
985             saved_thread_ptr -> tx_thread_preempt_threshold =  saved_threshold;
986         }
987     }
988     else
989     {
990 
991 #ifdef TX_NOT_INTERRUPTABLE
992 
993         /* Perform any additional activities for tool or user purpose.  */
994         TX_THREAD_CREATE_EXTENSION(thread_ptr)
995 
996         /* Restore interrupts.  */
997         TX_RESTORE
998 #else
999 
1000         /* Restore interrupts.  */
1001         TX_RESTORE
1002 
1003         /* Perform any additional activities for tool or user purpose.  */
1004         TX_THREAD_CREATE_EXTENSION(thread_ptr)
1005 
1006         /* Disable interrupts.  */
1007         TX_DISABLE
1008 
1009         /* Re-enable preemption.  */
1010         _tx_thread_preempt_disable--;
1011 
1012         /* Restore interrupts.  */
1013         TX_RESTORE
1014 
1015         /* Check for preemption.  */
1016         _tx_thread_system_preempt_check();
1017 #endif
1018     }
1019 
1020     /* Always return a success.  */
1021     return(TX_SUCCESS);
1022 }
1023 
/* Test override of the ThreadX _tx_mutex_get service.  The kernel logic is
   reproduced unchanged; the only additions are the ux_test hook/action
   callbacks at entry and exit, which let tests observe every mutex get and
   inject simulated behavior (e.g. forced failures) around the real logic.
   Returns TX_SUCCESS, TX_NOT_AVAILABLE, or the thread's suspend status.  */
UINT  _tx_mutex_get(TX_MUTEX *mutex_ptr, ULONG wait_option)
{

TX_INTERRUPT_SAVE_AREA

TX_THREAD       *thread_ptr;
TX_MUTEX        *next_mutex;
TX_MUTEX        *previous_mutex;
TX_THREAD       *mutex_owner;
TX_THREAD       *next_thread;
TX_THREAD       *previous_thread;
UINT            status;
UX_TEST_OVERRIDE_TX_MUTEX_GET_PARAMS    action_params = { mutex_ptr, wait_option };
UX_TEST_ACTION                          action;

    /* Perform hooked callbacks.  */
    ux_test_do_hooks_before(UX_TEST_OVERRIDE_TX_MUTEX_GET, &action_params);

    /* Perform action.  */
    action = ux_test_action_handler(UX_TEST_OVERRIDE_TX_MUTEX_GET, &action_params);
    ux_test_do_action_before(&action, &action_params);

    /* Disable interrupts to get an instance from the mutex.  */
    TX_DISABLE

#ifdef TX_MUTEX_ENABLE_PERFORMANCE_INFO

    /* Increment the total mutex get counter.  */
    _tx_mutex_performance_get_count++;

    /* Increment the number of attempts to get this mutex.  */
    mutex_ptr -> tx_mutex_performance_get_count++;
#endif

    /* If trace is enabled, insert this event into the trace buffer.  */
    TX_TRACE_IN_LINE_INSERT(TX_TRACE_MUTEX_GET, mutex_ptr, wait_option, TX_POINTER_TO_ULONG_CONVERT(mutex_ptr -> tx_mutex_owner), mutex_ptr -> tx_mutex_ownership_count, TX_TRACE_MUTEX_EVENTS)

    /* Log this kernel call.  */
    TX_EL_MUTEX_GET_INSERT

    /* Pickup thread pointer.  */
    TX_THREAD_GET_CURRENT(thread_ptr)

    /* Determine if this mutex is available.  */
    if (mutex_ptr -> tx_mutex_ownership_count == ((UINT) 0))
    {

        /* Set the ownership count to 1.  */
        mutex_ptr -> tx_mutex_ownership_count =  ((UINT) 1);

        /* Remember that the calling thread owns the mutex.  */
        mutex_ptr -> tx_mutex_owner =  thread_ptr;

        /* Determine if the thread pointer is valid.  (NULL happens when called
           from initialization or interrupt context.)  */
        if (thread_ptr != TX_NULL)
        {

            /* Determine if priority inheritance is required.  */
            if (mutex_ptr -> tx_mutex_inherit == TX_TRUE)
            {

                /* Remember the current priority of thread.  */
                mutex_ptr -> tx_mutex_original_priority =   thread_ptr -> tx_thread_priority;

                /* Setup the highest priority waiting thread.  */
                mutex_ptr -> tx_mutex_highest_priority_waiting =  ((UINT) TX_MAX_PRIORITIES);
            }

            /* Pickup next mutex pointer, which is the head of the list.  */
            next_mutex =  thread_ptr -> tx_thread_owned_mutex_list;

            /* Determine if this thread owns any other mutexes that have priority inheritance.  */
            if (next_mutex != TX_NULL)
            {

                /* Non-empty list. Link up the mutex.  */

                /* Pickup the next and previous mutex pointer.  */
                previous_mutex =  next_mutex -> tx_mutex_owned_previous;

                /* Place the owned mutex in the list.  */
                next_mutex -> tx_mutex_owned_previous =  mutex_ptr;
                previous_mutex -> tx_mutex_owned_next =  mutex_ptr;

                /* Setup this mutex's next and previous created links.  */
                mutex_ptr -> tx_mutex_owned_previous =  previous_mutex;
                mutex_ptr -> tx_mutex_owned_next =      next_mutex;
            }
            else
            {

                /* The owned mutex list is empty.  Add mutex to empty list.  */
                thread_ptr -> tx_thread_owned_mutex_list =     mutex_ptr;
                mutex_ptr -> tx_mutex_owned_next =             mutex_ptr;
                mutex_ptr -> tx_mutex_owned_previous =         mutex_ptr;
            }

            /* Increment the number of mutexes owned counter.  */
            thread_ptr -> tx_thread_owned_mutex_count++;
        }

        /* Restore interrupts.  */
        TX_RESTORE

        /* Return success.  */
        status =  TX_SUCCESS;
    }

    /* Otherwise, see if the owning thread is trying to obtain the same mutex.  */
    else if (mutex_ptr -> tx_mutex_owner == thread_ptr)
    {

        /* The owning thread is requesting the mutex again, just
           increment the ownership count.  */
        mutex_ptr -> tx_mutex_ownership_count++;

        /* Restore interrupts.  */
        TX_RESTORE

        /* Return success.  */
        status =  TX_SUCCESS;
    }
    else
    {

        /* Determine if the request specifies suspension.  */
        if (wait_option != TX_NO_WAIT)
        {

            /* Prepare for suspension of this thread.  */

            /* Pickup the mutex owner.  */
            mutex_owner =  mutex_ptr -> tx_mutex_owner;

#ifdef TX_MUTEX_ENABLE_PERFORMANCE_INFO

            /* Increment the total mutex suspension counter.  */
            _tx_mutex_performance_suspension_count++;

            /* Increment the number of suspensions on this mutex.  */
            mutex_ptr -> tx_mutex_performance_suspension_count++;

            /* Determine if a priority inversion is present.  */
            if (thread_ptr -> tx_thread_priority < mutex_owner -> tx_thread_priority)
            {

                /* Yes, priority inversion is present!  */

                /* Increment the total mutex priority inversions counter.  */
                _tx_mutex_performance_priority_inversion_count++;

                /* Increment the number of priority inversions on this mutex.  */
                mutex_ptr -> tx_mutex_performance_priority_inversion_count++;

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                /* Increment the number of total thread priority inversions.  */
                _tx_thread_performance_priority_inversion_count++;

                /* Increment the number of priority inversions for this thread.  */
                thread_ptr -> tx_thread_performance_priority_inversion_count++;
#endif
            }
#endif

            /* Setup cleanup routine pointer.  */
            thread_ptr -> tx_thread_suspend_cleanup =  &(_tx_mutex_cleanup);

            /* Setup cleanup information, i.e. this mutex control
               block.  */
            thread_ptr -> tx_thread_suspend_control_block =  (VOID *) mutex_ptr;

            /* Setup suspension list.  */
            if (mutex_ptr -> tx_mutex_suspended_count == TX_NO_SUSPENSIONS)
            {

                /* No other threads are suspended.  Setup the head pointer and
                   just setup this threads pointers to itself.  */
                mutex_ptr -> tx_mutex_suspension_list =         thread_ptr;
                thread_ptr -> tx_thread_suspended_next =        thread_ptr;
                thread_ptr -> tx_thread_suspended_previous =    thread_ptr;
            }
            else
            {

                /* This list is not NULL, add current thread to the end. */
                next_thread =                                   mutex_ptr -> tx_mutex_suspension_list;
                thread_ptr -> tx_thread_suspended_next =        next_thread;
                previous_thread =                               next_thread -> tx_thread_suspended_previous;
                thread_ptr -> tx_thread_suspended_previous =    previous_thread;
                previous_thread -> tx_thread_suspended_next =   thread_ptr;
                next_thread -> tx_thread_suspended_previous =   thread_ptr;
            }

            /* Increment the suspension count.  */
            mutex_ptr -> tx_mutex_suspended_count++;

            /* Set the state to suspended.  */
            thread_ptr -> tx_thread_state =    TX_MUTEX_SUSP;

#ifdef TX_NOT_INTERRUPTABLE

            /* Determine if we need to raise the priority of the thread
               owning the mutex.  */
            if (mutex_ptr -> tx_mutex_inherit == TX_TRUE)
            {

                /* Determine if this is the highest priority to raise for this mutex.  */
                if (mutex_ptr -> tx_mutex_highest_priority_waiting > thread_ptr -> tx_thread_priority)
                {

                    /* Remember this priority.  */
                    mutex_ptr -> tx_mutex_highest_priority_waiting =  thread_ptr -> tx_thread_priority;
                }

                /* Priority inheritance is requested, check to see if the thread that owns the mutex is lower priority.  */
                if (mutex_owner -> tx_thread_priority > thread_ptr -> tx_thread_priority)
                {

                    /* Yes, raise the suspended, owning thread's priority to that
                       of the current thread.  */
                    _tx_mutex_priority_change(mutex_owner, thread_ptr -> tx_thread_priority);

#ifdef TX_MUTEX_ENABLE_PERFORMANCE_INFO

                    /* Increment the total mutex priority inheritance counter.  */
                    _tx_mutex_performance__priority_inheritance_count++;

                    /* Increment the number of priority inheritance situations on this mutex.  */
                    mutex_ptr -> tx_mutex_performance__priority_inheritance_count++;
#endif
                }
            }

            /* Call actual non-interruptable thread suspension routine.  */
            _tx_thread_system_ni_suspend(thread_ptr, wait_option);

            /* Restore interrupts.  */
            TX_RESTORE
#else

            /* Set the suspending flag.  */
            thread_ptr -> tx_thread_suspending =  TX_TRUE;

            /* Setup the timeout period.  */
            thread_ptr -> tx_thread_timer.tx_timer_internal_remaining_ticks =  wait_option;

            /* Temporarily disable preemption.  */
            _tx_thread_preempt_disable++;

            /* Restore interrupts.  */
            TX_RESTORE

            /* Determine if we need to raise the priority of the thread
               owning the mutex.  */
            if (mutex_ptr -> tx_mutex_inherit == TX_TRUE)
            {

                /* Determine if this is the highest priority to raise for this mutex.  */
                if (mutex_ptr -> tx_mutex_highest_priority_waiting > thread_ptr -> tx_thread_priority)
                {

                    /* Remember this priority.  */
                    mutex_ptr -> tx_mutex_highest_priority_waiting =  thread_ptr -> tx_thread_priority;
                }

                /* Priority inheritance is requested, check to see if the thread that owns the mutex is lower priority.  */
                if (mutex_owner -> tx_thread_priority > thread_ptr -> tx_thread_priority)
                {

                    /* Yes, raise the suspended, owning thread's priority to that
                       of the current thread.  */
                    _tx_mutex_priority_change(mutex_owner, thread_ptr -> tx_thread_priority);

#ifdef TX_MUTEX_ENABLE_PERFORMANCE_INFO

                    /* Increment the total mutex priority inheritance counter.  */
                    _tx_mutex_performance__priority_inheritance_count++;

                    /* Increment the number of priority inheritance situations on this mutex.  */
                    mutex_ptr -> tx_mutex_performance__priority_inheritance_count++;
#endif
                }
            }

            /* Call actual thread suspension routine.  */
            _tx_thread_system_suspend(thread_ptr);
#endif
            /* Return the completion status.  */
            status =  thread_ptr -> tx_thread_suspend_status;
        }
        else
        {

            /* Restore interrupts.  */
            TX_RESTORE

            /* Immediate return, return error completion.  */
            status =  TX_NOT_AVAILABLE;
        }
    }

    /* Perform test actions/hooks after the real mutex get completed.  */
    ux_test_do_action_after(&action, &action_params);

    /* Perform hooked callbacks.  */
    ux_test_do_hooks_after(UX_TEST_OVERRIDE_TX_MUTEX_GET, &action_params);

    /* Return completion status.  */
    return(status);
}
1334 
1335 /* Re-target the _ux_utility_memory_allocate for testing */
1336 
ux_test_utility_sim_mem_alloc_log_enable(UCHAR enable_disable)1337 VOID  ux_test_utility_sim_mem_alloc_log_enable(UCHAR enable_disable)
1338 {
1339     if (enable_disable)
1340     {
1341         mem_alloc_fail_after = FAIL_DISABLE;
1342         mem_alloc_count = 0;
1343         mem_alloc_do_fail = UX_FALSE;
1344 
1345         ux_system_mutex_on_count = 0;
1346         ux_system_mutex_off_count = 0;
1347         ux_system_mutex_alloc_logs_count = 0;
1348         ux_system_mutex_alloc_logs_match = 0;
1349         ux_system_mutex_alloc_logs_area = UX_FALSE;
1350         ux_system_mutex_alloc_logs_lock  = UX_FALSE;
1351         _ux_utility_memory_set(ux_system_mutex_alloc_logs, 0, sizeof(ux_system_mutex_alloc_logs));
1352 
1353         ux_test_link_hooks_from_array(ux_system_mutex_hooks);
1354     }
1355     else
1356     {
1357         ux_test_remove_hooks_from_array(ux_system_mutex_hooks);
1358     }
1359 }
1360 
ux_test_utility_sim_mem_alloc_log_lock(VOID)1361 VOID ux_test_utility_sim_mem_alloc_log_lock(VOID)
1362 {
1363     ux_system_mutex_alloc_logs_lock = UX_TRUE;
1364 #if 0 /* TODO: Dump mem alloc log map.  */
1365     printf("Lock mem log map, %ld area:\n", ux_system_mutex_alloc_logs_count + 1);
1366     for (int i = 0; i <= ux_system_mutex_alloc_logs_count; i ++)
1367     {
1368         printf(" : %6ld ~ %6ld\n", ux_system_mutex_alloc_logs[i].first_count, ux_system_mutex_alloc_logs[i].last_count);
1369     }
1370 #endif
1371 }
1372 
ux_test_utility_sim_mem_alloc_count(VOID)1373 ULONG ux_test_utility_sim_mem_alloc_count(VOID)
1374 {
1375     return mem_alloc_count;
1376 }
1377 
ux_test_utility_sim_mem_alloc_count_reset(VOID)1378 VOID ux_test_utility_sim_mem_alloc_count_reset(VOID)
1379 {
1380     mem_alloc_fail_after = FAIL_DISABLE;
1381     mem_alloc_do_fail = UX_FALSE;
1382     mem_alloc_count = 0;
1383 
1384     ux_system_mutex_off_count = 0;
1385     ux_system_mutex_on_count = 0;
1386 
1387     ux_system_mutex_alloc_logs_count = 0;
1388     ux_system_mutex_alloc_logs_area = UX_FALSE;
1389     ux_system_mutex_alloc_logs_lock  = UX_FALSE;
1390     _ux_utility_memory_set(ux_system_mutex_alloc_logs, 0, sizeof(ux_system_mutex_alloc_logs));
1391 }
1392 
ux_test_utility_sim_mem_alloc_error_generation_start(ULONG fail_after)1393 VOID ux_test_utility_sim_mem_alloc_error_generation_start(ULONG fail_after)
1394 {
1395 
1396     mem_alloc_count = 0;
1397     mem_alloc_do_fail = UX_FALSE;
1398 
1399     ux_system_mutex_off_count = 0;
1400     ux_system_mutex_on_count = 0;
1401 
1402     ux_system_mutex_alloc_logs_match = 0;
1403 
1404     mem_alloc_fail_after = fail_after;
1405 }
1406 
ux_test_utility_sim_mem_alloc_error_generation_stop(VOID)1407 VOID ux_test_utility_sim_mem_alloc_error_generation_stop(VOID)
1408 {
1409 
1410     mem_alloc_fail_after = FAIL_DISABLE;
1411     mem_alloc_count = 0;
1412 
1413     ux_system_mutex_off_count = 0;
1414     ux_system_mutex_on_count = 0;
1415 }
1416 
ux_test_utility_sim_mem_alloc_error_generation_active(VOID)1417 UINT ux_test_utility_sim_mem_alloc_error_generation_active(VOID)
1418 {
1419     if (mem_alloc_fail_after == FAIL_DISABLE)
1420         return UX_ERROR;
1421     if (mem_alloc_count >= mem_alloc_fail_after)
1422         return UX_SUCCESS;
1423     return UX_ERROR;
1424 }
1425 
1426 /* Override. */
/* Test override of the ThreadX _tx_thread_preemption_change service.  The
   kernel logic is reproduced unchanged; the only additions are the ux_test
   action callbacks at entry and exit so tests can observe/override
   preemption-threshold changes.  Returns TX_SUCCESS or TX_THRESH_ERROR.  */
UINT  _tx_thread_preemption_change(TX_THREAD *thread_ptr, UINT new_threshold, UINT *old_threshold)
{

TX_INTERRUPT_SAVE_AREA

#ifndef TX_DISABLE_PREEMPTION_THRESHOLD
ULONG                                   priority_bit;
#if TX_MAX_PRIORITIES > 32
UINT                                    map_index;
#endif
#endif
UINT                                    status;
UX_TEST_OVERRIDE_TX_THREAD_PREEMPTION_CHANGE_PARAMS params = { thread_ptr, new_threshold };
UX_TEST_ACTION                          action;


    /* Let the test framework observe/override this call.  */
    action = ux_test_action_handler(UX_TEST_OVERRIDE_TX_THREAD_PREEMPTION_CHANGE, &params);
    ux_test_do_action_before(&action, &params);

    /* Default status to success.  */
    status =  TX_SUCCESS;

#ifdef TX_DISABLE_PREEMPTION_THRESHOLD

    /* Only allow 0 (disable all preemption) and returning preemption-threshold to the
       current thread priority if preemption-threshold is disabled. All other threshold
       values are converted to 0.  */
    if (thread_ptr -> tx_thread_user_priority != new_threshold)
    {

        /* Is the new threshold zero?  */
        if (new_threshold != ((UINT) 0))
        {

            /* Convert the new threshold to disable all preemption, since preemption-threshold is
               not supported.  */
            new_threshold =  ((UINT) 0);
        }
    }
#endif

    /* Lockout interrupts while the thread is being resumed.  */
    TX_DISABLE

    /* If trace is enabled, insert this event into the trace buffer.  */
    TX_TRACE_IN_LINE_INSERT(TX_TRACE_THREAD_PREEMPTION_CHANGE, thread_ptr, new_threshold, thread_ptr -> tx_thread_preempt_threshold, thread_ptr -> tx_thread_state, TX_TRACE_THREAD_EVENTS)

    /* Log this kernel call.  */
    TX_EL_THREAD_PREEMPTION_CHANGE_INSERT

    /* Determine if the new threshold is greater than the current user priority.  */
    if (new_threshold > thread_ptr -> tx_thread_user_priority)
    {

        /* Return error.  */
        status =  TX_THRESH_ERROR;
    }
    else
    {

#ifndef TX_DISABLE_PREEMPTION_THRESHOLD

        /* Determine if the new threshold is the same as the priority.  */
        if (thread_ptr -> tx_thread_user_priority == new_threshold)
        {

            /* Determine if this thread is at the head of the list.  */
            if (_tx_thread_priority_list[thread_ptr -> tx_thread_priority] == thread_ptr)
            {

#if TX_MAX_PRIORITIES > 32

                /* Calculate the index into the bit map array.  */
                map_index =  (thread_ptr -> tx_thread_priority)/((UINT) 32);
#endif

                /* Yes, this thread is at the front of the list.  Make sure
                   the preempted bit is cleared for this thread.
                   NOTE(review): MAP_INDEX is presumably a kernel macro that
                   resolves to map_index (or 0 when TX_MAX_PRIORITIES <= 32)
                   — confirm against tx_thread.h.  */
                TX_MOD32_BIT_SET(thread_ptr -> tx_thread_priority, priority_bit)
                _tx_thread_preempted_maps[MAP_INDEX] =  _tx_thread_preempted_maps[MAP_INDEX] & (~(priority_bit));

#if TX_MAX_PRIORITIES > 32

                /* Determine if there are any other bits set in this preempt map.  */
                if (_tx_thread_preempted_maps[MAP_INDEX] == ((ULONG) 0))
                {

                    /* No, clear the active bit to signify this preempt map has nothing set.  */
                    TX_DIV32_BIT_SET(thread_ptr -> tx_thread_priority, priority_bit)
                    _tx_thread_preempted_map_active =  _tx_thread_preempted_map_active & (~(priority_bit));
                }
#endif
            }
        }
#endif

        /* Return the user's preemption-threshold.   */
        *old_threshold =  thread_ptr -> tx_thread_user_preempt_threshold;

        /* Setup the new threshold.  */
        thread_ptr -> tx_thread_user_preempt_threshold =  new_threshold;

        /* Determine if the new threshold represents a higher priority than the priority inheritance threshold.  */
        if (new_threshold < thread_ptr -> tx_thread_inherit_priority)
        {

            /* Update the actual preemption-threshold with the new threshold.  */
            thread_ptr -> tx_thread_preempt_threshold =  new_threshold;
        }
        else
        {

            /* Update the actual preemption-threshold with the priority inheritance.  */
            thread_ptr -> tx_thread_preempt_threshold =  thread_ptr -> tx_thread_inherit_priority;
        }

        /* Is the thread priority less than the current highest priority?  If not, no preemption is required.  */
        if (_tx_thread_highest_priority < thread_ptr -> tx_thread_priority)
        {

            /* Is the new thread preemption-threshold less than the current highest priority?  If not, no preemption is required.  */
            if (_tx_thread_highest_priority < new_threshold)
            {

                /* If the current execute pointer is the same at this thread, preemption needs to take place.  */
                if (_tx_thread_execute_ptr == thread_ptr)
                {

                    /* Preemption needs to take place.  */

#ifndef TX_DISABLE_PREEMPTION_THRESHOLD

                    /* Determine if this thread has preemption threshold set.  */
                    if (thread_ptr -> tx_thread_preempt_threshold != thread_ptr -> tx_thread_priority)
                    {

#if TX_MAX_PRIORITIES > 32

                        /* Calculate the index into the bit map array.  */
                        map_index =  (thread_ptr -> tx_thread_priority)/((UINT) 32);

                        /* Set the active bit to remember that the preempt map has something set.  */
                        TX_DIV32_BIT_SET(thread_ptr -> tx_thread_priority, priority_bit)
                        _tx_thread_preempted_map_active =  _tx_thread_preempted_map_active | priority_bit;
#endif

                        /* Remember that this thread was preempted by a thread above the thread's threshold.  */
                        TX_MOD32_BIT_SET(thread_ptr -> tx_thread_priority, priority_bit)
                        _tx_thread_preempted_maps[MAP_INDEX] =  _tx_thread_preempted_maps[MAP_INDEX] | priority_bit;
                    }
#endif

#ifdef TX_THREAD_ENABLE_PERFORMANCE_INFO

                    /* Determine if the caller is an interrupt or from a thread.  */
                    if (TX_THREAD_GET_SYSTEM_STATE() == ((ULONG) 0))
                    {

                        /* Caller is a thread, so this is a solicited preemption.  */
                        _tx_thread_performance_solicited_preemption_count++;

                        /* Increment the thread's solicited preemption counter.  */
                        thread_ptr -> tx_thread_performance_solicited_preemption_count++;
                    }

                    /* Remember the thread that preempted this thread.  */
                    thread_ptr -> tx_thread_performance_last_preempting_thread =  _tx_thread_priority_list[_tx_thread_highest_priority];

                    /* Is the execute pointer different?  */
                    if (_tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] != _tx_thread_execute_ptr)
                    {

                        /* Move to next entry.  */
                        _tx_thread_performance__execute_log_index++;

                        /* Check for wrap condition.  */
                        if (_tx_thread_performance__execute_log_index >= TX_THREAD_EXECUTE_LOG_SIZE)
                        {

                            /* Set the index to the beginning.  */
                            _tx_thread_performance__execute_log_index =  ((UINT) 0);
                        }

                        /* Log the new execute pointer.  */
                        _tx_thread_performance_execute_log[_tx_thread_performance__execute_log_index] =  _tx_thread_execute_ptr;
                    }
#endif

                    /* Setup the highest priority thread to execute.  */
                    _tx_thread_execute_ptr =  _tx_thread_priority_list[_tx_thread_highest_priority];

                    /* Restore interrupts.  */
                    TX_RESTORE

                    /* Check for preemption.  */
                    _tx_thread_system_preempt_check();

                    /* Disable interrupts.  */
                    TX_DISABLE
                }
            }
        }
    }

    /* Restore interrupts.  */
    TX_RESTORE

    /* Let the test framework observe the outcome.  */
    ux_test_do_action_after(&action, &params);

    /* Return completion status.  */
    return(status);
}
1639 
/* Hook invoked when the system mutex is created: records the mutex
   pointer into the get/put hook entries (so later hooks match only this
   mutex) and restarts the mutex and allocation counters.  */
static void ux_system_mutex_create_callback(UX_TEST_ACTION *action, VOID *params)
{
UX_TEST_OVERRIDE_TX_MUTEX_CREATE_PARAMS *create_params = params;
TX_MUTEX                                *created_mutex = create_params -> mutex_ptr;

    /* Bind the get/put hook entries to the newly created mutex.  */
    ux_system_mutex_hooks[1].mutex_ptr = created_mutex;
    ux_system_mutex_hooks[2].mutex_ptr = created_mutex;

    /* Restart mutex on/off counting.  */
    ux_system_mutex_on_count = 0;
    ux_system_mutex_off_count = 0;

    /* Restart allocation counting.  */
    mem_alloc_count = 0;
}
1653 
/* Hook invoked on every tx_mutex_get of the USBX system mutex.

   Responsibilities:
   - count lock operations (ux_system_mutex_on_count);
   - snapshot the free byte counts of the regular and cache-safe pools so
     the matching put-hook can detect allocations made under the lock;
   - when allocation-failure generation is armed
     (mem_alloc_fail_after != FAIL_DISABLE), replay the previously logged
     allocation baseline and, once the target count is reached, exhaust
     both pools so the allocation performed under this lock fails.

   `action' and `params' are unused; the hook applies to the recorded
   system mutex only (see ux_system_mutex_create_callback).

   Fix vs. original: removed the unused, misleadingly named local
   `mutex_create_param' (copy-paste residue from the create hook) and a
   commented-out debug printf.  */
static void ux_system_mutex_get_callback(UX_TEST_ACTION *action, VOID *params)
{
ULONG                                this_count = ux_system_mutex_on_count;

    /* Skip while this hook itself is allocating/freeing flagged memory,
       to avoid recursion through the overridden mutex services.  */
    if (ux_system_mutex_callback_skip)
        return;

    ux_system_mutex_on_count ++;

    /* Snapshot pool levels; the put-hook compares against these to log
       allocations that happened while the mutex was held.  */
    rmem_free = _ux_system -> ux_system_memory_byte_pool[UX_MEMORY_BYTE_POOL_REGULAR] -> ux_byte_pool_available;
    cmem_free = _ux_system -> ux_system_memory_byte_pool[UX_MEMORY_BYTE_POOL_CACHE_SAFE] -> ux_byte_pool_available;

    /* Just return if error generation is disabled. */
    if (mem_alloc_fail_after == FAIL_DISABLE)
        return;

    /* Always fail if we are short of logs. */
    /* When failing started, never stop it. */
    if (ux_system_mutex_alloc_logs_match > ux_system_mutex_alloc_logs_count || mem_alloc_do_fail == UX_TRUE)
        mem_alloc_do_fail = UX_TRUE;

    /* Check log baseline to increase memory allocate count. */
    else if (this_count >= ux_system_mutex_alloc_logs[ux_system_mutex_alloc_logs_match].first_count &&
             this_count <= ux_system_mutex_alloc_logs[ux_system_mutex_alloc_logs_match].last_count)
    {
        /* Consume all memory if we will generate memory allocation error. */
        if (mem_alloc_count >= mem_alloc_fail_after)
        {
            mem_alloc_do_fail = UX_TRUE;
        }
        mem_alloc_count ++;

        /* Advance to the next logged allocation area once past it.  */
        if (ux_system_mutex_on_count > ux_system_mutex_alloc_logs[ux_system_mutex_alloc_logs_match].last_count)
        {
            ux_system_mutex_alloc_logs_match ++;
        }
    }

    if (mem_alloc_do_fail)
    {
        /* Exhaust both pools so the protected allocation fails; the skip
           flag keeps the helper's own mutex traffic out of this hook.  */
        ux_system_mutex_callback_skip = UX_TRUE;
        ux_test_utility_sim_mem_allocate_until_flagged(0, UX_REGULAR_MEMORY);
        ux_test_utility_sim_mem_allocate_until_flagged(0, UX_CACHE_SAFE_MEMORY);
        ux_system_mutex_callback_skip = UX_FALSE;
    }
}
1702 
/* Hook invoked on every tx_mutex_put of the USBX system mutex.

   Responsibilities:
   - count unlock operations and assert they pair with the get-hook's
     lock count;
   - if the get-hook exhausted memory to force an allocation failure,
     release that flagged memory again;
   - otherwise (logging phase, error generation disabled) compare the
     current pool levels with the snapshots taken by the get-hook and
     record, in ux_system_mutex_alloc_logs[], the ranges of lock counts
     during which allocations occurred.

   `action' and `params' are unused; the hook applies to the recorded
   system mutex only.

   Fix vs. original: removed the unused, misleadingly named local
   `mutex_create_param' (copy-paste residue from the create hook).  */
static void ux_system_mutex_put_callback(UX_TEST_ACTION *action, VOID *params)
{
ULONG                                this_count = ux_system_mutex_off_count;

    /* Skip while this hook itself is allocating/freeing flagged memory.  */
    if (ux_system_mutex_callback_skip)
        return;

    ux_system_mutex_off_count ++;
    UX_TEST_ASSERT(ux_system_mutex_on_count == ux_system_mutex_off_count);

    if (mem_alloc_do_fail)
    {
        /* Release the memory the get-hook consumed to force the failure.  */
        ux_system_mutex_callback_skip = UX_TRUE;
        ux_test_utility_sim_mem_free_all_flagged(UX_REGULAR_MEMORY);
        ux_test_utility_sim_mem_free_all_flagged(UX_CACHE_SAFE_MEMORY);
        ux_system_mutex_callback_skip = UX_FALSE;
    }

    /* We stop logging when generating errors. */
    if (mem_alloc_fail_after != FAIL_DISABLE)
        return;

    /* We stop logging when it's locked. */
    if (ux_system_mutex_alloc_logs_lock)
        return;

    /* It's memory allocate, if memory level down.  */
    if (_ux_system -> ux_system_memory_byte_pool[UX_MEMORY_BYTE_POOL_REGULAR] -> ux_byte_pool_available < rmem_free ||
        _ux_system -> ux_system_memory_byte_pool[UX_MEMORY_BYTE_POOL_CACHE_SAFE] -> ux_byte_pool_available < cmem_free)
    {
        /* Memory allocate count.  */
        mem_alloc_count ++;

        /* Log memory allocate count: open a new area on the first
           allocation, then keep extending its last_count.  */
        if (ux_system_mutex_alloc_logs_area == UX_FALSE)
        {
            ux_system_mutex_alloc_logs_area = UX_TRUE;
            ux_system_mutex_alloc_logs[ux_system_mutex_alloc_logs_count].first_count = this_count;
        }
        ux_system_mutex_alloc_logs[ux_system_mutex_alloc_logs_count].last_count  = this_count;
    }
    else if(ux_system_mutex_alloc_logs_area == UX_TRUE)
    {
        /* Allocation run ended: close the current log area.  */
        ux_system_mutex_alloc_logs_area = UX_FALSE;

        UX_TEST_ASSERT(ux_system_mutex_alloc_logs_count != (SYSTEM_MUTEX_ALLOC_LOG_SIZE - 1));
        if (ux_system_mutex_alloc_logs_count < SYSTEM_MUTEX_ALLOC_LOG_SIZE - 1)
        {
            ux_system_mutex_alloc_logs_count ++;
        }
        else
        {
            /* Log table full: stop logging from now on.  */
            ux_system_mutex_alloc_logs_lock = UX_TRUE;
        }
    }
}
1760 
/* Test-simulator override of the ThreadX kernel service _tx_mutex_put().

   Releases one ownership count of `mutex_ptr' exactly as the real kernel
   service does — including priority-inheritance unwinding and resumption
   of the highest-priority suspended thread — but additionally fires the
   registered UX_TEST_OVERRIDE_TX_MUTEX_PUT hooks before and after the
   operation so tests (e.g. ux_system_mutex_put_callback above) can
   observe every mutex release.

   Returns TX_SUCCESS on release, TX_NOT_OWNED if the caller does not own
   the mutex, or the kernel completion status.

   NOTE(review): the body between the two hook calls is intended to be an
   exact copy of the ThreadX implementation; keep it in sync with the
   kernel source rather than editing it locally.  */
UINT  _tx_mutex_put(TX_MUTEX *mutex_ptr)
{

TX_INTERRUPT_SAVE_AREA

TX_THREAD       *thread_ptr;
TX_THREAD       *old_owner;
UINT            old_priority;
UINT            status;
TX_MUTEX        *next_mutex;
TX_MUTEX        *previous_mutex;
UINT            owned_count;
UINT            suspended_count;
TX_THREAD       *current_thread;
TX_THREAD       *next_thread;
TX_THREAD       *previous_thread;
TX_THREAD       *suspended_thread;
UX_TEST_OVERRIDE_TX_MUTEX_PUT_PARAMS action_params = {mutex_ptr};

    /* Perform hooked callbacks (test instrumentation; not in the kernel).  */
    ux_test_do_hooks_before(UX_TEST_OVERRIDE_TX_MUTEX_PUT, &action_params);

    /* Setup status to indicate the processing is not complete.  */
    status =  TX_NOT_DONE;

    /* Disable interrupts to put an instance back to the mutex.  */
    TX_DISABLE

#ifdef TX_MUTEX_ENABLE_PERFORMANCE_INFO

    /* Increment the total mutex put counter.  */
    _tx_mutex_performance_put_count++;

    /* Increment the number of attempts to put this mutex.  */
    mutex_ptr -> tx_mutex_performance_put_count++;
#endif

    /* If trace is enabled, insert this event into the trace buffer.  */
    TX_TRACE_IN_LINE_INSERT(TX_TRACE_MUTEX_PUT, mutex_ptr, TX_POINTER_TO_ULONG_CONVERT(mutex_ptr -> tx_mutex_owner), mutex_ptr -> tx_mutex_ownership_count, TX_POINTER_TO_ULONG_CONVERT(&old_priority), TX_TRACE_MUTEX_EVENTS)

    /* Log this kernel call.  */
    TX_EL_MUTEX_PUT_INSERT

    /* Determine if this mutex is owned.  */
    if (mutex_ptr -> tx_mutex_ownership_count != ((UINT) 0))
    {

        /* Pickup the owning thread pointer.  */
        thread_ptr =  mutex_ptr -> tx_mutex_owner;

        /* Pickup thread pointer.  */
        TX_THREAD_GET_CURRENT(current_thread)

        /* Check to see if the mutex is owned by the calling thread.  */
        if (mutex_ptr -> tx_mutex_owner != current_thread)
        {

            /* Determine if the preempt disable flag is set, indicating that
               the caller is not the application but from ThreadX. In such
               cases, the thread mutex owner does not need to match.  */
            if (_tx_thread_preempt_disable == ((UINT) 0))
            {

                /* Invalid mutex release.  */

                /* Restore interrupts.  */
                TX_RESTORE

                /* Caller does not own the mutex.  */
                status =  TX_NOT_OWNED;
            }
        }

        /* Determine if we should continue.  */
        if (status == TX_NOT_DONE)
        {

            /* Decrement the mutex ownership count.  */
            mutex_ptr -> tx_mutex_ownership_count--;

            /* Determine if the mutex is still owned by the current thread.  */
            if (mutex_ptr -> tx_mutex_ownership_count != ((UINT) 0))
            {

                /* Restore interrupts.  */
                TX_RESTORE

                /* Mutex is still owned, just return successful status.  */
                status =  TX_SUCCESS;
            }
            else
            {

                /* Check for a NULL thread pointer, which can only happen during initialization.   */
                if (thread_ptr == TX_NULL)
                {

                    /* Restore interrupts.  */
                    TX_RESTORE

                    /* Mutex is now available, return successful status.  */
                    status =  TX_SUCCESS;
                }
                else
                {

                    /* The mutex is now available.   */

                    /* Remove this mutex from the owned mutex list.  */

                    /* Decrement the ownership count.  */
                    thread_ptr -> tx_thread_owned_mutex_count--;

                    /* Determine if this mutex was the only one on the list.  */
                    if (thread_ptr -> tx_thread_owned_mutex_count == ((UINT) 0))
                    {

                        /* Yes, the list is empty.  Simply set the head pointer to NULL.  */
                        thread_ptr -> tx_thread_owned_mutex_list =  TX_NULL;
                    }
                    else
                    {

                        /* No, there are more mutexes on the list.  */

                        /* Link-up the neighbors.  */
                        next_mutex =                             mutex_ptr -> tx_mutex_owned_next;
                        previous_mutex =                         mutex_ptr -> tx_mutex_owned_previous;
                        next_mutex -> tx_mutex_owned_previous =  previous_mutex;
                        previous_mutex -> tx_mutex_owned_next =  next_mutex;

                        /* See if we have to update the created list head pointer.  */
                        if (thread_ptr -> tx_thread_owned_mutex_list == mutex_ptr)
                        {

                            /* Yes, move the head pointer to the next link. */
                            thread_ptr -> tx_thread_owned_mutex_list =  next_mutex;
                        }
                    }

                    /* Determine if the simple, non-suspension, non-priority inheritance case is present.  */
                    if (mutex_ptr -> tx_mutex_suspension_list == TX_NULL)
                    {

                        /* Is this a priority inheritance mutex?  */
                        if (mutex_ptr -> tx_mutex_inherit == TX_FALSE)
                        {

                            /* Yes, we are done - set the mutex owner to NULL.   */
                            mutex_ptr -> tx_mutex_owner =  TX_NULL;

                            /* Restore interrupts.  */
                            TX_RESTORE

                            /* Mutex is now available, return successful status.  */
                            status =  TX_SUCCESS;
                        }
                    }

                    /* Determine if the processing is complete.  */
                    if (status == TX_NOT_DONE)
                    {

                        /* Initialize original owner and thread priority.  */
                        old_owner =      TX_NULL;
                        old_priority =   thread_ptr -> tx_thread_user_priority;

                        /* Does this mutex support priority inheritance?  */
                        if (mutex_ptr -> tx_mutex_inherit == TX_TRUE)
                        {

#ifndef TX_NOT_INTERRUPTABLE

                            /* Temporarily disable preemption.  */
                            _tx_thread_preempt_disable++;

                            /* Restore interrupts.  */
                            TX_RESTORE
#endif

                            /* Search the owned mutexes for this thread to determine the highest priority for this
                               former mutex owner to return to.  */
                            next_mutex =  thread_ptr -> tx_thread_owned_mutex_list;
                            while (next_mutex != TX_NULL)
                            {

                                /* Does this mutex support priority inheritance?  */
                                if (next_mutex -> tx_mutex_inherit == TX_TRUE)
                                {

                                    /* Determine if highest priority field of the mutex is higher than the priority to
                                       restore.  */
                                    if (next_mutex -> tx_mutex_highest_priority_waiting < old_priority)
                                    {

                                        /* Use this priority to return releasing thread to.  */
                                        old_priority =   next_mutex -> tx_mutex_highest_priority_waiting;
                                    }
                                }

                                /* Move mutex pointer to the next mutex in the list.  */
                                next_mutex =  next_mutex -> tx_mutex_owned_next;

                                /* Are we at the end of the list?  */
                                if (next_mutex == thread_ptr -> tx_thread_owned_mutex_list)
                                {

                                    /* Yes, set the next mutex to NULL.  */
                                    next_mutex =  TX_NULL;
                                }
                            }

#ifndef TX_NOT_INTERRUPTABLE

                            /* Disable interrupts.  */
                            TX_DISABLE

                            /* Undo the temporarily preemption disable.  */
                            _tx_thread_preempt_disable--;
#endif
                        }

                        /* Determine if priority inheritance is in effect and there are one or more
                           threads suspended on the mutex.  */
                        if (mutex_ptr -> tx_mutex_suspended_count > ((UINT) 1))
                        {

                            /* Is priority inheritance in effect?  */
                            if (mutex_ptr -> tx_mutex_inherit == TX_TRUE)
                            {

                                /* Yes, this code is simply to ensure the highest priority thread is positioned
                                   at the front of the suspension list.  */

#ifndef TX_NOT_INTERRUPTABLE

                                /* Temporarily disable preemption.  */
                                _tx_thread_preempt_disable++;

                                /* Restore interrupts.  */
                                TX_RESTORE
#endif

                                /* Call the mutex prioritize processing to ensure the
                                   highest priority thread is resumed.  */
#ifdef TX_MISRA_ENABLE
                                do
                                {
                                    status =  _tx_mutex_prioritize(mutex_ptr);
                                } while (status != TX_SUCCESS);
#else
                                _tx_mutex_prioritize(mutex_ptr);
#endif

                                /* At this point, the highest priority thread is at the
                                   front of the suspension list.  */

#ifndef TX_NOT_INTERRUPTABLE

                                /* Disable interrupts.  */
                                TX_DISABLE

                                /* Back off the preemption disable.  */
                                _tx_thread_preempt_disable--;
#endif
                            }
                        }

                        /* Now determine if there are any threads still waiting on the mutex.  */
                        if (mutex_ptr -> tx_mutex_suspension_list == TX_NULL)
                        {

                            /* No, there are no longer any threads waiting on the mutex.  */

#ifndef TX_NOT_INTERRUPTABLE

                            /* Temporarily disable preemption.  */
                            _tx_thread_preempt_disable++;

                            /* Restore interrupts.  */
                            TX_RESTORE
#endif

                            /* Mutex is not owned, but it is possible that a thread that
                               caused a priority inheritance to occur is no longer waiting
                               on the mutex.  */
                            if (mutex_ptr -> tx_mutex_inherit == TX_TRUE)
                            {

                                /* Setup the highest priority waiting thread.  */
                                mutex_ptr -> tx_mutex_highest_priority_waiting =  (UINT) TX_MAX_PRIORITIES;

                                /* Determine if we need to restore priority.  */
                                if ((mutex_ptr -> tx_mutex_owner) -> tx_thread_priority != old_priority)
                                {

                                    /* Yes, restore the priority of thread.  */
                                    _tx_mutex_priority_change(mutex_ptr -> tx_mutex_owner, old_priority);
                                }
                            }

#ifndef TX_NOT_INTERRUPTABLE

                            /* Disable interrupts again.  */
                            TX_DISABLE

                            /* Back off the preemption disable.  */
                            _tx_thread_preempt_disable--;
#endif

                            /* Clear the owner flag.  */
                            if (mutex_ptr -> tx_mutex_ownership_count == ((UINT) 0))
                            {

                                /* Set the mutex owner to NULL.  */
                                mutex_ptr -> tx_mutex_owner =  TX_NULL;
                            }

                            /* Restore interrupts.  */
                            TX_RESTORE

                            /* Check for preemption.  */
                            _tx_thread_system_preempt_check();

                            /* Set status to success.  */
                            status =  TX_SUCCESS;
                        }
                        else
                        {

                            /* Pickup the thread at the front of the suspension list.  */
                            thread_ptr =  mutex_ptr -> tx_mutex_suspension_list;

                            /* Save the previous ownership information, if inheritance is
                               in effect.  */
                            if (mutex_ptr -> tx_mutex_inherit == TX_TRUE)
                            {

                                /* Remember the old mutex owner.  */
                                old_owner =  mutex_ptr -> tx_mutex_owner;

                                /* Setup owner thread priority information.  */
                                mutex_ptr -> tx_mutex_original_priority =   thread_ptr -> tx_thread_priority;

                                /* Setup the highest priority waiting thread.  */
                                mutex_ptr -> tx_mutex_highest_priority_waiting =  (UINT) TX_MAX_PRIORITIES;
                            }

                            /* Determine how many mutexes are owned by this thread.  */
                            owned_count =  thread_ptr -> tx_thread_owned_mutex_count;

                            /* Determine if this thread owns any other mutexes that have priority inheritance.  */
                            if (owned_count == ((UINT) 0))
                            {

                                /* The owned mutex list is empty.  Add mutex to empty list.  */
                                thread_ptr -> tx_thread_owned_mutex_list =     mutex_ptr;
                                mutex_ptr -> tx_mutex_owned_next =             mutex_ptr;
                                mutex_ptr -> tx_mutex_owned_previous =         mutex_ptr;
                            }
                            else
                            {

                                /* Non-empty list. Link up the mutex.  */

                                /* Pickup tail pointer.  */
                                next_mutex =                            thread_ptr -> tx_thread_owned_mutex_list;
                                previous_mutex =                        next_mutex -> tx_mutex_owned_previous;

                                /* Place the owned mutex in the list.  */
                                next_mutex -> tx_mutex_owned_previous =  mutex_ptr;
                                previous_mutex -> tx_mutex_owned_next =  mutex_ptr;

                                /* Setup this mutex's next and previous created links.  */
                                mutex_ptr -> tx_mutex_owned_previous =   previous_mutex;
                                mutex_ptr -> tx_mutex_owned_next =       next_mutex;
                            }

                            /* Increment the number of mutexes owned counter.  */
                            thread_ptr -> tx_thread_owned_mutex_count =  owned_count + ((UINT) 1);

                            /* Mark the Mutex as owned and fill in the corresponding information.  */
                            mutex_ptr -> tx_mutex_ownership_count =  (UINT) 1;
                            mutex_ptr -> tx_mutex_owner =            thread_ptr;

                            /* Remove the suspended thread from the list.  */

                            /* Decrement the suspension count.  */
                            mutex_ptr -> tx_mutex_suspended_count--;

                            /* Pickup the suspended count.  */
                            suspended_count =  mutex_ptr -> tx_mutex_suspended_count;

                            /* See if this is the only suspended thread on the list.  */
                            if (suspended_count == TX_NO_SUSPENSIONS)
                            {

                                /* Yes, the only suspended thread.  */

                                /* Update the head pointer.  */
                                mutex_ptr -> tx_mutex_suspension_list =  TX_NULL;
                            }
                            else
                            {

                                /* At least one more thread is on the same expiration list.  */

                                /* Update the list head pointer.  */
                                next_thread =                                  thread_ptr -> tx_thread_suspended_next;
                                mutex_ptr -> tx_mutex_suspension_list =        next_thread;

                                /* Update the links of the adjacent threads.  */
                                previous_thread =                              thread_ptr -> tx_thread_suspended_previous;
                                next_thread -> tx_thread_suspended_previous =  previous_thread;
                                previous_thread -> tx_thread_suspended_next =  next_thread;
                            }

                            /* Prepare for resumption of the first thread.  */

                            /* Clear cleanup routine to avoid timeout.  */
                            thread_ptr -> tx_thread_suspend_cleanup =  TX_NULL;

                            /* Put return status into the thread control block.  */
                            thread_ptr -> tx_thread_suspend_status =  TX_SUCCESS;

#ifdef TX_NOT_INTERRUPTABLE

                            /* Determine if priority inheritance is enabled for this mutex.  */
                            if (mutex_ptr -> tx_mutex_inherit == TX_TRUE)
                            {

                                /* Yes, priority inheritance is requested.  */

                                /* Determine if there are any more threads still suspended on the mutex.  */
                                if (mutex_ptr -> tx_mutex_suspended_count != ((ULONG) 0))
                                {

                                    /* Determine if there are more than one thread suspended on the mutex.  */
                                    if (mutex_ptr -> tx_mutex_suspended_count > ((ULONG) 1))
                                    {

                                        /* If so, prioritize the list so the highest priority thread is placed at the
                                           front of the suspension list.  */
#ifdef TX_MISRA_ENABLE
                                        do
                                        {
                                            status =  _tx_mutex_prioritize(mutex_ptr);
                                        } while (status != TX_SUCCESS);
#else
                                        _tx_mutex_prioritize(mutex_ptr);
#endif
                                    }

                                    /* Now, pickup the list head and set the priority.  */

                                    /* Determine if there still are threads suspended for this mutex.  */
                                    suspended_thread =  mutex_ptr -> tx_mutex_suspension_list;
                                    if (suspended_thread != TX_NULL)
                                    {

                                        /* Setup the highest priority thread waiting on this mutex.  */
                                        mutex_ptr -> tx_mutex_highest_priority_waiting =  suspended_thread -> tx_thread_priority;
                                    }
                                }

                                /* Restore previous priority needs to be restored after priority
                                   inheritance.  */
                                if (old_owner != TX_NULL)
                                {

                                    /* Determine if we need to restore priority.  */
                                    if (old_owner -> tx_thread_priority != old_priority)
                                    {

                                        /* Restore priority of thread.  */
                                        _tx_mutex_priority_change(old_owner, old_priority);
                                    }
                                }
                            }

                            /* Resume the thread!  */
                            _tx_thread_system_ni_resume(thread_ptr);

                            /* Restore interrupts.  */
                            TX_RESTORE
#else

                            /* Temporarily disable preemption.  */
                            _tx_thread_preempt_disable++;

                            /* Restore interrupts.  */
                            TX_RESTORE

                            /* Determine if priority inheritance is enabled for this mutex.  */
                            if (mutex_ptr -> tx_mutex_inherit == TX_TRUE)
                            {

                                /* Yes, priority inheritance is requested.  */

                                /* Determine if there are any more threads still suspended on the mutex.  */
                                if (mutex_ptr -> tx_mutex_suspended_count != TX_NO_SUSPENSIONS)
                                {

                                    /* Prioritize the list so the highest priority thread is placed at the
                                       front of the suspension list.  */
#ifdef TX_MISRA_ENABLE
                                    do
                                    {
                                        status =  _tx_mutex_prioritize(mutex_ptr);
                                    } while (status != TX_SUCCESS);
#else
                                    _tx_mutex_prioritize(mutex_ptr);
#endif

                                    /* Now, pickup the list head and set the priority.  */

                                    /* Disable interrupts.  */
                                    TX_DISABLE

                                    /* Determine if there still are threads suspended for this mutex.  */
                                    suspended_thread =  mutex_ptr -> tx_mutex_suspension_list;
                                    if (suspended_thread != TX_NULL)
                                    {

                                        /* Setup the highest priority thread waiting on this mutex.  */
                                        mutex_ptr -> tx_mutex_highest_priority_waiting =  suspended_thread -> tx_thread_priority;
                                    }

                                    /* Restore interrupts.  */
                                    TX_RESTORE
                                }

                                /* Restore previous priority needs to be restored after priority
                                   inheritance.  */
                                if (old_owner != TX_NULL)
                                {

                                    /* Is the priority different?  */
                                    if (old_owner -> tx_thread_priority != old_priority)
                                    {

                                        /* Restore the priority of thread.  */
                                        _tx_mutex_priority_change(old_owner, old_priority);
                                    }
                                }
                            }

                            /* Resume thread.  */
                            _tx_thread_system_resume(thread_ptr);
#endif

                            /* Return a successful status.  */
                            status =  TX_SUCCESS;
                        }
                    }
                }
            }
        }
    }
    else
    {

        /* Restore interrupts.  */
        TX_RESTORE

        /* Caller does not own the mutex.  */
        status =  TX_NOT_OWNED;
    }

    /* Perform hooked callbacks (test instrumentation; not in the kernel).  */
    ux_test_do_hooks_after(UX_TEST_OVERRIDE_TX_MUTEX_PUT, &action_params);

    /* Return the completion status.  */
    return(status);
}
2336 
/* Test-simulator duplicate of the NetX _nx_packet_pool_create service.
   Identical to the stock implementation except that it first runs the
   registered UX_TEST_OVERRIDE_NX_PACKET_POOL_CREATE hook, which lets a
   test observe the call or force an early return status.

   pool_ptr     - pool control block to initialize
   name_ptr     - pool name string
   payload_size - requested payload size per packet (bytes)
   pool_start   - start of the memory area backing the pool
   pool_size    - size of that memory area (bytes)
   Returns NX_SUCCESS, or the hook-supplied status on early return.  */
UINT  _nx_packet_pool_create(NX_PACKET_POOL *pool_ptr, CHAR *name_ptr, ULONG payload_size,
                             VOID *pool_start, ULONG pool_size)
{

TX_INTERRUPT_SAVE_AREA

NX_PACKET_POOL *tail_ptr;              /* Working packet pool pointer */
ULONG           packets;               /* Number of packets in pool   */
ULONG           original_payload_size; /* Original payload size       */
ULONG           header_size;           /* Rounded header size         */
CHAR           *packet_ptr;            /* Working packet pointer      */
CHAR           *next_packet_ptr;       /* Next packet pointer         */
CHAR           *end_of_pool;           /* End of pool area            */
CHAR           *payload_address;       /* Address of the first payload*/
VOID           *rounded_pool_start;    /* Rounded stating address     */
/* NOTE(review): reuses the TX_THREAD_CREATE params struct — only the first
   (name pointer) field is initialized; confirm this matches what the
   NX_PACKET_POOL_CREATE hook expects.  */
UX_TEST_OVERRIDE_TX_THREAD_CREATE_PARAMS        action_params = { name_ptr };
UX_TEST_ACTION                                  action;


    /* Perform action.  */
    action = ux_test_action_handler(UX_TEST_OVERRIDE_NX_PACKET_POOL_CREATE, &action_params);
    ux_test_do_action_before(&action, &action_params);
    if (ux_test_is_expedient_on())
    {
        if (action.matched && !action.do_after)
        {
            if (!action.no_return)
            {
                /* Hook short-circuits the real creation.  */
                return action.status;
            }
        }
    }

    /* Save the original payload size.  */
    original_payload_size =  payload_size;

    /* Align the starting address to four bytes. */
    /*lint -e{923} suppress cast between ULONG and pointer.  */
    rounded_pool_start = (VOID *)((((ALIGN_TYPE)pool_start + NX_PACKET_ALIGNMENT  - 1) / NX_PACKET_ALIGNMENT) * NX_PACKET_ALIGNMENT);

    /* Round the pool size down to something that is evenly divisible by alignment.  */
    /*lint -e{923} suppress cast between ULONG and pointer.  */
    pool_size = (ULONG)(((pool_size - ((ALIGN_TYPE)rounded_pool_start - (ALIGN_TYPE)pool_start)) / NX_PACKET_ALIGNMENT) * NX_PACKET_ALIGNMENT);

    /* Set the pool starting address. */
    pool_start = rounded_pool_start;

    /* Calculate the address of payload. */
    /*lint -e{923} suppress cast between ULONG and pointer.  */
    payload_address = (CHAR *)((ALIGN_TYPE)rounded_pool_start + sizeof(NX_PACKET));

    /* Align the address of payload. */
    /*lint -e{923} suppress cast between ULONG and pointer.  */
    payload_address = (CHAR *)((((ALIGN_TYPE)payload_address + NX_PACKET_ALIGNMENT  - 1) / NX_PACKET_ALIGNMENT) * NX_PACKET_ALIGNMENT);

    /* Calculate the header size. */
    /*lint -e{923} suppress cast between ULONG and pointer.  */
    header_size = (ULONG)((ALIGN_TYPE)payload_address - (ALIGN_TYPE)rounded_pool_start);

    /* Round the packet size up to something that helps guarantee proper alignment for header and payload.  */
    payload_size = (ULONG)(((header_size + payload_size + NX_PACKET_ALIGNMENT  - 1) / NX_PACKET_ALIGNMENT) * NX_PACKET_ALIGNMENT - header_size);

    /* Clear pool fields. */
    memset(pool_ptr, 0, sizeof(NX_PACKET_POOL));

    /* Setup the basic packet pool fields.  */
    pool_ptr -> nx_packet_pool_name =             name_ptr;
    pool_ptr -> nx_packet_pool_suspension_list =  TX_NULL;
    pool_ptr -> nx_packet_pool_suspended_count =  0;
    pool_ptr -> nx_packet_pool_start =            (CHAR *)pool_start;
    pool_ptr -> nx_packet_pool_size =             pool_size;
    pool_ptr -> nx_packet_pool_payload_size =     original_payload_size;

    /* Calculate the end of the pool's memory area.  */
    end_of_pool =  ((CHAR *)pool_start) + pool_size;

    /* Walk through the pool area, setting up the available packet list.  */
    packets =            0;
    packet_ptr =         (CHAR *)rounded_pool_start;
    next_packet_ptr =    packet_ptr + (payload_size + header_size);

    /*lint -e{946} suppress pointer subtraction, since it is necessary. */
    while (next_packet_ptr <= end_of_pool)
    {

        /* Yes, we have another packet.  Increment the packet count.  */
        packets++;

        /* Setup the link to the next packet.  */
        /*lint -e{929} -e{740} -e{826} suppress cast of pointer to pointer, since it is necessary  */
        ((NX_PACKET *)packet_ptr) -> nx_packet_queue_next =  (NX_PACKET *)next_packet_ptr;

        /* Remember that this packet pool is the owner.  */
        /*lint -e{929} -e{740} -e{826} suppress cast of pointer to pointer, since it is necessary  */
        ((NX_PACKET *)packet_ptr) -> nx_packet_pool_owner =  pool_ptr;

#ifndef NX_DISABLE_PACKET_CHAIN
        /* Clear the next packet pointer.  */
        /*lint -e{929} -e{740} -e{826} suppress cast of pointer to pointer, since it is necessary  */
        ((NX_PACKET *)packet_ptr) -> nx_packet_next =  (NX_PACKET *)NX_NULL;
#endif /* NX_DISABLE_PACKET_CHAIN */

        /* Mark the packet as free.  */
        /*lint -e{929} -e{923} -e{740} -e{826} suppress cast of pointer to pointer, since it is necessary  */
        ((NX_PACKET *)packet_ptr) -> nx_packet_union_next.nx_packet_tcp_queue_next =  (NX_PACKET *)NX_PACKET_FREE;

        /* Setup the packet data pointers.  */
        /*lint -e{929} -e{928} -e{740} -e{826} suppress cast of pointer to pointer, since it is necessary  */
        ((NX_PACKET *)packet_ptr) -> nx_packet_data_start =  (UCHAR *)(packet_ptr + header_size);

        /*lint -e{929} -e{928} -e{740} -e{826} suppress cast of pointer to pointer, since it is necessary  */
        ((NX_PACKET *)packet_ptr) -> nx_packet_data_end =    (UCHAR *)(packet_ptr + header_size + original_payload_size);

        /* Add debug information. */
        NX_PACKET_DEBUG(__FILE__, __LINE__, (NX_PACKET *)packet_ptr);

        /* Advance to the next packet.  */
        packet_ptr =   next_packet_ptr;

        /* Update the next packet pointer.  */
        next_packet_ptr =  packet_ptr + (payload_size + header_size);
    }

    /* Backup to the last packet in the pool.  */
    /* NOTE(review): if the pool area is too small to hold even one packet,
       packets == 0 and this steps before pool_start.  The _nxe_ error-check
       layer is presumably expected to reject such sizes — confirm callers
       never pass an undersized pool.  */
    packet_ptr =  packet_ptr - (payload_size + header_size);

    /* Set the last packet's forward pointer to NULL.  */
    /*lint -e{929} -e{740} -e{826} suppress cast of pointer to pointer, since it is necessary  */
    ((NX_PACKET *)packet_ptr) -> nx_packet_queue_next =  NX_NULL;

    /* Save the remaining information in the pool control packet.  */
    pool_ptr -> nx_packet_pool_available =  packets;
    pool_ptr -> nx_packet_pool_total =      packets;

    /* Set the packet pool available list.  */
    pool_ptr -> nx_packet_pool_available_list =  (NX_PACKET *)pool_start;

    /* If trace is enabled, register this object.  */
    NX_TRACE_OBJECT_REGISTER(NX_TRACE_OBJECT_TYPE_PACKET_POOL, pool_ptr, name_ptr, payload_size, packets);

    /* If trace is enabled, insert this event into the trace buffer.  */
    NX_TRACE_IN_LINE_INSERT(NX_TRACE_PACKET_POOL_CREATE, pool_ptr, payload_size, pool_start, pool_size, NX_TRACE_PACKET_EVENTS, 0, 0);

    /* Disable interrupts to place the packet pool on the created list.  */
    TX_DISABLE

    /* Setup the packet pool ID to make it valid.  */
    pool_ptr -> nx_packet_pool_id =  NX_PACKET_POOL_ID;

    /* Place the packet pool on the list of created packet pools.  First,
       check for an empty list.  */
    if (_nx_packet_pool_created_ptr)
    {

        /* Pickup tail pointer.  */
        tail_ptr =  _nx_packet_pool_created_ptr -> nx_packet_pool_created_previous;

        /* Place the new packet pool in the list.  */
        _nx_packet_pool_created_ptr -> nx_packet_pool_created_previous =  pool_ptr;
        tail_ptr -> nx_packet_pool_created_next =  pool_ptr;

        /* Setup this packet pool's created links.  */
        pool_ptr -> nx_packet_pool_created_previous =  tail_ptr;
        pool_ptr -> nx_packet_pool_created_next =      _nx_packet_pool_created_ptr;
    }
    else
    {

        /* The created packet pool list is empty.  Add packet pool to empty list.  */
        _nx_packet_pool_created_ptr =                  pool_ptr;
        pool_ptr -> nx_packet_pool_created_next =      pool_ptr;
        pool_ptr -> nx_packet_pool_created_previous =  pool_ptr;
    }

    /* Increment the number of packet pools created.  */
    _nx_packet_pool_created_count++;

    /* Restore interrupts.  */
    TX_RESTORE

    /* Return NX_SUCCESS.  */
    return(NX_SUCCESS);
}
2520 
_nx_packet_allocate(NX_PACKET_POOL * pool_ptr,NX_PACKET ** packet_ptr,ULONG packet_type,ULONG wait_option)2521 UINT  _nx_packet_allocate(NX_PACKET_POOL *pool_ptr,  NX_PACKET **packet_ptr,
2522                           ULONG packet_type, ULONG wait_option)
2523 {
2524 TX_INTERRUPT_SAVE_AREA
2525 
2526 UINT       status;              /* Return status           */
2527 TX_THREAD *thread_ptr;          /* Working thread pointer  */
2528 NX_PACKET *work_ptr;            /* Working packet pointer  */
2529 UX_TEST_OVERRIDE_NX_PACKET_ALLOCATE_PARAMS      action_params = { pool_ptr };
2530 UX_TEST_ACTION                                  action;
2531 
2532 
2533     /* Perform action.  */
2534     action = ux_test_action_handler(UX_TEST_OVERRIDE_NX_PACKET_ALLOCATE, &action_params);
2535     ux_test_do_action_before(&action, &action_params);
2536     if (ux_test_is_expedient_on())
2537     {
2538         if (action.matched && !action.do_after)
2539         {
2540             if (!action.no_return)
2541             {
2542                 return action.status;
2543             }
2544         }
2545     }
2546 
2547 #ifdef TX_ENABLE_EVENT_TRACE
2548 TX_TRACE_BUFFER_ENTRY *trace_event;
2549 ULONG                  trace_timestamp;
2550 #endif
2551 
2552     /* Make sure the packet_type does not go beyond nx_packet_data_end. */
2553     if (pool_ptr -> nx_packet_pool_payload_size < packet_type)
2554     {
2555         return(NX_INVALID_PARAMETERS);
2556     }
2557 
2558     /* Set the return pointer to NULL initially.  */
2559     *packet_ptr =   NX_NULL;
2560 
2561     /* If trace is enabled, insert this event into the trace buffer.  */
2562     NX_TRACE_IN_LINE_INSERT(NX_TRACE_PACKET_ALLOCATE, pool_ptr, 0, packet_type, pool_ptr -> nx_packet_pool_available, NX_TRACE_PACKET_EVENTS, &trace_event, &trace_timestamp);
2563 
2564     /* Disable interrupts to get a packet from the pool.  */
2565     TX_DISABLE
2566 
2567     /* Determine if there is an available packet.  */
2568     if (pool_ptr -> nx_packet_pool_available)
2569     {
2570 
2571         /* Yes, a packet is available.  Decrement the available count.  */
2572         pool_ptr -> nx_packet_pool_available--;
2573 
2574         /* Pickup the current packet pointer.  */
2575         work_ptr =  pool_ptr -> nx_packet_pool_available_list;
2576 
2577         /* Modify the available list to point at the next packet in the pool. */
2578         pool_ptr -> nx_packet_pool_available_list =  work_ptr -> nx_packet_queue_next;
2579 
2580         /* Setup various fields for this packet.  */
2581         work_ptr -> nx_packet_queue_next =   NX_NULL;
2582 #ifndef NX_DISABLE_PACKET_CHAIN
2583         work_ptr -> nx_packet_next =         NX_NULL;
2584         work_ptr -> nx_packet_last =         NX_NULL;
2585 #endif /* NX_DISABLE_PACKET_CHAIN */
2586         work_ptr -> nx_packet_length =       0;
2587         work_ptr -> nx_packet_prepend_ptr =  work_ptr -> nx_packet_data_start + packet_type;
2588         work_ptr -> nx_packet_append_ptr =   work_ptr -> nx_packet_prepend_ptr;
2589         work_ptr -> nx_packet_address.nx_packet_interface_ptr = NX_NULL;
2590 #ifdef NX_ENABLE_INTERFACE_CAPABILITY
2591         work_ptr -> nx_packet_interface_capability_flag = 0;
2592 #endif /* NX_ENABLE_INTERFACE_CAPABILITY */
2593         /* Set the TCP queue to the value that indicates it has been allocated.  */
2594         /*lint -e{923} suppress cast of ULONG to pointer.  */
2595         work_ptr -> nx_packet_union_next.nx_packet_tcp_queue_next =  (NX_PACKET *)NX_PACKET_ALLOCATED;
2596 
2597 #ifdef FEATURE_NX_IPV6
2598 
2599         /* Clear the option state. */
2600         work_ptr -> nx_packet_option_state = 0;
2601 #endif /* FEATURE_NX_IPV6 */
2602 
2603 #ifdef NX_IPSEC_ENABLE
2604 
2605         /* Clear the ipsec state. */
2606         work_ptr -> nx_packet_ipsec_state = 0;
2607         work_ptr -> nx_packet_ipsec_sa_ptr = NX_NULL;
2608 #endif /* NX_IPSEC_ENABLE */
2609 
2610 #ifndef NX_DISABLE_IPV4
2611         /* Initialize the IP version field */
2612         work_ptr -> nx_packet_ip_version = NX_IP_VERSION_V4;
2613 #endif /* !NX_DISABLE_IPV4  */
2614 
2615         /* Initialize the IP identification flag.  */
2616         work_ptr -> nx_packet_identical_copy = NX_FALSE;
2617 
2618         /* Initialize the IP header length. */
2619         work_ptr -> nx_packet_ip_header_length = 0;
2620 
2621 #ifdef NX_ENABLE_THREAD
2622         work_ptr -> nx_packet_type = 0;
2623 #endif /* NX_ENABLE_THREAD  */
2624 
2625         /* Place the new packet pointer in the return destination.  */
2626         *packet_ptr =  work_ptr;
2627 
2628         /* Set status to success.  */
2629         status =  NX_SUCCESS;
2630 
2631         /* Add debug information. */
2632         NX_PACKET_DEBUG(__FILE__, __LINE__, work_ptr);
2633     }
2634     else
2635     {
2636 
2637 #ifndef NX_DISABLE_PACKET_INFO
2638         /* Increment the packet pool empty request count.  */
2639         pool_ptr -> nx_packet_pool_empty_requests++;
2640 #endif
2641 
2642         /* Determine if the request specifies suspension.  */
2643         if (wait_option)
2644         {
2645 
2646             /* Prepare for suspension of this thread.  */
2647 
2648 #ifndef NX_DISABLE_PACKET_INFO
2649             /* Increment the packet pool empty request suspension count.  */
2650             pool_ptr -> nx_packet_pool_empty_suspensions++;
2651 #endif
2652 
2653             /* Pickup thread pointer.  */
2654             thread_ptr =  _tx_thread_current_ptr;
2655 
2656             /* Setup cleanup routine pointer.  */
2657             thread_ptr -> tx_thread_suspend_cleanup =  _nx_packet_pool_cleanup;
2658 
2659             /* Setup cleanup information, i.e. this pool control
2660                block.  */
2661             thread_ptr -> tx_thread_suspend_control_block =  (void *)pool_ptr;
2662 
2663             /* Save the return packet pointer address as well.  */
2664             thread_ptr -> tx_thread_additional_suspend_info =  (void *)packet_ptr;
2665 
2666             /* Save the packet type (or prepend offset) so this can be added
2667                after a new packet becomes available.  */
2668             thread_ptr -> tx_thread_suspend_info =  packet_type;
2669 
2670             /* Setup suspension list.  */
2671             if (pool_ptr -> nx_packet_pool_suspension_list)
2672             {
2673 
2674                 /* This list is not NULL, add current thread to the end. */
2675                 thread_ptr -> tx_thread_suspended_next =
2676                     pool_ptr -> nx_packet_pool_suspension_list;
2677                 thread_ptr -> tx_thread_suspended_previous =
2678                     (pool_ptr -> nx_packet_pool_suspension_list) -> tx_thread_suspended_previous;
2679                 ((pool_ptr -> nx_packet_pool_suspension_list) -> tx_thread_suspended_previous) -> tx_thread_suspended_next =
2680                     thread_ptr;
2681                 (pool_ptr -> nx_packet_pool_suspension_list) -> tx_thread_suspended_previous =   thread_ptr;
2682             }
2683             else
2684             {
2685 
2686                 /* No other threads are suspended.  Setup the head pointer and
2687                    just setup this threads pointers to itself.  */
2688                 pool_ptr -> nx_packet_pool_suspension_list =  thread_ptr;
2689                 thread_ptr -> tx_thread_suspended_next =            thread_ptr;
2690                 thread_ptr -> tx_thread_suspended_previous =        thread_ptr;
2691             }
2692 
2693             /* Increment the suspended thread count.  */
2694             pool_ptr -> nx_packet_pool_suspended_count++;
2695 
2696             /* Set the state to suspended.  */
2697             thread_ptr -> tx_thread_state =  TX_TCP_IP;
2698 
2699             /* Set the suspending flag.  */
2700             thread_ptr -> tx_thread_suspending =  TX_TRUE;
2701 
2702             /* Temporarily disable preemption.  */
2703             _tx_thread_preempt_disable++;
2704 
2705             /* Save the timeout value.  */
2706             thread_ptr -> tx_thread_timer.tx_timer_internal_remaining_ticks =  wait_option;
2707 
2708             /* Restore interrupts.  */
2709             TX_RESTORE
2710 
2711             /* Call actual thread suspension routine.  */
2712             _tx_thread_system_suspend(thread_ptr);
2713 
2714             /* Update the trace event with the status.  */
2715             NX_TRACE_EVENT_UPDATE(trace_event, trace_timestamp, NX_TRACE_PACKET_ALLOCATE, 0, *packet_ptr, 0, 0);
2716 
2717 #ifdef NX_ENABLE_PACKET_DEBUG_INFO
2718             if (thread_ptr -> tx_thread_suspend_status == NX_SUCCESS)
2719             {
2720 
2721                 /* Add debug information. */
2722                 NX_PACKET_DEBUG(__FILE__, __LINE__, *packet_ptr);
2723             }
2724 #endif /* NX_ENABLE_PACKET_DEBUG_INFO */
2725 
2726             /* Return the completion status.  */
2727             return(thread_ptr -> tx_thread_suspend_status);
2728         }
2729         else
2730         {
2731 
2732             /* Immediate return, return error completion.  */
2733             status =  NX_NO_PACKET;
2734         }
2735     }
2736 
2737     /* Restore interrupts.  */
2738     TX_RESTORE
2739 
2740     /* Update the trace event with the status.  */
2741     NX_TRACE_EVENT_UPDATE(trace_event, trace_timestamp, NX_TRACE_PACKET_ALLOCATE, 0, *packet_ptr, 0, 0);
2742 
2743     /* Return completion status.  */
2744     return(status);
2745 }
2746 
ux_test_utility_sim_event_create_count_reset(VOID)2747 VOID  ux_test_utility_sim_event_create_count_reset    (VOID)
2748 {
2749     event_create_count = 0;
2750 }
ux_test_utility_sim_event_create_count(VOID)2751 ULONG ux_test_utility_sim_event_create_count          (VOID)
2752 {
2753     return event_create_count;
2754 }
ux_test_utility_sim_event_error_generation_start(ULONG fail_after)2755 VOID  ux_test_utility_sim_event_error_generation_start(ULONG fail_after)
2756 {
2757     event_create_count = 0;
2758     event_fail_after = fail_after;
2759 }
ux_test_utility_sim_event_error_generation_stop(VOID)2760 VOID  ux_test_utility_sim_event_error_generation_stop (VOID)
2761 {
2762     event_fail_after = FAIL_DISABLE;
2763     event_create_count = 0;
2764 }
2765 
/* Test-simulator duplicate of the ThreadX _tx_event_flags_create service.
   Identical to the stock implementation, except that when error injection
   is armed (ux_test_utility_sim_event_error_generation_start) and the
   creation counter has reached the threshold, it fails without creating
   the group.  Otherwise it counts the creation and performs the normal
   setup and created-list insertion.  */
UINT  _tx_event_flags_create(TX_EVENT_FLAGS_GROUP *group_ptr, CHAR *name_ptr)
{

TX_INTERRUPT_SAVE_AREA

TX_EVENT_FLAGS_GROUP    *next_group;
TX_EVENT_FLAGS_GROUP    *previous_group;


    if (event_fail_after != FAIL_DISABLE)
    {

        if (event_create_count >= event_fail_after)
        {

            /* Return testing error instead of actual creation. */
            /* NOTE(review): returns UX_MUTEX_ERROR, apparently copied from
               the mutex simulator; callers likely only check for a
               non-TX_SUCCESS value, but UX_EVENT_ERROR may be the intended
               code — confirm before changing.  */
            return UX_MUTEX_ERROR;
        }
    }

    /* Do actual creating. */
    event_create_count ++;


    /* Initialize event flags control block to all zeros.  */
    TX_MEMSET(group_ptr, 0, (sizeof(TX_EVENT_FLAGS_GROUP)));

    /* Setup the basic event flags group fields.  */
    group_ptr -> tx_event_flags_group_name =             name_ptr;

    /* Disable interrupts to put the event flags group on the created list.  */
    TX_DISABLE

    /* Setup the event flags ID to make it valid.  */
    group_ptr -> tx_event_flags_group_id =  TX_EVENT_FLAGS_ID;

    /* Place the group on the list of created event flag groups.  First,
       check for an empty list.  */
    if (_tx_event_flags_created_count == TX_EMPTY)
    {

        /* The created event flags list is empty.  Add event flag group to empty list.  */
        _tx_event_flags_created_ptr =                         group_ptr;
        group_ptr -> tx_event_flags_group_created_next =      group_ptr;
        group_ptr -> tx_event_flags_group_created_previous =  group_ptr;
    }
    else
    {

        /* This list is not NULL, add to the end of the list.  */
        next_group =      _tx_event_flags_created_ptr;
        previous_group =  next_group -> tx_event_flags_group_created_previous;

        /* Place the new event flag group in the list.  */
        next_group -> tx_event_flags_group_created_previous =  group_ptr;
        previous_group -> tx_event_flags_group_created_next =  group_ptr;

        /* Setup this group's created links.  */
        group_ptr -> tx_event_flags_group_created_previous =  previous_group;
        group_ptr -> tx_event_flags_group_created_next =      next_group;
    }

    /* Increment the number of created event flag groups.  */
    _tx_event_flags_created_count++;

    /* Optional event flag group create extended processing.  */
    TX_EVENT_FLAGS_GROUP_CREATE_EXTENSION(group_ptr)

    /* If trace is enabled, register this object.  */
    TX_TRACE_OBJECT_REGISTER(TX_TRACE_OBJECT_TYPE_EVENT_FLAGS, group_ptr, name_ptr, 0, 0)

    /* If trace is enabled, insert this event into the trace buffer.  */
    TX_TRACE_IN_LINE_INSERT(TX_TRACE_EVENT_FLAGS_CREATE, group_ptr, TX_POINTER_TO_ULONG_CONVERT(&next_group), 0, 0, TX_TRACE_EVENT_FLAGS_EVENTS)

    /* Log this kernel call.  */
    TX_EL_EVENT_FLAGS_CREATE_INSERT

    /* Restore interrupts.  */
    TX_RESTORE

    /* Return TX_SUCCESS.  */
    return(TX_SUCCESS);
}
2849 
2850 
/****** IO inp?/outp? *****************/
/* Scripted input sequence consumed by inpb/inpw/inpl: the array of values,
   its length, and the current read index.  Installed via
   ux_test_sim_inp_sequence_set().  */
static ULONG *_ux_test_sim_inp_seq = UX_NULL;
static ULONG  _ux_test_sim_inp_seq_len = 0;
static ULONG  _ux_test_sim_inp_seq_i = 0;
_ux_test_sim_inp_seq_value(VOID)2855 static ULONG _ux_test_sim_inp_seq_value(VOID)
2856 {
2857     if (_ux_test_sim_inp_seq && _ux_test_sim_inp_seq_len)
2858     {
2859         if (_ux_test_sim_inp_seq_i >= _ux_test_sim_inp_seq_len)
2860             return _ux_test_sim_inp_seq[_ux_test_sim_inp_seq_len - 1];
2861 
2862         return _ux_test_sim_inp_seq[_ux_test_sim_inp_seq_i];
2863     }
2864     else
2865     {
2866         return 0;
2867     }
2868 
2869 }
_ux_test_sim_inp_seq_inc(VOID)2870 static VOID _ux_test_sim_inp_seq_inc(VOID)
2871 {
2872     if (_ux_test_sim_inp_seq_i < _ux_test_sim_inp_seq_len)
2873         _ux_test_sim_inp_seq_i ++;
2874 }
ux_test_sim_inp_sequence_set(ULONG * seq,ULONG len)2875 VOID  ux_test_sim_inp_sequence_set(ULONG* seq, ULONG len)
2876 {
2877     _ux_test_sim_inp_seq = seq;
2878     _ux_test_sim_inp_seq_len = len;
2879     _ux_test_sim_inp_seq_i = 0;
2880 }
inpb(ULONG addr)2881 UCHAR   inpb(ULONG addr)
2882 {
2883     UCHAR value = (UCHAR)_ux_test_sim_inp_seq_value();
2884     (void)addr;
2885     _ux_test_sim_inp_seq_inc();
2886     return value;
2887 }
2888 #ifndef _MSC_BUILD
inpw(ULONG addr)2889 USHORT  inpw(ULONG addr)
2890 #else
2891 USHORT  inpw_mok(ULONG addr)
2892 #endif
2893 {
2894     USHORT value = (USHORT)_ux_test_sim_inp_seq_value();
2895     (void)addr;
2896     _ux_test_sim_inp_seq_inc();
2897     return value;
2898 }
/* Simulated 32-bit port read: yields the next scripted value; addr is unused.  */
ULONG   inpl(ULONG addr)
{
    ULONG value;

    (void)addr;
    value = (ULONG)_ux_test_sim_inp_seq_value();
    _ux_test_sim_inp_seq_inc();
    return value;
}
2906 
2907 
/* Output-write log captured by outpb/outpw/outpl: caller-supplied buffer of
   (addr, value) ULONG pairs, its size in ULONG slots, and the next free
   slot index.  Installed via ux_test_sim_outp_logbuf_set().  */
static ULONG* _ux_test_sim_outp_logbuf = UX_NULL;
static ULONG  _ux_test_sim_outp_logbuf_size = 0;
static ULONG  _ux_test_sim_outp_logbuf_i = 0;
_ux_test_sim_outp_log_save(ULONG addr,ULONG value)2911 static VOID  _ux_test_sim_outp_log_save(ULONG addr, ULONG value)
2912 {
2913     if (!_ux_test_sim_outp_logbuf || !_ux_test_sim_outp_logbuf_size)
2914         return;
2915 
2916     if (_ux_test_sim_outp_logbuf_i < _ux_test_sim_outp_logbuf_size - 1)
2917     {
2918         _ux_test_sim_outp_logbuf[_ux_test_sim_outp_logbuf_i ++] = addr;
2919         _ux_test_sim_outp_logbuf[_ux_test_sim_outp_logbuf_i ++] = value;
2920     }
2921 }
ux_test_sim_outp_log_count(VOID)2922 ULONG ux_test_sim_outp_log_count(VOID)
2923 {
2924     return (_ux_test_sim_outp_logbuf_i >> 1);
2925 }
ux_test_sim_outp_log_reset(VOID)2926 VOID  ux_test_sim_outp_log_reset(VOID)
2927 {
2928     _ux_test_sim_outp_logbuf_i = 0;
2929 }
ux_test_sim_outp_logbuf_set(ULONG * buf,ULONG size)2930 VOID  ux_test_sim_outp_logbuf_set(ULONG* buf, ULONG size)
2931 {
2932     _ux_test_sim_outp_logbuf = buf;
2933     _ux_test_sim_outp_logbuf_size = size;
2934     _ux_test_sim_outp_logbuf_i = 0;
2935 }
/* Fetch logged record number seq (0-based) into *addr / *value; either
   destination may be UX_NULL to skip it.  Returns 1 on success, 0 when
   seq is past the end of the log.  */
ULONG ux_test_sim_outp_log_get(ULONG seq, ULONG *addr, ULONG *value)
{
ULONG base;

    /* Each record occupies two consecutive slots.  */
    base = seq * 2;
    if (base >= _ux_test_sim_outp_logbuf_i)
        return 0;

    if (addr != UX_NULL)
        *addr = _ux_test_sim_outp_logbuf[base];
    if (value != UX_NULL)
        *value = _ux_test_sim_outp_logbuf[base + 1];

    return 1;
}
/* Simulated byte port write: log the (addr, value) pair and echo the value.  */
UCHAR outpb(ULONG addr, UCHAR b)
{
    _ux_test_sim_outp_log_save(addr, (ULONG)b);
    return b;
}
2952 #ifndef _MSC_BUILD
outpw(ULONG addr,USHORT w)2953 USHORT  outpw(ULONG addr, USHORT w)
2954 #else
2955 USHORT  outpw_mok(ULONG addr, USHORT w)
2956 #endif
2957 {
2958     _ux_test_sim_outp_log_save(addr, w);
2959     return w;
2960 }
/* Simulated 32-bit port write: log the (addr, value) pair and echo the
   value, matching outpb/outpw.

   Fix: the return statement was wrapped in #ifdef _MSC_BUILD, so non-MSC
   builds fell off the end of a non-void function — undefined behavior
   whenever the caller uses the result.  The value is now returned
   unconditionally, consistent with the byte and word variants.  */
ULONG outpl(ULONG addr, ULONG l)
{
    _ux_test_sim_outp_log_save(addr, l);
    return l;
}
2968