/***************************************************************************
 * Copyright (c) 2024 Microsoft Corporation
 *
 * This program and the accompanying materials are made available under the
 * terms of the MIT License which is available at
 * https://opensource.org/licenses/MIT.
 *
 * SPDX-License-Identifier: MIT
 **************************************************************************/


/**************************************************************************/
/**************************************************************************/
/**                                                                       */
/** ThreadX Component                                                     */
/**                                                                       */
/**   Block Pool                                                          */
/**                                                                       */
/**************************************************************************/
/**************************************************************************/

#define TX_SOURCE_CODE


/* Include necessary system files.  */

#include "tx_api.h"
#ifdef TX_ENABLE_EVENT_TRACE
#include "tx_trace.h"
#endif
#include "tx_thread.h"
#include "tx_block_pool.h"

34
35 /**************************************************************************/
36 /* */
37 /* FUNCTION RELEASE */
38 /* */
39 /* _tx_block_allocate PORTABLE C */
40 /* 6.1 */
41 /* AUTHOR */
42 /* */
43 /* William E. Lamie, Microsoft Corporation */
44 /* */
45 /* DESCRIPTION */
46 /* */
47 /* This function allocates a block from the specified memory block */
48 /* pool. */
49 /* */
50 /* INPUT */
51 /* */
52 /* pool_ptr Pointer to pool control block */
53 /* block_ptr Pointer to place allocated block */
54 /* pointer */
55 /* wait_option Suspension option */
56 /* */
57 /* OUTPUT */
58 /* */
59 /* status Completion status */
60 /* */
61 /* CALLS */
62 /* */
63 /* _tx_thread_system_suspend Suspend thread */
64 /* _tx_thread_system_ni_suspend Non-interruptable suspend thread */
65 /* */
66 /* CALLED BY */
67 /* */
68 /* Application Code */
69 /* */
70 /* RELEASE HISTORY */
71 /* */
72 /* DATE NAME DESCRIPTION */
73 /* */
74 /* 05-19-2020 William E. Lamie Initial Version 6.0 */
75 /* 09-30-2020 Yuxin Zhou Modified comment(s), */
76 /* resulting in version 6.1 */
77 /* */
78 /**************************************************************************/
_tx_block_allocate(TX_BLOCK_POOL * pool_ptr,VOID ** block_ptr,ULONG wait_option)79 UINT _tx_block_allocate(TX_BLOCK_POOL *pool_ptr, VOID **block_ptr, ULONG wait_option)
80 {
81
82 TX_INTERRUPT_SAVE_AREA
83
84 UINT status;
85 TX_THREAD *thread_ptr;
86 UCHAR *work_ptr;
87 UCHAR *temp_ptr;
88 UCHAR **next_block_ptr;
89 UCHAR **return_ptr;
90 UINT suspended_count;
91 TX_THREAD *next_thread;
92 TX_THREAD *previous_thread;
93 #ifdef TX_ENABLE_EVENT_TRACE
94 TX_TRACE_BUFFER_ENTRY *entry_ptr;
95 ULONG time_stamp = ((ULONG) 0);
96 #endif
97 #ifdef TX_ENABLE_EVENT_LOGGING
98 UCHAR *log_entry_ptr;
99 ULONG upper_tbu;
100 ULONG lower_tbu;
101 #endif
102
103
104 /* Disable interrupts to get a block from the pool. */
105 TX_DISABLE
106
107 #ifdef TX_BLOCK_POOL_ENABLE_PERFORMANCE_INFO
108
109 /* Increment the total allocations counter. */
110 _tx_block_pool_performance_allocate_count++;
111
112 /* Increment the number of allocations on this pool. */
113 pool_ptr -> tx_block_pool_performance_allocate_count++;
114 #endif
115
116 #ifdef TX_ENABLE_EVENT_TRACE
117
118 /* If trace is enabled, save the current event pointer. */
119 entry_ptr = _tx_trace_buffer_current_ptr;
120
121 /* If trace is enabled, insert this event into the trace buffer. */
122 TX_TRACE_IN_LINE_INSERT(TX_TRACE_BLOCK_ALLOCATE, pool_ptr, 0, wait_option, pool_ptr -> tx_block_pool_available, TX_TRACE_BLOCK_POOL_EVENTS)
123
124 /* Save the time stamp for later comparison to verify that
125 the event hasn't been overwritten by the time the allocate
126 call succeeds. */
127 if (entry_ptr != TX_NULL)
128 {
129
130 time_stamp = entry_ptr -> tx_trace_buffer_entry_time_stamp;
131 }
132 #endif
133
134 #ifdef TX_ENABLE_EVENT_LOGGING
135 log_entry_ptr = *(UCHAR **) _tx_el_current_event;
136
137 /* Log this kernel call. */
138 TX_EL_BLOCK_ALLOCATE_INSERT
139
140 /* Store -1 in the third event slot. */
141 *((ULONG *) (log_entry_ptr + TX_EL_EVENT_INFO_3_OFFSET)) = (ULONG) -1;
142
143 /* Save the time stamp for later comparison to verify that
144 the event hasn't been overwritten by the time the allocate
145 call succeeds. */
146 lower_tbu = *((ULONG *) (log_entry_ptr + TX_EL_EVENT_TIME_LOWER_OFFSET));
147 upper_tbu = *((ULONG *) (log_entry_ptr + TX_EL_EVENT_TIME_UPPER_OFFSET));
148 #endif
149
150 /* Determine if there is an available block. */
151 if (pool_ptr -> tx_block_pool_available != ((UINT) 0))
152 {
153
154 /* Yes, a block is available. Decrement the available count. */
155 pool_ptr -> tx_block_pool_available--;
156
157 /* Pickup the current block pointer. */
158 work_ptr = pool_ptr -> tx_block_pool_available_list;
159
160 /* Return the first available block to the caller. */
161 temp_ptr = TX_UCHAR_POINTER_ADD(work_ptr, (sizeof(UCHAR *)));
162 return_ptr = TX_INDIRECT_VOID_TO_UCHAR_POINTER_CONVERT(block_ptr);
163 *return_ptr = temp_ptr;
164
165 /* Modify the available list to point at the next block in the pool. */
166 next_block_ptr = TX_UCHAR_TO_INDIRECT_UCHAR_POINTER_CONVERT(work_ptr);
167 pool_ptr -> tx_block_pool_available_list = *next_block_ptr;
168
169 /* Save the pool's address in the block for when it is released! */
170 temp_ptr = TX_BLOCK_POOL_TO_UCHAR_POINTER_CONVERT(pool_ptr);
171 *next_block_ptr = temp_ptr;
172
173 #ifdef TX_ENABLE_EVENT_TRACE
174
175 /* Check that the event time stamp is unchanged. A different
176 timestamp means that a later event wrote over the byte
177 allocate event. In that case, do nothing here. */
178 if (entry_ptr != TX_NULL)
179 {
180
181 /* Is the time stamp the same? */
182 if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
183 {
184
185 /* Timestamp is the same, update the entry with the address. */
186 #ifdef TX_MISRA_ENABLE
187 entry_ptr -> tx_trace_buffer_entry_info_2 = TX_POINTER_TO_ULONG_CONVERT(*block_ptr);
188 #else
189 entry_ptr -> tx_trace_buffer_entry_information_field_2 = TX_POINTER_TO_ULONG_CONVERT(*block_ptr);
190 #endif
191 }
192 }
193 #endif
194
195 #ifdef TX_ENABLE_EVENT_LOGGING
196 /* Store the address of the allocated block. */
197 *((ULONG *) (log_entry_ptr + TX_EL_EVENT_INFO_3_OFFSET)) = (ULONG) *block_ptr;
198 #endif
199
200 /* Set status to success. */
201 status = TX_SUCCESS;
202
203 /* Restore interrupts. */
204 TX_RESTORE
205 }
206 else
207 {
208
209 /* Default the return pointer to NULL. */
210 return_ptr = TX_INDIRECT_VOID_TO_UCHAR_POINTER_CONVERT(block_ptr);
211 *return_ptr = TX_NULL;
212
213 /* Determine if the request specifies suspension. */
214 if (wait_option != TX_NO_WAIT)
215 {
216
217 /* Determine if the preempt disable flag is non-zero. */
218 if (_tx_thread_preempt_disable != ((UINT) 0))
219 {
220
221 /* Suspension is not allowed if the preempt disable flag is non-zero at this point, return error completion. */
222 status = TX_NO_MEMORY;
223
224 /* Restore interrupts. */
225 TX_RESTORE
226 }
227 else
228 {
229
230 /* Prepare for suspension of this thread. */
231
232 #ifdef TX_BLOCK_POOL_ENABLE_PERFORMANCE_INFO
233
234 /* Increment the total suspensions counter. */
235 _tx_block_pool_performance_suspension_count++;
236
237 /* Increment the number of suspensions on this pool. */
238 pool_ptr -> tx_block_pool_performance_suspension_count++;
239 #endif
240
241 /* Pickup thread pointer. */
242 TX_THREAD_GET_CURRENT(thread_ptr)
243
244 /* Setup cleanup routine pointer. */
245 thread_ptr -> tx_thread_suspend_cleanup = &(_tx_block_pool_cleanup);
246
247 /* Setup cleanup information, i.e. this pool control
248 block. */
249 thread_ptr -> tx_thread_suspend_control_block = (VOID *) pool_ptr;
250
251 /* Save the return block pointer address as well. */
252 thread_ptr -> tx_thread_additional_suspend_info = (VOID *) block_ptr;
253
254 #ifndef TX_NOT_INTERRUPTABLE
255
256 /* Increment the suspension sequence number, which is used to identify
257 this suspension event. */
258 thread_ptr -> tx_thread_suspension_sequence++;
259 #endif
260
261 /* Pickup the number of suspended threads. */
262 suspended_count = (pool_ptr -> tx_block_pool_suspended_count);
263
264 /* Increment the number of suspended threads. */
265 (pool_ptr -> tx_block_pool_suspended_count)++;
266
267 /* Setup suspension list. */
268 if (suspended_count == TX_NO_SUSPENSIONS)
269 {
270
271 /* No other threads are suspended. Setup the head pointer and
272 just setup this threads pointers to itself. */
273 pool_ptr -> tx_block_pool_suspension_list = thread_ptr;
274 thread_ptr -> tx_thread_suspended_next = thread_ptr;
275 thread_ptr -> tx_thread_suspended_previous = thread_ptr;
276 }
277 else
278 {
279
280 /* This list is not NULL, add current thread to the end. */
281 next_thread = pool_ptr -> tx_block_pool_suspension_list;
282 thread_ptr -> tx_thread_suspended_next = next_thread;
283 previous_thread = next_thread -> tx_thread_suspended_previous;
284 thread_ptr -> tx_thread_suspended_previous = previous_thread;
285 previous_thread -> tx_thread_suspended_next = thread_ptr;
286 next_thread -> tx_thread_suspended_previous = thread_ptr;
287 }
288
289 /* Set the state to suspended. */
290 thread_ptr -> tx_thread_state = TX_BLOCK_MEMORY;
291
292 #ifdef TX_NOT_INTERRUPTABLE
293
294 /* Call actual non-interruptable thread suspension routine. */
295 _tx_thread_system_ni_suspend(thread_ptr, wait_option);
296
297 /* Restore interrupts. */
298 TX_RESTORE
299 #else
300
301 /* Set the suspending flag. */
302 thread_ptr -> tx_thread_suspending = TX_TRUE;
303
304 /* Setup the timeout period. */
305 thread_ptr -> tx_thread_timer.tx_timer_internal_remaining_ticks = wait_option;
306
307 /* Temporarily disable preemption. */
308 _tx_thread_preempt_disable++;
309
310 /* Restore interrupts. */
311 TX_RESTORE
312
313 /* Call actual thread suspension routine. */
314 _tx_thread_system_suspend(thread_ptr);
315 #endif
316
317 #ifdef TX_ENABLE_EVENT_TRACE
318
319 /* Check that the event time stamp is unchanged. A different
320 timestamp means that a later event wrote over the byte
321 allocate event. In that case, do nothing here. */
322 if (entry_ptr != TX_NULL)
323 {
324
325 /* Is the time-stamp the same? */
326 if (time_stamp == entry_ptr -> tx_trace_buffer_entry_time_stamp)
327 {
328
329 /* Timestamp is the same, update the entry with the address. */
330 #ifdef TX_MISRA_ENABLE
331 entry_ptr -> tx_trace_buffer_entry_info_2 = TX_POINTER_TO_ULONG_CONVERT(*block_ptr);
332 #else
333 entry_ptr -> tx_trace_buffer_entry_information_field_2 = TX_POINTER_TO_ULONG_CONVERT(*block_ptr);
334 #endif
335 }
336 }
337 #endif
338
339 #ifdef TX_ENABLE_EVENT_LOGGING
340 /* Check that the event time stamp is unchanged and the call is about
341 to return success. A different timestamp means that a later event
342 wrote over the block allocate event. A return value other than
343 TX_SUCCESS indicates that no block was available. In those cases,
344 do nothing here. */
345 if (lower_tbu == *((ULONG *) (log_entry_ptr + TX_EL_EVENT_TIME_LOWER_OFFSET)) &&
346 upper_tbu == *((ULONG *) (log_entry_ptr + TX_EL_EVENT_TIME_UPPER_OFFSET)) &&
347 ((thread_ptr -> tx_thread_suspend_status) == TX_SUCCESS))
348 {
349
350 /* Store the address of the allocated block. */
351 *((ULONG *) (log_entry_ptr + TX_EL_EVENT_INFO_3_OFFSET)) = (ULONG) *block_ptr;
352 }
353 #endif
354
355 /* Return the completion status. */
356 status = thread_ptr -> tx_thread_suspend_status;
357 }
358 }
359 else
360 {
361
362 /* Immediate return, return error completion. */
363 status = TX_NO_MEMORY;
364
365 /* Restore interrupts. */
366 TX_RESTORE
367 }
368 }
369
370 /* Return completion status. */
371 return(status);
372 }