/*
 * Copyright (c) 2024, Texas Instruments Incorporated
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>

#include <zephyr/sys/__assert.h>

#include <inc/hw_types.h>
#include <inc/hw_ints.h>

#include <driverlib/interrupt.h>

#include <kernel/zephyr/dpl/dpl.h>
#include <ti/drivers/dpl/TaskP.h>
#include <ti/drivers/dpl/ClockP.h>
#include <ti/drivers/dpl/HwiP.h>

#if (defined(CONFIG_DYNAMIC_DPL_OBJECTS) && defined(CONFIG_DYNAMIC_THREAD) && \
     defined(CONFIG_DYNAMIC_THREAD_ALLOC) && defined(CONFIG_THREAD_STACK_INFO))
#define DYNAMIC_THREADS
#endif

#ifdef DYNAMIC_THREADS
/* Space for thread objects */
K_MEM_SLAB_DEFINE(task_slab, sizeof(struct k_thread), CONFIG_DYNAMIC_THREAD_POOL_SIZE,
                  MEM_ALIGN);

static struct k_thread *dpl_task_pool_alloc(void)
{
    struct k_thread *task_ptr = NULL;

    if (k_mem_slab_alloc(&task_slab, (void **)&task_ptr, K_NO_WAIT) < 0) {
        __ASSERT(0, "Increase size of DPL task pool");
    }
    printk("Slabs used: %u / %u\n", task_slab.info.num_used, task_slab.info.num_blocks);
    return task_ptr;
}

static void dpl_task_pool_free(struct k_thread *task)
{
    k_mem_slab_free(&task_slab, (void *)task);
}

#endif /* DYNAMIC_THREADS */

/*
 * ======== Array for conversion of Zephyr thread state to DPL task state ========
 */
const TaskP_State taskState[] = {TaskP_State_RUNNING,  /*!< Running */
                                 TaskP_State_READY,    /*!< Ready */
                                 TaskP_State_BLOCKED,  /*!< Pending */
                                 TaskP_State_INACTIVE, /*!< Suspended */
                                 TaskP_State_DELETED,  /*!< Terminated */
                                 TaskP_State_INVALID}; /*!< Dummy */

/*
 * ======== Default TaskP_Params values ========
 */
static const TaskP_Params TaskP_defaultParams = {
    .name = "NAME",
    .arg = NULL,
    .priority = 1,
    .stackSize = TaskP_DEFAULT_STACK_SIZE,
    .stack = NULL,
};

/*
 * ======== TaskP_Params_init ========
 */
void TaskP_Params_init(TaskP_Params *params)
{
    /* structure copy */
    *params = TaskP_defaultParams;
}

#ifdef DYNAMIC_THREADS
/*
 * ======== TaskP_create ========
 */
TaskP_Handle TaskP_create(TaskP_Function fxn, const TaskP_Params *params)
{
    k_tid_t task_tid;
    struct k_thread *task = dpl_task_pool_alloc();

    if (task == NULL) {
        return NULL;
    }

    k_thread_stack_t *task_stack = k_thread_stack_alloc(params->stackSize, 0);

    if (task_stack == NULL) {
        /* Return the thread object to the pool if no stack could be allocated */
        dpl_task_pool_free(task);
        return NULL;
    }

    /* TaskP priorities are inverted relative to Zephyr's */
    task_tid = k_thread_create(task, task_stack,
                               params->stackSize,
                               (k_thread_entry_t)fxn,
                               params->arg, NULL, NULL,
                               (0 - params->priority), 0, K_NO_WAIT);
    if (task_tid != NULL) {
        k_thread_name_set(task_tid, params->name);
    }

    return ((TaskP_Handle)task);
}
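
/*
 * Usage sketch (assumes an application-defined entry function `myTaskFxn`;
 * the names below are placeholders, not part of this API):
 *
 *     TaskP_Params params;
 *
 *     TaskP_Params_init(&params);
 *     params.name      = "myTask";
 *     params.priority  = 2;
 *     params.stackSize = 1024;
 *
 *     TaskP_Handle task = TaskP_create(myTaskFxn, &params);
 *     if (task == NULL) {
 *         // out of task objects or stack memory
 *     }
 */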

/*
 * ======== TaskP_delete ========
 */
void TaskP_delete(TaskP_Handle task)
{
    if (task != NULL)
    {
        TaskP_State state = TaskP_getState(task);
        if (state != TaskP_State_INVALID && state != TaskP_State_DELETED)
        {
            struct k_thread *thread = (struct k_thread *)task;
            /* k_thread_abort() returns once the thread has terminated, so its
             * dynamically allocated stack can be released afterwards.
             */
            k_thread_abort((k_tid_t)thread);

            int status = k_thread_stack_free((k_thread_stack_t *)thread->stack_info.start);
            if (status == 0)
            {
                dpl_task_pool_free(thread);
            }
        }
    }
}
#endif /* DYNAMIC_THREADS */

/*
 * ======== TaskP_construct ========
 */
TaskP_Handle TaskP_construct(TaskP_Struct *obj, TaskP_Function fxn, const TaskP_Params *params)
{
    if (params == NULL)
    {
        /* Set default parameter values */
        params = &TaskP_defaultParams;
    }

    /* TaskP priorities are inverted relative to Zephyr's */
    k_tid_t task_tid = k_thread_create((struct k_thread *)obj, (k_thread_stack_t *)params->stack,
                                       params->stackSize,
                                       (k_thread_entry_t)fxn,
                                       params->arg, NULL, NULL,
                                       (0 - params->priority), 0, K_NO_WAIT);

    if (task_tid != NULL)
    {
        k_thread_name_set(task_tid, params->name);
    }

    return ((TaskP_Handle)obj);
}
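
/*
 * Usage sketch (assumes application-provided storage; `myTaskStruct`,
 * `myStack` and `myTaskFxn` are placeholder names, not part of this API):
 *
 *     static TaskP_Struct myTaskStruct;
 *     K_THREAD_STACK_DEFINE(myStack, 1024);
 *
 *     TaskP_Params params;
 *
 *     TaskP_Params_init(&params);
 *     params.stack     = myStack;
 *     params.stackSize = K_THREAD_STACK_SIZEOF(myStack);
 *
 *     TaskP_Handle task = TaskP_construct(&myTaskStruct, myTaskFxn, &params);
 */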

/*
 * ======== TaskP_destruct ========
 */
void TaskP_destruct(TaskP_Struct *obj)
{
    if (obj != NULL)
    {
        TaskP_State state = TaskP_getState((TaskP_Handle)obj);
        if (state != TaskP_State_INVALID && state != TaskP_State_DELETED)
        {
            struct k_thread *thread = (struct k_thread *)obj;
            k_thread_abort((k_tid_t)thread);
        }
    }
}

/*
 * ======== TaskP_getState ========
 */
TaskP_State TaskP_getState(TaskP_Handle task)
{
    TaskP_State state;

    switch (((struct k_thread *)task)->base.thread_state) {
    case _THREAD_DUMMY:
        state = TaskP_State_INVALID;
        break;
    case _THREAD_DEAD:
        state = TaskP_State_DELETED;
        break;
    case _THREAD_SUSPENDED:
    case _THREAD_PENDING:
        state = TaskP_State_BLOCKED;
        break;
    case _THREAD_QUEUED:
        state = TaskP_State_READY;
        break;
    default:
        state = TaskP_State_INVALID;
        break;
    }

    /* Check if we are the currently running thread */
    if (k_current_get() == ((k_tid_t)((struct k_thread *)task))) {
        state = TaskP_State_RUNNING;
    }

    return state;
}

/*
 * ======== TaskP_getCurrentTask ========
 */
TaskP_Handle TaskP_getCurrentTask(void)
{
    return ((TaskP_Handle)k_current_get());
}

/*
 * ======== TaskP_disableScheduler ========
 */
uintptr_t TaskP_disableScheduler(void)
{
    k_sched_lock();
    return (0);
}

/*
 * ======== TaskP_restoreScheduler ========
 */
void TaskP_restoreScheduler(uintptr_t key)
{
    ARG_UNUSED(key);
    k_sched_unlock();
}
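
/*
 * Usage sketch: calls are expected in pairs, with the returned key passed
 * back on restore. In this port the key is unused because k_sched_lock()
 * and k_sched_unlock() nest on their own:
 *
 *     uintptr_t key = TaskP_disableScheduler();
 *     // ... code that must not be preempted by other threads ...
 *     TaskP_restoreScheduler(key);
 */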

/*
 * ======== TaskP_yield ========
 */
void TaskP_yield(void)
{
    k_yield();
}

/*
 * ======== TaskP_getTaskObjectSize ========
 */
uint32_t TaskP_getTaskObjectSize(void)
{
    return (sizeof(struct k_thread));
}