1 /***************************************************************************//**
2 * \file cyhal_ipc.c
3 *
4 * \brief
5 * Provides a high level interface for interacting with the Infineon Inter Processor Communication.
6 * This interface abstracts out the chip specific details. If any chip specific
* functionality is necessary, or performance is critical, the low level functions
* can be used directly.
9 *
10 ********************************************************************************
11 * \copyright
12 * Copyright 2018-2022 Cypress Semiconductor Corporation (an Infineon company) or
13 * an affiliate of Cypress Semiconductor Corporation
14 *
15 * SPDX-License-Identifier: Apache-2.0
16 *
17 * Licensed under the Apache License, Version 2.0 (the "License");
18 * you may not use this file except in compliance with the License.
19 * You may obtain a copy of the License at
20 *
21 *     http://www.apache.org/licenses/LICENSE-2.0
22 *
23 * Unless required by applicable law or agreed to in writing, software
24 * distributed under the License is distributed on an "AS IS" BASIS,
25 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
26 * See the License for the specific language governing permissions and
27 * limitations under the License.
28 *******************************************************************************/
29 
30 #include "cy_ipc_drv.h"
31 #include "cy_ipc_pipe.h"
32 #include "cy_ipc_sema.h"
33 #include "cyhal_ipc.h"
34 #include "cyhal_ipc_impl.h"
35 #include "cyhal_system.h"
36 #include "cyhal_irq_impl.h"
37 #if (defined(CY_RTOS_AWARE) || defined(COMPONENT_RTOS_AWARE)) && (CYHAL_IPC_RTOS_SEMA_NUM > 0)
38 #include "cyabs_rtos.h"
39 #endif /* (defined(CY_RTOS_AWARE) || defined(COMPONENT_RTOS_AWARE)) && (CYHAL_IPC_RTOS_SEMA_NUM > 0) */
40 
41 #include <string.h>
42 
43 #if (CYHAL_DRIVER_AVAILABLE_IPC)
44 
45 #if defined(__cplusplus)
46 extern "C" {
47 #endif
48 
49 /**
50 * \addtogroup group_hal_impl_ipc IPC (Inter-Processor Communication)
51 * \ingroup group_hal_impl
52 * \{
* \section section_hal_impl_ipc_preemtable_sema Preemptable semaphore parameter
* If the preemptable parameter is enabled (true) for a semaphore, the user must ensure that there are no deadlocks in
* the system, which can be caused by an interrupt that occurs after the IPC channel is locked. Unless the user is
* ready to handle IPC channel locks correctly at the application level, set preemptable to false.
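*
* A minimal illustrative sketch (not taken verbatim from this driver's documentation); the semaphore
* number 2 is an arbitrary example value:
* \code
* cyhal_ipc_t sema_obj;
* // preemptable = false is the safer choice unless IPC channel locks are handled at the application level
* cy_rslt_t result = cyhal_ipc_semaphore_init(&sema_obj, 2UL, false);
* if (CY_RSLT_SUCCESS == result)
* {
*     result = cyhal_ipc_semaphore_take(&sema_obj, 1000UL); // wait for up to 1000 us
*     if (CY_RSLT_SUCCESS == result)
*     {
*         // ... access the shared resource ...
*         (void)cyhal_ipc_semaphore_give(&sema_obj);
*     }
* }
* \endcode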
57 *
* \section section_hal_impl_ipc_interrupts_priorities IPC interrupts implementation and priorities
* In the current HAL IPC implementation, each core has its "own" IPC INTR structure, which services all possible HAL IPC
* channels. Because of that, callback (interrupt) priorities are not flexible in configuration, which means that the
* priority set by the \ref cyhal_ipc_queue_enable_event function is only applied if it is lower than the one currently
* applied for the source (IPC INTR structure) that services interrupts for the current core. The priority is applied
* core-wide, for all channels and queue numbers.
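*
* A minimal sketch, assuming the typical HAL enable_event signature (queue object, event mask, interrupt
* priority, enable flag) and an already initialized queue object named queue_obj:
* \code
* // Request CYHAL_IPC_QUEUE_WRITE notifications with priority 3. The priority only takes effect if it is
* // lower than the one currently applied to the IPC INTR structure that services this core.
* cyhal_ipc_queue_enable_event(&queue_obj, CYHAL_IPC_QUEUE_WRITE, 3u, true);
* \endcode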
64 *
* \section section_hal_impl_ipc_queue_operation_isr IPC queues operations in ISR context
* In the current HAL IPC implementation, due to the specifics of multi-core interrupt synchronization, IPC queue put/get
* operations cannot be performed in callbacks (ISR context). Such operations fail with the
* \ref CYHAL_IPC_RSLT_ERR_CANT_OPERATE_IN_ISR error code.
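*
* For example (a sketch, assuming a queue object and message variable set up elsewhere), the same call succeeds
* from thread context but fails from a callback:
* \code
* // From thread context: OK. From ISR context: returns CYHAL_IPC_RSLT_ERR_CANT_OPERATE_IN_ISR.
* cy_rslt_t result = cyhal_ipc_queue_put(&queue_obj, &message, 0UL);
* \endcode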
69 *
* \section section_hal_impl_ipc_last_sema_occupied Last available IPC semaphore is occupied by HAL IPC
* The last available IPC semaphore (CYHAL_IPC_SEMA_COUNT - 1) is occupied by the multi-core interrupt synchronization
* mechanism and is not available to the user.
73 *
* \section section_hal_impl_ipc_semaphores_initialization IPC PDL semaphores initialization
* On some devices (currently, CAT1C and CAT1D devices), the startup code does not initialize the IPC PDL driver
* semaphores, so this is done by the cyhal_ipc_semaphore_init() function, which requires one device core to call
* cyhal_ipc_semaphore_init() prior to the other cores. By default, on CAT1C the CM7_0 core should call this function
* before the other cores, while on CAT1D it is the CM55 core. This can be changed by the user by defining
* `CYHAL_IPC_INIT_CORE` with the required core name. For example, defining CYHAL_IPC_INIT_CORE equal to
* CORE_NAME_CM7_1 makes cyhal_ipc_semaphore_init() on a CAT1C device initialize the IPC PDL semaphores on the CM7_1 core.
* Note: The `CORE_NAME_*` defines are generated for each device by the corresponding recipe-make-* asset.
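*
* For example (a hypothetical CAT1C setup; the exact way the define is passed depends on the build system):
* \code
* // e.g. in the Makefile of every core's application: DEFINES+=CYHAL_IPC_INIT_CORE=CORE_NAME_CM7_1
* #define CYHAL_IPC_INIT_CORE (CORE_NAME_CM7_1)
* \endcode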
81 *
82 * \} group_hal_impl_ipc
83 */
84 
85 typedef struct
86 {
87     uint8_t *isr_enable_sync;
88     uint8_t *isr_clear_sync;
89     cyhal_ipc_queue_t *queues_ll_pointer;
90 #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
91     uint32_t padding[5]; /* Necessary to make sure the total size is a multiple of __SCB_DCACHE_LINE_SIZE */
92 #endif
93 } _cyhal_ipc_sevice_data_t;
94 
95 _cyhal_ipc_sevice_data_t *_cyhal_ipc_service_data = NULL;
96 
97 /* Step in us between IPC semaphore unsuccessful takes when performing
98 *  internal IPC service tasks. */
99 #define _CYHAL_IPC_SERVICE_SEMA_STEP_US         (100)
100 /* IPC semaphore timeout when performing internal IPC service tasks */
101 #define _CYHAL_IPC_SERVICE_SEMA_TIMEOUT_US      (2000)
102 /* IPC semaphore acquire / give  tries */
103 #define _CYHAL_IPC_SERVICE_SEMA_TRIES           (100)
104 
105 #if (CYHAL_IPC_USR_CHANNELS <= 8)
106     #define _CYHAL_IPC_FIX_CHAN_NUM(channel)                    (channel - CYHAL_IPC_CHAN_0)
107     #define _CYHAL_IPC_BIT_ENABLE(variable, channel, core)      variable[core] |= (1 << _CYHAL_IPC_FIX_CHAN_NUM(channel))
108     #define _CYHAL_IPC_BIT_DISABLE(variable, channel, core)     variable[core] &= ~(1 << _CYHAL_IPC_FIX_CHAN_NUM(channel))
109     #define _CYHAL_IPC_CHECK_BIT(variable, channel, core)       ((variable[core] & (1 << _CYHAL_IPC_FIX_CHAN_NUM(channel))) != 0)
110     #define _CYHAL_IPC_CHECK_BIT_NOT(variable, channel, core)   ((variable[core] & (1 << _CYHAL_IPC_FIX_CHAN_NUM(channel))) == 0)
111 
112     /* Set ISR Enabled */
113     #define _CYHAL_IPC_SIE(channel, core)       _CYHAL_IPC_BIT_ENABLE(_cyhal_ipc_service_data->isr_enable_sync, channel, core)
114     /* Set ISR Disabled */
115     #define _CYHAL_IPC_SID(channel, core)       _CYHAL_IPC_BIT_DISABLE(_cyhal_ipc_service_data->isr_enable_sync, channel, core)
116     /* Is ISR Enabled */
117     #define _CYHAL_IPC_IIE(channel, core)       _CYHAL_IPC_CHECK_BIT(_cyhal_ipc_service_data->isr_enable_sync, channel, core)
118     /* Set ISR (ISR is expected) */
119     #define _CYHAL_IPC_SI(channel, core)        _CYHAL_IPC_BIT_ENABLE(_cyhal_ipc_service_data->isr_clear_sync, channel, core)
120     /* Clear ISR (ISR is handled and cleared) */
121     #define _CYHAL_IPC_CI(channel, core)        _CYHAL_IPC_BIT_DISABLE(_cyhal_ipc_service_data->isr_clear_sync, channel, core)
122     /* Is ISR Serviced */
123     #define _CYHAL_IPC_IIS(channel, core)       _CYHAL_IPC_CHECK_BIT_NOT(_cyhal_ipc_service_data->isr_clear_sync, channel, core)
124 #else
125     #error "Unhandled number of IPC channels"
126 #endif /* CYHAL_IPC_USR_CHANNELS <= 8 or other */
127 
128 #if defined(COMPONENT_CAT1A)
129 /* Number of cores being serviced by the driver */
130 #define _CYHAL_IPC_CORE_NUM                         (2)
131 #define _CYHAL_IPC_CM0P_IDX                         (0)
132 #define _CYHAL_IPC_CM4_IDX                          (1)
133 
134 /* IPC INTR of HAL IPC CHAN 0 is used to service interrupts on CM0p, IPC_CHAN 1 is used for CM4 */
135 #define _CYHAL_IPC_TRIGGER_ISR_MASK                 ((1UL << CYHAL_IPC_CHAN_0) | (1UL << CYHAL_IPC_CHAN_1))
136 #if (CY_CPU_CORTEX_M0P)
137 #define _CYHAL_IPC_CURRENT_CORE_IPC_INTR_CHAN       (CYHAL_IPC_CHAN_0)
138 #define _CYHAL_IPC_CUR_CORE_IDX                     (_CYHAL_IPC_CM0P_IDX)
139 #define _CYHAL_IPC_OTHER_CORE_IDX                   (_CYHAL_IPC_CM4_IDX)
140 #else /* CY_CPU_CORTEX_M4 */
141 #define _CYHAL_IPC_CURRENT_CORE_IPC_INTR_CHAN       (CYHAL_IPC_CHAN_1)
142 #define _CYHAL_IPC_CUR_CORE_IDX                     (_CYHAL_IPC_CM4_IDX)
143 #define _CYHAL_IPC_OTHER_CORE_IDX                   (_CYHAL_IPC_CM0P_IDX)
144 #endif /* CY_CPU_CORTEX_M0P or CY_CPU_CORTEX_M4 */
145 
146 #elif defined(COMPONENT_CAT1C) || defined(COMPONENT_CAT1D)
147 /* Number of cores being serviced by the driver */
148 #define _CYHAL_IPC_CORE_NUM                         (3)
149 
150 #if defined(COMPONENT_CAT1C)
151 #define _CYHAL_IPC_CM0P_IDX                         (0)
152 #define _CYHAL_IPC_CM7_0_IDX                        (1)
153 #define _CYHAL_IPC_CM7_1_IDX                        (2)
154 
155 #if (CY_CPU_CORTEX_M0P)
156 #define _CYHAL_IPC_CURRENT_CORE_IPC_INTR_CHAN       (CYHAL_IPC_CHAN_0)
157 #define _CYHAL_IPC_CUR_CORE_IDX                     (_CYHAL_IPC_CM0P_IDX)
158 #define _CYHAL_IPC_OTHER_CORE_0_IDX                 (_CYHAL_IPC_CM7_0_IDX)
159 #define _CYHAL_IPC_OTHER_CORE_1_IDX                 (_CYHAL_IPC_CM7_1_IDX)
160 #elif (CY_CPU_CORTEX_M7)
161 #if (CORE_NAME_CM7_0)
162 #define _CYHAL_IPC_CURRENT_CORE_IPC_INTR_CHAN       (CYHAL_IPC_CHAN_1)
163 #define _CYHAL_IPC_CUR_CORE_IDX                     (_CYHAL_IPC_CM7_0_IDX)
164 #define _CYHAL_IPC_OTHER_CORE_0_IDX                 (_CYHAL_IPC_CM0P_IDX)
165 #define _CYHAL_IPC_OTHER_CORE_1_IDX                 (_CYHAL_IPC_CM7_1_IDX)
166 #elif (CORE_NAME_CM7_1)
167 #define _CYHAL_IPC_CURRENT_CORE_IPC_INTR_CHAN       (CYHAL_IPC_CHAN_2)
168 #define _CYHAL_IPC_CUR_CORE_IDX                     (_CYHAL_IPC_CM7_1_IDX)
169 #define _CYHAL_IPC_OTHER_CORE_0_IDX                 (_CYHAL_IPC_CM0P_IDX)
170 #define _CYHAL_IPC_OTHER_CORE_1_IDX                 (_CYHAL_IPC_CM7_0_IDX)
171 #else
172 #error "Unable to determine CM7 core index"
#endif /* (CORE_NAME_CM7_0) or (CORE_NAME_CM7_1) or error */
174 #endif /* CY_CPU_CORTEX_M0P or CY_CPU_CORTEX_M7 */
175 #else
176 #define _CYHAL_IPC_CM33_IDX                         (0)
177 #define _CYHAL_IPC_CM55_IDX                         (1)
178 #define _CYHAL_IPC_CU55_IDX                         (2)
179 #if (CY_CPU_CORTEX_M33)
180 #define _CYHAL_IPC_CURRENT_CORE_IPC_INTR_CHAN       (CYHAL_IPC_CHAN_0)
181 #define _CYHAL_IPC_CUR_CORE_IDX                     (_CYHAL_IPC_CM33_IDX)
182 #define _CYHAL_IPC_OTHER_CORE_0_IDX                 (_CYHAL_IPC_CM55_IDX)
183 #define _CYHAL_IPC_OTHER_CORE_1_IDX                 (_CYHAL_IPC_CU55_IDX)
184 #elif (CY_CPU_CORTEX_M55)
185 #define _CYHAL_IPC_CURRENT_CORE_IPC_INTR_CHAN       (CYHAL_IPC_CHAN_1)
186 #define _CYHAL_IPC_CUR_CORE_IDX                     (_CYHAL_IPC_CM55_IDX)
187 #define _CYHAL_IPC_OTHER_CORE_0_IDX                 (_CYHAL_IPC_CM33_IDX)
188 #define _CYHAL_IPC_OTHER_CORE_1_IDX                 (_CYHAL_IPC_CU55_IDX)
189 #else
190 #define _CYHAL_IPC_CURRENT_CORE_IPC_INTR_CHAN       (CYHAL_IPC_CHAN_2)
191 #define _CYHAL_IPC_CUR_CORE_IDX                     (_CYHAL_IPC_CU55_IDX)
192 #define _CYHAL_IPC_OTHER_CORE_0_IDX                 (_CYHAL_IPC_CM33_IDX)
193 #define _CYHAL_IPC_OTHER_CORE_1_IDX                 (_CYHAL_IPC_CM55_IDX)
194 #endif /* CY_CPU_CORTEX_M33 or CY_CPU_CORTEX_M55 or other */
195 #endif /* defined(COMPONENT_CAT1C) or other */
196 
197 #define _CYHAL_IPC_TRIGGER_ISR_MASK                 ((1UL << CYHAL_IPC_CHAN_0) | (1UL << CYHAL_IPC_CHAN_1) | (1UL << CYHAL_IPC_CHAN_2))
198 #else /* !defined(COMPONENT_CAT1A) */
199     #error "Unhandled device"
200 #endif /* defined(COMPONENT_CAT1A) */
201 
202 #define _CYHAL_IPC_CURRENT_CORE_IPC_INTR_SRC        ((_cyhal_system_irq_t)(cpuss_interrupts_ipc_0_IRQn + _CYHAL_IPC_CURRENT_CORE_IPC_INTR_CHAN))
203 
/*
*  Macros to work with triggered events and their signatures.
*  These operations are performed by the IPC queue functions that can provoke interrupts.
*  Signatures are bits in the corresponding bitfields that are inverted each time a new event is generated.
*  They allow the IPC irq handler to understand whether a certain event was already passed to a user's
*  callback or not. This mechanism works in the following way:
*   - A queue modification function sets the triggered event and toggles the corresponding signature bit.
*   - The irq handler compares the current signature with the one previously passed to the user. If they differ,
*     the event is passed to the user; if not, the event is ignored. The processed signature is then stored.
*
*/
216 
217 /* How many bits are used to store events / their signatures */
218 #define _CYHAL_IPC_EVENTS_SIGNATURES_BITS           (16UL)
219 #define _CYHAL_IPC_EVENTS_MASK                      ((1UL << _CYHAL_IPC_EVENTS_SIGNATURES_BITS) - 1)
220 #define _CYHAL_IPC_SIGNATURES_MASK                  (_CYHAL_IPC_EVENTS_MASK << _CYHAL_IPC_EVENTS_SIGNATURES_BITS)
221 /* Add event into triggered events list and change signature */
222 #define _CYHAL_IPC_ADD_TRIGGERED_EVENT(var, event)  var |= event; var ^= (event << _CYHAL_IPC_EVENTS_SIGNATURES_BITS)
223 /* Get triggered events w/o signatures */
224 #define _CYHAL_IPC_GET_TRIGGERED_EVENT(var)         (var & _CYHAL_IPC_EVENTS_MASK)
225 /* Get signatures w/o triggered events */
226 #define _CYHAL_IPC_GET_SIGNATURES(var)              (((var) & _CYHAL_IPC_SIGNATURES_MASK) >> _CYHAL_IPC_EVENTS_SIGNATURES_BITS)
227 /* Remove triggered events (not signatures touched) */
228 #define _CYHAL_IPC_RMV_TRIGGERED_EVENT(var, event)  var &= ~event
229 /* Clear triggered events, but not signatures */
230 #define _CYHAL_IPC_CLR_TRIGGERED_EVENT(var)         var &= _CYHAL_IPC_SIGNATURES_MASK
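
/* Worked example (illustrative only; assumes a hypothetical event bit 0x0002 and a 'triggered_events' word that starts at 0):
*   1. A put/get first clears old events: _CYHAL_IPC_CLR_TRIGGERED_EVENT(var) -> var = 0x00000000, and then adds
*      the new one: _CYHAL_IPC_ADD_TRIGGERED_EVENT(var, 0x0002) -> var = 0x00020002 (event 0x0002, signature 0x0002).
*   2. The irq handler sees that the signature for event 0x0002 differs from the last processed value, reports the
*      event to the callback and stores var as the processed value.
*   3. The next put/get of the same type yields CLR -> var = 0x00020000, ADD -> var = 0x00000002: the event bit is
*      the same, but its signature toggled again, so the handler knows this is a new occurrence and reports it again.
*/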
231 
232 cyhal_ipc_t *_ipc_objects[CYHAL_IPC_USR_CHANNELS];
233 bool interrupts_initialized = false;
234 bool semas_initialized = false;
235 
/* As _ipc_objects stores only info about user-accessible IPC channels, we need to apply an offset when accessing the array */
237 #define _CYHAL_IPC_OBJ_ARR_EL(channel_num)  (_ipc_objects[(channel_num - CYHAL_IPC_CHAN_0)])
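/* For example, _CYHAL_IPC_OBJ_ARR_EL(CYHAL_IPC_CHAN_0) maps to _ipc_objects[0] */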
238 
239 #if (defined(CY_RTOS_AWARE) || defined(COMPONENT_RTOS_AWARE)) && (CYHAL_IPC_RTOS_SEMA_NUM > 0)
240 #if defined(CY_IPC_INTR_SPARE)
241 #define _CYHAL_IPC_SEMA_INTR_STR_NUM                (CY_IPC_INTR_SPARE)
242 #elif defined(CY_IPC_INTR_USER) && (CY_IPC_INTR_USER < CY_IPC_CHAN_USER)
243 #define _CYHAL_IPC_SEMA_INTR_STR_NUM                (CY_IPC_INTR_USER)
244 #else
245 #error "Cannot determine IPC INTR struct for semaphores"
#endif /* defined(CY_IPC_INTR_SPARE) or defined(CY_IPC_INTR_USER) or other */
247 
248 #if (_CYHAL_IPC_SEMA_INTR_STR_NUM >= CYHAL_IPC_CHAN_0)
249 #error "Cannot proceed with overlapping SEMA and QUEUE interrupts"
#endif /* (_CYHAL_IPC_SEMA_INTR_STR_NUM >= CYHAL_IPC_CHAN_0) */
251 
252 typedef struct
253 {
254     cy_semaphore_t semaphore;
255     uint32_t sema_num;
256     bool initialized;
257 } _cyhal_ipc_rtos_sema_t;
258 
259 static _cyhal_ipc_rtos_sema_t _cyhal_ipc_rtos_semaphores[CYHAL_IPC_RTOS_SEMA_NUM] = { 0 };
260 #endif /* (defined(CY_RTOS_AWARE) || defined(COMPONENT_RTOS_AWARE)) && (CYHAL_IPC_RTOS_SEMA_NUM > 0) */
261 
262 /* Shared memory is non-cacheable memory. Currently we invalidate/clean all memory as if it is cached.
263  *
264  * Before Reading cacheable Memory:
265  *     SCB_InvalidateDCache_by_Addr(addr,size) for invalidating the D-Cache (to Read RAM, updating the cache)
266  * After Writing to cacheable Memory:
267  *     SCB_CleanDCache_by_Addr(addr,size) for cleaning the D-Cache (writing the cache through to RAM)
268  */
269 #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
270 /* NOTES:
271  *  D-Cache is invalidated starting from a 32 byte aligned address in 32 byte granularity.
272  *  D-Cache memory blocks which are part of given address + given size are invalidated.
273  */
274 #define INVALIDATE_DCACHE_BEFORE_READING_FROM_MEMORY(addr,size)      \
275     SCB_InvalidateDCache_by_Addr( (volatile void *)( (uint32_t)addr), size)
276 
277 #define CLEAR_DCACHE_AFTER_WRITING_TO_MEMORY(addr,size)              \
278     SCB_CleanDCache_by_Addr( (volatile void *)( (uint32_t)addr), size)
279 #else
280 #define INVALIDATE_DCACHE_BEFORE_READING_FROM_MEMORY(addr,size)
281 #define CLEAR_DCACHE_AFTER_WRITING_TO_MEMORY(addr,size)
282 #endif
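
/* Typical usage pattern for the macros above when touching a structure that lives in shared memory (a condensed
*  sketch of what the queue helpers below do; 'queue_handle' stands for any cyhal_ipc_queue_t pointer that other
*  cores may also update):
*
*      INVALIDATE_DCACHE_BEFORE_READING_FROM_MEMORY(queue_handle, sizeof(*queue_handle));
*      queue_handle->curr_items++;   // read-modify-write of the shared structure
*      CLEAR_DCACHE_AFTER_WRITING_TO_MEMORY(queue_handle, sizeof(*queue_handle));
*/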
283 /*************************************** INTERNAL FUNCTIONS PROTOTYPES *************************************************/
284 
285 static void _cyhal_ipc_irq_handler(void);
286 static cy_rslt_t _cyhal_ipc_sema_take(cyhal_ipc_t *obj, uint32_t *timeout_us, uint32_t step_us);
287 static void _cyhal_ipc_wait_step(uint32_t *timeout_us, uint32_t polling_interval_us);
288 
289 /********************************************* SEMAPHORE HELPERS *******************************************************/
290 
291 #if (defined(CY_RTOS_AWARE) || defined(COMPONENT_RTOS_AWARE)) && (CYHAL_IPC_RTOS_SEMA_NUM > 0)
292 
/* Was any of the RTOS semaphores initialized previously? */
bool _cyhal_ipc_rtos_is_any_sema_initialized()
295 {
296     for (size_t sema_idx = 0; sema_idx < CYHAL_IPC_RTOS_SEMA_NUM; ++sema_idx)
297     {
298         if (_cyhal_ipc_rtos_semaphores[sema_idx].initialized)
299         {
300             return true;
301         }
302     }
303     return false;
304 }
305 
size_t _cyhal_ipc_rtos_get_rtos_sema_index(void *sema_address)
307 {
308     for (size_t sema_idx = 0; sema_idx < CYHAL_IPC_RTOS_SEMA_NUM; ++sema_idx)
309     {
310         if ((void *)&_cyhal_ipc_rtos_semaphores[sema_idx] == sema_address)
311         {
312             return sema_idx;
313         }
314     }
315     CY_ASSERT(false);
316     return 0;
317 }
318 
319 #endif /* (defined(CY_RTOS_AWARE) || defined(COMPONENT_RTOS_AWARE)) && (CYHAL_IPC_RTOS_SEMA_NUM > 0) */
320 
static cy_rslt_t _cyhal_ipc_sema_init(cyhal_ipc_t *obj, uint32_t semaphore_num, bool preemptable)
322 {
323     cy_rslt_t result = CY_RSLT_SUCCESS;
324 
    /* On CAT1C and CAT1D devices, unlike CAT1A devices, the startup code does not initialize the IPC PDL semaphores
    *  and does not allocate shared memory for them. */
327     #if defined(COMPONENT_CAT1D) || (defined(SRSS_HT_VARIANT) && (SRSS_HT_VARIANT > 0))
328     if (false == semas_initialized)
329     {
330         #if (CYHAL_IPC_INIT_CORE)
331         CY_SECTION_SHAREDMEM
332         #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
333         static uint32_t ipc_sema_array[L1_DCACHE_ROUND_UP_WORDS(CYHAL_IPC_SEMA_COUNT / CY_IPC_SEMA_PER_WORD)]
334         CY_ALIGN(__SCB_DCACHE_LINE_SIZE)
335         #else
336         static uint32_t ipc_sema_array[CYHAL_IPC_SEMA_COUNT / CY_IPC_SEMA_PER_WORD]
337         #endif /* defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) */
338         ;
339         result = (cy_rslt_t)Cy_IPC_Sema_Init(CY_IPC_CHAN_SEMA, CYHAL_IPC_SEMA_COUNT, ipc_sema_array);
340         #else
341         result = (cy_rslt_t)Cy_IPC_Sema_Init(CY_IPC_CHAN_SEMA, 0, NULL);
342         #endif /* CYHAL_IPC_INIT_CORE or other */
343         if (CY_RSLT_SUCCESS == result)
344         {
345             semas_initialized = true;
346         }
347     }
348     #endif /* defined(COMPONENT_CAT1D) || (defined(SRSS_HT_VARIANT) && (SRSS_HT_VARIANT > 0)) */
349 
350     if (CY_RSLT_SUCCESS == result)
351     {
352         obj->sema_preemptable = preemptable;
353         obj->sema_number = semaphore_num;
354         obj->sema_taken = false;
355         #if (defined(CY_RTOS_AWARE) || defined(COMPONENT_RTOS_AWARE)) && (CYHAL_IPC_RTOS_SEMA_NUM > 0)
356         obj->rtos_sema = NULL;
357         #endif /* (defined(CY_RTOS_AWARE) || defined(COMPONENT_RTOS_AWARE)) && (CYHAL_IPC_RTOS_SEMA_NUM > 0) */
358     }
359 
360     return result;
361 }
362 
363 /*
    timeout_us - pointer to the remaining timeout in microseconds (CYHAL_IPC_NEVER_TIMEOUT means wait forever),
                 decremented by this function while it waits
    step_us - for how long to wait between unsuccessful IPC sema takes
366 */
static cy_rslt_t _cyhal_ipc_sema_take(cyhal_ipc_t *obj, uint32_t *timeout_us, uint32_t step_us)
368 {
369     cy_rslt_t result = ~CY_RSLT_SUCCESS;
370     bool is_never_timeout = (*timeout_us == CYHAL_IPC_NEVER_TIMEOUT);
371 
372     #if (defined(CY_RTOS_AWARE) || defined(COMPONENT_RTOS_AWARE)) && (CYHAL_IPC_RTOS_SEMA_NUM > 0)
373     cy_rslt_t rtos_sema_result = CY_RSLT_SUCCESS;
374     if (NULL != obj->rtos_sema)
375     {
376         /* Using RTOS semaphore as mechanism to most effectively wait for IPC semaphore to be free. */
377 
        /* When using an RTOS, if the cyhal_ipc_semaphore_take API is called from an ISR, in
        * certain RTOSes cy_rtos_get_semaphore returns immediately without blocking. So we can
        * either busy wait around the semaphore being set in the ISR or use the normal polling method
        * we use in the non-RTOS case. For simplicity, and to avoid the calling ISR depending on
        * the IPC ISR priority, we use the normal polling method.
        */
384         bool in_isr = (SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) != 0;
385         if (((*timeout_us >= 1000) || is_never_timeout) && (false == in_isr))
386         {
            /* Make a single acquire attempt; if the semaphore is busy (CY_IPC_SEMA_NOT_ACQUIRED), wait for it
            *  via the RTOS semaphore below instead of busy-waiting and ignoring the timeout. */
            result = (cy_rslt_t)Cy_IPC_Sema_Set(obj->sema_number, obj->sema_preemptable);
391             if ((cy_rslt_t)CY_IPC_SEMA_LOCKED != result)
392             {
393                 bool sema_not_yet_available = ((cy_rslt_t)CY_IPC_SEMA_NOT_ACQUIRED == result);
394                 if (sema_not_yet_available)
395                 {
396                     _cyhal_irq_enable((_cyhal_system_irq_t)(cpuss_interrupts_ipc_0_IRQn + _CYHAL_IPC_SEMA_INTR_STR_NUM));
397 
398                     if (is_never_timeout)
399                     {
400                         while (CY_RSLT_SUCCESS != result)
401                         {
402                             uint32_t timeout_ms = CYHAL_IPC_POLLING_INTERVAL_uS / 1000;
403                             if (timeout_ms == 0)
404                             {
405                                 ++timeout_ms;
406                             }
407                             /* cy_rtos_get_semaphore works here as advanced delay mechanism, which in RTOS environment
408                             *  helps utilize CPU in most efficient way while we are waiting for IPC Semaphore to be given.
409                             *  Only unexpected return codes (expected are CY_RSLT_SUCCESS and CY_RTOS_TIMEOUT) will
410                             *  be reported to user in case of unsuccessful semaphore take. */
411                             rtos_sema_result = cy_rtos_get_semaphore(
412                                         &(((_cyhal_ipc_rtos_sema_t *)obj->rtos_sema)->semaphore), timeout_ms, false);
413                             result = (cy_rslt_t)Cy_IPC_Sema_Set(obj->sema_number, obj->sema_preemptable);
414                         }
415                     }
416                     else
417                     {
418                         uint32_t timeout_ms = *timeout_us / 1000;
419                         /* cy_rtos_get_semaphore works here as advanced delay mechanism, which in RTOS environment
420                         *  helps utilize CPU in most efficient way while we are waiting for IPC Semaphore to be given.
421                         *  Only unexpected return codes (expected are CY_RSLT_SUCCESS and CY_RTOS_TIMEOUT) will
422                         *  be reported to user in case of unsuccessful semaphore take. */
423                         rtos_sema_result = cy_rtos_get_semaphore(
424                                     &(((_cyhal_ipc_rtos_sema_t *)obj->rtos_sema)->semaphore), timeout_ms, false);
425                         result = (cy_rslt_t)Cy_IPC_Sema_Set(obj->sema_number, obj->sema_preemptable);
426                         *timeout_us = 0;
427                     }
428 
429                     _cyhal_irq_disable((_cyhal_system_irq_t)(cpuss_interrupts_ipc_0_IRQn + _CYHAL_IPC_SEMA_INTR_STR_NUM));
430                 }
431                 else
432                 {
433                     result = CY_RSLT_SUCCESS;
434                 }
435             }
436         }
437     }
438     #endif /* (defined(CY_RTOS_AWARE) || defined(COMPONENT_RTOS_AWARE)) && (CYHAL_IPC_RTOS_SEMA_NUM > 0) */
439 
440     if (CY_RSLT_SUCCESS != result)
441     {
442         while(
443             ((result = (cy_rslt_t)Cy_IPC_Sema_Set(obj->sema_number, obj->sema_preemptable)) != CY_RSLT_SUCCESS) &&
444             ((*timeout_us != 0) || is_never_timeout)
445         )
446         {
447             _cyhal_ipc_wait_step(is_never_timeout ? NULL : timeout_us, step_us);
448         }
449     }
450 
451     #if (defined(CY_RTOS_AWARE) || defined(COMPONENT_RTOS_AWARE)) && (CYHAL_IPC_RTOS_SEMA_NUM > 0)
452     if (obj->sema_number < _CYHAL_IPC_RELEASE_INTR_BITS)
453     {
454         IPC_INTR_STRUCT_Type *ipc_intr_base = Cy_IPC_Drv_GetIntrBaseAddr(_CYHAL_IPC_SEMA_INTR_STR_NUM);
455         uint32_t intr_status_masked = _FLD2VAL(IPC_INTR_STRUCT_INTR_MASKED_RELEASE, Cy_IPC_Drv_GetInterruptStatusMasked(ipc_intr_base));
456         uint32_t current_sema_intr_mask = 1 << obj->sema_number;
457         if ((CY_RSLT_SUCCESS == result) && (intr_status_masked & current_sema_intr_mask))
458         {
459             /* If semaphore get was successful and interrupt was not cleared by IRQ handler (e.g. interrupts are disabled),
460             *   clear pending interrupt, that is related to this semaphore number */
461             Cy_IPC_Drv_ClearInterrupt(ipc_intr_base, current_sema_intr_mask, 0);
462         }
463     }
    /* If the IPC semaphore was not successfully taken and an unexpected result was returned by cy_rtos_get_semaphore,
    *  forward the RTOS result to the user. */
466     if ((CY_RSLT_SUCCESS != result) && ((CY_RSLT_SUCCESS != rtos_sema_result) && (CY_RTOS_TIMEOUT != rtos_sema_result)))
467     {
468         result = rtos_sema_result;
469     }
470     #endif /* (defined(CY_RTOS_AWARE) || defined(COMPONENT_RTOS_AWARE)) && (CYHAL_IPC_RTOS_SEMA_NUM > 0) */
471 
472     if (CY_RSLT_SUCCESS == result)
473     {
474         obj->sema_taken = true;
475     }
476 
477     return result;
478 }
479 
480 
481 /********************************************** QUEUES HELPERS ********************************************************/
482 
static cy_rslt_t _cyhal_ipc_acquire_core_sync_sema(cyhal_ipc_t *obj, uint32_t *timeout_us)
484 {
485     CY_ASSERT(NULL != obj);
486     CY_ASSERT(NULL != timeout_us);
487 
488     return _cyhal_ipc_sema_take(obj, timeout_us, _CYHAL_IPC_SERVICE_SEMA_STEP_US);
489 }
490 
static cy_rslt_t _cyhal_ipc_give_core_sync_sema(cyhal_ipc_t *obj, uint32_t *timeout_us)
492 {
493     cy_rslt_t result;
494     bool is_never_timeout = (*timeout_us == CYHAL_IPC_NEVER_TIMEOUT);
495     while ((CY_RSLT_SUCCESS != (result = cyhal_ipc_semaphore_give(obj))) && (is_never_timeout || (*timeout_us != 0)))
496     {
497         if (is_never_timeout || (*timeout_us > _CYHAL_IPC_SERVICE_SEMA_STEP_US))
498         {
499             cyhal_system_delay_us(_CYHAL_IPC_SERVICE_SEMA_STEP_US);
500             if (false == is_never_timeout)
501             {
502                 *timeout_us -= _CYHAL_IPC_SERVICE_SEMA_STEP_US;
503             }
504         }
505         else
506         {
507             cyhal_system_delay_us(*timeout_us);
508             *timeout_us = 0;
509         }
510     }
511     return result;
512 }
513 
static cy_rslt_t _cyhal_ipc_clear_interrupt(cyhal_ipc_t *obj, uint32_t isr_source_chan, uint32_t isr_chan)
515 {
516     CY_ASSERT(NULL != obj);
517 
518     cy_rslt_t result;
519     IPC_INTR_STRUCT_Type *ipc_intr_base = Cy_IPC_Drv_GetIntrBaseAddr(isr_source_chan);
520     uint32_t timeout_acq = _CYHAL_IPC_SERVICE_SEMA_TIMEOUT_US;
521     uint32_t timeout_give = _CYHAL_IPC_SERVICE_SEMA_TIMEOUT_US;
    /* We cannot allow an interrupt to happen between _cyhal_ipc_acquire_core_sync_sema and a successful
    *   _cyhal_ipc_give_core_sync_sema, as the interrupt handler will also attempt to acquire the semaphore (which
    *   will obviously fail) */
525     uint32_t intr_status = cyhal_system_critical_section_enter();
526     if (CY_RSLT_SUCCESS == (result = _cyhal_ipc_acquire_core_sync_sema(obj, &timeout_acq)))
527     {
528         Cy_IPC_Drv_ClearInterrupt(ipc_intr_base, (1UL << isr_chan), 0);
529         INVALIDATE_DCACHE_BEFORE_READING_FROM_MEMORY(_cyhal_ipc_service_data->isr_clear_sync, sizeof(*_cyhal_ipc_service_data->isr_clear_sync));
530         _CYHAL_IPC_CI(isr_chan, _CYHAL_IPC_CUR_CORE_IDX);
531         CLEAR_DCACHE_AFTER_WRITING_TO_MEMORY(_cyhal_ipc_service_data->isr_clear_sync, sizeof(*_cyhal_ipc_service_data->isr_clear_sync));
532         result = _cyhal_ipc_give_core_sync_sema(obj, &timeout_give);
533     }
534     cyhal_system_critical_section_exit(intr_status);
535     return result;
536 }
537 
static bool _cyhal_ipc_check_isr_handled(cyhal_ipc_t *obj, uint32_t channel, uint32_t *timeout)
539 {
540     CY_ASSERT(NULL != obj);
541     CY_ASSERT(NULL != timeout);
542 
543     bool handled = false;
    /* We cannot allow an interrupt to happen between _cyhal_ipc_acquire_core_sync_sema and a successful
    *   _cyhal_ipc_give_core_sync_sema, as the interrupt handler will also attempt to acquire the semaphore (which
    *   will obviously fail) */
547     uint32_t intr_status = cyhal_system_critical_section_enter();
548     if (CY_RSLT_SUCCESS == _cyhal_ipc_acquire_core_sync_sema(obj, timeout))
549     {
550         INVALIDATE_DCACHE_BEFORE_READING_FROM_MEMORY(_cyhal_ipc_service_data->isr_enable_sync, sizeof(*_cyhal_ipc_service_data->isr_enable_sync));
551         INVALIDATE_DCACHE_BEFORE_READING_FROM_MEMORY(_cyhal_ipc_service_data->isr_clear_sync, sizeof(*_cyhal_ipc_service_data->isr_clear_sync));
552         /* interrupt is disabled for specific channel and core
553         *   or (if enabled) interrupt is serviced */
554         handled =   (((false == _CYHAL_IPC_IIE(channel, _CYHAL_IPC_CUR_CORE_IDX))
555                         || _CYHAL_IPC_IIS(channel, _CYHAL_IPC_CUR_CORE_IDX)) &&
556         #if (_CYHAL_IPC_CORE_NUM == 2)
557                     ((false == _CYHAL_IPC_IIE(channel, _CYHAL_IPC_OTHER_CORE_IDX))
558                         || _CYHAL_IPC_IIS(channel, _CYHAL_IPC_OTHER_CORE_IDX)));
559         #elif (_CYHAL_IPC_CORE_NUM == 3)
560                     ((false == _CYHAL_IPC_IIE(channel, _CYHAL_IPC_OTHER_CORE_0_IDX))
561                         || _CYHAL_IPC_IIS(channel, _CYHAL_IPC_OTHER_CORE_0_IDX)) &&
562                     ((false == _CYHAL_IPC_IIE(channel, _CYHAL_IPC_OTHER_CORE_1_IDX))
563                         || _CYHAL_IPC_IIS(channel, _CYHAL_IPC_OTHER_CORE_1_IDX)));
564         #endif /* (_CYHAL_IPC_CORE_NUM == 2) or (_CYHAL_IPC_CORE_NUM == 3) */
565         (void)_cyhal_ipc_give_core_sync_sema(obj, timeout);
566     }
567     cyhal_system_critical_section_exit(intr_status);
568     return handled;
569 }
570 
static cy_rslt_t _cyhal_ipc_set_isr_expected(cyhal_ipc_t *obj, uint32_t channel)
572 {
573     cy_rslt_t result;
574     uint32_t timeout_acq = _CYHAL_IPC_SERVICE_SEMA_TIMEOUT_US;
575     uint32_t timeout_give = _CYHAL_IPC_SERVICE_SEMA_TIMEOUT_US;
    /* We cannot allow an interrupt to happen between _cyhal_ipc_acquire_core_sync_sema and a successful
    *   _cyhal_ipc_give_core_sync_sema, as the interrupt handler will also attempt to acquire the semaphore (which
    *   will obviously fail) */
579     uint32_t intr_status = cyhal_system_critical_section_enter();
580     if (CY_RSLT_SUCCESS == (result = _cyhal_ipc_acquire_core_sync_sema(obj, &timeout_acq)))
581     {
582         INVALIDATE_DCACHE_BEFORE_READING_FROM_MEMORY(_cyhal_ipc_service_data->isr_clear_sync, sizeof(*_cyhal_ipc_service_data->isr_clear_sync));
583         _CYHAL_IPC_SI(channel, _CYHAL_IPC_CUR_CORE_IDX);
584         #if (_CYHAL_IPC_CORE_NUM == 2)
585         _CYHAL_IPC_SI(channel, _CYHAL_IPC_OTHER_CORE_IDX);
586         #elif (_CYHAL_IPC_CORE_NUM == 3)
587         _CYHAL_IPC_SI(channel, _CYHAL_IPC_OTHER_CORE_0_IDX);
588         _CYHAL_IPC_SI(channel, _CYHAL_IPC_OTHER_CORE_1_IDX);
589         #endif /* (_CYHAL_IPC_CORE_NUM == 2) or (_CYHAL_IPC_CORE_NUM == 3) */
590         CLEAR_DCACHE_AFTER_WRITING_TO_MEMORY(_cyhal_ipc_service_data->isr_clear_sync, sizeof(*_cyhal_ipc_service_data->isr_clear_sync));
591         result = _cyhal_ipc_give_core_sync_sema(obj, &timeout_give);
592     }
593     cyhal_system_critical_section_exit(intr_status);
594     return result;
595 }
596 
static cy_rslt_t _cyhal_ipc_enable_interrupt(cyhal_ipc_t *obj, uint32_t channel, bool enable)
598 {
599     CY_ASSERT(NULL != obj);
600 
601     cy_rslt_t result;
602     IPC_INTR_STRUCT_Type *ipc_intr_base = Cy_IPC_Drv_GetIntrBaseAddr(_CYHAL_IPC_CURRENT_CORE_IPC_INTR_CHAN);
603     uint32_t current_ipc_interrupt_mask = Cy_IPC_Drv_GetInterruptMask(ipc_intr_base);
604     uint32_t channel_intr_mask = (1UL << channel);
605     uint32_t timeout_acq = _CYHAL_IPC_SERVICE_SEMA_TIMEOUT_US;
606     uint32_t timeout_give = _CYHAL_IPC_SERVICE_SEMA_TIMEOUT_US;
607 
    /* We cannot allow an interrupt to happen between _cyhal_ipc_acquire_core_sync_sema and a successful
    *   _cyhal_ipc_give_core_sync_sema, as the interrupt handler will also attempt to acquire the semaphore (which
    *   will obviously fail) */
611     uint32_t intr_status = cyhal_system_critical_section_enter();
612     if (CY_RSLT_SUCCESS == (result = _cyhal_ipc_acquire_core_sync_sema(obj, &timeout_acq)))
613     {
614         INVALIDATE_DCACHE_BEFORE_READING_FROM_MEMORY(_cyhal_ipc_service_data->isr_enable_sync, sizeof(*_cyhal_ipc_service_data->isr_enable_sync));
615         if (enable)
616         {
617             Cy_IPC_Drv_ClearInterrupt(ipc_intr_base, channel_intr_mask, 0);
618             Cy_IPC_Drv_SetInterruptMask(ipc_intr_base, current_ipc_interrupt_mask | channel_intr_mask, 0);
619             _CYHAL_IPC_SIE(channel, _CYHAL_IPC_CUR_CORE_IDX);
620         }
621         else
622         {
623             Cy_IPC_Drv_SetInterruptMask(ipc_intr_base, current_ipc_interrupt_mask & ~channel_intr_mask, 0);
624             _CYHAL_IPC_SID(channel, _CYHAL_IPC_CUR_CORE_IDX);
625         }
626         CLEAR_DCACHE_AFTER_WRITING_TO_MEMORY(_cyhal_ipc_service_data->isr_enable_sync, sizeof(*_cyhal_ipc_service_data->isr_enable_sync));
627         result = _cyhal_ipc_give_core_sync_sema(obj, &timeout_give);
628     }
629     cyhal_system_critical_section_exit(intr_status);
630     return result;
631 }
632 
static cyhal_ipc_queue_t *_cyhal_ipc_find_last_element(cyhal_ipc_queue_t *queue_handle)
634 {
635     CY_ASSERT(NULL != queue_handle);
636 
637     cyhal_ipc_queue_t *retval = queue_handle;
638     do
639     {
640         INVALIDATE_DCACHE_BEFORE_READING_FROM_MEMORY(retval, sizeof(*retval));
641         if (retval->next_queue_obj == NULL)
642             break;
643         retval = retval->next_queue_obj;
644     } while (true);
645 
646     return retval;
647 }
648 
static bool _cyhal_ipc_check_queue_number_used(const cyhal_ipc_queue_t *queue_to_be_added)
650 {
651     CY_ASSERT(NULL != queue_to_be_added);
652 
653     if (NULL == _cyhal_ipc_service_data)
654         return false;
655 
656     INVALIDATE_DCACHE_BEFORE_READING_FROM_MEMORY(_cyhal_ipc_service_data, sizeof(*_cyhal_ipc_service_data));
657     if (NULL != _cyhal_ipc_service_data->queues_ll_pointer)
658     {
659         cyhal_ipc_queue_t *queue_handle = _cyhal_ipc_service_data->queues_ll_pointer;
660         do
661         {
662             INVALIDATE_DCACHE_BEFORE_READING_FROM_MEMORY(queue_handle, sizeof(*queue_handle));
663             if ((queue_handle->channel_num == queue_to_be_added->channel_num) &&
664                 (queue_handle->queue_num == queue_to_be_added->queue_num))
665             {
666                 return true;
667             }
668         }
669         while ((queue_handle = queue_handle->next_queue_obj) != NULL);
670     }
671     return false;
672 }
673 
static void _cyhal_ipc_add_queue_element(cyhal_ipc_t *obj, cyhal_ipc_queue_t *queue_handle_to_add)
675 {
676     CY_ASSERT(NULL != obj);
677     CY_ASSERT(NULL != queue_handle_to_add);
678     CY_ASSERT(NULL != _cyhal_ipc_service_data);
679 
680     cyhal_ipc_queue_t *queue_handle = obj->queue_obj;
681 
682     /* Fill in service data */
683     queue_handle_to_add->curr_items = 0;
684     queue_handle_to_add->queue_head = (void *)&(((uint8_t *)(queue_handle_to_add->queue_pool))[0]);
685     queue_handle_to_add->queue_tail = (void *)&(((uint8_t *)(queue_handle_to_add->queue_pool))[0]);
686     queue_handle_to_add->next_queue_obj = NULL;
687     queue_handle_to_add->triggered_events = 0;
688 
689     CLEAR_DCACHE_AFTER_WRITING_TO_MEMORY(queue_handle_to_add, sizeof(*queue_handle_to_add));
690     CLEAR_DCACHE_AFTER_WRITING_TO_MEMORY(queue_handle_to_add->queue_pool, queue_handle_to_add->num_items * queue_handle_to_add->item_size);
691     INVALIDATE_DCACHE_BEFORE_READING_FROM_MEMORY(_cyhal_ipc_service_data, sizeof(*_cyhal_ipc_service_data));
692 
693     if (NULL != queue_handle)
694     {
695         cyhal_ipc_queue_t *last_queue_handle = _cyhal_ipc_find_last_element(queue_handle);
696         last_queue_handle->next_queue_obj = queue_handle_to_add;
697         CLEAR_DCACHE_AFTER_WRITING_TO_MEMORY(last_queue_handle, sizeof(*last_queue_handle));
698     }
699     else
700     {
701         /* First queue in current cyhal_ipc_t object */
702         obj->queue_obj = (void *)queue_handle_to_add;
703 
704         if (NULL != _cyhal_ipc_service_data->queues_ll_pointer)
705         {
706             cyhal_ipc_queue_t *last_queue_handle = _cyhal_ipc_find_last_element(_cyhal_ipc_service_data->queues_ll_pointer);
707             last_queue_handle->next_queue_obj = obj->queue_obj;
708             CLEAR_DCACHE_AFTER_WRITING_TO_MEMORY(last_queue_handle, sizeof(*last_queue_handle));
709         }
710     }
711 
712     if (NULL == _cyhal_ipc_service_data->queues_ll_pointer)
713     {
714         _cyhal_ipc_service_data->queues_ll_pointer = queue_handle_to_add;
715         CLEAR_DCACHE_AFTER_WRITING_TO_MEMORY(_cyhal_ipc_service_data, sizeof(*_cyhal_ipc_service_data));
716     }
717 }
718 
static void _cyhal_ipc_wait_step(uint32_t *timeout_us, uint32_t polling_interval_us)
720 {
721     uint32_t us_wait_step = polling_interval_us % 1000;
722     uint32_t ms_wait_step = (polling_interval_us - us_wait_step) / 1000;
723 
724     if (NULL == timeout_us)
725     {
726         if (ms_wait_step != 0)
727         {
728             cyhal_system_delay_ms(ms_wait_step);
729         }
730         if (us_wait_step != 0)
731         {
732             cyhal_system_delay_us(us_wait_step);
733         }
734     }
735     else
736     {
737         if (us_wait_step + (ms_wait_step * 1000) <= *timeout_us)
738         {
739             if (ms_wait_step != 0)
740             {
741                 cyhal_system_delay_ms(ms_wait_step);
742                 *timeout_us -= (ms_wait_step * 1000);
743             }
744             if (us_wait_step != 0)
745             {
746                 cyhal_system_delay_us(us_wait_step);
747                 *timeout_us -= us_wait_step;
748             }
749         }
750         else if (*timeout_us != 0)
751         {
752             cyhal_system_delay_us(*timeout_us);
753             *timeout_us = 0;
754         }
755     }
756 }
757 
static cy_rslt_t _cyhal_ipc_wait_lock_acquire(IPC_STRUCT_Type *ipc_base, uint32_t *timeout_us, bool wait_forever)
759 {
760     CY_ASSERT(NULL != ipc_base);
761     cy_rslt_t result;
762 
763     while (((result = (cy_rslt_t)Cy_IPC_Drv_LockAcquire(ipc_base)) != CY_RSLT_SUCCESS) &&
764             (wait_forever || ((NULL != timeout_us) && (*timeout_us != 0))))
765     {
766         _cyhal_ipc_wait_step(wait_forever ? NULL : timeout_us, CYHAL_IPC_POLLING_INTERVAL_uS);
767     }
768 
769     return result;
770 }
771 
static cy_rslt_t _cyhal_ipc_queue_put_get(cyhal_ipc_t *obj, void *msg, uint32_t timeout_us, bool put)
773 {
774     CY_ASSERT(NULL != obj);
775     CY_ASSERT(NULL != msg);
776     CY_ASSERT(NULL != obj->queue_obj);
777 
778     if ((SCB->ICSR & SCB_ICSR_VECTACTIVE_Msk) != 0)
779     {
780         return CYHAL_IPC_RSLT_ERR_CANT_OPERATE_IN_ISR;
781     }
782 
783     bool is_never_timeout = (timeout_us == CYHAL_IPC_NEVER_TIMEOUT);
784 
785     cyhal_ipc_queue_t *queue_handle = obj->queue_obj;
786     uint32_t channel = obj->queue_obj->channel_num;
787     IPC_STRUCT_Type *ipc_base = Cy_IPC_Drv_GetIpcBaseAddress(channel);
788 
789     uint32_t timeout_us_left = timeout_us;
790     cy_rslt_t result = CY_RSLT_SUCCESS;
791 
    /* Used to perform the last loop iteration when the timeout goes from non-zero to zero */
793     bool last_loop;
794     do
795     {
796         bool is_prev_isr_handled;
797         while ((false == (is_prev_isr_handled = _cyhal_ipc_check_isr_handled(obj, channel, &timeout_us_left)))
798                 && (is_never_timeout || (timeout_us_left != 0)))
799         {
800             if (is_never_timeout || (timeout_us_left > _CYHAL_IPC_SERVICE_SEMA_STEP_US))
801             {
802                 cyhal_system_delay_us(_CYHAL_IPC_SERVICE_SEMA_STEP_US);
803                 if (false == is_never_timeout)
804                 {
805                     timeout_us_left -= _CYHAL_IPC_SERVICE_SEMA_STEP_US;
806                 }
807             }
808             else
809             {
810                 cyhal_system_delay_us(timeout_us_left);
811                 timeout_us_left = 0;
812             }
813         }
814 
815         if (false == is_prev_isr_handled)
816         {
817             /* One or more cores didn't handle previous operation that caused interrupt */
818             return CYHAL_IPC_RSLT_ERR_ISR_WAS_NOT_HANDLED;
819         }
820 
821         last_loop = false;
822         result = _cyhal_ipc_wait_lock_acquire(ipc_base, &timeout_us_left, is_never_timeout);
823 
824         uint16_t triggered_events = 0;
825         if (CY_RSLT_SUCCESS == result)
826         {
827             INVALIDATE_DCACHE_BEFORE_READING_FROM_MEMORY(queue_handle, sizeof(*queue_handle));
828             INVALIDATE_DCACHE_BEFORE_READING_FROM_MEMORY(queue_handle->queue_pool, queue_handle->num_items * queue_handle->item_size);
829             _CYHAL_IPC_CLR_TRIGGERED_EVENT(queue_handle->triggered_events);
830 
831             if (put)
832             {
833                 if (queue_handle->curr_items < queue_handle->num_items)
834                 {
835                     memcpy(queue_handle->queue_head, msg, queue_handle->item_size);
836                     ++queue_handle->curr_items;
                    /* Checking the position of queue_head in the circular buffer. If it is at the end of the buffer, move the head to the start */
838                     if (((uint32_t)queue_handle->queue_head - (uint32_t)queue_handle->queue_pool) ==
839                         (uint32_t)((queue_handle->num_items - 1) * queue_handle->item_size))
840                     {
841                         queue_handle->queue_head = queue_handle->queue_pool;
842                     }
843                     else
844                     {
845                         queue_handle->queue_head = (void *)((uint8_t *)(queue_handle->queue_head) + queue_handle->item_size);
846                     }
847 
848                     triggered_events = CYHAL_IPC_QUEUE_WRITE;
849                     if (queue_handle->curr_items == queue_handle->num_items)
850                     {
851                         triggered_events |= CYHAL_IPC_QUEUE_FULL;
852                     }
853 
854                     result = CY_RSLT_SUCCESS;
855                 }
856                 else
857                 {
858                     result = CYHAL_IPC_RSLT_ERR_QUEUE_FULL;
859                 }
860             }
861             else
862             {
863                 if (queue_handle->curr_items > 0)
864                 {
865                     memcpy(msg, queue_handle->queue_tail, queue_handle->item_size);
866                     --queue_handle->curr_items;
                    /* Checking the position of queue_tail in the circular buffer. If it is at the end of the buffer, move the tail to the start */
868                     if (((uint32_t)(queue_handle->queue_tail) - (uint32_t)(queue_handle->queue_pool)) == (uint32_t)((queue_handle->num_items - 1) * queue_handle->item_size))
869                     {
870                         queue_handle->queue_tail = queue_handle->queue_pool;
871                     }
872                     else
873                     {
874                         queue_handle->queue_tail = (void *)((uint8_t *)(queue_handle->queue_tail) + queue_handle->item_size);
875                     }
876 
877                     triggered_events = CYHAL_IPC_QUEUE_READ;
878                     if (queue_handle->curr_items == 0)
879                     {
880                         triggered_events |= CYHAL_IPC_QUEUE_EMPTY;
881                     }
882 
883                     result = CY_RSLT_SUCCESS;
884                 }
885                 else
886                 {
887                     result = CYHAL_IPC_RSLT_ERR_QUEUE_EMPTY;
888                 }
889             }
890 
891             bool inform_ipcs_via_interrupt = false;
892             if ((uint16_t)CYHAL_IPC_NO_INTR != triggered_events)
893             {
894                 inform_ipcs_via_interrupt = true;
895                 _CYHAL_IPC_ADD_TRIGGERED_EVENT(queue_handle->triggered_events, triggered_events);
896                 result = _cyhal_ipc_set_isr_expected(obj, channel);
897             }
898 
899             CLEAR_DCACHE_AFTER_WRITING_TO_MEMORY(queue_handle->queue_pool, queue_handle->num_items * queue_handle->item_size);
900             CLEAR_DCACHE_AFTER_WRITING_TO_MEMORY(queue_handle, sizeof(*queue_handle));
901             /* No reason to check the return value, as this function can return either:
902             * - CY_IPC_DRV_SUCCESS - if lock was successfully released, or
903             * - CY_IPC_DRV_ERROR - if IPC channel was not acquired before the function call, which is impossible in this
904             * situation */
905             /* No reason to generate interrupt if no events were triggered by performed operation */
906             (void)Cy_IPC_Drv_LockRelease(ipc_base, inform_ipcs_via_interrupt ? _CYHAL_IPC_TRIGGER_ISR_MASK : 0u);
907         }
908 
909         if ((CY_RSLT_SUCCESS != result) && (is_never_timeout || (timeout_us_left != 0)))
910         {
911             _cyhal_ipc_wait_step(is_never_timeout ? NULL : &timeout_us_left, CYHAL_IPC_POLLING_INTERVAL_uS);
912             if ((false == is_never_timeout) && (timeout_us_left == 0))
913             {
914                 last_loop = true;
915             }
916         }
917 
918     } while ((CY_RSLT_SUCCESS != result) && (is_never_timeout || (timeout_us_left != 0) || last_loop));
919 
920     return result;
921 }
922 
uint16_t _cyhal_ipc_decode_triggered_events(cyhal_ipc_t *obj)
924 {
925     CY_ASSERT(NULL != obj);
926     INVALIDATE_DCACHE_BEFORE_READING_FROM_MEMORY(obj->queue_obj, sizeof(*obj->queue_obj));
927     /* Check what events signatures are changed and combine with triggered events */
928     uint16_t retval = _CYHAL_IPC_GET_SIGNATURES(obj->queue_obj->triggered_events ^ obj->processed_events) &
929         _CYHAL_IPC_GET_TRIGGERED_EVENT(obj->queue_obj->triggered_events);
930     obj->processed_events = obj->queue_obj->triggered_events;
931     return retval;
932 }
933 
934 /********************************* SEMAPHORES / QUEUES IPC INTERRUPT HANDLER********************************************/
935 
static void _cyhal_ipc_irq_handler(void)
937 {
938     _cyhal_system_irq_t irqn = _cyhal_irq_get_active();
939     uint32_t isr_channel = irqn - cpuss_interrupts_ipc_0_IRQn;
940     IPC_INTR_STRUCT_Type *ipc_intr_base = Cy_IPC_Drv_GetIntrBaseAddr(isr_channel);
941     /* We are interested only in Release events */
942     uint32_t interrupt_status_masked =
943             _FLD2VAL(IPC_INTR_STRUCT_INTR_MASKED_RELEASE, Cy_IPC_Drv_GetInterruptStatusMasked(ipc_intr_base));
944 
945     #if (defined(CY_RTOS_AWARE) || defined(COMPONENT_RTOS_AWARE)) && (CYHAL_IPC_RTOS_SEMA_NUM > 0)
946     if (_CYHAL_IPC_SEMA_INTR_STR_NUM == isr_channel)
947     {
948         if (interrupt_status_masked != 0)
949         {
950             for (size_t sema_idx = 0; sema_idx < CYHAL_IPC_RTOS_SEMA_NUM; sema_idx++)
951             {
952                 uint32_t current_sema_mask = 1 << _cyhal_ipc_rtos_semaphores[sema_idx].sema_num;
953                 if (_cyhal_ipc_rtos_semaphores[sema_idx].initialized && (interrupt_status_masked & current_sema_mask))
954                 {
955                     (void)cy_rtos_set_semaphore(&_cyhal_ipc_rtos_semaphores[sema_idx].semaphore, true);
956                     Cy_IPC_Drv_ClearInterrupt(ipc_intr_base, current_sema_mask, 0);
957                     break;
958                 }
959             }
960         }
961     }
962     else
963     #endif /* (defined(CY_RTOS_AWARE) || defined(COMPONENT_RTOS_AWARE)) && (CYHAL_IPC_RTOS_SEMA_NUM > 0) */
964     if (isr_channel >= CYHAL_IPC_CHAN_0)
965     {
966         uint32_t channel = CYHAL_IPC_CHAN_0;
967         while (interrupt_status_masked != 0)
968         {
969             uint32_t channel_mask = 1UL << channel;
970             if ((interrupt_status_masked & (channel_mask)) != 0)
971             {
972                 interrupt_status_masked &= ~channel_mask;
973                 cyhal_ipc_t *obj = _CYHAL_IPC_OBJ_ARR_EL(channel);
974                 if (CY_RSLT_SUCCESS == _cyhal_ipc_clear_interrupt(obj, isr_channel, channel))
975                 {
976                     /* Going through all known cyhal_ipc_t objects, that are tied to the IPC channel, that provoked interrupt */
977                     while (obj != NULL)
978                     {
979                         /* User enabled callback for certain events */
980                         if ((obj->user_events != CYHAL_IPC_NO_INTR) && (obj->callback_data.callback != NULL))
981                         {
982                             /* Check triggered events and (by checking signatures) filter only those events,
983                             *   that were not yet processed */
984                             uint16_t queue_events = _cyhal_ipc_decode_triggered_events(obj);
985                             /* Checking if last operation for certain queue provoked one of the activated by user events */
986                             uint16_t activated_occurred_events = obj->user_events & queue_events;
987                             if (activated_occurred_events)
988                             {
989                                 cyhal_ipc_event_callback_t callback = (cyhal_ipc_event_callback_t)obj->callback_data.callback;
990                                 callback(obj->callback_data.callback_arg, (cyhal_ipc_event_t)activated_occurred_events);
991                             }
992                         }
993                         obj = obj->prev_object;
994                     }
995                 }
996             }
997             channel++;
998         }
999     }
1000 }
1001 
1002 /**************************************** SEMAPHORE PUBLIC FUNCTIONS **************************************************/
1003 
cy_rslt_t cyhal_ipc_semaphore_init(cyhal_ipc_t *obj, uint32_t semaphore_num, bool preemptable)
1005 {
1006     CY_ASSERT(NULL != obj);
1007 
1008     /* Last semaphore is used for internal IPC queues functionality */
1009     if (semaphore_num >= (CYHAL_IPC_SEMA_COUNT - 1))
1010     {
1011         /* Semaphore index exceeds the number of allowed Semaphores */
1012         return CYHAL_IPC_RSLT_ERR_INVALID_PARAMETER;
1013     }
1014 
1015     cy_rslt_t result = _cyhal_ipc_sema_init(obj, semaphore_num, preemptable);
1016 
1017     #if (defined(CY_RTOS_AWARE) || defined(COMPONENT_RTOS_AWARE)) && (CYHAL_IPC_RTOS_SEMA_NUM > 0)
1018     if ((CY_RSLT_SUCCESS == result) && (semaphore_num < _CYHAL_IPC_RELEASE_INTR_BITS))
1019     {
1020         /* Looking for free RTOS semaphore from the pre-allocated pool */
1021         for (size_t rtos_sema_idx = 0; rtos_sema_idx < CYHAL_IPC_RTOS_SEMA_NUM; ++rtos_sema_idx)
1022         {
1023             if (false == _cyhal_ipc_rtos_semaphores[rtos_sema_idx].initialized)
1024             {
1025                 result = cy_rtos_init_semaphore(&(_cyhal_ipc_rtos_semaphores[rtos_sema_idx].semaphore), 1, 0);
1026                 if (CY_RSLT_SUCCESS == result)
1027                 {
1028                     obj->rtos_sema = (void *)&_cyhal_ipc_rtos_semaphores[rtos_sema_idx];
1029                     _cyhal_ipc_rtos_semaphores[rtos_sema_idx].initialized = true;
1030                     _cyhal_ipc_rtos_semaphores[rtos_sema_idx].sema_num = semaphore_num;
1031                     break;
1032                 }
1033             }
1034         }
1035         if ((CY_RSLT_SUCCESS == result) && (NULL != obj->rtos_sema))
1036         {
1037             IPC_INTR_STRUCT_Type *ipc_intr_base = Cy_IPC_Drv_GetIntrBaseAddr(_CYHAL_IPC_SEMA_INTR_STR_NUM);
1038             /* Enable all possible interrupt bits for sema interrupt */
1039             Cy_IPC_Drv_SetInterruptMask(ipc_intr_base, (1 << obj->sema_number) - 1, 0);
1040             _cyhal_irq_register((_cyhal_system_irq_t)(cpuss_interrupts_ipc_0_IRQn + _CYHAL_IPC_SEMA_INTR_STR_NUM), CYHAL_ISR_PRIORITY_DEFAULT, _cyhal_ipc_irq_handler);
1041             /* No IRQ enable, as it will be done before each time interrupt is needed */
1042         }
1043     }
1044     #endif /* (defined(CY_RTOS_AWARE) || defined(COMPONENT_RTOS_AWARE)) && (CYHAL_IPC_RTOS_SEMA_NUM > 0) */
1045 
1046     return result;
1047 }
1048 
void cyhal_ipc_semaphore_free(cyhal_ipc_t *obj)
1050 {
1051     #if (defined(CY_RTOS_AWARE) || defined(COMPONENT_RTOS_AWARE)) && (CYHAL_IPC_RTOS_SEMA_NUM > 0)
1052     if (NULL != obj->rtos_sema)
1053     {
1054         (void)cy_rtos_deinit_semaphore(&((_cyhal_ipc_rtos_sema_t *)obj->rtos_sema)->semaphore);
1055         ((_cyhal_ipc_rtos_sema_t *)obj->rtos_sema)->initialized = false;
1056         ((_cyhal_ipc_rtos_sema_t *)obj->rtos_sema)->sema_num = 0;
1057         obj->rtos_sema = NULL;
1058     }
1059 
1060     /* clear the interrupt mask for this semaphore */
1061     IPC_INTR_STRUCT_Type *ipc_intr_base = Cy_IPC_Drv_GetIntrBaseAddr(_CYHAL_IPC_SEMA_INTR_STR_NUM);
1062     uint32_t mask = Cy_IPC_Drv_GetInterruptMask(ipc_intr_base);
1063     mask &= ~(1 << (obj->sema_number));
1064     Cy_IPC_Drv_SetInterruptMask(ipc_intr_base, mask, 0);
1065 
1066     #endif /* (defined(CY_RTOS_AWARE) || defined(COMPONENT_RTOS_AWARE)) && (CYHAL_IPC_RTOS_SEMA_NUM > 0) or other */
1067 
1068     if (obj->sema_taken)
1069     {
1070         (void)cyhal_ipc_semaphore_give(obj);
1071     }
1072 }
1073 
cy_rslt_t cyhal_ipc_semaphore_take(cyhal_ipc_t *obj, uint32_t timeout_us)
1075 {
1076     CY_ASSERT(NULL != obj);
1077     return _cyhal_ipc_sema_take(obj, &timeout_us, CYHAL_IPC_POLLING_INTERVAL_uS);
1078 }
1079 
cy_rslt_t cyhal_ipc_semaphore_give(cyhal_ipc_t *obj)
1081 {
1082     CY_ASSERT(NULL != obj);
1083     cy_en_ipcsema_status_t ipc_sema_result = Cy_IPC_Sema_Clear(obj->sema_number, obj->sema_preemptable);
1084     cy_rslt_t result = ((CY_IPC_SEMA_SUCCESS == ipc_sema_result) || (CY_IPC_SEMA_NOT_ACQUIRED == ipc_sema_result)) ?
1085             CY_RSLT_SUCCESS : (cy_rslt_t)ipc_sema_result;
1086     #if (defined(CY_RTOS_AWARE) || defined(COMPONENT_RTOS_AWARE)) && (CYHAL_IPC_RTOS_SEMA_NUM > 0)
1087     if ((obj->sema_number < _CYHAL_IPC_RELEASE_INTR_BITS) && (CY_IPC_SEMA_SUCCESS == ipc_sema_result))
1088     {
1089         IPC_INTR_STRUCT_Type *ipc_intr_base = Cy_IPC_Drv_GetIntrBaseAddr(_CYHAL_IPC_SEMA_INTR_STR_NUM);
1090         Cy_IPC_Drv_SetInterrupt(ipc_intr_base, 1 << obj->sema_number, 0);
1091     }
1092     #endif /* (defined(CY_RTOS_AWARE) || defined(COMPONENT_RTOS_AWARE)) && (CYHAL_IPC_RTOS_SEMA_NUM > 0) */
1093 
1094     if (CY_RSLT_SUCCESS == result)
1095     {
1096         obj->sema_taken = false;
1097     }
1098 
1099     return result;
1100 }
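
/* A minimal usage sketch for the semaphore API above (illustrative only). It assumes
 * cyhal_ipc_semaphore_init() from cyhal_ipc.h; the semaphore number 4 is an arbitrary example
 * and must be below CYHAL_IPC_SEMA_COUNT and identical on both cores:
 *
 *     cyhal_ipc_t sema;
 *     cy_rslt_t rslt = cyhal_ipc_semaphore_init(&sema, 4u, false); // preemptable = false
 *     if (CY_RSLT_SUCCESS == rslt)
 *     {
 *         // Block for up to 1000 us; in an RTOS-aware build the paired RTOS semaphore
 *         // lets the task sleep instead of polling.
 *         if (CY_RSLT_SUCCESS == cyhal_ipc_semaphore_take(&sema, 1000u))
 *         {
 *             // ... access the shared resource ...
 *             (void)cyhal_ipc_semaphore_give(&sema);
 *         }
 *         cyhal_ipc_semaphore_free(&sema);
 *     }
 */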

/***************************************** QUEUES PUBLIC FUNCTIONS ****************************************************/

cy_rslt_t cyhal_ipc_queue_init(cyhal_ipc_t *obj, cyhal_ipc_queue_t *queue_handle)
{
    CY_ASSERT(NULL != obj);
    CY_ASSERT(NULL != queue_handle);
    CY_ASSERT(NULL != queue_handle->queue_pool);

    cy_rslt_t result = CY_RSLT_SUCCESS;

    /* Queue IPC channel number and number of items check */
    if ((false == _CYHAL_IPC_CHAN_IDX_CORRECT(queue_handle->channel_num)) || (queue_handle->num_items == 0) ||
            (queue_handle->item_size == 0))
    {
        return CYHAL_IPC_RSLT_ERR_INVALID_PARAMETER;
    }

    uint32_t channel = queue_handle->channel_num;
    if (false == _cyhal_ipc_check_queue_number_used(queue_handle))
    {
        memset(obj, 0, sizeof(cyhal_ipc_t));

        result = _cyhal_ipc_sema_init(obj, CYHAL_IPC_SEMA_COUNT - 1, false);
        if (CY_RSLT_SUCCESS == result)
        {
            /* If this is the first IPC object being initialized,
            *  we need to clear the isr_clear_sync and isr_enable_sync flags, as they are
            *  located in shared memory, which is not cleared upon initialization.
            *
            *  In addition, clear the data of all user-available IPC HAL channels to prevent
            *  cyhal_ipc_queue_get_handle calls from reading a random value and treating it as a
            *  _cyhal_ipc_sevice_data_t struct pointer.
            */
            if (NULL == _cyhal_ipc_service_data)
            {
                CY_SECTION_SHAREDMEM
                #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
                static uint8_t isr_clear_sync[L1_DCACHE_ROUND_UP_BYTES(_CYHAL_IPC_CORE_NUM)]
                CY_ALIGN(__SCB_DCACHE_LINE_SIZE)
                #else
                static uint8_t isr_clear_sync[_CYHAL_IPC_CORE_NUM]
                #endif /* defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) */
                ;
                CY_SECTION_SHAREDMEM
                #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
                static uint8_t isr_enable_sync[L1_DCACHE_ROUND_UP_BYTES(_CYHAL_IPC_CORE_NUM)]
                CY_ALIGN(__SCB_DCACHE_LINE_SIZE)
                #else
                static uint8_t isr_enable_sync[_CYHAL_IPC_CORE_NUM]
                #endif /* defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) */
                ;
                CY_SECTION_SHAREDMEM
                static _cyhal_ipc_sevice_data_t ipc_service_data
                #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U)
                CY_ALIGN(__SCB_DCACHE_LINE_SIZE)
                #endif /* defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) */
                ;

                memset(isr_clear_sync, 0, sizeof(isr_clear_sync));
                memset(isr_enable_sync, 0, sizeof(isr_enable_sync));

                ipc_service_data.isr_clear_sync = isr_clear_sync;
                ipc_service_data.isr_enable_sync = isr_enable_sync;
                ipc_service_data.queues_ll_pointer = NULL;
                _cyhal_ipc_service_data = &ipc_service_data;
                CLEAR_DCACHE_AFTER_WRITING_TO_MEMORY(isr_clear_sync, sizeof(*isr_clear_sync));
                CLEAR_DCACHE_AFTER_WRITING_TO_MEMORY(isr_enable_sync, sizeof(*isr_enable_sync));
                CLEAR_DCACHE_AFTER_WRITING_TO_MEMORY(_cyhal_ipc_service_data, sizeof(*_cyhal_ipc_service_data));

                for (uint32_t chan_idx = CYHAL_IPC_CHAN_0; chan_idx < CYHAL_IPC_CHAN_0 + CYHAL_IPC_USR_CHANNELS; ++chan_idx)
                {
                    IPC_STRUCT_Type *cur_chan_ipc_base = Cy_IPC_Drv_GetIpcBaseAddress(chan_idx);
                    Cy_IPC_Drv_WriteDataValue(cur_chan_ipc_base, 0);
                }

                for (uint8_t obj_idx = 0; obj_idx < CYHAL_IPC_USR_CHANNELS; ++obj_idx)
                {
                    _ipc_objects[obj_idx] = NULL;
                }
            }

            _cyhal_ipc_add_queue_element(obj, queue_handle);

            obj->callback_data.callback = NULL;
            obj->callback_data.callback_arg = NULL;
            obj->user_events = CYHAL_IPC_NO_INTR;
            obj->processed_events = CYHAL_IPC_NO_INTR;

            IPC_STRUCT_Type *ipc_base = Cy_IPC_Drv_GetIpcBaseAddress(channel);
            Cy_IPC_Drv_WriteDataValue(ipc_base, (uint32_t)_cyhal_ipc_service_data);

            if (false == interrupts_initialized)
            {
                _cyhal_irq_register(_CYHAL_IPC_CURRENT_CORE_IPC_INTR_SRC, CYHAL_ISR_PRIORITY_DEFAULT, _cyhal_ipc_irq_handler);
                _cyhal_irq_enable(_CYHAL_IPC_CURRENT_CORE_IPC_INTR_SRC);
                interrupts_initialized = true;
            }

            obj->prev_object = _CYHAL_IPC_OBJ_ARR_EL(channel);
            _CYHAL_IPC_OBJ_ARR_EL(channel) = obj;
        }
    }
    else
    {
        return CYHAL_IPC_RSLT_ERR_QUEUE_NUM_IN_USE;
    }
    return result;
}
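
/* A minimal setup sketch for cyhal_ipc_queue_init() (illustrative only). The queue descriptor
 * and its item pool must be visible to all cores; the queue/channel numbers and array sizes
 * below are assumptions for the example. On devices with a data cache, placing these objects
 * on cache-line boundaries (as done for the service data above) may additionally be required:
 *
 *     #define EXAMPLE_QUEUE_NUM   (1u)
 *     #define EXAMPLE_NUM_ITEMS   (4u)
 *
 *     CY_SECTION_SHAREDMEM static cyhal_ipc_queue_t example_queue;
 *     CY_SECTION_SHAREDMEM static uint32_t example_pool[EXAMPLE_NUM_ITEMS];
 *
 *     cyhal_ipc_t ipc_obj;
 *     example_queue.channel_num = CYHAL_IPC_CHAN_0;
 *     example_queue.queue_num   = EXAMPLE_QUEUE_NUM;
 *     example_queue.queue_pool  = example_pool;
 *     example_queue.num_items   = EXAMPLE_NUM_ITEMS;
 *     example_queue.item_size   = sizeof(uint32_t);
 *     cy_rslt_t rslt = cyhal_ipc_queue_init(&ipc_obj, &example_queue);
 */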

void cyhal_ipc_queue_free(cyhal_ipc_t *obj)
{
    CY_ASSERT(NULL != obj);
    CY_ASSERT(NULL != obj->queue_obj);

    uint32_t channel = obj->queue_obj->channel_num;

    INVALIDATE_DCACHE_BEFORE_READING_FROM_MEMORY(_cyhal_ipc_service_data, sizeof(*_cyhal_ipc_service_data));
    if (NULL != _cyhal_ipc_service_data->queues_ll_pointer)
    {
        cyhal_ipc_queue_t *current_queue_obj = _cyhal_ipc_service_data->queues_ll_pointer;

        /* Queue element to be deleted is the first one in the linked list */
        if (_cyhal_ipc_service_data->queues_ll_pointer == obj->queue_obj)
        {
            uint32_t timeout = _CYHAL_IPC_SERVICE_SEMA_TIMEOUT_US;
            while (CY_RSLT_SUCCESS != _cyhal_ipc_sema_take(obj, &timeout, _CYHAL_IPC_SERVICE_SEMA_STEP_US))
            {
                timeout = _CYHAL_IPC_SERVICE_SEMA_TIMEOUT_US;
            }
            _cyhal_ipc_service_data->queues_ll_pointer = obj->queue_obj->next_queue_obj;
            CLEAR_DCACHE_AFTER_WRITING_TO_MEMORY(_cyhal_ipc_service_data, sizeof(*_cyhal_ipc_service_data));
            while (CY_RSLT_SUCCESS != cyhal_ipc_semaphore_give(obj)) { cyhal_system_delay_us(_CYHAL_IPC_SERVICE_SEMA_STEP_US); }
        }
        else
        {
            while (NULL != current_queue_obj)
            {
                INVALIDATE_DCACHE_BEFORE_READING_FROM_MEMORY(current_queue_obj, sizeof(*current_queue_obj));
                if (current_queue_obj->next_queue_obj == obj->queue_obj)
                {
                    IPC_STRUCT_Type *ipc_base = Cy_IPC_Drv_GetIpcBaseAddress(current_queue_obj->next_queue_obj->channel_num);
                    /* Locking IPC channel before modifying one of its queues */
                    _cyhal_ipc_wait_lock_acquire(ipc_base, NULL, true);
                    INVALIDATE_DCACHE_BEFORE_READING_FROM_MEMORY(current_queue_obj->next_queue_obj, sizeof(*current_queue_obj->next_queue_obj));
                    current_queue_obj->next_queue_obj = current_queue_obj->next_queue_obj->next_queue_obj;
                    CLEAR_DCACHE_AFTER_WRITING_TO_MEMORY(current_queue_obj, sizeof(*current_queue_obj));
                    (void)Cy_IPC_Drv_LockRelease(ipc_base, 0);
                    break;
                }
                current_queue_obj = current_queue_obj->next_queue_obj;
            }
        }
    }

    cyhal_ipc_t *curr_ipc_obj = _CYHAL_IPC_OBJ_ARR_EL(channel);
    if (curr_ipc_obj == obj)
    {
        _CYHAL_IPC_OBJ_ARR_EL(channel) = obj->prev_object;
    }
    else
    {
        while (NULL != curr_ipc_obj)
        {
            if (curr_ipc_obj->prev_object == obj)
            {
                curr_ipc_obj->prev_object = curr_ipc_obj->prev_object->prev_object;
                break;
            }
            curr_ipc_obj = curr_ipc_obj->prev_object;
        }
    }

    cyhal_ipc_semaphore_free(obj);
    memset(obj, 0, sizeof(cyhal_ipc_t));
}

cy_rslt_t cyhal_ipc_queue_get_handle(cyhal_ipc_t *obj, uint32_t channel_num, uint32_t queue_num)
{
    CY_ASSERT(NULL != obj);

    cy_rslt_t result = CY_RSLT_SUCCESS;

    /* Queue IPC channel number check */
    if (false == _CYHAL_IPC_CHAN_IDX_CORRECT(channel_num))
    {
        return CYHAL_IPC_RSLT_ERR_INVALID_PARAMETER;
    }

    if (NULL == _cyhal_ipc_service_data)
    {
        /* Getting the shared memory pointer to the IPC service data, which holds the queues linked list */
        IPC_STRUCT_Type *ipc_base = Cy_IPC_Drv_GetIpcBaseAddress(channel_num);
        _cyhal_ipc_service_data = (_cyhal_ipc_sevice_data_t *)Cy_IPC_Drv_ReadDataValue(ipc_base);
        if (NULL == _cyhal_ipc_service_data)
        {
            return CYHAL_IPC_RSLT_ERR_QUEUE_NOT_FOUND;
        }
        INVALIDATE_DCACHE_BEFORE_READING_FROM_MEMORY(_cyhal_ipc_service_data, sizeof(*_cyhal_ipc_service_data));
        if (NULL == _cyhal_ipc_service_data->queues_ll_pointer)
        {
            return CYHAL_IPC_RSLT_ERR_QUEUE_NOT_FOUND;
        }
        for (uint8_t obj_idx = 0; obj_idx < CYHAL_IPC_USR_CHANNELS; ++obj_idx)
        {
            _ipc_objects[obj_idx] = NULL;
        }
    }

    INVALIDATE_DCACHE_BEFORE_READING_FROM_MEMORY(_cyhal_ipc_service_data, sizeof(*_cyhal_ipc_service_data));
    if (NULL != _cyhal_ipc_service_data->queues_ll_pointer)
    {
        bool queue_obj_found = false;
        cyhal_ipc_queue_t *queue_ptr = _cyhal_ipc_service_data->queues_ll_pointer;

        while (queue_ptr != NULL)
        {
            INVALIDATE_DCACHE_BEFORE_READING_FROM_MEMORY(queue_ptr, sizeof(*queue_ptr));
            if ((queue_ptr->channel_num == channel_num) && (queue_ptr->queue_num == queue_num))
            {
                queue_obj_found = true;
                break;
            }
            queue_ptr = queue_ptr->next_queue_obj;
        }

        if (queue_obj_found)
        {
            memset(obj, 0, sizeof(cyhal_ipc_t));
            obj->callback_data.callback = NULL;
            obj->callback_data.callback_arg = NULL;
            obj->user_events = CYHAL_IPC_NO_INTR;
            obj->processed_events = CYHAL_IPC_NO_INTR;
            obj->queue_obj = queue_ptr;
            obj->prev_object = _CYHAL_IPC_OBJ_ARR_EL(channel_num);
            _CYHAL_IPC_OBJ_ARR_EL(channel_num) = obj;

            result = _cyhal_ipc_sema_init(obj, CYHAL_IPC_SEMA_COUNT - 1, false);

            if ((CY_RSLT_SUCCESS == result) && (false == interrupts_initialized))
            {
                _cyhal_irq_register(_CYHAL_IPC_CURRENT_CORE_IPC_INTR_SRC, CYHAL_ISR_PRIORITY_DEFAULT, _cyhal_ipc_irq_handler);
                _cyhal_irq_enable(_CYHAL_IPC_CURRENT_CORE_IPC_INTR_SRC);
                interrupts_initialized = true;
            }
        }
        else
        {
            result = CYHAL_IPC_RSLT_ERR_QUEUE_NOT_FOUND;
        }
    }
    return result;
}
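
/* A minimal sketch of retrieving the same queue from the other core (illustrative only;
 * EXAMPLE_QUEUE_NUM refers to the hypothetical queue created in the sketch above):
 *
 *     cyhal_ipc_t ipc_obj;
 *     cy_rslt_t rslt = cyhal_ipc_queue_get_handle(&ipc_obj, CYHAL_IPC_CHAN_0, EXAMPLE_QUEUE_NUM);
 *     if (CYHAL_IPC_RSLT_ERR_QUEUE_NOT_FOUND == rslt)
 *     {
 *         // The producing core has not called cyhal_ipc_queue_init() yet - retry later.
 *     }
 */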

void cyhal_ipc_queue_register_callback(cyhal_ipc_t *obj, cyhal_ipc_event_callback_t callback, void *callback_arg)
{
    CY_ASSERT(NULL != obj);
    obj->callback_data.callback = (cy_israddress) callback;
    obj->callback_data.callback_arg = callback_arg;
}

void cyhal_ipc_queue_enable_event(cyhal_ipc_t *obj, cyhal_ipc_event_t event, uint8_t intr_priority, bool enable)
{
    CY_ASSERT(NULL != obj);
    CY_ASSERT(NULL != obj->queue_obj);

    uint32_t channel = obj->queue_obj->channel_num;
    IPC_INTR_STRUCT_Type *ipc_intr_base = Cy_IPC_Drv_GetIntrBaseAddr(_CYHAL_IPC_CURRENT_CORE_IPC_INTR_CHAN);

    if (enable)
    {
        IPC_STRUCT_Type *ipc_base = Cy_IPC_Drv_GetIpcBaseAddress(channel);
        _cyhal_ipc_wait_lock_acquire(ipc_base, NULL, true);
        uint32_t event_and_sign_mask = (event << _CYHAL_IPC_EVENTS_SIGNATURES_BITS) | event;
        INVALIDATE_DCACHE_BEFORE_READING_FROM_MEMORY(obj->queue_obj, sizeof(*obj->queue_obj));
        /* Extract the event in question and its signature from the queue's currently triggered events and copy those
        *  bits into obj->processed_events, so the callback is not called for events that were already pending when enabled. */
        obj->processed_events =
                (obj->processed_events & ~event_and_sign_mask) | (obj->queue_obj->triggered_events & event_and_sign_mask);
        (void)Cy_IPC_Drv_LockRelease(ipc_base, 0);

        obj->user_events |= event;
    }
    else
    {
        obj->user_events &= ~event;
    }

    _cyhal_system_irq_t irqn = _CYHAL_IPC_CURRENT_CORE_IPC_INTR_SRC;
    /* As one IPC INTR structure services all IPC channels on a given core, we can't change the interrupt priority
    *  per channel as requested; we can only raise it. */
    if (intr_priority < _cyhal_irq_get_priority(irqn))
    {
        _cyhal_irq_set_priority(irqn, intr_priority);
    }

    uint32_t current_ipc_interrupt_mask = Cy_IPC_Drv_GetInterruptMask(ipc_intr_base);
    uint32_t channel_intr_mask = (1UL << channel);
    if (CYHAL_IPC_NO_INTR != obj->user_events)
    {
        if ((current_ipc_interrupt_mask & channel_intr_mask) == 0)
        {
            /* This interrupt was not enabled yet */
            (void)_cyhal_ipc_enable_interrupt(obj, channel, true);
        }
    }
    else
    {
        /* Go through all queues tied to the current channel and check that none of them have events enabled either */
        uint32_t all_queues_events = CYHAL_IPC_NO_INTR;
        cyhal_ipc_t *obj_to_check = _CYHAL_IPC_OBJ_ARR_EL(channel);

        while (NULL != obj_to_check)
        {
            all_queues_events |= obj_to_check->user_events;
            /* If at least one object has events enabled, there is no reason to continue and waste CPU cycles */
            if (CYHAL_IPC_NO_INTR != all_queues_events)
            {
                break;
            }
            obj_to_check = obj_to_check->prev_object;
        }

        /* No channel-related queue has events enabled - disable the interrupt */
        if (CYHAL_IPC_NO_INTR == all_queues_events)
        {
            (void)_cyhal_ipc_enable_interrupt(obj, channel, false);
        }
    }
}
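
/* A minimal sketch of event notification for a queue (illustrative only). The callback name,
 * the requested priority, and the use of the CYHAL_IPC_QUEUE_WRITE flag from cyhal_ipc_event_t
 * are assumptions for the example. Per the notes above, the priority takes effect core-wide and
 * only if it raises the current one, and queue put/get must not be called from the callback
 * (ISR context):
 *
 *     static void example_queue_cb(void *arg, cyhal_ipc_event_t event)
 *     {
 *         (void)arg;
 *         if (0u != (event & CYHAL_IPC_QUEUE_WRITE))
 *         {
 *             // Signal the application that data arrived (defer the actual queue get).
 *         }
 *     }
 *
 *     cyhal_ipc_queue_register_callback(&ipc_obj, example_queue_cb, NULL);
 *     cyhal_ipc_queue_enable_event(&ipc_obj, CYHAL_IPC_QUEUE_WRITE, 3u, true);
 */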

cy_rslt_t cyhal_ipc_queue_put(cyhal_ipc_t *obj, void *msg, uint32_t timeout_us)
{
    return _cyhal_ipc_queue_put_get(obj, msg, timeout_us, true);
}

cy_rslt_t cyhal_ipc_queue_get(cyhal_ipc_t *obj, void *msg, uint32_t timeout_us)
{
    return _cyhal_ipc_queue_put_get(obj, msg, timeout_us, false);
}
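
/* A minimal put/get sketch (illustrative only). These calls must not be made from a
 * callback/ISR context, as noted in the implementation documentation at the top of this file:
 *
 *     uint32_t tx_value = 42u;
 *     uint32_t rx_value = 0u;
 *
 *     // Producer core: block for up to 1000 us if the queue is full.
 *     cy_rslt_t rslt = cyhal_ipc_queue_put(&ipc_obj, &tx_value, 1000u);
 *
 *     // Consumer core: block for up to 1000 us if the queue is empty.
 *     rslt = cyhal_ipc_queue_get(&ipc_obj, &rx_value, 1000u);
 */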

uint32_t cyhal_ipc_queue_count(cyhal_ipc_t *obj)
{
    CY_ASSERT(NULL != obj);
    CY_ASSERT(NULL != obj->queue_obj);
    uint32_t channel = obj->queue_obj->channel_num;
    IPC_STRUCT_Type *ipc_base = Cy_IPC_Drv_GetIpcBaseAddress(channel);
    (void)_cyhal_ipc_wait_lock_acquire(ipc_base, NULL, true);
    INVALIDATE_DCACHE_BEFORE_READING_FROM_MEMORY(obj->queue_obj, sizeof(*obj->queue_obj));
    uint32_t curr_items = obj->queue_obj->curr_items;
    (void)Cy_IPC_Drv_LockRelease(ipc_base, 0);
    return curr_items;
}

cy_rslt_t cyhal_ipc_queue_reset(cyhal_ipc_t *obj)
{
    CY_ASSERT(NULL != obj);
    CY_ASSERT(NULL != obj->queue_obj);

    cyhal_ipc_queue_t *queue_handle = obj->queue_obj;
    uint32_t channel = obj->queue_obj->channel_num;
    IPC_STRUCT_Type *ipc_base = Cy_IPC_Drv_GetIpcBaseAddress(channel);

    cy_rslt_t result = _cyhal_ipc_wait_lock_acquire(ipc_base, NULL, true);

    if (CY_RSLT_SUCCESS == result)
    {
        INVALIDATE_DCACHE_BEFORE_READING_FROM_MEMORY(queue_handle, sizeof(*queue_handle));
        queue_handle->curr_items = 0;
        queue_handle->queue_head = queue_handle->queue_pool;
        queue_handle->queue_tail = queue_handle->queue_pool;
        _CYHAL_IPC_ADD_TRIGGERED_EVENT(queue_handle->triggered_events, CYHAL_IPC_QUEUE_RESET);
        CLEAR_DCACHE_AFTER_WRITING_TO_MEMORY(queue_handle, sizeof(*queue_handle));
        (void)Cy_IPC_Drv_LockRelease(ipc_base, _CYHAL_IPC_TRIGGER_ISR_MASK);
    }

    return result;
}
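
/* A minimal sketch of inspecting and resetting a queue (illustrative only):
 *
 *     uint32_t pending = cyhal_ipc_queue_count(&ipc_obj);
 *     if (pending != 0u)
 *     {
 *         // Discard everything currently queued; cores that enabled the CYHAL_IPC_QUEUE_RESET
 *         // event are notified through the triggered-events mechanism used above.
 *         (void)cyhal_ipc_queue_reset(&ipc_obj);
 *     }
 */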

#if defined(__cplusplus)
}
#endif

#endif /* CYHAL_DRIVER_AVAILABLE_IPC */