/*
 * SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
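
/*
 * Support for high-level interrupts (HLI) used by the ESP32 Bluetooth
 * controller when CONFIG_BTDM_CTRL_HLI is enabled.  The file has two parts:
 * a small registry of handlers invoked from the high-level interrupt code
 * (hli_intr_register / hli_c_handler), and simple ring-buffer queues used to
 * pass data from those ISRs down to FreeRTOS queues and semaphores via a
 * level 3 software interrupt, since FreeRTOS APIs must not be called directly
 * from high-level interrupt context.
 */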

#include <string.h>
#include "esp_log.h"
#include "esp_heap_caps.h"
#include "xtensa/core-macros.h"
#include "soc/dport_reg.h"
#include "hli_api.h"
#include "freertos/FreeRTOS.h"
#include "freertos/queue.h"
#include "freertos/semphr.h"

#if CONFIG_BTDM_CTRL_HLI
#define HLI_MAX_HANDLERS    4

/* One registered high-level interrupt handler */
typedef struct {
    intr_handler_t handler;     /* handler function; NULL means the slot is free */
    void* arg;                  /* argument passed to the handler */
    uint32_t intr_reg;          /* address of the interrupt status register to check, 0 for CPU internal interrupts */
    uint32_t intr_mask;         /* bits of intr_reg which belong to this handler */
} hli_handler_info_t;

/* Descriptor of a deferred "customer" callback, executed by the level 3
 * software interrupt on behalf of a high-level ISR (see HLI_QUEUE_FLAG_CUSTOMER).
 */
typedef struct {
#define CUSTOMER_TYPE_REQUEST (0)
#define CUSTOMER_TYPE_RELEASE (1)
    struct {
        uint32_t cb_type;       /* CUSTOMER_TYPE_REQUEST or CUSTOMER_TYPE_RELEASE */
        union {
            int (* request)(uint32_t, uint32_t, uint32_t);
            int (* release)(uint32_t);
        } cb;
    } customer_cb;
    uint32_t arg0, arg1, arg2;  /* arguments passed to the callback */
} customer_swisr_t;

static void IRAM_ATTR customer_swisr_handle(customer_swisr_t *cus_swisr)
{
    if (cus_swisr->customer_cb.cb_type == CUSTOMER_TYPE_REQUEST) {
        if (cus_swisr->customer_cb.cb.request != NULL) {
            cus_swisr->customer_cb.cb.request(cus_swisr->arg0, cus_swisr->arg1, cus_swisr->arg2);
        }
    } else if (cus_swisr->customer_cb.cb_type == CUSTOMER_TYPE_RELEASE) {
        if (cus_swisr->customer_cb.cb.release != NULL) {
            cus_swisr->customer_cb.cb.release(cus_swisr->arg0);
        }
    }
}

static DRAM_ATTR hli_handler_info_t s_hli_handlers[HLI_MAX_HANDLERS];

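/* Register a handler for a high-level interrupt source.  intr_reg is the
 * address of the interrupt status register to poll (0 for CPU internal
 * interrupts) and intr_mask selects the status bits belonging to this source.
 * Returns ESP_ERR_NO_MEM once all HLI_MAX_HANDLERS slots are taken.
 */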
esp_err_t hli_intr_register(intr_handler_t handler, void* arg, uint32_t intr_reg, uint32_t intr_mask)
{
    for (hli_handler_info_t* hip = s_hli_handlers;
         hip < s_hli_handlers + HLI_MAX_HANDLERS;
         ++hip) {
        if (hip->handler == NULL) {
            hip->arg = arg;
            hip->intr_reg = intr_reg;
            hip->intr_mask = intr_mask;
            hip->handler = handler;     /* set last; this marks the entry as valid */
            return ESP_OK;
        }
    }
    return ESP_ERR_NO_MEM;
}

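/* C part of the high-level interrupt handling: called from the high-level
 * interrupt vector code, it polls the status register of each registered
 * handler and invokes those whose mask bits are set.
 */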
void IRAM_ATTR hli_c_handler(void)
{
    bool handled = false;
    /* Iterate over registered interrupt handlers,
     * and check if the expected mask is present in the interrupt status register.
     */
    for (hli_handler_info_t* hip = s_hli_handlers;
         hip < s_hli_handlers + HLI_MAX_HANDLERS;
         ++hip) {
        if (hip->handler == NULL) {
            continue;
        }
        uint32_t reg = hip->intr_reg;
        uint32_t val;
        if (reg == 0) { /* special case for CPU internal interrupts */
            val = XTHAL_GET_INTERRUPT();
        } else {
            /* "reg" might not be in DPORT, but this will work in any case */
            val = DPORT_REG_READ(reg);
        }
        if ((val & hip->intr_mask) != 0) {
            handled = true;
            (*hip->handler)(hip->arg);
        }
    }
    if (!handled) {
        /* no handler found, it is OK in this case. */
    }
}

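/* Mask high-level interrupts while hli queues are being manipulated.
 * The returned value must be passed to hli_intr_restore() to re-enable them.
 */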
uint32_t IRAM_ATTR hli_intr_disable(void)
{
    /* disable level 4 and below */
    return XTOS_SET_INTLEVEL(XCHAL_DEBUGLEVEL - 2);
}

void IRAM_ATTR hli_intr_restore(uint32_t state)
{
    XTOS_RESTORE_JUST_INTLEVEL(state);
}

#define HLI_META_QUEUE_SIZE     16  /* max number of hli queues which can have pending elements at once */
#define HLI_QUEUE_MAX_ELEM_SIZE 32  /* upper bound on element size; see the scratch buffer in queue_isr_handler */
#define HLI_QUEUE_SW_INT_NUM    29  /* software interrupt used to hand pending data over to the level 3 handler */

#define HLI_QUEUE_FLAG_SEMAPHORE    BIT(0)  /* queue elements are converted into semaphore "give" operations */
#define HLI_QUEUE_FLAG_CUSTOMER     BIT(1)  /* queue elements are customer_swisr_t callback descriptors */

static DRAM_ATTR struct hli_queue_t *s_meta_queue_ptr = NULL;  /* queue of hli queue handles which have pending elements */
static intr_handle_t ret_handle;                               /* handle of the software interrupt allocated in hli_queue_setup */

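/* Each hli queue is a ring buffer of fixed-size elements stored in the
 * memory that directly follows struct hli_queue_t.  One slot is always kept
 * unused (see hli_queue_create), so that begin == end means "empty" and
 * wrapping end + elem_size onto begin means "full".
 */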
static inline char* IRAM_ATTR wrap_ptr(hli_queue_handle_t queue, char *ptr)
{
    return (ptr == queue->bufend) ? queue->buf : ptr;
}

static inline bool IRAM_ATTR queue_empty(hli_queue_handle_t queue)
{
    return queue->begin == queue->end;
}

static inline bool IRAM_ATTR queue_full(hli_queue_handle_t queue)
{
    return wrap_ptr(queue, queue->end + queue->elem_size) == queue->begin;
}

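/* Handler of the level 3 software interrupt.  Drains the meta queue to find
 * hli queues with pending elements, then forwards each element to its
 * downstream FreeRTOS queue or semaphore (or runs the customer callback),
 * from a context where FreeRTOS "FromISR" APIs may be used.
 */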
static void IRAM_ATTR queue_isr_handler(void* arg)
{
    BaseType_t do_yield = pdFALSE;
    XTHAL_SET_INTCLEAR(BIT(HLI_QUEUE_SW_INT_NUM));
    hli_queue_handle_t queue;

    while (hli_queue_get(s_meta_queue_ptr, &queue)) {
        static DRAM_ATTR char scratch[HLI_QUEUE_MAX_ELEM_SIZE];
        while (hli_queue_get(queue, scratch)) {
            BaseType_t res = pdPASS;
            if ((queue->flags & HLI_QUEUE_FLAG_CUSTOMER) != 0) {
                customer_swisr_handle((customer_swisr_t *)scratch);
            } else if ((queue->flags & HLI_QUEUE_FLAG_SEMAPHORE) != 0) {
                res = xSemaphoreGiveFromISR((SemaphoreHandle_t) queue->downstream, &do_yield);
            } else {
                res = xQueueSendFromISR(queue->downstream, scratch, &do_yield);
            }
            if (res == pdFAIL) {
                /* Failed to send to downstream queue, it is OK in this case. */
            }
        }
    }
    if (do_yield) {
        portYIELD_FROM_ISR();
    }
}

/* Notify the level 3 handler that an element has been added to the given hli queue.
 * Do this by placing the queue handle onto s_meta_queue, and raising a SW interrupt.
 *
 * This function must be called with HL interrupts disabled!
 */
static void IRAM_ATTR queue_signal(hli_queue_handle_t queue)
{
    /* See if the queue is already in s_meta_queue, before adding */
    bool found = false;
    const hli_queue_handle_t *end = (hli_queue_handle_t*) s_meta_queue_ptr->end;
    hli_queue_handle_t *item = (hli_queue_handle_t*) s_meta_queue_ptr->begin;
    for (; item != end; item = (hli_queue_handle_t*) wrap_ptr(s_meta_queue_ptr, (char*) (item + 1))) {
        if (*item == queue) {
            found = true;
            break;
        }
    }
    if (!found) {
        bool res = hli_queue_put(s_meta_queue_ptr, &queue);
        if (!res) {
            esp_rom_printf(DRAM_STR("Fatal error in queue_signal: s_meta_queue full\n"));
            abort();
        }
        XTHAL_SET_INTSET(BIT(HLI_QUEUE_SW_INT_NUM));
    }
}

static void queue_init(hli_queue_handle_t queue, size_t buf_size, size_t elem_size, QueueHandle_t downstream)
{
    queue->elem_size = elem_size;
    queue->begin = queue->buf;
    queue->end = queue->buf;
    queue->bufend = queue->buf + buf_size;
    queue->downstream = downstream;
    queue->flags = 0;
}

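/* Create the meta queue and install the level 3 software interrupt which
 * forwards data from high-level ISRs to FreeRTOS.  Must be called before
 * data is passed through any hli queue.
 */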
void hli_queue_setup(void)
{
    if (s_meta_queue_ptr == NULL) {
        s_meta_queue_ptr = hli_queue_create(HLI_META_QUEUE_SIZE, sizeof(void*), NULL);
        ESP_ERROR_CHECK(esp_intr_alloc(ETS_INTERNAL_SW1_INTR_SOURCE, ESP_INTR_FLAG_IRAM, queue_isr_handler, NULL, &ret_handle));
        xt_ints_on(BIT(HLI_QUEUE_SW_INT_NUM));
    }
}

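/* Counterpart of hli_queue_setup(): delete the meta queue and free the
 * software interrupt.
 */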
void hli_queue_shutdown(void)
{
    if (s_meta_queue_ptr != NULL) {
        hli_queue_delete(s_meta_queue_ptr);
        s_meta_queue_ptr = NULL;
        esp_intr_free(ret_handle);
        xt_ints_off(BIT(HLI_QUEUE_SW_INT_NUM));
    }
}

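/* Create an hli queue holding up to nelem elements of elem_size bytes each,
 * allocated in internal RAM.  Elements put into this queue from a high-level
 * ISR are forwarded to the downstream FreeRTOS queue by the level 3 software
 * interrupt.  Returns NULL if elem_size exceeds HLI_QUEUE_MAX_ELEM_SIZE or
 * the allocation fails.
 */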
hli_queue_handle_t hli_queue_create(size_t nelem, size_t elem_size, QueueHandle_t downstream)
{
    const size_t buf_elem = nelem + 1;  /* one extra slot, so that "full" and "empty" can be told apart */
    if (elem_size > HLI_QUEUE_MAX_ELEM_SIZE) {
        return NULL;
    }
    size_t buf_size = buf_elem * elem_size;
    hli_queue_handle_t res = (hli_queue_handle_t) heap_caps_malloc(sizeof(struct hli_queue_t) + buf_size,
        MALLOC_CAP_INTERNAL|MALLOC_CAP_8BIT);
    if (res == NULL) {
        return NULL;
    }
    queue_init(res, buf_size, elem_size, downstream);
    return res;
}
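
/* Illustrative usage sketch.  The names my_evt_t, s_hl_queue and s_downstream
 * are hypothetical, not part of this module:
 *
 *     // Initialization (task context):
 *     hli_queue_setup();
 *     s_downstream = xQueueCreate(8, sizeof(my_evt_t));
 *     s_hl_queue = hli_queue_create(8, sizeof(my_evt_t), s_downstream);
 *
 *     // From a high-level ISR:
 *     my_evt_t evt = { ... };
 *     hli_queue_put(s_hl_queue, &evt);
 *
 *     // In a task, receive from the downstream FreeRTOS queue as usual:
 *     my_evt_t evt;
 *     xQueueReceive(s_downstream, &evt, portMAX_DELAY);
 */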

hli_queue_handle_t hli_customer_queue_create(size_t nelem, size_t elem_size, QueueHandle_t downstream)
{
    hli_queue_handle_t res = hli_queue_create(nelem, elem_size, (QueueHandle_t) downstream);
    if (res == NULL) {
        return NULL;
    }
    res->flags |= HLI_QUEUE_FLAG_CUSTOMER;
    return res;
}

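/* Create an hli "semaphore": a queue of 1-byte dummy elements, each of which
 * is turned into an xSemaphoreGiveFromISR() call on the downstream semaphore
 * by the level 3 handler.  max_count bounds the number of pending gives.
 */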
hli_queue_handle_t hli_semaphore_create(size_t max_count, SemaphoreHandle_t downstream)
{
    const size_t elem_size = 1;
    hli_queue_handle_t res = hli_queue_create(max_count, elem_size, (QueueHandle_t) downstream);
    if (res == NULL) {
        return NULL;
    }
    res->flags |= HLI_QUEUE_FLAG_SEMAPHORE;
    return res;
}

void hli_queue_delete(hli_queue_handle_t queue)
{
    free(queue);
}

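/* Copy one element out of the queue into "out", if the queue is not empty.
 * High-level interrupts are masked while the queue is accessed.
 * Returns true if an element was retrieved.
 */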
bool IRAM_ATTR hli_queue_get(hli_queue_handle_t queue, void* out)
{
    uint32_t int_state = hli_intr_disable();
    bool res = false;
    if (!queue_empty(queue)) {
        memcpy(out, queue->begin, queue->elem_size);
        queue->begin = wrap_ptr(queue, queue->begin + queue->elem_size);
        res = true;
    }
    hli_intr_restore(int_state);
    return res;
}

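/* Copy one element into the queue.  Intended to be called from a high-level
 * ISR; when the queue goes from empty to non-empty, the level 3 software
 * interrupt is raised so that queue_isr_handler forwards the data downstream.
 * Returns false if the queue is full.
 */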
bool IRAM_ATTR hli_queue_put(hli_queue_handle_t queue, const void* data)
{
    uint32_t int_state = hli_intr_disable();
    bool res = false;
    bool was_empty = queue_empty(queue);
    if (!queue_full(queue)) {
        memcpy(queue->end, data, queue->elem_size);
        queue->end = wrap_ptr(queue, queue->end + queue->elem_size);
        if (was_empty && queue != s_meta_queue_ptr) {
            queue_signal(queue);
        }
        res = true;
    }
    hli_intr_restore(int_state);
    return res;
}

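/* Equivalent of "giving" the downstream semaphore from a high-level ISR:
 * pushes a dummy byte; the actual xSemaphoreGiveFromISR() happens later in
 * queue_isr_handler.
 */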
bool IRAM_ATTR hli_semaphore_give(hli_queue_handle_t queue)
{
    uint8_t data = 0;
    return hli_queue_put(queue, &data);
}

#endif /* CONFIG_BTDM_CTRL_HLI */