/*
 * SPDX-FileCopyrightText: 2020-2024 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

// #define LOG_LOCAL_LEVEL ESP_LOG_DEBUG

#include <stdlib.h>
#include <sys/cdefs.h>
#include "sdkconfig.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "soc/soc_caps.h"
#include "soc/periph_defs.h"
#include "esp_log.h"
#include "esp_check.h"
#include "esp_memory_utils.h"
#include "esp_private/periph_ctrl.h"
#include "gdma_priv.h"
#include "hal/cache_hal.h"

#if CONFIG_PM_ENABLE && SOC_PAU_SUPPORTED
#include "esp_private/gdma_sleep_retention.h"
#endif

static const char *TAG = "gdma";

#define GDMA_INVALID_PERIPH_TRIG  (0x3F)
#define SEARCH_REQUEST_RX_CHANNEL (1 << 0)
#define SEARCH_REQUEST_TX_CHANNEL (1 << 1)
/**
 * The GDMA driver consists of three object classes, namely: Group, Pair and Channel.
 * A Channel is allocated when the user calls `gdma_new_channel`; its lifecycle is maintained by the user.
 * Pairs and Groups are lazily allocated; their life cycles are maintained by this driver.
 * We use reference counting to track their life cycles, i.e. the driver will free their memory only when their reference count reaches 0.
 *
 * We don't use an all-in-one spin lock in this driver; instead, we create different spin locks at different levels.
 * The platform has a spinlock, which is used to protect the group handle slots and the reference count of each group.
 * Each group has a spinlock, which is used to protect group-level state, e.g. the hal object, pair handle slots and the reference count of each pair.
 * Each pair has a spinlock, which is used to protect pair-level state, e.g. channel handle slots and the occupy code.
 */
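
/*
 * A minimal usage sketch of the public API implemented in this file (illustrative
 * only, not compiled): it assumes the `GDMA_MAKE_TRIGGER` helper and the trigger
 * enums from the public GDMA header, and `desc_list` stands for the first DMA
 * descriptor prepared by the caller; error handling is omitted.
 *
 *     gdma_channel_alloc_config_t alloc_cfg = {
 *         .direction = GDMA_CHANNEL_DIRECTION_TX,
 *     };
 *     gdma_channel_handle_t tx_chan = NULL;
 *     gdma_new_channel(&alloc_cfg, &tx_chan);       // group and pair are lazily created here
 *     gdma_connect(tx_chan, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_SPI, 2));
 *     gdma_start(tx_chan, (intptr_t)desc_list);     // start fetching descriptors
 *     // ... transfer runs, event callbacks may fire ...
 *     gdma_stop(tx_chan);
 *     gdma_disconnect(tx_chan);
 *     gdma_del_channel(tx_chan);                    // drops the channel's reference on the pair
 */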

typedef struct gdma_platform_t {
    portMUX_TYPE spinlock;                 // platform level spinlock
    gdma_group_t *groups[SOC_GDMA_GROUPS]; // array of GDMA group instances
    int group_ref_counts[SOC_GDMA_GROUPS]; // reference count used to protect group install/uninstall
} gdma_platform_t;

static gdma_group_t *gdma_acquire_group_handle(int group_id);
static gdma_pair_t *gdma_acquire_pair_handle(gdma_group_t *group, int pair_id);
static void gdma_release_group_handle(gdma_group_t *group);
static void gdma_release_pair_handle(gdma_pair_t *pair);
static esp_err_t gdma_del_tx_channel(gdma_channel_t *dma_channel);
static esp_err_t gdma_del_rx_channel(gdma_channel_t *dma_channel);
static esp_err_t gdma_install_rx_interrupt(gdma_rx_channel_t *rx_chan);
static esp_err_t gdma_install_tx_interrupt(gdma_tx_channel_t *tx_chan);

// gdma driver platform
static gdma_platform_t s_platform = {
    .spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED,
    .groups = {} // groups will be lazily installed
};

esp_err_t gdma_new_channel(const gdma_channel_alloc_config_t *config, gdma_channel_handle_t *ret_chan)
{
    esp_err_t ret = ESP_OK;
    gdma_tx_channel_t *alloc_tx_channel = NULL;
    gdma_rx_channel_t *alloc_rx_channel = NULL;
    int search_code = 0;
    gdma_pair_t *pair = NULL;
    gdma_group_t *group = NULL;
    ESP_GOTO_ON_FALSE(config && ret_chan, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");

    if (config->flags.reserve_sibling) {
        search_code = SEARCH_REQUEST_RX_CHANNEL | SEARCH_REQUEST_TX_CHANNEL; // search for a pair of channels
    }
    if (config->direction == GDMA_CHANNEL_DIRECTION_TX) {
        search_code |= SEARCH_REQUEST_TX_CHANNEL; // search TX only
        alloc_tx_channel = heap_caps_calloc(1, sizeof(gdma_tx_channel_t), GDMA_MEM_ALLOC_CAPS);
        ESP_GOTO_ON_FALSE(alloc_tx_channel, ESP_ERR_NO_MEM, err, TAG, "no mem for gdma tx channel");
    } else if (config->direction == GDMA_CHANNEL_DIRECTION_RX) {
        search_code |= SEARCH_REQUEST_RX_CHANNEL; // search RX only
        alloc_rx_channel = heap_caps_calloc(1, sizeof(gdma_rx_channel_t), GDMA_MEM_ALLOC_CAPS);
        ESP_GOTO_ON_FALSE(alloc_rx_channel, ESP_ERR_NO_MEM, err, TAG, "no mem for gdma rx channel");
    }

    if (config->sibling_chan) {
        pair = config->sibling_chan->pair;
        ESP_GOTO_ON_FALSE(pair, ESP_ERR_INVALID_ARG, err, TAG, "invalid sibling channel");
        ESP_GOTO_ON_FALSE(config->sibling_chan->direction != config->direction, ESP_ERR_INVALID_ARG, err, TAG, "sibling channel should have a different direction");
        group = pair->group;
        portENTER_CRITICAL(&group->spinlock);
        group->pair_ref_counts[pair->pair_id]++; // channel obtains a reference to pair
        portEXIT_CRITICAL(&group->spinlock);
        goto search_done; // skip the search path below if the user has specified a sibling channel
    }

    for (int i = 0; i < SOC_GDMA_GROUPS && search_code; i++) { // loop to search group
        group = gdma_acquire_group_handle(i);
        ESP_GOTO_ON_FALSE(group, ESP_ERR_NO_MEM, err, TAG, "no mem for group(%d)", i);
        for (int j = 0; j < SOC_GDMA_PAIRS_PER_GROUP && search_code; j++) { // loop to search pair
            pair = gdma_acquire_pair_handle(group, j);
            ESP_GOTO_ON_FALSE(pair, ESP_ERR_NO_MEM, err, TAG, "no mem for pair(%d,%d)", i, j);
            portENTER_CRITICAL(&pair->spinlock);
            if (!(search_code & pair->occupy_code)) { // pair has a suitable slot for the requested channel(s)
                pair->occupy_code |= search_code;
                search_code = 0; // exit search loop
            }
            portEXIT_CRITICAL(&pair->spinlock);
            if (search_code) {
                gdma_release_pair_handle(pair);
                pair = NULL;
            }
        } // loop used to search pair
        if (search_code) {
            gdma_release_group_handle(group);
            group = NULL;
        }
    } // loop used to search group
    ESP_GOTO_ON_FALSE(search_code == 0, ESP_ERR_NOT_FOUND, err, TAG, "no free gdma channel, search code=%d", search_code);
    assert(pair && group); // pair and group handle shouldn't be NULL
search_done:
    // register TX channel
    if (alloc_tx_channel) {
        pair->tx_chan = alloc_tx_channel;
        alloc_tx_channel->base.pair = pair;
        alloc_tx_channel->base.direction = GDMA_CHANNEL_DIRECTION_TX;
        alloc_tx_channel->base.periph_id = GDMA_INVALID_PERIPH_TRIG;
        alloc_tx_channel->base.del = gdma_del_tx_channel; // set channel deletion function
        *ret_chan = &alloc_tx_channel->base; // return the installed channel
    }

    // register RX channel
    if (alloc_rx_channel) {
        pair->rx_chan = alloc_rx_channel;
        alloc_rx_channel->base.pair = pair;
        alloc_rx_channel->base.direction = GDMA_CHANNEL_DIRECTION_RX;
        alloc_rx_channel->base.periph_id = GDMA_INVALID_PERIPH_TRIG;
        alloc_rx_channel->base.del = gdma_del_rx_channel; // set channel deletion function
        *ret_chan = &alloc_rx_channel->base; // return the installed channel
    }

    (*ret_chan)->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
    ESP_LOGD(TAG, "new %s channel (%d,%d) at %p", (config->direction == GDMA_CHANNEL_DIRECTION_TX) ? "tx" : "rx",
             group->group_id, pair->pair_id, *ret_chan);
    return ESP_OK;

err:
    if (alloc_tx_channel) {
        free(alloc_tx_channel);
    }
    if (alloc_rx_channel) {
        free(alloc_rx_channel);
    }
    if (pair) {
        gdma_release_pair_handle(pair);
    }
    if (group) {
        gdma_release_group_handle(group);
    }
    return ret;
}
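
/*
 * A hedged sketch of how a caller can claim both directions of one physical pair,
 * using the `reserve_sibling` flag and the `sibling_chan` field handled above
 * (illustrative only; error handling omitted).
 *
 *     gdma_channel_handle_t tx_chan = NULL;
 *     gdma_channel_handle_t rx_chan = NULL;
 *     gdma_channel_alloc_config_t tx_cfg = {
 *         .direction = GDMA_CHANNEL_DIRECTION_TX,
 *         .flags.reserve_sibling = true, // also occupy the RX slot of the found pair
 *     };
 *     gdma_new_channel(&tx_cfg, &tx_chan);
 *     gdma_channel_alloc_config_t rx_cfg = {
 *         .direction = GDMA_CHANNEL_DIRECTION_RX,
 *         .sibling_chan = tx_chan,       // take the reserved slot on the same pair
 *     };
 *     gdma_new_channel(&rx_cfg, &rx_chan);
 */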

esp_err_t gdma_del_channel(gdma_channel_handle_t dma_chan)
{
    esp_err_t ret = ESP_OK;
    ESP_GOTO_ON_FALSE(dma_chan, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");

    ret = dma_chan->del(dma_chan); // call `gdma_del_tx_channel` or `gdma_del_rx_channel`

err:
    return ret;
}

esp_err_t gdma_get_channel_id(gdma_channel_handle_t dma_chan, int *channel_id)
{
    esp_err_t ret = ESP_OK;
    gdma_pair_t *pair = NULL;
    ESP_GOTO_ON_FALSE(dma_chan && channel_id, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
    pair = dma_chan->pair;
    *channel_id = pair->pair_id;
err:
    return ret;
}

esp_err_t gdma_connect(gdma_channel_handle_t dma_chan, gdma_trigger_t trig_periph)
{
    gdma_pair_t *pair = NULL;
    gdma_group_t *group = NULL;
    ESP_RETURN_ON_FALSE(dma_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
    ESP_RETURN_ON_FALSE(dma_chan->periph_id == GDMA_INVALID_PERIPH_TRIG, ESP_ERR_INVALID_STATE, TAG, "channel is in use by peripheral: %d", dma_chan->periph_id);
    pair = dma_chan->pair;
    group = pair->group;
    bool periph_conflict = false;

    if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_TX) {
        if (trig_periph.instance_id >= 0) {
            portENTER_CRITICAL(&group->spinlock);
            if (group->tx_periph_in_use_mask & (1 << trig_periph.instance_id)) {
                periph_conflict = true;
            } else {
                group->tx_periph_in_use_mask |= (1 << trig_periph.instance_id);
            }
            portEXIT_CRITICAL(&group->spinlock);
        }
        if (!periph_conflict) {
            gdma_ll_tx_reset_channel(group->hal.dev, pair->pair_id); // reset channel
            gdma_ll_tx_connect_to_periph(group->hal.dev, pair->pair_id, trig_periph.periph, trig_periph.instance_id);
        }
    } else {
        if (trig_periph.instance_id >= 0) {
            portENTER_CRITICAL(&group->spinlock);
            if (group->rx_periph_in_use_mask & (1 << trig_periph.instance_id)) {
                periph_conflict = true;
            } else {
                group->rx_periph_in_use_mask |= (1 << trig_periph.instance_id);
            }
            portEXIT_CRITICAL(&group->spinlock);
        }
        if (!periph_conflict) {
            gdma_ll_rx_reset_channel(group->hal.dev, pair->pair_id); // reset channel
            gdma_ll_rx_connect_to_periph(group->hal.dev, pair->pair_id, trig_periph.periph, trig_periph.instance_id);
        }
    }

    ESP_RETURN_ON_FALSE(!periph_conflict, ESP_ERR_INVALID_STATE, TAG, "peripheral %d is already used by another channel", trig_periph.instance_id);
    dma_chan->periph_id = trig_periph.instance_id;
    return ESP_OK;
}

esp_err_t gdma_disconnect(gdma_channel_handle_t dma_chan)
{
    gdma_pair_t *pair = NULL;
    gdma_group_t *group = NULL;
    ESP_RETURN_ON_FALSE(dma_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
    ESP_RETURN_ON_FALSE(dma_chan->periph_id != GDMA_INVALID_PERIPH_TRIG, ESP_ERR_INVALID_STATE, TAG, "no peripheral is connected to the channel");

    pair = dma_chan->pair;
    group = pair->group;
    int save_periph_id = dma_chan->periph_id;

    if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_TX) {
        if (save_periph_id >= 0) {
            portENTER_CRITICAL(&group->spinlock);
            group->tx_periph_in_use_mask &= ~(1 << save_periph_id);
            portEXIT_CRITICAL(&group->spinlock);
        }
        gdma_ll_tx_disconnect_from_periph(group->hal.dev, pair->pair_id);
    } else {
        if (save_periph_id >= 0) {
            portENTER_CRITICAL(&group->spinlock);
            group->rx_periph_in_use_mask &= ~(1 << save_periph_id);
            portEXIT_CRITICAL(&group->spinlock);
        }
        gdma_ll_rx_disconnect_from_periph(group->hal.dev, pair->pair_id);
    }

    dma_chan->periph_id = GDMA_INVALID_PERIPH_TRIG;
    return ESP_OK;
}

esp_err_t gdma_get_free_m2m_trig_id_mask(gdma_channel_handle_t dma_chan, uint32_t *mask)
{
    gdma_pair_t *pair = NULL;
    gdma_group_t *group = NULL;
    ESP_RETURN_ON_FALSE(dma_chan && mask, ESP_ERR_INVALID_ARG, TAG, "invalid argument");

    uint32_t free_mask = GDMA_LL_M2M_FREE_PERIPH_ID_MASK;
    pair = dma_chan->pair;
    group = pair->group;

    portENTER_CRITICAL(&group->spinlock);
    free_mask &= ~(group->tx_periph_in_use_mask);
    free_mask &= ~(group->rx_periph_in_use_mask);
    portEXIT_CRITICAL(&group->spinlock);

    *mask = free_mask;
    return ESP_OK;
}
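
/*
 * A hedged sketch of consuming the mask returned above to pick a free
 * memory-to-memory trigger instance (assumes the M2M trigger enum and the
 * `GDMA_MAKE_TRIGGER` helper from the public GDMA header; illustrative only).
 *
 *     uint32_t free_m2m_ids = 0;
 *     gdma_get_free_m2m_trig_id_mask(tx_chan, &free_m2m_ids);
 *     if (free_m2m_ids) {
 *         int trig_id = __builtin_ctz(free_m2m_ids); // lowest free instance id
 *         gdma_connect(tx_chan, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_M2M, trig_id));
 *     }
 */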

esp_err_t gdma_set_transfer_ability(gdma_channel_handle_t dma_chan, const gdma_transfer_ability_t *ability)
{
    esp_err_t ret = ESP_OK;
    gdma_pair_t *pair = NULL;
    gdma_group_t *group = NULL;
    bool en_burst = true;
    ESP_GOTO_ON_FALSE(dma_chan && ability, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
    pair = dma_chan->pair;
    group = pair->group;
    size_t sram_alignment = ability->sram_trans_align;
    size_t psram_alignment = ability->psram_trans_align;
    // alignment should be 2^n
    ESP_GOTO_ON_FALSE((sram_alignment & (sram_alignment - 1)) == 0, ESP_ERR_INVALID_ARG, err, TAG, "invalid sram alignment: %zu", sram_alignment);

#if SOC_GDMA_SUPPORT_PSRAM
    uint32_t data_cache_line_size = cache_hal_get_cache_line_size(CACHE_TYPE_DATA);
    int block_size_index = 0;
    switch (psram_alignment) {
    case 64: // 64 Bytes alignment
        block_size_index = GDMA_LL_EXT_MEM_BK_SIZE_64B;
        break;
    case 32: // 32 Bytes alignment
        block_size_index = GDMA_LL_EXT_MEM_BK_SIZE_32B;
        break;
    case 16: // 16 Bytes alignment
        block_size_index = GDMA_LL_EXT_MEM_BK_SIZE_16B;
        break;
    case 0: // no alignment requirement
        block_size_index = GDMA_LL_EXT_MEM_BK_SIZE_16B;
        psram_alignment = data_cache_line_size; // fall back to the psram data cache line size
        break;
    default:
        ESP_GOTO_ON_FALSE(false, ESP_ERR_INVALID_ARG, err, TAG, "invalid psram alignment: %zu", psram_alignment);
        break;
    }
    ESP_GOTO_ON_FALSE(((psram_alignment % data_cache_line_size) == 0), ESP_ERR_INVALID_ARG, err, TAG, "psram alignment (%d)B should be multiple of the data cache line size (%d)B", psram_alignment, data_cache_line_size);
#endif // #if SOC_GDMA_SUPPORT_PSRAM

    if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_TX) {
        // TX channel can always enable burst mode, no matter the data alignment
        gdma_ll_tx_enable_data_burst(group->hal.dev, pair->pair_id, true);
        gdma_ll_tx_enable_descriptor_burst(group->hal.dev, pair->pair_id, true);
#if SOC_GDMA_SUPPORT_PSRAM
        gdma_ll_tx_set_block_size_psram(group->hal.dev, pair->pair_id, block_size_index);
#endif // #if SOC_GDMA_SUPPORT_PSRAM
    } else {
        // RX channel burst mode depends on the specific data alignment
        en_burst = sram_alignment >= 4;
        gdma_ll_rx_enable_data_burst(group->hal.dev, pair->pair_id, en_burst);
        gdma_ll_rx_enable_descriptor_burst(group->hal.dev, pair->pair_id, en_burst);
#if SOC_GDMA_SUPPORT_PSRAM
        gdma_ll_rx_set_block_size_psram(group->hal.dev, pair->pair_id, block_size_index);
#endif // #if SOC_GDMA_SUPPORT_PSRAM
    }

    dma_chan->sram_alignment = sram_alignment;
    dma_chan->psram_alignment = psram_alignment;
    ESP_LOGD(TAG, "%s channel (%d,%d), (%u:%u) bytes aligned, burst %s", dma_chan->direction == GDMA_CHANNEL_DIRECTION_TX ? "tx" : "rx",
             group->group_id, pair->pair_id, sram_alignment, psram_alignment, en_burst ? "enabled" : "disabled");
err:
    return ret;
}
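
/*
 * A hedged example configuration for the function above: a channel moving data
 * between internal SRAM and PSRAM; the concrete values are illustrative only.
 *
 *     gdma_transfer_ability_t ability = {
 *         .sram_trans_align = 4,   // power of two; >= 4 lets an RX channel enable burst mode
 *         .psram_trans_align = 64, // 0/16/32/64, and a multiple of the data cache line size
 *     };
 *     gdma_set_transfer_ability(dma_chan, &ability);
 */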

esp_err_t gdma_apply_strategy(gdma_channel_handle_t dma_chan, const gdma_strategy_config_t *config)
{
    esp_err_t ret = ESP_OK;
    gdma_pair_t *pair = NULL;
    gdma_group_t *group = NULL;
    ESP_GOTO_ON_FALSE(dma_chan && config, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
    pair = dma_chan->pair;
    group = pair->group;

    if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_TX) {
        gdma_ll_tx_enable_owner_check(group->hal.dev, pair->pair_id, config->owner_check);
        gdma_ll_tx_enable_auto_write_back(group->hal.dev, pair->pair_id, config->auto_update_desc);
    } else {
        gdma_ll_rx_enable_owner_check(group->hal.dev, pair->pair_id, config->owner_check);
    }

err:
    return ret;
}

esp_err_t gdma_set_priority(gdma_channel_handle_t dma_chan, uint32_t priority)
{
    gdma_pair_t *pair = NULL;
    gdma_group_t *group = NULL;
    ESP_RETURN_ON_FALSE(dma_chan && priority <= GDMA_LL_CHANNEL_MAX_PRIORITY, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
    pair = dma_chan->pair;
    group = pair->group;

    if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_TX) {
        gdma_ll_tx_set_priority(group->hal.dev, pair->pair_id, priority);
    } else {
        gdma_ll_rx_set_priority(group->hal.dev, pair->pair_id, priority);
    }

    return ESP_OK;
}

esp_err_t gdma_register_tx_event_callbacks(gdma_channel_handle_t dma_chan, gdma_tx_event_callbacks_t *cbs, void *user_data)
{
    esp_err_t ret = ESP_OK;
    gdma_pair_t *pair = NULL;
    gdma_group_t *group = NULL;
    ESP_GOTO_ON_FALSE(dma_chan && cbs && dma_chan->direction == GDMA_CHANNEL_DIRECTION_TX, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
    pair = dma_chan->pair;
    group = pair->group;
    gdma_tx_channel_t *tx_chan = __containerof(dma_chan, gdma_tx_channel_t, base);

#if CONFIG_GDMA_ISR_IRAM_SAFE
    if (cbs->on_trans_eof) {
        ESP_GOTO_ON_FALSE(esp_ptr_in_iram(cbs->on_trans_eof), ESP_ERR_INVALID_ARG, err, TAG, "on_trans_eof not in IRAM");
    }
    if (user_data) {
        ESP_GOTO_ON_FALSE(esp_ptr_internal(user_data), ESP_ERR_INVALID_ARG, err, TAG, "user context not in internal RAM");
    }
#endif // CONFIG_GDMA_ISR_IRAM_SAFE

    // lazily install the interrupt service
    ESP_GOTO_ON_ERROR(gdma_install_tx_interrupt(tx_chan), err, TAG, "install interrupt service failed");

    // enable/disable GDMA interrupt events for the TX channel
    portENTER_CRITICAL(&pair->spinlock);
    gdma_ll_tx_enable_interrupt(group->hal.dev, pair->pair_id, GDMA_LL_EVENT_TX_EOF, cbs->on_trans_eof != NULL);
    portEXIT_CRITICAL(&pair->spinlock);

    tx_chan->on_trans_eof = cbs->on_trans_eof;
    tx_chan->user_data = user_data;

    ESP_GOTO_ON_ERROR(esp_intr_enable(dma_chan->intr), err, TAG, "enable interrupt failed");

err:
    return ret;
}

esp_err_t gdma_register_rx_event_callbacks(gdma_channel_handle_t dma_chan, gdma_rx_event_callbacks_t *cbs, void *user_data)
{
    esp_err_t ret = ESP_OK;
    gdma_pair_t *pair = NULL;
    gdma_group_t *group = NULL;
    ESP_GOTO_ON_FALSE(dma_chan && cbs && dma_chan->direction == GDMA_CHANNEL_DIRECTION_RX, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
    pair = dma_chan->pair;
    group = pair->group;
    gdma_rx_channel_t *rx_chan = __containerof(dma_chan, gdma_rx_channel_t, base);

#if CONFIG_GDMA_ISR_IRAM_SAFE
    if (cbs->on_recv_eof) {
        ESP_GOTO_ON_FALSE(esp_ptr_in_iram(cbs->on_recv_eof), ESP_ERR_INVALID_ARG, err, TAG, "on_recv_eof not in IRAM");
    }
    if (user_data) {
        ESP_GOTO_ON_FALSE(esp_ptr_internal(user_data), ESP_ERR_INVALID_ARG, err, TAG, "user context not in internal RAM");
    }
#endif // CONFIG_GDMA_ISR_IRAM_SAFE

    // lazily install the interrupt service
    ESP_GOTO_ON_ERROR(gdma_install_rx_interrupt(rx_chan), err, TAG, "install interrupt service failed");

    // enable/disable GDMA interrupt events for the RX channel
    portENTER_CRITICAL(&pair->spinlock);
    gdma_ll_rx_enable_interrupt(group->hal.dev, pair->pair_id, GDMA_LL_EVENT_RX_SUC_EOF, cbs->on_recv_eof != NULL);
    portEXIT_CRITICAL(&pair->spinlock);

    rx_chan->on_recv_eof = cbs->on_recv_eof;
    rx_chan->user_data = user_data;

    ESP_GOTO_ON_ERROR(esp_intr_enable(dma_chan->intr), err, TAG, "enable interrupt failed");

err:
    return ret;
}
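
/*
 * A hedged sketch of a receive-EOF callback registered via the function above.
 * The signature mirrors how `on_recv_eof` is invoked in `gdma_default_rx_isr`
 * below; `done_queue` is a hypothetical FreeRTOS queue. Note that with
 * CONFIG_GDMA_ISR_IRAM_SAFE the callback must reside in IRAM and `user_data`
 * in internal RAM, as checked above.
 *
 *     static bool IRAM_ATTR example_rx_eof_cb(gdma_channel_handle_t dma_chan,
 *                                             gdma_event_data_t *event_data, void *user_data)
 *     {
 *         BaseType_t task_woken = pdFALSE;
 *         QueueHandle_t done_queue = (QueueHandle_t)user_data;
 *         xQueueSendFromISR(done_queue, &event_data->rx_eof_desc_addr, &task_woken);
 *         return task_woken == pdTRUE; // true asks the caller ISR to yield
 *     }
 *
 *     gdma_rx_event_callbacks_t cbs = {
 *         .on_recv_eof = example_rx_eof_cb,
 *     };
 *     gdma_register_rx_event_callbacks(rx_chan, &cbs, done_queue);
 */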

esp_err_t gdma_start(gdma_channel_handle_t dma_chan, intptr_t desc_base_addr)
{
    gdma_pair_t *pair = NULL;
    gdma_group_t *group = NULL;
    ESP_RETURN_ON_FALSE_ISR(dma_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
    ESP_RETURN_ON_FALSE_ISR(dma_chan->flags.start_stop_by_etm == false, ESP_ERR_INVALID_STATE, TAG, "channel is controlled by ETM");
    pair = dma_chan->pair;
    group = pair->group;

    portENTER_CRITICAL_SAFE(&dma_chan->spinlock);
    if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_RX) {
        gdma_ll_rx_set_desc_addr(group->hal.dev, pair->pair_id, desc_base_addr);
        gdma_ll_rx_start(group->hal.dev, pair->pair_id);
    } else {
        gdma_ll_tx_set_desc_addr(group->hal.dev, pair->pair_id, desc_base_addr);
        gdma_ll_tx_start(group->hal.dev, pair->pair_id);
    }
    portEXIT_CRITICAL_SAFE(&dma_chan->spinlock);

    return ESP_OK;
}

esp_err_t gdma_stop(gdma_channel_handle_t dma_chan)
{
    gdma_pair_t *pair = NULL;
    gdma_group_t *group = NULL;
    ESP_RETURN_ON_FALSE_ISR(dma_chan, ESP_ERR_INVALID_ARG, TAG, "invalid argument");
    ESP_RETURN_ON_FALSE_ISR(dma_chan->flags.start_stop_by_etm == false, ESP_ERR_INVALID_STATE, TAG, "channel is controlled by ETM");
    pair = dma_chan->pair;
    group = pair->group;

    portENTER_CRITICAL_SAFE(&dma_chan->spinlock);
    if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_RX) {
        gdma_ll_rx_stop(group->hal.dev, pair->pair_id);
    } else {
        gdma_ll_tx_stop(group->hal.dev, pair->pair_id);
    }
    portEXIT_CRITICAL_SAFE(&dma_chan->spinlock);

    return ESP_OK;
}

esp_err_t gdma_append(gdma_channel_handle_t dma_chan)
{
    esp_err_t ret = ESP_OK;
    gdma_pair_t *pair = NULL;
    gdma_group_t *group = NULL;
    ESP_GOTO_ON_FALSE_ISR(dma_chan, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
    pair = dma_chan->pair;
    group = pair->group;

    portENTER_CRITICAL_SAFE(&dma_chan->spinlock);
    if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_RX) {
        gdma_ll_rx_restart(group->hal.dev, pair->pair_id);
    } else {
        gdma_ll_tx_restart(group->hal.dev, pair->pair_id);
    }
    portEXIT_CRITICAL_SAFE(&dma_chan->spinlock);

err:
    return ret;
}

esp_err_t gdma_reset(gdma_channel_handle_t dma_chan)
{
    esp_err_t ret = ESP_OK;
    gdma_pair_t *pair = NULL;
    gdma_group_t *group = NULL;
    ESP_GOTO_ON_FALSE_ISR(dma_chan, ESP_ERR_INVALID_ARG, err, TAG, "invalid argument");
    pair = dma_chan->pair;
    group = pair->group;

    portENTER_CRITICAL_SAFE(&dma_chan->spinlock);
    if (dma_chan->direction == GDMA_CHANNEL_DIRECTION_RX) {
        gdma_ll_rx_reset_channel(group->hal.dev, pair->pair_id);
    } else {
        gdma_ll_tx_reset_channel(group->hal.dev, pair->pair_id);
    }
    portEXIT_CRITICAL_SAFE(&dma_chan->spinlock);

err:
    return ret;
}

static void gdma_release_group_handle(gdma_group_t *group)
{
    int group_id = group->group_id;
    bool do_deinitialize = false;

    portENTER_CRITICAL(&s_platform.spinlock);
    s_platform.group_ref_counts[group_id]--;
    if (s_platform.group_ref_counts[group_id] == 0) {
        assert(s_platform.groups[group_id]);
        do_deinitialize = true;
        s_platform.groups[group_id] = NULL; // deregister from the platform
        gdma_ll_enable_clock(group->hal.dev, false);
        periph_module_disable(gdma_periph_signals.groups[group_id].module);
    }
    portEXIT_CRITICAL(&s_platform.spinlock);

    if (do_deinitialize) {
        free(group);
        ESP_LOGD(TAG, "del group %d", group_id);
    }
}

static gdma_group_t *gdma_acquire_group_handle(int group_id)
{
    bool new_group = false;
    gdma_group_t *group = NULL;
    gdma_group_t *pre_alloc_group = heap_caps_calloc(1, sizeof(gdma_group_t), GDMA_MEM_ALLOC_CAPS);
    if (!pre_alloc_group) {
        goto out;
    }
    portENTER_CRITICAL(&s_platform.spinlock);
    if (!s_platform.groups[group_id]) {
        new_group = true;
        group = pre_alloc_group;
        s_platform.groups[group_id] = group; // register to platform
        group->group_id = group_id;
        group->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
        periph_module_enable(gdma_periph_signals.groups[group_id].module); // enable APB to access GDMA registers
        gdma_hal_init(&group->hal, group_id);       // initialize HAL context
        gdma_ll_enable_clock(group->hal.dev, true); // enable gdma clock
    } else {
        group = s_platform.groups[group_id];
    }
    // someone acquiring the group handle means a new object refers to this group
    s_platform.group_ref_counts[group_id]++;
    portEXIT_CRITICAL(&s_platform.spinlock);

    if (new_group) {
        ESP_LOGD(TAG, "new group (%d) at %p", group->group_id, group);
    } else {
        free(pre_alloc_group);
    }
out:
    return group;
}

static void gdma_release_pair_handle(gdma_pair_t *pair)
{
    gdma_group_t *group = pair->group;
    int pair_id = pair->pair_id;
    bool do_deinitialize = false;

    portENTER_CRITICAL(&group->spinlock);
    group->pair_ref_counts[pair_id]--;
    if (group->pair_ref_counts[pair_id] == 0) {
        assert(group->pairs[pair_id]);
        do_deinitialize = true;
        group->pairs[pair_id] = NULL; // deregister from the group
    }
    portEXIT_CRITICAL(&group->spinlock);

    if (do_deinitialize) {
        free(pair);
#if CONFIG_PM_ENABLE && SOC_PAU_SUPPORTED
        gdma_sleep_retention_deinit(group->group_id, pair_id);
#endif
        ESP_LOGD(TAG, "del pair (%d,%d)", group->group_id, pair_id);
        gdma_release_group_handle(group);
    }
}

static gdma_pair_t *gdma_acquire_pair_handle(gdma_group_t *group, int pair_id)
{
    bool new_pair = false;
    gdma_pair_t *pair = NULL;
    gdma_pair_t *pre_alloc_pair = heap_caps_calloc(1, sizeof(gdma_pair_t), GDMA_MEM_ALLOC_CAPS);
    if (!pre_alloc_pair) {
        goto out;
    }
    portENTER_CRITICAL(&group->spinlock);
    if (!group->pairs[pair_id]) {
        new_pair = true;
        pair = pre_alloc_pair;
        group->pairs[pair_id] = pair; // register to group
        pair->group = group;
        pair->pair_id = pair_id;
        pair->spinlock = (portMUX_TYPE)portMUX_INITIALIZER_UNLOCKED;
    } else {
        pair = group->pairs[pair_id];
    }
    // someone acquiring the pair handle means a new object refers to this pair
    group->pair_ref_counts[pair_id]++;
    portEXIT_CRITICAL(&group->spinlock);

    if (new_pair) {
        portENTER_CRITICAL(&s_platform.spinlock);
        s_platform.group_ref_counts[group->group_id]++; // pair obtains a reference to group
        portEXIT_CRITICAL(&s_platform.spinlock);

#if CONFIG_PM_ENABLE && SOC_PAU_SUPPORTED
        gdma_sleep_retention_init(group->group_id, pair->pair_id);
#endif
        ESP_LOGD(TAG, "new pair (%d,%d) at %p", group->group_id, pair->pair_id, pair);
    } else {
        free(pre_alloc_pair);
    }
out:
    return pair;
}

static esp_err_t gdma_del_tx_channel(gdma_channel_t *dma_channel)
{
    gdma_pair_t *pair = dma_channel->pair;
    gdma_group_t *group = pair->group;
    int pair_id = pair->pair_id;
    int group_id = group->group_id;
    gdma_tx_channel_t *tx_chan = __containerof(dma_channel, gdma_tx_channel_t, base);
    portENTER_CRITICAL(&pair->spinlock);
    pair->tx_chan = NULL;
    pair->occupy_code &= ~SEARCH_REQUEST_TX_CHANNEL;
    portEXIT_CRITICAL(&pair->spinlock);

    if (dma_channel->intr) {
        esp_intr_free(dma_channel->intr);
        portENTER_CRITICAL(&pair->spinlock);
        gdma_ll_tx_enable_interrupt(group->hal.dev, pair_id, UINT32_MAX, false); // disable all interrupt events
        gdma_ll_tx_clear_interrupt_status(group->hal.dev, pair_id, UINT32_MAX);  // clear all pending events
        portEXIT_CRITICAL(&pair->spinlock);
        ESP_LOGD(TAG, "uninstall interrupt service for tx channel (%d,%d)", group_id, pair_id);
    }

    gdma_ll_tx_set_priority(group->hal.dev, pair_id, 0); // reset the priority to 0 (lowest)

    free(tx_chan);
    ESP_LOGD(TAG, "del tx channel (%d,%d)", group_id, pair_id);
    // the channel holds a reference on the pair, release it now
    gdma_release_pair_handle(pair);
    return ESP_OK;
}

static esp_err_t gdma_del_rx_channel(gdma_channel_t *dma_channel)
{
    gdma_pair_t *pair = dma_channel->pair;
    gdma_group_t *group = pair->group;
    int pair_id = pair->pair_id;
    int group_id = group->group_id;
    gdma_rx_channel_t *rx_chan = __containerof(dma_channel, gdma_rx_channel_t, base);
    portENTER_CRITICAL(&pair->spinlock);
    pair->rx_chan = NULL;
    pair->occupy_code &= ~SEARCH_REQUEST_RX_CHANNEL;
    portEXIT_CRITICAL(&pair->spinlock);

    if (dma_channel->intr) {
        esp_intr_free(dma_channel->intr);
        portENTER_CRITICAL(&pair->spinlock);
        gdma_ll_rx_enable_interrupt(group->hal.dev, pair_id, UINT32_MAX, false); // disable all interrupt events
        gdma_ll_rx_clear_interrupt_status(group->hal.dev, pair_id, UINT32_MAX);  // clear all pending events
        portEXIT_CRITICAL(&pair->spinlock);
        ESP_LOGD(TAG, "uninstall interrupt service for rx channel (%d,%d)", group_id, pair_id);
    }

    gdma_ll_rx_set_priority(group->hal.dev, pair_id, 0); // reset the priority to 0 (lowest)

    free(rx_chan);
    ESP_LOGD(TAG, "del rx channel (%d,%d)", group_id, pair_id);
    // the channel holds a reference on the pair, release it now
    gdma_release_pair_handle(pair);
    return ESP_OK;
}

static void IRAM_ATTR gdma_default_rx_isr(void *args)
{
    gdma_rx_channel_t *rx_chan = (gdma_rx_channel_t *)args;
    gdma_pair_t *pair = rx_chan->base.pair;
    gdma_group_t *group = pair->group;
    bool need_yield = false;
    // clear pending interrupt events
    uint32_t intr_status = gdma_ll_rx_get_interrupt_status(group->hal.dev, pair->pair_id);
    gdma_ll_rx_clear_interrupt_status(group->hal.dev, pair->pair_id, intr_status);

    if (intr_status & GDMA_LL_EVENT_RX_SUC_EOF) {
        if (rx_chan->on_recv_eof) {
            uint32_t eof_addr = gdma_ll_rx_get_success_eof_desc_addr(group->hal.dev, pair->pair_id);
            gdma_event_data_t edata = {
                .rx_eof_desc_addr = eof_addr
            };
            if (rx_chan->on_recv_eof(&rx_chan->base, &edata, rx_chan->user_data)) {
                need_yield = true;
            }
        }
    }

    if (need_yield) {
        portYIELD_FROM_ISR();
    }
}

static void IRAM_ATTR gdma_default_tx_isr(void *args)
{
    gdma_tx_channel_t *tx_chan = (gdma_tx_channel_t *)args;
    gdma_pair_t *pair = tx_chan->base.pair;
    gdma_group_t *group = pair->group;
    bool need_yield = false;
    // clear pending interrupt events
    uint32_t intr_status = gdma_ll_tx_get_interrupt_status(group->hal.dev, pair->pair_id);
    gdma_ll_tx_clear_interrupt_status(group->hal.dev, pair->pair_id, intr_status);

    if (intr_status & GDMA_LL_EVENT_TX_EOF) {
        if (tx_chan->on_trans_eof) {
            uint32_t eof_addr = gdma_ll_tx_get_eof_desc_addr(group->hal.dev, pair->pair_id);
            gdma_event_data_t edata = {
                .tx_eof_desc_addr = eof_addr
            };
            if (tx_chan->on_trans_eof(&tx_chan->base, &edata, tx_chan->user_data)) {
                need_yield = true;
            }
        }
    }

    if (need_yield) {
        portYIELD_FROM_ISR();
    }
}

static esp_err_t gdma_install_rx_interrupt(gdma_rx_channel_t *rx_chan)
{
    esp_err_t ret = ESP_OK;
    gdma_pair_t *pair = rx_chan->base.pair;
    gdma_group_t *group = pair->group;
    // pre-allocate an interrupt handle, with the handler disabled
    int isr_flags = GDMA_INTR_ALLOC_FLAGS;
#if SOC_GDMA_TX_RX_SHARE_INTERRUPT
    isr_flags |= ESP_INTR_FLAG_SHARED | ESP_INTR_FLAG_LOWMED;
#endif
    intr_handle_t intr = NULL;
    ret = esp_intr_alloc_intrstatus(gdma_periph_signals.groups[group->group_id].pairs[pair->pair_id].rx_irq_id, isr_flags,
                                    (uint32_t)gdma_ll_rx_get_interrupt_status_reg(group->hal.dev, pair->pair_id), GDMA_LL_RX_EVENT_MASK,
                                    gdma_default_rx_isr, rx_chan, &intr);
    ESP_GOTO_ON_ERROR(ret, err, TAG, "alloc interrupt failed");
    rx_chan->base.intr = intr;

    portENTER_CRITICAL(&pair->spinlock);
    gdma_ll_rx_enable_interrupt(group->hal.dev, pair->pair_id, UINT32_MAX, false); // disable all interrupt events
    gdma_ll_rx_clear_interrupt_status(group->hal.dev, pair->pair_id, UINT32_MAX);  // clear all pending events
    portEXIT_CRITICAL(&pair->spinlock);
    ESP_LOGD(TAG, "install interrupt service for rx channel (%d,%d)", group->group_id, pair->pair_id);

err:
    return ret;
}

static esp_err_t gdma_install_tx_interrupt(gdma_tx_channel_t *tx_chan)
{
    esp_err_t ret = ESP_OK;
    gdma_pair_t *pair = tx_chan->base.pair;
    gdma_group_t *group = pair->group;
    // pre-allocate an interrupt handle, with the handler disabled
    int isr_flags = GDMA_INTR_ALLOC_FLAGS;
#if SOC_GDMA_TX_RX_SHARE_INTERRUPT
    isr_flags |= ESP_INTR_FLAG_SHARED | ESP_INTR_FLAG_LOWMED;
#endif
    intr_handle_t intr = NULL;
    ret = esp_intr_alloc_intrstatus(gdma_periph_signals.groups[group->group_id].pairs[pair->pair_id].tx_irq_id, isr_flags,
                                    (uint32_t)gdma_ll_tx_get_interrupt_status_reg(group->hal.dev, pair->pair_id), GDMA_LL_TX_EVENT_MASK,
                                    gdma_default_tx_isr, tx_chan, &intr);
    ESP_GOTO_ON_ERROR(ret, err, TAG, "alloc interrupt failed");
    tx_chan->base.intr = intr;

    portENTER_CRITICAL(&pair->spinlock);
    gdma_ll_tx_enable_interrupt(group->hal.dev, pair->pair_id, UINT32_MAX, false); // disable all interrupt events
    gdma_ll_tx_clear_interrupt_status(group->hal.dev, pair->pair_id, UINT32_MAX);  // clear all pending events
    portEXIT_CRITICAL(&pair->spinlock);
    ESP_LOGD(TAG, "install interrupt service for tx channel (%d,%d)", group->group_id, pair->pair_id);

err:
    return ret;
}