/*
 * SPDX-FileCopyrightText: 2020-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include "freertos/FreeRTOS.h"
#include "soc/periph_defs.h"
#include "soc/soc_memory_layout.h"
#include "soc/soc_caps.h"
#include "hal/gdma_ll.h"
#include "hal/gdma_hal.h"
#include "esp_private/periph_ctrl.h"
#include "esp_log.h"
#include "esp_attr.h"
#include "esp_err.h"
#include "esp_async_memcpy_impl.h"
#if SOC_APM_SUPPORTED
#include "hal/apm_ll.h"
#endif

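// GDMA RX EOF interrupt callback: record the EOF descriptor address and notify
// the upper async_memcpy driver that one copy transaction has finished.
// The return value tells the GDMA ISR whether a task switch should be requested.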
IRAM_ATTR static bool async_memcpy_impl_rx_eof_callback(gdma_channel_handle_t dma_chan, gdma_event_data_t *event_data, void *user_data)
{
    async_memcpy_impl_t *mcp_impl = (async_memcpy_impl_t *)user_data;
    mcp_impl->rx_eof_addr = event_data->rx_eof_desc_addr;

    async_memcpy_isr_on_rx_done_event(mcp_impl);
    return mcp_impl->isr_need_yield;
}

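// Allocate a TX/RX GDMA channel pair, bind both channels to a free M2M trigger,
// configure transfer alignment and descriptor strategy, and register the RX EOF callback.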
esp_err_t async_memcpy_impl_init(async_memcpy_impl_t *impl)
{
    esp_err_t ret = ESP_OK;
    // create TX channel and reserve sibling channel for future use
    gdma_channel_alloc_config_t tx_alloc_config = {
        .flags.reserve_sibling = 1,
        .direction = GDMA_CHANNEL_DIRECTION_TX,
    };
    ret = gdma_new_channel(&tx_alloc_config, &impl->tx_channel);
    if (ret != ESP_OK) {
        goto err;
    }

    // create RX channel and specify that it must reside in the same pair as the TX channel
    gdma_channel_alloc_config_t rx_alloc_config = {
        .direction = GDMA_CHANNEL_DIRECTION_RX,
        .sibling_chan = impl->tx_channel,
    };
    ret = gdma_new_channel(&rx_alloc_config, &impl->rx_channel);
    if (ret != ESP_OK) {
        goto err;
    }

    gdma_trigger_t m2m_trigger = GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_M2M, 0);
    // get a free DMA trigger ID for memory copy
    uint32_t free_m2m_id_mask = 0;
    gdma_get_free_m2m_trig_id_mask(impl->tx_channel, &free_m2m_id_mask);
    m2m_trigger.instance_id = __builtin_ctz(free_m2m_id_mask);
    gdma_connect(impl->rx_channel, m2m_trigger);
    gdma_connect(impl->tx_channel, m2m_trigger);

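    // descriptor strategy: let the DMA engine update the descriptor owner field
    // automatically and check descriptor ownership before using a descriptor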
    gdma_strategy_config_t strategy_config = {
        .auto_update_desc = true,
        .owner_check = true,
    };

    gdma_transfer_ability_t transfer_ability = {
        .sram_trans_align = impl->sram_trans_align,
        .psram_trans_align = impl->psram_trans_align,
    };
    ret = gdma_set_transfer_ability(impl->tx_channel, &transfer_ability);
    if (ret != ESP_OK) {
        goto err;
    }
    ret = gdma_set_transfer_ability(impl->rx_channel, &transfer_ability);
    if (ret != ESP_OK) {
        goto err;
    }
    gdma_apply_strategy(impl->tx_channel, &strategy_config);
    gdma_apply_strategy(impl->rx_channel, &strategy_config);

#if SOC_APM_SUPPORTED
    // APM strategy: trusted mode
    // TODO: IDF-5354 GDMA used for M2M only needs read and write permissions; the execute permission should be disabled via the APM controller
    apm_tee_ll_set_master_secure_mode(APM_LL_MASTER_GDMA + m2m_trigger.instance_id, APM_LL_SECURE_MODE_TEE);
#endif // SOC_APM_SUPPORTED

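    // register the RX EOF callback so the driver is notified when each copy transaction completes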
    gdma_rx_event_callbacks_t cbs = {
        .on_recv_eof = async_memcpy_impl_rx_eof_callback
    };
    ret = gdma_register_rx_event_callbacks(impl->rx_channel, &cbs, impl);

err:
    return ret;
}

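// Disconnect both channels from the M2M trigger and free them.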
esp_err_t async_memcpy_impl_deinit(async_memcpy_impl_t *impl)
{
    gdma_disconnect(impl->rx_channel);
    gdma_disconnect(impl->tx_channel);
    gdma_del_channel(impl->rx_channel);
    gdma_del_channel(impl->tx_channel);
    return ESP_OK;
}

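// Start the DMA engine on the given RX (inlink) and TX (outlink) descriptor lists.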
esp_err_t async_memcpy_impl_start(async_memcpy_impl_t *impl, intptr_t outlink_base, intptr_t inlink_base)
{
    gdma_start(impl->rx_channel, inlink_base);
    gdma_start(impl->tx_channel, outlink_base);
    return ESP_OK;
}

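// Stop both DMA channels.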
esp_err_t async_memcpy_impl_stop(async_memcpy_impl_t *impl)
{
    gdma_stop(impl->rx_channel);
    gdma_stop(impl->tx_channel);
    return ESP_OK;
}

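// Tell the DMA engine that new descriptors have been appended to the previously started lists.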
esp_err_t async_memcpy_impl_restart(async_memcpy_impl_t *impl)
{
    gdma_append(impl->rx_channel);
    gdma_append(impl->tx_channel);
    return ESP_OK;
}

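// Create an ETM event handle for the "copy done" event, backed by the RX channel's EOF event.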
esp_err_t async_memcpy_impl_new_etm_event(async_memcpy_impl_t *impl, async_memcpy_etm_event_t event_type, esp_etm_event_handle_t *out_event)
{
    if (event_type == ASYNC_MEMCPY_ETM_EVENT_COPY_DONE) {
        // use the RX EOF to indicate the async memcpy done event
        gdma_etm_event_config_t etm_event_conf = {
            .event_type = GDMA_ETM_EVENT_EOF,
        };
        return gdma_new_etm_event(impl->rx_channel, &etm_event_conf, out_event);
    } else {
        return ESP_ERR_NOT_SUPPORTED;
    }
}

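// Check whether the destination buffer meets the configured SRAM/PSRAM transfer alignment.
// Only the destination address is validated here; the src parameter is not checked.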
bool async_memcpy_impl_is_buffer_address_valid(async_memcpy_impl_t *impl, void *src, void *dst)
{
    bool valid = true;
    if (esp_ptr_external_ram(dst)) {
        if (impl->psram_trans_align) {
            valid = valid && (((intptr_t)dst & (impl->psram_trans_align - 1)) == 0);
        }
    } else {
        if (impl->sram_trans_align) {
            valid = valid && (((intptr_t)dst & (impl->sram_trans_align - 1)) == 0);
        }
    }
    return valid;
}