/*
 * SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <stdlib.h>
#include <string.h>
#include <sys/lock.h>
#include <sys/cdefs.h>
#include "esp_compiler.h"
#include "esp_intr_alloc.h"
#include "esp_log.h"
#include "esp_check.h"
#include "driver/gpio.h"
#include "driver/periph_ctrl.h"
#include "driver/rmt.h"
#include "freertos/FreeRTOS.h"
#include "freertos/task.h"
#include "freertos/semphr.h"
#include "freertos/ringbuf.h"
#include "soc/soc_memory_layout.h"
#include "soc/rmt_periph.h"
#include "soc/rtc.h"
#include "hal/rmt_hal.h"
#include "hal/rmt_ll.h"
#include "hal/gpio_hal.h"
#include "esp_rom_gpio.h"

#define RMT_CHANNEL_ERROR_STR "RMT CHANNEL ERR"
#define RMT_ADDR_ERROR_STR "RMT ADDRESS ERR"
#define RMT_MEM_CNT_ERROR_STR "RMT MEM BLOCK NUM ERR"
#define RMT_CARRIER_ERROR_STR "RMT CARRIER LEVEL ERR"
#define RMT_MEM_OWNER_ERROR_STR "RMT MEM OWNER ERR"
#define RMT_BASECLK_ERROR_STR "RMT BASECLK ERR"
#define RMT_WR_MEM_OVF_ERROR_STR "RMT WR MEM OVERFLOW"
#define RMT_GPIO_ERROR_STR "RMT GPIO ERROR"
#define RMT_MODE_ERROR_STR "RMT MODE ERROR"
#define RMT_CLK_DIV_ERROR_STR "RMT CLK DIV ERR"
#define RMT_DRIVER_ERROR_STR "RMT DRIVER ERR"
#define RMT_DRIVER_LENGTH_ERROR_STR "RMT PARAM LEN ERROR"
#define RMT_PSRAM_BUFFER_WARN_STR "Using buffer allocated from psram"
#define RMT_TRANSLATOR_NULL_STR "RMT translator is null"
#define RMT_TRANSLATOR_UNINIT_STR "RMT translator not init"
#define RMT_PARAM_ERR_STR "RMT param error"

static const char *TAG = "rmt";

// Spinlock for protecting concurrent register-level access only
#define RMT_ENTER_CRITICAL()  portENTER_CRITICAL_SAFE(&(rmt_contex.rmt_spinlock))
#define RMT_EXIT_CRITICAL()   portEXIT_CRITICAL_SAFE(&(rmt_contex.rmt_spinlock))

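// Channel number encoding: on targets with dedicated RX channels, TX channels
// occupy the low end of the unified channel number space and RX channels the
// high end; the macros below test which half a number falls in and convert a
// unified RX channel number to/from the hardware's RX channel index.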
#define RMT_RX_CHANNEL_ENCODING_START (SOC_RMT_CHANNELS_PER_GROUP-SOC_RMT_TX_CANDIDATES_PER_GROUP)
#define RMT_TX_CHANNEL_ENCODING_END   (SOC_RMT_TX_CANDIDATES_PER_GROUP-1)

#define RMT_IS_RX_CHANNEL(channel) ((channel) >= RMT_RX_CHANNEL_ENCODING_START)
#define RMT_IS_TX_CHANNEL(channel) ((channel) <= RMT_TX_CHANNEL_ENCODING_END)
#define RMT_DECODE_RX_CHANNEL(encode_chan) ((encode_chan) - RMT_RX_CHANNEL_ENCODING_START)
#define RMT_ENCODE_RX_CHANNEL(decode_chan) ((decode_chan) + RMT_RX_CHANNEL_ENCODING_START)

typedef struct {
    rmt_hal_context_t hal;
    _lock_t rmt_driver_isr_lock; // Mutex protecting concurrent install/uninstall of the shared RMT ISR
    portMUX_TYPE rmt_spinlock; // Spinlock protecting concurrent register-level access
    rmt_isr_handle_t rmt_driver_intr_handle;
    rmt_tx_end_callback_t rmt_tx_end_callback; // Callback invoked when a transmission ends
    uint8_t rmt_driver_channels; // Bitmask of installed drivers' channels
    bool rmt_module_enabled;
    uint32_t synchro_channel_mask; // Bitmap of channels already added in the synchronous group
} rmt_contex_t;

typedef struct {
    size_t tx_offset;
    size_t tx_len_rem;
    size_t tx_sub_len;
    bool translator;
    bool wait_done; // Marks whether to wait for tx to finish
    bool loop_autostop; // Marks whether loop auto-stop is enabled
    rmt_channel_t channel;
    const rmt_item32_t *tx_data;
    xSemaphoreHandle tx_sem;
#if CONFIG_SPIRAM_USE_MALLOC
    int intr_alloc_flags;
    StaticSemaphore_t tx_sem_buffer;
#endif
    rmt_item32_t *tx_buf;
    RingbufHandle_t rx_buf;
#if SOC_RMT_SUPPORT_RX_PINGPONG
    rmt_item32_t *rx_item_buf;
    uint32_t rx_item_buf_size;
    uint32_t rx_item_len;
    int rx_item_start_idx;
#endif
    sample_to_rmt_t sample_to_rmt;
    void *tx_context;
    size_t sample_size_remain;
    const uint8_t *sample_cur;
} rmt_obj_t;
98 
99 static rmt_contex_t rmt_contex = {
100     .hal.regs = RMT_LL_HW_BASE,
101     .hal.mem = RMT_LL_MEM_BASE,
102     .rmt_spinlock = portMUX_INITIALIZER_UNLOCKED,
103     .rmt_driver_intr_handle = NULL,
104     .rmt_tx_end_callback = {
105         .function = NULL,
106     },
107     .rmt_driver_channels = 0,
108     .rmt_module_enabled = false,
109     .synchro_channel_mask = 0
110 };
111 
112 static rmt_obj_t *p_rmt_obj[RMT_CHANNEL_MAX] = {0};
113 
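// Some targets let every channel pick its own source clock, so the resolved
// clock frequency has to be recorded per channel; otherwise a single value is
// shared by the whole group.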
#if SOC_RMT_CHANNEL_CLK_INDEPENDENT
static uint32_t s_rmt_source_clock_hz[RMT_CHANNEL_MAX];
#else
static uint32_t s_rmt_source_clock_hz;
#endif

// Enable the RMT module
static void rmt_module_enable(void)
{
    RMT_ENTER_CRITICAL();
    if (rmt_contex.rmt_module_enabled == false) {
        periph_module_reset(rmt_periph_signals.groups[0].module);
        periph_module_enable(rmt_periph_signals.groups[0].module);
        rmt_contex.rmt_module_enabled = true;
    }
    RMT_EXIT_CRITICAL();
}

// Disable the RMT module
static void rmt_module_disable(void)
{
    RMT_ENTER_CRITICAL();
    if (rmt_contex.rmt_module_enabled == true) {
        periph_module_disable(rmt_periph_signals.groups[0].module);
        rmt_contex.rmt_module_enabled = false;
    }
    RMT_EXIT_CRITICAL();
}

esp_err_t rmt_set_clk_div(rmt_channel_t channel, uint8_t div_cnt)
{
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    if (RMT_IS_RX_CHANNEL(channel)) {
        rmt_ll_rx_set_channel_clock_div(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel), div_cnt);
    } else {
        rmt_ll_tx_set_channel_clock_div(rmt_contex.hal.regs, channel, div_cnt);
    }
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_get_clk_div(rmt_channel_t channel, uint8_t *div_cnt)
{
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(div_cnt, ESP_ERR_INVALID_ARG, TAG, RMT_ADDR_ERROR_STR);
    RMT_ENTER_CRITICAL();
    if (RMT_IS_RX_CHANNEL(channel)) {
        *div_cnt = (uint8_t)rmt_ll_rx_get_channel_clock_div(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel));
    } else {
        *div_cnt = (uint8_t)rmt_ll_tx_get_channel_clock_div(rmt_contex.hal.regs, channel);
    }
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_set_rx_idle_thresh(rmt_channel_t channel, uint16_t thresh)
{
    ESP_RETURN_ON_FALSE(RMT_IS_RX_CHANNEL(channel) && channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    rmt_ll_rx_set_idle_thres(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel), thresh);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_get_rx_idle_thresh(rmt_channel_t channel, uint16_t *thresh)
{
    ESP_RETURN_ON_FALSE(RMT_IS_RX_CHANNEL(channel) && channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(thresh, ESP_ERR_INVALID_ARG, TAG, RMT_ADDR_ERROR_STR);
    RMT_ENTER_CRITICAL();
    *thresh = (uint16_t)rmt_ll_rx_get_idle_thres(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel));
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_set_mem_block_num(rmt_channel_t channel, uint8_t rmt_mem_num)
{
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(rmt_mem_num <= RMT_CHANNEL_MAX - channel, ESP_ERR_INVALID_ARG, TAG, RMT_MEM_CNT_ERROR_STR);
    RMT_ENTER_CRITICAL();
    if (RMT_IS_RX_CHANNEL(channel)) {
        rmt_ll_rx_set_mem_blocks(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel), rmt_mem_num);
    } else {
        rmt_ll_tx_set_mem_blocks(rmt_contex.hal.regs, channel, rmt_mem_num);
    }
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_get_mem_block_num(rmt_channel_t channel, uint8_t *rmt_mem_num)
{
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(rmt_mem_num, ESP_ERR_INVALID_ARG, TAG, RMT_ADDR_ERROR_STR);
    RMT_ENTER_CRITICAL();
    if (RMT_IS_RX_CHANNEL(channel)) {
        *rmt_mem_num = (uint8_t)rmt_ll_rx_get_mem_blocks(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel));
    } else {
        *rmt_mem_num = (uint8_t)rmt_ll_tx_get_mem_blocks(rmt_contex.hal.regs, channel);
    }
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_set_tx_carrier(rmt_channel_t channel, bool carrier_en, uint16_t high_level, uint16_t low_level,
                             rmt_carrier_level_t carrier_level)
{
    ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(carrier_level < RMT_CARRIER_LEVEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CARRIER_ERROR_STR);
    RMT_ENTER_CRITICAL();
    rmt_ll_tx_set_carrier_high_low_ticks(rmt_contex.hal.regs, channel, high_level, low_level);
    rmt_ll_tx_set_carrier_level(rmt_contex.hal.regs, channel, carrier_level);
    rmt_ll_tx_enable_carrier_modulation(rmt_contex.hal.regs, channel, carrier_en);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_set_mem_pd(rmt_channel_t channel, bool pd_en)
{
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    rmt_ll_power_down_mem(rmt_contex.hal.regs, pd_en);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_get_mem_pd(rmt_channel_t channel, bool *pd_en)
{
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    *pd_en = rmt_ll_is_mem_power_down(rmt_contex.hal.regs);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_tx_start(rmt_channel_t channel, bool tx_idx_rst)
{
    ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    if (tx_idx_rst) {
        rmt_ll_tx_reset_pointer(rmt_contex.hal.regs, channel);
    }
    rmt_ll_clear_tx_end_interrupt(rmt_contex.hal.regs, channel);
    // enable tx end interrupt in non-loop mode
    if (!rmt_ll_is_tx_loop_enabled(rmt_contex.hal.regs, channel)) {
        rmt_ll_enable_tx_end_interrupt(rmt_contex.hal.regs, channel, true);
    } else {
#if SOC_RMT_SUPPORT_TX_LOOP_COUNT
        rmt_ll_tx_reset_loop(rmt_contex.hal.regs, channel);
        rmt_ll_tx_enable_loop_count(rmt_contex.hal.regs, channel, true);
        rmt_ll_clear_tx_loop_interrupt(rmt_contex.hal.regs, channel);
        rmt_ll_enable_tx_loop_interrupt(rmt_contex.hal.regs, channel, true);
#endif
    }
    rmt_ll_tx_start(rmt_contex.hal.regs, channel);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_tx_stop(rmt_channel_t channel)
{
    ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    rmt_ll_tx_stop(rmt_contex.hal.regs, channel);
    rmt_ll_tx_reset_pointer(rmt_contex.hal.regs, channel);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_rx_start(rmt_channel_t channel, bool rx_idx_rst)
{
    ESP_RETURN_ON_FALSE(RMT_IS_RX_CHANNEL(channel) && channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    rmt_ll_rx_enable(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel), false);
    if (rx_idx_rst) {
        rmt_ll_rx_reset_pointer(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel));
    }
    rmt_ll_clear_rx_end_interrupt(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel));
    rmt_ll_enable_rx_end_interrupt(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel), true);

#if SOC_RMT_SUPPORT_RX_PINGPONG
    const uint32_t item_block_len = rmt_ll_rx_get_mem_blocks(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel)) * RMT_MEM_ITEM_NUM;
    p_rmt_obj[channel]->rx_item_start_idx = 0;
    p_rmt_obj[channel]->rx_item_len = 0;
    rmt_set_rx_thr_intr_en(channel, true, item_block_len / 2);
#endif

    rmt_ll_rx_enable(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel), true);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_rx_stop(rmt_channel_t channel)
{
    ESP_RETURN_ON_FALSE(RMT_IS_RX_CHANNEL(channel) && channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    rmt_ll_enable_rx_end_interrupt(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel), false);
    rmt_ll_rx_enable(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel), false);
    rmt_ll_rx_reset_pointer(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel));
#if SOC_RMT_SUPPORT_RX_PINGPONG
    rmt_ll_enable_rx_thres_interrupt(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel), false);
#endif
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_tx_memory_reset(rmt_channel_t channel)
{
    ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    rmt_ll_tx_reset_pointer(rmt_contex.hal.regs, channel);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_rx_memory_reset(rmt_channel_t channel)
{
    ESP_RETURN_ON_FALSE(RMT_IS_RX_CHANNEL(channel) && channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    rmt_ll_rx_reset_pointer(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel));
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_set_memory_owner(rmt_channel_t channel, rmt_mem_owner_t owner)
{
    ESP_RETURN_ON_FALSE(RMT_IS_RX_CHANNEL(channel) && channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(owner < RMT_MEM_OWNER_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_MEM_OWNER_ERROR_STR);
    RMT_ENTER_CRITICAL();
    rmt_ll_rx_set_mem_owner(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel), owner);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_get_memory_owner(rmt_channel_t channel, rmt_mem_owner_t *owner)
{
    ESP_RETURN_ON_FALSE(RMT_IS_RX_CHANNEL(channel) && channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(owner, ESP_ERR_INVALID_ARG, TAG, RMT_MEM_OWNER_ERROR_STR);
    RMT_ENTER_CRITICAL();
    *owner = (rmt_mem_owner_t)rmt_ll_rx_get_mem_owner(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel));
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_set_tx_loop_mode(rmt_channel_t channel, bool loop_en)
{
    ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    rmt_ll_tx_enable_loop(rmt_contex.hal.regs, channel, loop_en);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_get_tx_loop_mode(rmt_channel_t channel, bool *loop_en)
{
    ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    *loop_en = rmt_ll_is_tx_loop_enabled(rmt_contex.hal.regs, channel);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_set_rx_filter(rmt_channel_t channel, bool rx_filter_en, uint8_t thresh)
{
    ESP_RETURN_ON_FALSE(RMT_IS_RX_CHANNEL(channel) && channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    rmt_ll_rx_enable_filter(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel), rx_filter_en);
    rmt_ll_rx_set_filter_thres(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel), thresh);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_set_source_clk(rmt_channel_t channel, rmt_source_clk_t base_clk)
{
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(base_clk < RMT_BASECLK_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_BASECLK_ERROR_STR);
    RMT_ENTER_CRITICAL();
    rmt_ll_set_group_clock_src(rmt_contex.hal.regs, channel, base_clk, 0, 0, 0);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_get_source_clk(rmt_channel_t channel, rmt_source_clk_t *src_clk)
{
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    *src_clk = (rmt_source_clk_t)rmt_ll_get_group_clock_src(rmt_contex.hal.regs, channel);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_set_idle_level(rmt_channel_t channel, bool idle_out_en, rmt_idle_level_t level)
{
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(level < RMT_IDLE_LEVEL_MAX, ESP_ERR_INVALID_ARG, TAG, "RMT IDLE LEVEL ERR");
    RMT_ENTER_CRITICAL();
    rmt_ll_tx_enable_idle(rmt_contex.hal.regs, channel, idle_out_en);
    rmt_ll_tx_set_idle_level(rmt_contex.hal.regs, channel, level);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_get_idle_level(rmt_channel_t channel, bool *idle_out_en, rmt_idle_level_t *level)
{
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    *idle_out_en = rmt_ll_is_tx_idle_enabled(rmt_contex.hal.regs, channel);
    *level = rmt_ll_tx_get_idle_level(rmt_contex.hal.regs, channel);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_get_status(rmt_channel_t channel, uint32_t *status)
{
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    if (RMT_IS_RX_CHANNEL(channel)) {
        *status = rmt_ll_rx_get_channel_status(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel));
    } else {
        *status = rmt_ll_tx_get_channel_status(rmt_contex.hal.regs, channel);
    }
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

void rmt_set_intr_enable_mask(uint32_t mask)
{
    RMT_ENTER_CRITICAL();
    rmt_ll_enable_interrupt(rmt_contex.hal.regs, mask, true);
    RMT_EXIT_CRITICAL();
}

void rmt_clr_intr_enable_mask(uint32_t mask)
{
    RMT_ENTER_CRITICAL();
    rmt_ll_enable_interrupt(rmt_contex.hal.regs, mask, false);
    RMT_EXIT_CRITICAL();
}

esp_err_t rmt_set_rx_intr_en(rmt_channel_t channel, bool en)
{
    ESP_RETURN_ON_FALSE(RMT_IS_RX_CHANNEL(channel) && channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    rmt_ll_enable_rx_end_interrupt(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel), en);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

#if SOC_RMT_SUPPORT_RX_PINGPONG
esp_err_t rmt_set_rx_thr_intr_en(rmt_channel_t channel, bool en, uint16_t evt_thresh)
{
    ESP_RETURN_ON_FALSE(RMT_IS_RX_CHANNEL(channel) && channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    if (en) {
        uint32_t item_block_len = rmt_ll_rx_get_mem_blocks(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel)) * RMT_MEM_ITEM_NUM;
        ESP_RETURN_ON_FALSE(evt_thresh <= item_block_len, ESP_ERR_INVALID_ARG, TAG, "RMT EVT THRESH ERR");
        RMT_ENTER_CRITICAL();
        rmt_ll_rx_set_limit(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel), evt_thresh);
        rmt_ll_enable_rx_thres_interrupt(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel), true);
        RMT_EXIT_CRITICAL();
    } else {
        RMT_ENTER_CRITICAL();
        rmt_ll_enable_rx_thres_interrupt(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel), false);
        RMT_EXIT_CRITICAL();
    }
    return ESP_OK;
}
#endif

esp_err_t rmt_set_err_intr_en(rmt_channel_t channel, bool en)
{
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    if (RMT_IS_RX_CHANNEL(channel)) {
        rmt_ll_enable_rx_err_interrupt(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel), en);
    } else {
        rmt_ll_enable_tx_err_interrupt(rmt_contex.hal.regs, channel, en);
    }
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_set_tx_intr_en(rmt_channel_t channel, bool en)
{
    ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    rmt_ll_enable_tx_end_interrupt(rmt_contex.hal.regs, channel, en);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_set_tx_thr_intr_en(rmt_channel_t channel, bool en, uint16_t evt_thresh)
{
    ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    if (en) {
        uint32_t item_block_len = rmt_ll_tx_get_mem_blocks(rmt_contex.hal.regs, channel) * RMT_MEM_ITEM_NUM;
        ESP_RETURN_ON_FALSE(evt_thresh <= item_block_len, ESP_ERR_INVALID_ARG, TAG, "RMT EVT THRESH ERR");
        RMT_ENTER_CRITICAL();
        rmt_ll_tx_set_limit(rmt_contex.hal.regs, channel, evt_thresh);
        rmt_ll_enable_tx_thres_interrupt(rmt_contex.hal.regs, channel, true);
        RMT_EXIT_CRITICAL();
    } else {
        RMT_ENTER_CRITICAL();
        rmt_ll_enable_tx_thres_interrupt(rmt_contex.hal.regs, channel, false);
        RMT_EXIT_CRITICAL();
    }
    return ESP_OK;
}

esp_err_t rmt_set_gpio(rmt_channel_t channel, rmt_mode_t mode, gpio_num_t gpio_num, bool invert_signal)
{
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(mode < RMT_MODE_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_MODE_ERROR_STR);
    ESP_RETURN_ON_FALSE(((GPIO_IS_VALID_GPIO(gpio_num) && (mode == RMT_MODE_RX)) ||
                         (GPIO_IS_VALID_OUTPUT_GPIO(gpio_num) && (mode == RMT_MODE_TX))), ESP_ERR_INVALID_ARG, TAG, RMT_GPIO_ERROR_STR);

    gpio_hal_iomux_func_sel(GPIO_PIN_MUX_REG[gpio_num], PIN_FUNC_GPIO);
    if (mode == RMT_MODE_TX) {
        ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
        gpio_set_direction(gpio_num, GPIO_MODE_OUTPUT);
        esp_rom_gpio_connect_out_signal(gpio_num, rmt_periph_signals.groups[0].channels[channel].tx_sig, invert_signal, 0);
    } else {
        ESP_RETURN_ON_FALSE(RMT_IS_RX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
        gpio_set_direction(gpio_num, GPIO_MODE_INPUT);
        esp_rom_gpio_connect_in_signal(gpio_num, rmt_periph_signals.groups[0].channels[channel].rx_sig, invert_signal);
    }
    return ESP_OK;
}

esp_err_t rmt_set_pin(rmt_channel_t channel, rmt_mode_t mode, gpio_num_t gpio_num)
{
    // kept only for backward compatibility
    return rmt_set_gpio(channel, mode, gpio_num, false);
}

static bool rmt_is_channel_number_valid(rmt_channel_t channel, uint8_t mode)
{
    // RX mode
    if (mode == RMT_MODE_RX) {
        return RMT_IS_RX_CHANNEL(channel) && (channel < RMT_CHANNEL_MAX);
    }
    // TX mode
    return (channel >= 0) && RMT_IS_TX_CHANNEL(channel);
}

static esp_err_t rmt_internal_config(rmt_dev_t *dev, const rmt_config_t *rmt_param)
{
    uint8_t mode = rmt_param->rmt_mode;
    uint8_t channel = rmt_param->channel;
    uint8_t gpio_num = rmt_param->gpio_num;
    uint8_t mem_cnt = rmt_param->mem_block_num;
    uint8_t clk_div = rmt_param->clk_div;
    uint32_t carrier_freq_hz = rmt_param->tx_config.carrier_freq_hz;
    bool carrier_en = rmt_param->tx_config.carrier_en;
    uint32_t rmt_source_clk_hz;

    ESP_RETURN_ON_FALSE(rmt_is_channel_number_valid(channel, mode), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(mem_cnt + channel <= SOC_RMT_CHANNELS_PER_GROUP && mem_cnt > 0, ESP_ERR_INVALID_ARG, TAG, RMT_MEM_CNT_ERROR_STR);
    ESP_RETURN_ON_FALSE(clk_div > 0, ESP_ERR_INVALID_ARG, TAG, RMT_CLK_DIV_ERROR_STR);

    if (mode == RMT_MODE_TX) {
        ESP_RETURN_ON_FALSE(!carrier_en || carrier_freq_hz > 0, ESP_ERR_INVALID_ARG, TAG, "RMT carrier frequency can't be zero");
    }

    RMT_ENTER_CRITICAL();
    rmt_ll_enable_mem_access(dev, true);

    if (rmt_param->flags & RMT_CHANNEL_FLAGS_AWARE_DFS) {
#if SOC_RMT_SUPPORT_XTAL
        // clock src: XTAL_CLK
        rmt_source_clk_hz = rtc_clk_xtal_freq_get() * 1000000;
        rmt_ll_set_group_clock_src(dev, channel, RMT_BASECLK_XTAL, 0, 0, 0);
#elif SOC_RMT_SUPPORT_REF_TICK
        // clock src: REF_CLK
        rmt_source_clk_hz = REF_CLK_FREQ;
        rmt_ll_set_group_clock_src(dev, channel, RMT_BASECLK_REF, 0, 0, 0);
#endif
    } else {
        // clock src: APB_CLK
        rmt_source_clk_hz = APB_CLK_FREQ;
        rmt_ll_set_group_clock_src(dev, channel, RMT_BASECLK_APB, 0, 0, 0);
    }
    RMT_EXIT_CRITICAL();

#if SOC_RMT_CHANNEL_CLK_INDEPENDENT
    s_rmt_source_clock_hz[channel] = rmt_source_clk_hz;
#else
    if (s_rmt_source_clock_hz && rmt_source_clk_hz != s_rmt_source_clock_hz) {
        ESP_LOGW(TAG, "RMT clock source has been configured to %d by other channel, now reconfigure it to %d", s_rmt_source_clock_hz, rmt_source_clk_hz);
    }
    s_rmt_source_clock_hz = rmt_source_clk_hz;
#endif
    ESP_LOGD(TAG, "rmt_source_clk_hz: %d", rmt_source_clk_hz);

    if (mode == RMT_MODE_TX) {
        uint16_t carrier_duty_percent = rmt_param->tx_config.carrier_duty_percent;
        uint8_t carrier_level = rmt_param->tx_config.carrier_level;
        uint8_t idle_level = rmt_param->tx_config.idle_level;

        RMT_ENTER_CRITICAL();
        rmt_ll_tx_set_channel_clock_div(dev, channel, clk_div);
        rmt_ll_tx_set_mem_blocks(dev, channel, mem_cnt);
        rmt_ll_tx_reset_pointer(dev, channel);
        rmt_ll_tx_enable_loop(dev, channel, rmt_param->tx_config.loop_en);
#if SOC_RMT_SUPPORT_TX_LOOP_COUNT
        if (rmt_param->tx_config.loop_en) {
            rmt_ll_tx_set_loop_count(dev, channel, rmt_param->tx_config.loop_count);
        }
#endif
        /* always enable tx ping-pong */
        rmt_ll_tx_enable_pingpong(dev, channel, true);
        /* Set idle level */
        rmt_ll_tx_enable_idle(dev, channel, rmt_param->tx_config.idle_output_en);
        rmt_ll_tx_set_idle_level(dev, channel, idle_level);
        /* Set carrier */
        rmt_ll_tx_enable_carrier_modulation(dev, channel, carrier_en);
        if (carrier_en) {
            uint32_t duty_div, duty_h, duty_l;
            duty_div = rmt_source_clk_hz / carrier_freq_hz;
            duty_h = duty_div * carrier_duty_percent / 100;
            duty_l = duty_div - duty_h;
            rmt_ll_tx_set_carrier_level(dev, channel, carrier_level);
            rmt_ll_tx_set_carrier_high_low_ticks(dev, channel, duty_h, duty_l);
        } else {
            rmt_ll_tx_set_carrier_level(dev, channel, 0);
            rmt_ll_tx_set_carrier_high_low_ticks(dev, channel, 0, 0);
        }
        RMT_EXIT_CRITICAL();

        ESP_LOGD(TAG, "Rmt Tx Channel %u|Gpio %u|Sclk_Hz %u|Div %u|Carrier_Hz %u|Duty %u",
                 channel, gpio_num, rmt_source_clk_hz, clk_div, carrier_freq_hz, carrier_duty_percent);
    } else if (RMT_MODE_RX == mode) {
        uint8_t filter_cnt = rmt_param->rx_config.filter_ticks_thresh;
        uint16_t threshold = rmt_param->rx_config.idle_threshold;

        RMT_ENTER_CRITICAL();
        rmt_ll_rx_set_channel_clock_div(dev, RMT_DECODE_RX_CHANNEL(channel), clk_div);
        rmt_ll_rx_set_mem_blocks(dev, RMT_DECODE_RX_CHANNEL(channel), mem_cnt);
        rmt_ll_rx_reset_pointer(dev, RMT_DECODE_RX_CHANNEL(channel));
        rmt_ll_rx_set_mem_owner(dev, RMT_DECODE_RX_CHANNEL(channel), RMT_MEM_OWNER_HW);
        /* Set idle threshold */
        rmt_ll_rx_set_idle_thres(dev, RMT_DECODE_RX_CHANNEL(channel), threshold);
        /* Set RX filter */
        rmt_ll_rx_set_filter_thres(dev, RMT_DECODE_RX_CHANNEL(channel), filter_cnt);
        rmt_ll_rx_enable_filter(dev, RMT_DECODE_RX_CHANNEL(channel), rmt_param->rx_config.filter_en);

#if SOC_RMT_SUPPORT_RX_PINGPONG
        /* always enable rx ping-pong */
        rmt_ll_rx_enable_pingpong(dev, RMT_DECODE_RX_CHANNEL(channel), true);
#endif

#if SOC_RMT_SUPPORT_RX_DEMODULATION
        rmt_ll_rx_enable_carrier_demodulation(dev, RMT_DECODE_RX_CHANNEL(channel), rmt_param->rx_config.rm_carrier);
        if (rmt_param->rx_config.rm_carrier) {
            uint32_t duty_total = rmt_source_clk_hz / rmt_ll_rx_get_channel_clock_div(dev, RMT_DECODE_RX_CHANNEL(channel)) / rmt_param->rx_config.carrier_freq_hz;
            uint32_t duty_high = duty_total * rmt_param->rx_config.carrier_duty_percent / 100;
            // the carrier pulse timing can carry residual error, so double the theoretical ticks for margin
            rmt_ll_rx_set_carrier_high_low_ticks(dev, RMT_DECODE_RX_CHANNEL(channel), duty_high * 2, (duty_total - duty_high) * 2);
            rmt_ll_rx_set_carrier_level(dev, RMT_DECODE_RX_CHANNEL(channel), rmt_param->rx_config.carrier_level);
        }
#endif
        RMT_EXIT_CRITICAL();

        ESP_LOGD(TAG, "Rmt Rx Channel %u|Gpio %u|Sclk_Hz %u|Div %u|Threshold %u|Filter %u",
                 channel, gpio_num, rmt_source_clk_hz, clk_div, threshold, filter_cnt);
    }

    return ESP_OK;
}

esp_err_t rmt_config(const rmt_config_t *rmt_param)
{
    rmt_module_enable();

    ESP_RETURN_ON_ERROR(rmt_set_gpio(rmt_param->channel, rmt_param->rmt_mode, rmt_param->gpio_num, rmt_param->flags & RMT_CHANNEL_FLAGS_INVERT_SIG), TAG, "set gpio for RMT driver failed");
    ESP_RETURN_ON_ERROR(rmt_internal_config(&RMT, rmt_param), TAG, "initialize RMT driver failed");

    return ESP_OK;
}

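// Copy items into the channel's RMT RAM at the given offset, under the
// spinlock because this is called from both task context and the ISR.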
static void IRAM_ATTR rmt_fill_memory(rmt_channel_t channel, const rmt_item32_t *item,
                                      uint16_t item_num, uint16_t mem_offset)
{
    RMT_ENTER_CRITICAL();
    rmt_ll_write_memory(rmt_contex.hal.mem, channel, item, item_num, mem_offset);
    RMT_EXIT_CRITICAL();
}

esp_err_t rmt_fill_tx_items(rmt_channel_t channel, const rmt_item32_t *item, uint16_t item_num, uint16_t mem_offset)
{
    ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(item, ESP_ERR_INVALID_ARG, TAG, RMT_ADDR_ERROR_STR);
    ESP_RETURN_ON_FALSE(item_num > 0, ESP_ERR_INVALID_ARG, TAG, RMT_DRIVER_LENGTH_ERROR_STR);

    /* Each block has 64 x 32 bits of data */
    uint8_t mem_cnt = rmt_ll_tx_get_mem_blocks(rmt_contex.hal.regs, channel);
    ESP_RETURN_ON_FALSE(mem_cnt * RMT_MEM_ITEM_NUM >= item_num, ESP_ERR_INVALID_ARG, TAG, RMT_WR_MEM_OVF_ERROR_STR);
    rmt_fill_memory(channel, item, item_num, mem_offset);
    return ESP_OK;
}

esp_err_t rmt_isr_register(void (*fn)(void *), void *arg, int intr_alloc_flags, rmt_isr_handle_t *handle)
{
    ESP_RETURN_ON_FALSE(fn, ESP_ERR_INVALID_ARG, TAG, RMT_ADDR_ERROR_STR);
    ESP_RETURN_ON_FALSE(rmt_contex.rmt_driver_channels == 0, ESP_FAIL, TAG, "RMT driver installed, can not install generic ISR handler");

    return esp_intr_alloc(rmt_periph_signals.groups[0].irq, intr_alloc_flags, fn, arg, handle);
}

esp_err_t rmt_isr_deregister(rmt_isr_handle_t handle)
{
    return esp_intr_free(handle);
}

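// Count the items received by scanning the channel RAM for the first entry
// with a zero duration, which marks the end of a frame.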
static int IRAM_ATTR rmt_rx_get_mem_len_in_isr(rmt_channel_t channel)
{
    int block_num = rmt_ll_rx_get_mem_blocks(rmt_contex.hal.regs, channel);
    int item_block_len = block_num * RMT_MEM_ITEM_NUM;
    volatile rmt_item32_t *data = (rmt_item32_t *)RMTMEM.chan[RMT_ENCODE_RX_CHANNEL(channel)].data32;
    int idx;
    for (idx = 0; idx < item_block_len; idx++) {
        if (data[idx].duration0 == 0) {
            return idx;
        } else if (data[idx].duration1 == 0) {
            return idx + 1;
        }
    }
    return idx;
}

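// Default driver ISR, shared by every channel that has the driver installed.
// It walks the interrupt status bitmaps and services, in turn: TX end, TX
// threshold (ping-pong refill), RX end, RX threshold (on targets with RX
// ping-pong), TX loop count, and RX/TX error interrupts.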
static void IRAM_ATTR rmt_driver_isr_default(void *arg)
{
    uint32_t status = 0;
    rmt_item32_t volatile *addr = NULL;
    uint8_t channel = 0;
    rmt_hal_context_t *hal = (rmt_hal_context_t *)arg;
    portBASE_TYPE HPTaskAwoken = pdFALSE;

    // Tx end interrupt
    status = rmt_ll_get_tx_end_interrupt_status(hal->regs);
    while (status) {
        channel = __builtin_ffs(status) - 1;
        status &= ~(1 << channel);
        rmt_obj_t *p_rmt = p_rmt_obj[channel];
        if (p_rmt) {
            xSemaphoreGiveFromISR(p_rmt->tx_sem, &HPTaskAwoken);
            rmt_ll_tx_reset_pointer(rmt_contex.hal.regs, channel);
            p_rmt->tx_data = NULL;
            p_rmt->tx_len_rem = 0;
            p_rmt->tx_offset = 0;
            p_rmt->tx_sub_len = 0;
            p_rmt->sample_cur = NULL;
            p_rmt->translator = false;
            if (rmt_contex.rmt_tx_end_callback.function) {
                rmt_contex.rmt_tx_end_callback.function(channel, rmt_contex.rmt_tx_end_callback.arg);
            }
        }
        rmt_ll_clear_tx_end_interrupt(hal->regs, channel);
    }

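    // The TX threshold interrupt drives ping-pong transmission: the channel RAM
    // is used as two halves, and while the hardware sends one half the ISR
    // refills the other, toggling tx_offset between 0 and tx_sub_len.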
    // Tx thres interrupt
    status = rmt_ll_get_tx_thres_interrupt_status(hal->regs);
    while (status) {
        channel = __builtin_ffs(status) - 1;
        status &= ~(1 << channel);
        rmt_obj_t *p_rmt = p_rmt_obj[channel];
        if (p_rmt) {
            if (p_rmt->translator) {
                if (p_rmt->sample_size_remain > 0) {
                    size_t translated_size = 0;
                    p_rmt->sample_to_rmt((void *)p_rmt->sample_cur,
                                         p_rmt->tx_buf,
                                         p_rmt->sample_size_remain,
                                         p_rmt->tx_sub_len,
                                         &translated_size,
                                         &p_rmt->tx_len_rem);
                    p_rmt->sample_size_remain -= translated_size;
                    p_rmt->sample_cur += translated_size;
                    p_rmt->tx_data = p_rmt->tx_buf;
                } else {
                    p_rmt->sample_cur = NULL;
                    p_rmt->translator = false;
                }
            }
            const rmt_item32_t *pdata = p_rmt->tx_data;
            size_t len_rem = p_rmt->tx_len_rem;
            if (len_rem >= p_rmt->tx_sub_len) {
                rmt_fill_memory(channel, pdata, p_rmt->tx_sub_len, p_rmt->tx_offset);
                p_rmt->tx_data += p_rmt->tx_sub_len;
                p_rmt->tx_len_rem -= p_rmt->tx_sub_len;
            } else if (len_rem == 0) {
                rmt_item32_t stop_data = {0};
                rmt_ll_write_memory(rmt_contex.hal.mem, channel, &stop_data, 1, p_rmt->tx_offset);
            } else {
                rmt_fill_memory(channel, pdata, len_rem, p_rmt->tx_offset);
                rmt_item32_t stop_data = {0};
                rmt_ll_write_memory(rmt_contex.hal.mem, channel, &stop_data, 1, p_rmt->tx_offset + len_rem);
                p_rmt->tx_data += len_rem;
                p_rmt->tx_len_rem -= len_rem;
            }
            if (p_rmt->tx_offset == 0) {
                p_rmt->tx_offset = p_rmt->tx_sub_len;
            } else {
                p_rmt->tx_offset = 0;
            }
        }
        rmt_ll_clear_tx_thres_interrupt(hal->regs, channel);
    }

    // Rx end interrupt
    status = rmt_ll_get_rx_end_interrupt_status(hal->regs);
    while (status) {
        channel = __builtin_ffs(status) - 1;
        status &= ~(1 << channel);
        rmt_obj_t *p_rmt = p_rmt_obj[RMT_ENCODE_RX_CHANNEL(channel)];
        if (p_rmt) {
            rmt_ll_rx_enable(rmt_contex.hal.regs, channel, false);
            int item_len = rmt_rx_get_mem_len_in_isr(channel);
            rmt_ll_rx_set_mem_owner(rmt_contex.hal.regs, channel, RMT_MEM_OWNER_SW);
            if (p_rmt->rx_buf) {
                addr = RMTMEM.chan[RMT_ENCODE_RX_CHANNEL(channel)].data32;
#if SOC_RMT_SUPPORT_RX_PINGPONG
                if (item_len > p_rmt->rx_item_start_idx) {
                    item_len = item_len - p_rmt->rx_item_start_idx;
                }
                memcpy((void *)(p_rmt->rx_item_buf + p_rmt->rx_item_len), (void *)(addr + p_rmt->rx_item_start_idx), item_len * 4);
                p_rmt->rx_item_len += item_len;
                BaseType_t res = xRingbufferSendFromISR(p_rmt->rx_buf, (void *)(p_rmt->rx_item_buf), p_rmt->rx_item_len * 4, &HPTaskAwoken);
#else
                BaseType_t res = xRingbufferSendFromISR(p_rmt->rx_buf, (void *)addr, item_len * 4, &HPTaskAwoken);
#endif
                if (res == pdFALSE) {
                    ESP_EARLY_LOGE(TAG, "RMT RX BUFFER FULL");
                }
            } else {
                ESP_EARLY_LOGE(TAG, "RMT RX BUFFER ERROR");
            }

#if SOC_RMT_SUPPORT_RX_PINGPONG
            p_rmt->rx_item_start_idx = 0;
            p_rmt->rx_item_len = 0;
            memset((void *)p_rmt->rx_item_buf, 0, p_rmt->rx_item_buf_size);
#endif
            rmt_ll_rx_reset_pointer(rmt_contex.hal.regs, channel);
            rmt_ll_rx_set_mem_owner(rmt_contex.hal.regs, channel, RMT_MEM_OWNER_HW);
            rmt_ll_rx_enable(rmt_contex.hal.regs, channel, true);
        }
        rmt_ll_clear_rx_end_interrupt(hal->regs, channel);
    }

#if SOC_RMT_SUPPORT_RX_PINGPONG
    // Rx thres interrupt
    status = rmt_ll_get_rx_thres_interrupt_status(hal->regs);
    while (status) {
        channel = __builtin_ffs(status) - 1;
        status &= ~(1 << channel);
        rmt_obj_t *p_rmt = p_rmt_obj[RMT_ENCODE_RX_CHANNEL(channel)];
        int mem_item_size = rmt_ll_rx_get_mem_blocks(rmt_contex.hal.regs, channel) * RMT_MEM_ITEM_NUM;
        int rx_thres_lim = rmt_ll_rx_get_limit(rmt_contex.hal.regs, channel);
        int item_len = (p_rmt->rx_item_start_idx == 0) ? rx_thres_lim : (mem_item_size - rx_thres_lim);
        if ((p_rmt->rx_item_len + item_len) < (p_rmt->rx_item_buf_size / 4)) {
            rmt_ll_rx_set_mem_owner(rmt_contex.hal.regs, channel, RMT_MEM_OWNER_SW);
            memcpy((void *)(p_rmt->rx_item_buf + p_rmt->rx_item_len), (void *)(RMTMEM.chan[RMT_ENCODE_RX_CHANNEL(channel)].data32 + p_rmt->rx_item_start_idx), item_len * 4);
            rmt_ll_rx_set_mem_owner(rmt_contex.hal.regs, channel, RMT_MEM_OWNER_HW);
            p_rmt->rx_item_len += item_len;
            p_rmt->rx_item_start_idx += item_len;
            if (p_rmt->rx_item_start_idx >= mem_item_size) {
                p_rmt->rx_item_start_idx = 0;
            }
        } else {
            ESP_EARLY_LOGE(TAG, "---RX buffer too small: %d", p_rmt->rx_item_buf_size);
        }
        rmt_ll_clear_rx_thres_interrupt(hal->regs, channel);
    }
#endif

#if SOC_RMT_SUPPORT_TX_LOOP_COUNT
    // loop count interrupt
    status = rmt_ll_get_tx_loop_interrupt_status(hal->regs);
    while (status) {
        channel = __builtin_ffs(status) - 1;
        status &= ~(1 << channel);
        rmt_obj_t *p_rmt = p_rmt_obj[channel];
        if (p_rmt) {
            if (p_rmt->loop_autostop) {
#ifndef SOC_RMT_SUPPORT_TX_LOOP_AUTOSTOP
                // hardware can't stop the output automatically, so the driver stops it
                // here in software (may already have overshot by several microseconds)
                rmt_ll_tx_stop(rmt_contex.hal.regs, channel);
                rmt_ll_tx_reset_pointer(rmt_contex.hal.regs, channel);
#endif
            }
            xSemaphoreGiveFromISR(p_rmt->tx_sem, &HPTaskAwoken);
            if (rmt_contex.rmt_tx_end_callback.function) {
                rmt_contex.rmt_tx_end_callback.function(channel, rmt_contex.rmt_tx_end_callback.arg);
            }
        }
        rmt_ll_clear_tx_loop_interrupt(hal->regs, channel);
    }
#endif

    // RX Err interrupt
    status = rmt_ll_get_rx_err_interrupt_status(hal->regs);
    while (status) {
        channel = __builtin_ffs(status) - 1;
        status &= ~(1 << channel);
        rmt_obj_t *p_rmt = p_rmt_obj[RMT_ENCODE_RX_CHANNEL(channel)];
        if (p_rmt) {
            // Reset the receiver's write/read addresses to prevent endless err interrupts.
            rmt_ll_rx_reset_pointer(rmt_contex.hal.regs, channel);
            ESP_EARLY_LOGD(TAG, "RMT RX channel %d error", channel);
            ESP_EARLY_LOGD(TAG, "status: 0x%08x", rmt_ll_rx_get_channel_status(rmt_contex.hal.regs, channel));
        }
        rmt_ll_clear_rx_err_interrupt(hal->regs, channel);
    }

    // TX Err interrupt
    status = rmt_ll_get_tx_err_interrupt_status(hal->regs);
    while (status) {
        channel = __builtin_ffs(status) - 1;
        status &= ~(1 << channel);
        rmt_obj_t *p_rmt = p_rmt_obj[channel];
        if (p_rmt) {
            // Reset the transmitter's write/read addresses to prevent endless err interrupts.
            rmt_ll_tx_reset_pointer(rmt_contex.hal.regs, channel);
            ESP_EARLY_LOGD(TAG, "RMT TX channel %d error", channel);
            ESP_EARLY_LOGD(TAG, "status: 0x%08x", rmt_ll_tx_get_channel_status(rmt_contex.hal.regs, channel));
        }
        rmt_ll_clear_tx_err_interrupt(hal->regs, channel);
    }

    if (HPTaskAwoken == pdTRUE) {
        portYIELD_FROM_ISR();
    }
}

esp_err_t rmt_driver_uninstall(rmt_channel_t channel)
{
    esp_err_t err = ESP_OK;
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(rmt_contex.rmt_driver_channels & BIT(channel), ESP_ERR_INVALID_STATE, TAG, "No RMT driver for this channel");
    if (p_rmt_obj[channel] == NULL) {
        return ESP_OK;
    }
    // If a transmission was started without waiting for completion, block here until
    // it finishes, so the driver isn't torn down while the hardware is still sending.
    if (p_rmt_obj[channel]->wait_done) {
        xSemaphoreTake(p_rmt_obj[channel]->tx_sem, portMAX_DELAY);
    }

    RMT_ENTER_CRITICAL();
    // check channel's working mode
    if (p_rmt_obj[channel]->rx_buf) {
        rmt_ll_enable_rx_end_interrupt(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel), 0);
        rmt_ll_enable_rx_err_interrupt(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel), 0);
#if SOC_RMT_SUPPORT_RX_PINGPONG
        rmt_ll_enable_rx_thres_interrupt(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel), 0);
#endif
    } else {
        rmt_ll_enable_tx_end_interrupt(rmt_contex.hal.regs, channel, 0);
        rmt_ll_enable_tx_err_interrupt(rmt_contex.hal.regs, channel, 0);
        rmt_ll_enable_tx_thres_interrupt(rmt_contex.hal.regs, channel, false);
    }
    RMT_EXIT_CRITICAL();

    _lock_acquire_recursive(&(rmt_contex.rmt_driver_isr_lock));
    rmt_contex.rmt_driver_channels &= ~BIT(channel);
    if (rmt_contex.rmt_driver_channels == 0) {
        rmt_module_disable();
        // all channels have driver disabled
        err = rmt_isr_deregister(rmt_contex.rmt_driver_intr_handle);
        rmt_contex.rmt_driver_intr_handle = NULL;
    }
    _lock_release_recursive(&(rmt_contex.rmt_driver_isr_lock));

    if (err != ESP_OK) {
        return err;
    }

    if (p_rmt_obj[channel]->tx_sem) {
        vSemaphoreDelete(p_rmt_obj[channel]->tx_sem);
        p_rmt_obj[channel]->tx_sem = NULL;
    }
    if (p_rmt_obj[channel]->rx_buf) {
        vRingbufferDelete(p_rmt_obj[channel]->rx_buf);
        p_rmt_obj[channel]->rx_buf = NULL;
    }
    if (p_rmt_obj[channel]->tx_buf) {
        free(p_rmt_obj[channel]->tx_buf);
        p_rmt_obj[channel]->tx_buf = NULL;
    }
    if (p_rmt_obj[channel]->sample_to_rmt) {
        p_rmt_obj[channel]->sample_to_rmt = NULL;
    }
#if SOC_RMT_SUPPORT_RX_PINGPONG
    if (p_rmt_obj[channel]->rx_item_buf) {
        free(p_rmt_obj[channel]->rx_item_buf);
        p_rmt_obj[channel]->rx_item_buf = NULL;
        p_rmt_obj[channel]->rx_item_buf_size = 0;
    }
#endif

    free(p_rmt_obj[channel]);
    p_rmt_obj[channel] = NULL;
    return ESP_OK;
}

esp_err_t rmt_driver_install(rmt_channel_t channel, size_t rx_buf_size, int intr_alloc_flags)
{
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE((rmt_contex.rmt_driver_channels & BIT(channel)) == 0, ESP_ERR_INVALID_STATE, TAG, "RMT driver already installed for channel");

    esp_err_t err = ESP_OK;

    if (p_rmt_obj[channel]) {
        ESP_LOGD(TAG, "RMT driver already installed");
        return ESP_ERR_INVALID_STATE;
    }

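    // If the ISR is placed in IRAM (ESP_INTR_FLAG_IRAM), the driver object is
    // accessed while the flash cache may be disabled, so it must be allocated
    // from internal RAM instead of PSRAM.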
#if !CONFIG_SPIRAM_USE_MALLOC
    p_rmt_obj[channel] = calloc(1, sizeof(rmt_obj_t));
#else
    if (!(intr_alloc_flags & ESP_INTR_FLAG_IRAM)) {
        p_rmt_obj[channel] = calloc(1, sizeof(rmt_obj_t));
    } else {
        p_rmt_obj[channel] = heap_caps_calloc(1, sizeof(rmt_obj_t), MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
    }
#endif

    if (p_rmt_obj[channel] == NULL) {
        ESP_LOGE(TAG, "RMT driver malloc error");
        return ESP_ERR_NO_MEM;
    }

    p_rmt_obj[channel]->tx_len_rem = 0;
    p_rmt_obj[channel]->tx_data = NULL;
    p_rmt_obj[channel]->channel = channel;
    p_rmt_obj[channel]->tx_offset = 0;
    p_rmt_obj[channel]->tx_sub_len = 0;
    p_rmt_obj[channel]->wait_done = false;
    p_rmt_obj[channel]->loop_autostop = false;
    p_rmt_obj[channel]->translator = false;
    p_rmt_obj[channel]->sample_to_rmt = NULL;
    if (p_rmt_obj[channel]->tx_sem == NULL) {
#if !CONFIG_SPIRAM_USE_MALLOC
        p_rmt_obj[channel]->tx_sem = xSemaphoreCreateBinary();
#else
        p_rmt_obj[channel]->intr_alloc_flags = intr_alloc_flags;
        if (!(intr_alloc_flags & ESP_INTR_FLAG_IRAM)) {
            p_rmt_obj[channel]->tx_sem = xSemaphoreCreateBinary();
        } else {
            p_rmt_obj[channel]->tx_sem = xSemaphoreCreateBinaryStatic(&p_rmt_obj[channel]->tx_sem_buffer);
        }
#endif
        xSemaphoreGive(p_rmt_obj[channel]->tx_sem);
    }
    if (p_rmt_obj[channel]->rx_buf == NULL && rx_buf_size > 0) {
        p_rmt_obj[channel]->rx_buf = xRingbufferCreate(rx_buf_size, RINGBUF_TYPE_NOSPLIT);
    }

#if SOC_RMT_SUPPORT_RX_PINGPONG
    if (p_rmt_obj[channel]->rx_item_buf == NULL && rx_buf_size > 0) {
#if !CONFIG_SPIRAM_USE_MALLOC
        p_rmt_obj[channel]->rx_item_buf = calloc(1, rx_buf_size);
#else
        if (!(p_rmt_obj[channel]->intr_alloc_flags & ESP_INTR_FLAG_IRAM)) {
            p_rmt_obj[channel]->rx_item_buf = calloc(1, rx_buf_size);
        } else {
            p_rmt_obj[channel]->rx_item_buf = heap_caps_calloc(1, rx_buf_size, MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
        }
#endif
        if (p_rmt_obj[channel]->rx_item_buf == NULL) {
            ESP_LOGE(TAG, "RMT malloc fail");
            return ESP_FAIL;
        }
        p_rmt_obj[channel]->rx_item_buf_size = rx_buf_size;
    }
#endif

    _lock_acquire_recursive(&(rmt_contex.rmt_driver_isr_lock));

    if (rmt_contex.rmt_driver_channels == 0) {
        // first RMT channel using driver
        err = rmt_isr_register(rmt_driver_isr_default, &rmt_contex.hal, intr_alloc_flags, &(rmt_contex.rmt_driver_intr_handle));
    }
    if (err == ESP_OK) {
        rmt_contex.rmt_driver_channels |= BIT(channel);
    }
    _lock_release_recursive(&(rmt_contex.rmt_driver_isr_lock));

    rmt_module_enable();

    if (RMT_IS_RX_CHANNEL(channel)) {
        rmt_hal_rx_channel_reset(&rmt_contex.hal, RMT_DECODE_RX_CHANNEL(channel));
    } else {
        rmt_hal_tx_channel_reset(&rmt_contex.hal, channel);
    }

    return err;
}

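// Transmit a buffer of items. If it fits in the channel's RAM it is written in
// one shot, followed by a terminating zero entry; otherwise the first block is
// filled and the TX threshold ISR streams the remainder in half-block chunks.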
esp_err_t rmt_write_items(rmt_channel_t channel, const rmt_item32_t *rmt_item, int item_num, bool wait_tx_done)
{
    ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(p_rmt_obj[channel], ESP_FAIL, TAG, RMT_DRIVER_ERROR_STR);
    ESP_RETURN_ON_FALSE(rmt_item, ESP_FAIL, TAG, RMT_ADDR_ERROR_STR);
    ESP_RETURN_ON_FALSE(item_num > 0, ESP_ERR_INVALID_ARG, TAG, RMT_DRIVER_LENGTH_ERROR_STR);
#if CONFIG_SPIRAM_USE_MALLOC
    if (p_rmt_obj[channel]->intr_alloc_flags & ESP_INTR_FLAG_IRAM) {
        if (!esp_ptr_internal(rmt_item)) {
            ESP_LOGE(TAG, RMT_PSRAM_BUFFER_WARN_STR);
            return ESP_ERR_INVALID_ARG;
        }
    }
#endif
    rmt_obj_t *p_rmt = p_rmt_obj[channel];
    int block_num = rmt_ll_tx_get_mem_blocks(rmt_contex.hal.regs, channel);
    int item_block_len = block_num * RMT_MEM_ITEM_NUM;
    int item_sub_len = block_num * RMT_MEM_ITEM_NUM / 2;
    int len_rem = item_num;
    xSemaphoreTake(p_rmt->tx_sem, portMAX_DELAY);
    // fill the memory block first
    if (item_num >= item_block_len) {
        rmt_fill_memory(channel, rmt_item, item_block_len, 0);
        len_rem -= item_block_len;
        rmt_set_tx_loop_mode(channel, false);
        rmt_set_tx_thr_intr_en(channel, true, item_sub_len);
        p_rmt->tx_data = rmt_item + item_block_len;
        p_rmt->tx_len_rem = len_rem;
        p_rmt->tx_offset = 0;
        p_rmt->tx_sub_len = item_sub_len;
    } else {
        rmt_fill_memory(channel, rmt_item, len_rem, 0);
        rmt_item32_t stop_data = {0};
        rmt_ll_write_memory(rmt_contex.hal.mem, channel, &stop_data, 1, len_rem);
        p_rmt->tx_len_rem = 0;
    }
    rmt_tx_start(channel, true);
    p_rmt->wait_done = wait_tx_done;
    if (wait_tx_done) {
        // wait loop done
        if (rmt_ll_is_tx_loop_enabled(rmt_contex.hal.regs, channel)) {
#if SOC_RMT_SUPPORT_TX_LOOP_COUNT
            xSemaphoreTake(p_rmt->tx_sem, portMAX_DELAY);
            xSemaphoreGive(p_rmt->tx_sem);
#endif
        } else {
            // wait tx end
            xSemaphoreTake(p_rmt->tx_sem, portMAX_DELAY);
            xSemaphoreGive(p_rmt->tx_sem);
        }
    }
    return ESP_OK;
}

rmt_wait_tx_done(rmt_channel_t channel,TickType_t wait_time)1165 esp_err_t rmt_wait_tx_done(rmt_channel_t channel, TickType_t wait_time)
1166 {
1167     ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
1168     ESP_RETURN_ON_FALSE(p_rmt_obj[channel], ESP_FAIL, TAG, RMT_DRIVER_ERROR_STR);
1169     if (xSemaphoreTake(p_rmt_obj[channel]->tx_sem, wait_time) == pdTRUE) {
1170         p_rmt_obj[channel]->wait_done = false;
1171         xSemaphoreGive(p_rmt_obj[channel]->tx_sem);
1172         return ESP_OK;
1173     } else {
1174         if (wait_time != 0) {
1175             // Don't emit error message if just polling.
1176             ESP_LOGE(TAG, "Timeout on wait_tx_done");
1177         }
1178         return ESP_ERR_TIMEOUT;
1179     }
1180 }
1181 
rmt_get_ringbuf_handle(rmt_channel_t channel,RingbufHandle_t * buf_handle)1182 esp_err_t rmt_get_ringbuf_handle(rmt_channel_t channel, RingbufHandle_t *buf_handle)
1183 {
1184     ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
1185     ESP_RETURN_ON_FALSE(p_rmt_obj[channel], ESP_FAIL, TAG, RMT_DRIVER_ERROR_STR);
1186     ESP_RETURN_ON_FALSE(buf_handle, ESP_ERR_INVALID_ARG, TAG, RMT_ADDR_ERROR_STR);
1187     *buf_handle = p_rmt_obj[channel]->rx_buf;
1188     return ESP_OK;
1189 }

rmt_tx_end_callback_t rmt_register_tx_end_callback(rmt_tx_end_fn_t function, void *arg)
{
    rmt_tx_end_callback_t previous = rmt_contex.rmt_tx_end_callback;
    rmt_contex.rmt_tx_end_callback.function = function;
    rmt_contex.rmt_tx_end_callback.arg = arg;
    return previous;
}
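
/*
 * Usage sketch: the registered callback runs in ISR context, so it must be
 * short and must not block. The handler name is illustrative.
 *
 *     static void tx_end_handler(rmt_channel_t channel, void *arg)
 *     {
 *         // e.g. notify a task with vTaskNotifyGiveFromISR(); no blocking calls here
 *     }
 *     rmt_register_tx_end_callback(tx_end_handler, NULL);
 */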

esp_err_t rmt_translator_init(rmt_channel_t channel, sample_to_rmt_t fn)
{
    ESP_RETURN_ON_FALSE(fn, ESP_ERR_INVALID_ARG, TAG, RMT_TRANSLATOR_NULL_STR);
    ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(p_rmt_obj[channel], ESP_FAIL, TAG, RMT_DRIVER_ERROR_STR);
    const uint32_t block_size = rmt_ll_tx_get_mem_blocks(rmt_contex.hal.regs, channel) *
                                RMT_MEM_ITEM_NUM * sizeof(rmt_item32_t);
    if (p_rmt_obj[channel]->tx_buf == NULL) {
#if !CONFIG_SPIRAM_USE_MALLOC
        p_rmt_obj[channel]->tx_buf = (rmt_item32_t *)malloc(block_size);
#else
        if (p_rmt_obj[channel]->intr_alloc_flags & ESP_INTR_FLAG_IRAM) {
            p_rmt_obj[channel]->tx_buf = (rmt_item32_t *)malloc(block_size);
        } else {
            p_rmt_obj[channel]->tx_buf = (rmt_item32_t *)heap_caps_calloc(1, block_size, MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT);
        }
#endif
        if (p_rmt_obj[channel]->tx_buf == NULL) {
            ESP_LOGE(TAG, "RMT translator buffer create fail");
            return ESP_FAIL;
        }
    }
    p_rmt_obj[channel]->sample_to_rmt = fn;
    p_rmt_obj[channel]->tx_context = NULL;
    p_rmt_obj[channel]->sample_size_remain = 0;
    p_rmt_obj[channel]->sample_cur = NULL;
    ESP_LOGD(TAG, "RMT translator init done");
    return ESP_OK;
}
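
/*
 * Example translator (sketch): expand each input bit into one RMT item. The
 * durations below are placeholders; a real protocol (e.g. WS2812) needs its
 * own timings. The function name is illustrative; IRAM_ATTR matters when the
 * driver was installed with ESP_INTR_FLAG_IRAM.
 *
 *     static void IRAM_ATTR u8_bits_to_rmt(const void *src, rmt_item32_t *dest,
 *                                          size_t src_size, size_t wanted_num,
 *                                          size_t *translated_size, size_t *item_num)
 *     {
 *         const rmt_item32_t bit0 = {{{ 4, 1, 8, 0 }}};  // placeholder "0" timing
 *         const rmt_item32_t bit1 = {{{ 8, 1, 4, 0 }}};  // placeholder "1" timing
 *         size_t bytes = 0, items = 0;
 *         const uint8_t *p = (const uint8_t *)src;
 *         while (src && dest && bytes < src_size && items + 8 <= wanted_num) {
 *             for (int i = 7; i >= 0; i--) {
 *                 dest[items++] = ((*p >> i) & 0x01) ? bit1 : bit0;
 *             }
 *             p++;
 *             bytes++;
 *         }
 *         *translated_size = bytes;   // input bytes consumed
 *         *item_num = items;          // RMT items produced
 *     }
 *     rmt_translator_init(RMT_CHANNEL_0, u8_bits_to_rmt);
 */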

esp_err_t rmt_translator_set_context(rmt_channel_t channel, void *context)
{
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(p_rmt_obj[channel], ESP_FAIL, TAG, RMT_DRIVER_ERROR_STR);

    p_rmt_obj[channel]->tx_context = context;
    return ESP_OK;
}

esp_err_t rmt_translator_get_context(const size_t *item_num, void **context)
{
    ESP_RETURN_ON_FALSE(item_num && context, ESP_ERR_INVALID_ARG, TAG, "invalid arguments");

    // the address of tx_len_rem is directly passed to the translator callback,
    // so the enclosing object can be recovered from that pointer
    rmt_obj_t *obj = __containerof(item_num, rmt_obj_t, tx_len_rem);
    *context = obj->tx_context;

    return ESP_OK;
}
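
/*
 * Usage sketch: rmt_translator_get_context() is meant to be called from inside
 * a sample_to_rmt_t callback, passing the item_num pointer the callback
 * received, after a context was stored with rmt_translator_set_context().
 *
 *     // inside a sample_to_rmt_t callback:
 *     void *ctx = NULL;
 *     rmt_translator_get_context(item_num, &ctx);
 */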

esp_err_t rmt_write_sample(rmt_channel_t channel, const uint8_t *src, size_t src_size, bool wait_tx_done)
{
    ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(p_rmt_obj[channel], ESP_FAIL, TAG, RMT_DRIVER_ERROR_STR);
    ESP_RETURN_ON_FALSE(p_rmt_obj[channel]->sample_to_rmt, ESP_FAIL, TAG, RMT_TRANSLATOR_UNINIT_STR);
#if CONFIG_SPIRAM_USE_MALLOC
    if (p_rmt_obj[channel]->intr_alloc_flags & ESP_INTR_FLAG_IRAM) {
        if (!esp_ptr_internal(src)) {
            ESP_LOGE(TAG, RMT_PSRAM_BUFFER_WARN_STR);
            return ESP_ERR_INVALID_ARG;
        }
    }
#endif
    size_t translated_size = 0;
    rmt_obj_t *p_rmt = p_rmt_obj[channel];
    const uint32_t item_block_len = rmt_ll_tx_get_mem_blocks(rmt_contex.hal.regs, channel) * RMT_MEM_ITEM_NUM;
    const uint32_t item_sub_len = item_block_len / 2;
    xSemaphoreTake(p_rmt->tx_sem, portMAX_DELAY);
    p_rmt->sample_to_rmt((void *)src, p_rmt->tx_buf, src_size, item_block_len, &translated_size, &p_rmt->tx_len_rem);
    p_rmt->sample_size_remain = src_size - translated_size;
    p_rmt->sample_cur = src + translated_size;
    rmt_fill_memory(channel, p_rmt->tx_buf, p_rmt->tx_len_rem, 0);
    if (p_rmt->tx_len_rem == item_block_len) {
        rmt_set_tx_thr_intr_en(channel, 1, item_sub_len);
        p_rmt->tx_data = p_rmt->tx_buf;
        p_rmt->tx_offset = 0;
        p_rmt->tx_sub_len = item_sub_len;
        p_rmt->translator = true;
    } else {
        rmt_item32_t stop_data = {0};
        rmt_ll_write_memory(rmt_contex.hal.mem, channel, &stop_data, 1, p_rmt->tx_len_rem);
        p_rmt->tx_len_rem = 0;
        p_rmt->sample_cur = NULL;
        p_rmt->translator = false;
    }
    rmt_tx_start(channel, true);
    p_rmt->wait_done = wait_tx_done;
    if (wait_tx_done) {
        xSemaphoreTake(p_rmt->tx_sem, portMAX_DELAY);
        xSemaphoreGive(p_rmt->tx_sem);
    }
    return ESP_OK;
}
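
/*
 * Usage sketch: send raw bytes through the translator installed by
 * rmt_translator_init(). The payload is illustrative.
 *
 *     const uint8_t payload[] = { 0xA5, 0x5A };
 *     ESP_ERROR_CHECK(rmt_write_sample(RMT_CHANNEL_0, payload, sizeof(payload), true));
 */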

esp_err_t rmt_get_channel_status(rmt_channel_status_result_t *channel_status)
{
    ESP_RETURN_ON_FALSE(channel_status, ESP_ERR_INVALID_ARG, TAG, RMT_PARAM_ERR_STR);
    for (int i = 0; i < RMT_CHANNEL_MAX; i++) {
        channel_status->status[i] = RMT_CHANNEL_UNINIT;
        if (p_rmt_obj[i]) {
            if (p_rmt_obj[i]->tx_sem) {
                if (xSemaphoreTake(p_rmt_obj[i]->tx_sem, (TickType_t)0) == pdTRUE) {
                    channel_status->status[i] = RMT_CHANNEL_IDLE;
                    xSemaphoreGive(p_rmt_obj[i]->tx_sem);
                } else {
                    channel_status->status[i] = RMT_CHANNEL_BUSY;
                }
            }
        }
    }
    return ESP_OK;
}
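
/*
 * Usage sketch: snapshot the state of all channels. A channel reads as BUSY
 * while its tx semaphore is held by an ongoing transmission.
 *
 *     rmt_channel_status_result_t st;
 *     if (rmt_get_channel_status(&st) == ESP_OK &&
 *         st.status[RMT_CHANNEL_0] == RMT_CHANNEL_IDLE) {
 *         // safe to queue the next transmission on channel 0
 *     }
 */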

esp_err_t rmt_get_counter_clock(rmt_channel_t channel, uint32_t *clock_hz)
{
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(clock_hz, ESP_ERR_INVALID_ARG, TAG, "parameter clock_hz can't be null");
    RMT_ENTER_CRITICAL();
    uint32_t rmt_source_clk_hz = 0;
#if SOC_RMT_CHANNEL_CLK_INDEPENDENT
    rmt_source_clk_hz = s_rmt_source_clock_hz[channel];
#else
    rmt_source_clk_hz = s_rmt_source_clock_hz;
#endif
    if (RMT_IS_RX_CHANNEL(channel)) {
        *clock_hz = rmt_source_clk_hz / rmt_ll_rx_get_channel_clock_div(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel));
    } else {
        *clock_hz = rmt_source_clk_hz / rmt_ll_tx_get_channel_clock_div(rmt_contex.hal.regs, channel);
    }
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}
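
/*
 * Usage sketch: derive the duration of one RMT tick from the counter clock.
 *
 *     uint32_t hz = 0;
 *     rmt_get_counter_clock(RMT_CHANNEL_0, &hz);
 *     // each rmt_item32_t duration unit lasts (1e9 / hz) nanoseconds
 */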

#if SOC_RMT_SUPPORT_TX_SYNCHRO
esp_err_t rmt_add_channel_to_group(rmt_channel_t channel)
{
    ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    rmt_ll_tx_enable_sync(rmt_contex.hal.regs, true);
    rmt_contex.synchro_channel_mask |= (1 << channel);
    rmt_ll_tx_add_to_sync_group(rmt_contex.hal.regs, channel);
    rmt_ll_tx_reset_channels_clock_div(rmt_contex.hal.regs, rmt_contex.synchro_channel_mask);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_remove_channel_from_group(rmt_channel_t channel)
{
    ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    rmt_contex.synchro_channel_mask &= ~(1 << channel);
    rmt_ll_tx_remove_from_sync_group(rmt_contex.hal.regs, channel);
    if (rmt_contex.synchro_channel_mask == 0) {
        rmt_ll_tx_enable_sync(rmt_contex.hal.regs, false);
    }
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}
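
/*
 * Usage sketch: run several TX channels in lockstep. Both channels are
 * assumed to be installed already; adding a channel also resets the clock
 * dividers of every grouped channel so their counters stay aligned.
 *
 *     rmt_add_channel_to_group(RMT_CHANNEL_0);
 *     rmt_add_channel_to_group(RMT_CHANNEL_1);
 *     // ... transmissions on the grouped channels now start synchronously ...
 *     rmt_remove_channel_from_group(RMT_CHANNEL_1);
 *     rmt_remove_channel_from_group(RMT_CHANNEL_0);
 */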
#endif

esp_err_t rmt_memory_rw_rst(rmt_channel_t channel)
{
    ESP_RETURN_ON_FALSE(channel < RMT_CHANNEL_MAX, ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    RMT_ENTER_CRITICAL();
    if (RMT_IS_RX_CHANNEL(channel)) {
        rmt_ll_rx_reset_pointer(rmt_contex.hal.regs, RMT_DECODE_RX_CHANNEL(channel));
    } else {
        rmt_ll_tx_reset_pointer(rmt_contex.hal.regs, channel);
    }
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

#if SOC_RMT_SUPPORT_TX_LOOP_COUNT
esp_err_t rmt_set_tx_loop_count(rmt_channel_t channel, uint32_t count)
{
    ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(count <= RMT_LL_MAX_LOOP_COUNT, ESP_ERR_INVALID_ARG, TAG, "Invalid count value");
    RMT_ENTER_CRITICAL();
    rmt_ll_tx_set_loop_count(rmt_contex.hal.regs, channel, count);
    RMT_EXIT_CRITICAL();
    return ESP_OK;
}

esp_err_t rmt_enable_tx_loop_autostop(rmt_channel_t channel, bool en)
{
    ESP_RETURN_ON_FALSE(RMT_IS_TX_CHANNEL(channel), ESP_ERR_INVALID_ARG, TAG, RMT_CHANNEL_ERROR_STR);
    ESP_RETURN_ON_FALSE(p_rmt_obj[channel], ESP_FAIL, TAG, RMT_DRIVER_ERROR_STR); // guard against use before driver install
    p_rmt_obj[channel]->loop_autostop = en;
#if SOC_RMT_SUPPORT_TX_LOOP_AUTOSTOP
    RMT_ENTER_CRITICAL();
    rmt_ll_tx_enable_loop_autostop(rmt_contex.hal.regs, channel, en);
    RMT_EXIT_CRITICAL();
#endif
    return ESP_OK;
}
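
/*
 * Usage sketch: transmit a pattern a fixed number of times. Loop mode itself
 * is enabled separately (rmt_set_tx_loop_mode()); with hardware auto-stop the
 * channel halts after the configured count, otherwise the driver stops it
 * from the ISR.
 *
 *     rmt_set_tx_loop_mode(RMT_CHANNEL_0, true);
 *     rmt_set_tx_loop_count(RMT_CHANNEL_0, 100);
 *     rmt_enable_tx_loop_autostop(RMT_CHANNEL_0, true);
 *     rmt_write_items(RMT_CHANNEL_0, items, num_items, true); // returns after 100 loops
 */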
#endif