/*
 * SPDX-FileCopyrightText: 2020-2021 Espressif Systems (Shanghai) CO LTD
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#pragma once

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>
#include "soc/rmt_struct.h"
#include "hal/misc.h"

#ifdef __cplusplus
extern "C" {
#endif

#define RMT_LL_MAX_LOOP_COUNT           (1023) /*!< Maximum loop count supported by the hardware */

#define RMT_LL_HW_BASE  (&RMT)
#define RMT_LL_MEM_BASE (&RMTMEM)

// Note: TX and RX channels are both indexed from zero in the LL driver,
// i.e. tx_channel belongs to [0,1] and rx_channel belongs to [0,1]

static inline void rmt_ll_enable_drive_clock(rmt_dev_t *dev, bool enable)
{
    dev->sys_conf.clk_en = enable; // register clock gating
    dev->sys_conf.mem_clk_force_on = enable; // memory clock gating
}

static inline void rmt_ll_power_down_mem(rmt_dev_t *dev, bool enable)
{
    dev->sys_conf.mem_force_pu = !enable;
    dev->sys_conf.mem_force_pd = enable;
}

static inline bool rmt_ll_is_mem_power_down(rmt_dev_t *dev)
{
    // The RTC domain can also power down the RMT memory,
    // so checking these bits alone may not be sufficient to tell whether the memory is powered down.
    // mem_force_pd has higher priority than mem_force_pu.
    return (dev->sys_conf.mem_force_pd) || !(dev->sys_conf.mem_force_pu);
}

static inline void rmt_ll_enable_mem_access(rmt_dev_t *dev, bool enable)
{
    dev->sys_conf.fifo_mask = enable;
}

static inline void rmt_ll_set_group_clock_src(rmt_dev_t *dev, uint32_t channel, uint8_t src, uint8_t div_num, uint8_t div_a, uint8_t div_b)
{
    // Formula: rmt_sclk = module_clock_src / (1 + div_num + div_a / div_b)
    dev->sys_conf.sclk_active = 0;
    dev->sys_conf.sclk_sel = src;
    HAL_FORCE_MODIFY_U32_REG_FIELD(dev->sys_conf, sclk_div_num, div_num);
    dev->sys_conf.sclk_div_a = div_a;
    dev->sys_conf.sclk_div_b = div_b;
    dev->sys_conf.sclk_active = 1;
}
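
// Illustrative sketch (not part of the LL API): computes the rmt_sclk frequency
// that results from the divider settings, per the formula above. The source clock
// frequency is supplied by the caller; integer math is used to avoid the fractional
// term (rmt_sclk = src * div_b / ((1 + div_num) * div_b + div_a)). Treating
// div_b == 0 as "no fractional divider" is an assumption made for this sketch.
static inline uint32_t rmt_ll_example_group_sclk_hz(uint32_t src_hz, uint8_t div_num, uint8_t div_a, uint8_t div_b)
{
    if (div_b == 0) {
        return src_hz / (1 + div_num); // assumed: fractional part disabled
    }
    return (uint32_t)(((uint64_t)src_hz * div_b) / ((uint32_t)(1 + div_num) * div_b + div_a));
}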

static inline uint32_t rmt_ll_get_group_clock_src(rmt_dev_t *dev, uint32_t channel)
{
    return dev->sys_conf.sclk_sel;
}

static inline void rmt_ll_tx_reset_channel_clock_div(rmt_dev_t *dev, uint32_t channel)
{
    dev->ref_cnt_rst.val |= (1 << channel);
}

static inline void rmt_ll_tx_reset_channels_clock_div(rmt_dev_t *dev, uint32_t channel_mask)
{
    dev->ref_cnt_rst.val |= channel_mask;
}

static inline void rmt_ll_rx_reset_channel_clock_div(rmt_dev_t *dev, uint32_t channel)
{
    dev->ref_cnt_rst.val |= (1 << (channel + 2));
}

static inline void rmt_ll_tx_reset_pointer(rmt_dev_t *dev, uint32_t channel)
{
    dev->tx_conf[channel].mem_rd_rst = 1;
    dev->tx_conf[channel].mem_rd_rst = 0;
    dev->tx_conf[channel].mem_rst = 1;
    dev->tx_conf[channel].mem_rst = 0;
}

static inline void rmt_ll_rx_reset_pointer(rmt_dev_t *dev, uint32_t channel)
{
    dev->rx_conf[channel].conf1.mem_wr_rst = 1;
    dev->rx_conf[channel].conf1.mem_wr_rst = 0;
    dev->rx_conf[channel].conf1.mem_rst = 1;
    dev->rx_conf[channel].conf1.mem_rst = 0;
}

static inline void rmt_ll_tx_start(rmt_dev_t *dev, uint32_t channel)
{
    dev->tx_conf[channel].conf_update = 1;
    dev->tx_conf[channel].tx_start = 1;
}

static inline void rmt_ll_tx_stop(rmt_dev_t *dev, uint32_t channel)
{
    dev->tx_conf[channel].tx_stop = 1;
    dev->tx_conf[channel].conf_update = 1;
}

static inline void rmt_ll_rx_enable(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    dev->rx_conf[channel].conf1.rx_en = enable;
    dev->rx_conf[channel].conf1.conf_update = 1;
}

static inline void rmt_ll_tx_set_mem_blocks(rmt_dev_t *dev, uint32_t channel, uint8_t block_num)
{
    dev->tx_conf[channel].mem_size = block_num;
}

static inline void rmt_ll_rx_set_mem_blocks(rmt_dev_t *dev, uint32_t channel, uint8_t block_num)
{
    dev->rx_conf[channel].conf0.mem_size = block_num;
}

static inline uint32_t rmt_ll_tx_get_mem_blocks(rmt_dev_t *dev, uint32_t channel)
{
    return dev->tx_conf[channel].mem_size;
}

static inline uint32_t rmt_ll_rx_get_mem_blocks(rmt_dev_t *dev, uint32_t channel)
{
    return dev->rx_conf[channel].conf0.mem_size;
}

static inline void rmt_ll_tx_set_channel_clock_div(rmt_dev_t *dev, uint32_t channel, uint32_t div)
{
    HAL_FORCE_MODIFY_U32_REG_FIELD(dev->tx_conf[channel], div_cnt, div);
}

static inline void rmt_ll_rx_set_channel_clock_div(rmt_dev_t *dev, uint32_t channel, uint32_t div)
{
    HAL_FORCE_MODIFY_U32_REG_FIELD(dev->rx_conf[channel].conf0, div_cnt, div);
}

static inline uint32_t rmt_ll_tx_get_channel_clock_div(rmt_dev_t *dev, uint32_t channel)
{
    return HAL_FORCE_READ_U32_REG_FIELD(dev->tx_conf[channel], div_cnt);
}

static inline uint32_t rmt_ll_rx_get_channel_clock_div(rmt_dev_t *dev, uint32_t channel)
{
    return HAL_FORCE_READ_U32_REG_FIELD(dev->rx_conf[channel].conf0, div_cnt);
}
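
// Illustrative sketch: the per-channel divider further divides rmt_sclk, so one
// RMT duration tick lasts div / rmt_sclk seconds. For example, assuming an
// 80 MHz rmt_sclk, div = 80 gives 1 MHz ticks (1 us resolution); both the helper
// and the 80 MHz figure are assumptions for illustration, not values mandated here.
static inline uint32_t rmt_ll_example_channel_tick_hz(uint32_t sclk_hz, uint32_t div)
{
    return div ? (sclk_hz / div) : 0; // a divider of 0 is deliberately given no meaning here
}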

static inline void rmt_ll_tx_enable_pingpong(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    dev->tx_conf[channel].mem_tx_wrap_en = enable;
}

static inline void rmt_ll_rx_set_idle_thres(rmt_dev_t *dev, uint32_t channel, uint32_t thres)
{
    dev->rx_conf[channel].conf0.idle_thres = thres;
}

static inline uint32_t rmt_ll_rx_get_idle_thres(rmt_dev_t *dev, uint32_t channel)
{
    return dev->rx_conf[channel].conf0.idle_thres;
}

static inline void rmt_ll_rx_set_mem_owner(rmt_dev_t *dev, uint32_t channel, uint8_t owner)
{
    dev->rx_conf[channel].conf1.mem_owner = owner;
}

static inline uint32_t rmt_ll_rx_get_mem_owner(rmt_dev_t *dev, uint32_t channel)
{
    return dev->rx_conf[channel].conf1.mem_owner;
}
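
// Usage sketch (illustrative): arm a receive on one channel with the helpers above.
// The owner value 1 is assumed to hand the channel RAM to the receiver hardware,
// and the idle threshold of 12000 ticks is an arbitrary example; both must be
// checked against the TRM and the actual protocol timing.
static inline void rmt_ll_example_arm_rx(rmt_dev_t *dev, uint32_t channel)
{
    rmt_ll_rx_set_idle_thres(dev, channel, 12000); // end-of-frame gap, in channel ticks (assumed value)
    rmt_ll_rx_set_mem_owner(dev, channel, 1);      // let the receiver write channel RAM (assumed encoding)
    rmt_ll_rx_reset_pointer(dev, channel);         // start writing from the beginning of the block
    rmt_ll_rx_enable(dev, channel, true);
}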

static inline void rmt_ll_tx_enable_loop(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    dev->tx_conf[channel].tx_conti_mode = enable;
}

static inline bool rmt_ll_is_tx_loop_enabled(rmt_dev_t *dev, uint32_t channel)
{
    return dev->tx_conf[channel].tx_conti_mode;
}

static inline void rmt_ll_tx_set_loop_count(rmt_dev_t *dev, uint32_t channel, uint32_t count)
{
    dev->tx_lim[channel].tx_loop_num = count;
}

static inline void rmt_ll_tx_reset_loop(rmt_dev_t *dev, uint32_t channel)
{
    dev->tx_lim[channel].loop_count_reset = 1;
    dev->tx_lim[channel].loop_count_reset = 0;
}

static inline void rmt_ll_tx_enable_loop_count(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    dev->tx_lim[channel].tx_loop_cnt_en = enable;
}
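
// Usage sketch (illustrative): make a channel repeat the staged frame n times,
// assuming the frame has already been written to the channel RAM. n must not
// exceed RMT_LL_MAX_LOOP_COUNT.
static inline void rmt_ll_example_loop_tx(rmt_dev_t *dev, uint32_t channel, uint32_t n)
{
    rmt_ll_tx_enable_loop(dev, channel, true);       // continuous (loop) transmit mode
    rmt_ll_tx_reset_loop(dev, channel);              // clear any previous loop counter value
    rmt_ll_tx_set_loop_count(dev, channel, n);       // stop automatically after n iterations
    rmt_ll_tx_enable_loop_count(dev, channel, true);
    rmt_ll_tx_start(dev, channel);
}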

static inline void rmt_ll_tx_enable_sync(rmt_dev_t *dev, bool enable)
{
    dev->tx_sim.en = enable;
}

static inline void rmt_ll_tx_add_to_sync_group(rmt_dev_t *dev, uint32_t channel)
{
    dev->tx_sim.val |= 1 << channel;
}

static inline void rmt_ll_tx_remove_from_sync_group(rmt_dev_t *dev, uint32_t channel)
{
    dev->tx_sim.val &= ~(1 << channel);
}

static inline void rmt_ll_rx_enable_filter(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    dev->rx_conf[channel].conf1.rx_filter_en = enable;
}

static inline void rmt_ll_rx_set_filter_thres(rmt_dev_t *dev, uint32_t channel, uint32_t thres)
{
    HAL_FORCE_MODIFY_U32_REG_FIELD(dev->rx_conf[channel].conf1, rx_filter_thres, thres);
}

static inline void rmt_ll_tx_enable_idle(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    dev->tx_conf[channel].idle_out_en = enable;
}

static inline bool rmt_ll_is_tx_idle_enabled(rmt_dev_t *dev, uint32_t channel)
{
    return dev->tx_conf[channel].idle_out_en;
}

static inline void rmt_ll_tx_set_idle_level(rmt_dev_t *dev, uint32_t channel, uint8_t level)
{
    dev->tx_conf[channel].idle_out_lv = level;
}

static inline uint32_t rmt_ll_tx_get_idle_level(rmt_dev_t *dev, uint32_t channel)
{
    return dev->tx_conf[channel].idle_out_lv;
}

static inline uint32_t rmt_ll_rx_get_channel_status(rmt_dev_t *dev, uint32_t channel)
{
    return dev->rx_status[channel].val;
}

static inline uint32_t rmt_ll_tx_get_channel_status(rmt_dev_t *dev, uint32_t channel)
{
    return dev->tx_status[channel].val;
}

static inline void rmt_ll_tx_set_limit(rmt_dev_t *dev, uint32_t channel, uint32_t limit)
{
    dev->tx_lim[channel].limit = limit;
}

static inline void rmt_ll_rx_set_limit(rmt_dev_t *dev, uint32_t channel, uint32_t limit)
{
    dev->rx_lim[channel].rx_lim = limit;
}

static inline uint32_t rmt_ll_rx_get_limit(rmt_dev_t *dev, uint32_t channel)
{
    return dev->rx_lim[channel].rx_lim;
}

static inline void rmt_ll_enable_interrupt(rmt_dev_t *dev, uint32_t mask, bool enable)
{
    if (enable) {
        dev->int_ena.val |= mask;
    } else {
        dev->int_ena.val &= ~mask;
    }
}

static inline void rmt_ll_enable_tx_end_interrupt(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    if (enable) {
        dev->int_ena.val |= (1 << channel);
    } else {
        dev->int_ena.val &= ~(1 << channel);
    }
}

static inline void rmt_ll_enable_tx_err_interrupt(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    if (enable) {
        dev->int_ena.val |= (1 << (channel + 4));
    } else {
        dev->int_ena.val &= ~(1 << (channel + 4));
    }
}

static inline void rmt_ll_enable_rx_end_interrupt(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    if (enable) {
        dev->int_ena.val |= (1 << (channel + 2));
    } else {
        dev->int_ena.val &= ~(1 << (channel + 2));
    }
}

static inline void rmt_ll_enable_rx_err_interrupt(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    if (enable) {
        dev->int_ena.val |= (1 << (channel + 6));
    } else {
        dev->int_ena.val &= ~(1 << (channel + 6));
    }
}

static inline void rmt_ll_enable_tx_thres_interrupt(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    if (enable) {
        dev->int_ena.val |= (1 << (channel + 8));
    } else {
        dev->int_ena.val &= ~(1 << (channel + 8));
    }
}

static inline void rmt_ll_enable_tx_loop_interrupt(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    if (enable) {
        dev->int_ena.val |= (1 << (channel + 12));
    } else {
        dev->int_ena.val &= ~(1 << (channel + 12));
    }
}

static inline void rmt_ll_enable_rx_thres_interrupt(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    if (enable) {
        dev->int_ena.val |= (1 << (channel + 10));
    } else {
        dev->int_ena.val &= ~(1 << (channel + 10));
    }
}

static inline void rmt_ll_clear_tx_end_interrupt(rmt_dev_t *dev, uint32_t channel)
{
    dev->int_clr.val = (1 << (channel));
}

static inline void rmt_ll_clear_rx_end_interrupt(rmt_dev_t *dev, uint32_t channel)
{
    dev->int_clr.val = (1 << (channel + 2));
}

static inline void rmt_ll_clear_tx_err_interrupt(rmt_dev_t *dev, uint32_t channel)
{
    dev->int_clr.val = (1 << (channel + 4));
}

static inline void rmt_ll_clear_rx_err_interrupt(rmt_dev_t *dev, uint32_t channel)
{
    dev->int_clr.val = (1 << (channel + 6));
}

static inline void rmt_ll_clear_tx_thres_interrupt(rmt_dev_t *dev, uint32_t channel)
{
    dev->int_clr.val = (1 << (channel + 8));
}

static inline void rmt_ll_clear_tx_loop_interrupt(rmt_dev_t *dev, uint32_t channel)
{
    dev->int_clr.val = (1 << (channel + 12));
}

static inline void rmt_ll_clear_rx_thres_interrupt(rmt_dev_t *dev, uint32_t channel)
{
    dev->int_clr.val = (1 << (channel + 10));
}

static inline uint32_t rmt_ll_get_tx_end_interrupt_status(rmt_dev_t *dev)
{
    return dev->int_st.val & 0x03;
}

static inline uint32_t rmt_ll_get_rx_end_interrupt_status(rmt_dev_t *dev)
{
    return (dev->int_st.val >> 2) & 0x03;
}

static inline uint32_t rmt_ll_get_tx_err_interrupt_status(rmt_dev_t *dev)
{
    return (dev->int_st.val >> 4) & 0x03;
}

static inline uint32_t rmt_ll_get_rx_err_interrupt_status(rmt_dev_t *dev)
{
    return (dev->int_st.val >> 6) & 0x03;
}

static inline uint32_t rmt_ll_get_tx_thres_interrupt_status(rmt_dev_t *dev)
{
    return (dev->int_st.val >> 8) & 0x03;
}

static inline uint32_t rmt_ll_get_rx_thres_interrupt_status(rmt_dev_t *dev)
{
    return (dev->int_st.val >> 10) & 0x03;
}

static inline uint32_t rmt_ll_get_tx_loop_interrupt_status(rmt_dev_t *dev)
{
    return (dev->int_st.val >> 12) & 0x03;
}
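
// Illustrative ISR fragment (an assumption, not part of this header): dispatch
// and clear TX-end events using the status getters and clear helpers above. In
// each masked status word, bit n corresponds to channel n.
static inline void rmt_ll_example_handle_tx_end(rmt_dev_t *dev)
{
    uint32_t status = rmt_ll_get_tx_end_interrupt_status(dev);
    for (uint32_t channel = 0; channel < 2; channel++) {
        if (status & (1 << channel)) {
            rmt_ll_clear_tx_end_interrupt(dev, channel);
            // ... hand the event to the driver layer for this channel ...
        }
    }
}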

static inline void rmt_ll_tx_set_carrier_high_low_ticks(rmt_dev_t *dev, uint32_t channel, uint32_t high_ticks, uint32_t low_ticks)
{
    // In case the compiler optimizes a 32-bit access (e.g. s32i) into two 16-bit
    // accesses (e.g. s16i), which is not allowed on a peripheral register,
    // we take care of the "read-modify-write" procedure ourselves.
    __typeof__(dev->tx_carrier[0]) reg;
    reg.high = high_ticks;
    reg.low = low_ticks;
    dev->tx_carrier[channel].val = reg.val;
}

static inline void rmt_ll_rx_set_carrier_high_low_ticks(rmt_dev_t *dev, uint32_t channel, uint32_t high_ticks, uint32_t low_ticks)
{
    __typeof__(dev->rx_carrier[0]) reg;
    reg.high_thres = high_ticks;
    reg.low_thres = low_ticks;
    dev->rx_carrier[channel].val = reg.val;
}

static inline void rmt_ll_tx_get_carrier_high_low_ticks(rmt_dev_t *dev, uint32_t channel, uint32_t *high_ticks, uint32_t *low_ticks)
{
    *high_ticks = HAL_FORCE_READ_U32_REG_FIELD(dev->tx_carrier[channel], high);
    *low_ticks = HAL_FORCE_READ_U32_REG_FIELD(dev->tx_carrier[channel], low);
}

static inline void rmt_ll_rx_get_carrier_high_low_ticks(rmt_dev_t *dev, uint32_t channel, uint32_t *high_ticks, uint32_t *low_ticks)
{
    *high_ticks = HAL_FORCE_READ_U32_REG_FIELD(dev->rx_carrier[channel], high_thres);
    *low_ticks = HAL_FORCE_READ_U32_REG_FIELD(dev->rx_carrier[channel], low_thres);
}

static inline void rmt_ll_tx_enable_carrier_modulation(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    dev->tx_conf[channel].carrier_en = enable;
}

static inline void rmt_ll_rx_enable_carrier_demodulation(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    dev->rx_conf[channel].conf0.carrier_en = enable;
}

static inline void rmt_ll_tx_set_carrier_level(rmt_dev_t *dev, uint32_t channel, uint8_t level)
{
    dev->tx_conf[channel].carrier_out_lv = level;
}

static inline void rmt_ll_rx_set_carrier_level(rmt_dev_t *dev, uint32_t channel, uint8_t level)
{
    dev->rx_conf[channel].conf0.carrier_out_lv = level;
}

// If set to true, the carrier is enabled in all RMT states (idle, reading, sending).
// If set to false, the carrier is enabled only in the sending state (i.e. while there
// is effective data in RAM to be sent).
static inline void rmt_ll_tx_set_carrier_always_on(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    dev->tx_conf[channel].carrier_eff_en = !enable;
}
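
// Usage sketch (illustrative): configure TX carrier modulation. The tick counts
// below are placeholder values; the resulting carrier frequency and duty cycle
// depend on the carrier clock, which is not derived here.
static inline void rmt_ll_example_setup_tx_carrier(rmt_dev_t *dev, uint32_t channel)
{
    rmt_ll_tx_set_carrier_high_low_ticks(dev, channel, 9, 17); // high/low durations in carrier ticks (assumed values)
    rmt_ll_tx_set_carrier_level(dev, channel, 1);              // output the carrier on the high level
    rmt_ll_tx_set_carrier_always_on(dev, channel, false);      // modulate only while sending effective data
    rmt_ll_tx_enable_carrier_modulation(dev, channel, true);
}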

// Writes items to the specified TX channel memory, at the given offset and length.
// The caller should ensure that (length + off) <= (memory block number * SOC_RMT_MEM_WORDS_PER_CHANNEL).
static inline void rmt_ll_write_memory(rmt_mem_t *mem, uint32_t channel, const void *data, size_t length_in_words, size_t off)
{
    volatile uint32_t *to = (volatile uint32_t *)&mem->chan[channel].data32[off];
    uint32_t *from = (uint32_t *)data;
    while (length_in_words--) {
        *to++ = *from++;
    }
}
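
// Usage sketch (illustrative): stage a small payload at the start of channel 0's
// RAM block and kick off transmission. Raw 32-bit words are used instead of the
// rmt_item32_t bit-field layout to keep the example self-contained; the payload
// values are placeholders, with a zero word assumed to act as the end marker.
static inline void rmt_ll_example_send(rmt_dev_t *dev)
{
    const uint32_t items[] = {0x80FF00FF, 0x00000000};
    rmt_ll_tx_reset_pointer(dev, 0); // read from the start of the block
    rmt_ll_write_memory(RMT_LL_MEM_BASE, 0, items, sizeof(items) / sizeof(items[0]), 0);
    rmt_ll_tx_start(dev, 0);
}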

static inline void rmt_ll_rx_enable_pingpong(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    dev->rx_conf[channel].conf1.mem_rx_wrap_en = enable;
}

#ifdef __cplusplus
}
#endif