1 /*
2  * SPDX-FileCopyrightText: 2021 Espressif Systems (Shanghai) CO LTD
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  */
6 
7 #pragma once
8 
#include <stddef.h>
#include <stdbool.h>
#include <stdint.h>
#include "hal/misc.h"
#include "soc/rmt_struct.h"
13 
14 #ifdef __cplusplus
15 extern "C" {
16 #endif
17 
#define RMT_LL_MAX_LOOP_COUNT           (1023)/*!< Max loop count supported by the hardware */
19 #define RMT_LL_HW_BASE  (&RMT)
20 #define RMT_LL_MEM_BASE (&RMTMEM)
21 
// Note: TX and RX channel numbers are both indexed from zero in the LL driver
// i.e. tx_channel belongs to [0,3], and rx_channel belongs to [0,3]
24 
// Enable/disable the register clock and force the RMT memory clock on
static inline void rmt_ll_enable_drive_clock(rmt_dev_t *dev, bool enable)
{
    dev->sys_conf.clk_en = enable; // register clock gating
    dev->sys_conf.mem_clk_force_on = enable; // memory clock gating
}
30 
// Force RMT memory power down (enable=true) or force it powered up (enable=false)
static inline void rmt_ll_power_down_mem(rmt_dev_t *dev, bool enable)
{
    dev->sys_conf.mem_force_pu = !enable;
    dev->sys_conf.mem_force_pd = enable;
}
36 
// Best-effort check of whether the RMT memory is powered down, based on the force-pd/pu bits
static inline bool rmt_ll_is_mem_power_down(rmt_dev_t *dev)
{
    // the RTC domain can also power down RMT memory
    // so it's probably not enough to detect whether it's powered down or not
    // mem_force_pd has higher priority than mem_force_pu
    return (dev->sys_conf.mem_force_pd) || !(dev->sys_conf.mem_force_pu);
}
44 
// Enable/disable direct (non-FIFO) APB access to the RMT memory
static inline void rmt_ll_enable_mem_access(rmt_dev_t *dev, bool enable)
{
    dev->sys_conf.apb_fifo_mask = enable;
}
49 
// Select the group clock source and fractional divider.
// Formula: rmt_sclk = module_clock_src / (1 + div_num + div_a / div_b)
// `channel` is unused here: the clock configuration is shared by the whole group.
static inline void rmt_ll_set_group_clock_src(rmt_dev_t *dev, uint32_t channel, uint8_t src, uint8_t div_num, uint8_t div_a, uint8_t div_b)
{
    dev->sys_conf.sclk_active = 0; // stop sclk while reconfiguring
    dev->sys_conf.sclk_sel = src;
    HAL_FORCE_MODIFY_U32_REG_FIELD(dev->sys_conf, sclk_div_num, div_num);
    dev->sys_conf.sclk_div_a = div_a;
    dev->sys_conf.sclk_div_b = div_b;
    dev->sys_conf.sclk_active = 1; // re-enable sclk
}
60 
// Return the raw group clock source selector (`channel` is unused: source is group-wide)
static inline uint32_t rmt_ll_get_group_clock_src(rmt_dev_t *dev, uint32_t channel)
{
    return dev->sys_conf.sclk_sel;
}
65 
// Reset the clock divider of one TX channel (TX channels occupy the low bits of ref_cnt_rst)
static inline void rmt_ll_tx_reset_channel_clock_div(rmt_dev_t *dev, uint32_t channel)
{
    dev->ref_cnt_rst.val |= (1 << channel);
}
70 
// Reset the clock dividers of multiple TX channels at once (bit i of mask -> TX channel i)
static inline void rmt_ll_tx_reset_channels_clock_div(rmt_dev_t *dev, uint32_t channel_mask)
{
    dev->ref_cnt_rst.val |= channel_mask;
}
75 
// Reset the clock divider of one RX channel (RX channels start at bit 4 of ref_cnt_rst)
static inline void rmt_ll_rx_reset_channel_clock_div(rmt_dev_t *dev, uint32_t channel)
{
    dev->ref_cnt_rst.val |= (1 << (channel + 4));
}
80 
// Reset the TX memory read pointer and the APB-side access pointer by pulsing the reset bits
static inline void rmt_ll_tx_reset_pointer(rmt_dev_t *dev, uint32_t channel)
{
    dev->chnconf0[channel].mem_rd_rst_n = 1;
    dev->chnconf0[channel].mem_rd_rst_n = 0;
    dev->chnconf0[channel].apb_mem_rst_n = 1;
    dev->chnconf0[channel].apb_mem_rst_n = 0;
}
88 
// Reset the RX memory write pointer and the APB-side access pointer by pulsing the reset bits
static inline void rmt_ll_rx_reset_pointer(rmt_dev_t *dev, uint32_t channel)
{
    dev->chmconf[channel].conf1.mem_wr_rst_m = 1;
    dev->chmconf[channel].conf1.mem_wr_rst_m = 0;
    dev->chmconf[channel].conf1.apb_mem_rst_m = 1;
    dev->chmconf[channel].conf1.apb_mem_rst_m = 0;
}
96 
// Latch the channel configuration (conf_update) and start transmission
static inline void rmt_ll_tx_start(rmt_dev_t *dev, uint32_t channel)
{
    dev->chnconf0[channel].conf_update_n = 1;
    dev->chnconf0[channel].tx_start_n = 1;
}
102 
// Request transmission stop, then latch the configuration so the stop takes effect
static inline void rmt_ll_tx_stop(rmt_dev_t *dev, uint32_t channel)
{
    dev->chnconf0[channel].tx_stop_n = 1;
    dev->chnconf0[channel].conf_update_n = 1;
}
108 
// Enable/disable reception on an RX channel, then latch the configuration
static inline void rmt_ll_rx_enable(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    dev->chmconf[channel].conf1.rx_en_m = enable;
    dev->chmconf[channel].conf1.conf_update_m = 1;
}
114 
// Set how many memory blocks are assigned to the TX channel
static inline void rmt_ll_tx_set_mem_blocks(rmt_dev_t *dev, uint32_t channel, uint8_t block_num)
{
    dev->chnconf0[channel].mem_size_n = block_num;
}
119 
// Set how many memory blocks are assigned to the RX channel
static inline void rmt_ll_rx_set_mem_blocks(rmt_dev_t *dev, uint32_t channel, uint8_t block_num)
{
    dev->chmconf[channel].conf0.mem_size_m = block_num;
}
124 
// Return the number of memory blocks assigned to the TX channel
static inline uint32_t rmt_ll_tx_get_mem_blocks(rmt_dev_t *dev, uint32_t channel)
{
    return dev->chnconf0[channel].mem_size_n;
}
129 
// Return the number of memory blocks assigned to the RX channel
static inline uint32_t rmt_ll_rx_get_mem_blocks(rmt_dev_t *dev, uint32_t channel)
{
    return dev->chmconf[channel].conf0.mem_size_m;
}
134 
// Set the per-channel clock divider for a TX channel (forced 32-bit access via HAL macro)
static inline void rmt_ll_tx_set_channel_clock_div(rmt_dev_t *dev, uint32_t channel, uint32_t div)
{
    HAL_FORCE_MODIFY_U32_REG_FIELD(dev->chnconf0[channel], div_cnt_n, div);
}
139 
// Set the per-channel clock divider for an RX channel (forced 32-bit access via HAL macro)
static inline void rmt_ll_rx_set_channel_clock_div(rmt_dev_t *dev, uint32_t channel, uint32_t div)
{
    HAL_FORCE_MODIFY_U32_REG_FIELD(dev->chmconf[channel].conf0, div_cnt_m, div);
}
144 
// Return the per-channel clock divider of a TX channel
static inline uint32_t rmt_ll_tx_get_channel_clock_div(rmt_dev_t *dev, uint32_t channel)
{
    return HAL_FORCE_READ_U32_REG_FIELD(dev->chnconf0[channel], div_cnt_n);
}
149 
// Return the per-channel clock divider of an RX channel
static inline uint32_t rmt_ll_rx_get_channel_clock_div(rmt_dev_t *dev, uint32_t channel)
{
    return HAL_FORCE_READ_U32_REG_FIELD(dev->chmconf[channel].conf0, div_cnt_m);
}
154 
// Enable/disable TX memory wrap ("ping-pong") mode for the channel
static inline void rmt_ll_tx_enable_pingpong(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    dev->chnconf0[channel].mem_tx_wrap_en_n = enable;
}
159 
// Set the RX idle threshold (in channel clock ticks) that marks end-of-frame
static inline void rmt_ll_rx_set_idle_thres(rmt_dev_t *dev, uint32_t channel, uint32_t thres)
{
    dev->chmconf[channel].conf0.idle_thres_m = thres;
}
164 
// Return the RX idle threshold of the channel
static inline uint32_t rmt_ll_rx_get_idle_thres(rmt_dev_t *dev, uint32_t channel)
{
    return dev->chmconf[channel].conf0.idle_thres_m;
}
169 
// Set the owner of the RX channel memory (raw owner field value)
static inline void rmt_ll_rx_set_mem_owner(rmt_dev_t *dev, uint32_t channel, uint8_t owner)
{
    dev->chmconf[channel].conf1.mem_owner_m = owner;
}
174 
// Return the current owner of the RX channel memory
static inline uint32_t rmt_ll_rx_get_mem_owner(rmt_dev_t *dev, uint32_t channel)
{
    return dev->chmconf[channel].conf1.mem_owner_m;
}
179 
// Enable/disable TX continuous (loop) mode for the channel
static inline void rmt_ll_tx_enable_loop(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    dev->chnconf0[channel].tx_conti_mode_n = enable;
}
184 
// Return whether TX continuous (loop) mode is enabled for the channel
static inline bool rmt_ll_is_tx_loop_enabled(rmt_dev_t *dev, uint32_t channel)
{
    return dev->chnconf0[channel].tx_conti_mode_n;
}
189 
// Enable/disable hardware auto-stop when the loop count target is reached
static inline void rmt_ll_tx_enable_loop_autostop(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    dev->chn_tx_lim[channel].loop_stop_en_chn = enable;
}
194 
// Set the TX loop count target (should not exceed RMT_LL_MAX_LOOP_COUNT)
static inline void rmt_ll_tx_set_loop_count(rmt_dev_t *dev, uint32_t channel, uint32_t count)
{
    dev->chn_tx_lim[channel].tx_loop_num_chn = count;
}
199 
// Reset the TX loop counter by pulsing the reset bit
static inline void rmt_ll_tx_reset_loop(rmt_dev_t *dev, uint32_t channel)
{
    dev->chn_tx_lim[channel].loop_count_reset_chn = 1;
    dev->chn_tx_lim[channel].loop_count_reset_chn = 0;
}
205 
// Enable/disable counting of TX loop iterations
static inline void rmt_ll_tx_enable_loop_count(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    dev->chn_tx_lim[channel].tx_loop_cnt_en_chn = enable;
}
210 
// Enable/disable simultaneous (synchronized) transmission of the TX sync group
static inline void rmt_ll_tx_enable_sync(rmt_dev_t *dev, bool enable)
{
    dev->tx_sim.tx_sim_en = enable;
}
215 
// Add a TX channel to the sync group (bit [channel] of tx_sim)
static inline void rmt_ll_tx_add_to_sync_group(rmt_dev_t *dev, uint32_t channel)
{
    dev->tx_sim.val |= 1 << channel;
}
220 
// Remove a TX channel from the sync group (clear bit [channel] of tx_sim)
static inline void rmt_ll_tx_remove_from_sync_group(rmt_dev_t *dev, uint32_t channel)
{
    dev->tx_sim.val &= ~(1 << channel);
}
225 
// Enable/disable the RX glitch filter for the channel
static inline void rmt_ll_rx_enable_filter(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    dev->chmconf[channel].conf1.rx_filter_en_m = enable;
}
230 
// Set the RX glitch filter threshold (forced 32-bit access via HAL macro)
static inline void rmt_ll_rx_set_filter_thres(rmt_dev_t *dev, uint32_t channel, uint32_t thres)
{
    HAL_FORCE_MODIFY_U32_REG_FIELD(dev->chmconf[channel].conf1, rx_filter_thres_m, thres);
}
235 
// Enable/disable driving the TX output during idle state
static inline void rmt_ll_tx_enable_idle(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    dev->chnconf0[channel].idle_out_en_n = enable;
}
240 
// Return whether the TX idle output is enabled for the channel
static inline bool rmt_ll_is_tx_idle_enabled(rmt_dev_t *dev, uint32_t channel)
{
    return dev->chnconf0[channel].idle_out_en_n;
}
245 
// Set the output level driven while the TX channel is idle
static inline void rmt_ll_tx_set_idle_level(rmt_dev_t *dev, uint32_t channel, uint8_t level)
{
    dev->chnconf0[channel].idle_out_lv_n = level;
}
250 
// Return the configured TX idle output level
static inline uint32_t rmt_ll_tx_get_idle_level(rmt_dev_t *dev, uint32_t channel)
{
    return dev->chnconf0[channel].idle_out_lv_n;
}
255 
// Return the raw status register value of an RX channel
static inline uint32_t rmt_ll_rx_get_channel_status(rmt_dev_t *dev, uint32_t channel)
{
    return dev->chmstatus[channel].val;
}
260 
// Return the raw status register value of a TX channel
static inline uint32_t rmt_ll_tx_get_channel_status(rmt_dev_t *dev, uint32_t channel)
{
    return dev->chnstatus[channel].val;
}
265 
// Set the TX threshold (in words) that triggers the TX-threshold event
static inline void rmt_ll_tx_set_limit(rmt_dev_t *dev, uint32_t channel, uint32_t limit)
{
    dev->chn_tx_lim[channel].tx_lim_chn = limit;
}
270 
// Set the RX threshold (in words) that triggers the RX-threshold event
static inline void rmt_ll_rx_set_limit(rmt_dev_t *dev, uint32_t channel, uint32_t limit)
{
    dev->chm_rx_lim[channel].chm_rx_lim_reg = limit;
}
275 
// Return the RX threshold of the channel
static inline uint32_t rmt_ll_rx_get_limit(rmt_dev_t *dev, uint32_t channel)
{
    return dev->chm_rx_lim[channel].chm_rx_lim_reg;
}
280 
// Enable (set) or disable (clear) the interrupt bits selected by `mask`
static inline void rmt_ll_enable_interrupt(rmt_dev_t *dev, uint32_t mask, bool enable)
{
    // one read + one write of int_ena, same as the branch form
    uint32_t ena = dev->int_ena.val;
    dev->int_ena.val = enable ? (ena | mask) : (ena & ~mask);
}
289 
// Enable/disable the "TX end" interrupt (bit [channel] of int_ena)
static inline void rmt_ll_enable_tx_end_interrupt(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    const uint32_t mask = 1 << channel;
    uint32_t ena = dev->int_ena.val;
    dev->int_ena.val = enable ? (ena | mask) : (ena & ~mask);
}
298 
// Enable/disable the "TX error" interrupt (bit [channel + 4] of int_ena)
static inline void rmt_ll_enable_tx_err_interrupt(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    const uint32_t mask = 1 << (channel + 4);
    uint32_t ena = dev->int_ena.val;
    dev->int_ena.val = enable ? (ena | mask) : (ena & ~mask);
}
307 
// Enable/disable the "RX end" interrupt (bit [channel + 16] of int_ena)
static inline void rmt_ll_enable_rx_end_interrupt(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    const uint32_t mask = 1 << (channel + 16);
    uint32_t ena = dev->int_ena.val;
    dev->int_ena.val = enable ? (ena | mask) : (ena & ~mask);
}
316 
// Enable/disable the "RX error" interrupt (bit [channel + 20] of int_ena)
static inline void rmt_ll_enable_rx_err_interrupt(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    const uint32_t mask = 1 << (channel + 20);
    uint32_t ena = dev->int_ena.val;
    dev->int_ena.val = enable ? (ena | mask) : (ena & ~mask);
}
325 
// Enable/disable the "TX threshold" interrupt (bit [channel + 8] of int_ena)
static inline void rmt_ll_enable_tx_thres_interrupt(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    const uint32_t mask = 1 << (channel + 8);
    uint32_t ena = dev->int_ena.val;
    dev->int_ena.val = enable ? (ena | mask) : (ena & ~mask);
}
334 
// Enable/disable the "TX loop count" interrupt (bit [channel + 12] of int_ena)
static inline void rmt_ll_enable_tx_loop_interrupt(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    const uint32_t mask = 1 << (channel + 12);
    uint32_t ena = dev->int_ena.val;
    dev->int_ena.val = enable ? (ena | mask) : (ena & ~mask);
}
343 
// Enable/disable the "RX threshold" interrupt (bit [channel + 24] of int_ena)
static inline void rmt_ll_enable_rx_thres_interrupt(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    const uint32_t mask = 1 << (channel + 24);
    uint32_t ena = dev->int_ena.val;
    dev->int_ena.val = enable ? (ena | mask) : (ena & ~mask);
}
352 
// Clear the pending "TX end" interrupt (write-1-to-clear)
static inline void rmt_ll_clear_tx_end_interrupt(rmt_dev_t *dev, uint32_t channel)
{
    dev->int_clr.val = (1 << (channel));
}
357 
// Clear the pending "RX end" interrupt (write-1-to-clear)
static inline void rmt_ll_clear_rx_end_interrupt(rmt_dev_t *dev, uint32_t channel)
{
    dev->int_clr.val = (1 << (channel + 16));
}
362 
// Clear the pending "TX error" interrupt (write-1-to-clear)
static inline void rmt_ll_clear_tx_err_interrupt(rmt_dev_t *dev, uint32_t channel)
{
    dev->int_clr.val = (1 << (channel + 4));
}
367 
// Clear the pending "RX error" interrupt (write-1-to-clear)
static inline void rmt_ll_clear_rx_err_interrupt(rmt_dev_t *dev, uint32_t channel)
{
    dev->int_clr.val = (1 << (channel + 20));
}
372 
// Clear the pending "TX threshold" interrupt (write-1-to-clear)
static inline void rmt_ll_clear_tx_thres_interrupt(rmt_dev_t *dev, uint32_t channel)
{
    dev->int_clr.val = (1 << (channel + 8));
}
377 
// Clear the pending "TX loop count" interrupt (write-1-to-clear)
static inline void rmt_ll_clear_tx_loop_interrupt(rmt_dev_t *dev, uint32_t channel)
{
    dev->int_clr.val = (1 << (channel + 12));
}
382 
// Clear the pending "RX threshold" interrupt (write-1-to-clear)
static inline void rmt_ll_clear_rx_thres_interrupt(rmt_dev_t *dev, uint32_t channel)
{
    dev->int_clr.val = (1 << (channel + 24));
}
387 
// Return the "TX end" status bits for all 4 TX channels (bits [3:0] of int_st)
static inline uint32_t rmt_ll_get_tx_end_interrupt_status(rmt_dev_t *dev)
{
    return dev->int_st.val & 0x0F;
}
392 
// Return the "RX end" status bits for all 4 RX channels (bits [19:16] of int_st)
static inline uint32_t rmt_ll_get_rx_end_interrupt_status(rmt_dev_t *dev)
{
    return (dev->int_st.val >> 16) & 0x0F;
}
397 
// Return the "TX error" status bits for all 4 TX channels (bits [7:4] of int_st)
static inline uint32_t rmt_ll_get_tx_err_interrupt_status(rmt_dev_t *dev)
{
    return (dev->int_st.val >> 4) & 0x0F;
}
402 
// Return the "RX error" status bits for all 4 RX channels (bits [23:20] of int_st)
static inline uint32_t rmt_ll_get_rx_err_interrupt_status(rmt_dev_t *dev)
{
    return (dev->int_st.val >> 20) & 0x0F;
}
407 
// Return the "TX threshold" status bits for all 4 TX channels (bits [11:8] of int_st)
static inline uint32_t rmt_ll_get_tx_thres_interrupt_status(rmt_dev_t *dev)
{
    return (dev->int_st.val >> 8) & 0x0F;
}
412 
// Return the "RX threshold" status bits for all 4 RX channels (bits [27:24] of int_st)
static inline uint32_t rmt_ll_get_rx_thres_interrupt_status(rmt_dev_t *dev)
{
    return (dev->int_st.val >> 24) & 0x0F;
}
417 
// Return the "TX loop count" status bits for all 4 TX channels (bits [15:12] of int_st)
static inline uint32_t rmt_ll_get_tx_loop_interrupt_status(rmt_dev_t *dev)
{
    return (dev->int_st.val >> 12) & 0x0F;
}
422 
// Set the TX carrier high/low durations (in carrier clock ticks) with a single 32-bit write
static inline void rmt_ll_tx_set_carrier_high_low_ticks(rmt_dev_t *dev, uint32_t channel, uint32_t high_ticks, uint32_t low_ticks)
{
    // In case the compiler optimise a 32bit instruction (e.g. s32i) into two 16bit instruction (e.g. s16i, which is not allowed to access a register)
    // We take care of the "read-modify-write" procedure by ourselves.
    rmt_chncarrier_duty_reg_t reg;
    reg.carrier_high_chn = high_ticks;
    reg.carrier_low_chn = low_ticks;
    dev->chncarrier_duty[channel].val = reg.val;
}
432 
// Set the RX carrier-removal high/low thresholds with a single 32-bit register write
// (built in a local copy first — same rationale as the TX variant above: avoid partial-width stores)
static inline void rmt_ll_rx_set_carrier_high_low_ticks(rmt_dev_t *dev, uint32_t channel, uint32_t high_ticks, uint32_t low_ticks)
{
    rmt_chm_rx_carrier_rm_reg_t reg;
    reg.carrier_high_thres_chm = high_ticks;
    reg.carrier_low_thres_chm = low_ticks;
    dev->chm_rx_carrier_rm[channel].val = reg.val;
}
440 
// Read back the TX carrier high/low durations into the caller-provided out-parameters
static inline void rmt_ll_tx_get_carrier_high_low_ticks(rmt_dev_t *dev, uint32_t channel, uint32_t *high_ticks, uint32_t *low_ticks)
{
    *high_ticks = HAL_FORCE_READ_U32_REG_FIELD(dev->chncarrier_duty[channel], carrier_high_chn);
    *low_ticks = HAL_FORCE_READ_U32_REG_FIELD(dev->chncarrier_duty[channel], carrier_low_chn);
}
446 
// Read back the RX carrier-removal thresholds into the caller-provided out-parameters
static inline void rmt_ll_rx_get_carrier_high_low_ticks(rmt_dev_t *dev, uint32_t channel, uint32_t *high_ticks, uint32_t *low_ticks)
{
    *high_ticks = HAL_FORCE_READ_U32_REG_FIELD(dev->chm_rx_carrier_rm[channel], carrier_high_thres_chm);
    *low_ticks = HAL_FORCE_READ_U32_REG_FIELD(dev->chm_rx_carrier_rm[channel], carrier_low_thres_chm);
}
452 
// Enable/disable carrier modulation on the TX channel output
static inline void rmt_ll_tx_enable_carrier_modulation(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    dev->chnconf0[channel].carrier_en_n = enable;
}
457 
// Enable/disable carrier demodulation (carrier removal) on the RX channel input
static inline void rmt_ll_rx_enable_carrier_demodulation(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    dev->chmconf[channel].conf0.carrier_en_m = enable;
}
462 
// Set on which output level the TX carrier is modulated
static inline void rmt_ll_tx_set_carrier_level(rmt_dev_t *dev, uint32_t channel, uint8_t level)
{
    dev->chnconf0[channel].carrier_out_lv_n = level;
}
467 
// Set on which input level the RX carrier is expected/removed
static inline void rmt_ll_rx_set_carrier_level(rmt_dev_t *dev, uint32_t channel, uint8_t level)
{
    dev->chmconf[channel].conf0.carrier_out_lv_m = level;
}
472 
// set true: enable carrier in all RMT states (idle, reading, sending)
// set false: enable carrier only in the sending state (i.e. when there is effective data in RAM to be sent)
// true: carrier output in every state; false: carrier only while effective data is being sent
// (note the inversion: carrier_eff_en means "carrier only on effective data")
static inline void rmt_ll_tx_set_carrier_always_on(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    dev->chnconf0[channel].carrier_eff_en_n = !enable;
}
479 
480 //Writes items to the specified TX channel memory with the given offset and length.
481 //the caller should ensure that (length + off) <= (memory block * SOC_RMT_MEM_WORDS_PER_CHANNEL)
// Copy `length_in_words` 32-bit words from `data` into the given channel's RMT memory,
// starting at word offset `off`. The copy goes through volatile 32-bit accesses.
// The caller must ensure (off + length_in_words) <= (memory blocks * SOC_RMT_MEM_WORDS_PER_CHANNEL).
static inline void rmt_ll_write_memory(rmt_mem_t *mem, uint32_t channel, const void *data, size_t length_in_words, size_t off)
{
    volatile uint32_t *to = (volatile uint32_t *)&mem->chan[channel].data32[off];
    // fix: keep const qualification instead of casting it away from `data`
    const uint32_t *from = (const uint32_t *)data;
    while (length_in_words--) {
        *to++ = *from++;
    }
}
490 
// Enable/disable RX memory wrap ("ping-pong") mode for the channel
static inline void rmt_ll_rx_enable_pingpong(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    dev->chmconf[channel].conf1.mem_rx_wrap_en_m = enable;
}
495 
496 #ifdef __cplusplus
497 }
498 #endif
499