1 /*
2 * SPDX-FileCopyrightText: 2019-2022 Espressif Systems (Shanghai) CO LTD
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7 /**
8 * @note TX and RX channels are index from 0 in the LL driver, i.e. tx_channel = [0,7], rx_channel = [0,7]
9 */
10
11 #pragma once
12
13 #include <stdint.h>
14 #include <stdbool.h>
15 #include <stddef.h>
16 #include "hal/misc.h"
17 #include "hal/assert.h"
18 #include "soc/rmt_struct.h"
19 #include "hal/rmt_types.h"
20
21 #ifdef __cplusplus
22 extern "C" {
23 #endif
24
// Interrupt event bit helpers. On ESP32 each channel n owns three consecutive
// bits in the interrupt registers: n*3 => TX done, n*3+1 => RX done,
// n*3+2 => error (the error bit is shared by TX and RX on this target).
// The TX threshold event for channel n sits at bit n+24.
// Shifts use an unsigned constant so that bit 31 (TX threshold of channel 7)
// is formed without signed-integer-overflow undefined behavior.
#define RMT_LL_EVENT_TX_DONE(channel) (1UL << ((channel) * 3))
#define RMT_LL_EVENT_TX_THRES(channel) (1UL << ((channel) + 24))
#define RMT_LL_EVENT_TX_LOOP_END(channel) (0) // esp32 doesn't support tx loop count
#define RMT_LL_EVENT_TX_ERROR(channel) (1UL << ((channel) * 3 + 2))
#define RMT_LL_EVENT_RX_DONE(channel) (1UL << ((channel) * 3 + 1))
#define RMT_LL_EVENT_RX_THRES(channel) (0) // esp32 doesn't support rx wrap
#define RMT_LL_EVENT_RX_ERROR(channel) (1UL << ((channel) * 3 + 2))
#define RMT_LL_EVENT_TX_MASK(channel) (RMT_LL_EVENT_TX_DONE(channel) | RMT_LL_EVENT_TX_THRES(channel) | RMT_LL_EVENT_TX_LOOP_END(channel))
#define RMT_LL_EVENT_RX_MASK(channel) (RMT_LL_EVENT_RX_DONE(channel) | RMT_LL_EVENT_RX_THRES(channel))

#define RMT_LL_MAX_FILTER_VALUE 255
#define RMT_LL_MAX_IDLE_VALUE 65535
37
// Selector for the channel RAM access arbiter; the value is written to an RX
// channel's conf1.mem_owner field (see rmt_ll_rx_set_mem_owner()).
typedef enum {
    RMT_LL_MEM_OWNER_SW = 0, // software (CPU via APB) may access the memory block
    RMT_LL_MEM_OWNER_HW = 1, // hardware (receiver) may write the memory block
} rmt_ll_mem_owner_t;
42
43 /**
44 * @brief Enable clock gate for register and memory
45 *
46 * @param dev Peripheral instance address
47 * @param enable True to enable, False to disable
48 */
static inline void rmt_ll_enable_periph_clock(rmt_dev_t *dev, bool enable)
{
    // Group-wide control accessed through channel 0's conf0 register
    dev->conf_ch[0].conf0.clk_en = enable; // register clock gating
}
53
54 /**
55 * @brief Force power on the RMT memory block, regardless of the outside PMU logic
56 *
57 * @param dev Peripheral instance address
58 */
static inline void rmt_ll_mem_force_power_on(rmt_dev_t *dev)
{
    // No dedicated "force power on" control on ESP32; clearing mem_pd is done
    // by rmt_ll_mem_power_by_pmu(). Kept as a no-op for cross-target API parity.
    (void)dev;
}
63
64 /**
65 * @brief Force power off the RMT memory block, regardless of the outside PMU logic
66 *
67 * @param dev Peripheral instance address
68 */
static inline void rmt_ll_mem_force_power_off(rmt_dev_t *dev)
{
    // Group-wide mem_pd bit only exists in channel 0's conf0 register
    dev->conf_ch[0].conf0.mem_pd = 1;
}
73
74 /**
75 * @brief Power control the RMT memory block by the outside PMU logic
76 *
77 * @param dev Peripheral instance address
78 */
static inline void rmt_ll_mem_power_by_pmu(rmt_dev_t *dev)
{
    // Clearing mem_pd releases the forced power-down, handing control back to the PMU
    dev->conf_ch[0].conf0.mem_pd = 0;
}
83
84 /**
85 * @brief Enable APB accessing RMT memory in nonfifo mode
86 *
87 * @param dev Peripheral instance address
88 * @param enable True to enable, False to disable
89 */
static inline void rmt_ll_enable_mem_access_nonfifo(rmt_dev_t *dev, bool enable)
{
    // fifo_mask = 1 masks out the FIFO access path, enabling direct (non-FIFO)
    // APB access to the channel RAM
    dev->apb_conf.fifo_mask = enable;
}
94
95 /**
96 * @brief Set clock source and divider for RMT channel group
97 *
98 * @param dev Peripheral instance address
 * @param channel RMT channel number (on ESP32 the clock source is selected per channel)
100 * @param src Clock source
101 * @param divider_integral Integral part of the divider
102 * @param divider_denominator Denominator part of the divider
103 * @param divider_numerator Numerator part of the divider
104 */
static inline void rmt_ll_set_group_clock_src(rmt_dev_t *dev, uint32_t channel, rmt_clock_source_t src,
                                              uint32_t divider_integral, uint32_t divider_denominator, uint32_t divider_numerator)
{
    // ESP32 has no fractional group divider; the divider arguments are accepted
    // only for cross-target API compatibility.
    (void)divider_integral;
    (void)divider_denominator;
    (void)divider_numerator;
    if (src == RMT_CLK_SRC_APB) {
        dev->conf_ch[channel].conf1.ref_always_on = 1;
    } else if (src == RMT_CLK_SRC_REF_TICK) {
        dev->conf_ch[channel].conf1.ref_always_on = 0;
    } else {
        HAL_ASSERT(false && "unsupported RMT clock source");
    }
}
123
124 /**
125 * @brief Enable RMT peripheral source clock
126 *
127 * @note RMT doesn't support enable/disable clock source, this function is only for compatibility
128 *
129 * @param dev Peripheral instance address
130 * @param en True to enable, False to disable
131 */
rmt_ll_enable_group_clock(rmt_dev_t * dev,bool en)132 static inline void rmt_ll_enable_group_clock(rmt_dev_t *dev, bool en)
133 {
134 (void)dev;
135 (void)en;
136 }
137
138 ////////////////////////////////////////TX Channel Specific/////////////////////////////////////////////////////////////
139
140 /**
141 * @brief Reset clock divider for TX channels by mask
142 *
143 * @param dev Peripheral instance address
144 * @param channel_mask Mask of TX channels
145 */
static inline void rmt_ll_tx_reset_channels_clock_div(rmt_dev_t *dev, uint32_t channel_mask)
{
    // Walk the set bits of the mask; ESP32 has 8 channels, so only the low byte matters
    uint32_t pending = channel_mask & 0xFF;
    for (uint32_t ch = 0; pending != 0; ch++, pending >>= 1) {
        if (pending & 1) {
            dev->conf_ch[ch].conf1.ref_cnt_rst = 1;
        }
    }
}
154
155 /**
156 * @brief Set TX channel clock divider
157 *
158 * @param dev Peripheral instance address
159 * @param channel RMT TX channel number
160 * @param div Division value
161 */
static inline void rmt_ll_tx_set_channel_clock_div(rmt_dev_t *dev, uint32_t channel, uint32_t div)
{
    HAL_ASSERT(div >= 1 && div <= 256 && "divider out of range");
    // The 8-bit div_cnt field encodes a division of 256 as 0
    uint32_t encoded = (div >= 256) ? 0 : div;
    HAL_FORCE_MODIFY_U32_REG_FIELD(dev->conf_ch[channel].conf0, div_cnt, encoded);
}
171
172 /**
173 * @brief Reset RMT reading pointer for TX channel
174 *
175 * @param dev Peripheral instance address
176 * @param channel RMT TX channel number
177 */
__attribute__((always_inline))
static inline void rmt_ll_tx_reset_pointer(rmt_dev_t *dev, uint32_t channel)
{
    // Pulse (set then clear) both reset bits: first the hardware read pointer,
    // then the APB-side access pointer into the channel RAM. Order preserved
    // deliberately; these are self-managed reset strobes.
    dev->conf_ch[channel].conf1.mem_rd_rst = 1;
    dev->conf_ch[channel].conf1.mem_rd_rst = 0;
    dev->conf_ch[channel].conf1.apb_mem_rst = 1;
    dev->conf_ch[channel].conf1.apb_mem_rst = 0;
}
186
187 /**
188 * @brief Start transmitting for TX channel
189 *
190 * @param dev Peripheral instance address
191 * @param channel RMT TX channel number
192 */
__attribute__((always_inline))
static inline void rmt_ll_tx_start(rmt_dev_t *dev, uint32_t channel)
{
    // Trigger the transmitter for this channel
    dev->conf_ch[channel].conf1.tx_start = 1;
}
198
199 /**
200 * @brief Set memory block number for TX channel
201 *
202 * @param dev Peripheral instance address
203 * @param channel RMT TX channel number
204 * @param block_num memory block number
205 */
static inline void rmt_ll_tx_set_mem_blocks(rmt_dev_t *dev, uint32_t channel, uint8_t block_num)
{
    // mem_size is shared between TX and RX roles of the same channel (same conf0 field)
    dev->conf_ch[channel].conf0.mem_size = block_num;
}
210
211 /**
212 * @brief Enable TX wrap
213 *
214 * @param dev Peripheral instance address
215 * @param channel RMT TX channel number
216 * @param enable True to enable, False to disable
217 */
static inline void rmt_ll_tx_enable_wrap(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    // The wrap switch is a single group-wide bit on ESP32; `channel` exists only
    // for cross-target API compatibility. Void it explicitly to silence
    // -Wunused-parameter, matching the file's (void) convention elsewhere.
    (void)channel;
    dev->apb_conf.mem_tx_wrap_en = enable;
}
222
223 /**
224 * @brief Enable transmitting in a loop
225 *
226 * @param dev Peripheral instance address
227 * @param channel RMT TX channel number
228 * @param enable True to enable, False to disable
229 */
__attribute__((always_inline))
static inline void rmt_ll_tx_enable_loop(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    // Continuous mode: transmitter restarts from the channel RAM when it reaches the end
    dev->conf_ch[channel].conf1.tx_conti_mode = enable;
}
235
236 /**
237 * @brief Fix the output level when TX channel is in IDLE state
238 *
239 * @param dev Peripheral instance address
240 * @param channel RMT TX channel number
241 * @param level IDLE level (1 => high, 0 => low)
242 * @param enable True to fix the IDLE level, otherwise the IDLE level is determined by EOF encoder
243 */
__attribute__((always_inline))
static inline void rmt_ll_tx_fix_idle_level(rmt_dev_t *dev, uint32_t channel, uint8_t level, bool enable)
{
    // When idle_out_en is set, the output pin is forced to idle_out_lv while idle;
    // otherwise the idle level is left to the last transmitted symbol
    dev->conf_ch[channel].conf1.idle_out_en = enable;
    dev->conf_ch[channel].conf1.idle_out_lv = level;
}
250
251 /**
252 * @brief Set the amount of RMT symbols that can trigger the limitation interrupt
253 *
254 * @param dev Peripheral instance address
255 * @param channel RMT TX channel number
256 * @param limit Specify the number of symbols
257 */
static inline void rmt_ll_tx_set_limit(rmt_dev_t *dev, uint32_t channel, uint32_t limit)
{
    // Number of symbols after which the TX threshold event (bit channel+24) fires
    dev->tx_lim_ch[channel].limit = limit;
}
262
263 /**
264 * @brief Set high and low duration of carrier signal
265 *
266 * @param dev Peripheral instance address
267 * @param channel RMT TX channel number
268 * @param high_ticks Duration of high level
269 * @param low_ticks Duration of low level
270 */
static inline void rmt_ll_tx_set_carrier_high_low_ticks(rmt_dev_t *dev, uint32_t channel, uint32_t high_ticks, uint32_t low_ticks)
{
    HAL_ASSERT(high_ticks >= 1 && high_ticks <= 65536 && low_ticks >= 1 && low_ticks <= 65536 && "out of range high/low ticks");
    // The 16-bit duration fields encode 65536 ticks as 0
    uint32_t hi = (high_ticks >= 65536) ? 0 : high_ticks;
    uint32_t lo = (low_ticks >= 65536) ? 0 : low_ticks;
    HAL_FORCE_MODIFY_U32_REG_FIELD(dev->carrier_duty_ch[channel], high, hi);
    HAL_FORCE_MODIFY_U32_REG_FIELD(dev->carrier_duty_ch[channel], low, lo);
}
284
285 /**
286 * @brief Enable modulating carrier signal to TX channel
287 *
288 * @param dev Peripheral instance address
289 * @param channel RMT TX channel number
290 * @param enable True to enable, False to disable
291 */
static inline void rmt_ll_tx_enable_carrier_modulation(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    // Switch the carrier generator on/off for this channel
    dev->conf_ch[channel].conf0.carrier_en = enable;
}
296
297 /**
298 * @brief Set on high or low to modulate the carrier signal
299 *
300 * @param dev Peripheral instance address
301 * @param channel RMT TX channel number
302 * @param level Which level to modulate on (0=>low level, 1=>high level)
303 */
static inline void rmt_ll_tx_set_carrier_level(rmt_dev_t *dev, uint32_t channel, uint8_t level)
{
    // Carrier is applied on the chosen output level (0 => low, 1 => high)
    dev->conf_ch[channel].conf0.carrier_out_lv = level;
}
308
309 ////////////////////////////////////////RX Channel Specific/////////////////////////////////////////////////////////////
310
311 /**
312 * @brief Reset clock divider for RX channels by mask
313 *
314 * @param dev Peripheral instance address
315 * @param channel_mask Mask of RX channels
316 */
static inline void rmt_ll_rx_reset_channels_clock_div(rmt_dev_t *dev, uint32_t channel_mask)
{
    // TX and RX share the same divider-reset bit per channel; walk the mask's set bits
    uint32_t pending = channel_mask & 0xFF; // 8 channels on ESP32
    for (uint32_t ch = 0; pending != 0; ch++, pending >>= 1) {
        if (pending & 1) {
            dev->conf_ch[ch].conf1.ref_cnt_rst = 1;
        }
    }
}
325
326 /**
327 * @brief Set RX channel clock divider
328 *
329 * @param dev Peripheral instance address
330 * @param channel RMT RX channel number
331 * @param div Division value
332 */
static inline void rmt_ll_rx_set_channel_clock_div(rmt_dev_t *dev, uint32_t channel, uint32_t div)
{
    HAL_ASSERT(div >= 1 && div <= 256 && "divider out of range");
    // A division of 256 is encoded as 0 in the 8-bit div_cnt field
    uint32_t encoded = (div >= 256) ? 0 : div;
    HAL_FORCE_MODIFY_U32_REG_FIELD(dev->conf_ch[channel].conf0, div_cnt, encoded);
}
342
343 /**
344 * @brief Reset RMT writing pointer for RX channel
345 *
346 * @param dev Peripheral instance address
347 * @param channel RMT RX channel number
348 */
__attribute__((always_inline))
static inline void rmt_ll_rx_reset_pointer(rmt_dev_t *dev, uint32_t channel)
{
    // Pulse (set then clear) both reset bits: hardware write pointer first,
    // then the APB-side access pointer. Order preserved deliberately.
    dev->conf_ch[channel].conf1.mem_wr_rst = 1;
    dev->conf_ch[channel].conf1.mem_wr_rst = 0;
    dev->conf_ch[channel].conf1.apb_mem_rst = 1;
    dev->conf_ch[channel].conf1.apb_mem_rst = 0;
}
357
358 /**
359 * @brief Enable receiving for RX channel
360 *
361 * @param dev Peripheral instance address
362 * @param channel RMT RX channel number
363 * @param enable True to enable, False to disable
364 */
__attribute__((always_inline))
static inline void rmt_ll_rx_enable(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    // Start/stop the receiver for this channel
    dev->conf_ch[channel].conf1.rx_en = enable;
}
370
371 /**
372 * @brief Set memory block number for RX channel
373 *
374 * @param dev Peripheral instance address
375 * @param channel RMT RX channel number
376 * @param block_num memory block number
377 */
static inline void rmt_ll_rx_set_mem_blocks(rmt_dev_t *dev, uint32_t channel, uint8_t block_num)
{
    // mem_size is shared between TX and RX roles of the same channel (same conf0 field)
    dev->conf_ch[channel].conf0.mem_size = block_num;
}
382
383 /**
384 * @brief Set the time length for RX channel before going into IDLE state
385 *
386 * @param dev Peripheral instance address
387 * @param channel RMT RX channel number
388 * @param thres Time length threshold
389 */
__attribute__((always_inline))
static inline void rmt_ll_rx_set_idle_thres(rmt_dev_t *dev, uint32_t channel, uint32_t thres)
{
    // Pulses longer than this (in channel clock ticks) end the receive session;
    // max value is RMT_LL_MAX_IDLE_VALUE
    HAL_FORCE_MODIFY_U32_REG_FIELD(dev->conf_ch[channel].conf0, idle_thres, thres);
}
395
396 /**
397 * @brief Set RMT memory owner for RX channel
398 *
399 * @param dev Peripheral instance address
400 * @param channel RMT RX channel number
401 * @param owner Memory owner
402 */
__attribute__((always_inline))
static inline void rmt_ll_rx_set_mem_owner(rmt_dev_t *dev, uint32_t channel, rmt_ll_mem_owner_t owner)
{
    // Hand the channel RAM to the receiver (HW) before reception, back to SW afterwards
    dev->conf_ch[channel].conf1.mem_owner = owner;
}
408
409 /**
410 * @brief Enable filter for RX channel
411 *
412 * @param dev Peripheral instance address
 * @param channel RMT RX channel number
414 * @param enable True to enable, False to disable
415 */
__attribute__((always_inline))
static inline void rmt_ll_rx_enable_filter(rmt_dev_t *dev, uint32_t channel, bool enable)
{
    // Glitch filter: drops pulses narrower than the configured filter threshold
    dev->conf_ch[channel].conf1.rx_filter_en = enable;
}
421
422 /**
423 * @brief Set RX channel filter threshold (i.e. the maximum width of one pulse signal that would be treated as a noise)
424 *
425 * @param dev Peripheral instance address
426 * @param channel RMT RX channel number
427 * @param thres Filter threshold
428 */
__attribute__((always_inline))
static inline void rmt_ll_rx_set_filter_thres(rmt_dev_t *dev, uint32_t channel, uint32_t thres)
{
    // Max value is RMT_LL_MAX_FILTER_VALUE (8-bit field)
    HAL_FORCE_MODIFY_U32_REG_FIELD(dev->conf_ch[channel].conf1, rx_filter_thres, thres);
}
434
435 /**
436 * @brief Get RMT memory write cursor offset
437 *
438 * @param dev Peripheral instance address
439 * @param channel RMT RX channel number
440 * @return writer offset
441 */
__attribute__((always_inline))
static inline uint32_t rmt_ll_rx_get_memory_writer_offset(rmt_dev_t *dev, uint32_t channel)
{
    // Low 10 bits of the status word hold an address into the whole channel RAM;
    // subtract the channel's base (64 symbols per memory block) to get an offset
    // relative to this channel's own block
    return (dev->status_ch[channel] & 0x3FF) - (channel) * 64;
}
447
448 //////////////////////////////////////////Interrupt Specific////////////////////////////////////////////////////////////
449
450 /**
451 * @brief Enable RMT interrupt for specific event mask
452 *
453 * @param dev Peripheral instance address
454 * @param mask Event mask
455 * @param enable True to enable, False to disable
456 */
457 __attribute__((always_inline))
rmt_ll_enable_interrupt(rmt_dev_t * dev,uint32_t mask,bool enable)458 static inline void rmt_ll_enable_interrupt(rmt_dev_t *dev, uint32_t mask, bool enable)
459 {
460 if (enable) {
461 dev->int_ena.val |= mask;
462 } else {
463 dev->int_ena.val &= ~mask;
464 }
465 }
466
467 /**
468 * @brief Clear RMT interrupt status by mask
469 *
470 * @param dev Peripheral instance address
 * @param mask Interrupt status mask
472 */
__attribute__((always_inline))
static inline void rmt_ll_clear_interrupt_status(rmt_dev_t *dev, uint32_t mask)
{
    // Write-1-to-clear register: plain assignment, no read-modify-write needed
    dev->int_clr.val = mask;
}
478
479 /**
480 * @brief Get interrupt status register address
481 *
482 * @param dev Peripheral instance address
483 * @return Register address
484 */
static inline volatile void *rmt_ll_get_interrupt_status_reg(rmt_dev_t *dev)
{
    // Exposes the masked interrupt status register address (e.g. for ISR dispatchers)
    return &dev->int_st;
}
489
490 /**
491 * @brief Get interrupt status for TX channel
492 *
493 * @param dev Peripheral instance address
494 * @param channel RMT TX channel number
495 * @return Interrupt status
496 */
__attribute__((always_inline))
static inline uint32_t rmt_ll_tx_get_interrupt_status(rmt_dev_t *dev, uint32_t channel)
{
    // Masked (post-enable) status, filtered to this channel's TX events
    return dev->int_st.val & RMT_LL_EVENT_TX_MASK(channel);
}
502
503 /**
504 * @brief Get interrupt raw status for TX channel
505 *
506 * @param dev Peripheral instance address
507 * @param channel RMT TX channel number
508 * @return Interrupt raw status
509 */
static inline uint32_t rmt_ll_tx_get_interrupt_status_raw(rmt_dev_t *dev, uint32_t channel)
{
    // Raw (pre-enable) status; also includes the error bit, which the TX mask omits
    return dev->int_raw.val & (RMT_LL_EVENT_TX_MASK(channel) | RMT_LL_EVENT_TX_ERROR(channel));
}
514
515 /**
516 * @brief Get interrupt raw status for RX channel
517 *
518 * @param dev Peripheral instance address
519 * @param channel RMT RX channel number
520 * @return Interrupt raw status
521 */
__attribute__((always_inline))
static inline uint32_t rmt_ll_rx_get_interrupt_status_raw(rmt_dev_t *dev, uint32_t channel)
{
    // Raw (pre-enable) status; also includes the error bit, which the RX mask omits
    return dev->int_raw.val & (RMT_LL_EVENT_RX_MASK(channel) | RMT_LL_EVENT_RX_ERROR(channel));
}
527
528 /**
529 * @brief Get interrupt status for RX channel
530 *
531 * @param dev Peripheral instance address
532 * @param channel RMT RX channel number
533 * @return Interrupt status
534 */
__attribute__((always_inline))
static inline uint32_t rmt_ll_rx_get_interrupt_status(rmt_dev_t *dev, uint32_t channel)
{
    // Masked (post-enable) status, filtered to this channel's RX events
    return dev->int_st.val & RMT_LL_EVENT_RX_MASK(channel);
}
540
541 //////////////////////////////////////////Deprecated Functions//////////////////////////////////////////////////////////
542 /////////////////////////////The following functions are only used by the legacy driver/////////////////////////////////
543 /////////////////////////////They might be removed in the next major release (ESP-IDF 6.0)//////////////////////////////
544 ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
545
// [Deprecated] Return the raw status word of a TX channel (legacy driver only)
__attribute__((always_inline))
static inline uint32_t rmt_ll_tx_get_status_word(rmt_dev_t *dev, uint32_t channel)
{
    return dev->status_ch[channel];
}
551
// [Deprecated] Return the raw status word of an RX channel (legacy driver only)
__attribute__((always_inline))
static inline uint32_t rmt_ll_rx_get_status_word(rmt_dev_t *dev, uint32_t channel)
{
    return dev->status_ch[channel];
}
557
558 __attribute__((always_inline))
rmt_ll_tx_get_channel_clock_div(rmt_dev_t * dev,uint32_t channel)559 static inline uint32_t rmt_ll_tx_get_channel_clock_div(rmt_dev_t *dev, uint32_t channel)
560 {
561 uint32_t div = HAL_FORCE_READ_U32_REG_FIELD(dev->conf_ch[channel].conf0, div_cnt);
562 return div == 0 ? 256 : div;
563 }
564
565 __attribute__((always_inline))
rmt_ll_rx_get_channel_clock_div(rmt_dev_t * dev,uint32_t channel)566 static inline uint32_t rmt_ll_rx_get_channel_clock_div(rmt_dev_t *dev, uint32_t channel)
567 {
568 uint32_t div = HAL_FORCE_READ_U32_REG_FIELD(dev->conf_ch[channel].conf0, div_cnt);
569 return div == 0 ? 256 : div;
570 }
571
// [Deprecated] Read back the RX idle threshold of a channel
__attribute__((always_inline))
static inline uint32_t rmt_ll_rx_get_idle_thres(rmt_dev_t *dev, uint32_t channel)
{
    return HAL_FORCE_READ_U32_REG_FIELD(dev->conf_ch[channel].conf0, idle_thres);
}
577
// [Deprecated] Read back the number of memory blocks assigned to a TX channel
__attribute__((always_inline))
static inline uint32_t rmt_ll_tx_get_mem_blocks(rmt_dev_t *dev, uint32_t channel)
{
    return dev->conf_ch[channel].conf0.mem_size;
}
583
// [Deprecated] Read back the number of memory blocks assigned to an RX channel
__attribute__((always_inline))
static inline uint32_t rmt_ll_rx_get_mem_blocks(rmt_dev_t *dev, uint32_t channel)
{
    return dev->conf_ch[channel].conf0.mem_size;
}
589
// [Deprecated] Whether continuous (loop) transmission is enabled on a channel
__attribute__((always_inline))
static inline bool rmt_ll_tx_is_loop_enabled(rmt_dev_t *dev, uint32_t channel)
{
    return dev->conf_ch[channel].conf1.tx_conti_mode;
}
595
596 __attribute__((always_inline))
rmt_ll_get_group_clock_src(rmt_dev_t * dev,uint32_t channel)597 static inline rmt_clock_source_t rmt_ll_get_group_clock_src(rmt_dev_t *dev, uint32_t channel)
598 {
599 if (dev->conf_ch[channel].conf1.ref_always_on) {
600 return RMT_CLK_SRC_APB;
601 }
602 return RMT_CLK_SRC_REF_TICK;
603 }
604
// [Deprecated] Whether the fixed idle output level is enabled on a channel
__attribute__((always_inline))
static inline bool rmt_ll_tx_is_idle_enabled(rmt_dev_t *dev, uint32_t channel)
{
    return dev->conf_ch[channel].conf1.idle_out_en;
}
610
// [Deprecated] Read back the configured idle output level of a channel
__attribute__((always_inline))
static inline uint32_t rmt_ll_tx_get_idle_level(rmt_dev_t *dev, uint32_t channel)
{
    return dev->conf_ch[channel].conf1.idle_out_lv;
}
616
// [Deprecated] Whether the RMT memory block is force powered down
static inline bool rmt_ll_is_mem_force_powered_down(rmt_dev_t *dev)
{
    // Only conf0 register of channel0 has `mem_pd`
    return dev->conf_ch[0].conf0.mem_pd;
}
622
// [Deprecated] Read back the memory owner of an RX channel (see rmt_ll_mem_owner_t)
__attribute__((always_inline))
static inline uint32_t rmt_ll_rx_get_mem_owner(rmt_dev_t *dev, uint32_t channel)
{
    return dev->conf_ch[channel].conf1.mem_owner;
}
628
// [Deprecated] Compact the per-channel TX-done bits (bit n*3 for channel n)
// into a contiguous mask: bit n set => channel n finished transmitting.
__attribute__((always_inline))
static inline uint32_t rmt_ll_get_tx_end_interrupt_status(rmt_dev_t *dev)
{
    uint32_t status = dev->int_st.val;
    return ((status & 0x01) >> 0) | ((status & 0x08) >> 2) | ((status & 0x40) >> 4) | ((status & 0x200) >> 6) |
           ((status & 0x1000) >> 8) | ((status & 0x8000) >> 10) | ((status & 0x40000) >> 12) | ((status & 0x200000) >> 14);
}
636
// [Deprecated] Compact the per-channel RX-done bits (bit n*3+1 for channel n)
// into a contiguous mask: bit n set => channel n finished receiving.
__attribute__((always_inline))
static inline uint32_t rmt_ll_get_rx_end_interrupt_status(rmt_dev_t *dev)
{
    uint32_t status = dev->int_st.val;
    return ((status & 0x02) >> 1) | ((status & 0x10) >> 3) | ((status & 0x80) >> 5) | ((status & 0x400) >> 7) |
           ((status & 0x2000) >> 9) | ((status & 0x10000) >> 11) | ((status & 0x80000) >> 13) | ((status & 0x400000) >> 15);
}
644
// [Deprecated] Compact the per-channel error bits (bit n*3+2 for channel n)
// into a contiguous mask: bit n set => channel n raised an error.
__attribute__((always_inline))
static inline uint32_t rmt_ll_get_tx_err_interrupt_status(rmt_dev_t *dev)
{
    uint32_t status = dev->int_st.val;
    return ((status & 0x04) >> 2) | ((status & 0x20) >> 4) | ((status & 0x100) >> 6) | ((status & 0x800) >> 8) |
           ((status & 0x4000) >> 10) | ((status & 0x20000) >> 12) | ((status & 0x100000) >> 14) | ((status & 0x800000) >> 16);
}
652
// [Deprecated] Same bit extraction as the TX variant: on ESP32 the error
// interrupt bit (n*3+2) is shared between the TX and RX roles of a channel
// (see RMT_LL_EVENT_TX_ERROR / RMT_LL_EVENT_RX_ERROR).
__attribute__((always_inline))
static inline uint32_t rmt_ll_get_rx_err_interrupt_status(rmt_dev_t *dev)
{
    uint32_t status = dev->int_st.val;
    return ((status & 0x04) >> 2) | ((status & 0x20) >> 4) | ((status & 0x100) >> 6) | ((status & 0x800) >> 8) |
           ((status & 0x4000) >> 10) | ((status & 0x20000) >> 12) | ((status & 0x100000) >> 14) | ((status & 0x800000) >> 16);
}
660
// [Deprecated] Per-channel TX threshold events already sit in a contiguous
// field (bits 24..31); shift them down so bit n corresponds to channel n.
__attribute__((always_inline))
static inline uint32_t rmt_ll_get_tx_thres_interrupt_status(rmt_dev_t *dev)
{
    uint32_t status = dev->int_st.val;
    return (status & 0xFF000000) >> 24;
}
667
668 #ifdef __cplusplus
669 }
670 #endif
671