1 /*
2 * SPDX-FileCopyrightText: 2015-2021 Espressif Systems (Shanghai) CO LTD
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6 #include <string.h>
7 #include "esp_types.h"
8 #include "esp_attr.h"
9 #include "esp_intr_alloc.h"
10 #include "esp_log.h"
11 #include "esp_err.h"
12 #include "esp_check.h"
13 #include "malloc.h"
14 #include "freertos/FreeRTOS.h"
15 #include "freertos/semphr.h"
16 #include "freertos/ringbuf.h"
17 #include "hal/uart_hal.h"
18 #include "hal/gpio_hal.h"
19 #include "soc/uart_periph.h"
20 #include "soc/rtc_cntl_reg.h"
21 #include "driver/uart.h"
22 #include "driver/gpio.h"
23 #include "driver/uart_select.h"
24 #include "driver/periph_ctrl.h"
25 #include "sdkconfig.h"
26 #include "esp_rom_gpio.h"
27
28 #if CONFIG_IDF_TARGET_ESP32
29 #include "esp32/clk.h"
30 #elif CONFIG_IDF_TARGET_ESP32S2
31 #include "esp32s2/clk.h"
32 #elif CONFIG_IDF_TARGET_ESP32S3
33 #include "esp32s3/clk.h"
34 #elif CONFIG_IDF_TARGET_ESP32C3
35 #include "esp32c3/clk.h"
36 #elif CONFIG_IDF_TARGET_ESP32H2
37 #include "esp32h2/clk.h"
38 #endif
39
40 #ifdef CONFIG_UART_ISR_IN_IRAM
41 #define UART_ISR_ATTR IRAM_ATTR
42 #define UART_MALLOC_CAPS (MALLOC_CAP_INTERNAL | MALLOC_CAP_8BIT)
43 #else
44 #define UART_ISR_ATTR
45 #define UART_MALLOC_CAPS MALLOC_CAP_DEFAULT
46 #endif
47
48 #define XOFF (0x13)
49 #define XON (0x11)
50
51 static const char *UART_TAG = "uart";
52
53 #define UART_EMPTY_THRESH_DEFAULT (10)
54 #define UART_FULL_THRESH_DEFAULT (120)
55 #define UART_TOUT_THRESH_DEFAULT (10)
56 #define UART_CLKDIV_FRAG_BIT_WIDTH (3)
57 #define UART_TX_IDLE_NUM_DEFAULT (0)
58 #define UART_PATTERN_DET_QLEN_DEFAULT (10)
59 #define UART_MIN_WAKEUP_THRESH (UART_LL_MIN_WAKEUP_THRESH)
60
61 #define UART_INTR_CONFIG_FLAG ((UART_INTR_RXFIFO_FULL) \
62 | (UART_INTR_RXFIFO_TOUT) \
63 | (UART_INTR_RXFIFO_OVF) \
64 | (UART_INTR_BRK_DET) \
65 | (UART_INTR_PARITY_ERR))
66
67 #define UART_ENTER_CRITICAL_ISR(mux) portENTER_CRITICAL_ISR(mux)
68 #define UART_EXIT_CRITICAL_ISR(mux) portEXIT_CRITICAL_ISR(mux)
69 #define UART_ENTER_CRITICAL(mux) portENTER_CRITICAL(mux)
70 #define UART_EXIT_CRITICAL(mux) portEXIT_CRITICAL(mux)
71
72
73 // Check actual UART mode set
74 #define UART_IS_MODE_SET(uart_number, mode) ((p_uart_obj[uart_number]->uart_mode == mode))
75
76 #define UART_CONTEX_INIT_DEF(uart_num) {\
77 .hal.dev = UART_LL_GET_HW(uart_num),\
78 .spinlock = portMUX_INITIALIZER_UNLOCKED,\
79 .hw_enabled = false,\
80 }
81
82 #if SOC_UART_SUPPORT_RTC_CLK
83 #define RTC_ENABLED(uart_num) (BIT(uart_num))
84 #endif
85
86 typedef struct {
87 uart_event_type_t type; /*!< UART TX data type */
88 struct {
89 int brk_len;
90 size_t size;
91 uint8_t data[0];
92 } tx_data;
93 } uart_tx_data_t;
94
95 typedef struct {
96 int wr;
97 int rd;
98 int len;
99 int *data;
100 } uart_pat_rb_t;
101
102 typedef struct {
103 uart_port_t uart_num; /*!< UART port number*/
104 int event_queue_size; /*!< UART event queue size*/
105 intr_handle_t intr_handle; /*!< UART interrupt handle*/
106 uart_mode_t uart_mode; /*!< UART controller actual mode set by uart_set_mode() */
107 bool coll_det_flg; /*!< UART collision detection flag */
108 bool rx_always_timeout_flg; /*!< Flag to always detect RX timeout */
109 int rx_buffered_len; /*!< UART cached data length */
110 int rx_buf_size; /*!< RX ring buffer size */
111 bool rx_buffer_full_flg; /*!< RX ring buffer full flag. */
112 uint32_t rx_cur_remain; /*!< Number of bytes still to be read out of the current ring buffer item*/
113 uint8_t *rx_ptr; /*!< pointer to the current data in ring buffer*/
114 uint8_t *rx_head_ptr; /*!< pointer to the head of RX item*/
115 uint8_t rx_data_buf[SOC_UART_FIFO_LEN]; /*!< Data buffer to stash FIFO data*/
116 uint8_t rx_stash_len; /*!< Stashed data length. (When flow control is used and the data read from the FIFO cannot be pushed to the ring buffer, it is stashed here and pushed later.) */
117 uint32_t rx_int_usr_mask; /*!< RX interrupt status. Valid at any time, regardless of RX buffer status. */
118 uart_pat_rb_t rx_pattern_pos;
119 int tx_buf_size; /*!< TX ring buffer size */
120 bool tx_waiting_fifo; /*!< Flag indicating that a task is waiting for the TX FIFO empty interrupt; used to send data when no TX ring buffer is installed*/
121 uint8_t *tx_ptr; /*!< TX data pointer to push to FIFO in TX buffer mode*/
122 uart_tx_data_t *tx_head; /*!< TX data pointer to head of the current buffer in TX ring buffer*/
123 uint32_t tx_len_tot; /*!< Total length of current item in ring buffer*/
124 uint32_t tx_len_cur;
125 uint8_t tx_brk_flg; /*!< Flag to indicate that a break signal should be sent at the end of the current item*/
126 uint8_t tx_brk_len; /*!< TX break signal cycle length/number */
127 uint8_t tx_waiting_brk; /*!< Flag indicating that the TX FIFO is waiting to send a break once it is empty; do not push data into the TX FIFO while it is set*/
128 uart_select_notif_callback_t uart_select_notif_callback; /*!< Notification about select() events */
129 QueueHandle_t event_queue; /*!< UART event queue handler*/
130 RingbufHandle_t rx_ring_buf; /*!< RX ring buffer handler*/
131 RingbufHandle_t tx_ring_buf; /*!< TX ring buffer handler*/
132 SemaphoreHandle_t rx_mux; /*!< UART RX data mutex*/
133 SemaphoreHandle_t tx_mux; /*!< UART TX mutex*/
134 SemaphoreHandle_t tx_fifo_sem; /*!< UART TX FIFO semaphore*/
135 SemaphoreHandle_t tx_done_sem; /*!< UART TX done semaphore*/
136 SemaphoreHandle_t tx_brk_sem; /*!< UART TX send break done semaphore*/
137 #if CONFIG_UART_ISR_IN_IRAM
138 void *event_queue_storage;
139 void *event_queue_struct;
140 void *rx_ring_buf_storage;
141 void *rx_ring_buf_struct;
142 void *tx_ring_buf_storage;
143 void *tx_ring_buf_struct;
144 void *rx_mux_struct;
145 void *tx_mux_struct;
146 void *tx_fifo_sem_struct;
147 void *tx_done_sem_struct;
148 void *tx_brk_sem_struct;
149 #endif
150 } uart_obj_t;
151
152 typedef struct {
153 uart_hal_context_t hal; /*!< UART hal context*/
154 portMUX_TYPE spinlock;
155 bool hw_enabled;
156 } uart_context_t;
157
158 static uart_obj_t *p_uart_obj[UART_NUM_MAX] = {0};
159
160 static uart_context_t uart_context[UART_NUM_MAX] = {
161 UART_CONTEX_INIT_DEF(UART_NUM_0),
162 UART_CONTEX_INIT_DEF(UART_NUM_1),
163 #if UART_NUM_MAX > 2
164 UART_CONTEX_INIT_DEF(UART_NUM_2),
165 #endif
166 };
167
168 static portMUX_TYPE uart_selectlock = portMUX_INITIALIZER_UNLOCKED;
169
170 #if SOC_UART_SUPPORT_RTC_CLK
171
172 static uint8_t rtc_enabled = 0;
173 static portMUX_TYPE rtc_num_spinlock = portMUX_INITIALIZER_UNLOCKED;
174
175 static void rtc_clk_enable(uart_port_t uart_num)
176 {
177 portENTER_CRITICAL(&rtc_num_spinlock);
178 if (!(rtc_enabled & RTC_ENABLED(uart_num))) {
179 rtc_enabled |= RTC_ENABLED(uart_num);
180 }
181 SET_PERI_REG_MASK(RTC_CNTL_CLK_CONF_REG, RTC_CNTL_DIG_CLK8M_EN_M);
182 portEXIT_CRITICAL(&rtc_num_spinlock);
183 }
184
185 static void rtc_clk_disable(uart_port_t uart_num)
186 {
187 assert(rtc_enabled & RTC_ENABLED(uart_num));
188
189 portENTER_CRITICAL(&rtc_num_spinlock);
190 rtc_enabled &= ~RTC_ENABLED(uart_num);
191 if (rtc_enabled == 0) {
192 CLEAR_PERI_REG_MASK(RTC_CNTL_CLK_CONF_REG, RTC_CNTL_DIG_CLK8M_EN_M);
193 }
194 portEXIT_CRITICAL(&rtc_num_spinlock);
195 }
196 #endif
197
198 static void uart_module_enable(uart_port_t uart_num)
199 {
200 UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
201 if (uart_context[uart_num].hw_enabled != true) {
202 periph_module_enable(uart_periph_signal[uart_num].module);
203 if (uart_num != CONFIG_ESP_CONSOLE_UART_NUM) {
204 // Workaround for ESP32C3: enable core reset
205 // before enabling the UART module clock
206 // to prevent the UART from outputting garbage values.
207 #if SOC_UART_REQUIRE_CORE_RESET
208 uart_hal_set_reset_core(&(uart_context[uart_num].hal), true);
209 periph_module_reset(uart_periph_signal[uart_num].module);
210 uart_hal_set_reset_core(&(uart_context[uart_num].hal), false);
211 #else
212 periph_module_reset(uart_periph_signal[uart_num].module);
213 #endif
214 }
215 uart_context[uart_num].hw_enabled = true;
216 }
217 UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
218 }
219
220 static void uart_module_disable(uart_port_t uart_num)
221 {
222 UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
223 if (uart_context[uart_num].hw_enabled != false) {
224 if (uart_num != CONFIG_ESP_CONSOLE_UART_NUM ) {
225 periph_module_disable(uart_periph_signal[uart_num].module);
226 }
227 uart_context[uart_num].hw_enabled = false;
228 }
229 UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
230 }
231
232 esp_err_t uart_set_word_length(uart_port_t uart_num, uart_word_length_t data_bit)
233 {
234 ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
235 ESP_RETURN_ON_FALSE((data_bit < UART_DATA_BITS_MAX), ESP_FAIL, UART_TAG, "data bit error");
236 UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
237 uart_hal_set_data_bit_num(&(uart_context[uart_num].hal), data_bit);
238 UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
239 return ESP_OK;
240 }
241
242 esp_err_t uart_get_word_length(uart_port_t uart_num, uart_word_length_t *data_bit)
243 {
244 ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
245 uart_hal_get_data_bit_num(&(uart_context[uart_num].hal), data_bit);
246 return ESP_OK;
247 }
248
249 esp_err_t uart_set_stop_bits(uart_port_t uart_num, uart_stop_bits_t stop_bit)
250 {
251 ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
252 ESP_RETURN_ON_FALSE((stop_bit < UART_STOP_BITS_MAX), ESP_FAIL, UART_TAG, "stop bit error");
253 UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
254 uart_hal_set_stop_bits(&(uart_context[uart_num].hal), stop_bit);
255 UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
256 return ESP_OK;
257 }
258
259 esp_err_t uart_get_stop_bits(uart_port_t uart_num, uart_stop_bits_t *stop_bit)
260 {
261 ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
262 UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
263 uart_hal_get_stop_bits(&(uart_context[uart_num].hal), stop_bit);
264 UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
265 return ESP_OK;
266 }
267
268 esp_err_t uart_set_parity(uart_port_t uart_num, uart_parity_t parity_mode)
269 {
270 ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
271 UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
272 uart_hal_set_parity(&(uart_context[uart_num].hal), parity_mode);
273 UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
274 return ESP_OK;
275 }
276
277 esp_err_t uart_get_parity(uart_port_t uart_num, uart_parity_t *parity_mode)
278 {
279 ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
280 UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
281 uart_hal_get_parity(&(uart_context[uart_num].hal), parity_mode);
282 UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
283 return ESP_OK;
284 }
285
286 esp_err_t uart_set_baudrate(uart_port_t uart_num, uint32_t baud_rate)
287 {
288 ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
289 UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
290 uart_hal_set_baudrate(&(uart_context[uart_num].hal), baud_rate);
291 UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
292 return ESP_OK;
293 }
294
295 esp_err_t uart_get_baudrate(uart_port_t uart_num, uint32_t *baudrate)
296 {
297 ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
298 UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
299 uart_hal_get_baudrate(&(uart_context[uart_num].hal), baudrate);
300 UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
301 return ESP_OK;
302 }
303
304 esp_err_t uart_set_line_inverse(uart_port_t uart_num, uint32_t inverse_mask)
305 {
306 ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
307 UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
308 uart_hal_inverse_signal(&(uart_context[uart_num].hal), inverse_mask);
309 UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
310 return ESP_OK;
311 }
312
313 esp_err_t uart_set_sw_flow_ctrl(uart_port_t uart_num, bool enable, uint8_t rx_thresh_xon, uint8_t rx_thresh_xoff)
314 {
315 ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
316 ESP_RETURN_ON_FALSE((rx_thresh_xon < SOC_UART_FIFO_LEN), ESP_FAIL, UART_TAG, "rx flow xon thresh error");
317 ESP_RETURN_ON_FALSE((rx_thresh_xoff < SOC_UART_FIFO_LEN), ESP_FAIL, UART_TAG, "rx flow xoff thresh error");
318 uart_sw_flowctrl_t sw_flow_ctl = {
319 .xon_char = XON,
320 .xoff_char = XOFF,
321 .xon_thrd = rx_thresh_xon,
322 .xoff_thrd = rx_thresh_xoff,
323 };
324 UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
325 uart_hal_set_sw_flow_ctrl(&(uart_context[uart_num].hal), &sw_flow_ctl, enable);
326 UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
327 return ESP_OK;
328 }
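/*
 * Example (illustrative sketch, threshold values are assumptions): enable XON/XOFF
 * software flow control on a driver-installed port, asking the peer to pause once
 * 100 bytes are pending in the RX FIFO and to resume when it drains below 20 bytes.
 * Both thresholds must be smaller than SOC_UART_FIFO_LEN.
 *
 *     ESP_ERROR_CHECK(uart_set_sw_flow_ctrl(UART_NUM_1, true, 20, 100));
 */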
329
330 esp_err_t uart_set_hw_flow_ctrl(uart_port_t uart_num, uart_hw_flowcontrol_t flow_ctrl, uint8_t rx_thresh)
331 {
332 ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
333 ESP_RETURN_ON_FALSE((rx_thresh < SOC_UART_FIFO_LEN), ESP_FAIL, UART_TAG, "rx flow thresh error");
334 ESP_RETURN_ON_FALSE((flow_ctrl < UART_HW_FLOWCTRL_MAX), ESP_FAIL, UART_TAG, "hw_flowctrl mode error");
335 UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
336 uart_hal_set_hw_flow_ctrl(&(uart_context[uart_num].hal), flow_ctrl, rx_thresh);
337 UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
338 return ESP_OK;
339 }
340
341 esp_err_t uart_get_hw_flow_ctrl(uart_port_t uart_num, uart_hw_flowcontrol_t *flow_ctrl)
342 {
343 ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
344 UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
345 uart_hal_get_hw_flow_ctrl(&(uart_context[uart_num].hal), flow_ctrl);
346 UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
347 return ESP_OK;
348 }
349
350 esp_err_t UART_ISR_ATTR uart_clear_intr_status(uart_port_t uart_num, uint32_t clr_mask)
351 {
352 ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
353 uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), clr_mask);
354 return ESP_OK;
355 }
356
357 esp_err_t uart_enable_intr_mask(uart_port_t uart_num, uint32_t enable_mask)
358 {
359 ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
360 UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
361 /* Keep track of the interrupt toggling. Without such a variable,
362 * once the RX buffer is full and the RX interrupts are disabled, it is
363 * impossible to know what the previous state (enabled/disabled) of these
364 * interrupt masks was. This is particularly handy when
365 * emptying a filled RX buffer. */
366 p_uart_obj[uart_num]->rx_int_usr_mask |= enable_mask;
367 uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), enable_mask);
368 uart_hal_ena_intr_mask(&(uart_context[uart_num].hal), enable_mask);
369 UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
370 return ESP_OK;
371 }
372
373 /**
374 * @brief Function re-enabling the given interrupts (mask) if and only if
375 * they have not been disabled by the user.
376 *
377 * @param uart_num UART number to perform the operation on
378 * @param enable_mask Interrupts (flags) to be re-enabled
379 *
380 * @return ESP_OK on success, ESP_FAIL if uart_num is incorrect
381 */
382 static esp_err_t uart_reenable_intr_mask(uart_port_t uart_num, uint32_t enable_mask)
383 {
384 ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
385 UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
386 /* The mask will only contain the interrupt flags that need to be re-enabled
387 * AND which have NOT been explicitly disabled by the user. */
388 uint32_t mask = p_uart_obj[uart_num]->rx_int_usr_mask & enable_mask;
389 uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), mask);
390 uart_hal_ena_intr_mask(&(uart_context[uart_num].hal), mask);
391 UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
392 return ESP_OK;
393 }
394
395 esp_err_t uart_disable_intr_mask(uart_port_t uart_num, uint32_t disable_mask)
396 {
397 ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
398 UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
399 p_uart_obj[uart_num]->rx_int_usr_mask &= ~disable_mask;
400 uart_hal_disable_intr_mask(&(uart_context[uart_num].hal), disable_mask);
401 UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
402 return ESP_OK;
403 }
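/*
 * Example (illustrative sketch): thanks to the rx_int_usr_mask bookkeeping above, an
 * application can mask RX interrupts around a time-critical section and restore them
 * afterwards, without the driver's buffer-full recovery path silently re-enabling
 * what the user turned off:
 *
 *     ESP_ERROR_CHECK(uart_disable_rx_intr(UART_NUM_1));
 *     // ... time-critical work without UART RX events ...
 *     ESP_ERROR_CHECK(uart_enable_rx_intr(UART_NUM_1));
 */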
404
405 static esp_err_t uart_pattern_link_free(uart_port_t uart_num)
406 {
407 int *pdata = NULL;
408 UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
409 if (p_uart_obj[uart_num]->rx_pattern_pos.data != NULL) {
410 pdata = p_uart_obj[uart_num]->rx_pattern_pos.data;
411 p_uart_obj[uart_num]->rx_pattern_pos.data = NULL;
412 p_uart_obj[uart_num]->rx_pattern_pos.wr = 0;
413 p_uart_obj[uart_num]->rx_pattern_pos.rd = 0;
414 }
415 UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
416 free(pdata);
417 return ESP_OK;
418 }
419
420 static esp_err_t UART_ISR_ATTR uart_pattern_enqueue(uart_port_t uart_num, int pos)
421 {
422 esp_err_t ret = ESP_OK;
423 uart_pat_rb_t *p_pos = &p_uart_obj[uart_num]->rx_pattern_pos;
424 int next = p_pos->wr + 1;
425 if (next >= p_pos->len) {
426 next = 0;
427 }
428 if (next == p_pos->rd) {
429 #ifndef CONFIG_UART_ISR_IN_IRAM //Only log if ISR is not in IRAM
430 ESP_EARLY_LOGW(UART_TAG, "Fail to enqueue pattern position, pattern queue is full.");
431 #endif
432 ret = ESP_FAIL;
433 } else {
434 p_pos->data[p_pos->wr] = pos;
435 p_pos->wr = next;
436 ret = ESP_OK;
437 }
438 return ret;
439 }
440
441 static esp_err_t uart_pattern_dequeue(uart_port_t uart_num)
442 {
443 if (p_uart_obj[uart_num]->rx_pattern_pos.data == NULL) {
444 return ESP_ERR_INVALID_STATE;
445 } else {
446 esp_err_t ret = ESP_OK;
447 uart_pat_rb_t *p_pos = &p_uart_obj[uart_num]->rx_pattern_pos;
448 if (p_pos->rd == p_pos->wr) {
449 ret = ESP_FAIL;
450 } else {
451 p_pos->rd++;
452 }
453 if (p_pos->rd >= p_pos->len) {
454 p_pos->rd = 0;
455 }
456 return ret;
457 }
458 }
459
460 static esp_err_t uart_pattern_queue_update(uart_port_t uart_num, int diff_len)
461 {
462 uart_pat_rb_t *p_pos = &p_uart_obj[uart_num]->rx_pattern_pos;
463 int rd = p_pos->rd;
464 while (rd != p_pos->wr) {
465 p_pos->data[rd] -= diff_len;
466 int rd_rec = rd;
467 rd ++;
468 if (rd >= p_pos->len) {
469 rd = 0;
470 }
471 if (p_pos->data[rd_rec] < 0) {
472 p_pos->rd = rd;
473 }
474 }
475 return ESP_OK;
476 }
477
478 int uart_pattern_pop_pos(uart_port_t uart_num)
479 {
480 ESP_RETURN_ON_FALSE((p_uart_obj[uart_num]), (-1), UART_TAG, "uart driver error");
481 UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
482 uart_pat_rb_t *pat_pos = &p_uart_obj[uart_num]->rx_pattern_pos;
483 int pos = -1;
484 if (pat_pos != NULL && pat_pos->rd != pat_pos->wr) {
485 pos = pat_pos->data[pat_pos->rd];
486 uart_pattern_dequeue(uart_num);
487 }
488 UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
489 return pos;
490 }
491
492 int uart_pattern_get_pos(uart_port_t uart_num)
493 {
494 ESP_RETURN_ON_FALSE((p_uart_obj[uart_num]), (-1), UART_TAG, "uart driver error");
495 UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
496 uart_pat_rb_t *pat_pos = &p_uart_obj[uart_num]->rx_pattern_pos;
497 int pos = -1;
498 if (pat_pos != NULL && pat_pos->rd != pat_pos->wr) {
499 pos = pat_pos->data[pat_pos->rd];
500 }
501 UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
502 return pos;
503 }
504
505 esp_err_t uart_pattern_queue_reset(uart_port_t uart_num, int queue_length)
506 {
507 ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
508 ESP_RETURN_ON_FALSE((p_uart_obj[uart_num]), ESP_ERR_INVALID_STATE, UART_TAG, "uart driver error");
509
510 int *pdata = (int *) malloc(queue_length * sizeof(int));
511 if (pdata == NULL) {
512 return ESP_ERR_NO_MEM;
513 }
514 UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
515 int *ptmp = p_uart_obj[uart_num]->rx_pattern_pos.data;
516 p_uart_obj[uart_num]->rx_pattern_pos.data = pdata;
517 p_uart_obj[uart_num]->rx_pattern_pos.len = queue_length;
518 p_uart_obj[uart_num]->rx_pattern_pos.rd = 0;
519 p_uart_obj[uart_num]->rx_pattern_pos.wr = 0;
520 UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
521 free(ptmp);
522 return ESP_OK;
523 }
524
525 #if CONFIG_IDF_TARGET_ESP32
526 esp_err_t uart_enable_pattern_det_intr(uart_port_t uart_num, char pattern_chr, uint8_t chr_num, int chr_tout, int post_idle, int pre_idle)
527 {
528 ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
529 ESP_RETURN_ON_FALSE(chr_tout >= 0 && chr_tout <= UART_RX_GAP_TOUT_V, ESP_FAIL, UART_TAG, "uart pattern set error\n");
530 ESP_RETURN_ON_FALSE(post_idle >= 0 && post_idle <= UART_POST_IDLE_NUM_V, ESP_FAIL, UART_TAG, "uart pattern set error\n");
531 ESP_RETURN_ON_FALSE(pre_idle >= 0 && pre_idle <= UART_PRE_IDLE_NUM_V, ESP_FAIL, UART_TAG, "uart pattern set error\n");
532 uart_at_cmd_t at_cmd = {0};
533 at_cmd.cmd_char = pattern_chr;
534 at_cmd.char_num = chr_num;
535 at_cmd.gap_tout = chr_tout;
536 at_cmd.pre_idle = pre_idle;
537 at_cmd.post_idle = post_idle;
538 uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_CMD_CHAR_DET);
539 UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
540 uart_hal_set_at_cmd_char(&(uart_context[uart_num].hal), &at_cmd);
541 uart_hal_ena_intr_mask(&(uart_context[uart_num].hal), UART_INTR_CMD_CHAR_DET);
542 UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
543 return ESP_OK;
544 }
545 #endif
546
547 esp_err_t uart_enable_pattern_det_baud_intr(uart_port_t uart_num, char pattern_chr, uint8_t chr_num, int chr_tout, int post_idle, int pre_idle)
548 {
549 ESP_RETURN_ON_FALSE(uart_num < UART_NUM_MAX, ESP_FAIL, UART_TAG, "uart_num error");
550 ESP_RETURN_ON_FALSE(chr_tout >= 0 && chr_tout <= UART_RX_GAP_TOUT_V, ESP_FAIL, UART_TAG, "uart pattern set error\n");
551 ESP_RETURN_ON_FALSE(post_idle >= 0 && post_idle <= UART_POST_IDLE_NUM_V, ESP_FAIL, UART_TAG, "uart pattern set error\n");
552 ESP_RETURN_ON_FALSE(pre_idle >= 0 && pre_idle <= UART_PRE_IDLE_NUM_V, ESP_FAIL, UART_TAG, "uart pattern set error\n");
553 uart_at_cmd_t at_cmd = {0};
554 at_cmd.cmd_char = pattern_chr;
555 at_cmd.char_num = chr_num;
556
557 #if CONFIG_IDF_TARGET_ESP32
558 int apb_clk_freq = 0;
559 uint32_t uart_baud = 0;
560 uint32_t uart_div = 0;
561 uart_get_baudrate(uart_num, &uart_baud);
562 apb_clk_freq = esp_clk_apb_freq();
563 uart_div = apb_clk_freq / uart_baud;
564
565 at_cmd.gap_tout = chr_tout * uart_div;
566 at_cmd.pre_idle = pre_idle * uart_div;
567 at_cmd.post_idle = post_idle * uart_div;
568 #else
569 at_cmd.gap_tout = chr_tout;
570 at_cmd.pre_idle = pre_idle;
571 at_cmd.post_idle = post_idle;
572 #endif
573 uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_CMD_CHAR_DET);
574 UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
575 uart_hal_set_at_cmd_char(&(uart_context[uart_num].hal), &at_cmd);
576 uart_hal_ena_intr_mask(&(uart_context[uart_num].hal), UART_INTR_CMD_CHAR_DET);
577 UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
578 return ESP_OK;
579 }
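/*
 * Example (illustrative sketch, port/queue parameters are assumptions): detect an
 * AT-style "+++" sequence on a driver-installed port and, when a UART_PATTERN_DET
 * event arrives in the event task, fetch its position in the RX buffer from the
 * pattern queue (`buf` is an application-provided buffer):
 *
 *     uart_enable_pattern_det_baud_intr(UART_NUM_1, '+', 3, 9, 0, 0);
 *     uart_pattern_queue_reset(UART_NUM_1, 20);
 *     // ... in the event task, on UART_PATTERN_DET:
 *     int pos = uart_pattern_pop_pos(UART_NUM_1);
 *     if (pos != -1) {
 *         uart_read_bytes(UART_NUM_1, buf, pos + 3, pdMS_TO_TICKS(100));
 *     }
 */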
580
581
582 esp_err_t uart_disable_pattern_det_intr(uart_port_t uart_num)
583 {
584 return uart_disable_intr_mask(uart_num, UART_INTR_CMD_CHAR_DET);
585 }
586
587 esp_err_t uart_enable_rx_intr(uart_port_t uart_num)
588 {
589 return uart_enable_intr_mask(uart_num, UART_INTR_RXFIFO_FULL | UART_INTR_RXFIFO_TOUT);
590 }
591
592 esp_err_t uart_disable_rx_intr(uart_port_t uart_num)
593 {
594 return uart_disable_intr_mask(uart_num, UART_INTR_RXFIFO_FULL | UART_INTR_RXFIFO_TOUT);
595 }
596
597 esp_err_t uart_disable_tx_intr(uart_port_t uart_num)
598 {
599 return uart_disable_intr_mask(uart_num, UART_INTR_TXFIFO_EMPTY);
600 }
601
602 esp_err_t uart_enable_tx_intr(uart_port_t uart_num, int enable, int thresh)
603 {
604 if (enable == 0) {
605 return uart_disable_tx_intr(uart_num);
606 }
607 ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
608 ESP_RETURN_ON_FALSE((thresh < SOC_UART_FIFO_LEN), ESP_FAIL, UART_TAG, "empty intr threshold error");
609 uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_TXFIFO_EMPTY);
610 UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
611 uart_hal_set_txfifo_empty_thr(&(uart_context[uart_num].hal), thresh);
612 uart_hal_ena_intr_mask(&(uart_context[uart_num].hal), UART_INTR_TXFIFO_EMPTY);
613 UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
614 return ESP_OK;
615 }
616
617 esp_err_t uart_isr_register(uart_port_t uart_num, void (*fn)(void *), void *arg, int intr_alloc_flags, uart_isr_handle_t *handle)
618 {
619 int ret;
620 ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
621 UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
622 ret = esp_intr_alloc(uart_periph_signal[uart_num].irq, intr_alloc_flags, fn, arg, handle);
623 if (ret == ESP_OK) {
624 p_uart_obj[uart_num]->intr_handle = *handle;
625 }
626 UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
627 return ret;
628 }
629
630 esp_err_t uart_isr_free(uart_port_t uart_num)
631 {
632 esp_err_t ret;
633 ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
634 ESP_RETURN_ON_FALSE((p_uart_obj[uart_num]), ESP_FAIL, UART_TAG, "uart driver error");
635 ESP_RETURN_ON_FALSE((p_uart_obj[uart_num]->intr_handle != NULL), ESP_ERR_INVALID_ARG, UART_TAG, "uart driver error");
636 UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
637 ret = esp_intr_free(p_uart_obj[uart_num]->intr_handle);
638 p_uart_obj[uart_num]->intr_handle = NULL;
639 UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
640 return ret;
641 }
642
643 static bool uart_try_set_iomux_pin(uart_port_t uart_num, int io_num, uint32_t idx)
644 {
645 /* Store a pointer to the default pin, to optimize access to its fields. */
646 const uart_periph_sig_t *upin = &uart_periph_signal[uart_num].pins[idx];
647
648 /* In theory, if default_gpio is -1, iomux_func should also be -1, but
649 * let's be safe and test both. */
650 if (upin->iomux_func == -1 || upin->default_gpio == -1 || upin->default_gpio != io_num) {
651 return false;
652 }
653
654 /* Assign the correct function to the GPIO. */
655 assert (upin->iomux_func != -1);
656 gpio_iomux_out(io_num, upin->iomux_func, false);
657
658 /* If the pin is an input, we also have to redirect the signal,
659 * in order to bypass the GPIO matrix. */
660 if (upin->input) {
661 gpio_iomux_in(io_num, upin->signal);
662 }
663
664 return true;
665 }
666
667 //An internal output signal can be routed to multiple GPIO pads.
668 //Only one GPIO pad can be connected to an input signal.
669 esp_err_t uart_set_pin(uart_port_t uart_num, int tx_io_num, int rx_io_num, int rts_io_num, int cts_io_num)
670 {
671 ESP_RETURN_ON_FALSE((uart_num >= 0), ESP_FAIL, UART_TAG, "uart_num error");
672 ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
673 ESP_RETURN_ON_FALSE((tx_io_num < 0 || (GPIO_IS_VALID_OUTPUT_GPIO(tx_io_num))), ESP_FAIL, UART_TAG, "tx_io_num error");
674 ESP_RETURN_ON_FALSE((rx_io_num < 0 || (GPIO_IS_VALID_GPIO(rx_io_num))), ESP_FAIL, UART_TAG, "rx_io_num error");
675 ESP_RETURN_ON_FALSE((rts_io_num < 0 || (GPIO_IS_VALID_OUTPUT_GPIO(rts_io_num))), ESP_FAIL, UART_TAG, "rts_io_num error");
676 ESP_RETURN_ON_FALSE((cts_io_num < 0 || (GPIO_IS_VALID_GPIO(cts_io_num))), ESP_FAIL, UART_TAG, "cts_io_num error");
677
678 /* In the following statements, if the io_num is negative, there is nothing to configure. */
679 if (tx_io_num >= 0 && !uart_try_set_iomux_pin(uart_num, tx_io_num, SOC_UART_TX_PIN_IDX)) {
680 gpio_hal_iomux_func_sel(GPIO_PIN_MUX_REG[tx_io_num], PIN_FUNC_GPIO);
681 gpio_set_level(tx_io_num, 1);
682 esp_rom_gpio_connect_out_signal(tx_io_num, UART_PERIPH_SIGNAL(uart_num, SOC_UART_TX_PIN_IDX), 0, 0);
683 }
684
685 if (rx_io_num >= 0 && !uart_try_set_iomux_pin(uart_num, rx_io_num, SOC_UART_RX_PIN_IDX)) {
686 gpio_hal_iomux_func_sel(GPIO_PIN_MUX_REG[rx_io_num], PIN_FUNC_GPIO);
687 gpio_set_pull_mode(rx_io_num, GPIO_PULLUP_ONLY);
688 gpio_set_direction(rx_io_num, GPIO_MODE_INPUT);
689 esp_rom_gpio_connect_in_signal(rx_io_num, UART_PERIPH_SIGNAL(uart_num, SOC_UART_RX_PIN_IDX), 0);
690 }
691
692 if (rts_io_num >= 0 && !uart_try_set_iomux_pin(uart_num, rts_io_num, SOC_UART_RTS_PIN_IDX)) {
693 gpio_hal_iomux_func_sel(GPIO_PIN_MUX_REG[rts_io_num], PIN_FUNC_GPIO);
694 gpio_set_direction(rts_io_num, GPIO_MODE_OUTPUT);
695 esp_rom_gpio_connect_out_signal(rts_io_num, UART_PERIPH_SIGNAL(uart_num, SOC_UART_RTS_PIN_IDX), 0, 0);
696 }
697
698 if (cts_io_num >= 0 && !uart_try_set_iomux_pin(uart_num, cts_io_num, SOC_UART_CTS_PIN_IDX)) {
699 gpio_hal_iomux_func_sel(GPIO_PIN_MUX_REG[cts_io_num], PIN_FUNC_GPIO);
700 gpio_set_pull_mode(cts_io_num, GPIO_PULLUP_ONLY);
701 gpio_set_direction(cts_io_num, GPIO_MODE_INPUT);
702 esp_rom_gpio_connect_in_signal(cts_io_num, UART_PERIPH_SIGNAL(uart_num, SOC_UART_CTS_PIN_IDX), 0);
703 }
704
705 return ESP_OK;
706 }
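/*
 * Example (illustrative sketch, GPIO numbers are assumptions): route UART2 TX/RX to
 * GPIO17/GPIO16 and leave RTS/CTS untouched. If a pin matches the port's IOMUX
 * default, uart_try_set_iomux_pin() above bypasses the GPIO matrix; otherwise the
 * signal is routed through the matrix.
 *
 *     ESP_ERROR_CHECK(uart_set_pin(UART_NUM_2, 17, 16,
 *                                  UART_PIN_NO_CHANGE, UART_PIN_NO_CHANGE));
 */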
707
708 esp_err_t uart_set_rts(uart_port_t uart_num, int level)
709 {
710 ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
711 ESP_RETURN_ON_FALSE((!uart_hal_is_hw_rts_en(&(uart_context[uart_num].hal))), ESP_FAIL, UART_TAG, "disable hw flowctrl before using sw control");
712 UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
713 uart_hal_set_rts(&(uart_context[uart_num].hal), level);
714 UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
715 return ESP_OK;
716 }
717
718 esp_err_t uart_set_dtr(uart_port_t uart_num, int level)
719 {
720 ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
721 UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
722 uart_hal_set_dtr(&(uart_context[uart_num].hal), level);
723 UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
724 return ESP_OK;
725 }
726
727 esp_err_t uart_set_tx_idle_num(uart_port_t uart_num, uint16_t idle_num)
728 {
729 ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
730 ESP_RETURN_ON_FALSE((idle_num <= UART_TX_IDLE_NUM_V), ESP_FAIL, UART_TAG, "uart idle num error");
731 UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
732 uart_hal_set_tx_idle_num(&(uart_context[uart_num].hal), idle_num);
733 UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
734 return ESP_OK;
735 }
736
737 esp_err_t uart_param_config(uart_port_t uart_num, const uart_config_t *uart_config)
738 {
739 ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
740 ESP_RETURN_ON_FALSE((uart_config), ESP_FAIL, UART_TAG, "param null");
741 ESP_RETURN_ON_FALSE((uart_config->rx_flow_ctrl_thresh < SOC_UART_FIFO_LEN), ESP_FAIL, UART_TAG, "rx flow thresh error");
742 ESP_RETURN_ON_FALSE((uart_config->flow_ctrl < UART_HW_FLOWCTRL_MAX), ESP_FAIL, UART_TAG, "hw_flowctrl mode error");
743 ESP_RETURN_ON_FALSE((uart_config->data_bits < UART_DATA_BITS_MAX), ESP_FAIL, UART_TAG, "data bit error");
744 uart_module_enable(uart_num);
745 #if SOC_UART_SUPPORT_RTC_CLK
746 if (uart_config->source_clk == UART_SCLK_RTC) {
747 rtc_clk_enable(uart_num);
748 }
749 #endif
750 UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
751 uart_hal_init(&(uart_context[uart_num].hal), uart_num);
752 uart_hal_set_sclk(&(uart_context[uart_num].hal), uart_config->source_clk);
753 uart_hal_set_baudrate(&(uart_context[uart_num].hal), uart_config->baud_rate);
754 uart_hal_set_parity(&(uart_context[uart_num].hal), uart_config->parity);
755 uart_hal_set_data_bit_num(&(uart_context[uart_num].hal), uart_config->data_bits);
756 uart_hal_set_stop_bits(&(uart_context[uart_num].hal), uart_config->stop_bits);
757 uart_hal_set_tx_idle_num(&(uart_context[uart_num].hal), UART_TX_IDLE_NUM_DEFAULT);
758 uart_hal_set_hw_flow_ctrl(&(uart_context[uart_num].hal), uart_config->flow_ctrl, uart_config->rx_flow_ctrl_thresh);
759 UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
760 uart_hal_rxfifo_rst(&(uart_context[uart_num].hal));
761 uart_hal_txfifo_rst(&(uart_context[uart_num].hal));
762 return ESP_OK;
763 }
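/*
 * Example (illustrative sketch, values are assumptions): a typical 115200 8N1
 * configuration clocked from APB, usually applied before installing the driver:
 *
 *     const uart_config_t uart_cfg = {
 *         .baud_rate = 115200,
 *         .data_bits = UART_DATA_8_BITS,
 *         .parity = UART_PARITY_DISABLE,
 *         .stop_bits = UART_STOP_BITS_1,
 *         .flow_ctrl = UART_HW_FLOWCTRL_DISABLE,
 *         .rx_flow_ctrl_thresh = 0,
 *         .source_clk = UART_SCLK_APB,
 *     };
 *     ESP_ERROR_CHECK(uart_param_config(UART_NUM_1, &uart_cfg));
 */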
764
765 esp_err_t uart_intr_config(uart_port_t uart_num, const uart_intr_config_t *intr_conf)
766 {
767 ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
768 ESP_RETURN_ON_FALSE((intr_conf), ESP_FAIL, UART_TAG, "param null");
769 uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_LL_INTR_MASK);
770 UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
771 if (intr_conf->intr_enable_mask & UART_INTR_RXFIFO_TOUT) {
772 uart_hal_set_rx_timeout(&(uart_context[uart_num].hal), intr_conf->rx_timeout_thresh);
773 } else {
774 //Disable rx_tout intr
775 uart_hal_set_rx_timeout(&(uart_context[uart_num].hal), 0);
776 }
777 if (intr_conf->intr_enable_mask & UART_INTR_RXFIFO_FULL) {
778 uart_hal_set_rxfifo_full_thr(&(uart_context[uart_num].hal), intr_conf->rxfifo_full_thresh);
779 }
780 if (intr_conf->intr_enable_mask & UART_INTR_TXFIFO_EMPTY) {
781 uart_hal_set_txfifo_empty_thr(&(uart_context[uart_num].hal), intr_conf->txfifo_empty_intr_thresh);
782 }
783 uart_hal_ena_intr_mask(&(uart_context[uart_num].hal), intr_conf->intr_enable_mask);
784 UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
785 return ESP_OK;
786 }
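/*
 * Example (illustrative sketch, thresholds are assumptions): raise RX events either
 * when 100 bytes are pending in the RX FIFO or after roughly 10 symbol times of line
 * idle. The TX-empty threshold is only consulted when UART_INTR_TXFIFO_EMPTY is part
 * of the mask, so it is omitted here:
 *
 *     const uart_intr_config_t intr_cfg = {
 *         .intr_enable_mask = UART_INTR_RXFIFO_FULL | UART_INTR_RXFIFO_TOUT,
 *         .rxfifo_full_thresh = 100,
 *         .rx_timeout_thresh = 10,
 *     };
 *     ESP_ERROR_CHECK(uart_intr_config(UART_NUM_1, &intr_cfg));
 */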
787
788 static int UART_ISR_ATTR uart_find_pattern_from_last(uint8_t *buf, int length, uint8_t pat_chr, uint8_t pat_num)
789 {
790 int cnt = 0;
791 int len = length;
792 while (len >= 0) {
793 if (buf[len] == pat_chr) {
794 cnt++;
795 } else {
796 cnt = 0;
797 }
798 if (cnt >= pat_num) {
799 break;
800 }
801 len --;
802 }
803 return len;
804 }
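/*
 * Worked example (for illustration): with buf = "AT+++", length = 4 (index of the
 * last byte), pat_chr = '+' and pat_num = 3, the scan walks from the tail towards
 * the head and stops at index 2, the first byte of the trailing "+++" run. If no run
 * of pat_num pattern characters exists, the function returns -1.
 */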
805
806 //internal isr handler for default driver code.
807 static void UART_ISR_ATTR uart_rx_intr_handler_default(void *param)
808 {
809 uart_obj_t *p_uart = (uart_obj_t *) param;
810 uint8_t uart_num = p_uart->uart_num;
811 int rx_fifo_len = 0;
812 uint32_t uart_intr_status = 0;
813 uart_event_t uart_event;
814 portBASE_TYPE HPTaskAwoken = 0;
815 static uint8_t pat_flg = 0;
816 while (1) {
817 // A `continue` statement inside this loop could otherwise keep the interrupt
818 // handler spinning forever, so we exit once no interrupt status remains
819 uart_intr_status = uart_hal_get_intsts_mask(&(uart_context[uart_num].hal));
820 //Exit from while loop
821 if (uart_intr_status == 0) {
822 break;
823 }
824 uart_event.type = UART_EVENT_MAX;
825 if (uart_intr_status & UART_INTR_TXFIFO_EMPTY) {
826 UART_ENTER_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
827 uart_hal_disable_intr_mask(&(uart_context[uart_num].hal), UART_INTR_TXFIFO_EMPTY);
828 UART_EXIT_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
829 uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_TXFIFO_EMPTY);
830 if (p_uart->tx_waiting_brk) {
831 continue;
832 }
833 //TX semaphore will only be used when tx_buf_size is zero.
834 if (p_uart->tx_waiting_fifo == true && p_uart->tx_buf_size == 0) {
835 p_uart->tx_waiting_fifo = false;
836 xSemaphoreGiveFromISR(p_uart->tx_fifo_sem, &HPTaskAwoken);
837 } else {
838 //We don't use TX ring buffer, because the size is zero.
839 if (p_uart->tx_buf_size == 0) {
840 continue;
841 }
842 bool en_tx_flg = false;
843 uint32_t tx_fifo_rem = uart_hal_get_txfifo_len(&(uart_context[uart_num].hal));
844 //We need a loop here in case all the buffer items are very short.
845 //Otherwise the TX-empty interrupt would fire so often that it could trigger a watchdog reset.
846 //Although this loop runs in the ISR, it executes at most 128 iterations.
847 while (tx_fifo_rem) {
848 if (p_uart->tx_len_tot == 0 || p_uart->tx_ptr == NULL || p_uart->tx_len_cur == 0) {
849 size_t size;
850 p_uart->tx_head = (uart_tx_data_t *) xRingbufferReceiveFromISR(p_uart->tx_ring_buf, &size);
851 if (p_uart->tx_head) {
852 //The first item is the data descriptor;
853 //read it to get the information about the data to send.
854 if (p_uart->tx_len_tot == 0) {
855 p_uart->tx_ptr = NULL;
856 p_uart->tx_len_tot = p_uart->tx_head->tx_data.size;
857 if (p_uart->tx_head->type == UART_DATA_BREAK) {
858 p_uart->tx_brk_flg = 1;
859 p_uart->tx_brk_len = p_uart->tx_head->tx_data.brk_len;
860 }
861 //We have saved the data description from the 1st item, return buffer.
862 vRingbufferReturnItemFromISR(p_uart->tx_ring_buf, p_uart->tx_head, &HPTaskAwoken);
863 } else if (p_uart->tx_ptr == NULL) {
864 //Update the TX item pointer, we will need this to return item to buffer.
865 p_uart->tx_ptr = (uint8_t *)p_uart->tx_head;
866 en_tx_flg = true;
867 p_uart->tx_len_cur = size;
868 }
869 } else {
870 //Can not get data from ring buffer, return;
871 break;
872 }
873 }
874 if (p_uart->tx_len_tot > 0 && p_uart->tx_ptr && p_uart->tx_len_cur > 0) {
875 //To fill the TX FIFO.
876 uint32_t send_len = 0;
877 // Set RS485 RTS pin before transmission if the half duplex mode is enabled
878 if (UART_IS_MODE_SET(uart_num, UART_MODE_RS485_HALF_DUPLEX)) {
879 UART_ENTER_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
880 uart_hal_set_rts(&(uart_context[uart_num].hal), 0);
881 uart_hal_ena_intr_mask(&(uart_context[uart_num].hal), UART_INTR_TX_DONE);
882 UART_EXIT_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
883 }
884 uart_hal_write_txfifo(&(uart_context[uart_num].hal),
885 (const uint8_t *)p_uart->tx_ptr,
886 (p_uart->tx_len_cur > tx_fifo_rem) ? tx_fifo_rem : p_uart->tx_len_cur,
887 &send_len);
888 p_uart->tx_ptr += send_len;
889 p_uart->tx_len_tot -= send_len;
890 p_uart->tx_len_cur -= send_len;
891 tx_fifo_rem -= send_len;
892 if (p_uart->tx_len_cur == 0) {
893 //Return item to ring buffer.
894 vRingbufferReturnItemFromISR(p_uart->tx_ring_buf, p_uart->tx_head, &HPTaskAwoken);
895 p_uart->tx_head = NULL;
896 p_uart->tx_ptr = NULL;
897 //Item fully sent; now send a break if one was requested.
898 //The TX break signal is set once the FIFO is empty.
899 if (p_uart->tx_len_tot == 0 && p_uart->tx_brk_flg == 1) {
900 uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_TX_BRK_DONE);
901 UART_ENTER_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
902 uart_hal_tx_break(&(uart_context[uart_num].hal), p_uart->tx_brk_len);
903 uart_hal_ena_intr_mask(&(uart_context[uart_num].hal), UART_INTR_TX_BRK_DONE);
904 UART_EXIT_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
905 p_uart->tx_waiting_brk = 1;
906 //do not enable TX empty interrupt
907 en_tx_flg = false;
908 } else {
909 //enable TX empty interrupt
910 en_tx_flg = true;
911 }
912 } else {
913 //enable TX empty interrupt
914 en_tx_flg = true;
915 }
916 }
917 }
918 if (en_tx_flg) {
919 uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_TXFIFO_EMPTY);
920 UART_ENTER_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
921 uart_hal_ena_intr_mask(&(uart_context[uart_num].hal), UART_INTR_TXFIFO_EMPTY);
922 UART_EXIT_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
923 }
924 }
925 } else if ((uart_intr_status & UART_INTR_RXFIFO_TOUT)
926 || (uart_intr_status & UART_INTR_RXFIFO_FULL)
927 || (uart_intr_status & UART_INTR_CMD_CHAR_DET)
928 ) {
929 if (pat_flg == 1) {
930 uart_intr_status |= UART_INTR_CMD_CHAR_DET;
931 pat_flg = 0;
932 }
933 if (p_uart->rx_buffer_full_flg == false) {
934 rx_fifo_len = uart_hal_get_rxfifo_len(&(uart_context[uart_num].hal));
935 if ((p_uart_obj[uart_num]->rx_always_timeout_flg) && !(uart_intr_status & UART_INTR_RXFIFO_TOUT)) {
936 rx_fifo_len--; // leave one byte in the fifo in order to trigger uart_intr_rxfifo_tout
937 }
938 uart_hal_read_rxfifo(&(uart_context[uart_num].hal), p_uart->rx_data_buf, &rx_fifo_len);
939 uint8_t pat_chr = 0;
940 uint8_t pat_num = 0;
941 int pat_idx = -1;
942 uart_hal_get_at_cmd_char(&(uart_context[uart_num].hal), &pat_chr, &pat_num);
943
944 //Get the buffer from the FIFO
945 if (uart_intr_status & UART_INTR_CMD_CHAR_DET) {
946 uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_CMD_CHAR_DET);
947 uart_event.type = UART_PATTERN_DET;
948 uart_event.size = rx_fifo_len;
949 pat_idx = uart_find_pattern_from_last(p_uart->rx_data_buf, rx_fifo_len - 1, pat_chr, pat_num);
950 } else {
951 //After copying the data from the FIFO, clear the interrupt status
952 uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_RXFIFO_TOUT | UART_INTR_RXFIFO_FULL);
953 uart_event.type = UART_DATA;
954 uart_event.size = rx_fifo_len;
955 uart_event.timeout_flag = (uart_intr_status & UART_INTR_RXFIFO_TOUT) ? true : false;
956 UART_ENTER_CRITICAL_ISR(&uart_selectlock);
957 if (p_uart->uart_select_notif_callback) {
958 p_uart->uart_select_notif_callback(uart_num, UART_SELECT_READ_NOTIF, &HPTaskAwoken);
959 }
960 UART_EXIT_CRITICAL_ISR(&uart_selectlock);
961 }
962 p_uart->rx_stash_len = rx_fifo_len;
963 //If we fail to push the data to the ring buffer, stash it and push it next time.
964 //This mainly happens for applications that use flow control or a small ring buffer.
965 if (pdFALSE == xRingbufferSendFromISR(p_uart->rx_ring_buf, p_uart->rx_data_buf, p_uart->rx_stash_len, &HPTaskAwoken)) {
966 p_uart->rx_buffer_full_flg = true;
967 UART_ENTER_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
968 uart_hal_disable_intr_mask(&(uart_context[uart_num].hal), UART_INTR_RXFIFO_TOUT | UART_INTR_RXFIFO_FULL);
969 UART_EXIT_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
970 if (uart_event.type == UART_PATTERN_DET) {
971 UART_ENTER_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
972 if (rx_fifo_len < pat_num) {
973 //some of the characters are read out in last interrupt
974 uart_pattern_enqueue(uart_num, p_uart->rx_buffered_len - (pat_num - rx_fifo_len));
975 } else {
976 uart_pattern_enqueue(uart_num,
977 pat_idx <= -1 ?
978 //can not find the pattern in buffer,
979 p_uart->rx_buffered_len + p_uart->rx_stash_len :
980 // find the pattern in buffer
981 p_uart->rx_buffered_len + pat_idx);
982 }
983 UART_EXIT_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
984 if ((p_uart->event_queue != NULL) && (pdFALSE == xQueueSendFromISR(p_uart->event_queue, (void * )&uart_event, &HPTaskAwoken))) {
985 #ifndef CONFIG_UART_ISR_IN_IRAM //Only log if ISR is not in IRAM
986 ESP_EARLY_LOGV(UART_TAG, "UART event queue full");
987 #endif
988 }
989 }
990 uart_event.type = UART_BUFFER_FULL;
991 } else {
992 UART_ENTER_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
993 if (uart_intr_status & UART_INTR_CMD_CHAR_DET) {
994 if (rx_fifo_len < pat_num) {
995 //some of the characters are read out in last interrupt
996 uart_pattern_enqueue(uart_num, p_uart->rx_buffered_len - (pat_num - rx_fifo_len));
997 } else if (pat_idx >= 0) {
998 // find the pattern in stash buffer.
999 uart_pattern_enqueue(uart_num, p_uart->rx_buffered_len + pat_idx);
1000 }
1001 }
1002 p_uart->rx_buffered_len += p_uart->rx_stash_len;
1003 UART_EXIT_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
1004 }
1005 } else {
1006 UART_ENTER_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
1007 uart_hal_disable_intr_mask(&(uart_context[uart_num].hal), UART_INTR_RXFIFO_FULL | UART_INTR_RXFIFO_TOUT);
1008 UART_EXIT_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
1009 uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_RXFIFO_FULL | UART_INTR_RXFIFO_TOUT);
1010 if (uart_intr_status & UART_INTR_CMD_CHAR_DET) {
1011 uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_CMD_CHAR_DET);
1012 uart_event.type = UART_PATTERN_DET;
1013 uart_event.size = rx_fifo_len;
1014 pat_flg = 1;
1015 }
1016 }
1017 } else if (uart_intr_status & UART_INTR_RXFIFO_OVF) {
1018 // When fifo overflows, we reset the fifo.
1019 UART_ENTER_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
1020 uart_hal_rxfifo_rst(&(uart_context[uart_num].hal));
1021 UART_EXIT_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
1022 UART_ENTER_CRITICAL_ISR(&uart_selectlock);
1023 if (p_uart->uart_select_notif_callback) {
1024 p_uart->uart_select_notif_callback(uart_num, UART_SELECT_ERROR_NOTIF, &HPTaskAwoken);
1025 }
1026 UART_EXIT_CRITICAL_ISR(&uart_selectlock);
1027 uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_RXFIFO_OVF);
1028 uart_event.type = UART_FIFO_OVF;
1029 } else if (uart_intr_status & UART_INTR_BRK_DET) {
1030 uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_BRK_DET);
1031 uart_event.type = UART_BREAK;
1032 } else if (uart_intr_status & UART_INTR_FRAM_ERR) {
1033 UART_ENTER_CRITICAL_ISR(&uart_selectlock);
1034 if (p_uart->uart_select_notif_callback) {
1035 p_uart->uart_select_notif_callback(uart_num, UART_SELECT_ERROR_NOTIF, &HPTaskAwoken);
1036 }
1037 UART_EXIT_CRITICAL_ISR(&uart_selectlock);
1038 uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_FRAM_ERR);
1039 uart_event.type = UART_FRAME_ERR;
1040 } else if (uart_intr_status & UART_INTR_PARITY_ERR) {
1041 UART_ENTER_CRITICAL_ISR(&uart_selectlock);
1042 if (p_uart->uart_select_notif_callback) {
1043 p_uart->uart_select_notif_callback(uart_num, UART_SELECT_ERROR_NOTIF, &HPTaskAwoken);
1044 }
1045 UART_EXIT_CRITICAL_ISR(&uart_selectlock);
1046 uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_PARITY_ERR);
1047 uart_event.type = UART_PARITY_ERR;
1048 } else if (uart_intr_status & UART_INTR_TX_BRK_DONE) {
1049 UART_ENTER_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
1050 uart_hal_tx_break(&(uart_context[uart_num].hal), 0);
1051 uart_hal_disable_intr_mask(&(uart_context[uart_num].hal), UART_INTR_TX_BRK_DONE);
1052 if (p_uart->tx_brk_flg == 1) {
1053 uart_hal_ena_intr_mask(&(uart_context[uart_num].hal), UART_INTR_TXFIFO_EMPTY);
1054 }
1055 UART_EXIT_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
1056 uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_TX_BRK_DONE);
1057 if (p_uart->tx_brk_flg == 1) {
1058 p_uart->tx_brk_flg = 0;
1059 p_uart->tx_waiting_brk = 0;
1060 } else {
1061 xSemaphoreGiveFromISR(p_uart->tx_brk_sem, &HPTaskAwoken);
1062 }
1063 } else if (uart_intr_status & UART_INTR_TX_BRK_IDLE) {
1064 UART_ENTER_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
1065 uart_hal_disable_intr_mask(&(uart_context[uart_num].hal), UART_INTR_TX_BRK_IDLE);
1066 UART_EXIT_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
1067 uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_TX_BRK_IDLE);
1068 } else if (uart_intr_status & UART_INTR_CMD_CHAR_DET) {
1069 uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_CMD_CHAR_DET);
1070 uart_event.type = UART_PATTERN_DET;
1071 } else if ((uart_intr_status & UART_INTR_RS485_PARITY_ERR)
1072 || (uart_intr_status & UART_INTR_RS485_FRM_ERR)
1073 || (uart_intr_status & UART_INTR_RS485_CLASH)) {
1074 // RS485 collision or frame error interrupt triggered
1075 UART_ENTER_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
1076 uart_hal_rxfifo_rst(&(uart_context[uart_num].hal));
1077 // Set collision detection flag
1078 p_uart_obj[uart_num]->coll_det_flg = true;
1079 UART_EXIT_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
1080 uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_RS485_CLASH | UART_INTR_RS485_FRM_ERR | UART_INTR_RS485_PARITY_ERR);
1081 uart_event.type = UART_EVENT_MAX;
1082 } else if (uart_intr_status & UART_INTR_TX_DONE) {
1083 if (UART_IS_MODE_SET(uart_num, UART_MODE_RS485_HALF_DUPLEX) && uart_hal_is_tx_idle(&(uart_context[uart_num].hal)) != true) {
1084 // The TX_DONE interrupt was triggered but the transmitter is still active,
1085 // so postpone processing until the next interrupt
1086 uart_event.type = UART_EVENT_MAX;
1087 } else {
1088 // Workaround for RS485: if half-duplex mode is active
1089 // and the transmitter is idle, reset the receive buffer and release the RTS pin;
1090 // other UART modes skip this behavior
1091 UART_ENTER_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
1092 uart_hal_disable_intr_mask(&(uart_context[uart_num].hal), UART_INTR_TX_DONE);
1093 if (UART_IS_MODE_SET(uart_num, UART_MODE_RS485_HALF_DUPLEX)) {
1094 uart_hal_rxfifo_rst(&(uart_context[uart_num].hal));
1095 uart_hal_set_rts(&(uart_context[uart_num].hal), 1);
1096 }
1097 UART_EXIT_CRITICAL_ISR(&(uart_context[uart_num].spinlock));
1098 uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_TX_DONE);
1099 xSemaphoreGiveFromISR(p_uart_obj[uart_num]->tx_done_sem, &HPTaskAwoken);
1100 }
1101 } else {
1102 uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), uart_intr_status); /*simply clear all other intr status*/
1103 uart_event.type = UART_EVENT_MAX;
1104 }
1105
1106 if (uart_event.type != UART_EVENT_MAX && p_uart->event_queue) {
1107 if (pdFALSE == xQueueSendFromISR(p_uart->event_queue, (void * )&uart_event, &HPTaskAwoken)) {
1108 #ifndef CONFIG_UART_ISR_IN_IRAM //Only log if ISR is not in IRAM
1109 ESP_EARLY_LOGV(UART_TAG, "UART event queue full");
1110 #endif
1111 }
1112 }
1113 }
1114 if (HPTaskAwoken == pdTRUE) {
1115 portYIELD_FROM_ISR();
1116 }
1117 }
1118
1119 /**************************************************************/
1120 esp_err_t uart_wait_tx_done(uart_port_t uart_num, TickType_t ticks_to_wait)
1121 {
1122 ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
1123 ESP_RETURN_ON_FALSE((p_uart_obj[uart_num]), ESP_FAIL, UART_TAG, "uart driver error");
1124 BaseType_t res;
1125 portTickType ticks_start = xTaskGetTickCount();
1126 //Take tx_mux
1127 res = xSemaphoreTake(p_uart_obj[uart_num]->tx_mux, (portTickType)ticks_to_wait);
1128 if (res == pdFALSE) {
1129 return ESP_ERR_TIMEOUT;
1130 }
1131 xSemaphoreTake(p_uart_obj[uart_num]->tx_done_sem, 0);
1132 if (uart_hal_is_tx_idle(&(uart_context[uart_num].hal))) {
1133 xSemaphoreGive(p_uart_obj[uart_num]->tx_mux);
1134 return ESP_OK;
1135 }
1136 UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
1137 uart_hal_ena_intr_mask(&(uart_context[uart_num].hal), UART_INTR_TX_DONE);
1138 UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
1139
1140 TickType_t ticks_end = xTaskGetTickCount();
1141 if (ticks_end - ticks_start > ticks_to_wait) {
1142 ticks_to_wait = 0;
1143 } else {
1144 ticks_to_wait = ticks_to_wait - (ticks_end - ticks_start);
1145 }
1146 //Take tx_done_sem a second time; it is given from the ISR once TX_DONE fires
1147 res = xSemaphoreTake(p_uart_obj[uart_num]->tx_done_sem, (portTickType)ticks_to_wait);
1148 if (res == pdFALSE) {
1149 // The TX_DONE interrupt will be disabled in ISR
1150 xSemaphoreGive(p_uart_obj[uart_num]->tx_mux);
1151 return ESP_ERR_TIMEOUT;
1152 }
1153 xSemaphoreGive(p_uart_obj[uart_num]->tx_mux);
1154 return ESP_OK;
1155 }
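/*
 * Example (illustrative sketch): block for at most 100 ms until the last queued byte
 * has actually left the transmitter, e.g. before switching an external RS485
 * transceiver back to receive:
 *
 *     if (uart_wait_tx_done(UART_NUM_1, pdMS_TO_TICKS(100)) != ESP_OK) {
 *         // transmission still in progress when the timeout expired
 *     }
 */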
1156
1157 int uart_tx_chars(uart_port_t uart_num, const char *buffer, uint32_t len)
1158 {
1159 ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), (-1), UART_TAG, "uart_num error");
1160 ESP_RETURN_ON_FALSE((p_uart_obj[uart_num]), (-1), UART_TAG, "uart driver error");
1161 ESP_RETURN_ON_FALSE(buffer, (-1), UART_TAG, "buffer null");
1162 if (len == 0) {
1163 return 0;
1164 }
1165 int tx_len = 0;
1166 xSemaphoreTake(p_uart_obj[uart_num]->tx_mux, (portTickType)portMAX_DELAY);
1167 if (UART_IS_MODE_SET(uart_num, UART_MODE_RS485_HALF_DUPLEX)) {
1168 UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
1169 uart_hal_set_rts(&(uart_context[uart_num].hal), 0);
1170 uart_hal_ena_intr_mask(&(uart_context[uart_num].hal), UART_INTR_TX_DONE);
1171 UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
1172 }
1173 uart_hal_write_txfifo(&(uart_context[uart_num].hal), (const uint8_t *) buffer, len, (uint32_t *)&tx_len);
1174 xSemaphoreGive(p_uart_obj[uart_num]->tx_mux);
1175 return tx_len;
1176 }
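/*
 * Example (illustrative sketch): uart_tx_chars() only copies what currently fits
 * into the hardware TX FIFO and returns the number of bytes accepted, so the caller
 * must retry the remainder (or use uart_write_bytes() for complete delivery):
 *
 *     const char *msg = "ping\r\n";
 *     int written = uart_tx_chars(UART_NUM_1, msg, strlen(msg));
 *     // 0 <= written <= strlen(msg); unsent bytes are the caller's responsibility
 */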
1177
1178 static int uart_tx_all(uart_port_t uart_num, const char *src, size_t size, bool brk_en, int brk_len)
1179 {
1180 if (size == 0) {
1181 return 0;
1182 }
1183 size_t original_size = size;
1184
1185 //lock for uart_tx
1186 xSemaphoreTake(p_uart_obj[uart_num]->tx_mux, (portTickType)portMAX_DELAY);
1187 p_uart_obj[uart_num]->coll_det_flg = false;
1188 if (p_uart_obj[uart_num]->tx_buf_size > 0) {
1189 size_t max_size = xRingbufferGetMaxItemSize(p_uart_obj[uart_num]->tx_ring_buf);
1190 int offset = 0;
1191 uart_tx_data_t evt;
1192 evt.tx_data.size = size;
1193 evt.tx_data.brk_len = brk_len;
1194 if (brk_en) {
1195 evt.type = UART_DATA_BREAK;
1196 } else {
1197 evt.type = UART_DATA;
1198 }
1199 xRingbufferSend(p_uart_obj[uart_num]->tx_ring_buf, (void *) &evt, sizeof(uart_tx_data_t), portMAX_DELAY);
1200 while (size > 0) {
1201 size_t send_size = size > max_size / 2 ? max_size / 2 : size;
1202 xRingbufferSend(p_uart_obj[uart_num]->tx_ring_buf, (void *) (src + offset), send_size, portMAX_DELAY);
1203 size -= send_size;
1204 offset += send_size;
1205 uart_enable_tx_intr(uart_num, 1, UART_EMPTY_THRESH_DEFAULT);
1206 }
1207 } else {
1208 while (size) {
1209 //semaphore for tx_fifo available
1210 if (pdTRUE == xSemaphoreTake(p_uart_obj[uart_num]->tx_fifo_sem, (portTickType)portMAX_DELAY)) {
1211 uint32_t sent = 0;
1212 if (UART_IS_MODE_SET(uart_num, UART_MODE_RS485_HALF_DUPLEX)) {
1213 UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
1214 uart_hal_set_rts(&(uart_context[uart_num].hal), 0);
1215 uart_hal_ena_intr_mask(&(uart_context[uart_num].hal), UART_INTR_TX_DONE);
1216 UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
1217 }
1218 uart_hal_write_txfifo(&(uart_context[uart_num].hal), (const uint8_t *)src, size, &sent);
1219 if (sent < size) {
1220 p_uart_obj[uart_num]->tx_waiting_fifo = true;
1221 uart_enable_tx_intr(uart_num, 1, UART_EMPTY_THRESH_DEFAULT);
1222 }
1223 size -= sent;
1224 src += sent;
1225 }
1226 }
1227 if (brk_en) {
1228 uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_INTR_TX_BRK_DONE);
1229 UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
1230 uart_hal_tx_break(&(uart_context[uart_num].hal), brk_len);
1231 uart_hal_ena_intr_mask(&(uart_context[uart_num].hal), UART_INTR_TX_BRK_DONE);
1232 UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
1233 xSemaphoreTake(p_uart_obj[uart_num]->tx_brk_sem, (portTickType)portMAX_DELAY);
1234 }
1235 xSemaphoreGive(p_uart_obj[uart_num]->tx_fifo_sem);
1236 }
1237 xSemaphoreGive(p_uart_obj[uart_num]->tx_mux);
1238 return original_size;
1239 }
1240
uart_write_bytes(uart_port_t uart_num,const void * src,size_t size)1241 int uart_write_bytes(uart_port_t uart_num, const void *src, size_t size)
1242 {
1243 ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), (-1), UART_TAG, "uart_num error");
1244 ESP_RETURN_ON_FALSE((p_uart_obj[uart_num] != NULL), (-1), UART_TAG, "uart driver error");
1245 ESP_RETURN_ON_FALSE(src, (-1), UART_TAG, "buffer null");
1246 return uart_tx_all(uart_num, src, size, 0, 0);
1247 }
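
/* Usage sketch (illustrative only, not part of the driver): with a TX ring
 * buffer configured at install time, uart_write_bytes() copies the whole
 * payload into the ring buffer and returns once it is queued; the ISR drains
 * it in the background. Port and payload below are hypothetical.
 *
 *  void example_write(void)
 *  {
 *      static const char msg[] = "hello uart\r\n";
 *      int written = uart_write_bytes(UART_NUM_1, msg, sizeof(msg) - 1);
 *      if (written != sizeof(msg) - 1) {
 *          ESP_LOGE("app", "write failed: %d", written);
 *      }
 *      // Optionally block until the data has actually left the TX FIFO,
 *      // e.g. before entering sleep or turning an RS485 transceiver around.
 *      uart_wait_tx_done(UART_NUM_1, pdMS_TO_TICKS(100));
 *  }
 */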

int uart_write_bytes_with_break(uart_port_t uart_num, const void *src, size_t size, int brk_len)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), (-1), UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((p_uart_obj[uart_num]), (-1), UART_TAG, "uart driver error");
    ESP_RETURN_ON_FALSE((size > 0), (-1), UART_TAG, "uart size error");
    ESP_RETURN_ON_FALSE((src), (-1), UART_TAG, "uart data null");
    ESP_RETURN_ON_FALSE((brk_len > 0 && brk_len < 256), (-1), UART_TAG, "break_num error");
    return uart_tx_all(uart_num, src, size, 1, brk_len);
}

static bool uart_check_buf_full(uart_port_t uart_num)
{
    if (p_uart_obj[uart_num]->rx_buffer_full_flg) {
        BaseType_t res = xRingbufferSend(p_uart_obj[uart_num]->rx_ring_buf, p_uart_obj[uart_num]->rx_data_buf, p_uart_obj[uart_num]->rx_stash_len, 1);
        if (res == pdTRUE) {
            UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
            p_uart_obj[uart_num]->rx_buffered_len += p_uart_obj[uart_num]->rx_stash_len;
            p_uart_obj[uart_num]->rx_buffer_full_flg = false;
            UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
            /* Only re-activate UART_INTR_RXFIFO_TOUT or UART_INTR_RXFIFO_FULL
             * interrupts if they were NOT explicitly disabled by the user. */
            uart_reenable_intr_mask(p_uart_obj[uart_num]->uart_num, UART_INTR_RXFIFO_TOUT | UART_INTR_RXFIFO_FULL);
            return true;
        }
    }
    return false;
}

int uart_read_bytes(uart_port_t uart_num, void *buf, uint32_t length, TickType_t ticks_to_wait)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), (-1), UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((buf), (-1), UART_TAG, "uart data null");
    ESP_RETURN_ON_FALSE((p_uart_obj[uart_num]), (-1), UART_TAG, "uart driver error");
    uint8_t *data = NULL;
    size_t size;
    size_t copy_len = 0;
    int len_tmp;
    if (xSemaphoreTake(p_uart_obj[uart_num]->rx_mux, (portTickType)ticks_to_wait) != pdTRUE) {
        return -1;
    }
    while (length) {
        if (p_uart_obj[uart_num]->rx_cur_remain == 0) {
            data = (uint8_t *) xRingbufferReceive(p_uart_obj[uart_num]->rx_ring_buf, &size, (portTickType) ticks_to_wait);
            if (data) {
                p_uart_obj[uart_num]->rx_head_ptr = data;
                p_uart_obj[uart_num]->rx_ptr = data;
                p_uart_obj[uart_num]->rx_cur_remain = size;
            } else {
                //When using dual cores, `rx_buffer_full_flg` may be read and written on different cores at the same time,
                //which can lose synchronization. So we also need to call `uart_check_buf_full` once when the ring buffer is empty
                //to resolve these possible race conditions.
                if (uart_check_buf_full(uart_num)) {
                    //This condition will never be true if `uart_read_bytes`
                    //and `uart_rx_intr_handler_default` are scheduled on the same core.
                    continue;
                } else {
                    xSemaphoreGive(p_uart_obj[uart_num]->rx_mux);
                    return copy_len;
                }
            }
        }
        if (p_uart_obj[uart_num]->rx_cur_remain > length) {
            len_tmp = length;
        } else {
            len_tmp = p_uart_obj[uart_num]->rx_cur_remain;
        }
        memcpy((uint8_t *)buf + copy_len, p_uart_obj[uart_num]->rx_ptr, len_tmp);
        UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
        p_uart_obj[uart_num]->rx_buffered_len -= len_tmp;
        uart_pattern_queue_update(uart_num, len_tmp);
        p_uart_obj[uart_num]->rx_ptr += len_tmp;
        UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
        p_uart_obj[uart_num]->rx_cur_remain -= len_tmp;
        copy_len += len_tmp;
        length -= len_tmp;
        if (p_uart_obj[uart_num]->rx_cur_remain == 0) {
            vRingbufferReturnItem(p_uart_obj[uart_num]->rx_ring_buf, p_uart_obj[uart_num]->rx_head_ptr);
            p_uart_obj[uart_num]->rx_head_ptr = NULL;
            p_uart_obj[uart_num]->rx_ptr = NULL;
            uart_check_buf_full(uart_num);
        }
    }

    xSemaphoreGive(p_uart_obj[uart_num]->rx_mux);
    return copy_len;
}
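
/* Usage sketch (illustrative only, not part of the driver): poll how many
 * bytes are already buffered, then read at most that amount with a short
 * timeout so the call cannot block indefinitely. Port and buffer size are
 * hypothetical.
 *
 *  void example_read(void)
 *  {
 *      uint8_t buf[128];
 *      size_t avail = 0;
 *      uart_get_buffered_data_len(UART_NUM_1, &avail);
 *      if (avail > sizeof(buf)) {
 *          avail = sizeof(buf);
 *      }
 *      int got = uart_read_bytes(UART_NUM_1, buf, avail, pdMS_TO_TICKS(20));
 *      if (got > 0) {
 *          // process `got` bytes from buf ...
 *      }
 *  }
 */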

esp_err_t uart_get_buffered_data_len(uart_port_t uart_num, size_t *size)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((p_uart_obj[uart_num]), ESP_FAIL, UART_TAG, "uart driver error");
    UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
    *size = p_uart_obj[uart_num]->rx_buffered_len;
    UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
    return ESP_OK;
}

esp_err_t uart_flush(uart_port_t uart_num) __attribute__((alias("uart_flush_input")));

esp_err_t uart_flush_input(uart_port_t uart_num)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((p_uart_obj[uart_num]), ESP_FAIL, UART_TAG, "uart driver error");
    uart_obj_t *p_uart = p_uart_obj[uart_num];
    uint8_t *data;
    size_t size;

    //rx sem protects the ring buffer read related functions
    xSemaphoreTake(p_uart->rx_mux, (portTickType)portMAX_DELAY);
    UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
    uart_hal_disable_intr_mask(&(uart_context[uart_num].hal), UART_INTR_RXFIFO_FULL | UART_INTR_RXFIFO_TOUT);
    UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
    while (true) {
        if (p_uart->rx_head_ptr) {
            vRingbufferReturnItem(p_uart->rx_ring_buf, p_uart->rx_head_ptr);
            UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
            p_uart_obj[uart_num]->rx_buffered_len -= p_uart->rx_cur_remain;
            uart_pattern_queue_update(uart_num, p_uart->rx_cur_remain);
            UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
            p_uart->rx_ptr = NULL;
            p_uart->rx_cur_remain = 0;
            p_uart->rx_head_ptr = NULL;
        }
        data = (uint8_t *) xRingbufferReceive(p_uart->rx_ring_buf, &size, (portTickType) 0);
        if (data == NULL) {
            bool error = false;
            UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
            if (p_uart_obj[uart_num]->rx_buffered_len != 0) {
                p_uart_obj[uart_num]->rx_buffered_len = 0;
                error = true;
            }
            //We also need to clear the `rx_buffer_full_flg` here.
            p_uart_obj[uart_num]->rx_buffer_full_flg = false;
            UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
            if (error) {
                // this must be called outside the critical section
                ESP_LOGE(UART_TAG, "rx_buffered_len error");
            }
            break;
        }
        UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
        p_uart_obj[uart_num]->rx_buffered_len -= size;
        uart_pattern_queue_update(uart_num, size);
        UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
        vRingbufferReturnItem(p_uart->rx_ring_buf, data);
        if (p_uart_obj[uart_num]->rx_buffer_full_flg) {
            BaseType_t res = xRingbufferSend(p_uart_obj[uart_num]->rx_ring_buf, p_uart_obj[uart_num]->rx_data_buf, p_uart_obj[uart_num]->rx_stash_len, 1);
            if (res == pdTRUE) {
                UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
                p_uart_obj[uart_num]->rx_buffered_len += p_uart_obj[uart_num]->rx_stash_len;
                p_uart_obj[uart_num]->rx_buffer_full_flg = false;
                UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
            }
        }
    }
    p_uart->rx_ptr = NULL;
    p_uart->rx_cur_remain = 0;
    p_uart->rx_head_ptr = NULL;
    uart_hal_rxfifo_rst(&(uart_context[uart_num].hal));
    /* Only re-enable UART_INTR_RXFIFO_TOUT or UART_INTR_RXFIFO_FULL if they
     * were explicitly enabled by the user. */
    uart_reenable_intr_mask(uart_num, UART_INTR_RXFIFO_TOUT | UART_INTR_RXFIFO_FULL);
    xSemaphoreGive(p_uart->rx_mux);
    return ESP_OK;
}
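
/* Usage sketch (illustrative only, not part of the driver): discard any stale
 * input right before a request/response exchange so the reply is not mixed
 * with leftover bytes. Port and timeouts are hypothetical.
 *
 *  void example_request(const char *cmd, size_t cmd_len, uint8_t *resp, size_t resp_len)
 *  {
 *      uart_flush_input(UART_NUM_1);                  // drop buffered RX data
 *      uart_write_bytes(UART_NUM_1, cmd, cmd_len);    // send the request
 *      uart_read_bytes(UART_NUM_1, resp, resp_len, pdMS_TO_TICKS(100));
 *  }
 */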

static void uart_free_driver_obj(uart_obj_t *uart_obj)
{
    if (uart_obj->tx_fifo_sem) {
        vSemaphoreDelete(uart_obj->tx_fifo_sem);
    }
    if (uart_obj->tx_done_sem) {
        vSemaphoreDelete(uart_obj->tx_done_sem);
    }
    if (uart_obj->tx_brk_sem) {
        vSemaphoreDelete(uart_obj->tx_brk_sem);
    }
    if (uart_obj->tx_mux) {
        vSemaphoreDelete(uart_obj->tx_mux);
    }
    if (uart_obj->rx_mux) {
        vSemaphoreDelete(uart_obj->rx_mux);
    }
    if (uart_obj->event_queue) {
        vQueueDelete(uart_obj->event_queue);
    }
    if (uart_obj->rx_ring_buf) {
        vRingbufferDelete(uart_obj->rx_ring_buf);
    }
    if (uart_obj->tx_ring_buf) {
        vRingbufferDelete(uart_obj->tx_ring_buf);
    }
#if CONFIG_UART_ISR_IN_IRAM
    free(uart_obj->event_queue_storage);
    free(uart_obj->event_queue_struct);
    free(uart_obj->tx_ring_buf_storage);
    free(uart_obj->tx_ring_buf_struct);
    free(uart_obj->rx_ring_buf_storage);
    free(uart_obj->rx_ring_buf_struct);
    free(uart_obj->rx_mux_struct);
    free(uart_obj->tx_mux_struct);
    free(uart_obj->tx_brk_sem_struct);
    free(uart_obj->tx_done_sem_struct);
    free(uart_obj->tx_fifo_sem_struct);
#endif
    free(uart_obj);
}

static uart_obj_t *uart_alloc_driver_obj(int event_queue_size, int tx_buffer_size, int rx_buffer_size)
{
    uart_obj_t *uart_obj = heap_caps_calloc(1, sizeof(uart_obj_t), UART_MALLOC_CAPS);
    if (!uart_obj) {
        return NULL;
    }
#if CONFIG_UART_ISR_IN_IRAM
    if (event_queue_size > 0) {
        uart_obj->event_queue_storage = heap_caps_calloc(event_queue_size, sizeof(uart_event_t), UART_MALLOC_CAPS);
        uart_obj->event_queue_struct = heap_caps_calloc(1, sizeof(StaticQueue_t), UART_MALLOC_CAPS);
        if (!uart_obj->event_queue_storage || !uart_obj->event_queue_struct) {
            goto err;
        }
    }
    if (tx_buffer_size > 0) {
        uart_obj->tx_ring_buf_storage = heap_caps_calloc(1, tx_buffer_size, UART_MALLOC_CAPS);
        uart_obj->tx_ring_buf_struct = heap_caps_calloc(1, sizeof(StaticRingbuffer_t), UART_MALLOC_CAPS);
        if (!uart_obj->tx_ring_buf_storage || !uart_obj->tx_ring_buf_struct) {
            goto err;
        }
    }
    uart_obj->rx_ring_buf_storage = heap_caps_calloc(1, rx_buffer_size, UART_MALLOC_CAPS);
    uart_obj->rx_ring_buf_struct = heap_caps_calloc(1, sizeof(StaticRingbuffer_t), UART_MALLOC_CAPS);
    uart_obj->rx_mux_struct = heap_caps_calloc(1, sizeof(StaticSemaphore_t), UART_MALLOC_CAPS);
    uart_obj->tx_mux_struct = heap_caps_calloc(1, sizeof(StaticSemaphore_t), UART_MALLOC_CAPS);
    uart_obj->tx_brk_sem_struct = heap_caps_calloc(1, sizeof(StaticSemaphore_t), UART_MALLOC_CAPS);
    uart_obj->tx_done_sem_struct = heap_caps_calloc(1, sizeof(StaticSemaphore_t), UART_MALLOC_CAPS);
    uart_obj->tx_fifo_sem_struct = heap_caps_calloc(1, sizeof(StaticSemaphore_t), UART_MALLOC_CAPS);
    if (!uart_obj->rx_ring_buf_storage || !uart_obj->rx_ring_buf_struct || !uart_obj->rx_mux_struct ||
            !uart_obj->tx_mux_struct || !uart_obj->tx_brk_sem_struct || !uart_obj->tx_done_sem_struct ||
            !uart_obj->tx_fifo_sem_struct) {
        goto err;
    }
    if (event_queue_size > 0) {
        uart_obj->event_queue = xQueueCreateStatic(event_queue_size, sizeof(uart_event_t),
                                                   uart_obj->event_queue_storage, uart_obj->event_queue_struct);
        if (!uart_obj->event_queue) {
            goto err;
        }
    }
    if (tx_buffer_size > 0) {
        uart_obj->tx_ring_buf = xRingbufferCreateStatic(tx_buffer_size, RINGBUF_TYPE_NOSPLIT,
                                                        uart_obj->tx_ring_buf_storage, uart_obj->tx_ring_buf_struct);
        if (!uart_obj->tx_ring_buf) {
            goto err;
        }
    }
    uart_obj->rx_ring_buf = xRingbufferCreateStatic(rx_buffer_size, RINGBUF_TYPE_BYTEBUF,
                                                    uart_obj->rx_ring_buf_storage, uart_obj->rx_ring_buf_struct);
    uart_obj->rx_mux = xSemaphoreCreateMutexStatic(uart_obj->rx_mux_struct);
    uart_obj->tx_mux = xSemaphoreCreateMutexStatic(uart_obj->tx_mux_struct);
    uart_obj->tx_brk_sem = xSemaphoreCreateBinaryStatic(uart_obj->tx_brk_sem_struct);
    uart_obj->tx_done_sem = xSemaphoreCreateBinaryStatic(uart_obj->tx_done_sem_struct);
    uart_obj->tx_fifo_sem = xSemaphoreCreateBinaryStatic(uart_obj->tx_fifo_sem_struct);
    if (!uart_obj->rx_ring_buf || !uart_obj->rx_mux || !uart_obj->tx_mux || !uart_obj->tx_brk_sem ||
            !uart_obj->tx_done_sem || !uart_obj->tx_fifo_sem) {
        goto err;
    }
#else
    if (event_queue_size > 0) {
        uart_obj->event_queue = xQueueCreate(event_queue_size, sizeof(uart_event_t));
        if (!uart_obj->event_queue) {
            goto err;
        }
    }
    if (tx_buffer_size > 0) {
        uart_obj->tx_ring_buf = xRingbufferCreate(tx_buffer_size, RINGBUF_TYPE_NOSPLIT);
        if (!uart_obj->tx_ring_buf) {
            goto err;
        }
    }
    uart_obj->rx_ring_buf = xRingbufferCreate(rx_buffer_size, RINGBUF_TYPE_BYTEBUF);
    uart_obj->tx_mux = xSemaphoreCreateMutex();
    uart_obj->rx_mux = xSemaphoreCreateMutex();
    uart_obj->tx_brk_sem = xSemaphoreCreateBinary();
    uart_obj->tx_done_sem = xSemaphoreCreateBinary();
    uart_obj->tx_fifo_sem = xSemaphoreCreateBinary();
    if (!uart_obj->rx_ring_buf || !uart_obj->rx_mux || !uart_obj->tx_mux || !uart_obj->tx_brk_sem ||
            !uart_obj->tx_done_sem || !uart_obj->tx_fifo_sem) {
        goto err;
    }
#endif
    return uart_obj;

err:
    uart_free_driver_obj(uart_obj);
    return NULL;
}

esp_err_t uart_driver_install(uart_port_t uart_num, int rx_buffer_size, int tx_buffer_size, int event_queue_size, QueueHandle_t *uart_queue, int intr_alloc_flags)
{
    esp_err_t r;
#ifdef CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
    ESP_RETURN_ON_FALSE((uart_num != CONFIG_ESP_CONSOLE_UART_NUM), ESP_FAIL, UART_TAG, "UART used by GDB-stubs! Please disable GDB in menuconfig.");
#endif // CONFIG_ESP_SYSTEM_GDBSTUB_RUNTIME
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((rx_buffer_size > SOC_UART_FIFO_LEN), ESP_FAIL, UART_TAG, "uart rx buffer length error");
    ESP_RETURN_ON_FALSE((tx_buffer_size > SOC_UART_FIFO_LEN) || (tx_buffer_size == 0), ESP_FAIL, UART_TAG, "uart tx buffer length error");
#if CONFIG_UART_ISR_IN_IRAM
    if ((intr_alloc_flags & ESP_INTR_FLAG_IRAM) == 0) {
        ESP_LOGI(UART_TAG, "ESP_INTR_FLAG_IRAM flag not set while CONFIG_UART_ISR_IN_IRAM is enabled, flag updated");
        intr_alloc_flags |= ESP_INTR_FLAG_IRAM;
    }
#else
    if ((intr_alloc_flags & ESP_INTR_FLAG_IRAM) != 0) {
        ESP_LOGW(UART_TAG, "ESP_INTR_FLAG_IRAM flag is set while CONFIG_UART_ISR_IN_IRAM is not enabled, flag updated");
        intr_alloc_flags &= ~ESP_INTR_FLAG_IRAM;
    }
#endif

    if (p_uart_obj[uart_num] == NULL) {
        p_uart_obj[uart_num] = uart_alloc_driver_obj(event_queue_size, tx_buffer_size, rx_buffer_size);
        if (p_uart_obj[uart_num] == NULL) {
            ESP_LOGE(UART_TAG, "UART driver malloc error");
            return ESP_FAIL;
        }
        p_uart_obj[uart_num]->uart_num = uart_num;
        p_uart_obj[uart_num]->uart_mode = UART_MODE_UART;
        p_uart_obj[uart_num]->coll_det_flg = false;
        p_uart_obj[uart_num]->rx_always_timeout_flg = false;
        p_uart_obj[uart_num]->event_queue_size = event_queue_size;
        p_uart_obj[uart_num]->tx_ptr = NULL;
        p_uart_obj[uart_num]->tx_head = NULL;
        p_uart_obj[uart_num]->tx_len_tot = 0;
        p_uart_obj[uart_num]->tx_brk_flg = 0;
        p_uart_obj[uart_num]->tx_brk_len = 0;
        p_uart_obj[uart_num]->tx_waiting_brk = 0;
        p_uart_obj[uart_num]->rx_buffered_len = 0;
        p_uart_obj[uart_num]->rx_buffer_full_flg = false;
        p_uart_obj[uart_num]->tx_waiting_fifo = false;
        p_uart_obj[uart_num]->rx_ptr = NULL;
        p_uart_obj[uart_num]->rx_cur_remain = 0;
        p_uart_obj[uart_num]->rx_int_usr_mask = UART_INTR_RXFIFO_FULL | UART_INTR_RXFIFO_TOUT;
        p_uart_obj[uart_num]->rx_head_ptr = NULL;
        p_uart_obj[uart_num]->tx_buf_size = tx_buffer_size;
        p_uart_obj[uart_num]->uart_select_notif_callback = NULL;
        xSemaphoreGive(p_uart_obj[uart_num]->tx_fifo_sem);
        uart_pattern_queue_reset(uart_num, UART_PATTERN_DET_QLEN_DEFAULT);
        if (uart_queue) {
            *uart_queue = p_uart_obj[uart_num]->event_queue;
            ESP_LOGI(UART_TAG, "queue free spaces: %d", uxQueueSpacesAvailable(p_uart_obj[uart_num]->event_queue));
        }
    } else {
        ESP_LOGE(UART_TAG, "UART driver already installed");
        return ESP_FAIL;
    }

    uart_intr_config_t uart_intr = {
        .intr_enable_mask = UART_INTR_CONFIG_FLAG,
        .rxfifo_full_thresh = UART_FULL_THRESH_DEFAULT,
        .rx_timeout_thresh = UART_TOUT_THRESH_DEFAULT,
        .txfifo_empty_intr_thresh = UART_EMPTY_THRESH_DEFAULT,
    };
    uart_module_enable(uart_num);
    uart_hal_disable_intr_mask(&(uart_context[uart_num].hal), UART_LL_INTR_MASK);
    uart_hal_clr_intsts_mask(&(uart_context[uart_num].hal), UART_LL_INTR_MASK);
    r = uart_isr_register(uart_num, uart_rx_intr_handler_default, p_uart_obj[uart_num], intr_alloc_flags, &p_uart_obj[uart_num]->intr_handle);
    if (r != ESP_OK) {
        goto err;
    }
    r = uart_intr_config(uart_num, &uart_intr);
    if (r != ESP_OK) {
        goto err;
    }
    return r;

err:
    uart_driver_delete(uart_num);
    return r;
}
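
/* Usage sketch (illustrative only, not part of the driver): install the
 * driver with RX/TX ring buffers and an event queue, then service events from
 * a task. Port, pins, buffer sizes and task wiring below are hypothetical.
 *
 *  #define EX_BUF_SIZE (1024)
 *
 *  static QueueHandle_t ex_uart_queue;
 *
 *  void example_install(void)
 *  {
 *      const uart_config_t cfg = {
 *          .baud_rate = 115200,
 *          .data_bits = UART_DATA_8_BITS,
 *          .parity    = UART_PARITY_DISABLE,
 *          .stop_bits = UART_STOP_BITS_1,
 *          .flow_ctrl = UART_HW_FLOWCTRL_DISABLE,
 *          .source_clk = UART_SCLK_APB,
 *      };
 *      ESP_ERROR_CHECK(uart_driver_install(UART_NUM_1, EX_BUF_SIZE, EX_BUF_SIZE, 20, &ex_uart_queue, 0));
 *      ESP_ERROR_CHECK(uart_param_config(UART_NUM_1, &cfg));
 *      ESP_ERROR_CHECK(uart_set_pin(UART_NUM_1, 17, 16, UART_PIN_NO_CHANGE, UART_PIN_NO_CHANGE));
 *  }
 *
 *  void example_event_task(void *arg)
 *  {
 *      uart_event_t event;
 *      for (;;) {
 *          if (xQueueReceive(ex_uart_queue, &event, portMAX_DELAY)) {
 *              if (event.type == UART_DATA) {
 *                  // event.size bytes are waiting in the RX ring buffer
 *              } else if (event.type == UART_FIFO_OVF || event.type == UART_BUFFER_FULL) {
 *                  uart_flush_input(UART_NUM_1);
 *                  xQueueReset(ex_uart_queue);
 *              }
 *          }
 *      }
 *  }
 */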

//Make sure no other tasks are still using UART before you call this function
esp_err_t uart_driver_delete(uart_port_t uart_num)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_FAIL, UART_TAG, "uart_num error");
    if (p_uart_obj[uart_num] == NULL) {
        ESP_LOGI(UART_TAG, "ALREADY NULL");
        return ESP_OK;
    }
    esp_intr_free(p_uart_obj[uart_num]->intr_handle);
    uart_disable_rx_intr(uart_num);
    uart_disable_tx_intr(uart_num);
    uart_pattern_link_free(uart_num);
    uart_free_driver_obj(p_uart_obj[uart_num]);
    p_uart_obj[uart_num] = NULL;

#if SOC_UART_SUPPORT_RTC_CLK
    uart_sclk_t sclk = 0;
    uart_hal_get_sclk(&(uart_context[uart_num].hal), &sclk);
    if (sclk == UART_SCLK_RTC) {
        rtc_clk_disable(uart_num);
    }
#endif
    uart_module_disable(uart_num);
    return ESP_OK;
}

bool uart_is_driver_installed(uart_port_t uart_num)
{
    return uart_num < UART_NUM_MAX && (p_uart_obj[uart_num] != NULL);
}

void uart_set_select_notif_callback(uart_port_t uart_num, uart_select_notif_callback_t uart_select_notif_callback)
{
    if (uart_num < UART_NUM_MAX && p_uart_obj[uart_num]) {
        p_uart_obj[uart_num]->uart_select_notif_callback = (uart_select_notif_callback_t) uart_select_notif_callback;
    }
}

portMUX_TYPE *uart_get_selectlock(void)
{
    return &uart_selectlock;
}

// Set UART mode
esp_err_t uart_set_mode(uart_port_t uart_num, uart_mode_t mode)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_ERR_INVALID_ARG, UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((p_uart_obj[uart_num]), ESP_ERR_INVALID_STATE, UART_TAG, "uart driver error");
    if ((mode == UART_MODE_RS485_COLLISION_DETECT) || (mode == UART_MODE_RS485_APP_CTRL)
            || (mode == UART_MODE_RS485_HALF_DUPLEX)) {
        ESP_RETURN_ON_FALSE((!uart_hal_is_hw_rts_en(&(uart_context[uart_num].hal))), ESP_ERR_INVALID_ARG, UART_TAG,
                            "disable hw flowctrl before using RS485 mode");
    }
    UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
    uart_hal_set_mode(&(uart_context[uart_num].hal), mode);
    if (mode == UART_MODE_RS485_COLLISION_DETECT) {
        // This mode allows receiving while transmitting, which makes collision detection possible
        p_uart_obj[uart_num]->coll_det_flg = false;
        // Enable collision detection interrupts
        uart_hal_ena_intr_mask(&(uart_context[uart_num].hal), UART_INTR_RXFIFO_TOUT
                               | UART_INTR_RXFIFO_FULL
                               | UART_INTR_RS485_CLASH
                               | UART_INTR_RS485_FRM_ERR
                               | UART_INTR_RS485_PARITY_ERR);
    }
    p_uart_obj[uart_num]->uart_mode = mode;
    UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
    return ESP_OK;
}
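
/* Usage sketch (illustrative only, not part of the driver): switch an already
 * installed port to RS485 half-duplex mode; in this mode the driver asserts
 * RTS before each transmission and releases it when TX_DONE fires, which can
 * drive a transceiver's DE/RE pin. Hardware flow control must be disabled
 * first. Port and pins below are hypothetical.
 *
 *  void example_rs485(void)
 *  {
 *      // RTS pin controls the RS485 transceiver direction; CTS is unused.
 *      ESP_ERROR_CHECK(uart_set_pin(UART_NUM_1, 17, 16, 18, UART_PIN_NO_CHANGE));
 *      ESP_ERROR_CHECK(uart_set_mode(UART_NUM_1, UART_MODE_RS485_HALF_DUPLEX));
 *      uart_write_bytes(UART_NUM_1, "ping", 4);   // RTS is handled by the driver
 *  }
 */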

esp_err_t uart_set_rx_full_threshold(uart_port_t uart_num, int threshold)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_ERR_INVALID_ARG, UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((threshold < UART_RXFIFO_FULL_THRHD_V) && (threshold > 0), ESP_ERR_INVALID_ARG, UART_TAG,
                        "rx fifo full threshold value error");
    if (p_uart_obj[uart_num] == NULL) {
        ESP_LOGE(UART_TAG, "call uart_driver_install API first");
        return ESP_ERR_INVALID_STATE;
    }
    UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
    if (uart_hal_get_intr_ena_status(&(uart_context[uart_num].hal)) & UART_INTR_RXFIFO_FULL) {
        uart_hal_set_rxfifo_full_thr(&(uart_context[uart_num].hal), threshold);
    }
    UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
    return ESP_OK;
}

esp_err_t uart_set_tx_empty_threshold(uart_port_t uart_num, int threshold)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_ERR_INVALID_ARG, UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((threshold < UART_TXFIFO_EMPTY_THRHD_V) && (threshold > 0), ESP_ERR_INVALID_ARG, UART_TAG,
                        "tx fifo empty threshold value error");
    if (p_uart_obj[uart_num] == NULL) {
        ESP_LOGE(UART_TAG, "call uart_driver_install API first");
        return ESP_ERR_INVALID_STATE;
    }
    UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
    if (uart_hal_get_intr_ena_status(&(uart_context[uart_num].hal)) & UART_INTR_TXFIFO_EMPTY) {
        uart_hal_set_txfifo_empty_thr(&(uart_context[uart_num].hal), threshold);
    }
    UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
    return ESP_OK;
}

esp_err_t uart_set_rx_timeout(uart_port_t uart_num, const uint8_t tout_thresh)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_ERR_INVALID_ARG, UART_TAG, "uart_num error");
    // get maximum timeout threshold
    uint16_t tout_max_thresh = uart_hal_get_max_rx_timeout_thrd(&(uart_context[uart_num].hal));
    if (tout_thresh > tout_max_thresh) {
        ESP_LOGE(UART_TAG, "tout_thresh = %d > maximum value = %d", tout_thresh, tout_max_thresh);
        return ESP_ERR_INVALID_ARG;
    }
    UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
    uart_hal_set_rx_timeout(&(uart_context[uart_num].hal), tout_thresh);
    UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
    return ESP_OK;
}
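
/* Usage sketch (illustrative only, not part of the driver): tune how eagerly
 * the RX path wakes the driver. A lower full threshold and a shorter timeout
 * reduce latency at the cost of more interrupts. Port and values below are
 * hypothetical.
 *
 *  void example_rx_tuning(void)
 *  {
 *      ESP_ERROR_CHECK(uart_set_rx_full_threshold(UART_NUM_1, 64));
 *      ESP_ERROR_CHECK(uart_set_rx_timeout(UART_NUM_1, 4));
 *  }
 */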

esp_err_t uart_get_collision_flag(uart_port_t uart_num, bool *collision_flag)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_ERR_INVALID_ARG, UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((p_uart_obj[uart_num]), ESP_FAIL, UART_TAG, "uart driver error");
    ESP_RETURN_ON_FALSE((collision_flag != NULL), ESP_ERR_INVALID_ARG, UART_TAG, "wrong parameter pointer");
    ESP_RETURN_ON_FALSE((UART_IS_MODE_SET(uart_num, UART_MODE_RS485_HALF_DUPLEX) || UART_IS_MODE_SET(uart_num, UART_MODE_RS485_COLLISION_DETECT)),
                        ESP_ERR_INVALID_ARG, UART_TAG, "wrong mode");
    *collision_flag = p_uart_obj[uart_num]->coll_det_flg;
    return ESP_OK;
}

esp_err_t uart_set_wakeup_threshold(uart_port_t uart_num, int wakeup_threshold)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_ERR_INVALID_ARG, UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((wakeup_threshold <= UART_ACTIVE_THRESHOLD_V && wakeup_threshold > UART_MIN_WAKEUP_THRESH), ESP_ERR_INVALID_ARG, UART_TAG,
                        "wakeup_threshold out of bounds");
    UART_ENTER_CRITICAL(&(uart_context[uart_num].spinlock));
    uart_hal_set_wakeup_thrd(&(uart_context[uart_num].hal), wakeup_threshold);
    UART_EXIT_CRITICAL(&(uart_context[uart_num].spinlock));
    return ESP_OK;
}
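
/* Usage sketch (illustrative only, not part of the driver): arm the UART as a
 * light-sleep wakeup source that triggers after a chosen number of RX edges.
 * Assumes the application includes esp_sleep.h; the port and threshold are
 * hypothetical.
 *
 *  void example_uart_wakeup(void)
 *  {
 *      ESP_ERROR_CHECK(uart_set_wakeup_threshold(UART_NUM_1, 3));
 *      ESP_ERROR_CHECK(esp_sleep_enable_uart_wakeup(UART_NUM_1));
 *      esp_light_sleep_start();
 *  }
 */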

esp_err_t uart_get_wakeup_threshold(uart_port_t uart_num, int *out_wakeup_threshold)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_ERR_INVALID_ARG, UART_TAG, "uart_num error");
    ESP_RETURN_ON_FALSE((out_wakeup_threshold != NULL), ESP_ERR_INVALID_ARG, UART_TAG, "argument is NULL");
    uart_hal_get_wakeup_thrd(&(uart_context[uart_num].hal), (uint32_t *)out_wakeup_threshold);
    return ESP_OK;
}

esp_err_t uart_wait_tx_idle_polling(uart_port_t uart_num)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_ERR_INVALID_ARG, UART_TAG, "uart_num error");
    while (!uart_hal_is_tx_idle(&(uart_context[uart_num].hal)));
    return ESP_OK;
}

esp_err_t uart_set_loop_back(uart_port_t uart_num, bool loop_back_en)
{
    ESP_RETURN_ON_FALSE((uart_num < UART_NUM_MAX), ESP_ERR_INVALID_ARG, UART_TAG, "uart_num error");
    uart_hal_set_loop_back(&(uart_context[uart_num].hal), loop_back_en);
    return ESP_OK;
}
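
/* Usage sketch (illustrative only, not part of the driver): with loop-back
 * enabled the TX output is routed internally to the RX input, which is handy
 * for a self-test without external wiring. Port and payload are hypothetical.
 *
 *  void example_loopback_test(void)
 *  {
 *      uint8_t rx[4] = {0};
 *      ESP_ERROR_CHECK(uart_set_loop_back(UART_NUM_1, true));
 *      uart_write_bytes(UART_NUM_1, "abcd", 4);
 *      int got = uart_read_bytes(UART_NUM_1, rx, sizeof(rx), pdMS_TO_TICKS(100));
 *      ESP_ERROR_CHECK(uart_set_loop_back(UART_NUM_1, false));
 *      assert(got == 4 && memcmp(rx, "abcd", 4) == 0);
 *  }
 */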

void uart_set_always_rx_timeout(uart_port_t uart_num, bool always_rx_timeout)
{
    uint16_t rx_tout = uart_hal_get_rx_tout_thr(&(uart_context[uart_num].hal));
    if (rx_tout) {
        p_uart_obj[uart_num]->rx_always_timeout_flg = always_rx_timeout;
    } else {
        p_uart_obj[uart_num]->rx_always_timeout_flg = false;
    }
}