/*
 * Copyright (c) 2023 TOKITA Hiroshi <tokita.hiroshi@fujitsu.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT renesas_ra_uart_sci

#include <zephyr/drivers/uart.h>
#include <zephyr/drivers/clock_control.h>
#include <zephyr/drivers/interrupt_controller/intc_ra_icu.h>
#include <zephyr/drivers/pinctrl.h>
#include <zephyr/irq.h>
#include <zephyr/spinlock.h>

#include <zephyr/logging/log.h>

LOG_MODULE_REGISTER(ra_uart_sci, CONFIG_UART_LOG_LEVEL);

enum {
	UART_RA_INT_RXI,
	UART_RA_INT_TXI,
	UART_RA_INT_ERI,
	NUM_OF_UART_RA_INT,
};

struct uart_ra_cfg {
	mem_addr_t regs;
	const struct device *clock_dev;
	clock_control_subsys_t clock_id;
	const struct pinctrl_dev_config *pcfg;
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	int (*irq_config_func)(const struct device *dev);
#endif
};

struct uart_ra_data {
	struct uart_config current_config;
	uint32_t clk_rate;
	struct k_spinlock lock;
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	uint32_t irqn[NUM_OF_UART_RA_INT];
	uart_irq_callback_user_data_t callback;
	void *cb_data;
#endif
};

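/*
 * REG_MASK() builds the in-register mask of a bit field from the *_POS/*_LEN
 * pairs defined below, e.g. REG_MASK(SCR_CKE) expands to (BIT_MASK(2) << 0) == 0x03
 * and REG_MASK(SSR_TDRE) to (BIT_MASK(1) << 7) == 0x80.
 */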
#define REG_MASK(reg) (BIT_MASK(_CONCAT(reg, _LEN)) << _CONCAT(reg, _POS))

/* Registers */
#define SMR  0x00 /*!< Serial Mode Register */
#define BRR  0x01 /*!< Bit Rate Register */
#define SCR  0x02 /*!< Serial Control Register */
#define TDR  0x03 /*!< Transmit Data Register */
#define SSR  0x04 /*!< Serial Status Register */
#define RDR  0x05 /*!< Receive Data Register */
#define SEMR 0x07 /*!< Serial Extended Mode Register */
#define MDDR 0x12 /*!< Modulation Duty Register */
#define LSR  0x18 /*!< Line Status Register */

/*
 * SMR (Serial Mode Register)
 *
 * - CKS[0..2]: Clock Select
 * - MP[2..3]: Multi-Processor Mode (Valid only in asynchronous mode)
 * - STOP[3..4]: Stop Bit Length (Valid only in asynchronous mode)
 * - PM[4..5]: Parity Mode (Valid only when the PE bit is 1)
 * - PE[5..6]: Parity Enable (Valid only in asynchronous mode)
 * - CHR[6..7]: Character Length (Valid only in asynchronous mode)
 * - CM[7..8]: Communication Mode
 */
#define SMR_CKS_POS  (0)
#define SMR_CKS_LEN  (2)
#define SMR_MP_POS   (2)
#define SMR_MP_LEN   (1)
#define SMR_STOP_POS (3)
#define SMR_STOP_LEN (1)
#define SMR_PM_POS   (4)
#define SMR_PM_LEN   (1)
#define SMR_PE_POS   (5)
#define SMR_PE_LEN   (1)
#define SMR_CHR_POS  (6)
#define SMR_CHR_LEN  (1)
#define SMR_CM_POS   (7)
#define SMR_CM_LEN   (1)

/**
 * SCR (Serial Control Register)
 *
 * - CKE[0..2]: Clock Enable
 * - TEIE[2..3]: Transmit End Interrupt Enable
 * - MPIE[3..4]: Multi-Processor Interrupt Enable (Valid only in asynchronous mode)
 * - RE[4..5]: Receive Enable
 * - TE[5..6]: Transmit Enable
 * - RIE[6..7]: Receive Interrupt Enable
 * - TIE[7..8]: Transmit Interrupt Enable
 */
#define SCR_CKE_POS  (0)
#define SCR_CKE_LEN  (2)
#define SCR_TEIE_POS (2)
#define SCR_TEIE_LEN (1)
#define SCR_MPIE_POS (3)
#define SCR_MPIE_LEN (1)
#define SCR_RE_POS   (4)
#define SCR_RE_LEN   (1)
#define SCR_TE_POS   (5)
#define SCR_TE_LEN   (1)
#define SCR_RIE_POS  (6)
#define SCR_RIE_LEN  (1)
#define SCR_TIE_POS  (7)
#define SCR_TIE_LEN  (1)

/**
 * SSR (Serial Status Register)
 *
 * - MPBT[0..1]: Multi-Processor Bit Transfer
 * - MPB[1..2]: Multi-Processor
 * - TEND[2..3]: Transmit End Flag
 * - PER[3..4]: Parity Error Flag
 * - FER[4..5]: Framing Error Flag
 * - ORER[5..6]: Overrun Error Flag
 * - RDRF[6..7]: Receive Data Full Flag
 * - TDRE[7..8]: Transmit Data Empty Flag
 */
#define SSR_MPBT_POS (0)
#define SSR_MPBT_LEN (1)
#define SSR_MPB_POS  (1)
#define SSR_MPB_LEN  (1)
#define SSR_TEND_POS (2)
#define SSR_TEND_LEN (1)
#define SSR_PER_POS  (3)
#define SSR_PER_LEN  (1)
#define SSR_FER_POS  (4)
#define SSR_FER_LEN  (1)
#define SSR_ORER_POS (5)
#define SSR_ORER_LEN (1)
#define SSR_RDRF_POS (6)
#define SSR_RDRF_LEN (1)
#define SSR_TDRE_POS (7)
#define SSR_TDRE_LEN (1)

/**
 * SEMR (Serial Extended Mode Register)
 *
 * - ACS0[0..1]: Asynchronous Mode Clock Source Select
 * - PADIS[1..2]: Preamble function Disable
 * - BRME[2..3]: Bit Rate Modulation Enable
 * - ABCSE[3..4]: Asynchronous Mode Extended Base Clock Select
 * - ABCS[4..5]: Asynchronous Mode Base Clock Select
 * - NFEN[5..6]: Digital Noise Filter Function Enable
 * - BGDM[6..7]: Baud Rate Generator Double-Speed Mode Select
 * - RXDESEL[7..8]: Asynchronous Start Bit Edge Detection Select
 */
#define SEMR_ACS0_POS    (0)
#define SEMR_ACS0_LEN    (1)
#define SEMR_PADIS_POS   (1)
#define SEMR_PADIS_LEN   (1)
#define SEMR_BRME_POS    (2)
#define SEMR_BRME_LEN    (1)
#define SEMR_ABCSE_POS   (3)
#define SEMR_ABCSE_LEN   (1)
#define SEMR_ABCS_POS    (4)
#define SEMR_ABCS_LEN    (1)
#define SEMR_NFEN_POS    (5)
#define SEMR_NFEN_LEN    (1)
#define SEMR_BGDM_POS    (6)
#define SEMR_BGDM_LEN    (1)
#define SEMR_RXDESEL_POS (7)
#define SEMR_RXDESEL_LEN (1)

/**
 * LSR (Line Status Register)
 *
 * - ORER[0..1]: Overrun Error Flag
 * - FNUM[2..7]: Framing Error Count
 * - PNUM[8..13]: Parity Error Count
 */
#define LSR_ORER_POS (0)
#define LSR_ORER_LEN (1)
#define LSR_FNUM_POS (2)
#define LSR_FNUM_LEN (5)
#define LSR_PNUM_POS (8)
#define LSR_PNUM_LEN (5)

static uint8_t uart_ra_read_8(const struct device *dev, uint32_t offs)
{
	const struct uart_ra_cfg *config = dev->config;

	return sys_read8(config->regs + offs);
}

static void uart_ra_write_8(const struct device *dev, uint32_t offs, uint8_t value)
{
	const struct uart_ra_cfg *config = dev->config;

	sys_write8(value, config->regs + offs);
}

static uint16_t uart_ra_read_16(const struct device *dev, uint32_t offs)
{
	const struct uart_ra_cfg *config = dev->config;

	return sys_read16(config->regs + offs);
}

static void uart_ra_write_16(const struct device *dev, uint32_t offs, uint16_t value)
{
	const struct uart_ra_cfg *config = dev->config;

	sys_write16(value, config->regs + offs);
}

static void uart_ra_set_baudrate(const struct device *dev, uint32_t baud_rate)
{
	struct uart_ra_data *data = dev->data;
	uint8_t reg_val;

	reg_val = uart_ra_read_8(dev, SEMR);
	reg_val |= (REG_MASK(SEMR_BGDM) | REG_MASK(SEMR_ABCS));
	reg_val &= ~(REG_MASK(SEMR_BRME) | REG_MASK(SEMR_ABCSE));
	uart_ra_write_8(dev, SEMR, reg_val);

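	/*
	 * With BGDM = 1, ABCS = 1, ABCSE = 0 and CKS = 0 (selected above and in
	 * uart_ra_configure()), the SCI uses 8 base-clock cycles per bit, which
	 * gives BRR = PCLK / (8 * baudrate) - 1 as in the RA hardware manual's
	 * BRR setting table for this mode.
	 */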
	reg_val = (data->clk_rate / (8 * baud_rate)) - 1;
	uart_ra_write_8(dev, BRR, reg_val);
}

static int uart_ra_poll_in(const struct device *dev, unsigned char *p_char)
{
	struct uart_ra_data *data = dev->data;
	int ret = 0;

	k_spinlock_key_t key = k_spin_lock(&data->lock);

	/* If interrupts are enabled, return -EINVAL */
	if ((uart_ra_read_8(dev, SCR) & REG_MASK(SCR_RIE))) {
		ret = -EINVAL;
		goto unlock;
	}

	if ((uart_ra_read_8(dev, SSR) & REG_MASK(SSR_RDRF)) == 0) {
		ret = -1;
		goto unlock;
	}

	*p_char = uart_ra_read_8(dev, RDR);
unlock:
	k_spin_unlock(&data->lock, key);

	return ret;
}

static void uart_ra_poll_out(const struct device *dev, unsigned char out_char)
{
	struct uart_ra_data *data = dev->data;
	uint8_t reg_val;
	k_spinlock_key_t key = k_spin_lock(&data->lock);

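	/*
	 * Wait until the previous frame has fully left the transmit shifter (TEND)
	 * and the transmit data register is empty (TDRE) before writing out_char.
	 */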
	while (!(uart_ra_read_8(dev, SSR) & REG_MASK(SSR_TEND)) ||
	       !(uart_ra_read_8(dev, SSR) & REG_MASK(SSR_TDRE))) {
		;
	}

	/* If interrupts are enabled, temporarily disable them */
	reg_val = uart_ra_read_8(dev, SCR);
	uart_ra_write_8(dev, SCR, reg_val & ~REG_MASK(SCR_TIE));

	uart_ra_write_8(dev, TDR, out_char);
	while (!(uart_ra_read_8(dev, SSR) & REG_MASK(SSR_TEND)) ||
	       !(uart_ra_read_8(dev, SSR) & REG_MASK(SSR_TDRE))) {
		;
	}

	uart_ra_write_8(dev, SCR, reg_val);
	k_spin_unlock(&data->lock, key);
}


static int uart_ra_err_check(const struct device *dev)
{
	struct uart_ra_data *data = dev->data;

	uint8_t reg_val;
	int errors = 0;
	k_spinlock_key_t key;

	key = k_spin_lock(&data->lock);
	reg_val = uart_ra_read_8(dev, SSR);

	if (reg_val & REG_MASK(SSR_PER)) {
		errors |= UART_ERROR_PARITY;
	}

	if (reg_val & REG_MASK(SSR_FER)) {
		errors |= UART_ERROR_FRAMING;
	}

	if (reg_val & REG_MASK(SSR_ORER)) {
		errors |= UART_ERROR_OVERRUN;
	}

	reg_val &= ~(REG_MASK(SSR_PER) | REG_MASK(SSR_FER) | REG_MASK(SSR_ORER));
	uart_ra_write_8(dev, SSR, reg_val);

	k_spin_unlock(&data->lock, key);

	return errors;
}

static int uart_ra_configure(const struct device *dev, const struct uart_config *cfg)
{
	struct uart_ra_data *data = dev->data;

	uint16_t reg_val;
	k_spinlock_key_t key;

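	/* Only the 8-N-1 format without hardware flow control is supported. */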
	if (cfg->parity != UART_CFG_PARITY_NONE || cfg->stop_bits != UART_CFG_STOP_BITS_1 ||
	    cfg->data_bits != UART_CFG_DATA_BITS_8 || cfg->flow_ctrl != UART_CFG_FLOW_CTRL_NONE) {
		return -ENOTSUP;
	}

	key = k_spin_lock(&data->lock);

	/* Disable Transmit and Receive */
	reg_val = uart_ra_read_8(dev, SCR);
	reg_val &= ~(REG_MASK(SCR_TE) | REG_MASK(SCR_RE));
	uart_ra_write_8(dev, SCR, reg_val);

	/* Reset the error flags */
	reg_val = uart_ra_read_8(dev, SSR);
	reg_val &= ~(REG_MASK(SSR_PER) | REG_MASK(SSR_FER) | REG_MASK(SSR_ORER) |
		     REG_MASK(SSR_RDRF) | REG_MASK(SSR_TDRE));
	uart_ra_write_8(dev, SSR, reg_val);

	reg_val = uart_ra_read_16(dev, LSR);
	reg_val &= ~(REG_MASK(LSR_ORER));
	uart_ra_write_16(dev, LSR, reg_val);

	/* Select internal clock */
	reg_val = uart_ra_read_8(dev, SCR);
	reg_val &= ~(REG_MASK(SCR_CKE));
	uart_ra_write_8(dev, SCR, reg_val);

	/* Serial Configuration (8N1) & Clock divider selection */
	reg_val = uart_ra_read_8(dev, SMR);
	reg_val &= ~(REG_MASK(SMR_CM) | REG_MASK(SMR_CHR) | REG_MASK(SMR_PE) | REG_MASK(SMR_PM) |
		     REG_MASK(SMR_STOP) | REG_MASK(SMR_CKS));
	uart_ra_write_8(dev, SMR, reg_val);

	/* Set baudrate */
	uart_ra_set_baudrate(dev, cfg->baudrate);

	/* Enable Transmit & Receive + disable Interrupts */
	reg_val = uart_ra_read_8(dev, SCR);
	reg_val |= (REG_MASK(SCR_TE) | REG_MASK(SCR_RE));
	reg_val &=
		~(REG_MASK(SCR_TIE) | REG_MASK(SCR_RIE) | REG_MASK(SCR_MPIE) | REG_MASK(SCR_TEIE));
	uart_ra_write_8(dev, SCR, reg_val);

	data->current_config = *cfg;

	k_spin_unlock(&data->lock, key);

	return 0;
}

#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
static int uart_ra_config_get(const struct device *dev, struct uart_config *cfg)
{
	struct uart_ra_data *data = dev->data;

	*cfg = data->current_config;

	return 0;
}
#endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */

static int uart_ra_init(const struct device *dev)
{
	const struct uart_ra_cfg *config = dev->config;
	struct uart_ra_data *data = dev->data;
	int ret;

	/* Configure dt provided device signals when available */
	ret = pinctrl_apply_state(config->pcfg, PINCTRL_STATE_DEFAULT);
	if (ret < 0) {
		return ret;
	}

	if (!device_is_ready(config->clock_dev)) {
		return -ENODEV;
	}

	ret = clock_control_on(config->clock_dev, config->clock_id);
	if (ret < 0) {
		return ret;
	}

	ret = clock_control_get_rate(config->clock_dev, config->clock_id, &data->clk_rate);
	if (ret < 0) {
		return ret;
	}

	ret = uart_ra_configure(dev, &data->current_config);
	if (ret != 0) {
		return ret;
	}

#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	ret = config->irq_config_func(dev);
	if (ret != 0) {
		return ret;
	}
#endif

	return 0;
}

#ifdef CONFIG_UART_INTERRUPT_DRIVEN

static bool uart_ra_irq_is_enabled(const struct device *dev, uint32_t irq)
{
	return uart_ra_read_8(dev, SCR) & irq;
}

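/*
 * The SCI transmit/receive data registers used here are single-byte (no
 * hardware FIFO), so fifo_fill() and fifo_read() transfer at most one byte
 * per call. fifo_fill() briefly clears TIE around the TDR write before
 * re-enabling it, so that a fresh TXI event is generated for the next byte.
 */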
static int uart_ra_fifo_fill(const struct device *dev, const uint8_t *tx_data, int len)
{
	struct uart_ra_data *data = dev->data;
	uint8_t reg_val;
	k_spinlock_key_t key;

	if (len <= 0 || tx_data == NULL) {
		return 0;
	}

	key = k_spin_lock(&data->lock);
	reg_val = uart_ra_read_8(dev, SCR);
	reg_val &= ~(REG_MASK(SCR_TIE));
	uart_ra_write_8(dev, SCR, reg_val);

	uart_ra_write_8(dev, TDR, tx_data[0]);

	reg_val |= REG_MASK(SCR_TIE);
	uart_ra_write_8(dev, SCR, reg_val);

	k_spin_unlock(&data->lock, key);

	return 1;
}

static int uart_ra_fifo_read(const struct device *dev, uint8_t *rx_data, const int size)
{
	uint8_t data;

	if (size <= 0) {
		return 0;
	}

	if ((uart_ra_read_8(dev, SSR) & REG_MASK(SSR_RDRF)) == 0) {
		return 0;
	}

	data = uart_ra_read_8(dev, RDR);

	if (rx_data) {
		rx_data[0] = data;
	}

	return 1;
}

static void uart_ra_irq_tx_enable(const struct device *dev)
{
	struct uart_ra_data *data = dev->data;
	k_spinlock_key_t key;
	uint16_t reg_val;

	key = k_spin_lock(&data->lock);

	reg_val = uart_ra_read_8(dev, SCR);
	reg_val |= (REG_MASK(SCR_TIE));
	uart_ra_write_8(dev, SCR, reg_val);

	irq_enable(data->irqn[UART_RA_INT_TXI]);

	k_spin_unlock(&data->lock, key);
}

static void uart_ra_irq_tx_disable(const struct device *dev)
{
	struct uart_ra_data *data = dev->data;
	k_spinlock_key_t key;
	uint16_t reg_val;

	key = k_spin_lock(&data->lock);

	reg_val = uart_ra_read_8(dev, SCR);
	reg_val &= ~(REG_MASK(SCR_TIE));
	uart_ra_write_8(dev, SCR, reg_val);

	irq_disable(data->irqn[UART_RA_INT_TXI]);

	k_spin_unlock(&data->lock, key);
}

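/*
 * TX is reported ready only when both TDRE (data register empty) and TEND
 * (nothing left in the transmit shifter) are set in SSR.
 */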
static int uart_ra_irq_tx_ready(const struct device *dev)
{
	const uint8_t reg_val = uart_ra_read_8(dev, SSR);
	const uint8_t mask = REG_MASK(SSR_TEND) | REG_MASK(SSR_TDRE);

	return (reg_val & mask) == mask;
}

static void uart_ra_irq_rx_enable(const struct device *dev)
{
	struct uart_ra_data *data = dev->data;
	k_spinlock_key_t key;
	uint16_t reg_val;

	key = k_spin_lock(&data->lock);

	reg_val = uart_ra_read_8(dev, SCR);
	reg_val |= REG_MASK(SCR_RIE);
	uart_ra_write_8(dev, SCR, reg_val);

	irq_enable(data->irqn[UART_RA_INT_RXI]);

	k_spin_unlock(&data->lock, key);
}

static void uart_ra_irq_rx_disable(const struct device *dev)
{
	struct uart_ra_data *data = dev->data;
	k_spinlock_key_t key;
	uint16_t reg_val;

	key = k_spin_lock(&data->lock);

	reg_val = uart_ra_read_8(dev, SCR);
	reg_val &= ~REG_MASK(SCR_RIE);
	uart_ra_write_8(dev, SCR, reg_val);

	irq_disable(data->irqn[UART_RA_INT_RXI]);

	k_spin_unlock(&data->lock, key);
}

static int uart_ra_irq_rx_ready(const struct device *dev)
{
	return !!(uart_ra_read_8(dev, SSR) & REG_MASK(SSR_RDRF));
}

static void uart_ra_irq_err_enable(const struct device *dev)
{
	struct uart_ra_data *data = dev->data;

	irq_enable(data->irqn[UART_RA_INT_ERI]);
}

static void uart_ra_irq_err_disable(const struct device *dev)
{
	struct uart_ra_data *data = dev->data;

	irq_disable(data->irqn[UART_RA_INT_ERI]);
}

static int uart_ra_irq_is_pending(const struct device *dev)
{
	return (uart_ra_irq_rx_ready(dev) && uart_ra_irq_is_enabled(dev, REG_MASK(SCR_RIE))) ||
	       (uart_ra_irq_tx_ready(dev) && uart_ra_irq_is_enabled(dev, REG_MASK(SCR_TIE)));
}

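/*
 * No status needs to be latched between irq_update() and the other irq_*
 * calls; always return 1 so interrupt-driven callback processing continues.
 */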
static int uart_ra_irq_update(const struct device *dev)
{
	return 1;
}

static void uart_ra_irq_callback_set(const struct device *dev, uart_irq_callback_user_data_t cb,
				     void *cb_data)
{
	struct uart_ra_data *data = dev->data;

	data->callback = cb;
	data->cb_data = cb_data;
}

/**
 * @brief Interrupt service routine.
 *
 * This simply calls the callback function, if one exists.
 *
 * @param dev UART device instance.
 */
static inline void uart_ra_isr(const struct device *dev)
{
	struct uart_ra_data *data = dev->data;

	if (data->callback) {
		data->callback(dev, data->cb_data);
	}
}

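/*
 * The per-event wrappers below dispatch the common ISR and then clear the
 * corresponding interrupt flag in the RA ICU so the event can fire again.
 */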
static void uart_ra_isr_rxi(const void *param)
{
	const struct device *dev = param;
	struct uart_ra_data *data = dev->data;

	uart_ra_isr(dev);
	ra_icu_clear_int_flag(data->irqn[UART_RA_INT_RXI]);
}

static void uart_ra_isr_txi(const void *param)
{
	const struct device *dev = param;
	struct uart_ra_data *data = dev->data;

	uart_ra_isr(dev);
	ra_icu_clear_int_flag(data->irqn[UART_RA_INT_TXI]);
}

static void uart_ra_isr_eri(const void *param)
{
	const struct device *dev = param;
	struct uart_ra_data *data = dev->data;

	uart_ra_isr(dev);
	ra_icu_clear_int_flag(data->irqn[UART_RA_INT_ERI]);
}

#endif /* CONFIG_UART_INTERRUPT_DRIVEN */

static const struct uart_driver_api uart_ra_driver_api = {
	.poll_in = uart_ra_poll_in,
	.poll_out = uart_ra_poll_out,
	.err_check = uart_ra_err_check,
#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
	.configure = uart_ra_configure,
	.config_get = uart_ra_config_get,
#endif
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	.fifo_fill = uart_ra_fifo_fill,
	.fifo_read = uart_ra_fifo_read,
	.irq_tx_enable = uart_ra_irq_tx_enable,
	.irq_tx_disable = uart_ra_irq_tx_disable,
	.irq_tx_ready = uart_ra_irq_tx_ready,
	.irq_rx_enable = uart_ra_irq_rx_enable,
	.irq_rx_disable = uart_ra_irq_rx_disable,
	.irq_rx_ready = uart_ra_irq_rx_ready,
	.irq_err_enable = uart_ra_irq_err_enable,
	.irq_err_disable = uart_ra_irq_err_disable,
	.irq_is_pending = uart_ra_irq_is_pending,
	.irq_update = uart_ra_irq_update,
	.irq_callback_set = uart_ra_irq_callback_set,
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */
};

/* Device Instantiation */
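/*
 * This compatible sits on a child node of the SCI controller, so registers,
 * clocks, pinctrl and interrupts are all looked up on DT_INST_PARENT(n).
 */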
#define UART_RA_INIT_CFG(n)                                                                        \
	PINCTRL_DT_DEFINE(DT_INST_PARENT(n));                                                      \
	static const struct uart_ra_cfg uart_ra_cfg_##n = {                                        \
		.regs = DT_REG_ADDR(DT_INST_PARENT(n)),                                            \
		.clock_dev = DEVICE_DT_GET(DT_CLOCKS_CTLR(DT_INST_PARENT(n))),                     \
		.clock_id =                                                                        \
			(clock_control_subsys_t)DT_CLOCKS_CELL_BY_IDX(DT_INST_PARENT(n), 0, id),  \
		.pcfg = PINCTRL_DT_DEV_CONFIG_GET(DT_INST_PARENT(n)),                              \
		IF_ENABLED(CONFIG_UART_INTERRUPT_DRIVEN, (                                         \
			.irq_config_func = irq_config_func_##n,                                    \
		))                                                                                 \
	}

#ifdef CONFIG_UART_INTERRUPT_DRIVEN

#define RA_IRQ_CONNECT_DYNAMIC(n, name, dev, isr)                                                  \
	ra_icu_irq_connect_dynamic(DT_IRQ_BY_NAME(DT_INST_PARENT(n), name, irq),                   \
				   DT_IRQ_BY_NAME(DT_INST_PARENT(n), name, priority), isr, dev,    \
				   DT_IRQ_BY_NAME(DT_INST_PARENT(n), name, flags));

#define RA_IRQ_DISCONNECT_DYNAMIC(irqn, name, dev, isr)                                            \
	ra_icu_irq_disconnect_dynamic(irqn, 0, NULL, NULL, 0)

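/*
 * irq_config_func_##n hooks the RXI/TXI/ERI events up through the RA ICU at
 * run time. ra_icu_irq_connect_dynamic() returns the IRQ number it assigned
 * (or a negative error code); the number is stored in data->irqn[] so the
 * irq_*_enable()/irq_*_disable() calls above can act on it later.
 */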
#define UART_RA_CONFIG_FUNC(n)                                                                     \
	static int irq_config_func_##n(const struct device *dev)                                   \
	{                                                                                          \
		struct uart_ra_data *data = dev->data;                                             \
		int irqn;                                                                          \
                                                                                                   \
		irqn = RA_IRQ_CONNECT_DYNAMIC(n, rxi, dev, uart_ra_isr_rxi);                       \
		if (irqn < 0) {                                                                    \
			return irqn;                                                               \
		}                                                                                  \
		data->irqn[UART_RA_INT_RXI] = irqn;                                                \
		irqn = RA_IRQ_CONNECT_DYNAMIC(n, txi, dev, uart_ra_isr_txi);                       \
		if (irqn < 0) {                                                                    \
			goto err_txi;                                                              \
		}                                                                                  \
		data->irqn[UART_RA_INT_TXI] = irqn;                                                \
		irqn = RA_IRQ_CONNECT_DYNAMIC(n, eri, dev, uart_ra_isr_eri);                       \
		if (irqn < 0) {                                                                    \
			goto err_eri;                                                              \
		}                                                                                  \
		data->irqn[UART_RA_INT_ERI] = irqn;                                                \
		return 0;                                                                          \
                                                                                                   \
	err_eri:                                                                                   \
		RA_IRQ_DISCONNECT_DYNAMIC(data->irqn[UART_RA_INT_TXI], eri, dev, uart_ra_isr_eri); \
	err_txi:                                                                                   \
		RA_IRQ_DISCONNECT_DYNAMIC(data->irqn[UART_RA_INT_RXI], txi, dev, uart_ra_isr_txi); \
                                                                                                   \
		return irqn;                                                                       \
	}
#else
#define UART_RA_CONFIG_FUNC(n)
#endif

#define UART_RA_INIT(n)                                                                            \
	UART_RA_CONFIG_FUNC(n)                                                                     \
	UART_RA_INIT_CFG(n);                                                                       \
                                                                                                   \
	static struct uart_ra_data uart_ra_data_##n = {                                            \
		.current_config = {                                                                \
			.baudrate = DT_INST_PROP(n, current_speed),                                \
			.parity = UART_CFG_PARITY_NONE,                                            \
			.stop_bits = UART_CFG_STOP_BITS_1,                                         \
			.data_bits = UART_CFG_DATA_BITS_8,                                         \
			.flow_ctrl = UART_CFG_FLOW_CTRL_NONE,                                      \
		},                                                                                 \
	};                                                                                         \
                                                                                                   \
	DEVICE_DT_INST_DEFINE(n, uart_ra_init, NULL, &uart_ra_data_##n, &uart_ra_cfg_##n,          \
			      PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY, &uart_ra_driver_api);

DT_INST_FOREACH_STATUS_OKAY(UART_RA_INIT)