/*
 * Copyright (c) 2018 Linaro Limited
 * Copyright (c) 2022 Arm Limited (or its affiliates). All rights reserved.
 * Copyright (c) 2023 Antmicro <www.antmicro.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT arm_pl011
#define SBSA_COMPAT arm_sbsa_uart

#include <string.h>
#include <zephyr/kernel.h>
#include <zephyr/arch/cpu.h>
#include <zephyr/init.h>
#include <zephyr/device.h>
#include <zephyr/drivers/uart.h>
#include <zephyr/sys/device_mmio.h>
#include <zephyr/sys/barrier.h>
#include <zephyr/irq.h>
#if defined(CONFIG_PINCTRL)
#include <zephyr/drivers/pinctrl.h>
#endif
#if defined(CONFIG_RESET)
#include <zephyr/drivers/reset.h>
#endif
#if defined(CONFIG_CLOCK_CONTROL)
#include <zephyr/drivers/clock_control.h>
#endif

#ifdef CONFIG_CPU_CORTEX_M
#include <cmsis_compiler.h>
#endif

#include "uart_pl011_registers.h"

#if defined(CONFIG_SOC_FAMILY_AMBIQ)
#include "uart_pl011_ambiq.h"
#endif

#if defined(CONFIG_SOC_SERIES_APOLLO3X)
#define PM_INST_GET(n) PM_DEVICE_DT_INST_GET(n)
#else
#define PM_INST_GET(n) NULL
#endif

#include "uart_pl011_raspberrypi_pico.h"

struct pl011_config {
	DEVICE_MMIO_ROM;
#if defined(CONFIG_PINCTRL)
	const struct pinctrl_dev_config *pincfg;
#endif
#if defined(CONFIG_RESET)
	const struct reset_dt_spec reset;
#endif
#if defined(CONFIG_CLOCK_CONTROL)
	const struct device *clock_dev;
	clock_control_subsys_t clock_id;
#endif
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	uart_irq_config_func_t irq_config_func;
#endif
	/* Keep the FIFOs disabled (from the "fifo-disable" DT property) */
	bool fifo_disable;
	/* Vendor-specific hook to enable the peripheral clock */
	int (*clk_enable_func)(const struct device *dev, uint32_t clk);
	/* Vendor-specific hook to power on the peripheral */
	int (*pwr_on_func)(void);
};

/* Device data structure */
struct pl011_data {
	DEVICE_MMIO_RAM;
	struct uart_config uart_cfg;
	bool sbsa;		/* SBSA mode */
	uint32_t clk_freq;
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	/* Whether the next TX IRQ enable must invoke the callback from
	 * software (see pl011_irq_tx_enable())
	 */
	volatile bool sw_call_txdrdy;
	uart_irq_callback_user_data_t irq_cb;
	struct k_spinlock irq_cb_lock;
	void *irq_cb_data;
#endif
};

static void pl011_enable(const struct device *dev)
{
	get_uart(dev)->cr |= PL011_CR_UARTEN;
}

static void pl011_disable(const struct device *dev)
{
	get_uart(dev)->cr &= ~PL011_CR_UARTEN;
}

static void pl011_enable_fifo(const struct device *dev)
{
	get_uart(dev)->lcr_h |= PL011_LCRH_FEN;
}

static void pl011_disable_fifo(const struct device *dev)
{
	get_uart(dev)->lcr_h &= ~PL011_LCRH_FEN;
}

static void pl011_set_flow_control(const struct device *dev, bool rts, bool cts)
{
	if (rts) {
		get_uart(dev)->cr |= PL011_CR_RTSEn;
	} else {
		get_uart(dev)->cr &= ~PL011_CR_RTSEn;
	}
	if (cts) {
		get_uart(dev)->cr |= PL011_CR_CTSEn;
	} else {
		get_uart(dev)->cr &= ~PL011_CR_CTSEn;
	}
}

static int pl011_set_baudrate(const struct device *dev,
			      uint32_t clk, uint32_t baudrate)
{
	/* To avoid floating-point calculations, bauddiv is left-shifted by 6 */
	uint64_t bauddiv = (((uint64_t)clk) << PL011_FBRD_WIDTH)
			   / (baudrate * 16U);
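
	/*
	 * Worked example (illustrative only, assuming a 24 MHz UART clock and
	 * a requested baud rate of 115200):
	 *   bauddiv = (24000000 << 6) / (115200 * 16) = 833
	 *   ibrd    = 833 >> 6   = 13
	 *   fbrd    = 833 & 0x3f = 1
	 * giving an effective baud rate of 24e6 / (16 * 13.015625) ~= 115246,
	 * an error of roughly 0.04 %.
	 */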

	/* Valid bauddiv value
	 * uart_clk (min) >= 16 x baud_rate (max)
	 * uart_clk (max) <= 16 x 65535 x baud_rate (min)
	 */
	if ((bauddiv < (1u << PL011_FBRD_WIDTH))
	    || (bauddiv > (65535u << PL011_FBRD_WIDTH))) {
		return -EINVAL;
	}

	get_uart(dev)->ibrd = bauddiv >> PL011_FBRD_WIDTH;
	get_uart(dev)->fbrd = bauddiv & ((1u << PL011_FBRD_WIDTH) - 1u);

	barrier_dmem_fence_full();

	/* In order to internally update the contents of ibrd or fbrd, an
	 * lcr_h write must always be performed at the end
	 * ARM DDI 0183F, Pg 3-13
	 */
	get_uart(dev)->lcr_h = get_uart(dev)->lcr_h;

	return 0;
}


static bool pl011_is_readable(const struct device *dev)
{
	struct pl011_data *data = dev->data;

	if (!data->sbsa &&
	    (!(get_uart(dev)->cr & PL011_CR_UARTEN) || !(get_uart(dev)->cr & PL011_CR_RXE))) {
		return false;
	}

	return (get_uart(dev)->fr & PL011_FR_RXFE) == 0U;
}

static int pl011_poll_in(const struct device *dev, unsigned char *c)
{
	if (!pl011_is_readable(dev)) {
		return -1;
	}

	/* got a character */
	*c = (unsigned char)get_uart(dev)->dr;

	return get_uart(dev)->rsr & PL011_RSR_ERROR_MASK;
}

static void pl011_poll_out(const struct device *dev,
			   unsigned char c)
{
	/* Wait for space in FIFO */
	while (get_uart(dev)->fr & PL011_FR_TXFF) {
		; /* Wait */
	}

	/* Send a character */
	get_uart(dev)->dr = (uint32_t)c;
}

static int pl011_err_check(const struct device *dev)
{
	int errors = 0;

	if (get_uart(dev)->rsr & PL011_RSR_ECR_OE) {
		errors |= UART_ERROR_OVERRUN;
	}

	if (get_uart(dev)->rsr & PL011_RSR_ECR_BE) {
		errors |= UART_BREAK;
	}

	if (get_uart(dev)->rsr & PL011_RSR_ECR_PE) {
		errors |= UART_ERROR_PARITY;
	}

	if (get_uart(dev)->rsr & PL011_RSR_ECR_FE) {
		errors |= UART_ERROR_FRAMING;
	}

	return errors;
}

static int pl011_runtime_configure_internal(const struct device *dev,
					    const struct uart_config *cfg,
					    bool disable)
{
	const struct pl011_config *config = dev->config;
	struct pl011_data *data = dev->data;
	uint32_t lcrh;
	int ret = -ENOTSUP;

	if (data->sbsa) {
		goto out;
	}

	if (disable) {
		pl011_disable(dev);
		pl011_disable_fifo(dev);
	}

	lcrh = get_uart(dev)->lcr_h & ~(PL011_LCRH_FORMAT_MASK | PL011_LCRH_STP2);

	switch (cfg->parity) {
	case UART_CFG_PARITY_NONE:
		lcrh &= ~(BIT(1) | BIT(2));
		break;
	case UART_CFG_PARITY_ODD:
		lcrh |= PL011_LCRH_PARITY_ODD;
		break;
	case UART_CFG_PARITY_EVEN:
		lcrh |= PL011_LCRH_PARTIY_EVEN;
		break;
	default:
		goto enable;
	}

	switch (cfg->stop_bits) {
	case UART_CFG_STOP_BITS_1:
		lcrh &= ~(PL011_LCRH_STP2);
		break;
	case UART_CFG_STOP_BITS_2:
		lcrh |= PL011_LCRH_STP2;
		break;
	default:
		goto enable;
	}

	switch (cfg->data_bits) {
	case UART_CFG_DATA_BITS_5:
		lcrh |= PL011_LCRH_WLEN_SIZE(5) << PL011_LCRH_WLEN_SHIFT;
		break;
	case UART_CFG_DATA_BITS_6:
		lcrh |= PL011_LCRH_WLEN_SIZE(6) << PL011_LCRH_WLEN_SHIFT;
		break;
	case UART_CFG_DATA_BITS_7:
		lcrh |= PL011_LCRH_WLEN_SIZE(7) << PL011_LCRH_WLEN_SHIFT;
		break;
	case UART_CFG_DATA_BITS_8:
		lcrh |= PL011_LCRH_WLEN_SIZE(8) << PL011_LCRH_WLEN_SHIFT;
		break;
	default:
		goto enable;
	}

	switch (cfg->flow_ctrl) {
	case UART_CFG_FLOW_CTRL_NONE:
		pl011_set_flow_control(dev, false, false);
		break;
	case UART_CFG_FLOW_CTRL_RTS_CTS:
		pl011_set_flow_control(dev, true, true);
		break;
	default:
		goto enable;
	}

	/* Set baud rate */
	ret = pl011_set_baudrate(dev, data->clk_freq, cfg->baudrate);
	if (ret != 0) {
		goto enable;
	}

	/* Update settings */
	get_uart(dev)->lcr_h = lcrh;

	memcpy(&data->uart_cfg, cfg, sizeof(data->uart_cfg));

enable:
	if (disable) {
		if (!config->fifo_disable) {
			pl011_enable_fifo(dev);
		}
		pl011_enable(dev);
	}

out:
	return ret;
}

#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE

static int pl011_runtime_configure(const struct device *dev,
				   const struct uart_config *cfg)
{
	return pl011_runtime_configure_internal(dev, cfg, true);
}

static int pl011_runtime_config_get(const struct device *dev,
				    struct uart_config *cfg)
{
	struct pl011_data *data = dev->data;

	*cfg = data->uart_cfg;
	return 0;
}

#endif /* CONFIG_UART_USE_RUNTIME_CONFIGURE */

#ifdef CONFIG_UART_INTERRUPT_DRIVEN
static int pl011_fifo_fill(const struct device *dev,
			   const uint8_t *tx_data, int len)
{
	int num_tx = 0U;

	while (!(get_uart(dev)->fr & PL011_FR_TXFF) && (len - num_tx > 0)) {
		get_uart(dev)->dr = tx_data[num_tx++];
	}
	return num_tx;
}

static int pl011_fifo_read(const struct device *dev,
			   uint8_t *rx_data, const int len)
{
	int num_rx = 0U;

	while ((len - num_rx > 0) && !(get_uart(dev)->fr & PL011_FR_RXFE)) {
		rx_data[num_rx++] = get_uart(dev)->dr;
	}

	return num_rx;
}

static void pl011_irq_tx_enable(const struct device *dev)
{
	struct pl011_data *data = dev->data;

	get_uart(dev)->imsc |= PL011_IMSC_TXIM;
	if (!data->sw_call_txdrdy) {
		return;
	}
	data->sw_call_txdrdy = false;

	/*
	 * Due to a HW limitation, the first TX interrupt has to be triggered
	 * by software, so check whether a callback has been registered.
	 *
	 * The PL011 TX interrupt is based on a transition through a level,
	 * rather than on the level itself [1]. Therefore, enabling the TX
	 * interrupt cannot by itself trigger a TX interrupt if no data has
	 * been written to the TX FIFO yet.
	 *
	 * [1]: PrimeCell UART (PL011) Technical Reference Manual
	 *      functional-overview/interrupts
	 */
	if (!data->irq_cb) {
		return;
	}

	/*
	 * Execute the callback while the TX interrupt remains enabled. If
	 * uart_fifo_fill() is called with small amounts of data, the 1/8 TX
	 * FIFO threshold may never be reached, and the hardware TX interrupt
	 * will never trigger.
	 */
	while (get_uart(dev)->imsc & PL011_IMSC_TXIM) {
		K_SPINLOCK(&data->irq_cb_lock) {
			data->irq_cb(dev, data->irq_cb_data);
		}
	}
}

static void pl011_irq_tx_disable(const struct device *dev)
{
	struct pl011_data *data = dev->data;

	data->sw_call_txdrdy = true;
	get_uart(dev)->imsc &= ~PL011_IMSC_TXIM;
}

static int pl011_irq_tx_complete(const struct device *dev)
{
	/* TX is complete when the UART is no longer busy transmitting data. */
	return ((get_uart(dev)->fr & PL011_FR_BUSY) == 0);
}

static int pl011_irq_tx_ready(const struct device *dev)
{
	struct pl011_data *data = dev->data;

	if (!data->sbsa && !(get_uart(dev)->cr & PL011_CR_TXE)) {
		return false;
	}

	return ((get_uart(dev)->imsc & PL011_IMSC_TXIM) &&
		/* Ready when the TX interrupt status is set or the TX FIFO is empty. */
		(get_uart(dev)->ris & PL011_RIS_TXRIS || get_uart(dev)->fr & PL011_FR_TXFE));
}

static void pl011_irq_rx_enable(const struct device *dev)
{
	get_uart(dev)->imsc |= PL011_IMSC_RXIM | PL011_IMSC_RTIM;
}

static void pl011_irq_rx_disable(const struct device *dev)
{
	get_uart(dev)->imsc &= ~(PL011_IMSC_RXIM | PL011_IMSC_RTIM);
}

static int pl011_irq_rx_ready(const struct device *dev)
{
	struct pl011_data *data = dev->data;

	if (!data->sbsa && !(get_uart(dev)->cr & PL011_CR_RXE)) {
		return false;
	}

	return ((get_uart(dev)->imsc & PL011_IMSC_RXIM) &&
		(!(get_uart(dev)->fr & PL011_FR_RXFE)));
}

static void pl011_irq_err_enable(const struct device *dev)
{
	/* Enable the framing, parity, break, and overrun error interrupts */
	get_uart(dev)->imsc |= PL011_IMSC_ERROR_MASK;
}

static void pl011_irq_err_disable(const struct device *dev)
{
	get_uart(dev)->imsc &= ~PL011_IMSC_ERROR_MASK;
}

static int pl011_irq_is_pending(const struct device *dev)
{
	return pl011_irq_rx_ready(dev) || pl011_irq_tx_ready(dev);
}

static int pl011_irq_update(const struct device *dev)
{
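	/*
	 * Nothing is cached here: the other irq_* accessors read the PL011
	 * status registers directly, so this hook only reports that interrupt
	 * processing may proceed (a common pattern for uart_irq_update()).
	 */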
	return 1;
}

static void pl011_irq_callback_set(const struct device *dev,
				   uart_irq_callback_user_data_t cb,
				   void *cb_data)
{
	struct pl011_data *data = dev->data;

	data->irq_cb = cb;
	data->irq_cb_data = cb_data;
}
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */

static DEVICE_API(uart, pl011_driver_api) = {
	.poll_in = pl011_poll_in,
	.poll_out = pl011_poll_out,
	.err_check = pl011_err_check,
#ifdef CONFIG_UART_USE_RUNTIME_CONFIGURE
	.configure = pl011_runtime_configure,
	.config_get = pl011_runtime_config_get,
#endif
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	.fifo_fill = pl011_fifo_fill,
	.fifo_read = pl011_fifo_read,
	.irq_tx_enable = pl011_irq_tx_enable,
	.irq_tx_disable = pl011_irq_tx_disable,
	.irq_tx_ready = pl011_irq_tx_ready,
	.irq_rx_enable = pl011_irq_rx_enable,
	.irq_rx_disable = pl011_irq_rx_disable,
	.irq_tx_complete = pl011_irq_tx_complete,
	.irq_rx_ready = pl011_irq_rx_ready,
	.irq_err_enable = pl011_irq_err_enable,
	.irq_err_disable = pl011_irq_err_disable,
	.irq_is_pending = pl011_irq_is_pending,
	.irq_update = pl011_irq_update,
	.irq_callback_set = pl011_irq_callback_set,
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */
};

static int pl011_init(const struct device *dev)
{
	const struct pl011_config *config = dev->config;
	struct pl011_data *data = dev->data;
	int ret;

	DEVICE_MMIO_MAP(dev, K_MEM_CACHE_NONE);

#if defined(CONFIG_RESET)
	if (config->reset.dev) {
		ret = reset_line_toggle_dt(&config->reset);
		if (ret) {
			return ret;
		}
	}
#endif

#if defined(CONFIG_CLOCK_CONTROL)
	if (config->clock_dev) {
		clock_control_on(config->clock_dev, config->clock_id);
		clock_control_get_rate(config->clock_dev, config->clock_id, &data->clk_freq);
	}
#endif

	/*
	 * When working in SBSA mode, we assume that the UART is already
	 * configured, or does not require configuration at all (e.g. when the
	 * UART is emulated by virtualization software).
	 */
	if (!data->sbsa) {
#if defined(CONFIG_PINCTRL)
		ret = pinctrl_apply_state(config->pincfg, PINCTRL_STATE_DEFAULT);
		if (ret) {
			return ret;
		}
#endif
		/* Call the vendor-specific function to power on the peripheral */
		if (config->pwr_on_func != NULL) {
			ret = config->pwr_on_func();
		}

		/* Disable the UART */
		pl011_disable(dev);
		pl011_disable_fifo(dev);

		/* Call the vendor-specific function to enable the peripheral clock */
		if (config->clk_enable_func != NULL) {
			ret = config->clk_enable_func(dev, data->clk_freq);
			if (ret) {
				return ret;
			}
		}

		pl011_runtime_configure_internal(dev, &data->uart_cfg, false);

		/* Set the transmit and receive interrupt FIFO trigger levels */
		get_uart(dev)->ifls = FIELD_PREP(PL011_IFLS_TXIFLSEL_M, TXIFLSEL_1_8_FULL)
				      | FIELD_PREP(PL011_IFLS_RXIFLSEL_M, RXIFLSEL_1_2_FULL);
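		/*
		 * With these settings (assuming the standard 16-entry PL011
		 * FIFOs) the TX interrupt asserts once the TX FIFO drains to
		 * 1/8 full (2 entries) and the RX interrupt asserts once the
		 * RX FIFO reaches 1/2 full (8 entries); see the UARTIFLS
		 * description in the PL011 TRM (ARM DDI 0183).
		 */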

		/* Enabling the FIFOs */
		if (!config->fifo_disable) {
			pl011_enable_fifo(dev);
		}
	}
	/* initialize all IRQs as masked */
	get_uart(dev)->imsc = 0U;
	get_uart(dev)->icr = PL011_IMSC_MASK_ALL;

	if (!data->sbsa) {
		get_uart(dev)->dmacr = 0U;
		barrier_isync_fence_full();
		get_uart(dev)->cr &= ~PL011_CR_SIREN;
		get_uart(dev)->cr |= PL011_CR_RXE | PL011_CR_TXE;
		barrier_isync_fence_full();
	}
#ifdef CONFIG_UART_INTERRUPT_DRIVEN
	config->irq_config_func(dev);
	data->sw_call_txdrdy = true;
#endif
	if (!data->sbsa) {
		pl011_enable(dev);
	}

	return 0;
}

#define COMPAT_SPECIFIC_FUNC_NAME(prefix, name) _CONCAT(_CONCAT(prefix, name), _)

/*
 * The first element of the compatible property is used to determine the type.
 * When compatible is defined as "ambiq,uart", "arm,pl011",
 * this macro expands to pwr_on_ambiq_uart_[n].
 */
#define COMPAT_SPECIFIC_PWR_ON_FUNC(n)                                                             \
	_CONCAT(COMPAT_SPECIFIC_FUNC_NAME(pwr_on_, DT_INST_STRING_TOKEN_BY_IDX(n, compatible, 0)), \
		n)


/*
 * The first element of the compatible property is used to determine the type.
 * When compatible is defined as "ambiq,uart", "arm,pl011",
 * this macro expands to clk_enable_ambiq_uart_[n].
 */
#define COMPAT_SPECIFIC_CLK_ENABLE_FUNC(n)                                                         \
	_CONCAT(COMPAT_SPECIFIC_FUNC_NAME(clk_enable_,                                             \
					  DT_INST_STRING_TOKEN_BY_IDX(n, compatible, 0)), n)

/*
 * The first element of the compatible property is used to determine the type.
 * When compatible is defined as "ambiq,uart", "arm,pl011",
 * this macro expands to AMBIQ_UART_DEFINE(n).
 */
#define COMPAT_SPECIFIC_DEFINE(n)                                                                  \
	_CONCAT(DT_INST_STRING_UPPER_TOKEN_BY_IDX(n, compatible, 0), _DEFINE)(n)

#define COMPAT_SPECIFIC_CLOCK_CTLR_SUBSYS_CELL(n)                                                  \
	_CONCAT(DT_INST_STRING_UPPER_TOKEN_BY_IDX(n, compatible, 0), _CLOCK_CTLR_SUBSYS_CELL)
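
/*
 * For illustration: for an instance n whose first compatible is "arm,pl011",
 * the macros above expand roughly as follows (devicetree string tokens turn
 * "arm,pl011" into arm_pl011 / ARM_PL011):
 *   COMPAT_SPECIFIC_PWR_ON_FUNC(n)     -> pwr_on_arm_pl011_n
 *   COMPAT_SPECIFIC_CLK_ENABLE_FUNC(n) -> clk_enable_arm_pl011_n
 *   COMPAT_SPECIFIC_DEFINE(n)          -> ARM_PL011_DEFINE(n)
 * These resolve to the no-op helpers defined by ARM_PL011_DEFINE() below, or,
 * presumably, to the vendor-specific ones provided by the Ambiq and
 * Raspberry Pi Pico headers included above.
 */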

#if defined(CONFIG_PINCTRL)
#define PINCTRL_DEFINE(n) PINCTRL_DT_INST_DEFINE(n);
#define PINCTRL_INIT(n) .pincfg = PINCTRL_DT_INST_DEV_CONFIG_GET(n),
#else
#define PINCTRL_DEFINE(n)
#define PINCTRL_INIT(n)
#endif /* CONFIG_PINCTRL */

#if defined(CONFIG_RESET)
#define RESET_INIT(n)                                                                              \
	IF_ENABLED(DT_INST_NODE_HAS_PROP(n, resets), (.reset = RESET_DT_SPEC_INST_GET(n),))
#else
#define RESET_INIT(n)
#endif

#define CLOCK_INIT(n)                                                                              \
	COND_CODE_1(DT_NODE_HAS_COMPAT(DT_INST_CLOCKS_CTLR(n), fixed_clock), (),                   \
		    (.clock_dev = DEVICE_DT_GET(DT_INST_CLOCKS_CTLR(n)),                           \
		     .clock_id = (clock_control_subsys_t)DT_INST_CLOCKS_CELL(n,                    \
					COMPAT_SPECIFIC_CLOCK_CTLR_SUBSYS_CELL(n)),))
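
/*
 * If the node referenced by the "clocks" property is a fixed-clock, CLOCK_INIT()
 * adds nothing and the frequency is taken straight from the devicetree into
 * pl011_data.clk_freq (see PL011_INIT() below); otherwise the clock controller
 * device and subsystem ID are stored so that pl011_init() can turn the clock on
 * and query its rate at runtime.
 */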

#define ARM_PL011_DEFINE(n)                                                                        \
	static inline int pwr_on_arm_pl011_##n(void)                                               \
	{                                                                                          \
		return 0;                                                                          \
	}                                                                                          \
	static inline int clk_enable_arm_pl011_##n(const struct device *dev, uint32_t clk)         \
	{                                                                                          \
		return 0;                                                                          \
	}

#ifdef CONFIG_UART_INTERRUPT_DRIVEN
void pl011_isr(const struct device *dev)
{
	struct pl011_data *data = dev->data;

	/* Verify if the callback has been registered */
	if (data->irq_cb) {
		K_SPINLOCK(&data->irq_cb_lock) {
			data->irq_cb(dev, data->irq_cb_data);
		}
	}
}
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */

#ifdef CONFIG_UART_INTERRUPT_DRIVEN
#define PL011_IRQ_CONFIG_FUNC_BODY(n, prop, i)                                                     \
	{                                                                                          \
		IRQ_CONNECT(DT_IRQ_BY_IDX(n, i, irq),                                              \
			    DT_IRQ_BY_IDX(n, i, priority),                                         \
			    pl011_isr,                                                             \
			    DEVICE_DT_GET(n),                                                      \
			    0);                                                                    \
		irq_enable(DT_IRQ_BY_IDX(n, i, irq));                                              \
	}

#define PL011_CONFIG_PORT(n)                                                                       \
	static void pl011_irq_config_func_##n(const struct device *dev)                            \
	{                                                                                          \
		DT_INST_FOREACH_PROP_ELEM(n, interrupt_names,                                      \
					  PL011_IRQ_CONFIG_FUNC_BODY)                              \
	};                                                                                         \
                                                                                                   \
	static struct pl011_config pl011_cfg_port_##n = {                                          \
		DEVICE_MMIO_ROM_INIT(DT_DRV_INST(n)),                                              \
		CLOCK_INIT(n)                                                                      \
		PINCTRL_INIT(n)                                                                    \
		.irq_config_func = pl011_irq_config_func_##n,                                      \
		.fifo_disable = DT_INST_PROP(n, fifo_disable),                                     \
		.clk_enable_func = COMPAT_SPECIFIC_CLK_ENABLE_FUNC(n),                             \
		.pwr_on_func = COMPAT_SPECIFIC_PWR_ON_FUNC(n),                                     \
	};
#else
#define PL011_CONFIG_PORT(n)                                                                       \
	static struct pl011_config pl011_cfg_port_##n = {                                          \
		DEVICE_MMIO_ROM_INIT(DT_DRV_INST(n)),                                              \
		CLOCK_INIT(n)                                                                      \
		PINCTRL_INIT(n)                                                                    \
	};
#endif /* CONFIG_UART_INTERRUPT_DRIVEN */

#define PL011_INIT(n)                                                                              \
	PINCTRL_DEFINE(n)                                                                          \
	COMPAT_SPECIFIC_DEFINE(n)                                                                  \
	PL011_CONFIG_PORT(n)                                                                       \
                                                                                                   \
	static struct pl011_data pl011_data_port_##n = {                                           \
		.uart_cfg =                                                                        \
			{                                                                          \
				.baudrate = DT_INST_PROP(n, current_speed),                        \
				.parity = UART_CFG_PARITY_NONE,                                    \
				.stop_bits = UART_CFG_STOP_BITS_1,                                 \
				.data_bits = UART_CFG_DATA_BITS_8,                                 \
				.flow_ctrl = DT_INST_PROP(n, hw_flow_control)                      \
						     ? UART_CFG_FLOW_CTRL_RTS_CTS                  \
						     : UART_CFG_FLOW_CTRL_NONE,                    \
			},                                                                         \
		.clk_freq =                                                                        \
			COND_CODE_1(DT_NODE_HAS_COMPAT(DT_INST_CLOCKS_CTLR(n), fixed_clock),      \
				    (DT_INST_PROP_BY_PHANDLE(n, clocks, clock_frequency)), (0)),   \
	};                                                                                         \
                                                                                                   \
	DEVICE_DT_INST_DEFINE(n, pl011_init, PM_INST_GET(n), &pl011_data_port_##n,                 \
			      &pl011_cfg_port_##n, PRE_KERNEL_1, CONFIG_SERIAL_INIT_PRIORITY,      \
			      &pl011_driver_api);

DT_INST_FOREACH_STATUS_OKAY(PL011_INIT)

#ifdef CONFIG_UART_PL011_SBSA

#undef DT_DRV_COMPAT
#define DT_DRV_COMPAT SBSA_COMPAT

#ifdef CONFIG_UART_INTERRUPT_DRIVEN
#define PL011_SBSA_CONFIG_PORT(n)                                                                  \
	static void pl011_irq_config_func_sbsa_##n(const struct device *dev)                       \
	{                                                                                          \
		DT_INST_FOREACH_PROP_ELEM(n, interrupt_names,                                      \
					  PL011_IRQ_CONFIG_FUNC_BODY)                              \
	};                                                                                         \
                                                                                                   \
	static struct pl011_config pl011_cfg_sbsa_##n = {                                          \
		DEVICE_MMIO_ROM_INIT(DT_DRV_INST(n)),                                              \
		.irq_config_func = pl011_irq_config_func_sbsa_##n,                                 \
	};
#else
#define PL011_SBSA_CONFIG_PORT(n)                                                                  \
	static struct pl011_config pl011_cfg_sbsa_##n = {                                          \
		DEVICE_MMIO_ROM_INIT(DT_DRV_INST(n)),                                              \
	};
#endif

#define PL011_SBSA_INIT(n)                                                                         \
	PL011_SBSA_CONFIG_PORT(n)                                                                  \
                                                                                                   \
	static struct pl011_data pl011_data_sbsa_##n = {                                           \
		.sbsa = true,                                                                      \
	};                                                                                         \
                                                                                                   \
	DEVICE_DT_INST_DEFINE(n, pl011_init,                                                       \
			      NULL,                                                                \
			      &pl011_data_sbsa_##n,                                                \
			      &pl011_cfg_sbsa_##n,                                                 \
			      PRE_KERNEL_1,                                                        \
			      CONFIG_SERIAL_INIT_PRIORITY,                                         \
			      &pl011_driver_api);

DT_INST_FOREACH_STATUS_OKAY(PL011_SBSA_INIT)

#endif /* CONFIG_UART_PL011_SBSA */