1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * ***************************************************************************
4 * Marvell Armada-3700 Serial Driver
5 * Author: Wilson Ding <dingwei@marvell.com>
6 * Copyright (C) 2015 Marvell International Ltd.
7 * ***************************************************************************
8 */
9
10 #include <linux/clk.h>
11 #include <linux/clk-provider.h>
12 #include <linux/console.h>
13 #include <linux/delay.h>
14 #include <linux/device.h>
15 #include <linux/init.h>
16 #include <linux/io.h>
17 #include <linux/iopoll.h>
18 #include <linux/math64.h>
19 #include <linux/of.h>
20 #include <linux/of_address.h>
21 #include <linux/of_device.h>
22 #include <linux/of_irq.h>
23 #include <linux/of_platform.h>
24 #include <linux/platform_device.h>
25 #include <linux/serial.h>
26 #include <linux/serial_core.h>
27 #include <linux/slab.h>
28 #include <linux/tty.h>
29 #include <linux/tty_flip.h>
30
31 /* Register Map */
32 #define UART_STD_RBR 0x00
33 #define UART_EXT_RBR 0x18
34
35 #define UART_STD_TSH 0x04
36 #define UART_EXT_TSH 0x1C
37
38 #define UART_STD_CTRL1 0x08
39 #define UART_EXT_CTRL1 0x04
40 #define CTRL_SOFT_RST BIT(31)
41 #define CTRL_TXFIFO_RST BIT(15)
42 #define CTRL_RXFIFO_RST BIT(14)
43 #define CTRL_SND_BRK_SEQ BIT(11)
44 #define CTRL_BRK_DET_INT BIT(3)
45 #define CTRL_FRM_ERR_INT BIT(2)
46 #define CTRL_PAR_ERR_INT BIT(1)
47 #define CTRL_OVR_ERR_INT BIT(0)
48 #define CTRL_BRK_INT (CTRL_BRK_DET_INT | CTRL_FRM_ERR_INT | \
49 CTRL_PAR_ERR_INT | CTRL_OVR_ERR_INT)
50
51 #define UART_STD_CTRL2 UART_STD_CTRL1
52 #define UART_EXT_CTRL2 0x20
53 #define CTRL_STD_TX_RDY_INT BIT(5)
54 #define CTRL_EXT_TX_RDY_INT BIT(6)
55 #define CTRL_STD_RX_RDY_INT BIT(4)
56 #define CTRL_EXT_RX_RDY_INT BIT(5)
57
58 #define UART_STAT 0x0C
59 #define STAT_TX_FIFO_EMP BIT(13)
60 #define STAT_TX_FIFO_FUL BIT(11)
61 #define STAT_TX_EMP BIT(6)
62 #define STAT_STD_TX_RDY BIT(5)
63 #define STAT_EXT_TX_RDY BIT(15)
64 #define STAT_STD_RX_RDY BIT(4)
65 #define STAT_EXT_RX_RDY BIT(14)
66 #define STAT_BRK_DET BIT(3)
67 #define STAT_FRM_ERR BIT(2)
68 #define STAT_PAR_ERR BIT(1)
69 #define STAT_OVR_ERR BIT(0)
70 #define STAT_BRK_ERR (STAT_BRK_DET | STAT_FRM_ERR \
71 | STAT_PAR_ERR | STAT_OVR_ERR)
72
73 /*
74 * The Marvell Armada 3700 Functional Specifications describe bit 21 of the
75 * UART Clock Control register as controlling UART1 and bit 20 as controlling
76 * UART2. But in reality bit 21 controls UART2 and bit 20 controls UART1. This
77 * seems to be an error in Marvell's documentation, hence the CLK_DIS macros below are swapped.
78 */
79
80 #define UART_BRDV 0x10
81 /* These bits are located in UART1 address space and control UART2 */
82 #define UART2_CLK_DIS BIT(21)
83 /* These bits are located in UART1 address space and control UART1 */
84 #define UART1_CLK_DIS BIT(20)
85 /* These bits are located in UART1 address space and control both UARTs */
86 #define CLK_NO_XTAL BIT(19)
87 #define CLK_TBG_DIV1_SHIFT 15
88 #define CLK_TBG_DIV1_MASK 0x7
89 #define CLK_TBG_DIV1_MAX 6
90 #define CLK_TBG_DIV2_SHIFT 12
91 #define CLK_TBG_DIV2_MASK 0x7
92 #define CLK_TBG_DIV2_MAX 6
93 #define CLK_TBG_SEL_SHIFT 10
94 #define CLK_TBG_SEL_MASK 0x3
95 /* These bits are located in both UARTs address space */
96 #define BRDV_BAUD_MASK 0x3FF
97 #define BRDV_BAUD_MAX BRDV_BAUD_MASK
98
99 #define UART_OSAMP 0x14
100 #define OSAMP_DEFAULT_DIVISOR 16
101 #define OSAMP_DIVISORS_MASK 0x3F3F3F3F
102 #define OSAMP_MAX_DIVISOR 63
103
104 #define MVEBU_NR_UARTS 2
105
106 #define MVEBU_UART_TYPE "mvebu-uart"
107 #define DRIVER_NAME "mvebu_serial"
108
109 enum {
110 /* Either there is only one summed IRQ... */
111 UART_IRQ_SUM = 0,
112 /* ...or there are two separate IRQs for RX and TX */
113 UART_RX_IRQ = 0,
114 UART_TX_IRQ,
115 UART_IRQ_COUNT
116 };
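/*
 * With the old DT binding, platform_irq_count() is 1 and the single IRQ is
 * used as UART_IRQ_SUM; with the new binding, the "uart-rx" and "uart-tx"
 * IRQs are requested by name in mvebu_uart_probe(). Note that UART_IRQ_SUM
 * and UART_RX_IRQ deliberately share index 0.
 */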
117
118 /* Diverging register offsets */
119 struct uart_regs_layout {
120 unsigned int rbr;
121 unsigned int tsh;
122 unsigned int ctrl;
123 unsigned int intr;
124 };
125
126 /* Diverging flags */
127 struct uart_flags {
128 unsigned int ctrl_tx_rdy_int;
129 unsigned int ctrl_rx_rdy_int;
130 unsigned int stat_tx_rdy;
131 unsigned int stat_rx_rdy;
132 };
133
134 /* Driver data, a structure for each UART port */
135 struct mvebu_uart_driver_data {
136 bool is_ext;
137 struct uart_regs_layout regs;
138 struct uart_flags flags;
139 };
140
141 /* Saved registers during suspend */
142 struct mvebu_uart_pm_regs {
143 unsigned int rbr;
144 unsigned int tsh;
145 unsigned int ctrl;
146 unsigned int intr;
147 unsigned int stat;
148 unsigned int brdv;
149 unsigned int osamp;
150 };
151
152 /* MVEBU UART driver structure */
153 struct mvebu_uart {
154 struct uart_port *port;
155 struct clk *clk;
156 int irq[UART_IRQ_COUNT];
157 struct mvebu_uart_driver_data *data;
158 #if defined(CONFIG_PM)
159 struct mvebu_uart_pm_regs pm_regs;
160 #endif /* CONFIG_PM */
161 };
162
163 static struct mvebu_uart *to_mvuart(struct uart_port *port)
164 {
165 return (struct mvebu_uart *)port->private_data;
166 }
167
168 #define IS_EXTENDED(port) (to_mvuart(port)->data->is_ext)
169
170 #define UART_RBR(port) (to_mvuart(port)->data->regs.rbr)
171 #define UART_TSH(port) (to_mvuart(port)->data->regs.tsh)
172 #define UART_CTRL(port) (to_mvuart(port)->data->regs.ctrl)
173 #define UART_INTR(port) (to_mvuart(port)->data->regs.intr)
174
175 #define CTRL_TX_RDY_INT(port) (to_mvuart(port)->data->flags.ctrl_tx_rdy_int)
176 #define CTRL_RX_RDY_INT(port) (to_mvuart(port)->data->flags.ctrl_rx_rdy_int)
177 #define STAT_TX_RDY(port) (to_mvuart(port)->data->flags.stat_tx_rdy)
178 #define STAT_RX_RDY(port) (to_mvuart(port)->data->flags.stat_rx_rdy)
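/*
 * For example, UART_RBR(port) evaluates to UART_STD_RBR (0x00) on the
 * standard UART and to UART_EXT_RBR (0x18) on the extended variant, as set
 * up by uart_std_driver_data and uart_ext_driver_data below.
 */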
179
180 static struct uart_port mvebu_uart_ports[MVEBU_NR_UARTS];
181
182 static DEFINE_SPINLOCK(mvebu_uart_lock);
183
184 /* Core UART Driver Operations */
185 static unsigned int mvebu_uart_tx_empty(struct uart_port *port)
186 {
187 unsigned long flags;
188 unsigned int st;
189
190 spin_lock_irqsave(&port->lock, flags);
191 st = readl(port->membase + UART_STAT);
192 spin_unlock_irqrestore(&port->lock, flags);
193
194 return (st & STAT_TX_EMP) ? TIOCSER_TEMT : 0;
195 }
196
197 static unsigned int mvebu_uart_get_mctrl(struct uart_port *port)
198 {
199 return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
200 }
201
202 static void mvebu_uart_set_mctrl(struct uart_port *port,
203 unsigned int mctrl)
204 {
205 /*
206 * Even if we do not support configuring the modem control lines, this
207 * function must be provided to the serial core
208 */
209 }
210
211 static void mvebu_uart_stop_tx(struct uart_port *port)
212 {
213 unsigned int ctl = readl(port->membase + UART_INTR(port));
214
215 ctl &= ~CTRL_TX_RDY_INT(port);
216 writel(ctl, port->membase + UART_INTR(port));
217 }
218
219 static void mvebu_uart_start_tx(struct uart_port *port)
220 {
221 unsigned int ctl;
222 struct circ_buf *xmit = &port->state->xmit;
223
224 if (IS_EXTENDED(port) && !uart_circ_empty(xmit)) {
225 writel(xmit->buf[xmit->tail], port->membase + UART_TSH(port));
226 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
227 port->icount.tx++;
228 }
229
230 ctl = readl(port->membase + UART_INTR(port));
231 ctl |= CTRL_TX_RDY_INT(port);
232 writel(ctl, port->membase + UART_INTR(port));
233 }
234
235 static void mvebu_uart_stop_rx(struct uart_port *port)
236 {
237 unsigned int ctl;
238
239 ctl = readl(port->membase + UART_CTRL(port));
240 ctl &= ~CTRL_BRK_INT;
241 writel(ctl, port->membase + UART_CTRL(port));
242
243 ctl = readl(port->membase + UART_INTR(port));
244 ctl &= ~CTRL_RX_RDY_INT(port);
245 writel(ctl, port->membase + UART_INTR(port));
246 }
247
248 static void mvebu_uart_break_ctl(struct uart_port *port, int brk)
249 {
250 unsigned int ctl;
251 unsigned long flags;
252
253 spin_lock_irqsave(&port->lock, flags);
254 ctl = readl(port->membase + UART_CTRL(port));
255 if (brk == -1)
256 ctl |= CTRL_SND_BRK_SEQ;
257 else
258 ctl &= ~CTRL_SND_BRK_SEQ;
259 writel(ctl, port->membase + UART_CTRL(port));
260 spin_unlock_irqrestore(&port->lock, flags);
261 }
262
263 static void mvebu_uart_rx_chars(struct uart_port *port, unsigned int status)
264 {
265 struct tty_port *tport = &port->state->port;
266 unsigned char ch = 0;
267 char flag = 0;
268 int ret;
269
270 do {
271 if (status & STAT_RX_RDY(port)) {
272 ch = readl(port->membase + UART_RBR(port));
273 ch &= 0xff;
274 flag = TTY_NORMAL;
275 port->icount.rx++;
276
277 if (status & STAT_PAR_ERR)
278 port->icount.parity++;
279 }
280
281 /*
282 * For UART2, error bits are not cleared on buffer read.
283 * This causes an interrupt loop and a system hang.
284 */
285 if (IS_EXTENDED(port) && (status & STAT_BRK_ERR)) {
286 ret = readl(port->membase + UART_STAT);
287 ret |= STAT_BRK_ERR;
288 writel(ret, port->membase + UART_STAT);
289 }
290
291 if (status & STAT_BRK_DET) {
292 port->icount.brk++;
293 status &= ~(STAT_FRM_ERR | STAT_PAR_ERR);
294 if (uart_handle_break(port))
295 goto ignore_char;
296 }
297
298 if (status & STAT_OVR_ERR)
299 port->icount.overrun++;
300
301 if (status & STAT_FRM_ERR)
302 port->icount.frame++;
303
304 if (uart_handle_sysrq_char(port, ch))
305 goto ignore_char;
306
307 if (status & port->ignore_status_mask & STAT_PAR_ERR)
308 status &= ~STAT_RX_RDY(port);
309
310 status &= port->read_status_mask;
311
312 if (status & STAT_PAR_ERR)
313 flag = TTY_PARITY;
314
315 status &= ~port->ignore_status_mask;
316
317 if (status & STAT_RX_RDY(port))
318 tty_insert_flip_char(tport, ch, flag);
319
320 if (status & STAT_BRK_DET)
321 tty_insert_flip_char(tport, 0, TTY_BREAK);
322
323 if (status & STAT_FRM_ERR)
324 tty_insert_flip_char(tport, 0, TTY_FRAME);
325
326 if (status & STAT_OVR_ERR)
327 tty_insert_flip_char(tport, 0, TTY_OVERRUN);
328
329 ignore_char:
330 status = readl(port->membase + UART_STAT);
331 } while (status & (STAT_RX_RDY(port) | STAT_BRK_DET));
332
333 tty_flip_buffer_push(tport);
334 }
335
336 static void mvebu_uart_tx_chars(struct uart_port *port, unsigned int status)
337 {
338 struct circ_buf *xmit = &port->state->xmit;
339 unsigned int count;
340 unsigned int st;
341
342 if (port->x_char) {
343 writel(port->x_char, port->membase + UART_TSH(port));
344 port->icount.tx++;
345 port->x_char = 0;
346 return;
347 }
348
349 if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
350 mvebu_uart_stop_tx(port);
351 return;
352 }
353
354 for (count = 0; count < port->fifosize; count++) {
355 writel(xmit->buf[xmit->tail], port->membase + UART_TSH(port));
356 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
357 port->icount.tx++;
358
359 if (uart_circ_empty(xmit))
360 break;
361
362 st = readl(port->membase + UART_STAT);
363 if (st & STAT_TX_FIFO_FUL)
364 break;
365 }
366
367 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
368 uart_write_wakeup(port);
369
370 if (uart_circ_empty(xmit))
371 mvebu_uart_stop_tx(port);
372 }
373
374 static irqreturn_t mvebu_uart_isr(int irq, void *dev_id)
375 {
376 struct uart_port *port = (struct uart_port *)dev_id;
377 unsigned int st = readl(port->membase + UART_STAT);
378
379 if (st & (STAT_RX_RDY(port) | STAT_OVR_ERR | STAT_FRM_ERR |
380 STAT_BRK_DET))
381 mvebu_uart_rx_chars(port, st);
382
383 if (st & STAT_TX_RDY(port))
384 mvebu_uart_tx_chars(port, st);
385
386 return IRQ_HANDLED;
387 }
388
389 static irqreturn_t mvebu_uart_rx_isr(int irq, void *dev_id)
390 {
391 struct uart_port *port = (struct uart_port *)dev_id;
392 unsigned int st = readl(port->membase + UART_STAT);
393
394 if (st & (STAT_RX_RDY(port) | STAT_OVR_ERR | STAT_FRM_ERR |
395 STAT_BRK_DET))
396 mvebu_uart_rx_chars(port, st);
397
398 return IRQ_HANDLED;
399 }
400
401 static irqreturn_t mvebu_uart_tx_isr(int irq, void *dev_id)
402 {
403 struct uart_port *port = (struct uart_port *)dev_id;
404 unsigned int st = readl(port->membase + UART_STAT);
405
406 if (st & STAT_TX_RDY(port))
407 mvebu_uart_tx_chars(port, st);
408
409 return IRQ_HANDLED;
410 }
411
412 static int mvebu_uart_startup(struct uart_port *port)
413 {
414 struct mvebu_uart *mvuart = to_mvuart(port);
415 unsigned int ctl;
416 int ret;
417
418 writel(CTRL_TXFIFO_RST | CTRL_RXFIFO_RST,
419 port->membase + UART_CTRL(port));
420 udelay(1);
421
422 /* Clear the error bits of the state register before requesting the IRQs */
423 ret = readl(port->membase + UART_STAT);
424 ret |= STAT_BRK_ERR;
425 writel(ret, port->membase + UART_STAT);
426
427 writel(CTRL_BRK_INT, port->membase + UART_CTRL(port));
428
429 ctl = readl(port->membase + UART_INTR(port));
430 ctl |= CTRL_RX_RDY_INT(port);
431 writel(ctl, port->membase + UART_INTR(port));
432
433 if (!mvuart->irq[UART_TX_IRQ]) {
434 /* Old bindings with just one interrupt (UART0 only) */
435 ret = devm_request_irq(port->dev, mvuart->irq[UART_IRQ_SUM],
436 mvebu_uart_isr, port->irqflags,
437 dev_name(port->dev), port);
438 if (ret) {
439 dev_err(port->dev, "unable to request IRQ %d\n",
440 mvuart->irq[UART_IRQ_SUM]);
441 return ret;
442 }
443 } else {
444 /* New bindings with an IRQ for RX and TX (both UARTs) */
445 ret = devm_request_irq(port->dev, mvuart->irq[UART_RX_IRQ],
446 mvebu_uart_rx_isr, port->irqflags,
447 dev_name(port->dev), port);
448 if (ret) {
449 dev_err(port->dev, "unable to request IRQ %d\n",
450 mvuart->irq[UART_RX_IRQ]);
451 return ret;
452 }
453
454 ret = devm_request_irq(port->dev, mvuart->irq[UART_TX_IRQ],
455 mvebu_uart_tx_isr, port->irqflags,
456 dev_name(port->dev),
457 port);
458 if (ret) {
459 dev_err(port->dev, "unable to request IRQ %d\n",
460 mvuart->irq[UART_TX_IRQ]);
461 devm_free_irq(port->dev, mvuart->irq[UART_RX_IRQ],
462 port);
463 return ret;
464 }
465 }
466
467 return 0;
468 }
469
470 static void mvebu_uart_shutdown(struct uart_port *port)
471 {
472 struct mvebu_uart *mvuart = to_mvuart(port);
473
474 writel(0, port->membase + UART_INTR(port));
475
476 if (!mvuart->irq[UART_TX_IRQ]) {
477 devm_free_irq(port->dev, mvuart->irq[UART_IRQ_SUM], port);
478 } else {
479 devm_free_irq(port->dev, mvuart->irq[UART_RX_IRQ], port);
480 devm_free_irq(port->dev, mvuart->irq[UART_TX_IRQ], port);
481 }
482 }
483
484 static unsigned int mvebu_uart_baud_rate_set(struct uart_port *port, unsigned int baud)
485 {
486 unsigned int d_divisor, m_divisor;
487 unsigned long flags;
488 u32 brdv, osamp;
489
490 if (!port->uartclk)
491 return 0;
492
493 /*
494 * The baudrate is derived from the UART clock by means of these divisors:
495 * > d1 * d2 ("TBG divisors"): can divide only TBG clock from 1 to 6
496 * > D ("baud generator"): can divide the clock from 1 to 1023
497 * > M ("fractional divisor"): allows a better accuracy (from 1 to 63)
498 *
499 * Exact formulas for calculating baudrate:
500 *
501 * with default x16 scheme:
502 * baudrate = xtal / (d * 16)
503 * baudrate = tbg / (d1 * d2 * d * 16)
504 *
505 * with fractional divisor:
506 * baudrate = 10 * xtal / (d * (3 * (m1 + m2) + 2 * (m3 + m4)))
507 * baudrate = 10 * tbg / (d1*d2 * d * (3 * (m1 + m2) + 2 * (m3 + m4)))
508 *
509 * Oversampling value:
510 * osamp = (m1 << 0) | (m2 << 8) | (m3 << 16) | (m4 << 24);
511 *
512 * Where m1 controls number of clock cycles per bit for bits 1,2,3;
513 * m2 for bits 4,5,6; m3 for bits 7,8 and m4 for bits 9,10.
514 *
515 * To simplify baudrate setup set all the M prescalers to the same
516 * value. For baudrates 9600 Bd and higher, it is enough to use the
517 * default (x16) divisor or fractional divisor with M = 63, so there
518 * is no need to use real fractional support (where the M prescalers
519 * are not equal).
520 *
521 * When all the M prescalers are zeroed then default (x16) divisor is
522 * used. Default x16 scheme is more stable than M (fractional divisor),
523 * so use M only when D divisor is not enough to derive baudrate.
524 *
525 * Member port->uartclk is either xtal clock rate or TBG clock rate
526 * divided by (d1 * d2). So d1 and d2 are already set by the UART clock
527 * driver (and UART driver itself cannot change them). Moreover they are
528 * shared between both UARTs.
529 */
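/*
 * Worked example (assuming a hypothetical port->uartclk of 25 MHz, a value
 * not taken from this file): for baud = 9600 the default x16 scheme gives
 * d = DIV_ROUND_CLOSEST(25000000, 9600 * 16) = 163, which fits within
 * BRDV_BAUD_MAX, so the M prescalers stay zeroed and the resulting rate is
 * 25000000 / (163 * 16) = 9586 Bd (about 0.15% error).
 */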
530
531 m_divisor = OSAMP_DEFAULT_DIVISOR;
532 d_divisor = DIV_ROUND_CLOSEST(port->uartclk, baud * m_divisor);
533
534 if (d_divisor > BRDV_BAUD_MAX) {
535 /*
536 * Experiments show that small M divisors are unstable.
537 * Use maximal possible M = 63 and calculate D divisor.
538 */
539 m_divisor = OSAMP_MAX_DIVISOR;
540 d_divisor = DIV_ROUND_CLOSEST(port->uartclk, baud * m_divisor);
541 }
542
543 if (d_divisor < 1)
544 d_divisor = 1;
545 else if (d_divisor > BRDV_BAUD_MAX)
546 d_divisor = BRDV_BAUD_MAX;
547
548 spin_lock_irqsave(&mvebu_uart_lock, flags);
549 brdv = readl(port->membase + UART_BRDV);
550 brdv &= ~BRDV_BAUD_MASK;
551 brdv |= d_divisor;
552 writel(brdv, port->membase + UART_BRDV);
553 spin_unlock_irqrestore(&mvebu_uart_lock, flags);
554
555 osamp = readl(port->membase + UART_OSAMP);
556 osamp &= ~OSAMP_DIVISORS_MASK;
557 if (m_divisor != OSAMP_DEFAULT_DIVISOR)
558 osamp |= (m_divisor << 0) | (m_divisor << 8) |
559 (m_divisor << 16) | (m_divisor << 24);
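/* e.g. m_divisor = 63 programs 0x3F3F3F3F, i.e. all four M prescalers (m1..m4) set to 63 */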
560 writel(osamp, port->membase + UART_OSAMP);
561
562 return DIV_ROUND_CLOSEST(port->uartclk, d_divisor * m_divisor);
563 }
564
565 static void mvebu_uart_set_termios(struct uart_port *port,
566 struct ktermios *termios,
567 const struct ktermios *old)
568 {
569 unsigned long flags;
570 unsigned int baud, min_baud, max_baud;
571
572 spin_lock_irqsave(&port->lock, flags);
573
574 port->read_status_mask = STAT_RX_RDY(port) | STAT_OVR_ERR |
575 STAT_TX_RDY(port) | STAT_TX_FIFO_FUL;
576
577 if (termios->c_iflag & INPCK)
578 port->read_status_mask |= STAT_FRM_ERR | STAT_PAR_ERR;
579
580 port->ignore_status_mask = 0;
581 if (termios->c_iflag & IGNPAR)
582 port->ignore_status_mask |=
583 STAT_FRM_ERR | STAT_PAR_ERR | STAT_OVR_ERR;
584
585 if ((termios->c_cflag & CREAD) == 0)
586 port->ignore_status_mask |= STAT_RX_RDY(port) | STAT_BRK_ERR;
587
588 /*
589 * The maximal divisor is 1023 and the maximal fractional divisor is 63.
590 * Experiments show that baudrates above 1/80 of the parent clock rate
591 * are not stable, so disallow baudrates above 1/80 of the parent clock
592 * rate. If port->uartclk is not available, then
593 * mvebu_uart_baud_rate_set() fails, so the values of min_baud and
594 * max_baud do not matter in this case.
595 */
596 min_baud = DIV_ROUND_UP(port->uartclk, BRDV_BAUD_MAX *
597 OSAMP_MAX_DIVISOR);
598 max_baud = port->uartclk / 80;
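/*
 * Worked example (assuming a hypothetical 25 MHz port->uartclk):
 * min_baud = DIV_ROUND_UP(25000000, 1023 * 63) = 388 and
 * max_baud = 25000000 / 80 = 312500.
 */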
599
600 baud = uart_get_baud_rate(port, termios, old, min_baud, max_baud);
601 baud = mvebu_uart_baud_rate_set(port, baud);
602
603 /* In case the baudrate cannot be changed, report the previous value */
604 if (baud == 0 && old)
605 baud = tty_termios_baud_rate(old);
606
607 /* Only the following flag changes are supported */
608 if (old) {
609 termios->c_iflag &= INPCK | IGNPAR;
610 termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR);
611 termios->c_cflag &= CREAD | CBAUD;
612 termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD);
613 termios->c_cflag |= CS8;
614 }
615
616 if (baud != 0) {
617 tty_termios_encode_baud_rate(termios, baud, baud);
618 uart_update_timeout(port, termios->c_cflag, baud);
619 }
620
621 spin_unlock_irqrestore(&port->lock, flags);
622 }
623
624 static const char *mvebu_uart_type(struct uart_port *port)
625 {
626 return MVEBU_UART_TYPE;
627 }
628
629 static void mvebu_uart_release_port(struct uart_port *port)
630 {
631 /* Nothing to do here */
632 }
633
634 static int mvebu_uart_request_port(struct uart_port *port)
635 {
636 return 0;
637 }
638
639 #ifdef CONFIG_CONSOLE_POLL
640 static int mvebu_uart_get_poll_char(struct uart_port *port)
641 {
642 unsigned int st = readl(port->membase + UART_STAT);
643
644 if (!(st & STAT_RX_RDY(port)))
645 return NO_POLL_CHAR;
646
647 return readl(port->membase + UART_RBR(port));
648 }
649
650 static void mvebu_uart_put_poll_char(struct uart_port *port, unsigned char c)
651 {
652 unsigned int st;
653
654 for (;;) {
655 st = readl(port->membase + UART_STAT);
656
657 if (!(st & STAT_TX_FIFO_FUL))
658 break;
659
660 udelay(1);
661 }
662
663 writel(c, port->membase + UART_TSH(port));
664 }
665 #endif
666
667 static const struct uart_ops mvebu_uart_ops = {
668 .tx_empty = mvebu_uart_tx_empty,
669 .set_mctrl = mvebu_uart_set_mctrl,
670 .get_mctrl = mvebu_uart_get_mctrl,
671 .stop_tx = mvebu_uart_stop_tx,
672 .start_tx = mvebu_uart_start_tx,
673 .stop_rx = mvebu_uart_stop_rx,
674 .break_ctl = mvebu_uart_break_ctl,
675 .startup = mvebu_uart_startup,
676 .shutdown = mvebu_uart_shutdown,
677 .set_termios = mvebu_uart_set_termios,
678 .type = mvebu_uart_type,
679 .release_port = mvebu_uart_release_port,
680 .request_port = mvebu_uart_request_port,
681 #ifdef CONFIG_CONSOLE_POLL
682 .poll_get_char = mvebu_uart_get_poll_char,
683 .poll_put_char = mvebu_uart_put_poll_char,
684 #endif
685 };
686
687 /* Console Driver Operations */
688
689 #ifdef CONFIG_SERIAL_MVEBU_CONSOLE
690 /* Early Console */
691 static void mvebu_uart_putc(struct uart_port *port, unsigned char c)
692 {
693 unsigned int st;
694
695 for (;;) {
696 st = readl(port->membase + UART_STAT);
697 if (!(st & STAT_TX_FIFO_FUL))
698 break;
699 }
700
701 /* At this early stage, the DT is not parsed yet; only use UART0 */
702 writel(c, port->membase + UART_STD_TSH);
703
704 for (;;) {
705 st = readl(port->membase + UART_STAT);
706 if (st & STAT_TX_FIFO_EMP)
707 break;
708 }
709 }
710
711 static void mvebu_uart_putc_early_write(struct console *con,
712 const char *s,
713 unsigned int n)
714 {
715 struct earlycon_device *dev = con->data;
716
717 uart_console_write(&dev->port, s, n, mvebu_uart_putc);
718 }
719
720 static int __init
721 mvebu_uart_early_console_setup(struct earlycon_device *device,
722 const char *opt)
723 {
724 if (!device->port.membase)
725 return -ENODEV;
726
727 device->con->write = mvebu_uart_putc_early_write;
728
729 return 0;
730 }
731
732 EARLYCON_DECLARE(ar3700_uart, mvebu_uart_early_console_setup);
733 OF_EARLYCON_DECLARE(ar3700_uart, "marvell,armada-3700-uart",
734 mvebu_uart_early_console_setup);
735
736 static void wait_for_xmitr(struct uart_port *port)
737 {
738 u32 val;
739
740 readl_poll_timeout_atomic(port->membase + UART_STAT, val,
741 (val & STAT_TX_RDY(port)), 1, 10000);
742 }
743
744 static void wait_for_xmite(struct uart_port *port)
745 {
746 u32 val;
747
748 readl_poll_timeout_atomic(port->membase + UART_STAT, val,
749 (val & STAT_TX_EMP), 1, 10000);
750 }
751
752 static void mvebu_uart_console_putchar(struct uart_port *port, unsigned char ch)
753 {
754 wait_for_xmitr(port);
755 writel(ch, port->membase + UART_TSH(port));
756 }
757
758 static void mvebu_uart_console_write(struct console *co, const char *s,
759 unsigned int count)
760 {
761 struct uart_port *port = &mvebu_uart_ports[co->index];
762 unsigned long flags;
763 unsigned int ier, intr, ctl;
764 int locked = 1;
765
766 if (oops_in_progress)
767 locked = spin_trylock_irqsave(&port->lock, flags);
768 else
769 spin_lock_irqsave(&port->lock, flags);
770
771 ier = readl(port->membase + UART_CTRL(port)) & CTRL_BRK_INT;
772 intr = readl(port->membase + UART_INTR(port)) &
773 (CTRL_RX_RDY_INT(port) | CTRL_TX_RDY_INT(port));
774 writel(0, port->membase + UART_CTRL(port));
775 writel(0, port->membase + UART_INTR(port));
776
777 uart_console_write(port, s, count, mvebu_uart_console_putchar);
778
779 wait_for_xmite(port);
780
781 if (ier)
782 writel(ier, port->membase + UART_CTRL(port));
783
784 if (intr) {
785 ctl = intr | readl(port->membase + UART_INTR(port));
786 writel(ctl, port->membase + UART_INTR(port));
787 }
788
789 if (locked)
790 spin_unlock_irqrestore(&port->lock, flags);
791 }
792
793 static int mvebu_uart_console_setup(struct console *co, char *options)
794 {
795 struct uart_port *port;
796 int baud = 9600;
797 int bits = 8;
798 int parity = 'n';
799 int flow = 'n';
800
801 if (co->index < 0 || co->index >= MVEBU_NR_UARTS)
802 return -EINVAL;
803
804 port = &mvebu_uart_ports[co->index];
805
806 if (!port->mapbase || !port->membase) {
807 pr_debug("console on ttyMV%i not present\n", co->index);
808 return -ENODEV;
809 }
810
811 if (options)
812 uart_parse_options(options, &baud, &parity, &bits, &flow);
813
814 return uart_set_options(port, co, baud, parity, bits, flow);
815 }
816
817 static struct uart_driver mvebu_uart_driver;
818
819 static struct console mvebu_uart_console = {
820 .name = "ttyMV",
821 .write = mvebu_uart_console_write,
822 .device = uart_console_device,
823 .setup = mvebu_uart_console_setup,
824 .flags = CON_PRINTBUFFER,
825 .index = -1,
826 .data = &mvebu_uart_driver,
827 };
828
829 static int __init mvebu_uart_console_init(void)
830 {
831 register_console(&mvebu_uart_console);
832 return 0;
833 }
834
835 console_initcall(mvebu_uart_console_init);
836
838 #endif /* CONFIG_SERIAL_MVEBU_CONSOLE */
839
840 static struct uart_driver mvebu_uart_driver = {
841 .owner = THIS_MODULE,
842 .driver_name = DRIVER_NAME,
843 .dev_name = "ttyMV",
844 .nr = MVEBU_NR_UARTS,
845 #ifdef CONFIG_SERIAL_MVEBU_CONSOLE
846 .cons = &mvebu_uart_console,
847 #endif
848 };
849
850 #if defined(CONFIG_PM)
851 static int mvebu_uart_suspend(struct device *dev)
852 {
853 struct mvebu_uart *mvuart = dev_get_drvdata(dev);
854 struct uart_port *port = mvuart->port;
855 unsigned long flags;
856
857 uart_suspend_port(&mvebu_uart_driver, port);
858
859 mvuart->pm_regs.rbr = readl(port->membase + UART_RBR(port));
860 mvuart->pm_regs.tsh = readl(port->membase + UART_TSH(port));
861 mvuart->pm_regs.ctrl = readl(port->membase + UART_CTRL(port));
862 mvuart->pm_regs.intr = readl(port->membase + UART_INTR(port));
863 mvuart->pm_regs.stat = readl(port->membase + UART_STAT);
864 spin_lock_irqsave(&mvebu_uart_lock, flags);
865 mvuart->pm_regs.brdv = readl(port->membase + UART_BRDV);
866 spin_unlock_irqrestore(&mvebu_uart_lock, flags);
867 mvuart->pm_regs.osamp = readl(port->membase + UART_OSAMP);
868
869 device_set_wakeup_enable(dev, true);
870
871 return 0;
872 }
873
874 static int mvebu_uart_resume(struct device *dev)
875 {
876 struct mvebu_uart *mvuart = dev_get_drvdata(dev);
877 struct uart_port *port = mvuart->port;
878 unsigned long flags;
879
880 writel(mvuart->pm_regs.rbr, port->membase + UART_RBR(port));
881 writel(mvuart->pm_regs.tsh, port->membase + UART_TSH(port));
882 writel(mvuart->pm_regs.ctrl, port->membase + UART_CTRL(port));
883 writel(mvuart->pm_regs.intr, port->membase + UART_INTR(port));
884 writel(mvuart->pm_regs.stat, port->membase + UART_STAT);
885 spin_lock_irqsave(&mvebu_uart_lock, flags);
886 writel(mvuart->pm_regs.brdv, port->membase + UART_BRDV);
887 spin_unlock_irqrestore(&mvebu_uart_lock, flags);
888 writel(mvuart->pm_regs.osamp, port->membase + UART_OSAMP);
889
890 uart_resume_port(&mvebu_uart_driver, port);
891
892 return 0;
893 }
894
895 static const struct dev_pm_ops mvebu_uart_pm_ops = {
896 .suspend = mvebu_uart_suspend,
897 .resume = mvebu_uart_resume,
898 };
899 #endif /* CONFIG_PM */
900
901 static const struct of_device_id mvebu_uart_of_match[];
902
903 /* Counter to keep track of each UART port id when not using CONFIG_OF */
904 static int uart_num_counter;
905
906 static int mvebu_uart_probe(struct platform_device *pdev)
907 {
908 struct resource *reg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
909 const struct of_device_id *match = of_match_device(mvebu_uart_of_match,
910 &pdev->dev);
911 struct uart_port *port;
912 struct mvebu_uart *mvuart;
913 int id, irq;
914
915 if (!reg) {
916 dev_err(&pdev->dev, "no registers defined\n");
917 return -EINVAL;
918 }
919
920 /* Assume that all UART ports have a DT alias or none has */
921 id = of_alias_get_id(pdev->dev.of_node, "serial");
922 if (!pdev->dev.of_node || id < 0)
923 pdev->id = uart_num_counter++;
924 else
925 pdev->id = id;
926
927 if (pdev->id >= MVEBU_NR_UARTS) {
928 dev_err(&pdev->dev, "cannot have more than %d UART ports\n",
929 MVEBU_NR_UARTS);
930 return -EINVAL;
931 }
932
933 port = &mvebu_uart_ports[pdev->id];
934
935 spin_lock_init(&port->lock);
936
937 port->dev = &pdev->dev;
938 port->type = PORT_MVEBU;
939 port->ops = &mvebu_uart_ops;
940 port->regshift = 0;
941
942 port->fifosize = 32;
943 port->iotype = UPIO_MEM32;
944 port->flags = UPF_FIXED_PORT;
945 port->line = pdev->id;
946
947 /*
948 * The IRQ number is not stored in this structure because we may have
949 * two of them per port (RX and TX). Instead, use the ->irq[] array of
950 * the driver's own mvebu_uart structure.
951 */
952 port->irq = 0;
953 port->irqflags = 0;
954 port->mapbase = reg->start;
955
956 port->membase = devm_ioremap_resource(&pdev->dev, reg);
957 if (IS_ERR(port->membase))
958 return PTR_ERR(port->membase);
959
960 mvuart = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_uart),
961 GFP_KERNEL);
962 if (!mvuart)
963 return -ENOMEM;
964
965 /* Get controller data depending on the compatible string */
966 mvuart->data = (struct mvebu_uart_driver_data *)match->data;
967 mvuart->port = port;
968
969 port->private_data = mvuart;
970 platform_set_drvdata(pdev, mvuart);
971
972 /* Get fixed clock frequency */
973 mvuart->clk = devm_clk_get(&pdev->dev, NULL);
974 if (IS_ERR(mvuart->clk)) {
975 if (PTR_ERR(mvuart->clk) == -EPROBE_DEFER)
976 return PTR_ERR(mvuart->clk);
977
978 if (IS_EXTENDED(port)) {
979 dev_err(&pdev->dev, "unable to get UART clock\n");
980 return PTR_ERR(mvuart->clk);
981 }
982 } else {
983 if (!clk_prepare_enable(mvuart->clk))
984 port->uartclk = clk_get_rate(mvuart->clk);
985 }
986
987 /* Manage interrupts */
988 if (platform_irq_count(pdev) == 1) {
989 /* Old bindings: no name on the single unnamed UART0 IRQ */
990 irq = platform_get_irq(pdev, 0);
991 if (irq < 0)
992 return irq;
993
994 mvuart->irq[UART_IRQ_SUM] = irq;
995 } else {
996 /*
997 * New bindings: named interrupts (RX, TX) for both UARTs;
998 * only make use of the uart-rx and uart-tx interrupts, and do
999 * not use the uart-sum interrupt of the UART0 port.
1000 */
1001 irq = platform_get_irq_byname(pdev, "uart-rx");
1002 if (irq < 0)
1003 return irq;
1004
1005 mvuart->irq[UART_RX_IRQ] = irq;
1006
1007 irq = platform_get_irq_byname(pdev, "uart-tx");
1008 if (irq < 0)
1009 return irq;
1010
1011 mvuart->irq[UART_TX_IRQ] = irq;
1012 }
1013
1014 /* UART Soft Reset */
1015 writel(CTRL_SOFT_RST, port->membase + UART_CTRL(port));
1016 udelay(1);
1017 writel(0, port->membase + UART_CTRL(port));
1018
1019 return uart_add_one_port(&mvebu_uart_driver, port);
1020 }
1021
1022 static struct mvebu_uart_driver_data uart_std_driver_data = {
1023 .is_ext = false,
1024 .regs.rbr = UART_STD_RBR,
1025 .regs.tsh = UART_STD_TSH,
1026 .regs.ctrl = UART_STD_CTRL1,
1027 .regs.intr = UART_STD_CTRL2,
1028 .flags.ctrl_tx_rdy_int = CTRL_STD_TX_RDY_INT,
1029 .flags.ctrl_rx_rdy_int = CTRL_STD_RX_RDY_INT,
1030 .flags.stat_tx_rdy = STAT_STD_TX_RDY,
1031 .flags.stat_rx_rdy = STAT_STD_RX_RDY,
1032 };
1033
1034 static struct mvebu_uart_driver_data uart_ext_driver_data = {
1035 .is_ext = true,
1036 .regs.rbr = UART_EXT_RBR,
1037 .regs.tsh = UART_EXT_TSH,
1038 .regs.ctrl = UART_EXT_CTRL1,
1039 .regs.intr = UART_EXT_CTRL2,
1040 .flags.ctrl_tx_rdy_int = CTRL_EXT_TX_RDY_INT,
1041 .flags.ctrl_rx_rdy_int = CTRL_EXT_RX_RDY_INT,
1042 .flags.stat_tx_rdy = STAT_EXT_TX_RDY,
1043 .flags.stat_rx_rdy = STAT_EXT_RX_RDY,
1044 };
1045
1046 /* Match table for of_platform binding */
1047 static const struct of_device_id mvebu_uart_of_match[] = {
1048 {
1049 .compatible = "marvell,armada-3700-uart",
1050 .data = (void *)&uart_std_driver_data,
1051 },
1052 {
1053 .compatible = "marvell,armada-3700-uart-ext",
1054 .data = (void *)&uart_ext_driver_data,
1055 },
1056 {}
1057 };
1058
1059 static struct platform_driver mvebu_uart_platform_driver = {
1060 .probe = mvebu_uart_probe,
1061 .driver = {
1062 .name = "mvebu-uart",
1063 .of_match_table = of_match_ptr(mvebu_uart_of_match),
1064 .suppress_bind_attrs = true,
1065 #if defined(CONFIG_PM)
1066 .pm = &mvebu_uart_pm_ops,
1067 #endif /* CONFIG_PM */
1068 },
1069 };
1070
1071 /* This code is based on clk-fixed-factor.c driver and modified. */
1072
1073 struct mvebu_uart_clock {
1074 struct clk_hw clk_hw;
1075 int clock_idx;
1076 u32 pm_context_reg1;
1077 u32 pm_context_reg2;
1078 };
1079
1080 struct mvebu_uart_clock_base {
1081 struct mvebu_uart_clock clocks[2];
1082 unsigned int parent_rates[5];
1083 int parent_idx;
1084 unsigned int div;
1085 void __iomem *reg1;
1086 void __iomem *reg2;
1087 bool configured;
1088 };
1089
1090 #define PARENT_CLOCK_XTAL 4
1091
1092 #define to_uart_clock(hw) container_of(hw, struct mvebu_uart_clock, clk_hw)
1093 #define to_uart_clock_base(uart_clock) container_of(uart_clock, \
1094 struct mvebu_uart_clock_base, clocks[uart_clock->clock_idx])
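/*
 * to_uart_clock_base() relies on uart_clock->clock_idx matching the index of
 * this clock within the clocks[] array, so container_of() on
 * clocks[clock_idx] recovers the enclosing mvebu_uart_clock_base.
 */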
1095
1096 static int mvebu_uart_clock_prepare(struct clk_hw *hw)
1097 {
1098 struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
1099 struct mvebu_uart_clock_base *uart_clock_base =
1100 to_uart_clock_base(uart_clock);
1101 unsigned int prev_clock_idx, prev_clock_rate, prev_d1d2;
1102 unsigned int parent_clock_idx, parent_clock_rate;
1103 unsigned long flags;
1104 unsigned int d1, d2;
1105 u64 divisor;
1106 u32 val;
1107
1108 /*
1109 * This function just reconfigures the UART Clock Control register
1110 * (located in the UART1 address space, which controls both UART1 and
1111 * UART2) to the selected UART base clock and recalculates the current
1112 * UART1/UART2 divisors in their address spaces, so that the final
1113 * baudrate is not changed by switching the UART parent clock. This is
1114 * required because otherwise the kernel's boot log stops working - we
1115 * need to ensure that the UART baudrate does not change during this
1116 * setup. It is a one-time operation: it executes only once, sets
1117 * `configured` to true, and is skipped on subsequent calls. Because
1118 * the UART Clock Control register (UART_BRDV) is shared between the
1119 * UART1 baudrate function, the UART1 clock selector and the UART2
1120 * clock selector, every access to UART_BRDV (reg1) must be protected by a lock.
1121 */
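/*
 * Worked example (hypothetical rates, not taken from this file): switching
 * from a 25 MHz XTAL (prev_d1d2 = 1) holding a BRDV divisor of 14
 * (~111607 Bd with the x16 scheme) to an 800 MHz TBG with d1 = 2, d2 = 1
 * rescales the divisor to
 * DIV_U64_ROUND_CLOSEST(14 * 800000000 * 1, 25000000 * 2 * 1) = 224, and
 * 800000000 / (2 * 1 * 224 * 16) is again ~111607 Bd, so the console line
 * rate is preserved across the switch.
 */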
1122
1123 spin_lock_irqsave(&mvebu_uart_lock, flags);
1124
1125 if (uart_clock_base->configured) {
1126 spin_unlock_irqrestore(&mvebu_uart_lock, flags);
1127 return 0;
1128 }
1129
1130 parent_clock_idx = uart_clock_base->parent_idx;
1131 parent_clock_rate = uart_clock_base->parent_rates[parent_clock_idx];
1132
1133 val = readl(uart_clock_base->reg1);
1134
1135 if (uart_clock_base->div > CLK_TBG_DIV1_MAX) {
1136 d1 = CLK_TBG_DIV1_MAX;
1137 d2 = uart_clock_base->div / CLK_TBG_DIV1_MAX;
1138 } else {
1139 d1 = uart_clock_base->div;
1140 d2 = 1;
1141 }
1142
1143 if (val & CLK_NO_XTAL) {
1144 prev_clock_idx = (val >> CLK_TBG_SEL_SHIFT) & CLK_TBG_SEL_MASK;
1145 prev_d1d2 = ((val >> CLK_TBG_DIV1_SHIFT) & CLK_TBG_DIV1_MASK) *
1146 ((val >> CLK_TBG_DIV2_SHIFT) & CLK_TBG_DIV2_MASK);
1147 } else {
1148 prev_clock_idx = PARENT_CLOCK_XTAL;
1149 prev_d1d2 = 1;
1150 }
1151
1152 /* Note that uart_clock_base->parent_rates[i] may not be available */
1153 prev_clock_rate = uart_clock_base->parent_rates[prev_clock_idx];
1154
1155 /* Recalculate UART1 divisor so UART1 baudrate does not change */
1156 if (prev_clock_rate) {
1157 divisor = DIV_U64_ROUND_CLOSEST((u64)(val & BRDV_BAUD_MASK) *
1158 parent_clock_rate * prev_d1d2,
1159 prev_clock_rate * d1 * d2);
1160 if (divisor < 1)
1161 divisor = 1;
1162 else if (divisor > BRDV_BAUD_MAX)
1163 divisor = BRDV_BAUD_MAX;
1164 val = (val & ~BRDV_BAUD_MASK) | divisor;
1165 }
1166
1167 if (parent_clock_idx != PARENT_CLOCK_XTAL) {
1168 /* Do not use XTAL, select TBG clock and TBG d1 * d2 divisors */
1169 val |= CLK_NO_XTAL;
1170 val &= ~(CLK_TBG_DIV1_MASK << CLK_TBG_DIV1_SHIFT);
1171 val |= d1 << CLK_TBG_DIV1_SHIFT;
1172 val &= ~(CLK_TBG_DIV2_MASK << CLK_TBG_DIV2_SHIFT);
1173 val |= d2 << CLK_TBG_DIV2_SHIFT;
1174 val &= ~(CLK_TBG_SEL_MASK << CLK_TBG_SEL_SHIFT);
1175 val |= parent_clock_idx << CLK_TBG_SEL_SHIFT;
1176 } else {
1177 /* Use XTAL, TBG bits are then ignored */
1178 val &= ~CLK_NO_XTAL;
1179 }
1180
1181 writel(val, uart_clock_base->reg1);
1182
1183 /* Recalculate UART2 divisor so UART2 baudrate does not change */
1184 if (prev_clock_rate) {
1185 val = readl(uart_clock_base->reg2);
1186 divisor = DIV_U64_ROUND_CLOSEST((u64)(val & BRDV_BAUD_MASK) *
1187 parent_clock_rate * prev_d1d2,
1188 prev_clock_rate * d1 * d2);
1189 if (divisor < 1)
1190 divisor = 1;
1191 else if (divisor > BRDV_BAUD_MAX)
1192 divisor = BRDV_BAUD_MAX;
1193 val = (val & ~BRDV_BAUD_MASK) | divisor;
1194 writel(val, uart_clock_base->reg2);
1195 }
1196
1197 uart_clock_base->configured = true;
1198
1199 spin_unlock_irqrestore(&mvebu_uart_lock, flags);
1200
1201 return 0;
1202 }
1203
1204 static int mvebu_uart_clock_enable(struct clk_hw *hw)
1205 {
1206 struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
1207 struct mvebu_uart_clock_base *uart_clock_base =
1208 to_uart_clock_base(uart_clock);
1209 unsigned long flags;
1210 u32 val;
1211
1212 spin_lock_irqsave(&mvebu_uart_lock, flags);
1213
1214 val = readl(uart_clock_base->reg1);
1215
1216 if (uart_clock->clock_idx == 0)
1217 val &= ~UART1_CLK_DIS;
1218 else
1219 val &= ~UART2_CLK_DIS;
1220
1221 writel(val, uart_clock_base->reg1);
1222
1223 spin_unlock_irqrestore(&mvebu_uart_lock, flags);
1224
1225 return 0;
1226 }
1227
1228 static void mvebu_uart_clock_disable(struct clk_hw *hw)
1229 {
1230 struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
1231 struct mvebu_uart_clock_base *uart_clock_base =
1232 to_uart_clock_base(uart_clock);
1233 unsigned long flags;
1234 u32 val;
1235
1236 spin_lock_irqsave(&mvebu_uart_lock, flags);
1237
1238 val = readl(uart_clock_base->reg1);
1239
1240 if (uart_clock->clock_idx == 0)
1241 val |= UART1_CLK_DIS;
1242 else
1243 val |= UART2_CLK_DIS;
1244
1245 writel(val, uart_clock_base->reg1);
1246
1247 spin_unlock_irqrestore(&mvebu_uart_lock, flags);
1248 }
1249
1250 static int mvebu_uart_clock_is_enabled(struct clk_hw *hw)
1251 {
1252 struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
1253 struct mvebu_uart_clock_base *uart_clock_base =
1254 to_uart_clock_base(uart_clock);
1255 u32 val;
1256
1257 val = readl(uart_clock_base->reg1);
1258
1259 if (uart_clock->clock_idx == 0)
1260 return !(val & UART1_CLK_DIS);
1261 else
1262 return !(val & UART2_CLK_DIS);
1263 }
1264
1265 static int mvebu_uart_clock_save_context(struct clk_hw *hw)
1266 {
1267 struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
1268 struct mvebu_uart_clock_base *uart_clock_base =
1269 to_uart_clock_base(uart_clock);
1270 unsigned long flags;
1271
1272 spin_lock_irqsave(&mvebu_uart_lock, flags);
1273 uart_clock->pm_context_reg1 = readl(uart_clock_base->reg1);
1274 uart_clock->pm_context_reg2 = readl(uart_clock_base->reg2);
1275 spin_unlock_irqrestore(&mvebu_uart_lock, flags);
1276
1277 return 0;
1278 }
1279
1280 static void mvebu_uart_clock_restore_context(struct clk_hw *hw)
1281 {
1282 struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
1283 struct mvebu_uart_clock_base *uart_clock_base =
1284 to_uart_clock_base(uart_clock);
1285 unsigned long flags;
1286
1287 spin_lock_irqsave(&mvebu_uart_lock, flags);
1288 writel(uart_clock->pm_context_reg1, uart_clock_base->reg1);
1289 writel(uart_clock->pm_context_reg2, uart_clock_base->reg2);
1290 spin_unlock_irqrestore(&mvebu_uart_lock, flags);
1291 }
1292
1293 static unsigned long mvebu_uart_clock_recalc_rate(struct clk_hw *hw,
1294 unsigned long parent_rate)
1295 {
1296 struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
1297 struct mvebu_uart_clock_base *uart_clock_base =
1298 to_uart_clock_base(uart_clock);
1299
1300 return parent_rate / uart_clock_base->div;
1301 }
1302
1303 static long mvebu_uart_clock_round_rate(struct clk_hw *hw, unsigned long rate,
1304 unsigned long *parent_rate)
1305 {
1306 struct mvebu_uart_clock *uart_clock = to_uart_clock(hw);
1307 struct mvebu_uart_clock_base *uart_clock_base =
1308 to_uart_clock_base(uart_clock);
1309
1310 return *parent_rate / uart_clock_base->div;
1311 }
1312
1313 static int mvebu_uart_clock_set_rate(struct clk_hw *hw, unsigned long rate,
1314 unsigned long parent_rate)
1315 {
1316 /*
1317 * We must report success but we can do so unconditionally because
1318 * mvebu_uart_clock_round_rate returns values that ensure this call is a
1319 * nop.
1320 */
1321
1322 return 0;
1323 }
1324
1325 static const struct clk_ops mvebu_uart_clock_ops = {
1326 .prepare = mvebu_uart_clock_prepare,
1327 .enable = mvebu_uart_clock_enable,
1328 .disable = mvebu_uart_clock_disable,
1329 .is_enabled = mvebu_uart_clock_is_enabled,
1330 .save_context = mvebu_uart_clock_save_context,
1331 .restore_context = mvebu_uart_clock_restore_context,
1332 .round_rate = mvebu_uart_clock_round_rate,
1333 .set_rate = mvebu_uart_clock_set_rate,
1334 .recalc_rate = mvebu_uart_clock_recalc_rate,
1335 };
1336
1337 static int mvebu_uart_clock_register(struct device *dev,
1338 struct mvebu_uart_clock *uart_clock,
1339 const char *name,
1340 const char *parent_name)
1341 {
1342 struct clk_init_data init = { };
1343
1344 uart_clock->clk_hw.init = &init;
1345
1346 init.name = name;
1347 init.ops = &mvebu_uart_clock_ops;
1348 init.flags = 0;
1349 init.num_parents = 1;
1350 init.parent_names = &parent_name;
1351
1352 return devm_clk_hw_register(dev, &uart_clock->clk_hw);
1353 }
1354
1355 static int mvebu_uart_clock_probe(struct platform_device *pdev)
1356 {
1357 static const char *const uart_clk_names[] = { "uart_1", "uart_2" };
1358 static const char *const parent_clk_names[] = { "TBG-A-P", "TBG-B-P",
1359 "TBG-A-S", "TBG-B-S",
1360 "xtal" };
1361 struct clk *parent_clks[ARRAY_SIZE(parent_clk_names)];
1362 struct mvebu_uart_clock_base *uart_clock_base;
1363 struct clk_hw_onecell_data *hw_clk_data;
1364 struct device *dev = &pdev->dev;
1365 int i, parent_clk_idx, ret;
1366 unsigned long div, rate;
1367 struct resource *res;
1368 unsigned int d1, d2;
1369
1370 BUILD_BUG_ON(ARRAY_SIZE(uart_clk_names) !=
1371 ARRAY_SIZE(uart_clock_base->clocks));
1372 BUILD_BUG_ON(ARRAY_SIZE(parent_clk_names) !=
1373 ARRAY_SIZE(uart_clock_base->parent_rates));
1374
1375 uart_clock_base = devm_kzalloc(dev,
1376 sizeof(*uart_clock_base),
1377 GFP_KERNEL);
1378 if (!uart_clock_base)
1379 return -ENOMEM;
1380
1381 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1382 if (!res) {
1383 dev_err(dev, "Couldn't get first register\n");
1384 return -ENOENT;
1385 }
1386
1387 /*
1388 * UART Clock Control register (reg1 / UART_BRDV) is in the address
1389 * space of UART1 (standard UART variant), controls parent clock and
1390 * dividers for both UART1 and UART2 and is supplied via DT as the first
1391 * resource. Therefore use ioremap() rather than ioremap_resource() to
1392 * avoid conflicts with the UART1 driver. Access to UART_BRDV is
1393 * protected by a lock shared between the clock and UART drivers.
1394 */
1395 uart_clock_base->reg1 = devm_ioremap(dev, res->start,
1396 resource_size(res));
1397 if (!uart_clock_base->reg1)
1398 return -ENOMEM;
1399
1400 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1401 if (!res) {
1402 dev_err(dev, "Couldn't get second register\n");
1403 return -ENOENT;
1404 }
1405
1406 /*
1407 * The UART 2 Baud Rate Divisor register (reg2 / UART_BRDV) is in the
1408 * address space of UART2 (extended UART variant), controls only one
1409 * UART2-specific divider and is supplied via DT as the second resource.
1410 * Therefore use ioremap() rather than ioremap_resource() to avoid
1411 * conflicts with the UART2 driver. Access to UART_BRDV is protected by
1412 * a lock shared between the clock and UART drivers.
1413 */
1414 uart_clock_base->reg2 = devm_ioremap(dev, res->start,
1415 resource_size(res));
1416 if (!uart_clock_base->reg2)
1417 return -ENOMEM;
1418
1419 hw_clk_data = devm_kzalloc(dev,
1420 struct_size(hw_clk_data, hws,
1421 ARRAY_SIZE(uart_clk_names)),
1422 GFP_KERNEL);
1423 if (!hw_clk_data)
1424 return -ENOMEM;
1425
1426 hw_clk_data->num = ARRAY_SIZE(uart_clk_names);
1427 for (i = 0; i < ARRAY_SIZE(uart_clk_names); i++) {
1428 hw_clk_data->hws[i] = &uart_clock_base->clocks[i].clk_hw;
1429 uart_clock_base->clocks[i].clock_idx = i;
1430 }
1431
1432 parent_clk_idx = -1;
1433
1434 for (i = 0; i < ARRAY_SIZE(parent_clk_names); i++) {
1435 parent_clks[i] = devm_clk_get(dev, parent_clk_names[i]);
1436 if (IS_ERR(parent_clks[i])) {
1437 if (PTR_ERR(parent_clks[i]) == -EPROBE_DEFER)
1438 return -EPROBE_DEFER;
1439 dev_warn(dev, "Couldn't get the parent clock %s: %ld\n",
1440 parent_clk_names[i], PTR_ERR(parent_clks[i]));
1441 continue;
1442 }
1443
1444 ret = clk_prepare_enable(parent_clks[i]);
1445 if (ret) {
1446 dev_warn(dev, "Couldn't enable parent clock %s: %d\n",
1447 parent_clk_names[i], ret);
1448 continue;
1449 }
1450 rate = clk_get_rate(parent_clks[i]);
1451 uart_clock_base->parent_rates[i] = rate;
1452
1453 if (i != PARENT_CLOCK_XTAL) {
1454 /*
1455 * Calculate the smallest TBG d1 and d2 divisors that
1456 * still can provide 9600 baudrate.
1457 */
1458 d1 = DIV_ROUND_UP(rate, 9600 * OSAMP_MAX_DIVISOR *
1459 BRDV_BAUD_MAX);
1460 if (d1 < 1)
1461 d1 = 1;
1462 else if (d1 > CLK_TBG_DIV1_MAX)
1463 d1 = CLK_TBG_DIV1_MAX;
1464
1465 d2 = DIV_ROUND_UP(rate, 9600 * OSAMP_MAX_DIVISOR *
1466 BRDV_BAUD_MAX * d1);
1467 if (d2 < 1)
1468 d2 = 1;
1469 else if (d2 > CLK_TBG_DIV2_MAX)
1470 d2 = CLK_TBG_DIV2_MAX;
1471 } else {
1472 /*
1473 * When UART clock uses XTAL clock as a source then it
1474 * is not possible to use d1 and d2 divisors.
1475 */
1476 d1 = d2 = 1;
1477 }
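/*
 * Worked example (hypothetical 800 MHz TBG rate): d1 =
 * DIV_ROUND_UP(800000000, 9600 * 63 * 1023) = 2, then d2 =
 * DIV_ROUND_UP(800000000, 9600 * 63 * 1023 * 2) = 1, i.e. an overall
 * pre-division by 2.
 */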
1478
1479 /* Skip clock source which cannot provide 9600 baudrate */
1480 if (rate > 9600 * OSAMP_MAX_DIVISOR * BRDV_BAUD_MAX * d1 * d2)
1481 continue;
1482
1483 /*
1484 * Choose TBG clock source with the smallest divisors. Use XTAL
1485 * clock source only in case TBG is not available as XTAL cannot
1486 * be used for baudrates higher than 230400.
1487 */
1488 if (parent_clk_idx == -1 ||
1489 (i != PARENT_CLOCK_XTAL && div > d1 * d2)) {
1490 parent_clk_idx = i;
1491 div = d1 * d2;
1492 }
1493 }
1494
1495 for (i = 0; i < ARRAY_SIZE(parent_clk_names); i++) {
1496 if (i == parent_clk_idx || IS_ERR(parent_clks[i]))
1497 continue;
1498 clk_disable_unprepare(parent_clks[i]);
1499 devm_clk_put(dev, parent_clks[i]);
1500 }
1501
1502 if (parent_clk_idx == -1) {
1503 dev_err(dev, "No usable parent clock\n");
1504 return -ENOENT;
1505 }
1506
1507 uart_clock_base->parent_idx = parent_clk_idx;
1508 uart_clock_base->div = div;
1509
1510 dev_notice(dev, "Using parent clock %s as base UART clock\n",
1511 __clk_get_name(parent_clks[parent_clk_idx]));
1512
1513 for (i = 0; i < ARRAY_SIZE(uart_clk_names); i++) {
1514 ret = mvebu_uart_clock_register(dev,
1515 &uart_clock_base->clocks[i],
1516 uart_clk_names[i],
1517 __clk_get_name(parent_clks[parent_clk_idx]));
1518 if (ret) {
1519 dev_err(dev, "Can't register UART clock %d: %d\n",
1520 i, ret);
1521 return ret;
1522 }
1523 }
1524
1525 return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
1526 hw_clk_data);
1527 }
1528
1529 static const struct of_device_id mvebu_uart_clock_of_match[] = {
1530 { .compatible = "marvell,armada-3700-uart-clock", },
1531 { }
1532 };
1533
1534 static struct platform_driver mvebu_uart_clock_platform_driver = {
1535 .probe = mvebu_uart_clock_probe,
1536 .driver = {
1537 .name = "mvebu-uart-clock",
1538 .of_match_table = mvebu_uart_clock_of_match,
1539 },
1540 };
1541
1542 static int __init mvebu_uart_init(void)
1543 {
1544 int ret;
1545
1546 ret = uart_register_driver(&mvebu_uart_driver);
1547 if (ret)
1548 return ret;
1549
1550 ret = platform_driver_register(&mvebu_uart_clock_platform_driver);
1551 if (ret) {
1552 uart_unregister_driver(&mvebu_uart_driver);
1553 return ret;
1554 }
1555
1556 ret = platform_driver_register(&mvebu_uart_platform_driver);
1557 if (ret) {
1558 platform_driver_unregister(&mvebu_uart_clock_platform_driver);
1559 uart_unregister_driver(&mvebu_uart_driver);
1560 return ret;
1561 }
1562
1563 return 0;
1564 }
1565 arch_initcall(mvebu_uart_init);
1566