Lines Matching "rs485-enabled-at-boot-time"

1 // SPDX-License-Identifier: GPL-2.0+
9 * Copyright (C) 2010 ST-Ericsson SA
11 * This is a generic driver for ARM AMBA-type serial ports. They
12 * have a lot of 16550-like features, but are not register compatible.
34 #include <linux/dma-mapping.h>
45 #include "amba-pl011.h"
74 /* There is by now at least one vendor with differing details, so handle it */
264 unsigned int fifosize; /* vendor-specific */
266 unsigned int fixed_baud; /* vendor-set fixed baud rate */
285 return uap->reg_offset[reg]; in pl011_reg_to_offset()
291 void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg); in pl011_read()
293 return (uap->port.iotype == UPIO_MEM32) ? in pl011_read()
300 void __iomem *addr = uap->port.membase + pl011_reg_to_offset(uap, reg); in pl011_write()
302 if (uap->port.iotype == UPIO_MEM32) in pl011_write()
327 uap->port.icount.rx++; in pl011_fifo_to_tty()
332 uap->port.icount.brk++; in pl011_fifo_to_tty()
333 if (uart_handle_break(&uap->port)) in pl011_fifo_to_tty()
336 uap->port.icount.parity++; in pl011_fifo_to_tty()
338 uap->port.icount.frame++; in pl011_fifo_to_tty()
340 uap->port.icount.overrun++; in pl011_fifo_to_tty()
342 ch &= uap->port.read_status_mask; in pl011_fifo_to_tty()
352 spin_unlock(&uap->port.lock); in pl011_fifo_to_tty()
353 sysrq = uart_handle_sysrq_char(&uap->port, ch & 255); in pl011_fifo_to_tty()
354 spin_lock(&uap->port.lock); in pl011_fifo_to_tty()
357 uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag); in pl011_fifo_to_tty()
378 sg->buf = dma_alloc_coherent(chan->device->dev, in pl011_sgbuf_init()
380 if (!sg->buf) in pl011_sgbuf_init()
381 return -ENOMEM; in pl011_sgbuf_init()
383 sg_init_table(&sg->sg, 1); in pl011_sgbuf_init()
384 sg_set_page(&sg->sg, phys_to_page(dma_addr), in pl011_sgbuf_init()
386 sg_dma_address(&sg->sg) = dma_addr; in pl011_sgbuf_init()
387 sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE; in pl011_sgbuf_init()
395 if (sg->buf) { in pl011_sgbuf_free()
396 dma_free_coherent(chan->device->dev, in pl011_sgbuf_free()
397 PL011_DMA_BUFFER_SIZE, sg->buf, in pl011_sgbuf_free()
398 sg_dma_address(&sg->sg)); in pl011_sgbuf_free()
405 struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev); in pl011_dma_probe()
406 struct device *dev = uap->port.dev; in pl011_dma_probe()
408 .dst_addr = uap->port.mapbase + in pl011_dma_probe()
412 .dst_maxburst = uap->fifosize >> 1, in pl011_dma_probe()
418 uap->dma_probed = true; in pl011_dma_probe()
421 if (PTR_ERR(chan) == -EPROBE_DEFER) { in pl011_dma_probe()
422 uap->dma_probed = false; in pl011_dma_probe()
427 if (!plat || !plat->dma_filter) { in pl011_dma_probe()
428 dev_info(uap->port.dev, "no DMA platform data\n"); in pl011_dma_probe()
436 chan = dma_request_channel(mask, plat->dma_filter, in pl011_dma_probe()
437 plat->dma_tx_param); in pl011_dma_probe()
439 dev_err(uap->port.dev, "no TX DMA channel!\n"); in pl011_dma_probe()
445 uap->dmatx.chan = chan; in pl011_dma_probe()
447 dev_info(uap->port.dev, "DMA channel TX %s\n", in pl011_dma_probe()
448 dma_chan_name(uap->dmatx.chan)); in pl011_dma_probe()
453 if (!chan && plat && plat->dma_rx_param) { in pl011_dma_probe()
454 chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param); in pl011_dma_probe()
457 dev_err(uap->port.dev, "no RX DMA channel!\n"); in pl011_dma_probe()
464 .src_addr = uap->port.mapbase + in pl011_dma_probe()
468 .src_maxburst = uap->fifosize >> 2, in pl011_dma_probe()
482 dev_info(uap->port.dev, in pl011_dma_probe()
483 "RX DMA disabled - no residue processing\n"); in pl011_dma_probe()
488 uap->dmarx.chan = chan; in pl011_dma_probe()
490 uap->dmarx.auto_poll_rate = false; in pl011_dma_probe()
491 if (plat && plat->dma_rx_poll_enable) { in pl011_dma_probe()
493 if (plat->dma_rx_poll_rate) { in pl011_dma_probe()
494 uap->dmarx.auto_poll_rate = false; in pl011_dma_probe()
495 uap->dmarx.poll_rate = plat->dma_rx_poll_rate; in pl011_dma_probe()
500 * the baud rate at set_termios. in pl011_dma_probe()
502 uap->dmarx.auto_poll_rate = true; in pl011_dma_probe()
503 uap->dmarx.poll_rate = 100; in pl011_dma_probe()
506 if (plat->dma_rx_poll_timeout) in pl011_dma_probe()
507 uap->dmarx.poll_timeout = in pl011_dma_probe()
508 plat->dma_rx_poll_timeout; in pl011_dma_probe()
510 uap->dmarx.poll_timeout = 3000; in pl011_dma_probe()
511 } else if (!plat && dev->of_node) { in pl011_dma_probe()
512 uap->dmarx.auto_poll_rate = of_property_read_bool( in pl011_dma_probe()
513 dev->of_node, "auto-poll"); in pl011_dma_probe()
514 if (uap->dmarx.auto_poll_rate) { in pl011_dma_probe()
517 if (0 == of_property_read_u32(dev->of_node, in pl011_dma_probe()
518 "poll-rate-ms", &x)) in pl011_dma_probe()
519 uap->dmarx.poll_rate = x; in pl011_dma_probe()
521 uap->dmarx.poll_rate = 100; in pl011_dma_probe()
522 if (0 == of_property_read_u32(dev->of_node, in pl011_dma_probe()
523 "poll-timeout-ms", &x)) in pl011_dma_probe()
524 uap->dmarx.poll_timeout = x; in pl011_dma_probe()
526 uap->dmarx.poll_timeout = 3000; in pl011_dma_probe()
529 dev_info(uap->port.dev, "DMA channel RX %s\n", in pl011_dma_probe()
530 dma_chan_name(uap->dmarx.chan)); in pl011_dma_probe()
536 if (uap->dmatx.chan) in pl011_dma_remove()
537 dma_release_channel(uap->dmatx.chan); in pl011_dma_remove()
538 if (uap->dmarx.chan) in pl011_dma_remove()
539 dma_release_channel(uap->dmarx.chan); in pl011_dma_remove()
553 struct pl011_dmatx_data *dmatx = &uap->dmatx; in pl011_dma_tx_callback()
557 spin_lock_irqsave(&uap->port.lock, flags); in pl011_dma_tx_callback()
558 if (uap->dmatx.queued) in pl011_dma_tx_callback()
559 dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1, in pl011_dma_tx_callback()
562 dmacr = uap->dmacr; in pl011_dma_tx_callback()
563 uap->dmacr = dmacr & ~UART011_TXDMAE; in pl011_dma_tx_callback()
564 pl011_write(uap->dmacr, uap, REG_DMACR); in pl011_dma_tx_callback()
568 * some reason (eg, XOFF received, or we want to send an X-char.) in pl011_dma_tx_callback()
571 * and the rest of the driver - if the driver disables TX DMA while in pl011_dma_tx_callback()
575 if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) || in pl011_dma_tx_callback()
576 uart_circ_empty(&uap->port.state->xmit)) { in pl011_dma_tx_callback()
577 uap->dmatx.queued = false; in pl011_dma_tx_callback()
578 spin_unlock_irqrestore(&uap->port.lock, flags); in pl011_dma_tx_callback()
585 * have data pending to be sent. Re-enable the TX IRQ. in pl011_dma_tx_callback()
589 spin_unlock_irqrestore(&uap->port.lock, flags); in pl011_dma_tx_callback()
602 struct pl011_dmatx_data *dmatx = &uap->dmatx; in pl011_dma_tx_refill()
603 struct dma_chan *chan = dmatx->chan; in pl011_dma_tx_refill()
604 struct dma_device *dma_dev = chan->device; in pl011_dma_tx_refill()
606 struct circ_buf *xmit = &uap->port.state->xmit; in pl011_dma_tx_refill()
613 * issue a uart_write_wakeup() at the appropriate time. in pl011_dma_tx_refill()
616 if (count < (uap->fifosize >> 1)) { in pl011_dma_tx_refill()
617 uap->dmatx.queued = false; in pl011_dma_tx_refill()
625 count -= 1; in pl011_dma_tx_refill()
631 if (xmit->tail < xmit->head) in pl011_dma_tx_refill()
632 memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count); in pl011_dma_tx_refill()
634 size_t first = UART_XMIT_SIZE - xmit->tail; in pl011_dma_tx_refill()
639 second = count - first; in pl011_dma_tx_refill()
641 memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first); in pl011_dma_tx_refill()
643 memcpy(&dmatx->buf[first], &xmit->buf[0], second); in pl011_dma_tx_refill()
646 dmatx->sg.length = count; in pl011_dma_tx_refill()
648 if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) { in pl011_dma_tx_refill()
649 uap->dmatx.queued = false; in pl011_dma_tx_refill()
650 dev_dbg(uap->port.dev, "unable to map TX DMA\n"); in pl011_dma_tx_refill()
651 return -EBUSY; in pl011_dma_tx_refill()
654 desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV, in pl011_dma_tx_refill()
657 dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE); in pl011_dma_tx_refill()
658 uap->dmatx.queued = false; in pl011_dma_tx_refill()
663 dev_dbg(uap->port.dev, "TX DMA busy\n"); in pl011_dma_tx_refill()
664 return -EBUSY; in pl011_dma_tx_refill()
668 desc->callback = pl011_dma_tx_callback; in pl011_dma_tx_refill()
669 desc->callback_param = uap; in pl011_dma_tx_refill()
671 /* All errors should happen at prepare time */ in pl011_dma_tx_refill()
675 dma_dev->device_issue_pending(chan); in pl011_dma_tx_refill()
677 uap->dmacr |= UART011_TXDMAE; in pl011_dma_tx_refill()
678 pl011_write(uap->dmacr, uap, REG_DMACR); in pl011_dma_tx_refill()
679 uap->dmatx.queued = true; in pl011_dma_tx_refill()
685 xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1); in pl011_dma_tx_refill()
686 uap->port.icount.tx += count; in pl011_dma_tx_refill()
689 uart_write_wakeup(&uap->port); in pl011_dma_tx_refill()
695 * We received a transmit interrupt without a pending X-char but with
704 if (!uap->using_tx_dma) in pl011_dma_tx_irq()
709 * TX interrupt, it will be because we've just sent an X-char. in pl011_dma_tx_irq()
710 * Ensure the TX DMA is enabled and the TX IRQ is disabled. in pl011_dma_tx_irq()
712 if (uap->dmatx.queued) { in pl011_dma_tx_irq()
713 uap->dmacr |= UART011_TXDMAE; in pl011_dma_tx_irq()
714 pl011_write(uap->dmacr, uap, REG_DMACR); in pl011_dma_tx_irq()
715 uap->im &= ~UART011_TXIM; in pl011_dma_tx_irq()
716 pl011_write(uap->im, uap, REG_IMSC); in pl011_dma_tx_irq()
725 uap->im &= ~UART011_TXIM; in pl011_dma_tx_irq()
726 pl011_write(uap->im, uap, REG_IMSC); in pl011_dma_tx_irq()
738 if (uap->dmatx.queued) { in pl011_dma_tx_stop()
739 uap->dmacr &= ~UART011_TXDMAE; in pl011_dma_tx_stop()
740 pl011_write(uap->dmacr, uap, REG_DMACR); in pl011_dma_tx_stop()
749 * false if we want the TX IRQ to be enabled
756 if (!uap->using_tx_dma) in pl011_dma_tx_start()
759 if (!uap->port.x_char) { in pl011_dma_tx_start()
760 /* no X-char, try to push chars out in DMA mode */ in pl011_dma_tx_start()
763 if (!uap->dmatx.queued) { in pl011_dma_tx_start()
765 uap->im &= ~UART011_TXIM; in pl011_dma_tx_start()
766 pl011_write(uap->im, uap, REG_IMSC); in pl011_dma_tx_start()
769 } else if (!(uap->dmacr & UART011_TXDMAE)) { in pl011_dma_tx_start()
770 uap->dmacr |= UART011_TXDMAE; in pl011_dma_tx_start()
771 pl011_write(uap->dmacr, uap, REG_DMACR); in pl011_dma_tx_start()
777 * We have an X-char to send. Disable DMA to prevent it loading in pl011_dma_tx_start()
780 dmacr = uap->dmacr; in pl011_dma_tx_start()
781 uap->dmacr &= ~UART011_TXDMAE; in pl011_dma_tx_start()
782 pl011_write(uap->dmacr, uap, REG_DMACR); in pl011_dma_tx_start()
788 * loaded the character, we should just re-enable DMA. in pl011_dma_tx_start()
793 pl011_write(uap->port.x_char, uap, REG_DR); in pl011_dma_tx_start()
794 uap->port.icount.tx++; in pl011_dma_tx_start()
795 uap->port.x_char = 0; in pl011_dma_tx_start()
797 /* Success - restore the DMA state */ in pl011_dma_tx_start()
798 uap->dmacr = dmacr; in pl011_dma_tx_start()
809 __releases(&uap->port.lock) in pl011_dma_flush_buffer()
810 __acquires(&uap->port.lock) in pl011_dma_flush_buffer()
815 if (!uap->using_tx_dma) in pl011_dma_flush_buffer()
818 dmaengine_terminate_async(uap->dmatx.chan); in pl011_dma_flush_buffer()
820 if (uap->dmatx.queued) { in pl011_dma_flush_buffer()
821 dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1, in pl011_dma_flush_buffer()
823 uap->dmatx.queued = false; in pl011_dma_flush_buffer()
824 uap->dmacr &= ~UART011_TXDMAE; in pl011_dma_flush_buffer()
825 pl011_write(uap->dmacr, uap, REG_DMACR); in pl011_dma_flush_buffer()
833 struct dma_chan *rxchan = uap->dmarx.chan; in pl011_dma_rx_trigger_dma()
834 struct pl011_dmarx_data *dmarx = &uap->dmarx; in pl011_dma_rx_trigger_dma()
839 return -EIO; in pl011_dma_rx_trigger_dma()
842 sgbuf = uap->dmarx.use_buf_b ? in pl011_dma_rx_trigger_dma()
843 &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a; in pl011_dma_rx_trigger_dma()
844 desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1, in pl011_dma_rx_trigger_dma()
853 uap->dmarx.running = false; in pl011_dma_rx_trigger_dma()
855 return -EBUSY; in pl011_dma_rx_trigger_dma()
859 desc->callback = pl011_dma_rx_callback; in pl011_dma_rx_trigger_dma()
860 desc->callback_param = uap; in pl011_dma_rx_trigger_dma()
861 dmarx->cookie = dmaengine_submit(desc); in pl011_dma_rx_trigger_dma()
864 uap->dmacr |= UART011_RXDMAE; in pl011_dma_rx_trigger_dma()
865 pl011_write(uap->dmacr, uap, REG_DMACR); in pl011_dma_rx_trigger_dma()
866 uap->dmarx.running = true; in pl011_dma_rx_trigger_dma()
868 uap->im &= ~UART011_RXIM; in pl011_dma_rx_trigger_dma()
869 pl011_write(uap->im, uap, REG_IMSC); in pl011_dma_rx_trigger_dma()
877 * with the port spinlock uap->port.lock held.
883 struct tty_port *port = &uap->port.state->port; in pl011_dma_rx_chars()
885 &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a; in pl011_dma_rx_chars()
889 struct pl011_dmarx_data *dmarx = &uap->dmarx; in pl011_dma_rx_chars()
892 if (uap->dmarx.poll_rate) { in pl011_dma_rx_chars()
894 dmataken = sgbuf->sg.length - dmarx->last_residue; in pl011_dma_rx_chars()
897 pending -= dmataken; in pl011_dma_rx_chars()
908 dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken, in pl011_dma_rx_chars()
911 uap->port.icount.rx += dma_count; in pl011_dma_rx_chars()
913 dev_warn(uap->port.dev, in pl011_dma_rx_chars()
918 if (uap->dmarx.poll_rate) in pl011_dma_rx_chars()
919 dmarx->last_residue = sgbuf->sg.length; in pl011_dma_rx_chars()
944 dev_vdbg(uap->port.dev, in pl011_dma_rx_chars()
952 struct pl011_dmarx_data *dmarx = &uap->dmarx; in pl011_dma_rx_irq()
953 struct dma_chan *rxchan = dmarx->chan; in pl011_dma_rx_irq()
954 struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ? in pl011_dma_rx_irq()
955 &dmarx->sgbuf_b : &dmarx->sgbuf_a; in pl011_dma_rx_irq()
966 dev_err(uap->port.dev, "unable to pause DMA transfer\n"); in pl011_dma_rx_irq()
967 dmastat = rxchan->device->device_tx_status(rxchan, in pl011_dma_rx_irq()
968 dmarx->cookie, &state); in pl011_dma_rx_irq()
970 dev_err(uap->port.dev, "unable to pause DMA transfer\n"); in pl011_dma_rx_irq()
972 /* Disable RX DMA - incoming data will wait in the FIFO */ in pl011_dma_rx_irq()
973 uap->dmacr &= ~UART011_RXDMAE; in pl011_dma_rx_irq()
974 pl011_write(uap->dmacr, uap, REG_DMACR); in pl011_dma_rx_irq()
975 uap->dmarx.running = false; in pl011_dma_rx_irq()
977 pending = sgbuf->sg.length - state.residue; in pl011_dma_rx_irq()
979 /* Then we terminate the transfer - we now know our residue */ in pl011_dma_rx_irq()
986 pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true); in pl011_dma_rx_irq()
988 /* Switch buffer & re-trigger DMA job */ in pl011_dma_rx_irq()
989 dmarx->use_buf_b = !dmarx->use_buf_b; in pl011_dma_rx_irq()
991 dev_dbg(uap->port.dev, "could not retrigger RX DMA job " in pl011_dma_rx_irq()
993 uap->im |= UART011_RXIM; in pl011_dma_rx_irq()
994 pl011_write(uap->im, uap, REG_IMSC); in pl011_dma_rx_irq()
1001 struct pl011_dmarx_data *dmarx = &uap->dmarx; in pl011_dma_rx_callback()
1002 struct dma_chan *rxchan = dmarx->chan; in pl011_dma_rx_callback()
1003 bool lastbuf = dmarx->use_buf_b; in pl011_dma_rx_callback()
1004 struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ? in pl011_dma_rx_callback()
1005 &dmarx->sgbuf_b : &dmarx->sgbuf_a; in pl011_dma_rx_callback()
1017 spin_lock_irq(&uap->port.lock); in pl011_dma_rx_callback()
1022 rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state); in pl011_dma_rx_callback()
1023 pending = sgbuf->sg.length - state.residue; in pl011_dma_rx_callback()
1025 /* Then we terminate the transfer - we now know our residue */ in pl011_dma_rx_callback()
1028 uap->dmarx.running = false; in pl011_dma_rx_callback()
1029 dmarx->use_buf_b = !lastbuf; in pl011_dma_rx_callback()
1033 spin_unlock_irq(&uap->port.lock); in pl011_dma_rx_callback()
1039 dev_dbg(uap->port.dev, "could not retrigger RX DMA job " in pl011_dma_rx_callback()
1041 uap->im |= UART011_RXIM; in pl011_dma_rx_callback()
1042 pl011_write(uap->im, uap, REG_IMSC); in pl011_dma_rx_callback()
1054 uap->dmacr &= ~UART011_RXDMAE; in pl011_dma_rx_stop()
1055 pl011_write(uap->dmacr, uap, REG_DMACR); in pl011_dma_rx_stop()
1066 struct tty_port *port = &uap->port.state->port; in pl011_dma_rx_poll()
1067 struct pl011_dmarx_data *dmarx = &uap->dmarx; in pl011_dma_rx_poll()
1068 struct dma_chan *rxchan = uap->dmarx.chan; in pl011_dma_rx_poll()
1076 sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a; in pl011_dma_rx_poll()
1077 rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state); in pl011_dma_rx_poll()
1078 if (likely(state.residue < dmarx->last_residue)) { in pl011_dma_rx_poll()
1079 dmataken = sgbuf->sg.length - dmarx->last_residue; in pl011_dma_rx_poll()
1080 size = dmarx->last_residue - state.residue; in pl011_dma_rx_poll()
1081 dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken, in pl011_dma_rx_poll()
1084 dmarx->last_residue = state.residue; in pl011_dma_rx_poll()
1085 dmarx->last_jiffies = jiffies; in pl011_dma_rx_poll()
1091 * to interrupt mode. We will retrigger DMA at the first interrupt. in pl011_dma_rx_poll()
1093 if (jiffies_to_msecs(jiffies - dmarx->last_jiffies) in pl011_dma_rx_poll()
1094 > uap->dmarx.poll_timeout) { in pl011_dma_rx_poll()
1096 spin_lock_irqsave(&uap->port.lock, flags); in pl011_dma_rx_poll()
1098 uap->im |= UART011_RXIM; in pl011_dma_rx_poll()
1099 pl011_write(uap->im, uap, REG_IMSC); in pl011_dma_rx_poll()
1100 spin_unlock_irqrestore(&uap->port.lock, flags); in pl011_dma_rx_poll()
1102 uap->dmarx.running = false; in pl011_dma_rx_poll()
1104 del_timer(&uap->dmarx.timer); in pl011_dma_rx_poll()
1106 mod_timer(&uap->dmarx.timer, in pl011_dma_rx_poll()
1107 jiffies + msecs_to_jiffies(uap->dmarx.poll_rate)); in pl011_dma_rx_poll()
1115 if (!uap->dma_probed) in pl011_dma_startup()
1118 if (!uap->dmatx.chan) in pl011_dma_startup()
1121 uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA); in pl011_dma_startup()
1122 if (!uap->dmatx.buf) { in pl011_dma_startup()
1123 dev_err(uap->port.dev, "no memory for DMA TX buffer\n"); in pl011_dma_startup()
1124 uap->port.fifosize = uap->fifosize; in pl011_dma_startup()
1128 sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE); in pl011_dma_startup()
1131 uap->port.fifosize = PL011_DMA_BUFFER_SIZE; in pl011_dma_startup()
1132 uap->using_tx_dma = true; in pl011_dma_startup()
1134 if (!uap->dmarx.chan) in pl011_dma_startup()
1138 ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a, in pl011_dma_startup()
1141 dev_err(uap->port.dev, "failed to init DMA %s: %d\n", in pl011_dma_startup()
1146 ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b, in pl011_dma_startup()
1149 dev_err(uap->port.dev, "failed to init DMA %s: %d\n", in pl011_dma_startup()
1151 pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, in pl011_dma_startup()
1156 uap->using_rx_dma = true; in pl011_dma_startup()
1159 /* Turn on DMA error (RX/TX will be enabled on demand) */ in pl011_dma_startup()
1160 uap->dmacr |= UART011_DMAONERR; in pl011_dma_startup()
1161 pl011_write(uap->dmacr, uap, REG_DMACR); in pl011_dma_startup()
1168 if (uap->vendor->dma_threshold) in pl011_dma_startup()
1172 if (uap->using_rx_dma) { in pl011_dma_startup()
1174 dev_dbg(uap->port.dev, "could not trigger initial " in pl011_dma_startup()
1176 if (uap->dmarx.poll_rate) { in pl011_dma_startup()
1177 timer_setup(&uap->dmarx.timer, pl011_dma_rx_poll, 0); in pl011_dma_startup()
1178 mod_timer(&uap->dmarx.timer, in pl011_dma_startup()
1180 msecs_to_jiffies(uap->dmarx.poll_rate)); in pl011_dma_startup()
1181 uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE; in pl011_dma_startup()
1182 uap->dmarx.last_jiffies = jiffies; in pl011_dma_startup()
1189 if (!(uap->using_tx_dma || uap->using_rx_dma)) in pl011_dma_shutdown()
1193 while (pl011_read(uap, REG_FR) & uap->vendor->fr_busy) in pl011_dma_shutdown()
1196 spin_lock_irq(&uap->port.lock); in pl011_dma_shutdown()
1197 uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE); in pl011_dma_shutdown()
1198 pl011_write(uap->dmacr, uap, REG_DMACR); in pl011_dma_shutdown()
1199 spin_unlock_irq(&uap->port.lock); in pl011_dma_shutdown()
1201 if (uap->using_tx_dma) { in pl011_dma_shutdown()
1203 dmaengine_terminate_all(uap->dmatx.chan); in pl011_dma_shutdown()
1204 if (uap->dmatx.queued) { in pl011_dma_shutdown()
1205 dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1, in pl011_dma_shutdown()
1207 uap->dmatx.queued = false; in pl011_dma_shutdown()
1210 kfree(uap->dmatx.buf); in pl011_dma_shutdown()
1211 uap->using_tx_dma = false; in pl011_dma_shutdown()
1214 if (uap->using_rx_dma) { in pl011_dma_shutdown()
1215 dmaengine_terminate_all(uap->dmarx.chan); in pl011_dma_shutdown()
1217 pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE); in pl011_dma_shutdown()
1218 pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE); in pl011_dma_shutdown()
1219 if (uap->dmarx.poll_rate) in pl011_dma_shutdown()
1220 del_timer_sync(&uap->dmarx.timer); in pl011_dma_shutdown()
1221 uap->using_rx_dma = false; in pl011_dma_shutdown()
1227 return uap->using_rx_dma; in pl011_dma_rx_available()
1232 return uap->using_rx_dma && uap->dmarx.running; in pl011_dma_rx_running()
1273 return -EIO; in pl011_dma_rx_trigger_dma()
1291 struct uart_port *port = &uap->port; in pl011_rs485_tx_stop()
1297 if (i == port->fifosize) { in pl011_rs485_tx_stop()
1298 dev_warn(port->dev, in pl011_rs485_tx_stop()
1303 udelay(uap->rs485_tx_drain_interval); in pl011_rs485_tx_stop()
1307 if (port->rs485.delay_rts_after_send) in pl011_rs485_tx_stop()
1308 mdelay(port->rs485.delay_rts_after_send); in pl011_rs485_tx_stop()
1312 if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND) in pl011_rs485_tx_stop()
1322 uap->rs485_tx_started = false; in pl011_rs485_tx_stop()
1330 uap->im &= ~UART011_TXIM; in pl011_stop_tx()
1331 pl011_write(uap->im, uap, REG_IMSC); in pl011_stop_tx()
1334 if ((port->rs485.flags & SER_RS485_ENABLED) && uap->rs485_tx_started) in pl011_stop_tx()
1344 uap->im |= UART011_TXIM; in pl011_start_tx_pio()
1345 pl011_write(uap->im, uap, REG_IMSC); in pl011_start_tx_pio()
1363 uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM| in pl011_stop_rx()
1365 pl011_write(uap->im, uap, REG_IMSC); in pl011_stop_rx()
1375 uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM; in pl011_enable_ms()
1376 pl011_write(uap->im, uap, REG_IMSC); in pl011_enable_ms()
1380 __releases(&uap->port.lock) in pl011_rx_chars()
1381 __acquires(&uap->port.lock) in pl011_rx_chars()
1385 spin_unlock(&uap->port.lock); in pl011_rx_chars()
1386 tty_flip_buffer_push(&uap->port.state->port); in pl011_rx_chars()
1393 dev_dbg(uap->port.dev, "could not trigger RX DMA job " in pl011_rx_chars()
1395 uap->im |= UART011_RXIM; in pl011_rx_chars()
1396 pl011_write(uap->im, uap, REG_IMSC); in pl011_rx_chars()
1400 if (uap->dmarx.poll_rate) { in pl011_rx_chars()
1401 uap->dmarx.last_jiffies = jiffies; in pl011_rx_chars()
1402 uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE; in pl011_rx_chars()
1403 mod_timer(&uap->dmarx.timer, in pl011_rx_chars()
1405 msecs_to_jiffies(uap->dmarx.poll_rate)); in pl011_rx_chars()
1410 spin_lock(&uap->port.lock); in pl011_rx_chars()
1421 uap->port.icount.tx++; in pl011_tx_char()
1428 struct uart_port *port = &uap->port; in pl011_rs485_tx_start()
1435 /* Disable receiver if half-duplex */ in pl011_rs485_tx_start()
1436 if (!(port->rs485.flags & SER_RS485_RX_DURING_TX)) in pl011_rs485_tx_start()
1439 if (port->rs485.flags & SER_RS485_RTS_ON_SEND) in pl011_rs485_tx_start()
1446 if (port->rs485.delay_rts_before_send) in pl011_rs485_tx_start()
1447 mdelay(port->rs485.delay_rts_before_send); in pl011_rs485_tx_start()
1449 uap->rs485_tx_started = true; in pl011_rs485_tx_start()
1452 /* Returns true if tx interrupts have to be (kept) enabled */
1455 struct circ_buf *xmit = &uap->port.state->xmit; in pl011_tx_chars()
1456 int count = uap->fifosize >> 1; in pl011_tx_chars()
1458 if (uap->port.x_char) { in pl011_tx_chars()
1459 if (!pl011_tx_char(uap, uap->port.x_char, from_irq)) in pl011_tx_chars()
1461 uap->port.x_char = 0; in pl011_tx_chars()
1462 --count; in pl011_tx_chars()
1464 if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) { in pl011_tx_chars()
1465 pl011_stop_tx(&uap->port); in pl011_tx_chars()
1469 if ((uap->port.rs485.flags & SER_RS485_ENABLED) && in pl011_tx_chars()
1470 !uap->rs485_tx_started) in pl011_tx_chars()
1478 if (likely(from_irq) && count-- == 0) in pl011_tx_chars()
1481 if (!pl011_tx_char(uap, xmit->buf[xmit->tail], from_irq)) in pl011_tx_chars()
1484 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); in pl011_tx_chars()
1488 uart_write_wakeup(&uap->port); in pl011_tx_chars()
1491 pl011_stop_tx(&uap->port); in pl011_tx_chars()
1503 delta = status ^ uap->old_status; in pl011_modem_status()
1504 uap->old_status = status; in pl011_modem_status()
1510 uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD); in pl011_modem_status()
1512 if (delta & uap->vendor->fr_dsr) in pl011_modem_status()
1513 uap->port.icount.dsr++; in pl011_modem_status()
1515 if (delta & uap->vendor->fr_cts) in pl011_modem_status()
1516 uart_handle_cts_change(&uap->port, in pl011_modem_status()
1517 status & uap->vendor->fr_cts); in pl011_modem_status()
1519 wake_up_interruptible(&uap->port.state->port.delta_msr_wait); in pl011_modem_status()
1524 if (!uap->vendor->cts_event_workaround) in check_apply_cts_event_workaround()
1546 spin_lock_irqsave(&uap->port.lock, flags); in pl011_int()
1547 status = pl011_read(uap, REG_RIS) & uap->im; in pl011_int()
1568 if (pass_counter-- == 0) in pl011_int()
1571 status = pl011_read(uap, REG_RIS) & uap->im; in pl011_int()
1576 spin_unlock_irqrestore(&uap->port.lock, flags); in pl011_int()
1587 unsigned int status = pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr; in pl011_tx_empty()
1589 return status & (uap->vendor->fr_busy | UART01x_FR_TXFF) ? in pl011_tx_empty()
1605 TIOCMBIT(uap->vendor->fr_dsr, TIOCM_DSR); in pl011_get_mctrl()
1606 TIOCMBIT(uap->vendor->fr_cts, TIOCM_CTS); in pl011_get_mctrl()
1607 TIOCMBIT(uap->vendor->fr_ri, TIOCM_RNG); in pl011_get_mctrl()
1618 if (port->rs485.flags & SER_RS485_ENABLED) in pl011_set_mctrl()
1635 if (port->status & UPSTAT_AUTORTS) { in pl011_set_mctrl()
1636 /* We need to disable auto-RTS if we want to turn RTS off */ in pl011_set_mctrl()
1651 spin_lock_irqsave(&uap->port.lock, flags); in pl011_break_ctl()
1653 if (break_state == -1) in pl011_break_ctl()
1658 spin_unlock_irqrestore(&uap->port.lock, flags); in pl011_break_ctl()
1726 pinctrl_pm_select_default_state(port->dev); in pl011_hwinit()
1731 retval = clk_prepare_enable(uap->clk); in pl011_hwinit()
1735 uap->port.uartclk = clk_get_rate(uap->clk); in pl011_hwinit()
1746 uap->im = pl011_read(uap, REG_IMSC); in pl011_hwinit()
1749 if (dev_get_platdata(uap->port.dev)) { in pl011_hwinit()
1752 plat = dev_get_platdata(uap->port.dev); in pl011_hwinit()
1753 if (plat->init) in pl011_hwinit()
1754 plat->init(); in pl011_hwinit()
1782 pl011_write(uap->im, uap, REG_IMSC); in pl011_allocate_irq()
1784 return request_irq(uap->port.irq, pl011_int, IRQF_SHARED, "uart-pl011", uap); in pl011_allocate_irq()
1796 spin_lock_irq(&uap->port.lock); in pl011_enable_interrupts()
1807 for (i = 0; i < uap->fifosize * 2; ++i) { in pl011_enable_interrupts()
1814 uap->im = UART011_RTIM; in pl011_enable_interrupts()
1816 uap->im |= UART011_RXIM; in pl011_enable_interrupts()
1817 pl011_write(uap->im, uap, REG_IMSC); in pl011_enable_interrupts()
1818 spin_unlock_irq(&uap->port.lock); in pl011_enable_interrupts()
1836 pl011_write(uap->vendor->ifls, uap, REG_IFLS); in pl011_startup()
1838 spin_lock_irq(&uap->port.lock); in pl011_startup()
1841 cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR); in pl011_startup()
1844 if (port->rs485.flags & SER_RS485_ENABLED) { in pl011_startup()
1845 if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND) in pl011_startup()
1855 spin_unlock_irq(&uap->port.lock); in pl011_startup()
1860 uap->old_status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY; in pl011_startup()
1870 clk_disable_unprepare(uap->clk); in pl011_startup()
1889 uap->old_status = 0; in sbsa_uart_startup()
1915 uap->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS); in pl011_disable_uart()
1916 spin_lock_irq(&uap->port.lock); in pl011_disable_uart()
1918 uap->old_cr = cr; in pl011_disable_uart()
1922 spin_unlock_irq(&uap->port.lock); in pl011_disable_uart()
1934 spin_lock_irq(&uap->port.lock); in pl011_disable_interrupts()
1937 uap->im = 0; in pl011_disable_interrupts()
1938 pl011_write(uap->im, uap, REG_IMSC); in pl011_disable_interrupts()
1941 spin_unlock_irq(&uap->port.lock); in pl011_disable_interrupts()
1953 if ((port->rs485.flags & SER_RS485_ENABLED) && uap->rs485_tx_started) in pl011_shutdown()
1956 free_irq(uap->port.irq, uap); in pl011_shutdown()
1963 clk_disable_unprepare(uap->clk); in pl011_shutdown()
1965 pinctrl_pm_select_sleep_state(port->dev); in pl011_shutdown()
1967 if (dev_get_platdata(uap->port.dev)) { in pl011_shutdown()
1970 plat = dev_get_platdata(uap->port.dev); in pl011_shutdown()
1971 if (plat->exit) in pl011_shutdown()
1972 plat->exit(); in pl011_shutdown()
1975 if (uap->port.ops->flush_buffer) in pl011_shutdown()
1976 uap->port.ops->flush_buffer(port); in pl011_shutdown()
1986 free_irq(uap->port.irq, uap); in sbsa_uart_shutdown()
1988 if (uap->port.ops->flush_buffer) in sbsa_uart_shutdown()
1989 uap->port.ops->flush_buffer(port); in sbsa_uart_shutdown()
1995 port->read_status_mask = UART011_DR_OE | 255; in pl011_setup_status_masks()
1996 if (termios->c_iflag & INPCK) in pl011_setup_status_masks()
1997 port->read_status_mask |= UART011_DR_FE | UART011_DR_PE; in pl011_setup_status_masks()
1998 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) in pl011_setup_status_masks()
1999 port->read_status_mask |= UART011_DR_BE; in pl011_setup_status_masks()
2004 port->ignore_status_mask = 0; in pl011_setup_status_masks()
2005 if (termios->c_iflag & IGNPAR) in pl011_setup_status_masks()
2006 port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE; in pl011_setup_status_masks()
2007 if (termios->c_iflag & IGNBRK) { in pl011_setup_status_masks()
2008 port->ignore_status_mask |= UART011_DR_BE; in pl011_setup_status_masks()
2013 if (termios->c_iflag & IGNPAR) in pl011_setup_status_masks()
2014 port->ignore_status_mask |= UART011_DR_OE; in pl011_setup_status_masks()
2020 if ((termios->c_cflag & CREAD) == 0) in pl011_setup_status_masks()
2021 port->ignore_status_mask |= UART_DUMMY_DR_RX; in pl011_setup_status_masks()
2035 if (uap->vendor->oversampling) in pl011_set_termios()
2044 port->uartclk / clkdiv); in pl011_set_termios()
2049 if (uap->dmarx.auto_poll_rate) in pl011_set_termios()
2050 uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud); in pl011_set_termios()
2053 if (baud > port->uartclk/16) in pl011_set_termios()
2054 quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud); in pl011_set_termios()
2056 quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud); in pl011_set_termios()
2058 switch (termios->c_cflag & CSIZE) { in pl011_set_termios()
2072 if (termios->c_cflag & CSTOPB) in pl011_set_termios()
2074 if (termios->c_cflag & PARENB) { in pl011_set_termios()
2076 if (!(termios->c_cflag & PARODD)) in pl011_set_termios()
2078 if (termios->c_cflag & CMSPAR) in pl011_set_termios()
2081 if (uap->fifosize > 1) in pl011_set_termios()
2084 bits = tty_get_frame_size(termios->c_cflag); in pl011_set_termios()
2086 spin_lock_irqsave(&port->lock, flags); in pl011_set_termios()
2089 * Update the per-port timeout. in pl011_set_termios()
2091 uart_update_timeout(port, termios->c_cflag, baud); in pl011_set_termios()
2094 * Calculate the approximated time it takes to transmit one character in pl011_set_termios()
2098 uap->rs485_tx_drain_interval = (bits * 1000 * 1000) / baud; in pl011_set_termios()
2102 if (UART_ENABLE_MS(port, termios->c_cflag)) in pl011_set_termios()
2105 if (port->rs485.flags & SER_RS485_ENABLED) in pl011_set_termios()
2106 termios->c_cflag &= ~CRTSCTS; in pl011_set_termios()
2112 if (termios->c_cflag & CRTSCTS) { in pl011_set_termios()
2117 port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS; in pl011_set_termios()
2120 port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS); in pl011_set_termios()
2123 if (uap->vendor->oversampling) { in pl011_set_termios()
2124 if (baud > port->uartclk / 16) in pl011_set_termios()
2133 * to avoid delayed sampling of start bit at high speeds, in pl011_set_termios()
2136 if (uap->vendor->oversampling) { in pl011_set_termios()
2138 quot -= 1; in pl011_set_termios()
2140 quot -= 2; in pl011_set_termios()
2147 * ----------v----------v----------v----------v----- in pl011_set_termios()
2150 * ----------^----------^----------^----------^----- in pl011_set_termios()
2155 spin_unlock_irqrestore(&port->lock, flags); in pl011_set_termios()
2166 tty_termios_encode_baud_rate(termios, uap->fixed_baud, uap->fixed_baud); in sbsa_uart_set_termios()
2169 termios->c_cflag &= ~(CSIZE | CSTOPB | PARENB | PARODD); in sbsa_uart_set_termios()
2170 termios->c_cflag &= ~(CMSPAR | CRTSCTS); in sbsa_uart_set_termios()
2171 termios->c_cflag |= CS8 | CLOCAL; in sbsa_uart_set_termios()
2173 spin_lock_irqsave(&port->lock, flags); in sbsa_uart_set_termios()
2174 uart_update_timeout(port, CS8, uap->fixed_baud); in sbsa_uart_set_termios()
2176 spin_unlock_irqrestore(&port->lock, flags); in sbsa_uart_set_termios()
2183 return uap->port.type == PORT_AMBA ? uap->type : NULL; in pl011_type()
2191 release_mem_region(port->mapbase, SZ_4K); in pl011_release_port()
2199 return request_mem_region(port->mapbase, SZ_4K, "uart-pl011") in pl011_request_port()
2200 != NULL ? 0 : -EBUSY; in pl011_request_port()
2209 port->type = PORT_AMBA; in pl011_config_port()
2220 if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA) in pl011_verify_port()
2221 ret = -EINVAL; in pl011_verify_port()
2222 if (ser->irq < 0 || ser->irq >= nr_irqs) in pl011_verify_port()
2223 ret = -EINVAL; in pl011_verify_port()
2224 if (ser->baud_base < 9600) in pl011_verify_port()
2225 ret = -EINVAL; in pl011_verify_port()
2230 struct serial_rs485 *rs485) in pl011_rs485_config() argument
2236 if (!(rs485->flags & SER_RS485_RTS_ON_SEND) == in pl011_rs485_config()
2237 !(rs485->flags & SER_RS485_RTS_AFTER_SEND)) { in pl011_rs485_config()
2238 rs485->flags |= SER_RS485_RTS_ON_SEND; in pl011_rs485_config()
2239 rs485->flags &= ~SER_RS485_RTS_AFTER_SEND; in pl011_rs485_config()
2242 rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U); in pl011_rs485_config()
2243 rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U); in pl011_rs485_config()
2244 memset(rs485->padding, 0, sizeof(rs485->padding)); in pl011_rs485_config()
2246 if (port->rs485.flags & SER_RS485_ENABLED) in pl011_rs485_config()
2250 port->rs485 = *rs485; in pl011_rs485_config()
2253 if (port->rs485.flags & SER_RS485_ENABLED) { in pl011_rs485_config()
2258 port->status &= ~UPSTAT_AUTORTS; in pl011_rs485_config()
2337 struct uart_amba_port *uap = amba_ports[co->index]; in pl011_console_write()
2342 clk_enable(uap->clk); in pl011_console_write()
2345 if (uap->port.sysrq) in pl011_console_write()
2348 locked = spin_trylock(&uap->port.lock); in pl011_console_write()
2350 spin_lock(&uap->port.lock); in pl011_console_write()
2355 if (!uap->vendor->always_enabled) { in pl011_console_write()
2362 uart_console_write(&uap->port, s, count, pl011_console_putchar); in pl011_console_write()
2369 while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr) in pl011_console_write()
2370 & uap->vendor->fr_busy) in pl011_console_write()
2372 if (!uap->vendor->always_enabled) in pl011_console_write()
2376 spin_unlock(&uap->port.lock); in pl011_console_write()
2379 clk_disable(uap->clk); in pl011_console_write()
2406 *baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd); in pl011_console_get_options()
2408 if (uap->vendor->oversampling) { in pl011_console_get_options()
2430 if (co->index >= UART_NR) in pl011_console_setup()
2431 co->index = 0; in pl011_console_setup()
2432 uap = amba_ports[co->index]; in pl011_console_setup()
2434 return -ENODEV; in pl011_console_setup()
2437 pinctrl_pm_select_default_state(uap->port.dev); in pl011_console_setup()
2439 ret = clk_prepare(uap->clk); in pl011_console_setup()
2443 if (dev_get_platdata(uap->port.dev)) { in pl011_console_setup()
2446 plat = dev_get_platdata(uap->port.dev); in pl011_console_setup()
2447 if (plat->init) in pl011_console_setup()
2448 plat->init(); in pl011_console_setup()
2451 uap->port.uartclk = clk_get_rate(uap->clk); in pl011_console_setup()
2453 if (uap->vendor->fixed_options) { in pl011_console_setup()
2454 baud = uap->fixed_baud; in pl011_console_setup()
2463 return uart_set_options(&uap->port, co, baud, parity, bits, flow); in pl011_console_setup()
2467 * pl011_console_match - non-standard console matching
2476 * This form is used to register an initial earlycon boot console and
2477 * replace it with the amba_console at pl011 driver init.
2482 * Returns 0 if console matches; otherwise non-zero to use default matching
2498 return -ENODEV; in pl011_console_match()
2501 return -ENODEV; in pl011_console_match()
2504 return -ENODEV; in pl011_console_match()
2513 port = &amba_ports[i]->port; in pl011_console_match()
2515 if (port->mapbase != addr) in pl011_console_match()
2518 co->index = i; in pl011_console_match()
2519 port->cons = co; in pl011_console_match()
2523 return -ENODEV; in pl011_console_match()
2534 .index = -1,
2542 while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF) in qdf2400_e44_putc()
2544 writel(c, port->membase + UART01x_DR); in qdf2400_e44_putc()
2545 while (!(readl(port->membase + UART01x_FR) & UART011_FR_TXFE)) in qdf2400_e44_putc()
2551 struct earlycon_device *dev = con->data; in qdf2400_e44_early_write()
2553 uart_console_write(&dev->port, s, n, qdf2400_e44_putc); in qdf2400_e44_early_write()
2558 while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF) in pl011_putc()
2560 if (port->iotype == UPIO_MEM32) in pl011_putc()
2561 writel(c, port->membase + UART01x_DR); in pl011_putc()
2563 writeb(c, port->membase + UART01x_DR); in pl011_putc()
2564 while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY) in pl011_putc()
2570 struct earlycon_device *dev = con->data; in pl011_early_write()
2572 uart_console_write(&dev->port, s, n, pl011_putc); in pl011_early_write()
2578 if (readl(port->membase + UART01x_FR) & UART01x_FR_RXFE) in pl011_getc()
2581 if (port->iotype == UPIO_MEM32) in pl011_getc()
2582 return readl(port->membase + UART01x_DR); in pl011_getc()
2584 return readb(port->membase + UART01x_DR); in pl011_getc()
2589 struct earlycon_device *dev = con->data; in pl011_early_read()
2593 ch = pl011_getc(&dev->port); in pl011_early_read()
2607 * On non-ACPI systems, earlycon is enabled by specifying
2610 * On ACPI ARM64 systems, an "early" console is enabled via the SPCR table,
2621 if (!device->port.membase) in pl011_early_console_setup()
2622 return -ENODEV; in pl011_early_console_setup()
2624 device->con->write = pl011_early_write; in pl011_early_console_setup()
2625 device->con->read = pl011_early_read; in pl011_early_console_setup()
2630 OF_EARLYCON_DECLARE(pl011, "arm,sbsa-uart", pl011_early_console_setup);
2634 * Erratum 44, traditional earlycon can be enabled by specifying
2638 * will be enabled with the information from the SPCR table. In this
2639 * case, the SPCR code will detect the need for the E44 work-around,
2646 if (!device->port.membase) in qdf2400_e44_early_console_setup()
2647 return -ENODEV; in qdf2400_e44_early_console_setup()
2649 device->con->write = qdf2400_e44_early_write; in qdf2400_e44_early_console_setup()
2678 np = dev->of_node; in pl011_probe_dt_alias()
2695 …dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeratio… in pl011_probe_dt_alias()
2725 return -EBUSY; in pl011_find_free_port()
2730 struct uart_port *port = &uap->port; in pl011_get_rs485_mode()
2731 struct serial_rs485 *rs485 = &port->rs485; in pl011_get_rs485_mode() local
2739 rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U); in pl011_get_rs485_mode()
2740 rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U); in pl011_get_rs485_mode()
2757 uap->old_cr = 0; in pl011_setup_port()
2758 uap->port.dev = dev; in pl011_setup_port()
2759 uap->port.mapbase = mmiobase->start; in pl011_setup_port()
2760 uap->port.membase = base; in pl011_setup_port()
2761 uap->port.fifosize = uap->fifosize; in pl011_setup_port()
2762 uap->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_AMBA_PL011_CONSOLE); in pl011_setup_port()
2763 uap->port.flags = UPF_BOOT_AUTOCONF; in pl011_setup_port()
2764 uap->port.line = index; in pl011_setup_port()
2786 dev_err(uap->port.dev, in pl011_register_port()
2787 "Failed to register AMBA-PL011 driver\n"); in pl011_register_port()
2795 ret = uart_add_one_port(&amba_reg, &uap->port); in pl011_register_port()
2805 struct vendor_data *vendor = id->data; in pl011_probe()
2812 uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port), in pl011_probe()
2815 return -ENOMEM; in pl011_probe()
2817 uap->clk = devm_clk_get(&dev->dev, NULL); in pl011_probe()
2818 if (IS_ERR(uap->clk)) in pl011_probe()
2819 return PTR_ERR(uap->clk); in pl011_probe()
2821 uap->reg_offset = vendor->reg_offset; in pl011_probe()
2822 uap->vendor = vendor; in pl011_probe()
2823 uap->fifosize = vendor->get_fifosize(dev); in pl011_probe()
2824 uap->port.iotype = vendor->access_32b ? UPIO_MEM32 : UPIO_MEM; in pl011_probe()
2825 uap->port.irq = dev->irq[0]; in pl011_probe()
2826 uap->port.ops = &amba_pl011_pops; in pl011_probe()
2827 uap->port.rs485_config = pl011_rs485_config; in pl011_probe()
2828 snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev)); in pl011_probe()
2830 ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr); in pl011_probe()
2843 uart_remove_one_port(&amba_reg, &uap->port); in pl011_remove()
2853 return -EINVAL; in pl011_suspend()
2855 return uart_suspend_port(&amba_reg, &uap->port); in pl011_suspend()
2863 return -EINVAL; in pl011_resume()
2865 return uart_resume_port(&amba_reg, &uap->port); in pl011_resume()
2882 if (pdev->dev.of_node) { in sbsa_uart_probe()
2883 struct device_node *np = pdev->dev.of_node; in sbsa_uart_probe()
2885 ret = of_property_read_u32(np, "current-speed", &baudrate); in sbsa_uart_probe()
2896 uap = devm_kzalloc(&pdev->dev, sizeof(struct uart_amba_port), in sbsa_uart_probe()
2899 return -ENOMEM; in sbsa_uart_probe()
2904 uap->port.irq = ret; in sbsa_uart_probe()
2908 dev_info(&pdev->dev, "working around QDF2400 SoC erratum 44\n"); in sbsa_uart_probe()
2909 uap->vendor = &vendor_qdt_qdf2400_e44; in sbsa_uart_probe()
2912 uap->vendor = &vendor_sbsa; in sbsa_uart_probe()
2914 uap->reg_offset = uap->vendor->reg_offset; in sbsa_uart_probe()
2915 uap->fifosize = 32; in sbsa_uart_probe()
2916 uap->port.iotype = uap->vendor->access_32b ? UPIO_MEM32 : UPIO_MEM; in sbsa_uart_probe()
2917 uap->port.ops = &sbsa_uart_pops; in sbsa_uart_probe()
2918 uap->fixed_baud = baudrate; in sbsa_uart_probe()
2920 snprintf(uap->type, sizeof(uap->type), "SBSA"); in sbsa_uart_probe()
2924 ret = pl011_setup_port(&pdev->dev, uap, r, portnr); in sbsa_uart_probe()
2937 uart_remove_one_port(&amba_reg, &uap->port); in sbsa_uart_remove()
2943 { .compatible = "arm,sbsa-uart", },
2958 .name = "sbsa-uart",
2989 .name = "uart-pl011",