// SPDX-License-Identifier: GPL-2.0+
/*
 * 8250_dma.c - DMA Engine API support for 8250.c
 *
 * Copyright (C) 2013 Intel Corporation
 */
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_reg.h>
#include <linux/dma-mapping.h>

#include "8250.h"
13
/*
 * Completion callback for a TX DMA transfer.
 *
 * Runs in dmaengine callback context with no locks held on entry, so the
 * port lock is taken here before the transmit state is touched. Accounts
 * for the bytes just sent and tries to queue the next chunk, falling back
 * to THRI-driven PIO when DMA cannot continue.
 */
static void __dma_tx_complete(void *param)
{
	struct uart_8250_port *p = param;
	struct uart_8250_dma *dma = p->dma;
	struct circ_buf *xmit = &p->port.state->xmit;
	unsigned long flags;
	int ret;

	/* Give the CPU back a coherent view of the buffer the device read. */
	dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
				UART_XMIT_SIZE, DMA_TO_DEVICE);

	spin_lock_irqsave(&p->port.lock, flags);

	dma->tx_running = 0;

	/* Consume the tx_size bytes the finished transfer covered. */
	uart_xmit_advance(&p->port, dma->tx_size);

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&p->port);

	/*
	 * Queue the next chunk (handles the circ-buffer wrap). If that fails
	 * (ret != 0) or nothing was started (tx_running stayed 0), enable the
	 * THR-empty interrupt so remaining/future data goes out via PIO.
	 */
	ret = serial8250_tx_dma(p);
	if (ret || !dma->tx_running)
		serial8250_set_THRI(p);

	spin_unlock_irqrestore(&p->port.lock, flags);
}
40
/*
 * Completion handler for an RX DMA transfer.
 *
 * Invoked by the dmaengine when the transfer finishes, and also called
 * directly from serial8250_rx_dma_flush() after pausing the channel. The
 * number of bytes actually received is derived from the residue reported
 * for the cookie, and the data is pushed into the tty flip buffer.
 *
 * NOTE(review): in the dmaengine-callback path this runs without the port
 * lock, so rx_running/icount updates may race with the flush path that the
 * IRQ handler drives — verify against the callers' locking rules.
 */
static void __dma_rx_complete(void *param)
{
	struct uart_8250_port *p = param;
	struct uart_8250_dma *dma = p->dma;
	struct tty_port *tty_port = &p->port.state->port;
	struct dma_tx_state state;
	int count;

	dma->rx_running = 0;
	/* residue = bytes of rx_size the device did NOT fill. */
	dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);

	count = dma->rx_size - state.residue;

	tty_insert_flip_string(tty_port, dma->rx_buf, count);
	p->port.icount.rx += count;

	tty_flip_buffer_push(tty_port);
}
59
/*
 * serial8250_tx_dma - start (or service) a TX DMA transfer
 * @p: port to transmit on
 *
 * Called with the port lock held (IRQ path or __dma_tx_complete()). Sends
 * at most the contiguous tail-to-end chunk of the circular transmit buffer
 * in a single descriptor; the completion callback re-invokes this function
 * to pick up the wrapped remainder.
 *
 * Returns 0 on success (including "nothing to do"), or a negative errno
 * when no descriptor could be prepared; tx_err is set on failure so the
 * caller can fall back to PIO.
 */
int serial8250_tx_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;
	struct circ_buf *xmit = &p->port.state->xmit;
	struct dma_async_tx_descriptor *desc;
	struct uart_port *up = &p->port;
	int ret;

	if (dma->tx_running) {
		/*
		 * A transfer is already in flight. If a flow-control
		 * character is pending, pause DMA and push it out through
		 * the THR so it isn't queued behind the DMA data.
		 */
		if (up->x_char) {
			dmaengine_pause(dma->txchan);
			uart_xchar_out(up, UART_TX);
			dmaengine_resume(dma->txchan);
		}
		return 0;
	} else if (up->x_char) {
		uart_xchar_out(up, UART_TX);
	}

	if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) {
		/* We have been called from __dma_tx_complete() */
		return 0;
	}

	/* Only the contiguous run up to the buffer end — no scatter-gather. */
	dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);

	serial8250_do_prepare_tx_dma(p);

	desc = dmaengine_prep_slave_single(dma->txchan,
					   dma->tx_addr + xmit->tail,
					   dma->tx_size, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -EBUSY;
		goto err;
	}

	dma->tx_running = 1;
	desc->callback = __dma_tx_complete;
	desc->callback_param = p;

	dma->tx_cookie = dmaengine_submit(desc);

	/* Hand buffer ownership to the device before kicking the transfer. */
	dma_sync_single_for_device(dma->txchan->device->dev, dma->tx_addr,
				   UART_XMIT_SIZE, DMA_TO_DEVICE);

	dma_async_issue_pending(dma->txchan);
	/* DMA drives transmission now; the THR-empty interrupt isn't needed. */
	serial8250_clear_THRI(p);
	dma->tx_err = 0;

	return 0;
err:
	dma->tx_err = 1;
	return ret;
}
115
serial8250_rx_dma(struct uart_8250_port * p)116 int serial8250_rx_dma(struct uart_8250_port *p)
117 {
118 struct uart_8250_dma *dma = p->dma;
119 struct dma_async_tx_descriptor *desc;
120
121 if (dma->rx_running)
122 return 0;
123
124 serial8250_do_prepare_rx_dma(p);
125
126 desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
127 dma->rx_size, DMA_DEV_TO_MEM,
128 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
129 if (!desc)
130 return -EBUSY;
131
132 dma->rx_running = 1;
133 desc->callback = __dma_rx_complete;
134 desc->callback_param = p;
135
136 dma->rx_cookie = dmaengine_submit(desc);
137
138 dma_async_issue_pending(dma->rxchan);
139
140 return 0;
141 }
142
serial8250_rx_dma_flush(struct uart_8250_port * p)143 void serial8250_rx_dma_flush(struct uart_8250_port *p)
144 {
145 struct uart_8250_dma *dma = p->dma;
146
147 if (dma->rx_running) {
148 dmaengine_pause(dma->rxchan);
149 __dma_rx_complete(p);
150 dmaengine_terminate_async(dma->rxchan);
151 }
152 }
153 EXPORT_SYMBOL_GPL(serial8250_rx_dma_flush);
154
/*
 * serial8250_request_dma - acquire and configure RX/TX DMA for a port
 * @p: port to set up (p->dma must already point at a uart_8250_dma)
 *
 * Requests both slave channels (preferring platform filter data in
 * dma->fn/rx_param/tx_param over the device-tree "rx"/"tx" names),
 * validates the capabilities each direction needs, allocates the coherent
 * RX bounce buffer and streaming-maps the circular TX buffer.
 *
 * Returns 0 on success or a negative errno; on failure everything acquired
 * so far is unwound via the goto chain (err releases TX, release_rx
 * releases RX).
 */
int serial8250_request_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;
	/* Fall back to the port's register base when no explicit DMA
	 * addresses were provided by the platform glue. */
	phys_addr_t rx_dma_addr = dma->rx_dma_addr ?
				  dma->rx_dma_addr : p->port.mapbase;
	phys_addr_t tx_dma_addr = dma->tx_dma_addr ?
				  dma->tx_dma_addr : p->port.mapbase;
	dma_cap_mask_t mask;
	struct dma_slave_caps caps;
	int ret;

	/* Default slave configuration parameters */
	dma->rxconf.direction = DMA_DEV_TO_MEM;
	dma->rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->rxconf.src_addr = rx_dma_addr + UART_RX;

	dma->txconf.direction = DMA_MEM_TO_DEV;
	dma->txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->txconf.dst_addr = tx_dma_addr + UART_TX;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Get a channel for RX */
	dma->rxchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->rx_param,
						       p->port.dev, "rx");
	if (!dma->rxchan)
		return -ENODEV;

	/* 8250 rx dma requires dmaengine driver to support pause/terminate */
	ret = dma_get_slave_caps(dma->rxchan, &caps);
	if (ret)
		goto release_rx;
	/* Flush also needs a usable residue, so descriptor-granularity
	 * residue reporting is not good enough. */
	if (!caps.cmd_pause || !caps.cmd_terminate ||
	    caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
		ret = -EINVAL;
		goto release_rx;
	}

	dmaengine_slave_config(dma->rxchan, &dma->rxconf);

	/* Get a channel for TX */
	dma->txchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->tx_param,
						       p->port.dev, "tx");
	if (!dma->txchan) {
		ret = -ENODEV;
		goto release_rx;
	}

	/* 8250 tx dma requires dmaengine driver to support terminate */
	ret = dma_get_slave_caps(dma->txchan, &caps);
	if (ret)
		goto err;
	if (!caps.cmd_terminate) {
		ret = -EINVAL;
		goto err;
	}

	dmaengine_slave_config(dma->txchan, &dma->txconf);

	/* RX buffer: coherent allocation, sized by the caller or one page. */
	if (!dma->rx_size)
		dma->rx_size = PAGE_SIZE;

	dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
					 &dma->rx_addr, GFP_KERNEL);
	if (!dma->rx_buf) {
		ret = -ENOMEM;
		goto err;
	}

	/* TX buffer: streaming-map the port's circular transmit buffer. */
	dma->tx_addr = dma_map_single(dma->txchan->device->dev,
				      p->port.state->xmit.buf,
				      UART_XMIT_SIZE,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) {
		/* The RX buffer is not covered by the goto chain — free it
		 * here before unwinding the channels. */
		dma_free_coherent(dma->rxchan->device->dev, dma->rx_size,
				  dma->rx_buf, dma->rx_addr);
		ret = -ENOMEM;
		goto err;
	}

	dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");

	return 0;
err:
	dma_release_channel(dma->txchan);
release_rx:
	dma_release_channel(dma->rxchan);
	return ret;
}
EXPORT_SYMBOL_GPL(serial8250_request_dma);
250
serial8250_release_dma(struct uart_8250_port * p)251 void serial8250_release_dma(struct uart_8250_port *p)
252 {
253 struct uart_8250_dma *dma = p->dma;
254
255 if (!dma)
256 return;
257
258 /* Release RX resources */
259 dmaengine_terminate_sync(dma->rxchan);
260 dma_free_coherent(dma->rxchan->device->dev, dma->rx_size, dma->rx_buf,
261 dma->rx_addr);
262 dma_release_channel(dma->rxchan);
263 dma->rxchan = NULL;
264
265 /* Release TX resources */
266 dmaengine_terminate_sync(dma->txchan);
267 dma_unmap_single(dma->txchan->device->dev, dma->tx_addr,
268 UART_XMIT_SIZE, DMA_TO_DEVICE);
269 dma_release_channel(dma->txchan);
270 dma->txchan = NULL;
271 dma->tx_running = 0;
272
273 dev_dbg_ratelimited(p->port.dev, "dma channels released\n");
274 }
275 EXPORT_SYMBOL_GPL(serial8250_release_dma);
276