1 /*
2  * S3C24XX DMA handling
3  *
4  * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
5  *
6  * based on amba-pl08x.c
7  *
8  * Copyright (c) 2006 ARM Ltd.
9  * Copyright (c) 2010 ST-Ericsson SA
10  *
11  * Author: Peter Pearse <peter.pearse@arm.com>
12  * Author: Linus Walleij <linus.walleij@stericsson.com>
13  *
14  * This program is free software; you can redistribute it and/or modify it
15  * under the terms of the GNU General Public License as published by the Free
16  * Software Foundation; either version 2 of the License, or (at your option)
17  * any later version.
18  *
19  * The DMA controllers in S3C24XX SoCs have a varying number of DMA signals
20  * that can be routed to any of the 4 to 8 hardware channels.
21  *
22  * On these DMA controllers the number of hardware channels and the number
23  * of incoming DMA request signals are therefore two different things.
24  * It is usually not possible to serve all request signals at the same time,
25  * so a multiplexing scheme with possible denial of use is necessary.
26  *
27  * Open items:
28  * - bursts
29  */
30 
31 #include <linux/platform_device.h>
32 #include <linux/types.h>
33 #include <linux/dmaengine.h>
34 #include <linux/dma-mapping.h>
35 #include <linux/interrupt.h>
36 #include <linux/clk.h>
37 #include <linux/module.h>
38 #include <linux/mod_devicetable.h>
39 #include <linux/slab.h>
40 #include <linux/platform_data/dma-s3c24xx.h>
41 
42 #include "dmaengine.h"
43 #include "virt-dma.h"
44 
45 #define MAX_DMA_CHANNELS	8
46 
47 #define S3C24XX_DISRC			0x00
48 #define S3C24XX_DISRCC			0x04
49 #define S3C24XX_DISRCC_INC_INCREMENT	0
50 #define S3C24XX_DISRCC_INC_FIXED	BIT(0)
51 #define S3C24XX_DISRCC_LOC_AHB		0
52 #define S3C24XX_DISRCC_LOC_APB		BIT(1)
53 
54 #define S3C24XX_DIDST			0x08
55 #define S3C24XX_DIDSTC			0x0c
56 #define S3C24XX_DIDSTC_INC_INCREMENT	0
57 #define S3C24XX_DIDSTC_INC_FIXED	BIT(0)
58 #define S3C24XX_DIDSTC_LOC_AHB		0
59 #define S3C24XX_DIDSTC_LOC_APB		BIT(1)
60 #define S3C24XX_DIDSTC_INT_TC0		0
61 #define S3C24XX_DIDSTC_INT_RELOAD	BIT(2)
62 
63 #define S3C24XX_DCON			0x10
64 
65 #define S3C24XX_DCON_TC_MASK		0xfffff
66 #define S3C24XX_DCON_DSZ_BYTE		(0 << 20)
67 #define S3C24XX_DCON_DSZ_HALFWORD	(1 << 20)
68 #define S3C24XX_DCON_DSZ_WORD		(2 << 20)
69 #define S3C24XX_DCON_DSZ_MASK		(3 << 20)
70 #define S3C24XX_DCON_DSZ_SHIFT		20
71 #define S3C24XX_DCON_AUTORELOAD		0
72 #define S3C24XX_DCON_NORELOAD		BIT(22)
73 #define S3C24XX_DCON_HWTRIG		BIT(23)
74 #define S3C24XX_DCON_HWSRC_SHIFT	24
75 #define S3C24XX_DCON_SERV_SINGLE	0
76 #define S3C24XX_DCON_SERV_WHOLE		BIT(27)
77 #define S3C24XX_DCON_TSZ_UNIT		0
78 #define S3C24XX_DCON_TSZ_BURST4		BIT(28)
79 #define S3C24XX_DCON_INT		BIT(29)
80 #define S3C24XX_DCON_SYNC_PCLK		0
81 #define S3C24XX_DCON_SYNC_HCLK		BIT(30)
82 #define S3C24XX_DCON_DEMAND		0
83 #define S3C24XX_DCON_HANDSHAKE		BIT(31)
84 
85 #define S3C24XX_DSTAT			0x14
86 #define S3C24XX_DSTAT_STAT_BUSY		BIT(20)
87 #define S3C24XX_DSTAT_CURRTC_MASK	0xfffff
88 
89 #define S3C24XX_DMASKTRIG		0x20
90 #define S3C24XX_DMASKTRIG_SWTRIG	BIT(0)
91 #define S3C24XX_DMASKTRIG_ON		BIT(1)
92 #define S3C24XX_DMASKTRIG_STOP		BIT(2)
93 
94 #define S3C24XX_DMAREQSEL		0x24
95 #define S3C24XX_DMAREQSEL_HW		BIT(0)
96 
97 /*
98  * On S3C2410, S3C2440 and S3C2442 SoCs a DMA request source cannot be
99  * routed to an arbitrary physical channel; only specific channels are valid.
100  * All of these SoCs have 4 physical channels and the number of request
101  * source bits is 3. Additionally we need 1 bit to mark the channel
102  * as valid.
103  * Therefore we separate the chansel element of the channel data into 4
104  * parts of 4 bits each, to hold the information if the channel is valid
105  * and the hw request source to use.
106  *
107  * Example:
108  * SDI is valid on channels 0, 2 and 3 - with varying hw request sources.
109  * For it the chansel field would look like
110  *
111  * ((BIT(3) | 1) << 3 * 4) | // channel 3, with request source 1
112  * ((BIT(3) | 2) << 2 * 4) | // channel 2, with request source 2
113  * ((BIT(3) | 2) << 0 * 4)   // channel 0, with request source 2
114  */
115 #define S3C24XX_CHANSEL_WIDTH		4
116 #define S3C24XX_CHANSEL_VALID		BIT(3)
117 #define S3C24XX_CHANSEL_REQ_MASK	7
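
/*
 * Expressed with the macros above, the chansel value for the SDI example
 * would be built roughly like this (illustrative only; the real values
 * live in the machine's platform data):
 *
 *	.chansel = ((S3C24XX_CHANSEL_VALID | 1) << (3 * S3C24XX_CHANSEL_WIDTH)) |
 *		   ((S3C24XX_CHANSEL_VALID | 2) << (2 * S3C24XX_CHANSEL_WIDTH)) |
 *		   ((S3C24XX_CHANSEL_VALID | 2) << (0 * S3C24XX_CHANSEL_WIDTH)),
 */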
118 
119 /*
120  * struct soc_data - vendor-specific config parameters for individual SoCs
121  * @stride: spacing between the registers of each channel
122  * @has_reqsel: does the controller use the newer request-selection mechanism
123  * @has_clocks: are controllable dma-clocks present
124  */
125 struct soc_data {
126 	int stride;
127 	bool has_reqsel;
128 	bool has_clocks;
129 };
130 
131 /*
132  * enum s3c24xx_dma_chan_state - holds the virtual channel states
133  * @S3C24XX_DMA_CHAN_IDLE: the channel is idle
134  * @S3C24XX_DMA_CHAN_RUNNING: the channel has allocated a physical transport
135  * channel and is running a transfer on it
136  * @S3C24XX_DMA_CHAN_WAITING: the channel is waiting for a physical transport
137  * channel to become available (only pertains to memcpy channels)
138  */
139 enum s3c24xx_dma_chan_state {
140 	S3C24XX_DMA_CHAN_IDLE,
141 	S3C24XX_DMA_CHAN_RUNNING,
142 	S3C24XX_DMA_CHAN_WAITING,
143 };
144 
145 /*
146  * struct s3c24xx_sg - structure containing data per sg
147  * @src_addr: src address of sg
148  * @dst_addr: dst address of sg
149  * @len: transfer len in bytes
150  * @node: node for txd's dsg_list
151  */
152 struct s3c24xx_sg {
153 	dma_addr_t src_addr;
154 	dma_addr_t dst_addr;
155 	size_t len;
156 	struct list_head node;
157 };
158 
159 /*
160  * struct s3c24xx_txd - wrapper for struct dma_async_tx_descriptor
161  * @vd: virtual DMA descriptor
162  * @dsg_list: list of children sg's
163  * @at: sg currently being transferred
164  * @width: transfer width
165  * @disrcc: value for source control register
166  * @didstc: value for destination control register
167  * @dcon: base value for dcon register
168  * @cyclic: indicate cyclic transfer
169  */
170 struct s3c24xx_txd {
171 	struct virt_dma_desc vd;
172 	struct list_head dsg_list;
173 	struct list_head *at;
174 	u8 width;
175 	u32 disrcc;
176 	u32 didstc;
177 	u32 dcon;
178 	bool cyclic;
179 };
180 
181 struct s3c24xx_dma_chan;
182 
183 /*
184  * struct s3c24xx_dma_phy - holder for the physical channels
185  * @id: physical index to this channel
186  * @valid: does the channel have all required elements
187  * @base: virtual memory base (remapped) for this channel
188  * @irq: interrupt for this channel
189  * @clk: clock for this channel
190  * @lock: a lock to use when altering an instance of this struct
191  * @serving: virtual channel currently being served by this physical channel
192  * @host: a pointer to the host (internal use)
193  */
194 struct s3c24xx_dma_phy {
195 	unsigned int			id;
196 	bool				valid;
197 	void __iomem			*base;
198 	int				irq;
199 	struct clk			*clk;
200 	spinlock_t			lock;
201 	struct s3c24xx_dma_chan		*serving;
202 	struct s3c24xx_dma_engine	*host;
203 };
204 
205 /*
206  * struct s3c24xx_dma_chan - this structure wraps a DMA ENGINE channel
207  * @id: the id of the channel
208  * @name: name of the channel
209  * @vc: wrapped virtual channel
210  * @phy: the physical channel utilized by this channel, if there is one
211  * @cfg: slave transfer configuration for this channel, set at runtime
212  * @at: active transaction on this channel
213  * @lock: a lock for this channel data
214  * @host: a pointer to the host (internal use)
215  * @state: whether the channel is idle, running etc
216  * @slave: whether this channel is a device (slave) or for memcpy
217  */
218 struct s3c24xx_dma_chan {
219 	int id;
220 	const char *name;
221 	struct virt_dma_chan vc;
222 	struct s3c24xx_dma_phy *phy;
223 	struct dma_slave_config cfg;
224 	struct s3c24xx_txd *at;
225 	struct s3c24xx_dma_engine *host;
226 	enum s3c24xx_dma_chan_state state;
227 	bool slave;
228 };
229 
230 /*
231  * struct s3c24xx_dma_engine - the local state holder for the S3C24XX
232  * @pdev: the corresponding platform device
233  * @pdata: platform data passed in from the platform/machine
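 * @sdata: soc-specific parameters for this controller instance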
234  * @base: virtual memory base (remapped)
235  * @slave: slave engine for this instance
236  * @memcpy: memcpy engine for this instance
237  * @phy_chans: array of data for the physical channels
238  */
239 struct s3c24xx_dma_engine {
240 	struct platform_device			*pdev;
241 	const struct s3c24xx_dma_platdata	*pdata;
242 	struct soc_data				*sdata;
243 	void __iomem				*base;
244 	struct dma_device			slave;
245 	struct dma_device			memcpy;
246 	struct s3c24xx_dma_phy			*phy_chans;
247 };
248 
249 /*
250  * Physical channel handling
251  */
252 
253 /*
254  * Check whether a certain channel is busy or not.
255  */
256 static int s3c24xx_dma_phy_busy(struct s3c24xx_dma_phy *phy)
257 {
258 	unsigned int val = readl(phy->base + S3C24XX_DSTAT);
259 	return val & S3C24XX_DSTAT_STAT_BUSY;
260 }
261 
262 static bool s3c24xx_dma_phy_valid(struct s3c24xx_dma_chan *s3cchan,
263 				  struct s3c24xx_dma_phy *phy)
264 {
265 	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
266 	const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
267 	struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
268 	int phyvalid;
269 
270 	/* every phy is valid for memcpy channels */
271 	if (!s3cchan->slave)
272 		return true;
273 
274 	/* On newer variants all phys can be used for all virtual channels */
275 	if (s3cdma->sdata->has_reqsel)
276 		return true;
277 
278 	phyvalid = (cdata->chansel >> (phy->id * S3C24XX_CHANSEL_WIDTH));
279 	return (phyvalid & S3C24XX_CHANSEL_VALID) ? true : false;
280 }
281 
282 /*
283  * Allocate a physical channel for a virtual channel
284  *
285  * Try to locate a physical channel to be used for this transfer. If all
286  * are taken return NULL and the requester will have to cope by using
287  * some fallback PIO mode or retrying later.
288  */
289 static
290 struct s3c24xx_dma_phy *s3c24xx_dma_get_phy(struct s3c24xx_dma_chan *s3cchan)
291 {
292 	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
293 	struct s3c24xx_dma_phy *phy = NULL;
294 	unsigned long flags;
295 	int i;
296 	int ret;
297 
298 	for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) {
299 		phy = &s3cdma->phy_chans[i];
300 
301 		if (!phy->valid)
302 			continue;
303 
304 		if (!s3c24xx_dma_phy_valid(s3cchan, phy))
305 			continue;
306 
307 		spin_lock_irqsave(&phy->lock, flags);
308 
309 		if (!phy->serving) {
310 			phy->serving = s3cchan;
311 			spin_unlock_irqrestore(&phy->lock, flags);
312 			break;
313 		}
314 
315 		spin_unlock_irqrestore(&phy->lock, flags);
316 	}
317 
318 	/* No physical channel available, cope with it */
319 	if (i == s3cdma->pdata->num_phy_channels) {
320 		dev_warn(&s3cdma->pdev->dev, "no phy channel available\n");
321 		return NULL;
322 	}
323 
324 	/* start the phy clock */
325 	if (s3cdma->sdata->has_clocks) {
326 		ret = clk_enable(phy->clk);
327 		if (ret) {
328 			dev_err(&s3cdma->pdev->dev, "could not enable clock for channel %d, err %d\n",
329 				phy->id, ret);
330 			phy->serving = NULL;
331 			return NULL;
332 		}
333 	}
334 
335 	return phy;
336 }
337 
338 /*
339  * Mark the physical channel as free.
340  *
341  * This drops the link between the physical and virtual channel.
342  */
343 static inline void s3c24xx_dma_put_phy(struct s3c24xx_dma_phy *phy)
344 {
345 	struct s3c24xx_dma_engine *s3cdma = phy->host;
346 
347 	if (s3cdma->sdata->has_clocks)
348 		clk_disable(phy->clk);
349 
350 	phy->serving = NULL;
351 }
352 
353 /*
354  * Stops the channel by writing the stop bit.
355  * This should not be used for an on-going transfer, but as a method of
356  * shutting down a channel (eg, when it's no longer used) or terminating a
357  * transfer.
358  */
359 static void s3c24xx_dma_terminate_phy(struct s3c24xx_dma_phy *phy)
360 {
361 	writel(S3C24XX_DMASKTRIG_STOP, phy->base + S3C24XX_DMASKTRIG);
362 }
363 
364 /*
365  * Virtual channel handling
366  */
367 
368 static inline
369 struct s3c24xx_dma_chan *to_s3c24xx_dma_chan(struct dma_chan *chan)
370 {
371 	return container_of(chan, struct s3c24xx_dma_chan, vc.chan);
372 }
373 
374 static u32 s3c24xx_dma_getbytes_chan(struct s3c24xx_dma_chan *s3cchan)
375 {
376 	struct s3c24xx_dma_phy *phy = s3cchan->phy;
377 	struct s3c24xx_txd *txd = s3cchan->at;
378 	u32 tc = readl(phy->base + S3C24XX_DSTAT) & S3C24XX_DSTAT_CURRTC_MASK;
379 
380 	return tc * txd->width;
381 }
382 
383 static int s3c24xx_dma_set_runtime_config(struct dma_chan *chan,
384 				  struct dma_slave_config *config)
385 {
386 	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
387 	unsigned long flags;
388 	int ret = 0;
389 
390 	/* Reject definitely invalid configurations */
391 	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
392 	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
393 		return -EINVAL;
394 
395 	spin_lock_irqsave(&s3cchan->vc.lock, flags);
396 
397 	if (!s3cchan->slave) {
398 		ret = -EINVAL;
399 		goto out;
400 	}
401 
402 	s3cchan->cfg = *config;
403 
404 out:
405 	spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
406 	return ret;
407 }
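
/*
 * For reference, a minimal sketch of how a client feeds this callback
 * through the generic dmaengine API ("fifo_phys" is a placeholder for the
 * peripheral's FIFO address, not something defined here):
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= fifo_phys,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
 *	};
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 */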
408 
409 /*
410  * Transfer handling
411  */
412 
413 static inline
414 struct s3c24xx_txd *to_s3c24xx_txd(struct dma_async_tx_descriptor *tx)
415 {
416 	return container_of(tx, struct s3c24xx_txd, vd.tx);
417 }
418 
419 static struct s3c24xx_txd *s3c24xx_dma_get_txd(void)
420 {
421 	struct s3c24xx_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
422 
423 	if (txd) {
424 		INIT_LIST_HEAD(&txd->dsg_list);
425 		txd->dcon = S3C24XX_DCON_INT | S3C24XX_DCON_NORELOAD;
426 	}
427 
428 	return txd;
429 }
430 
431 static void s3c24xx_dma_free_txd(struct s3c24xx_txd *txd)
432 {
433 	struct s3c24xx_sg *dsg, *_dsg;
434 
435 	list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
436 		list_del(&dsg->node);
437 		kfree(dsg);
438 	}
439 
440 	kfree(txd);
441 }
442 
443 static void s3c24xx_dma_start_next_sg(struct s3c24xx_dma_chan *s3cchan,
444 				       struct s3c24xx_txd *txd)
445 {
446 	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
447 	struct s3c24xx_dma_phy *phy = s3cchan->phy;
448 	const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
449 	struct s3c24xx_sg *dsg = list_entry(txd->at, struct s3c24xx_sg, node);
450 	u32 dcon = txd->dcon;
451 	u32 val;
452 
453 	/* transfer-size and -count from len and width */
454 	switch (txd->width) {
455 	case 1:
456 		dcon |= S3C24XX_DCON_DSZ_BYTE | dsg->len;
457 		break;
458 	case 2:
459 		dcon |= S3C24XX_DCON_DSZ_HALFWORD | (dsg->len / 2);
460 		break;
461 	case 4:
462 		dcon |= S3C24XX_DCON_DSZ_WORD | (dsg->len / 4);
463 		break;
464 	}
465 
466 	if (s3cchan->slave) {
467 		struct s3c24xx_dma_channel *cdata =
468 					&pdata->channels[s3cchan->id];
469 
470 		if (s3cdma->sdata->has_reqsel) {
471 			writel_relaxed((cdata->chansel << 1) |
472 							S3C24XX_DMAREQSEL_HW,
473 					phy->base + S3C24XX_DMAREQSEL);
474 		} else {
475 			int csel = cdata->chansel >> (phy->id *
476 							S3C24XX_CHANSEL_WIDTH);
477 
478 			csel &= S3C24XX_CHANSEL_REQ_MASK;
479 			dcon |= csel << S3C24XX_DCON_HWSRC_SHIFT;
480 			dcon |= S3C24XX_DCON_HWTRIG;
481 		}
482 	} else {
483 		if (s3cdma->sdata->has_reqsel)
484 			writel_relaxed(0, phy->base + S3C24XX_DMAREQSEL);
485 	}
486 
487 	writel_relaxed(dsg->src_addr, phy->base + S3C24XX_DISRC);
488 	writel_relaxed(txd->disrcc, phy->base + S3C24XX_DISRCC);
489 	writel_relaxed(dsg->dst_addr, phy->base + S3C24XX_DIDST);
490 	writel_relaxed(txd->didstc, phy->base + S3C24XX_DIDSTC);
491 	writel_relaxed(dcon, phy->base + S3C24XX_DCON);
492 
493 	val = readl_relaxed(phy->base + S3C24XX_DMASKTRIG);
494 	val &= ~S3C24XX_DMASKTRIG_STOP;
495 	val |= S3C24XX_DMASKTRIG_ON;
496 
497 	/* trigger the dma operation for memcpy transfers */
498 	if (!s3cchan->slave)
499 		val |= S3C24XX_DMASKTRIG_SWTRIG;
500 
501 	writel(val, phy->base + S3C24XX_DMASKTRIG);
502 }
503 
504 /*
505  * Set the initial DMA register values and start first sg.
506  */
507 static void s3c24xx_dma_start_next_txd(struct s3c24xx_dma_chan *s3cchan)
508 {
509 	struct s3c24xx_dma_phy *phy = s3cchan->phy;
510 	struct virt_dma_desc *vd = vchan_next_desc(&s3cchan->vc);
511 	struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);
512 
513 	list_del(&txd->vd.node);
514 
515 	s3cchan->at = txd;
516 
517 	/* Wait for channel inactive */
518 	while (s3c24xx_dma_phy_busy(phy))
519 		cpu_relax();
520 
521 	/* point to the first element of the sg list */
522 	txd->at = txd->dsg_list.next;
523 	s3c24xx_dma_start_next_sg(s3cchan, txd);
524 }
525 
526 static void s3c24xx_dma_free_txd_list(struct s3c24xx_dma_engine *s3cdma,
527 				struct s3c24xx_dma_chan *s3cchan)
528 {
529 	LIST_HEAD(head);
530 
531 	vchan_get_all_descriptors(&s3cchan->vc, &head);
532 	vchan_dma_desc_free_list(&s3cchan->vc, &head);
533 }
534 
535 /*
536  * Try to allocate a physical channel.  When successful, assign it to
537  * this virtual channel, and initiate the next descriptor.  The
538  * virtual channel lock must be held at this point.
539  */
540 static void s3c24xx_dma_phy_alloc_and_start(struct s3c24xx_dma_chan *s3cchan)
541 {
542 	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
543 	struct s3c24xx_dma_phy *phy;
544 
545 	phy = s3c24xx_dma_get_phy(s3cchan);
546 	if (!phy) {
547 		dev_dbg(&s3cdma->pdev->dev, "no physical channel available for xfer on %s\n",
548 			s3cchan->name);
549 		s3cchan->state = S3C24XX_DMA_CHAN_WAITING;
550 		return;
551 	}
552 
553 	dev_dbg(&s3cdma->pdev->dev, "allocated physical channel %d for xfer on %s\n",
554 		phy->id, s3cchan->name);
555 
556 	s3cchan->phy = phy;
557 	s3cchan->state = S3C24XX_DMA_CHAN_RUNNING;
558 
559 	s3c24xx_dma_start_next_txd(s3cchan);
560 }
561 
562 static void s3c24xx_dma_phy_reassign_start(struct s3c24xx_dma_phy *phy,
563 	struct s3c24xx_dma_chan *s3cchan)
564 {
565 	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
566 
567 	dev_dbg(&s3cdma->pdev->dev, "reassigned physical channel %d for xfer on %s\n",
568 		phy->id, s3cchan->name);
569 
570 	/*
571 	 * We do this without taking the lock; we're really only concerned
572 	 * about whether this pointer is NULL or not, and we're guaranteed
573 	 * that this will only be called when it _already_ is non-NULL.
574 	 */
575 	phy->serving = s3cchan;
576 	s3cchan->phy = phy;
577 	s3cchan->state = S3C24XX_DMA_CHAN_RUNNING;
578 	s3c24xx_dma_start_next_txd(s3cchan);
579 }
580 
581 /*
582  * Free a physical DMA channel, potentially reallocating it to another
583  * virtual channel if we have any pending.
584  */
585 static void s3c24xx_dma_phy_free(struct s3c24xx_dma_chan *s3cchan)
586 {
587 	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
588 	struct s3c24xx_dma_chan *p, *next;
589 
590 retry:
591 	next = NULL;
592 
593 	/* Find a waiting virtual channel for the next transfer. */
594 	list_for_each_entry(p, &s3cdma->memcpy.channels, vc.chan.device_node)
595 		if (p->state == S3C24XX_DMA_CHAN_WAITING) {
596 			next = p;
597 			break;
598 		}
599 
600 	if (!next) {
601 		list_for_each_entry(p, &s3cdma->slave.channels,
602 				    vc.chan.device_node)
603 			if (p->state == S3C24XX_DMA_CHAN_WAITING &&
604 				      s3c24xx_dma_phy_valid(p, s3cchan->phy)) {
605 				next = p;
606 				break;
607 			}
608 	}
609 
610 	/* Ensure that the physical channel is stopped */
611 	s3c24xx_dma_terminate_phy(s3cchan->phy);
612 
613 	if (next) {
614 		bool success;
615 
616 		/*
617 		 * Eww.  We know this isn't going to deadlock
618 		 * but lockdep probably doesn't.
619 		 */
620 		spin_lock(&next->vc.lock);
621 		/* Re-check the state now that we have the lock */
622 		success = next->state == S3C24XX_DMA_CHAN_WAITING;
623 		if (success)
624 			s3c24xx_dma_phy_reassign_start(s3cchan->phy, next);
625 		spin_unlock(&next->vc.lock);
626 
627 		/* If the state changed, try to find another channel */
628 		if (!success)
629 			goto retry;
630 	} else {
631 		/* No more jobs, so free up the physical channel */
632 		s3c24xx_dma_put_phy(s3cchan->phy);
633 	}
634 
635 	s3cchan->phy = NULL;
636 	s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
637 }
638 
639 static void s3c24xx_dma_desc_free(struct virt_dma_desc *vd)
640 {
641 	struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);
642 	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(vd->tx.chan);
643 
644 	if (!s3cchan->slave)
645 		dma_descriptor_unmap(&vd->tx);
646 
647 	s3c24xx_dma_free_txd(txd);
648 }
649 
650 static irqreturn_t s3c24xx_dma_irq(int irq, void *data)
651 {
652 	struct s3c24xx_dma_phy *phy = data;
653 	struct s3c24xx_dma_chan *s3cchan = phy->serving;
654 	struct s3c24xx_txd *txd;
655 
656 	dev_dbg(&phy->host->pdev->dev, "interrupt on channel %d\n", phy->id);
657 
658 	/*
659 	 * Interrupts happen to notify the completion of a transfer and the
660 	 * channel should have moved into its stop state already on its own.
661 	 * Therefore interrupts on channels not bound to a virtual channel
662 	 * should never happen. Nevertheless send a terminate command to the
663 	 * channel if the unlikely case happens.
664 	 */
665 	if (unlikely(!s3cchan)) {
666 		dev_err(&phy->host->pdev->dev, "interrupt on unused channel %d\n",
667 			phy->id);
668 
669 		s3c24xx_dma_terminate_phy(phy);
670 
671 		return IRQ_HANDLED;
672 	}
673 
674 	spin_lock(&s3cchan->vc.lock);
675 	txd = s3cchan->at;
676 	if (txd) {
677 		/* when more sg's are in this txd, start the next one */
678 		if (!list_is_last(txd->at, &txd->dsg_list)) {
679 			txd->at = txd->at->next;
680 			if (txd->cyclic)
681 				vchan_cyclic_callback(&txd->vd);
682 			s3c24xx_dma_start_next_sg(s3cchan, txd);
683 		} else if (!txd->cyclic) {
684 			s3cchan->at = NULL;
685 			vchan_cookie_complete(&txd->vd);
686 
687 			/*
688 			 * And start the next descriptor (if any),
689 			 * otherwise free this channel.
690 			 */
691 			if (vchan_next_desc(&s3cchan->vc))
692 				s3c24xx_dma_start_next_txd(s3cchan);
693 			else
694 				s3c24xx_dma_phy_free(s3cchan);
695 		} else {
696 			vchan_cyclic_callback(&txd->vd);
697 
698 			/* Cyclic: reset at beginning */
699 			txd->at = txd->dsg_list.next;
700 			s3c24xx_dma_start_next_sg(s3cchan, txd);
701 		}
702 	}
703 	spin_unlock(&s3cchan->vc.lock);
704 
705 	return IRQ_HANDLED;
706 }
707 
708 /*
709  * The DMA ENGINE API
710  */
711 
712 static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
713 {
714 	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
715 	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
716 	unsigned long flags;
717 	int ret = 0;
718 
719 	spin_lock_irqsave(&s3cchan->vc.lock, flags);
720 
721 	if (!s3cchan->phy && !s3cchan->at) {
722 		dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n",
723 			s3cchan->id);
724 		ret = -EINVAL;
725 		goto unlock;
726 	}
727 
728 	s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
729 
730 	/* Mark physical channel as free */
731 	if (s3cchan->phy)
732 		s3c24xx_dma_phy_free(s3cchan);
733 
734 	/* Dequeue current job */
735 	if (s3cchan->at) {
736 		vchan_terminate_vdesc(&s3cchan->at->vd);
737 		s3cchan->at = NULL;
738 	}
739 
740 	/* Dequeue jobs not yet fired as well */
741 	s3c24xx_dma_free_txd_list(s3cdma, s3cchan);
742 unlock:
743 	spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
744 
745 	return ret;
746 }
747 
748 static void s3c24xx_dma_synchronize(struct dma_chan *chan)
749 {
750 	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
751 
752 	vchan_synchronize(&s3cchan->vc);
753 }
754 
755 static void s3c24xx_dma_free_chan_resources(struct dma_chan *chan)
756 {
757 	/* Ensure all queued descriptors are freed */
758 	vchan_free_chan_resources(to_virt_chan(chan));
759 }
760 
761 static enum dma_status s3c24xx_dma_tx_status(struct dma_chan *chan,
762 		dma_cookie_t cookie, struct dma_tx_state *txstate)
763 {
764 	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
765 	struct s3c24xx_txd *txd;
766 	struct s3c24xx_sg *dsg;
767 	struct virt_dma_desc *vd;
768 	unsigned long flags;
769 	enum dma_status ret;
770 	size_t bytes = 0;
771 
772 	spin_lock_irqsave(&s3cchan->vc.lock, flags);
773 	ret = dma_cookie_status(chan, cookie, txstate);
774 
775 	/*
776 	 * There's no point calculating the residue if there's
777 	 * no txstate to store the value.
778 	 */
779 	if (ret == DMA_COMPLETE || !txstate) {
780 		spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
781 		return ret;
782 	}
783 
784 	vd = vchan_find_desc(&s3cchan->vc, cookie);
785 	if (vd) {
786 		/* On the issued list, so hasn't been processed yet */
787 		txd = to_s3c24xx_txd(&vd->tx);
788 
789 		list_for_each_entry(dsg, &txd->dsg_list, node)
790 			bytes += dsg->len;
791 	} else {
792 		/*
793 		 * Currently running, so sum over the pending sg's and
794 		 * the currently active one.
795 		 */
796 		txd = s3cchan->at;
797 
798 		dsg = list_entry(txd->at, struct s3c24xx_sg, node);
799 		list_for_each_entry_from(dsg, &txd->dsg_list, node)
800 			bytes += dsg->len;
801 
802 		bytes += s3c24xx_dma_getbytes_chan(s3cchan);
803 	}
804 	spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
805 
806 	/*
807 	 * This cookie not complete yet
808 	 * Get number of bytes left in the active transactions and queue
809 	 */
810 	dma_set_residue(txstate, bytes);
811 
812 	/* Whether waiting or running, we're in progress */
813 	return ret;
814 }
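
/*
 * A client normally reads this state through the generic helpers, roughly
 * like the sketch below ("chan" and "cookie" come from earlier prep/submit
 * calls):
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status != DMA_COMPLETE)
 *		pr_debug("still in flight, residue %u bytes\n", state.residue);
 */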
815 
816 /*
817  * Initialize a descriptor to be used by memcpy submit
818  */
819 static struct dma_async_tx_descriptor *s3c24xx_dma_prep_memcpy(
820 		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
821 		size_t len, unsigned long flags)
822 {
823 	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
824 	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
825 	struct s3c24xx_txd *txd;
826 	struct s3c24xx_sg *dsg;
827 	int src_mod, dest_mod;
828 
829 	dev_dbg(&s3cdma->pdev->dev, "prepare memcpy of %zu bytes from %s\n",
830 			len, s3cchan->name);
831 
832 	if ((len & S3C24XX_DCON_TC_MASK) != len) {
833 		dev_err(&s3cdma->pdev->dev, "memcpy size %zu too large\n", len);
834 		return NULL;
835 	}
836 
837 	txd = s3c24xx_dma_get_txd();
838 	if (!txd)
839 		return NULL;
840 
841 	dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
842 	if (!dsg) {
843 		s3c24xx_dma_free_txd(txd);
844 		return NULL;
845 	}
846 	list_add_tail(&dsg->node, &txd->dsg_list);
847 
848 	dsg->src_addr = src;
849 	dsg->dst_addr = dest;
850 	dsg->len = len;
851 
852 	/*
853 	 * Determine a suitable transfer width.
854 	 * The DMA controller cannot fetch/store information which is not
855 	 * naturally aligned on the bus, i.e., a 4 byte fetch must start at
856 	 * an address divisible by 4 - more generally addr % width must be 0.
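	 *
	 * For example, a 6-byte copy with src % 4 == 2 and dest % 4 == 0 uses
	 * width 2 (three halfword moves), while any odd length falls back to
	 * single-byte transfers.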
857 	 */
858 	src_mod = src % 4;
859 	dest_mod = dest % 4;
860 	switch (len % 4) {
861 	case 0:
862 		txd->width = (src_mod == 0 && dest_mod == 0) ? 4 : 1;
863 		break;
864 	case 2:
865 		txd->width = ((src_mod == 2 || src_mod == 0) &&
866 			      (dest_mod == 2 || dest_mod == 0)) ? 2 : 1;
867 		break;
868 	default:
869 		txd->width = 1;
870 		break;
871 	}
872 
873 	txd->disrcc = S3C24XX_DISRCC_LOC_AHB | S3C24XX_DISRCC_INC_INCREMENT;
874 	txd->didstc = S3C24XX_DIDSTC_LOC_AHB | S3C24XX_DIDSTC_INC_INCREMENT;
875 	txd->dcon |= S3C24XX_DCON_DEMAND | S3C24XX_DCON_SYNC_HCLK |
876 		     S3C24XX_DCON_SERV_WHOLE;
877 
878 	return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
879 }
880 
881 static struct dma_async_tx_descriptor *s3c24xx_dma_prep_dma_cyclic(
882 	struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
883 	enum dma_transfer_direction direction, unsigned long flags)
884 {
885 	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
886 	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
887 	const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
888 	struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
889 	struct s3c24xx_txd *txd;
890 	struct s3c24xx_sg *dsg;
891 	unsigned sg_len;
892 	dma_addr_t slave_addr;
893 	u32 hwcfg = 0;
894 	int i;
895 
896 	dev_dbg(&s3cdma->pdev->dev,
897 		"prepare cyclic transaction of %zu bytes with period %zu from %s\n",
898 		size, period, s3cchan->name);
899 
900 	if (!is_slave_direction(direction)) {
901 		dev_err(&s3cdma->pdev->dev,
902 			"direction %d unsupported\n", direction);
903 		return NULL;
904 	}
905 
906 	txd = s3c24xx_dma_get_txd();
907 	if (!txd)
908 		return NULL;
909 
910 	txd->cyclic = 1;
911 
912 	if (cdata->handshake)
913 		txd->dcon |= S3C24XX_DCON_HANDSHAKE;
914 
915 	switch (cdata->bus) {
916 	case S3C24XX_DMA_APB:
917 		txd->dcon |= S3C24XX_DCON_SYNC_PCLK;
918 		hwcfg |= S3C24XX_DISRCC_LOC_APB;
919 		break;
920 	case S3C24XX_DMA_AHB:
921 		txd->dcon |= S3C24XX_DCON_SYNC_HCLK;
922 		hwcfg |= S3C24XX_DISRCC_LOC_AHB;
923 		break;
924 	}
925 
926 	/*
927 	 * Always assume our peripheral destination is a fixed
928 	 * address in memory.
929 	 */
930 	hwcfg |= S3C24XX_DISRCC_INC_FIXED;
931 
932 	/*
933 	 * Individual dma operations are requested by the slave,
934 	 * so serve only single atomic operations (S3C24XX_DCON_SERV_SINGLE).
935 	 */
936 	txd->dcon |= S3C24XX_DCON_SERV_SINGLE;
937 
938 	if (direction == DMA_MEM_TO_DEV) {
939 		txd->disrcc = S3C24XX_DISRCC_LOC_AHB |
940 			      S3C24XX_DISRCC_INC_INCREMENT;
941 		txd->didstc = hwcfg;
942 		slave_addr = s3cchan->cfg.dst_addr;
943 		txd->width = s3cchan->cfg.dst_addr_width;
944 	} else {
945 		txd->disrcc = hwcfg;
946 		txd->didstc = S3C24XX_DIDSTC_LOC_AHB |
947 			      S3C24XX_DIDSTC_INC_INCREMENT;
948 		slave_addr = s3cchan->cfg.src_addr;
949 		txd->width = s3cchan->cfg.src_addr_width;
950 	}
951 
952 	sg_len = size / period;
953 
954 	for (i = 0; i < sg_len; i++) {
955 		dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
956 		if (!dsg) {
957 			s3c24xx_dma_free_txd(txd);
958 			return NULL;
959 		}
960 		list_add_tail(&dsg->node, &txd->dsg_list);
961 
962 		dsg->len = period;
963 		/* Check last period length */
964 		if (i == sg_len - 1)
965 			dsg->len = size - period * i;
966 		if (direction == DMA_MEM_TO_DEV) {
967 			dsg->src_addr = addr + period * i;
968 			dsg->dst_addr = slave_addr;
969 		} else { /* DMA_DEV_TO_MEM */
970 			dsg->src_addr = slave_addr;
971 			dsg->dst_addr = addr + period * i;
972 		}
973 	}
974 
975 	return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
976 }
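
/*
 * Sketch of typical client usage for the cyclic case (audio-style double
 * buffering; "buf", "buf_len", "period_len" and "period_done" are
 * placeholders, the callback runs once per completed period):
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
 *					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	desc->callback = period_done;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */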
977 
978 static struct dma_async_tx_descriptor *s3c24xx_dma_prep_slave_sg(
979 		struct dma_chan *chan, struct scatterlist *sgl,
980 		unsigned int sg_len, enum dma_transfer_direction direction,
981 		unsigned long flags, void *context)
982 {
983 	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
984 	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
985 	const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
986 	struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
987 	struct s3c24xx_txd *txd;
988 	struct s3c24xx_sg *dsg;
989 	struct scatterlist *sg;
990 	dma_addr_t slave_addr;
991 	u32 hwcfg = 0;
992 	int tmp;
993 
994 	dev_dbg(&s3cdma->pdev->dev, "prepare transaction of %d bytes from %s\n",
995 			sg_dma_len(sgl), s3cchan->name);
996 
997 	txd = s3c24xx_dma_get_txd();
998 	if (!txd)
999 		return NULL;
1000 
1001 	if (cdata->handshake)
1002 		txd->dcon |= S3C24XX_DCON_HANDSHAKE;
1003 
1004 	switch (cdata->bus) {
1005 	case S3C24XX_DMA_APB:
1006 		txd->dcon |= S3C24XX_DCON_SYNC_PCLK;
1007 		hwcfg |= S3C24XX_DISRCC_LOC_APB;
1008 		break;
1009 	case S3C24XX_DMA_AHB:
1010 		txd->dcon |= S3C24XX_DCON_SYNC_HCLK;
1011 		hwcfg |= S3C24XX_DISRCC_LOC_AHB;
1012 		break;
1013 	}
1014 
1015 	/*
1016 	 * Always assume our peripheral destination is a fixed
1017 	 * address in memory.
1018 	 */
1019 	hwcfg |= S3C24XX_DISRCC_INC_FIXED;
1020 
1021 	/*
1022 	 * Individual dma operations are requested by the slave,
1023 	 * so serve only single atomic operations (S3C24XX_DCON_SERV_SINGLE).
1024 	 */
1025 	txd->dcon |= S3C24XX_DCON_SERV_SINGLE;
1026 
1027 	if (direction == DMA_MEM_TO_DEV) {
1028 		txd->disrcc = S3C24XX_DISRCC_LOC_AHB |
1029 			      S3C24XX_DISRCC_INC_INCREMENT;
1030 		txd->didstc = hwcfg;
1031 		slave_addr = s3cchan->cfg.dst_addr;
1032 		txd->width = s3cchan->cfg.dst_addr_width;
1033 	} else if (direction == DMA_DEV_TO_MEM) {
1034 		txd->disrcc = hwcfg;
1035 		txd->didstc = S3C24XX_DIDSTC_LOC_AHB |
1036 			      S3C24XX_DIDSTC_INC_INCREMENT;
1037 		slave_addr = s3cchan->cfg.src_addr;
1038 		txd->width = s3cchan->cfg.src_addr_width;
1039 	} else {
1040 		s3c24xx_dma_free_txd(txd);
1041 		dev_err(&s3cdma->pdev->dev,
1042 			"direction %d unsupported\n", direction);
1043 		return NULL;
1044 	}
1045 
1046 	for_each_sg(sgl, sg, sg_len, tmp) {
1047 		dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
1048 		if (!dsg) {
1049 			s3c24xx_dma_free_txd(txd);
1050 			return NULL;
1051 		}
1052 		list_add_tail(&dsg->node, &txd->dsg_list);
1053 
1054 		dsg->len = sg_dma_len(sg);
1055 		if (direction == DMA_MEM_TO_DEV) {
1056 			dsg->src_addr = sg_dma_address(sg);
1057 			dsg->dst_addr = slave_addr;
1058 		} else { /* DMA_DEV_TO_MEM */
1059 			dsg->src_addr = slave_addr;
1060 			dsg->dst_addr = sg_dma_address(sg);
1061 		}
1062 	}
1063 
1064 	return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
1065 }
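
/*
 * The corresponding client-side flow for a one-shot slave transfer is
 * roughly (sketch only, "sgl"/"sg_len" describe an already mapped
 * scatterlist):
 *
 *	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */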
1066 
1067 /*
1068  * Slave transactions callback to the slave device to allow
1069  * synchronization of slave DMA signals with the DMAC enable
1070  */
1071 static void s3c24xx_dma_issue_pending(struct dma_chan *chan)
1072 {
1073 	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
1074 	unsigned long flags;
1075 
1076 	spin_lock_irqsave(&s3cchan->vc.lock, flags);
1077 	if (vchan_issue_pending(&s3cchan->vc)) {
1078 		if (!s3cchan->phy && s3cchan->state != S3C24XX_DMA_CHAN_WAITING)
1079 			s3c24xx_dma_phy_alloc_and_start(s3cchan);
1080 	}
1081 	spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
1082 }
1083 
1084 /*
1085  * Bringup and teardown
1086  */
1087 
1088 /*
1089  * Initialise the DMAC memcpy/slave channels.
1090  * Make a local wrapper to hold required data
1091  */
1092 static int s3c24xx_dma_init_virtual_channels(struct s3c24xx_dma_engine *s3cdma,
1093 		struct dma_device *dmadev, unsigned int channels, bool slave)
1094 {
1095 	struct s3c24xx_dma_chan *chan;
1096 	int i;
1097 
1098 	INIT_LIST_HEAD(&dmadev->channels);
1099 
1100 	/*
1101 	 * Register as many memcpy channels as we have physical channels,
1102 	 * we won't always be able to use all but the code will have
1103 	 * to cope with that situation.
1104 	 */
1105 	for (i = 0; i < channels; i++) {
1106 		chan = devm_kzalloc(dmadev->dev, sizeof(*chan), GFP_KERNEL);
1107 		if (!chan)
1108 			return -ENOMEM;
1109 
1110 		chan->id = i;
1111 		chan->host = s3cdma;
1112 		chan->state = S3C24XX_DMA_CHAN_IDLE;
1113 
1114 		if (slave) {
1115 			chan->slave = true;
1116 			chan->name = kasprintf(GFP_KERNEL, "slave%d", i);
1117 			if (!chan->name)
1118 				return -ENOMEM;
1119 		} else {
1120 			chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
1121 			if (!chan->name)
1122 				return -ENOMEM;
1123 		}
1124 		dev_dbg(dmadev->dev,
1125 			 "initialize virtual channel \"%s\"\n",
1126 			 chan->name);
1127 
1128 		chan->vc.desc_free = s3c24xx_dma_desc_free;
1129 		vchan_init(&chan->vc, dmadev);
1130 	}
1131 	dev_info(dmadev->dev, "initialized %d virtual %s channels\n",
1132 		 i, slave ? "slave" : "memcpy");
1133 	return i;
1134 }
1135 
1136 static void s3c24xx_dma_free_virtual_channels(struct dma_device *dmadev)
1137 {
1138 	struct s3c24xx_dma_chan *chan = NULL;
1139 	struct s3c24xx_dma_chan *next;
1140 
1141 	list_for_each_entry_safe(chan,
1142 				 next, &dmadev->channels, vc.chan.device_node) {
1143 		list_del(&chan->vc.chan.device_node);
1144 		tasklet_kill(&chan->vc.task);
1145 	}
1146 }
1147 
1148 /* s3c2410, s3c2440 and s3c2442 have a 0x40 stride without separate clocks */
1149 static struct soc_data soc_s3c2410 = {
1150 	.stride = 0x40,
1151 	.has_reqsel = false,
1152 	.has_clocks = false,
1153 };
1154 
1155 /* s3c2412 and s3c2413 have a 0x40 stride and dmareqsel mechanism */
1156 static struct soc_data soc_s3c2412 = {
1157 	.stride = 0x40,
1158 	.has_reqsel = true,
1159 	.has_clocks = true,
1160 };
1161 
1162 /* s3c2443 and following have a 0x100 stride and dmareqsel mechanism */
1163 static struct soc_data soc_s3c2443 = {
1164 	.stride = 0x100,
1165 	.has_reqsel = true,
1166 	.has_clocks = true,
1167 };
1168 
1169 static const struct platform_device_id s3c24xx_dma_driver_ids[] = {
1170 	{
1171 		.name		= "s3c2410-dma",
1172 		.driver_data	= (kernel_ulong_t)&soc_s3c2410,
1173 	}, {
1174 		.name		= "s3c2412-dma",
1175 		.driver_data	= (kernel_ulong_t)&soc_s3c2412,
1176 	}, {
1177 		.name		= "s3c2443-dma",
1178 		.driver_data	= (kernel_ulong_t)&soc_s3c2443,
1179 	},
1180 	{ },
1181 };
1182 
1183 static struct soc_data *s3c24xx_dma_get_soc_data(struct platform_device *pdev)
1184 {
1185 	return (struct soc_data *)
1186 			 platform_get_device_id(pdev)->driver_data;
1187 }
1188 
1189 static int s3c24xx_dma_probe(struct platform_device *pdev)
1190 {
1191 	const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev);
1192 	struct s3c24xx_dma_engine *s3cdma;
1193 	struct soc_data *sdata;
1194 	struct resource *res;
1195 	int ret;
1196 	int i;
1197 
1198 	if (!pdata) {
1199 		dev_err(&pdev->dev, "platform data missing\n");
1200 		return -ENODEV;
1201 	}
1202 
1203 	/* Basic sanity check */
1204 	if (pdata->num_phy_channels > MAX_DMA_CHANNELS) {
1205 		dev_err(&pdev->dev, "too many dma channels %d, max %d\n",
1206 			pdata->num_phy_channels, MAX_DMA_CHANNELS);
1207 		return -EINVAL;
1208 	}
1209 
1210 	sdata = s3c24xx_dma_get_soc_data(pdev);
1211 	if (!sdata)
1212 		return -EINVAL;
1213 
1214 	s3cdma = devm_kzalloc(&pdev->dev, sizeof(*s3cdma), GFP_KERNEL);
1215 	if (!s3cdma)
1216 		return -ENOMEM;
1217 
1218 	s3cdma->pdev = pdev;
1219 	s3cdma->pdata = pdata;
1220 	s3cdma->sdata = sdata;
1221 
1222 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1223 	s3cdma->base = devm_ioremap_resource(&pdev->dev, res);
1224 	if (IS_ERR(s3cdma->base))
1225 		return PTR_ERR(s3cdma->base);
1226 
1227 	s3cdma->phy_chans = devm_kcalloc(&pdev->dev,
1228 					      pdata->num_phy_channels,
1229 					      sizeof(struct s3c24xx_dma_phy),
1230 					      GFP_KERNEL);
1231 	if (!s3cdma->phy_chans)
1232 		return -ENOMEM;
1233 
1234 	/* acquire irqs and clocks for all physical channels */
1235 	for (i = 0; i < pdata->num_phy_channels; i++) {
1236 		struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
1237 		char clk_name[6];
1238 
1239 		phy->id = i;
1240 		phy->base = s3cdma->base + (i * sdata->stride);
1241 		phy->host = s3cdma;
1242 
1243 		phy->irq = platform_get_irq(pdev, i);
1244 		if (phy->irq < 0) {
1245 			dev_err(&pdev->dev, "failed to get irq %d, err %d\n",
1246 				i, phy->irq);
1247 			continue;
1248 		}
1249 
1250 		ret = devm_request_irq(&pdev->dev, phy->irq, s3c24xx_dma_irq,
1251 				       0, pdev->name, phy);
1252 		if (ret) {
1253 			dev_err(&pdev->dev, "Unable to request irq for channel %d, error %d\n",
1254 				i, ret);
1255 			continue;
1256 		}
1257 
1258 		if (sdata->has_clocks) {
1259 			sprintf(clk_name, "dma.%d", i);
1260 			phy->clk = devm_clk_get(&pdev->dev, clk_name);
1261 			if (IS_ERR(phy->clk)) {
1262 				dev_err(&pdev->dev, "unable to acquire clock for channel %d, error %lu\n",
1263 					i, PTR_ERR(phy->clk));
1264 				continue;
1265 			}
1266 
1267 			ret = clk_prepare(phy->clk);
1268 			if (ret) {
1269 				dev_err(&pdev->dev, "clock for phy %d failed, error %d\n",
1270 					i, ret);
1271 				continue;
1272 			}
1273 		}
1274 
1275 		spin_lock_init(&phy->lock);
1276 		phy->valid = true;
1277 
1278 		dev_dbg(&pdev->dev, "physical channel %d is %s\n",
1279 			i, s3c24xx_dma_phy_busy(phy) ? "BUSY" : "FREE");
1280 	}
1281 
1282 	/* Initialize memcpy engine */
1283 	dma_cap_set(DMA_MEMCPY, s3cdma->memcpy.cap_mask);
1284 	dma_cap_set(DMA_PRIVATE, s3cdma->memcpy.cap_mask);
1285 	s3cdma->memcpy.dev = &pdev->dev;
1286 	s3cdma->memcpy.device_free_chan_resources =
1287 					s3c24xx_dma_free_chan_resources;
1288 	s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy;
1289 	s3cdma->memcpy.device_tx_status = s3c24xx_dma_tx_status;
1290 	s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending;
1291 	s3cdma->memcpy.device_config = s3c24xx_dma_set_runtime_config;
1292 	s3cdma->memcpy.device_terminate_all = s3c24xx_dma_terminate_all;
1293 	s3cdma->memcpy.device_synchronize = s3c24xx_dma_synchronize;
1294 
1295 	/* Initialize slave engine for SoC internal dedicated peripherals */
1296 	dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask);
1297 	dma_cap_set(DMA_CYCLIC, s3cdma->slave.cap_mask);
1298 	dma_cap_set(DMA_PRIVATE, s3cdma->slave.cap_mask);
1299 	s3cdma->slave.dev = &pdev->dev;
1300 	s3cdma->slave.device_free_chan_resources =
1301 					s3c24xx_dma_free_chan_resources;
1302 	s3cdma->slave.device_tx_status = s3c24xx_dma_tx_status;
1303 	s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending;
1304 	s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg;
1305 	s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic;
1306 	s3cdma->slave.device_config = s3c24xx_dma_set_runtime_config;
1307 	s3cdma->slave.device_terminate_all = s3c24xx_dma_terminate_all;
1308 	s3cdma->slave.device_synchronize = s3c24xx_dma_synchronize;
1309 	s3cdma->slave.filter.map = pdata->slave_map;
1310 	s3cdma->slave.filter.mapcnt = pdata->slavecnt;
1311 	s3cdma->slave.filter.fn = s3c24xx_dma_filter;
1312 
1313 	/* Register as many memcpy channels as there are physical channels */
1314 	ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy,
1315 						pdata->num_phy_channels, false);
1316 	if (ret <= 0) {
1317 		dev_warn(&pdev->dev,
1318 			 "%s failed to enumerate memcpy channels - %d\n",
1319 			 __func__, ret);
1320 		goto err_memcpy;
1321 	}
1322 
1323 	/* Register slave channels */
1324 	ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->slave,
1325 				pdata->num_channels, true);
1326 	if (ret <= 0) {
1327 		dev_warn(&pdev->dev,
1328 			"%s failed to enumerate slave channels - %d\n",
1329 				__func__, ret);
1330 		goto err_slave;
1331 	}
1332 
1333 	ret = dma_async_device_register(&s3cdma->memcpy);
1334 	if (ret) {
1335 		dev_warn(&pdev->dev,
1336 			"%s failed to register memcpy as an async device - %d\n",
1337 			__func__, ret);
1338 		goto err_memcpy_reg;
1339 	}
1340 
1341 	ret = dma_async_device_register(&s3cdma->slave);
1342 	if (ret) {
1343 		dev_warn(&pdev->dev,
1344 			"%s failed to register slave as an async device - %d\n",
1345 			__func__, ret);
1346 		goto err_slave_reg;
1347 	}
1348 
1349 	platform_set_drvdata(pdev, s3cdma);
1350 	dev_info(&pdev->dev, "Loaded dma driver with %d physical channels\n",
1351 		 pdata->num_phy_channels);
1352 
1353 	return 0;
1354 
1355 err_slave_reg:
1356 	dma_async_device_unregister(&s3cdma->memcpy);
1357 err_memcpy_reg:
1358 	s3c24xx_dma_free_virtual_channels(&s3cdma->slave);
1359 err_slave:
1360 	s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy);
1361 err_memcpy:
1362 	if (sdata->has_clocks)
1363 		for (i = 0; i < pdata->num_phy_channels; i++) {
1364 			struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
1365 			if (phy->valid)
1366 				clk_unprepare(phy->clk);
1367 		}
1368 
1369 	return ret;
1370 }
1371 
1372 static void s3c24xx_dma_free_irq(struct platform_device *pdev,
1373 				struct s3c24xx_dma_engine *s3cdma)
1374 {
1375 	int i;
1376 
1377 	for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) {
1378 		struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
1379 
1380 		devm_free_irq(&pdev->dev, phy->irq, phy);
1381 	}
1382 }
1383 
1384 static int s3c24xx_dma_remove(struct platform_device *pdev)
1385 {
1386 	const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev);
1387 	struct s3c24xx_dma_engine *s3cdma = platform_get_drvdata(pdev);
1388 	struct soc_data *sdata = s3c24xx_dma_get_soc_data(pdev);
1389 	int i;
1390 
1391 	dma_async_device_unregister(&s3cdma->slave);
1392 	dma_async_device_unregister(&s3cdma->memcpy);
1393 
1394 	s3c24xx_dma_free_irq(pdev, s3cdma);
1395 
1396 	s3c24xx_dma_free_virtual_channels(&s3cdma->slave);
1397 	s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy);
1398 
1399 	if (sdata->has_clocks)
1400 		for (i = 0; i < pdata->num_phy_channels; i++) {
1401 			struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i];
1402 			if (phy->valid)
1403 				clk_unprepare(phy->clk);
1404 		}
1405 
1406 	return 0;
1407 }
1408 
1409 static struct platform_driver s3c24xx_dma_driver = {
1410 	.driver		= {
1411 		.name	= "s3c24xx-dma",
1412 	},
1413 	.id_table	= s3c24xx_dma_driver_ids,
1414 	.probe		= s3c24xx_dma_probe,
1415 	.remove		= s3c24xx_dma_remove,
1416 };
1417 
1418 module_platform_driver(s3c24xx_dma_driver);
1419 
1420 bool s3c24xx_dma_filter(struct dma_chan *chan, void *param)
1421 {
1422 	struct s3c24xx_dma_chan *s3cchan;
1423 
1424 	if (chan->device->dev->driver != &s3c24xx_dma_driver.driver)
1425 		return false;
1426 
1427 	s3cchan = to_s3c24xx_dma_chan(chan);
1428 
1429 	return s3cchan->id == (uintptr_t)param;
1430 }
1431 EXPORT_SYMBOL(s3c24xx_dma_filter);
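
/*
 * Sketch of how a legacy client could use the filter directly to claim a
 * specific slave channel ("id" being the requested virtual channel index);
 * with the slave_map wired up in probe, dma_request_chan(dev, "name") is
 * the preferred path:
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, s3c24xx_dma_filter,
 *				   (void *)(uintptr_t)id);
 */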
1432 
1433 MODULE_DESCRIPTION("S3C24XX DMA Driver");
1434 MODULE_AUTHOR("Heiko Stuebner");
1435 MODULE_LICENSE("GPL v2");
1436