/*
 * Driver for the Cirrus Logic EP93xx DMA Controller
 *
 * Copyright (C) 2011 Mika Westerberg
 *
 * DMA M2P implementation is based on the original
 * arch/arm/mach-ep93xx/dma-m2p.c which has the following copyrights:
 *
 *   Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
 *   Copyright (C) 2006 Applied Data Systems
 *   Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
 *
 * This driver is based on the dw_dmac and amba-pl08x drivers.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <linux/platform_data/dma-ep93xx.h>

#include "dmaengine.h"

/* M2P registers */
#define M2P_CONTROL			0x0000
#define M2P_CONTROL_STALLINT		BIT(0)
#define M2P_CONTROL_NFBINT		BIT(1)
#define M2P_CONTROL_CH_ERROR_INT	BIT(3)
#define M2P_CONTROL_ENABLE		BIT(4)
#define M2P_CONTROL_ICE			BIT(6)

#define M2P_INTERRUPT			0x0004
#define M2P_INTERRUPT_STALL		BIT(0)
#define M2P_INTERRUPT_NFB		BIT(1)
#define M2P_INTERRUPT_ERROR		BIT(3)

#define M2P_PPALLOC			0x0008
#define M2P_STATUS			0x000c

#define M2P_MAXCNT0			0x0020
#define M2P_BASE0			0x0024
#define M2P_MAXCNT1			0x0030
#define M2P_BASE1			0x0034

#define M2P_STATE_IDLE			0
#define M2P_STATE_STALL			1
#define M2P_STATE_ON			2
#define M2P_STATE_NEXT			3

/* M2M registers */
#define M2M_CONTROL			0x0000
#define M2M_CONTROL_DONEINT		BIT(2)
#define M2M_CONTROL_ENABLE		BIT(3)
#define M2M_CONTROL_START		BIT(4)
#define M2M_CONTROL_DAH			BIT(11)
#define M2M_CONTROL_SAH			BIT(12)
#define M2M_CONTROL_PW_SHIFT		9
#define M2M_CONTROL_PW_8		(0 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_16		(1 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_32		(2 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_MASK		(3 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_TM_SHIFT		13
#define M2M_CONTROL_TM_TX		(1 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_TM_RX		(2 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_NFBINT		BIT(21)
#define M2M_CONTROL_RSS_SHIFT		22
#define M2M_CONTROL_RSS_SSPRX		(1 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_SSPTX		(2 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_IDE		(3 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_NO_HDSK		BIT(24)
#define M2M_CONTROL_PWSC_SHIFT		25

#define M2M_INTERRUPT			0x0004
#define M2M_INTERRUPT_MASK		6

#define M2M_STATUS			0x000c
#define M2M_STATUS_CTL_SHIFT		1
#define M2M_STATUS_CTL_IDLE		(0 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_STALL		(1 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMRD		(2 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMWR		(3 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_BWCWAIT		(4 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MASK		(7 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_BUF_SHIFT		4
#define M2M_STATUS_BUF_NO		(0 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_ON		(1 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_NEXT		(2 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_MASK		(3 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_DONE			BIT(6)

#define M2M_BCR0			0x0010
#define M2M_BCR1			0x0014
#define M2M_SAR_BASE0			0x0018
#define M2M_SAR_BASE1			0x001c
#define M2M_DAR_BASE0			0x002c
#define M2M_DAR_BASE1			0x0030

#define DMA_MAX_CHAN_BYTES		0xffff
#define DMA_MAX_CHAN_DESCRIPTORS	32

struct ep93xx_dma_engine;

/**
 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 * @src_addr: source address of the transaction
 * @dst_addr: destination address of the transaction
 * @size: size of the transaction (in bytes)
 * @complete: this descriptor is completed
 * @txd: dmaengine API descriptor
 * @tx_list: list of linked descriptors
 * @node: link used for putting this into a channel queue
 */
struct ep93xx_dma_desc {
	u32				src_addr;
	u32				dst_addr;
	size_t				size;
	bool				complete;
	struct dma_async_tx_descriptor	txd;
	struct list_head		tx_list;
	struct list_head		node;
};

/**
 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 * @chan: dmaengine API channel
 * @edma: pointer to the engine device
 * @regs: memory mapped registers
 * @irq: interrupt number of the channel
 * @clk: clock used by this channel
 * @tasklet: channel specific tasklet used for callbacks
 * @lock: lock protecting the fields following
 * @flags: flags for the channel
 * @buffer: which buffer to use next (0/1)
 * @active: flattened chain of descriptors currently being processed
 * @queue: pending descriptors which are handled next
 * @free_list: list of free descriptors which can be used
 * @runtime_addr: physical address currently used as dest/src (M2M only). This
 *                is set via .device_config before the slave operation is
 *                prepared
 * @runtime_ctrl: M2M runtime values for the control register.
 *
 * As the EP93xx DMA controller doesn't support real chained DMA descriptors,
 * we use a slightly different scheme here: @active points to the head of a
 * flattened DMA descriptor chain.
 *
 * @queue holds pending transactions. These are linked through the first
 * descriptor in the chain. When a descriptor is moved to the @active queue,
 * the first and chained descriptors are flattened into a single list.
 *
 * @chan.private holds a pointer to &struct ep93xx_dma_data which contains
 * the necessary channel configuration information. For memcpy channels this
 * must be %NULL.
 */
struct ep93xx_dma_chan {
	struct dma_chan			chan;
	const struct ep93xx_dma_engine	*edma;
	void __iomem			*regs;
	int				irq;
	struct clk			*clk;
	struct tasklet_struct		tasklet;
	/* protects the fields following */
	spinlock_t			lock;
	unsigned long			flags;
/* Channel is configured for cyclic transfers */
#define EP93XX_DMA_IS_CYCLIC		0

	int				buffer;
	struct list_head		active;
	struct list_head		queue;
	struct list_head		free_list;
	u32				runtime_addr;
	u32				runtime_ctrl;
};
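
/*
 * A minimal, hypothetical sketch (modeled on existing EP93xx client drivers,
 * not part of this driver) of how a slave client hands us the
 * &struct ep93xx_dma_data described above through @chan.private; the filter
 * function and all names below are illustrative only:
 *
 *	static bool my_filter(struct dma_chan *chan, void *filter_param)
 *	{
 *		chan->private = filter_param;
 *		return true;
 *	}
 *
 *	struct ep93xx_dma_data data = {
 *		.port		= EP93XX_DMA_SSP,
 *		.direction	= DMA_MEM_TO_DEV,
 *		.name		= "ssp-tx",
 *	};
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter, &data);
 */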

/**
 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 * @dma_dev: holds the dmaengine device
 * @m2m: is this an M2M or M2P device
 * @hw_setup: method which sets the channel up for operation
 * @hw_synchronize: synchronizes DMA channel termination to current context
 * @hw_shutdown: shuts the channel down and flushes whatever is left
 * @hw_submit: pushes active descriptor(s) to the hardware
 * @hw_interrupt: handle the interrupt
 * @num_channels: number of channels for this instance
 * @channels: array of channels
 *
 * There is one instance of this struct for the M2P channels and one for the
 * M2M channels. hw_xxx() methods are used to perform operations which are
 * different on M2M and M2P channels. These methods are called with the
 * channel lock held and interrupts disabled so they cannot sleep.
 */
struct ep93xx_dma_engine {
	struct dma_device	dma_dev;
	bool			m2m;
	int			(*hw_setup)(struct ep93xx_dma_chan *);
	void			(*hw_synchronize)(struct ep93xx_dma_chan *);
	void			(*hw_shutdown)(struct ep93xx_dma_chan *);
	void			(*hw_submit)(struct ep93xx_dma_chan *);
	int			(*hw_interrupt)(struct ep93xx_dma_chan *);
#define INTERRUPT_UNKNOWN	0
#define INTERRUPT_DONE		1
#define INTERRUPT_NEXT_BUFFER	2

	size_t			num_channels;
	struct ep93xx_dma_chan	channels[];
};

static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
{
	return &edmac->chan.dev->device;
}

static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct ep93xx_dma_chan, chan);
}

/**
 * ep93xx_dma_set_active - set new active descriptor chain
 * @edmac: channel
 * @desc: head of the new active descriptor chain
 *
 * Sets @desc to be the head of the new active descriptor chain. This is the
 * chain which is processed next. The active list must be empty before calling
 * this function.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
				  struct ep93xx_dma_desc *desc)
{
	BUG_ON(!list_empty(&edmac->active));

	list_add_tail(&desc->node, &edmac->active);

	/* Flatten the @desc->tx_list chain into @edmac->active list */
	while (!list_empty(&desc->tx_list)) {
		struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
			struct ep93xx_dma_desc, node);

		/*
		 * We copy the callback parameters from the first descriptor
		 * to all the chained descriptors. This way we can call the
		 * callback without having to find out the first descriptor in
		 * the chain. Useful for cyclic transfers.
		 */
		d->txd.callback = desc->txd.callback;
		d->txd.callback_param = desc->txd.callback_param;

		list_move_tail(&d->node, &edmac->active);
	}
}
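
/*
 * For example, a three-part transaction prepared as D0 with D1 and D2 on
 * D0->tx_list ends up as @active == [D0, D1, D2] after the call above, with
 * D1 and D2 inheriting D0's callback. Only D0 carries a non-zero txd.cookie
 * (assigned at submit time), which is what ep93xx_dma_advance_active()
 * below uses to detect the end of the chain.
 */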

/* Called with @edmac->lock held and interrupts disabled */
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{
	return list_first_entry_or_null(&edmac->active,
					struct ep93xx_dma_desc, node);
}

/**
 * ep93xx_dma_advance_active - advances to the next active descriptor
 * @edmac: channel
 *
 * Advances the active descriptor to the next one in @edmac->active and
 * returns %true if we still have descriptors in the chain to process.
 * Otherwise returns %false.
 *
 * When the channel is in cyclic mode, always returns %true.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	list_rotate_left(&edmac->active);

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
		return true;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc)
		return false;

	/*
	 * If txd.cookie is set it means that we are back at the first
	 * descriptor in the chain and hence done with it.
	 */
	return !desc->txd.cookie;
}
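
/*
 * A worked example of the scheme above: with @active == [D0, D1, D2] the
 * rotation yields [D1, D2, D0] and D1 (cookie == 0) becomes the current
 * descriptor, so we return %true. Two rotations later D0 is back at the
 * head; its non-zero cookie tells us the whole chain has been processed.
 * In cyclic mode the rotation never terminates, which is exactly what the
 * hardware refill in m2p_fill_desc()/m2m_fill_desc() relies on.
 */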

/*
 * M2P DMA implementation
 */

static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
{
	writel(control, edmac->regs + M2P_CONTROL);
	/*
	 * The EP93xx User's Guide states that we must perform a dummy read
	 * after a write to the control register.
	 */
	readl(edmac->regs + M2P_CONTROL);
}

static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control;

	writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);

	control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
		| M2P_CONTROL_ENABLE;
	m2p_set_control(edmac, control);

	edmac->buffer = 0;

	return 0;
}

static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
{
	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
}

static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
{
	unsigned long flags;
	u32 control;

	spin_lock_irqsave(&edmac->lock, flags);
	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);
	spin_unlock_irqrestore(&edmac->lock, flags);

	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
		schedule();
}

static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	m2p_set_control(edmac, 0);

	while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
		dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
}

static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;
	u32 bus_addr;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
		return;
	}

	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
		bus_addr = desc->src_addr;
	else
		bus_addr = desc->dst_addr;

	if (edmac->buffer == 0) {
		writel(desc->size, edmac->regs + M2P_MAXCNT0);
		writel(bus_addr, edmac->regs + M2P_BASE0);
	} else {
		writel(desc->size, edmac->regs + M2P_MAXCNT1);
		writel(bus_addr, edmac->regs + M2P_BASE1);
	}

	edmac->buffer ^= 1;
}
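
/*
 * The M2P channel is double buffered: while the hardware drains the buffer
 * programmed into MAXCNT0/BASE0 it can already be handed the next one in
 * MAXCNT1/BASE1 (and vice versa), with @buffer tracking which register set
 * m2p_fill_desc() writes next. The NFB (next frame buffer) interrupt is the
 * hardware's request to refill the currently inactive register set.
 */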

static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
{
	u32 control = readl(edmac->regs + M2P_CONTROL);

	m2p_fill_desc(edmac);
	control |= M2P_CONTROL_STALLINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		control |= M2P_CONTROL_NFBINT;
	}

	m2p_set_control(edmac, control);
}

static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
	u32 control;

	if (irq_status & M2P_INTERRUPT_ERROR) {
		struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);

		/* Clear the error interrupt */
		writel(1, edmac->regs + M2P_INTERRUPT);

		/*
		 * It seems that there is no easy way of reporting errors back
		 * to the client so we just report the error here and continue
		 * as usual.
		 *
		 * Revisit this when there is a mechanism to report back the
		 * errors.
		 */
		dev_err(chan2dev(edmac),
			"DMA transfer failed! Details:\n"
			"\tcookie	: %d\n"
			"\tsrc_addr	: 0x%08x\n"
			"\tdst_addr	: 0x%08x\n"
			"\tsize		: %zu\n",
			desc->txd.cookie, desc->src_addr, desc->dst_addr,
			desc->size);
	}

	/*
	 * Even the latest E2 silicon revision sometimes asserts the STALL
	 * interrupt instead of NFB. Therefore we treat them equally, based
	 * on the amount of data we still have to transfer.
	 */
	if (!(irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)))
		return INTERRUPT_UNKNOWN;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		return INTERRUPT_NEXT_BUFFER;
	}

	/* Disable interrupts */
	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);

	return INTERRUPT_DONE;
}
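
/*
 * In short, the M2P interrupt path above has three outcomes: an unexpected
 * status yields INTERRUPT_UNKNOWN, a refilled buffer (more descriptors left
 * in the flattened chain, or cyclic mode) yields INTERRUPT_NEXT_BUFFER, and
 * an exhausted chain disables the STALL/NFB interrupts and yields
 * INTERRUPT_DONE. Errors are only logged; the transfer carries on.
 */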

/*
 * M2M DMA implementation
 */

static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
{
	const struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = 0;

	if (!data) {
		/* This is a memcpy channel, nothing to configure */
		writel(control, edmac->regs + M2M_CONTROL);
		return 0;
	}

	switch (data->port) {
	case EP93XX_DMA_SSP:
		/*
		 * This was found by experimentation - anything less than 5
		 * causes the channel to perform only a partial transfer which
		 * leads to problems since we don't get the DONE interrupt
		 * then.
		 */
		control = (5 << M2M_CONTROL_PWSC_SHIFT);
		control |= M2M_CONTROL_NO_HDSK;

		if (data->direction == DMA_MEM_TO_DEV) {
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
			control |= M2M_CONTROL_RSS_SSPTX;
		} else {
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
			control |= M2M_CONTROL_RSS_SSPRX;
		}
		break;

	case EP93XX_DMA_IDE:
		/*
		 * This IDE part is totally untested. Values below are taken
		 * from the EP93xx User's Guide and might not be correct.
		 */
		if (data->direction == DMA_MEM_TO_DEV) {
			/* Worst case from the UG */
			control = (3 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
		} else {
			control = (2 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
		}

		control |= M2M_CONTROL_NO_HDSK;
		control |= M2M_CONTROL_RSS_IDE;
		control |= M2M_CONTROL_PW_16;
		break;

	default:
		return -EINVAL;
	}

	writel(control, edmac->regs + M2M_CONTROL);
	return 0;
}

static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	/* Just disable the channel */
	writel(0, edmac->regs + M2M_CONTROL);
}

static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
		return;
	}

	if (edmac->buffer == 0) {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
		writel(desc->size, edmac->regs + M2M_BCR0);
	} else {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
		writel(desc->size, edmac->regs + M2M_BCR1);
	}

	edmac->buffer ^= 1;
}

static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = readl(edmac->regs + M2M_CONTROL);

	/*
	 * Since we allow clients to configure PW (peripheral width) we always
	 * clear the PW bits here and then set them according to what is given
	 * in the runtime configuration.
	 */
	control &= ~M2M_CONTROL_PW_MASK;
	control |= edmac->runtime_ctrl;

	m2m_fill_desc(edmac);
	control |= M2M_CONTROL_DONEINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2m_fill_desc(edmac);
		control |= M2M_CONTROL_NFBINT;
	}

	/*
	 * Now we can finally enable the channel. For M2M channels this must
	 * be done _after_ the BCRx registers are programmed.
	 */
	control |= M2M_CONTROL_ENABLE;
	writel(control, edmac->regs + M2M_CONTROL);

	if (!data) {
		/*
		 * For memcpy channels the software trigger must be asserted
		 * in order to start the memcpy operation.
		 */
		control |= M2M_CONTROL_START;
		writel(control, edmac->regs + M2M_CONTROL);
	}
}

/*
 * According to the EP93xx User's Guide, we should receive the DONE interrupt
 * when all M2M DMA controller transactions complete normally. This is not
 * always the case - sometimes the EP93xx M2M DMA asserts DONE while the DMA
 * channel is still running (channel Buffer FSM in DMA_BUF_ON state, and
 * channel Control FSM in DMA_MEM_RD state, observed at least in IDE-DMA
 * operation). In effect, disabling the channel when only the DONE bit is set
 * could stop the currently running DMA transfer. To avoid this, we use the
 * Buffer FSM and Control FSM to check the current state of the DMA channel.
 */
static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 status = readl(edmac->regs + M2M_STATUS);
	u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
	u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
	bool done = status & M2M_STATUS_DONE;
	bool last_done;
	u32 control;
	struct ep93xx_dma_desc *desc;

	/* Accept only DONE and NFB interrupts */
	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
		return INTERRUPT_UNKNOWN;

	if (done) {
		/* Clear the DONE bit */
		writel(0, edmac->regs + M2M_INTERRUPT);
	}

	/*
	 * Check whether we are done with descriptors or not. This, together
	 * with the DMA channel state, determines the action to take in the
	 * interrupt.
	 */
	desc = ep93xx_dma_get_active(edmac);
	last_done = !desc || desc->txd.cookie;

	/*
	 * Use the M2M DMA Buffer FSM and Control FSM to check the current
	 * state of the DMA channel. Using the DONE and NFB bits from the
	 * channel status register or bits from the channel interrupt
	 * register is not reliable.
	 */
	if (!last_done &&
	    (buf_fsm == M2M_STATUS_BUF_NO ||
	     buf_fsm == M2M_STATUS_BUF_ON)) {
		/*
		 * Two buffers are ready for update when the Buffer FSM is in
		 * DMA_NO_BUF state. Only one buffer can be prepared without
		 * disabling the channel or polling the DONE bit.
		 * To simplify things, always prepare only one buffer.
		 */
		if (ep93xx_dma_advance_active(edmac)) {
			m2m_fill_desc(edmac);
			if (done && !edmac->chan.private) {
				/* Software trigger for memcpy channel */
				control = readl(edmac->regs + M2M_CONTROL);
				control |= M2M_CONTROL_START;
				writel(control, edmac->regs + M2M_CONTROL);
			}
			return INTERRUPT_NEXT_BUFFER;
		} else {
			last_done = true;
		}
	}

	/*
	 * Disable the channel only when the Buffer FSM is in DMA_NO_BUF state
	 * and the Control FSM is in DMA_STALL state.
	 */
	if (last_done &&
	    buf_fsm == M2M_STATUS_BUF_NO &&
	    ctl_fsm == M2M_STATUS_CTL_STALL) {
		/* Disable interrupts and the channel */
		control = readl(edmac->regs + M2M_CONTROL);
		control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
			    | M2M_CONTROL_ENABLE);
		writel(control, edmac->regs + M2M_CONTROL);
		return INTERRUPT_DONE;
	}

	/*
	 * Nothing to do this time.
	 */
	return INTERRUPT_NEXT_BUFFER;
}
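
/*
 * The logic above as a decision table (FSM states read from M2M_STATUS):
 *
 *	descriptors left?  Buffer FSM          Control FSM  action
 *	yes                DMA_NO_BUF/BUF_ON   any          refill one buffer
 *	no                 DMA_NO_BUF          DMA_STALL    disable channel,
 *	                                                    report DONE
 *	anything else                                       wait for the next
 *	                                                    interrupt
 */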

/*
 * DMA engine API implementation
 */

static struct ep93xx_dma_desc *
ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_desc;
	struct ep93xx_dma_desc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del_init(&desc->node);

			/* Re-initialize the descriptor */
			desc->src_addr = 0;
			desc->dst_addr = 0;
			desc->size = 0;
			desc->complete = false;
			desc->txd.cookie = 0;
			desc->txd.callback = NULL;
			desc->txd.callback_param = NULL;

			ret = desc;
			break;
		}
	}
	spin_unlock_irqrestore(&edmac->lock, flags);
	return ret;
}
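
/*
 * Descriptors are recycled rather than freed: ep93xx_dma_desc_get() only
 * reuses a descriptor from @free_list once the client has acknowledged it
 * (async_tx_test_ack()). Descriptors are preallocated with DMA_CTRL_ACK set
 * in ep93xx_dma_alloc_chan_resources(), so a freshly allocated channel can
 * hand them out immediately.
 */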

static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
				struct ep93xx_dma_desc *desc)
{
	if (desc) {
		unsigned long flags;

		spin_lock_irqsave(&edmac->lock, flags);
		list_splice_init(&desc->tx_list, &edmac->free_list);
		list_add(&desc->node, &edmac->free_list);
		spin_unlock_irqrestore(&edmac->lock, flags);
	}
}

/**
 * ep93xx_dma_advance_work - start processing the next pending transaction
 * @edmac: channel
 *
 * If we have pending transactions queued and we are currently idling, this
 * function takes the next queued transaction from the @edmac->queue and
 * pushes it to the hardware for execution.
 */
static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *new;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
		spin_unlock_irqrestore(&edmac->lock, flags);
		return;
	}

	/* Take the next descriptor from the pending queue */
	new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
	list_del_init(&new->node);

	ep93xx_dma_set_active(edmac, new);

	/* Push it to the hardware */
	edmac->edma->hw_submit(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);
}

static void ep93xx_dma_tasklet(unsigned long data)
{
	struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
	struct ep93xx_dma_desc *desc, *d;
	struct dmaengine_desc_callback cb;
	LIST_HEAD(list);

	memset(&cb, 0, sizeof(cb));
	spin_lock_irq(&edmac->lock);
	/*
	 * If dmaengine_terminate_all() was called before we get to run, the
	 * active list has become empty. If that happens we aren't supposed
	 * to do anything more than call ep93xx_dma_advance_work().
	 */
	desc = ep93xx_dma_get_active(edmac);
	if (desc) {
		if (desc->complete) {
			/* mark descriptor complete for non cyclic case only */
			if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
				dma_cookie_complete(&desc->txd);
			list_splice_init(&edmac->active, &list);
		}
		dmaengine_desc_get_callback(&desc->txd, &cb);
	}
	spin_unlock_irq(&edmac->lock);

	/* Pick up the next descriptor from the queue */
	ep93xx_dma_advance_work(edmac);

	/* Now we can release all the chained descriptors */
	list_for_each_entry_safe(desc, d, &list, node) {
		dma_descriptor_unmap(&desc->txd);
		ep93xx_dma_desc_put(edmac, desc);
	}

	dmaengine_desc_callback_invoke(&cb, NULL);
}

static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{
	struct ep93xx_dma_chan *edmac = dev_id;
	struct ep93xx_dma_desc *desc;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock(&edmac->lock);

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac),
			 "got interrupt while active list is empty\n");
		spin_unlock(&edmac->lock);
		return IRQ_NONE;
	}

	switch (edmac->edma->hw_interrupt(edmac)) {
	case INTERRUPT_DONE:
		desc->complete = true;
		tasklet_schedule(&edmac->tasklet);
		break;

	case INTERRUPT_NEXT_BUFFER:
		if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
			tasklet_schedule(&edmac->tasklet);
		break;

	default:
		dev_warn(chan2dev(edmac), "unknown interrupt!\n");
		ret = IRQ_NONE;
		break;
	}

	spin_unlock(&edmac->lock);
	return ret;
}

/**
 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
 * @tx: descriptor to be executed
 *
 * Function will execute the given descriptor on the hardware or, if the
 * hardware is busy, queue the descriptor to be executed later on. Returns
 * a cookie which can be used to poll the status of the descriptor.
 */
static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
	struct ep93xx_dma_desc *desc;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	cookie = dma_cookie_assign(tx);

	desc = container_of(tx, struct ep93xx_dma_desc, txd);

	/*
	 * If nothing is currently processed, we push this descriptor
	 * directly to the hardware. Otherwise we put the descriptor
	 * to the pending queue.
	 */
	if (list_empty(&edmac->active)) {
		ep93xx_dma_set_active(edmac, desc);
		edmac->edma->hw_submit(edmac);
	} else {
		list_add_tail(&desc->node, &edmac->queue);
	}

	spin_unlock_irqrestore(&edmac->lock, flags);
	return cookie;
}

/**
 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 * @chan: channel to allocate resources for
 *
 * Function allocates the necessary resources for the given DMA channel and
 * returns the number of allocated descriptors for the channel. A negative
 * errno is returned in case of failure.
 */
static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_data *data = chan->private;
	const char *name = dma_chan_name(chan);
	int ret, i;

	/* Sanity check the channel parameters */
	if (!edmac->edma->m2m) {
		if (!data)
			return -EINVAL;
		if (data->port < EP93XX_DMA_I2S1 ||
		    data->port > EP93XX_DMA_IRDA)
			return -EINVAL;
		if (data->direction != ep93xx_dma_chan_direction(chan))
			return -EINVAL;
	} else {
		if (data) {
			switch (data->port) {
			case EP93XX_DMA_SSP:
			case EP93XX_DMA_IDE:
				if (!is_slave_direction(data->direction))
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
		}
	}

	if (data && data->name)
		name = data->name;

	ret = clk_enable(edmac->clk);
	if (ret)
		return ret;

	ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
	if (ret)
		goto fail_clk_disable;

	spin_lock_irq(&edmac->lock);
	dma_cookie_init(&edmac->chan);
	ret = edmac->edma->hw_setup(edmac);
	spin_unlock_irq(&edmac->lock);

	if (ret)
		goto fail_free_irq;

	for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
		struct ep93xx_dma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			dev_warn(chan2dev(edmac), "not enough descriptors\n");
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = ep93xx_dma_tx_submit;

		ep93xx_dma_desc_put(edmac, desc);
	}

	return i;

fail_free_irq:
	free_irq(edmac->irq, edmac);
fail_clk_disable:
	clk_disable(edmac->clk);

	return ret;
}

/**
 * ep93xx_dma_free_chan_resources - release resources for the channel
 * @chan: channel
 *
 * Function releases all the resources allocated for the given channel.
 * The channel must be idle when this is called.
 */
static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *d;
	unsigned long flags;
	LIST_HEAD(list);

	BUG_ON(!list_empty(&edmac->active));
	BUG_ON(!list_empty(&edmac->queue));

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->edma->hw_shutdown(edmac);
	edmac->runtime_addr = 0;
	edmac->runtime_ctrl = 0;
	edmac->buffer = 0;
	list_splice_init(&edmac->free_list, &list);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, d, &list, node)
		kfree(desc);

	clk_disable(edmac->clk);
	free_irq(edmac->irq, edmac);
}

/**
 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
 * @chan: channel
 * @dest: destination bus address
 * @src: source bus address
 * @len: size of the transaction
 * @flags: flags for the descriptor
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			   dma_addr_t src, size_t len, unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t bytes, offset;

	first = NULL;
	for (offset = 0; offset < len; offset += bytes) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);

		desc->src_addr = src + offset;
		desc->dst_addr = dest + offset;
		desc->size = bytes;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;
fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
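
/*
 * A worked example of the chunking above: a 150 KiB (153600 byte) memcpy
 * with DMA_MAX_CHAN_BYTES == 0xffff becomes a chain of three descriptors
 * covering 65535 + 65535 + 22530 bytes, linked on first->tx_list and
 * flattened onto @active at submit time.
 */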

/**
 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
 * @chan: channel
 * @sgl: list of buffers to transfer
 * @sg_len: number of entries in @sgl
 * @dir: direction of the DMA transfer
 * @flags: flags for the descriptor
 * @context: operation context (ignored)
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			 unsigned int sg_len, enum dma_transfer_direction dir,
			 unsigned long flags, void *context)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	struct scatterlist *sg;
	int i;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	first = NULL;
	for_each_sg(sgl, sg, sg_len, i) {
		size_t len = sg_dma_len(sg);

		if (len > DMA_MAX_CHAN_BYTES) {
			dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
				 len);
			goto fail;
		}

		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = sg_dma_address(sg);
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = sg_dma_address(sg);
		}
		desc->size = len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
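
/*
 * A minimal, hypothetical client-side sketch of a slave transfer against
 * this driver (all names are illustrative), using only generic dmaengine
 * calls:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= dev_fifo_phys,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
 *	};
 *	struct dma_async_tx_descriptor *txd;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	txd->callback = my_done;
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */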

/**
 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
 * @chan: channel
 * @dma_addr: DMA mapped address of the buffer
 * @buf_len: length of the buffer (in bytes)
 * @period_len: length of a single period
 * @dir: direction of the operation
 * @flags: tx descriptor status flags
 *
 * Prepares a descriptor for cyclic DMA operation. This means that once the
 * descriptor is submitted, we will be submitting @period_len sized buffers
 * and calling the callback once a period has elapsed. The transfer
 * terminates only when the client calls dmaengine_terminate_all() for this
 * channel.
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
			   size_t buf_len, size_t period_len,
			   enum dma_transfer_direction dir, unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t offset = 0;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	if (period_len > DMA_MAX_CHAN_BYTES) {
		dev_warn(chan2dev(edmac), "too big period length %zu\n",
			 period_len);
		return NULL;
	}

	/* Split the buffer into period size chunks */
	first = NULL;
	for (offset = 0; offset < buf_len; offset += period_len) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = dma_addr + offset;
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = dma_addr + offset;
		}

		desc->size = period_len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
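
/*
 * A hypothetical cyclic client (an audio-style ring buffer; names are
 * illustrative), again using only generic dmaengine calls:
 *
 *	txd = dmaengine_prep_dma_cyclic(chan, ring_dma_addr, ring_bytes,
 *					period_bytes, DMA_MEM_TO_DEV,
 *					DMA_PREP_INTERRUPT);
 *	txd->callback = my_period_elapsed;	// runs after every period
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 *	...
 *	dmaengine_terminate_all(chan);	// the only way the transfer ends
 */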

/**
 * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
 * current context.
 * @chan: channel
 *
 * Synchronizes the DMA channel termination to the current context. When this
 * function returns it is guaranteed that all transfers for previously issued
 * descriptors have stopped and it is safe to free the memory associated
 * with them. Furthermore it is guaranteed that all complete callback functions
 * for a previously submitted descriptor have finished running and it is safe
 * to free resources accessed from within the complete callbacks.
 */
static void ep93xx_dma_synchronize(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);

	if (edmac->edma->hw_synchronize)
		edmac->edma->hw_synchronize(edmac);
}

/**
 * ep93xx_dma_terminate_all - terminate all transactions
 * @chan: channel
 *
 * Stops all DMA transactions. All descriptors are put back to the
 * @edmac->free_list and callbacks are _not_ called.
 */
static int ep93xx_dma_terminate_all(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *_d;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&edmac->lock, flags);
	/* First we disable and flush the DMA channel */
	edmac->edma->hw_shutdown(edmac);
	clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
	list_splice_init(&edmac->active, &list);
	list_splice_init(&edmac->queue, &list);
	/*
	 * We then re-enable the channel. This way we can continue submitting
	 * the descriptors by just calling ->hw_submit() again.
	 */
	edmac->edma->hw_setup(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, _d, &list, node)
		ep93xx_dma_desc_put(edmac, desc);

	return 0;
}

static int ep93xx_dma_slave_config(struct dma_chan *chan,
				   struct dma_slave_config *config)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	enum dma_slave_buswidth width;
	unsigned long flags;
	u32 addr, ctrl;

	if (!edmac->edma->m2m)
		return -EINVAL;

	switch (config->direction) {
	case DMA_DEV_TO_MEM:
		width = config->src_addr_width;
		addr = config->src_addr;
		break;

	case DMA_MEM_TO_DEV:
		width = config->dst_addr_width;
		addr = config->dst_addr;
		break;

	default:
		return -EINVAL;
	}

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		ctrl = 0;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		ctrl = M2M_CONTROL_PW_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		ctrl = M2M_CONTROL_PW_32;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->runtime_addr = addr;
	edmac->runtime_ctrl = ctrl;
	spin_unlock_irqrestore(&edmac->lock, flags);

	return 0;
}
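
/*
 * The mapping performed above, for reference: the runtime configuration
 * only captures the device-side address and the port width bits of the M2M
 * control register (1 byte -> 0, 2 bytes -> M2M_CONTROL_PW_16, 4 bytes ->
 * M2M_CONTROL_PW_32); m2m_hw_submit() ORs @runtime_ctrl into the control
 * register for every transfer it starts.
 */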

/**
 * ep93xx_dma_tx_status - check if a transaction is completed
 * @chan: channel
 * @cookie: transaction specific cookie
 * @state: state of the transaction is stored here if given
 *
 * This function can be used to query state of a given transaction.
 */
static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *state)
{
	return dma_cookie_status(chan, cookie, state);
}

/**
 * ep93xx_dma_issue_pending - push pending transactions to the hardware
 * @chan: channel
 *
 * When this function is called, all pending transactions are pushed to the
 * hardware and executed.
 */
static void ep93xx_dma_issue_pending(struct dma_chan *chan)
{
	ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
}

static int __init ep93xx_dma_probe(struct platform_device *pdev)
{
	struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct ep93xx_dma_engine *edma;
	struct dma_device *dma_dev;
	size_t edma_size;
	int ret, i;

	edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
	edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
	if (!edma)
		return -ENOMEM;

	dma_dev = &edma->dma_dev;
	edma->m2m = platform_get_device_id(pdev)->driver_data;
	edma->num_channels = pdata->num_channels;

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < pdata->num_channels; i++) {
		const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
		struct ep93xx_dma_chan *edmac = &edma->channels[i];

		edmac->chan.device = dma_dev;
		edmac->regs = cdata->base;
		edmac->irq = cdata->irq;
		edmac->edma = edma;

		edmac->clk = clk_get(NULL, cdata->name);
		if (IS_ERR(edmac->clk)) {
			dev_warn(&pdev->dev, "failed to get clock for %s\n",
				 cdata->name);
			continue;
		}

		spin_lock_init(&edmac->lock);
		INIT_LIST_HEAD(&edmac->active);
		INIT_LIST_HEAD(&edmac->queue);
		INIT_LIST_HEAD(&edmac->free_list);
		tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
			     (unsigned long)edmac);

		list_add_tail(&edmac->chan.device_node,
			      &dma_dev->channels);
	}

	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);

	dma_dev->dev = &pdev->dev;
	dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
	dma_dev->device_config = ep93xx_dma_slave_config;
	dma_dev->device_synchronize = ep93xx_dma_synchronize;
	dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
	dma_dev->device_tx_status = ep93xx_dma_tx_status;

	dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);

	if (edma->m2m) {
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
		dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;

		edma->hw_setup = m2m_hw_setup;
		edma->hw_shutdown = m2m_hw_shutdown;
		edma->hw_submit = m2m_hw_submit;
		edma->hw_interrupt = m2m_hw_interrupt;
	} else {
		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

		edma->hw_synchronize = m2p_hw_synchronize;
		edma->hw_setup = m2p_hw_setup;
		edma->hw_shutdown = m2p_hw_shutdown;
		edma->hw_submit = m2p_hw_submit;
		edma->hw_interrupt = m2p_hw_interrupt;
	}

	ret = dma_async_device_register(dma_dev);
	if (unlikely(ret)) {
		for (i = 0; i < edma->num_channels; i++) {
			struct ep93xx_dma_chan *edmac = &edma->channels[i];
			if (!IS_ERR_OR_NULL(edmac->clk))
				clk_put(edmac->clk);
		}
		kfree(edma);
	} else {
		dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
			 edma->m2m ? "M" : "P");
	}

	return ret;
}

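/*
 * The two flavours of the controller are matched by device name; the
 * driver_data value ends up in edma->m2m above (0 = M2P, 1 = M2M).
 */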
static const struct platform_device_id ep93xx_dma_driver_ids[] = {
	{ "ep93xx-dma-m2p", 0 },
	{ "ep93xx-dma-m2m", 1 },
	{ },
};

static struct platform_driver ep93xx_dma_driver = {
	.driver		= {
		.name	= "ep93xx-dma",
	},
	.id_table	= ep93xx_dma_driver_ids,
};

static int __init ep93xx_dma_module_init(void)
{
	return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
}
subsys_initcall(ep93xx_dma_module_init);

MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_DESCRIPTION("EP93xx DMA driver");
MODULE_LICENSE("GPL");