// SPDX-License-Identifier: GPL-2.0
// Copyright 2014-2015 Freescale
// Copyright 2018 NXP

/*
 * Driver for NXP Layerscape Queue Direct Memory Access Controller
 *
 * Author:
 *  Wen He <wen.he_1@nxp.com>
 *  Jiaheng Fan <jiaheng.fan@nxp.com>
 *
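 * The controller is exposed through the dmaengine framework as a
 * DMA_MEMCPY provider; a client would typically claim a channel with
 * dma_request_chan_by_mask() and drive it through
 * device_prep_dma_memcpy(), tx_submit() and dma_async_issue_pending().
 *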
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_dma.h>
#include <linux/dma-mapping.h>

#include "virt-dma.h"
#include "fsldma.h"

/* Register related definition */
#define FSL_QDMA_DMR			0x0
#define FSL_QDMA_DSR			0x4
#define FSL_QDMA_DEIER			0xe00
#define FSL_QDMA_DEDR			0xe04
#define FSL_QDMA_DECFDW0R		0xe10
#define FSL_QDMA_DECFDW1R		0xe14
#define FSL_QDMA_DECFDW2R		0xe18
#define FSL_QDMA_DECFDW3R		0xe1c
#define FSL_QDMA_DECFQIDR		0xe30
#define FSL_QDMA_DECBR			0xe34

#define FSL_QDMA_BCQMR(x)		(0xc0 + 0x100 * (x))
#define FSL_QDMA_BCQSR(x)		(0xc4 + 0x100 * (x))
#define FSL_QDMA_BCQEDPA_SADDR(x)	(0xc8 + 0x100 * (x))
#define FSL_QDMA_BCQDPA_SADDR(x)	(0xcc + 0x100 * (x))
#define FSL_QDMA_BCQEEPA_SADDR(x)	(0xd0 + 0x100 * (x))
#define FSL_QDMA_BCQEPA_SADDR(x)	(0xd4 + 0x100 * (x))
#define FSL_QDMA_BCQIER(x)		(0xe0 + 0x100 * (x))
#define FSL_QDMA_BCQIDR(x)		(0xe4 + 0x100 * (x))

#define FSL_QDMA_SQDPAR			0x80c
#define FSL_QDMA_SQEPAR			0x814
#define FSL_QDMA_BSQMR			0x800
#define FSL_QDMA_BSQSR			0x804
#define FSL_QDMA_BSQICR			0x828
#define FSL_QDMA_CQMR			0xa00
#define FSL_QDMA_CQDSCR1		0xa08
#define FSL_QDMA_CQDSCR2		0xa0c
#define FSL_QDMA_CQIER			0xa10
#define FSL_QDMA_CQEDR			0xa14
#define FSL_QDMA_SQCCMR			0xa20

/* Register bit and genmask definitions */
#define FSL_QDMA_CQIDR_SQT		BIT(15)
#define QDMA_CCDF_FORMAT		BIT(29)
#define QDMA_CCDF_SER			BIT(30)
#define QDMA_SG_FIN			BIT(30)
#define QDMA_SG_LEN_MASK		GENMASK(29, 0)
#define QDMA_CCDF_MASK			GENMASK(28, 20)

#define FSL_QDMA_DEDR_CLEAR		GENMASK(31, 0)
#define FSL_QDMA_BCQIDR_CLEAR		GENMASK(31, 0)
#define FSL_QDMA_DEIER_CLEAR		GENMASK(31, 0)

#define FSL_QDMA_BCQIER_CQTIE		BIT(15)
#define FSL_QDMA_BCQIER_CQPEIE		BIT(23)
#define FSL_QDMA_BSQICR_ICEN		BIT(31)

#define FSL_QDMA_BSQICR_ICST(x)		((x) << 16)
#define FSL_QDMA_CQIER_MEIE		BIT(31)
#define FSL_QDMA_CQIER_TEIE		BIT(0)
#define FSL_QDMA_SQCCMR_ENTER_WM	BIT(21)

#define FSL_QDMA_BCQMR_EN		BIT(31)
#define FSL_QDMA_BCQMR_EI		BIT(30)
#define FSL_QDMA_BCQMR_CD_THLD(x)	((x) << 20)
#define FSL_QDMA_BCQMR_CQ_SIZE(x)	((x) << 16)

#define FSL_QDMA_BCQSR_QF		BIT(16)
#define FSL_QDMA_BCQSR_XOFF		BIT(0)

#define FSL_QDMA_BSQMR_EN		BIT(31)
#define FSL_QDMA_BSQMR_DI		BIT(30)
#define FSL_QDMA_BSQMR_CQ_SIZE(x)	((x) << 16)

#define FSL_QDMA_BSQSR_QE		BIT(17)

#define FSL_QDMA_DMR_DQD		BIT(30)
#define FSL_QDMA_DSR_DB			BIT(31)

/* Size related definition */
#define FSL_QDMA_QUEUE_MAX		8
#define FSL_QDMA_COMMAND_BUFFER_SIZE	64
#define FSL_QDMA_DESCRIPTOR_BUFFER_SIZE	32
#define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN	64
#define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX	16384
#define FSL_QDMA_QUEUE_NUM_MAX		8

/* Field definition for CMD */
#define FSL_QDMA_CMD_RWTTYPE		0x4
#define FSL_QDMA_CMD_LWC		0x2
#define FSL_QDMA_CMD_RWTTYPE_OFFSET	28
#define FSL_QDMA_CMD_NS_OFFSET		27
#define FSL_QDMA_CMD_DQOS_OFFSET	24
#define FSL_QDMA_CMD_WTHROTL_OFFSET	20
#define FSL_QDMA_CMD_DSEN_OFFSET	19
#define FSL_QDMA_CMD_LWC_OFFSET		16

/* Field definition for Descriptor status */
#define QDMA_CCDF_STATUS_RTE		BIT(5)
#define QDMA_CCDF_STATUS_WTE		BIT(4)
#define QDMA_CCDF_STATUS_CDE		BIT(2)
#define QDMA_CCDF_STATUS_SDE		BIT(1)
#define QDMA_CCDF_STATUS_DDE		BIT(0)
#define QDMA_CCDF_STATUS_MASK		(QDMA_CCDF_STATUS_RTE | \
					QDMA_CCDF_STATUS_WTE | \
					QDMA_CCDF_STATUS_CDE | \
					QDMA_CCDF_STATUS_SDE | \
					QDMA_CCDF_STATUS_DDE)

/* Field definition for Descriptor offset */
#define QDMA_CCDF_OFFSET		20
#define QDMA_SDDF_CMD(x)		(((u64)(x)) << 32)

/* Safe loop count and timeout definitions */
#define FSL_QDMA_HALT_COUNT		1500
#define FSL_QDMA_MAX_SIZE		16385
#define FSL_QDMA_COMP_TIMEOUT		1000
#define FSL_COMMAND_QUEUE_OVERFLOW	10

#define FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma_engine, x)	\
	(((fsl_qdma_engine)->block_offset) * (x))

/**
 * struct fsl_qdma_format - This struct describes the qDMA compound
 * descriptor format.
 * @status: Command status and enqueue status notification.
 * @cfg: Frame offset and frame format.
 * @addr_lo: Lower 32 bits of the 40-bit memory address held by
 * the compound descriptor.
 * @addr_hi: Upper 8 bits of the same 40-bit memory address.
 * @__reserved1: Reserved field.
 * @cfg8b_w1: Compound descriptor command queue origin produced
 * by qDMA and dynamic debug field.
 * @data: The full 40-bit address as a single word; carries the
 * DMA source and destination information.
 */
struct fsl_qdma_format {
	__le32 status;
	__le32 cfg;
	union {
		struct {
			__le32 addr_lo;
			u8 addr_hi;
			u8 __reserved1[2];
			u8 cfg8b_w1;
		} __packed;
		__le64 data;
	};
} __packed;

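/*
 * The same 16-byte layout is reused for each element of a compound
 * frame: the head CCDF, the compound S/G (CSGF) entries, and the
 * source/destination descriptor (SDF/DDF) command words; see
 * fsl_qdma_comp_fill_memcpy() below.
 */
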
/* Last qDMA status notification seen, recorded per CPU to detect duplicates */
struct fsl_pre_status {
	u64 addr;
	u8 queue;
};

static DEFINE_PER_CPU(struct fsl_pre_status, pre);

struct fsl_qdma_chan {
	struct virt_dma_chan	vchan;
	struct virt_dma_desc	vdesc;
	enum dma_status		status;
	struct fsl_qdma_engine	*qdma;
	struct fsl_qdma_queue	*queue;
};

struct fsl_qdma_queue {
	struct fsl_qdma_format	*virt_head;
	struct fsl_qdma_format	*virt_tail;
	struct list_head	comp_used;
	struct list_head	comp_free;
	struct dma_pool		*comp_pool;
	struct dma_pool		*desc_pool;
	spinlock_t		queue_lock;
	dma_addr_t		bus_addr;
	u32			n_cq;
	u32			id;
	struct fsl_qdma_format	*cq;
	void __iomem		*block_base;
};

struct fsl_qdma_comp {
	dma_addr_t		bus_addr;
	dma_addr_t		desc_bus_addr;
	struct fsl_qdma_format	*virt_addr;
	struct fsl_qdma_format	*desc_virt_addr;
	struct fsl_qdma_chan	*qchan;
	struct virt_dma_desc	vdesc;
	struct list_head	list;
};

struct fsl_qdma_engine {
	struct dma_device	dma_dev;
	void __iomem		*ctrl_base;
	void __iomem		*status_base;
	void __iomem		*block_base;
	u32			n_chans;
	u32			n_queues;
	struct mutex		fsl_qdma_mutex;
	int			error_irq;
	int			*queue_irq;
	u32			feature;
	struct fsl_qdma_queue	*queue;
	struct fsl_qdma_queue	**status;
	struct fsl_qdma_chan	*chans;
	int			block_number;
	int			block_offset;
	int			irq_base;
	int			desc_allocated;
};
static inline u64
qdma_ccdf_addr_get64(const struct fsl_qdma_format *ccdf)
{
	return le64_to_cpu(ccdf->data) & (U64_MAX >> 24);
}

static inline void
qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
{
	ccdf->addr_hi = upper_32_bits(addr);
	ccdf->addr_lo = cpu_to_le32(lower_32_bits(addr));
}

static inline u8
qdma_ccdf_get_queue(const struct fsl_qdma_format *ccdf)
{
	return ccdf->cfg8b_w1 & U8_MAX;
}

static inline int
qdma_ccdf_get_offset(const struct fsl_qdma_format *ccdf)
{
	return (le32_to_cpu(ccdf->cfg) & QDMA_CCDF_MASK) >> QDMA_CCDF_OFFSET;
}

static inline void
qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset)
{
	ccdf->cfg = cpu_to_le32(QDMA_CCDF_FORMAT |
				(offset << QDMA_CCDF_OFFSET));
}

static inline int
qdma_ccdf_get_status(const struct fsl_qdma_format *ccdf)
{
	return (le32_to_cpu(ccdf->status) & QDMA_CCDF_STATUS_MASK);
}

static inline void
qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status)
{
	ccdf->status = cpu_to_le32(QDMA_CCDF_SER | status);
}

static inline void qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len)
{
	csgf->cfg = cpu_to_le32(len & QDMA_SG_LEN_MASK);
}

static inline void qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len)
{
	csgf->cfg = cpu_to_le32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
}

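/*
 * FSL_DMA_IN()/FSL_DMA_OUT() come from fsldma.h and select big- or
 * little-endian MMIO accessors based on fsl_qdma->feature, which is
 * set from the "big-endian" DT property in fsl_qdma_probe().
 */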
static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
{
	return FSL_DMA_IN(qdma, addr, 32);
}

static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val,
			void __iomem *addr)
{
	FSL_DMA_OUT(qdma, addr, val, 32);
}

static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct fsl_qdma_chan, vchan.chan);
}

static struct fsl_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
{
	return container_of(vd, struct fsl_qdma_comp, vdesc);
}

static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
	struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
	struct fsl_qdma_comp *comp_temp, *_comp_temp;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);

	if (!fsl_queue->comp_pool && !fsl_queue->desc_pool)
		return;

	list_for_each_entry_safe(comp_temp, _comp_temp,
				 &fsl_queue->comp_used, list) {
		dma_pool_free(fsl_queue->comp_pool,
			      comp_temp->virt_addr,
			      comp_temp->bus_addr);
		dma_pool_free(fsl_queue->desc_pool,
			      comp_temp->desc_virt_addr,
			      comp_temp->desc_bus_addr);
		list_del(&comp_temp->list);
		kfree(comp_temp);
	}

	list_for_each_entry_safe(comp_temp, _comp_temp,
				 &fsl_queue->comp_free, list) {
		dma_pool_free(fsl_queue->comp_pool,
			      comp_temp->virt_addr,
			      comp_temp->bus_addr);
		dma_pool_free(fsl_queue->desc_pool,
			      comp_temp->desc_virt_addr,
			      comp_temp->desc_bus_addr);
		list_del(&comp_temp->list);
		kfree(comp_temp);
	}

	dma_pool_destroy(fsl_queue->comp_pool);
	dma_pool_destroy(fsl_queue->desc_pool);

	fsl_qdma->desc_allocated--;
	fsl_queue->comp_pool = NULL;
	fsl_queue->desc_pool = NULL;
}

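/*
 * Layout of a memcpy transaction, built from two DMA-pooled buffers
 * of 16-byte fsl_qdma_format entries:
 *
 *   comp buffer (FSL_QDMA_COMMAND_BUFFER_SIZE = 64 bytes):
 *     [0] CCDF       head command descriptor, points at entry [1]
 *     [1] CSGF desc  S/G entry covering the 32-byte SDF/DDF buffer
 *     [2] CSGF src   S/G entry covering the source buffer
 *     [3] CSGF dest  S/G entry covering the destination (final entry)
 *
 *   desc buffer (FSL_QDMA_DESCRIPTOR_BUFFER_SIZE = 32 bytes):
 *     [0] SDF        source descriptor command word
 *     [1] DDF        destination descriptor command word
 */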
static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
				      dma_addr_t dst, dma_addr_t src, u32 len)
{
	u32 cmd;
	struct fsl_qdma_format *sdf, *ddf;
	struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;

	ccdf = fsl_comp->virt_addr;
	csgf_desc = fsl_comp->virt_addr + 1;
	csgf_src = fsl_comp->virt_addr + 2;
	csgf_dest = fsl_comp->virt_addr + 3;
	sdf = fsl_comp->desc_virt_addr;
	ddf = fsl_comp->desc_virt_addr + 1;

	memset(fsl_comp->virt_addr, 0, FSL_QDMA_COMMAND_BUFFER_SIZE);
	memset(fsl_comp->desc_virt_addr, 0, FSL_QDMA_DESCRIPTOR_BUFFER_SIZE);
	/* Head Command Descriptor (Frame Descriptor) */
	qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
	qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf));
	/* Status notification is enqueued to the status queue. */
	qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf));
	/* Compound Command Descriptor (Frame List Table) */
	qdma_desc_addr_set64(csgf_desc, fsl_comp->desc_bus_addr);
	/* Length must be 32: the size of the SDF/DDF descriptor buffer. */
	qdma_csgf_set_len(csgf_desc, 32);
	qdma_desc_addr_set64(csgf_src, src);
	qdma_csgf_set_len(csgf_src, len);
	qdma_desc_addr_set64(csgf_dest, dst);
	qdma_csgf_set_len(csgf_dest, len);
	/* This entry is the last entry. */
	qdma_csgf_set_f(csgf_dest, len);
	/* Descriptor Buffer */
	cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
			  FSL_QDMA_CMD_RWTTYPE_OFFSET);
	sdf->data = QDMA_SDDF_CMD(cmd);

	cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
			  FSL_QDMA_CMD_RWTTYPE_OFFSET);
	cmd |= cpu_to_le32(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
	ddf->data = QDMA_SDDF_CMD(cmd);
}

/*
 * Pre-request a full set of command descriptors for enqueue.
 */
static int fsl_qdma_pre_request_enqueue_desc(struct fsl_qdma_queue *queue)
{
	int i;
	struct fsl_qdma_comp *comp_temp, *_comp_temp;

	for (i = 0; i < queue->n_cq + FSL_COMMAND_QUEUE_OVERFLOW; i++) {
		comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
		if (!comp_temp)
			goto err_alloc;
		comp_temp->virt_addr =
			dma_pool_alloc(queue->comp_pool, GFP_KERNEL,
				       &comp_temp->bus_addr);
		if (!comp_temp->virt_addr)
			goto err_dma_alloc;

		comp_temp->desc_virt_addr =
			dma_pool_alloc(queue->desc_pool, GFP_KERNEL,
				       &comp_temp->desc_bus_addr);
		if (!comp_temp->desc_virt_addr)
			goto err_desc_dma_alloc;

		list_add_tail(&comp_temp->list, &queue->comp_free);
	}

	return 0;

err_desc_dma_alloc:
	dma_pool_free(queue->comp_pool, comp_temp->virt_addr,
		      comp_temp->bus_addr);

err_dma_alloc:
	kfree(comp_temp);

err_alloc:
	list_for_each_entry_safe(comp_temp, _comp_temp,
				 &queue->comp_free, list) {
		if (comp_temp->virt_addr)
			dma_pool_free(queue->comp_pool,
				      comp_temp->virt_addr,
				      comp_temp->bus_addr);
		if (comp_temp->desc_virt_addr)
			dma_pool_free(queue->desc_pool,
				      comp_temp->desc_virt_addr,
				      comp_temp->desc_bus_addr);

		list_del(&comp_temp->list);
		kfree(comp_temp);
	}

	return -ENOMEM;
}

/*
 * Request a command descriptor for enqueue.
 */
static struct fsl_qdma_comp
*fsl_qdma_request_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
{
	unsigned long flags;
	struct fsl_qdma_comp *comp_temp;
	int timeout = FSL_QDMA_COMP_TIMEOUT;
	struct fsl_qdma_queue *queue = fsl_chan->queue;

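	/*
	 * Busy-wait (up to FSL_QDMA_COMP_TIMEOUT iterations of
	 * udelay(1)) until a pre-allocated descriptor shows up on the
	 * free list.
	 */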
	while (timeout--) {
		spin_lock_irqsave(&queue->queue_lock, flags);
		if (!list_empty(&queue->comp_free)) {
			comp_temp = list_first_entry(&queue->comp_free,
						     struct fsl_qdma_comp,
						     list);
			list_del(&comp_temp->list);

			spin_unlock_irqrestore(&queue->queue_lock, flags);
			comp_temp->qchan = fsl_chan;
			return comp_temp;
		}
		spin_unlock_irqrestore(&queue->queue_lock, flags);
		udelay(1);
	}

	return NULL;
}

static struct fsl_qdma_queue
*fsl_qdma_alloc_queue_resources(struct platform_device *pdev,
				struct fsl_qdma_engine *fsl_qdma)
{
	int ret, len, i, j;
	int queue_num, block_number;
	unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
	struct fsl_qdma_queue *queue_head, *queue_temp;

	queue_num = fsl_qdma->n_queues;
	block_number = fsl_qdma->block_number;

	if (queue_num > FSL_QDMA_QUEUE_MAX)
		queue_num = FSL_QDMA_QUEUE_MAX;
	len = sizeof(*queue_head) * queue_num * block_number;
	queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!queue_head)
		return NULL;

	ret = device_property_read_u32_array(&pdev->dev, "queue-sizes",
					     queue_size, queue_num);
	if (ret) {
		dev_err(&pdev->dev, "Can't get queue-sizes.\n");
		return NULL;
	}
	for (j = 0; j < block_number; j++) {
		for (i = 0; i < queue_num; i++) {
			if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
			    queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
				dev_err(&pdev->dev,
					"Invalid queue-sizes.\n");
				return NULL;
			}
			queue_temp = queue_head + i + (j * queue_num);

			queue_temp->cq =
			dma_alloc_coherent(&pdev->dev,
					   sizeof(struct fsl_qdma_format) *
					   queue_size[i],
					   &queue_temp->bus_addr,
					   GFP_KERNEL);
			if (!queue_temp->cq)
				return NULL;
			queue_temp->block_base = fsl_qdma->block_base +
				FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
			queue_temp->n_cq = queue_size[i];
			queue_temp->id = i;
			queue_temp->virt_head = queue_temp->cq;
			queue_temp->virt_tail = queue_temp->cq;
			/*
			 * List for queue command buffer
			 */
			INIT_LIST_HEAD(&queue_temp->comp_used);
			spin_lock_init(&queue_temp->queue_lock);
		}
	}
	return queue_head;
}

static struct fsl_qdma_queue
*fsl_qdma_prep_status_queue(struct platform_device *pdev)
{
	int ret;
	unsigned int status_size;
	struct fsl_qdma_queue *status_head;
	struct device_node *np = pdev->dev.of_node;

	ret = of_property_read_u32(np, "status-sizes", &status_size);
	if (ret) {
		dev_err(&pdev->dev, "Can't get status-sizes.\n");
		return NULL;
	}
	if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
	    status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
		dev_err(&pdev->dev, "Invalid status-sizes.\n");
		return NULL;
	}
	status_head = devm_kzalloc(&pdev->dev,
				   sizeof(*status_head), GFP_KERNEL);
	if (!status_head)
		return NULL;

	/*
	 * Buffer for queue command
	 */
	status_head->cq = dma_alloc_coherent(&pdev->dev,
					     sizeof(struct fsl_qdma_format) *
					     status_size,
					     &status_head->bus_addr,
					     GFP_KERNEL);
	if (!status_head->cq) {
		devm_kfree(&pdev->dev, status_head);
		return NULL;
	}
	status_head->n_cq = status_size;
	status_head->virt_head = status_head->cq;
	status_head->virt_tail = status_head->cq;
	status_head->comp_pool = NULL;

	return status_head;
}

static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
{
	u32 reg;
	int i, j, count = FSL_QDMA_HALT_COUNT;
	void __iomem *block, *ctrl = fsl_qdma->ctrl_base;

	/* Disable the command queue and wait for idle state. */
	reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
	reg |= FSL_QDMA_DMR_DQD;
	qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
	for (j = 0; j < fsl_qdma->block_number; j++) {
		block = fsl_qdma->block_base +
			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
		for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
			qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i));
	}
	while (1) {
		reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR);
		if (!(reg & FSL_QDMA_DSR_DB))
			break;
		if (count-- < 0)
			return -EBUSY;
		udelay(100);
	}

	for (j = 0; j < fsl_qdma->block_number; j++) {
		block = fsl_qdma->block_base +
			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);

		/* Disable status queue. */
		qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR);

		/*
		 * Clear the command queue interrupt detect register for
		 * all queues.
		 */
		qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
			    block + FSL_QDMA_BCQIDR(0));
	}

	return 0;
}

static int
fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
				 void *block,
				 int id)
{
	bool duplicate;
	u32 reg, i, count;
	u8 completion_status;
	struct fsl_qdma_queue *temp_queue;
	struct fsl_qdma_format *status_addr;
	struct fsl_qdma_comp *fsl_comp = NULL;
	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
	struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id];

	count = FSL_QDMA_MAX_SIZE;

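	/*
	 * Pop entries off the status queue until it reads empty.  The
	 * per-CPU (addr, queue) pair written below records the last
	 * notification handled, so an entry matching it is treated as
	 * a duplicate and simply consumed.
	 */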
	while (count--) {
		duplicate = 0;
		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
		if (reg & FSL_QDMA_BSQSR_QE)
			return 0;

		status_addr = fsl_status->virt_head;

		if (qdma_ccdf_get_queue(status_addr) ==
		   __this_cpu_read(pre.queue) &&
		   qdma_ccdf_addr_get64(status_addr) ==
		   __this_cpu_read(pre.addr))
			duplicate = 1;
		i = qdma_ccdf_get_queue(status_addr) +
			id * fsl_qdma->n_queues;
		__this_cpu_write(pre.addr, qdma_ccdf_addr_get64(status_addr));
		__this_cpu_write(pre.queue, qdma_ccdf_get_queue(status_addr));
		temp_queue = fsl_queue + i;

		spin_lock(&temp_queue->queue_lock);
		if (list_empty(&temp_queue->comp_used)) {
			if (!duplicate) {
				spin_unlock(&temp_queue->queue_lock);
				return -EAGAIN;
			}
		} else {
			fsl_comp = list_first_entry(&temp_queue->comp_used,
						    struct fsl_qdma_comp, list);
			if (fsl_comp->bus_addr + 16 !=
				__this_cpu_read(pre.addr)) {
				if (!duplicate) {
					spin_unlock(&temp_queue->queue_lock);
					return -EAGAIN;
				}
			}
		}

		if (duplicate) {
			reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
			reg |= FSL_QDMA_BSQMR_DI;
			qdma_desc_addr_set64(status_addr, 0x0);
			fsl_status->virt_head++;
			if (fsl_status->virt_head == fsl_status->cq
						   + fsl_status->n_cq)
				fsl_status->virt_head = fsl_status->cq;
			qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
			spin_unlock(&temp_queue->queue_lock);
			continue;
		}
		list_del(&fsl_comp->list);

		completion_status = qdma_ccdf_get_status(status_addr);

		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
		reg |= FSL_QDMA_BSQMR_DI;
		qdma_desc_addr_set64(status_addr, 0x0);
		fsl_status->virt_head++;
		if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
			fsl_status->virt_head = fsl_status->cq;
		qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
		spin_unlock(&temp_queue->queue_lock);

		/*
		 * The completion_status is evaluated here
		 * (outside of the spinlock).
		 */
		if (completion_status) {
			/* A completion error occurred! */
			if (completion_status & QDMA_CCDF_STATUS_WTE) {
				/* Write transaction error */
				fsl_comp->vdesc.tx_result.result =
					DMA_TRANS_WRITE_FAILED;
			} else if (completion_status & QDMA_CCDF_STATUS_RTE) {
				/* Read transaction error */
				fsl_comp->vdesc.tx_result.result =
					DMA_TRANS_READ_FAILED;
			} else {
				/*
				 * Command/source/destination
				 * description error
				 */
				fsl_comp->vdesc.tx_result.result =
					DMA_TRANS_ABORTED;
				dev_err(fsl_qdma->dma_dev.dev,
					"DMA status descriptor error %x\n",
					completion_status);
			}
		}

		spin_lock(&fsl_comp->qchan->vchan.lock);
		vchan_cookie_complete(&fsl_comp->vdesc);
		fsl_comp->qchan->status = DMA_COMPLETE;
		spin_unlock(&fsl_comp->qchan->vchan.lock);
	}

	return 0;
}

static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
{
	unsigned int intr;
	struct fsl_qdma_engine *fsl_qdma = dev_id;
	void __iomem *status = fsl_qdma->status_base;
	unsigned int decfdw0r;
	unsigned int decfdw1r;
	unsigned int decfdw2r;
	unsigned int decfdw3r;

	intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);

	if (intr) {
		decfdw0r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW0R);
		decfdw1r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW1R);
		decfdw2r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW2R);
		decfdw3r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW3R);
		dev_err(fsl_qdma->dma_dev.dev,
			"DMA transaction error! (%x: %x-%x-%x-%x)\n",
			intr, decfdw0r, decfdw1r, decfdw2r, decfdw3r);
	}

	qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR);
	return IRQ_HANDLED;
}

static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id)
{
	int id;
	unsigned int intr, reg;
	struct fsl_qdma_engine *fsl_qdma = dev_id;
	void __iomem *block, *ctrl = fsl_qdma->ctrl_base;

	id = irq - fsl_qdma->irq_base;
	if (id < 0 || id >= fsl_qdma->block_number) {
		dev_err(fsl_qdma->dma_dev.dev,
			"irq %d is out of range, irq_base is %d\n",
			irq, fsl_qdma->irq_base);
		return IRQ_NONE;
	}

	block = fsl_qdma->block_base +
		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);

	intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0));

	if ((intr & FSL_QDMA_CQIDR_SQT) != 0)
		intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id);

	if (intr != 0) {
		reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
		reg |= FSL_QDMA_DMR_DQD;
		qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
		qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0));
		dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n");
	}

	/* Clear all detected events and interrupts. */
	qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
		    block + FSL_QDMA_BCQIDR(0));

	return IRQ_HANDLED;
}

static int
fsl_qdma_irq_init(struct platform_device *pdev,
		  struct fsl_qdma_engine *fsl_qdma)
{
	int i;
	int cpu;
	int ret;
	char irq_name[20];

	fsl_qdma->error_irq =
		platform_get_irq_byname(pdev, "qdma-error");
	if (fsl_qdma->error_irq < 0)
		return fsl_qdma->error_irq;

	ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
			       fsl_qdma_error_handler, 0,
			       "qDMA error", fsl_qdma);
	if (ret) {
		dev_err(&pdev->dev, "Can't register qDMA controller IRQ.\n");
		return ret;
	}

	for (i = 0; i < fsl_qdma->block_number; i++) {
		sprintf(irq_name, "qdma-queue%d", i);
		fsl_qdma->queue_irq[i] =
			platform_get_irq_byname(pdev, irq_name);

		if (fsl_qdma->queue_irq[i] < 0)
			return fsl_qdma->queue_irq[i];

		ret = devm_request_irq(&pdev->dev,
				       fsl_qdma->queue_irq[i],
				       fsl_qdma_queue_handler,
				       0,
				       "qDMA queue",
				       fsl_qdma);
		if (ret) {
			dev_err(&pdev->dev,
				"Can't register qDMA queue IRQ.\n");
			return ret;
		}

		cpu = i % num_online_cpus();
		ret = irq_set_affinity_hint(fsl_qdma->queue_irq[i],
					    get_cpu_mask(cpu));
		if (ret) {
			dev_err(&pdev->dev,
				"Can't set cpu %d affinity to IRQ %d.\n",
				cpu,
				fsl_qdma->queue_irq[i]);
			return ret;
		}
	}

	return 0;
}

static void fsl_qdma_irq_exit(struct platform_device *pdev,
			      struct fsl_qdma_engine *fsl_qdma)
{
	int i;

	devm_free_irq(&pdev->dev, fsl_qdma->error_irq, fsl_qdma);
	for (i = 0; i < fsl_qdma->block_number; i++)
		devm_free_irq(&pdev->dev, fsl_qdma->queue_irq[i], fsl_qdma);
}

static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
{
	u32 reg;
	int i, j, ret;
	struct fsl_qdma_queue *temp;
	void __iomem *status = fsl_qdma->status_base;
	void __iomem *block, *ctrl = fsl_qdma->ctrl_base;
	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;

	/* Try to halt the qDMA engine first. */
	ret = fsl_qdma_halt(fsl_qdma);
	if (ret) {
		dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!");
		return ret;
	}

	for (i = 0; i < fsl_qdma->block_number; i++) {
		/*
		 * Clear the command queue interrupt detect register for
		 * all queues.
		 */
		block = fsl_qdma->block_base +
			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, i);
		qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
			    block + FSL_QDMA_BCQIDR(0));
	}

	for (j = 0; j < fsl_qdma->block_number; j++) {
		block = fsl_qdma->block_base +
			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
		for (i = 0; i < fsl_qdma->n_queues; i++) {
			temp = fsl_queue + i + (j * fsl_qdma->n_queues);
			/*
			 * Initialize the Command Queue registers
			 * (Dequeue and Enqueue Pointer Address
			 * Registers) to point at the first command
			 * descriptor in memory.
			 */
			qdma_writel(fsl_qdma, temp->bus_addr,
				    block + FSL_QDMA_BCQDPA_SADDR(i));
			qdma_writel(fsl_qdma, temp->bus_addr,
				    block + FSL_QDMA_BCQEPA_SADDR(i));

			/* Initialize the queue mode. */
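			/*
			 * CQ_SIZE encodes the ring size as ilog2(n_cq) - 6
			 * (64 -> 0 ... 16384 -> 8), which is why n_cq must
			 * be a power of two in that range; CD_THLD appears
			 * to use a similar log2-based encoding.
			 */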
			reg = FSL_QDMA_BCQMR_EN;
			reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
			reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
			qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));
		}

		/*
		 * Workaround for erratum ERR010812:
		 * we must enable XOFF to avoid enqueue rejections,
		 * so set SQCCMR ENTER_WM to 0x20.
		 */
		qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM,
			    block + FSL_QDMA_SQCCMR);

		/*
		 * Initialize the status queue registers (Dequeue and
		 * Enqueue Pointer Address Registers) to point at the
		 * first command descriptor in memory.
		 */
		qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr,
			    block + FSL_QDMA_SQEPAR);
		qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr,
			    block + FSL_QDMA_SQDPAR);
		/* Initialize status queue interrupt. */
		qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE,
			    block + FSL_QDMA_BCQIER(0));
		qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN |
			    FSL_QDMA_BSQICR_ICST(5) | 0x8000,
			    block + FSL_QDMA_BSQICR);
		qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE |
			    FSL_QDMA_CQIER_TEIE,
			    block + FSL_QDMA_CQIER);

		/* Initialize the status queue mode. */
		reg = FSL_QDMA_BSQMR_EN;
		reg |= FSL_QDMA_BSQMR_CQ_SIZE(
				ilog2(fsl_qdma->status[j]->n_cq) - 6);

		qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
	}

	/* Initialize the controller interrupt registers. */
	qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR);
	qdma_writel(fsl_qdma, FSL_QDMA_DEIER_CLEAR, status + FSL_QDMA_DEIER);

	reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
	reg &= ~FSL_QDMA_DMR_DQD;
	qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);

	return 0;
}

static struct dma_async_tx_descriptor *
fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
		     dma_addr_t src, size_t len, unsigned long flags)
{
	struct fsl_qdma_comp *fsl_comp;
	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);

	fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan);

	if (!fsl_comp)
		return NULL;

	fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
}

static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
{
	u32 reg;
	struct virt_dma_desc *vdesc;
	struct fsl_qdma_comp *fsl_comp;
	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
	void __iomem *block = fsl_queue->block_base;

	reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id));
	if (reg & (FSL_QDMA_BCQSR_QF | FSL_QDMA_BCQSR_XOFF))
		return;
	vdesc = vchan_next_desc(&fsl_chan->vchan);
	if (!vdesc)
		return;
	list_del(&vdesc->node);
	fsl_comp = to_fsl_qdma_comp(vdesc);

	memcpy(fsl_queue->virt_head++,
	       fsl_comp->virt_addr, sizeof(struct fsl_qdma_format));
	if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
		fsl_queue->virt_head = fsl_queue->cq;

	list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
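	/*
	 * Compiler barrier: keep the descriptor writes above from being
	 * reordered past the enqueue-increment (EI) write that hands
	 * the slot to the hardware.
	 */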
	barrier();
	reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id));
	reg |= FSL_QDMA_BCQMR_EI;
	qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
	fsl_chan->status = DMA_IN_PROGRESS;
}

static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc)
{
	unsigned long flags;
	struct fsl_qdma_comp *fsl_comp;
	struct fsl_qdma_queue *fsl_queue;

	fsl_comp = to_fsl_qdma_comp(vdesc);
	fsl_queue = fsl_comp->qchan->queue;

	spin_lock_irqsave(&fsl_queue->queue_lock, flags);
	list_add_tail(&fsl_comp->list, &fsl_queue->comp_free);
	spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
}

static void fsl_qdma_issue_pending(struct dma_chan *chan)
{
	unsigned long flags;
	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;

	spin_lock_irqsave(&fsl_queue->queue_lock, flags);
	spin_lock(&fsl_chan->vchan.lock);
	if (vchan_issue_pending(&fsl_chan->vchan))
		fsl_qdma_enqueue_desc(fsl_chan);
	spin_unlock(&fsl_chan->vchan.lock);
	spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
}

static void fsl_qdma_synchronize(struct dma_chan *chan)
{
	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);

	vchan_synchronize(&fsl_chan->vchan);
}

static int fsl_qdma_terminate_all(struct dma_chan *chan)
{
	LIST_HEAD(head);
	unsigned long flags;
	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	return 0;
}

static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan)
{
	int ret;
	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
	struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;

	if (fsl_queue->comp_pool && fsl_queue->desc_pool)
		return fsl_qdma->desc_allocated;

	INIT_LIST_HEAD(&fsl_queue->comp_free);

	/*
	 * The dma pool for queue command buffer
	 */
	fsl_queue->comp_pool =
	dma_pool_create("comp_pool",
			chan->device->dev,
			FSL_QDMA_COMMAND_BUFFER_SIZE,
			64, 0);
	if (!fsl_queue->comp_pool)
		return -ENOMEM;

	/*
	 * The dma pool for Descriptor (SD/DD) buffer
	 */
	fsl_queue->desc_pool =
	dma_pool_create("desc_pool",
			chan->device->dev,
			FSL_QDMA_DESCRIPTOR_BUFFER_SIZE,
			32, 0);
	if (!fsl_queue->desc_pool)
		goto err_desc_pool;

	ret = fsl_qdma_pre_request_enqueue_desc(fsl_queue);
	if (ret) {
		dev_err(chan->device->dev,
			"failed to alloc dma buffer for S/G descriptor\n");
		goto err_mem;
	}

	fsl_qdma->desc_allocated++;
	return fsl_qdma->desc_allocated;

err_mem:
	dma_pool_destroy(fsl_queue->desc_pool);
err_desc_pool:
	dma_pool_destroy(fsl_queue->comp_pool);
	return -ENOMEM;
}

static int fsl_qdma_probe(struct platform_device *pdev)
{
	int ret, i;
	int blk_num, blk_off;
	u32 len, chans, queues;
	struct resource *res;
	struct fsl_qdma_chan *fsl_chan;
	struct fsl_qdma_engine *fsl_qdma;
	struct device_node *np = pdev->dev.of_node;

	ret = of_property_read_u32(np, "dma-channels", &chans);
	if (ret) {
		dev_err(&pdev->dev, "Can't get dma-channels.\n");
		return ret;
	}

	ret = of_property_read_u32(np, "block-offset", &blk_off);
	if (ret) {
		dev_err(&pdev->dev, "Can't get block-offset.\n");
		return ret;
	}

	ret = of_property_read_u32(np, "block-number", &blk_num);
	if (ret) {
		dev_err(&pdev->dev, "Can't get block-number.\n");
		return ret;
	}

	blk_num = min_t(int, blk_num, num_online_cpus());

	len = sizeof(*fsl_qdma);
	fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!fsl_qdma)
		return -ENOMEM;

	len = sizeof(*fsl_chan) * chans;
	fsl_qdma->chans = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!fsl_qdma->chans)
		return -ENOMEM;

	len = sizeof(struct fsl_qdma_queue *) * blk_num;
	fsl_qdma->status = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!fsl_qdma->status)
		return -ENOMEM;

	len = sizeof(int) * blk_num;
	fsl_qdma->queue_irq = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!fsl_qdma->queue_irq)
		return -ENOMEM;

	ret = of_property_read_u32(np, "fsl,dma-queues", &queues);
	if (ret) {
		dev_err(&pdev->dev, "Can't get fsl,dma-queues.\n");
		return ret;
	}

	fsl_qdma->desc_allocated = 0;
	fsl_qdma->n_chans = chans;
	fsl_qdma->n_queues = queues;
	fsl_qdma->block_number = blk_num;
	fsl_qdma->block_offset = blk_off;

	mutex_init(&fsl_qdma->fsl_qdma_mutex);

	for (i = 0; i < fsl_qdma->block_number; i++) {
		fsl_qdma->status[i] = fsl_qdma_prep_status_queue(pdev);
		if (!fsl_qdma->status[i])
			return -ENOMEM;
	}
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(fsl_qdma->ctrl_base))
		return PTR_ERR(fsl_qdma->ctrl_base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(fsl_qdma->status_base))
		return PTR_ERR(fsl_qdma->status_base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(fsl_qdma->block_base))
		return PTR_ERR(fsl_qdma->block_base);
	fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, fsl_qdma);
	if (!fsl_qdma->queue)
		return -ENOMEM;

	ret = fsl_qdma_irq_init(pdev, fsl_qdma);
	if (ret)
		return ret;

	fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
	if (fsl_qdma->irq_base < 0)
		return fsl_qdma->irq_base;

	fsl_qdma->feature = of_property_read_bool(np, "big-endian");
	INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);

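	/*
	 * Map the virtual DMA channels round-robin onto the
	 * block_number * n_queues hardware command queues.
	 */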
	for (i = 0; i < fsl_qdma->n_chans; i++) {
		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];

		fsl_chan->qdma = fsl_qdma;
		fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues *
							fsl_qdma->block_number);
		fsl_chan->vchan.desc_free = fsl_qdma_free_desc;
		vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev);
	}

	dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);

	fsl_qdma->dma_dev.dev = &pdev->dev;
	fsl_qdma->dma_dev.device_free_chan_resources =
		fsl_qdma_free_chan_resources;
	fsl_qdma->dma_dev.device_alloc_chan_resources =
		fsl_qdma_alloc_chan_resources;
	fsl_qdma->dma_dev.device_tx_status = dma_cookie_status;
	fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;
	fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;
	fsl_qdma->dma_dev.device_synchronize = fsl_qdma_synchronize;
	fsl_qdma->dma_dev.device_terminate_all = fsl_qdma_terminate_all;

	dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));

	platform_set_drvdata(pdev, fsl_qdma);

	ret = dma_async_device_register(&fsl_qdma->dma_dev);
	if (ret) {
		dev_err(&pdev->dev,
			"Can't register NXP Layerscape qDMA engine.\n");
		return ret;
	}

	ret = fsl_qdma_reg_init(fsl_qdma);
	if (ret) {
		dev_err(&pdev->dev, "Can't initialize the qDMA engine.\n");
		return ret;
	}

	return 0;
}

static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev)
{
	struct fsl_qdma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan,
				 &dmadev->channels, vchan.chan.device_node) {
		list_del(&chan->vchan.chan.device_node);
		tasklet_kill(&chan->vchan.task);
	}
}

static int fsl_qdma_remove(struct platform_device *pdev)
{
	int i;
	struct fsl_qdma_queue *status;
	struct device_node *np = pdev->dev.of_node;
	struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);

	fsl_qdma_irq_exit(pdev, fsl_qdma);
	fsl_qdma_cleanup_vchan(&fsl_qdma->dma_dev);
	of_dma_controller_free(np);
	dma_async_device_unregister(&fsl_qdma->dma_dev);

	for (i = 0; i < fsl_qdma->block_number; i++) {
		status = fsl_qdma->status[i];
		dma_free_coherent(&pdev->dev, sizeof(struct fsl_qdma_format) *
				  status->n_cq, status->cq, status->bus_addr);
	}
	return 0;
}

static const struct of_device_id fsl_qdma_dt_ids[] = {
	{ .compatible = "fsl,ls1021a-qdma", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids);

static struct platform_driver fsl_qdma_driver = {
	.driver		= {
		.name	= "fsl-qdma",
		.of_match_table	= fsl_qdma_dt_ids,
	},
	.probe		= fsl_qdma_probe,
	.remove		= fsl_qdma_remove,
};

module_platform_driver(fsl_qdma_driver);

MODULE_ALIAS("platform:fsl-qdma");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("NXP Layerscape qDMA engine driver");