// SPDX-License-Identifier: GPL-2.0
/*
 * QMC driver
 *
 * Copyright 2022 CS GROUP France
 *
 * Author: Herve Codina <herve.codina@bootlin.com>
 */

#include <soc/fsl/qe/qmc.h>
#include <linux/dma-mapping.h>
#include <linux/hdlc.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <soc/fsl/cpm.h>
#include <sysdev/fsl_soc.h>
#include "tsa.h"

/* SCC general mode register low (32 bits) */
#define SCC_GSMRL	0x00
#define SCC_GSMRL_ENR		(1 << 5)
#define SCC_GSMRL_ENT		(1 << 4)
#define SCC_GSMRL_MODE_QMC	(0x0A << 0)

/* SCC general mode register high (32 bits) */
#define SCC_GSMRH	0x04
#define SCC_GSMRH_CTSS		(1 << 7)
#define SCC_GSMRH_CDS		(1 << 8)
#define SCC_GSMRH_CTSP		(1 << 9)
#define SCC_GSMRH_CDP		(1 << 10)

/* SCC event register (16 bits) */
#define SCC_SCCE	0x10
#define SCC_SCCE_IQOV		(1 << 3)
#define SCC_SCCE_GINT		(1 << 2)
#define SCC_SCCE_GUN		(1 << 1)
#define SCC_SCCE_GOV		(1 << 0)

/* SCC mask register (16 bits) */
#define SCC_SCCM	0x14

/* Multichannel base pointer (32 bits) */
#define QMC_GBL_MCBASE		0x00
/* Multichannel controller state (16 bits) */
#define QMC_GBL_QMCSTATE	0x04
/* Maximum receive buffer length (16 bits) */
#define QMC_GBL_MRBLR		0x06
/* Tx time-slot assignment table pointer (16 bits) */
#define QMC_GBL_TX_S_PTR	0x08
/* Rx pointer (16 bits) */
#define QMC_GBL_RXPTR		0x0A
/* Global receive frame threshold (16 bits) */
#define QMC_GBL_GRFTHR		0x0C
/* Global receive frame count (16 bits) */
#define QMC_GBL_GRFCNT		0x0E
/* Multichannel interrupt base address (32 bits) */
#define QMC_GBL_INTBASE		0x10
/* Multichannel interrupt pointer (32 bits) */
#define QMC_GBL_INTPTR		0x14
/* Rx time-slot assignment table pointer (16 bits) */
#define QMC_GBL_RX_S_PTR	0x18
/* Tx pointer (16 bits) */
#define QMC_GBL_TXPTR		0x1A
/* CRC constant (32 bits) */
#define QMC_GBL_C_MASK32	0x1C
/* Time slot assignment table Rx (32 x 16 bits) */
#define QMC_GBL_TSATRX		0x20
/* Time slot assignment table Tx (32 x 16 bits) */
#define QMC_GBL_TSATTX		0x60
/* CRC constant (16 bits) */
#define QMC_GBL_C_MASK16	0xA0

/* TSA entry (16-bit entry in TSATRX and TSATTX) */
#define QMC_TSA_VALID		(1 << 15)
#define QMC_TSA_WRAP		(1 << 14)
#define QMC_TSA_MASK		(0x303F)
#define QMC_TSA_CHANNEL(x)	((x) << 6)

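/*
 * Illustration (not used by the driver itself): a TSA entry routing a
 * time slot to channel 5, with all bits of the time slot significant,
 * is built as
 *	QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(5)
 * i.e. 0x8000 | 0x303F | (5 << 6) = 0xB17F, which is exactly how
 * qmc_setup_tsa_64rxtx() and qmc_setup_tsa_32rx_32tx() fill the tables.
 */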
/* Tx buffer descriptor base address (16 bits, offset from MCBASE) */
#define QMC_SPE_TBASE	0x00

/* Channel mode register (16 bits) */
#define QMC_SPE_CHAMR	0x02
#define QMC_SPE_CHAMR_MODE_HDLC		(1 << 15)
#define QMC_SPE_CHAMR_MODE_TRANSP	((0 << 15) | (1 << 13))
#define QMC_SPE_CHAMR_ENT		(1 << 12)
#define QMC_SPE_CHAMR_POL		(1 << 8)
#define QMC_SPE_CHAMR_HDLC_IDLM		(1 << 13)
#define QMC_SPE_CHAMR_HDLC_CRC		(1 << 7)
#define QMC_SPE_CHAMR_HDLC_NOF		(0x0f << 0)
#define QMC_SPE_CHAMR_TRANSP_RD		(1 << 14)
#define QMC_SPE_CHAMR_TRANSP_SYNC	(1 << 10)

/* Tx internal state (32 bits) */
#define QMC_SPE_TSTATE	0x04
/* Tx buffer descriptor pointer (16 bits) */
#define QMC_SPE_TBPTR	0x0C
/* Zero-insertion state (32 bits) */
#define QMC_SPE_ZISTATE	0x14
/* Channel's interrupt mask flags (16 bits) */
#define QMC_SPE_INTMSK	0x1C
/* Rx buffer descriptor base address (16 bits, offset from MCBASE) */
#define QMC_SPE_RBASE	0x20
/* HDLC: Maximum frame length register (16 bits) */
#define QMC_SPE_MFLR	0x22
/* TRANSPARENT: Transparent maximum receive length (16 bits) */
#define QMC_SPE_TMRBLR	0x22
/* Rx internal state (32 bits) */
#define QMC_SPE_RSTATE	0x24
/* Rx buffer descriptor pointer (16 bits) */
#define QMC_SPE_RBPTR	0x2C
/* Packs 4 bytes to 1 long word before writing to buffer (32 bits) */
#define QMC_SPE_RPACK	0x30
/* Zero deletion state (32 bits) */
#define QMC_SPE_ZDSTATE	0x34

/* Transparent synchronization (16 bits) */
#define QMC_SPE_TRNSYNC	0x3C
#define QMC_SPE_TRNSYNC_RX(x)	((x) << 8)
#define QMC_SPE_TRNSYNC_TX(x)	((x) << 0)

/* Interrupt related registers bits */
#define QMC_INT_V		(1 << 15)
#define QMC_INT_W		(1 << 14)
#define QMC_INT_NID		(1 << 13)
#define QMC_INT_IDL		(1 << 12)
#define QMC_INT_GET_CHANNEL(x)	(((x) & 0x0FC0) >> 6)
#define QMC_INT_MRF		(1 << 5)
#define QMC_INT_UN		(1 << 4)
#define QMC_INT_RXF		(1 << 3)
#define QMC_INT_BSY		(1 << 2)
#define QMC_INT_TXB		(1 << 1)
#define QMC_INT_RXB		(1 << 0)

/* BD related registers bits */
#define QMC_BD_RX_E	(1 << 15)
#define QMC_BD_RX_W	(1 << 13)
#define QMC_BD_RX_I	(1 << 12)
#define QMC_BD_RX_L	(1 << 11)
#define QMC_BD_RX_F	(1 << 10)
#define QMC_BD_RX_CM	(1 << 9)
#define QMC_BD_RX_UB	(1 << 7)
#define QMC_BD_RX_LG	(1 << 5)
#define QMC_BD_RX_NO	(1 << 4)
#define QMC_BD_RX_AB	(1 << 3)
#define QMC_BD_RX_CR	(1 << 2)

#define QMC_BD_TX_R	(1 << 15)
#define QMC_BD_TX_W	(1 << 13)
#define QMC_BD_TX_I	(1 << 12)
#define QMC_BD_TX_L	(1 << 11)
#define QMC_BD_TX_TC	(1 << 10)
#define QMC_BD_TX_CM	(1 << 9)
#define QMC_BD_TX_UB	(1 << 7)
#define QMC_BD_TX_PAD	(0x0f << 0)

/* Numbers of BDs and interrupt items */
#define QMC_NB_TXBDS	8
#define QMC_NB_RXBDS	8
#define QMC_NB_INTS	128

struct qmc_xfer_desc {
	union {
		void (*tx_complete)(void *context);
		void (*rx_complete)(void *context, size_t length);
	};
	void *context;
};

struct qmc_chan {
	struct list_head list;
	unsigned int id;
	struct qmc *qmc;
	void __iomem *s_param;
	enum qmc_mode mode;
	u64 tx_ts_mask;
	u64 rx_ts_mask;
	bool is_reverse_data;

	spinlock_t tx_lock;
	cbd_t __iomem *txbds;
	cbd_t __iomem *txbd_free;
	cbd_t __iomem *txbd_done;
	struct qmc_xfer_desc tx_desc[QMC_NB_TXBDS];
	u64 nb_tx_underrun;
	bool is_tx_stopped;

	spinlock_t rx_lock;
	cbd_t __iomem *rxbds;
	cbd_t __iomem *rxbd_free;
	cbd_t __iomem *rxbd_done;
	struct qmc_xfer_desc rx_desc[QMC_NB_RXBDS];
	u64 nb_rx_busy;
	int rx_pending;
	bool is_rx_halted;
	bool is_rx_stopped;
};

struct qmc {
	struct device *dev;
	struct tsa_serial *tsa_serial;
	void __iomem *scc_regs;
	void __iomem *scc_pram;
	void __iomem *dpram;
	u16 scc_pram_offset;
	cbd_t __iomem *bd_table;
	dma_addr_t bd_dma_addr;
	size_t bd_size;
	u16 __iomem *int_table;
	u16 __iomem *int_curr;
	dma_addr_t int_dma_addr;
	size_t int_size;
	struct list_head chan_head;
	struct qmc_chan *chans[64];
};

static inline void qmc_write16(void __iomem *addr, u16 val)
{
	iowrite16be(val, addr);
}

static inline u16 qmc_read16(void __iomem *addr)
{
	return ioread16be(addr);
}

static inline void qmc_setbits16(void __iomem *addr, u16 set)
{
	qmc_write16(addr, qmc_read16(addr) | set);
}

static inline void qmc_clrbits16(void __iomem *addr, u16 clr)
{
	qmc_write16(addr, qmc_read16(addr) & ~clr);
}

static inline void qmc_write32(void __iomem *addr, u32 val)
{
	iowrite32be(val, addr);
}

static inline u32 qmc_read32(void __iomem *addr)
{
	return ioread32be(addr);
}

static inline void qmc_setbits32(void __iomem *addr, u32 set)
{
	qmc_write32(addr, qmc_read32(addr) | set);
}

int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info)
{
	struct tsa_serial_info tsa_info;
	int ret;

	/* Retrieve info from the TSA related serial */
	ret = tsa_serial_get_info(chan->qmc->tsa_serial, &tsa_info);
	if (ret)
		return ret;

	info->mode = chan->mode;
	info->rx_fs_rate = tsa_info.rx_fs_rate;
	info->rx_bit_rate = tsa_info.rx_bit_rate;
	info->nb_tx_ts = hweight64(chan->tx_ts_mask);
	info->tx_fs_rate = tsa_info.tx_fs_rate;
	info->tx_bit_rate = tsa_info.tx_bit_rate;
	info->nb_rx_ts = hweight64(chan->rx_ts_mask);

	return 0;
}
EXPORT_SYMBOL(qmc_chan_get_info);

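/*
 * Example consumer usage of qmc_chan_get_info() (illustrative sketch
 * only, error handling trimmed):
 *
 *	struct qmc_chan_info info;
 *
 *	ret = qmc_chan_get_info(chan, &info);
 *	if (ret)
 *		return ret;
 *	pr_info("mode %d, %u Tx TS at %u bps\n",
 *		info.mode, info.nb_tx_ts, info.tx_bit_rate);
 */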
int qmc_chan_set_param(struct qmc_chan *chan, const struct qmc_chan_param *param)
{
	if (param->mode != chan->mode)
		return -EINVAL;

	switch (param->mode) {
	case QMC_HDLC:
		if ((param->hdlc.max_rx_buf_size % 4) ||
		    (param->hdlc.max_rx_buf_size < 8))
			return -EINVAL;

		qmc_write16(chan->qmc->scc_pram + QMC_GBL_MRBLR,
			    param->hdlc.max_rx_buf_size - 8);
		qmc_write16(chan->s_param + QMC_SPE_MFLR,
			    param->hdlc.max_rx_frame_size);
		if (param->hdlc.is_crc32) {
			qmc_setbits16(chan->s_param + QMC_SPE_CHAMR,
				      QMC_SPE_CHAMR_HDLC_CRC);
		} else {
			qmc_clrbits16(chan->s_param + QMC_SPE_CHAMR,
				      QMC_SPE_CHAMR_HDLC_CRC);
		}
		break;

	case QMC_TRANSPARENT:
		qmc_write16(chan->s_param + QMC_SPE_TMRBLR,
			    param->transp.max_rx_buf_size);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(qmc_chan_set_param);

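/*
 * Example (illustrative sketch, assuming an HDLC channel): update the
 * receive buffer and frame limits. As checked above, max_rx_buf_size
 * must be a multiple of 4 and at least 8.
 *
 *	struct qmc_chan_param param = {
 *		.mode = QMC_HDLC,
 *		.hdlc = {
 *			.max_rx_buf_size = 1536,
 *			.max_rx_frame_size = 1518,
 *			.is_crc32 = false,
 *		},
 *	};
 *
 *	ret = qmc_chan_set_param(chan, &param);
 */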
int qmc_chan_write_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
			  void (*complete)(void *context), void *context)
{
	struct qmc_xfer_desc *xfer_desc;
	unsigned long flags;
	cbd_t __iomem *bd;
	u16 ctrl;
	int ret;

	/*
	 * R bit  UB bit
	 *   0      0  : The BD is free
	 *   1      1  : The BD is in use, waiting for transfer
	 *   0      1  : The BD is in use, waiting for completion
	 *   1      0  : Should not happen
	 */

	spin_lock_irqsave(&chan->tx_lock, flags);
	bd = chan->txbd_free;

	ctrl = qmc_read16(&bd->cbd_sc);
	if (ctrl & (QMC_BD_TX_R | QMC_BD_TX_UB)) {
		/* We are full ... */
		ret = -EBUSY;
		goto end;
	}

	qmc_write16(&bd->cbd_datlen, length);
	qmc_write32(&bd->cbd_bufaddr, addr);

	xfer_desc = &chan->tx_desc[bd - chan->txbds];
	xfer_desc->tx_complete = complete;
	xfer_desc->context = context;

	/* Activate the descriptor */
	ctrl |= (QMC_BD_TX_R | QMC_BD_TX_UB);
	wmb(); /* Be sure to flush the descriptor before control update */
	qmc_write16(&bd->cbd_sc, ctrl);

	if (!chan->is_tx_stopped)
		qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_POL);

	if (ctrl & QMC_BD_TX_W)
		chan->txbd_free = chan->txbds;
	else
		chan->txbd_free++;

	ret = 0;

end:
	spin_unlock_irqrestore(&chan->tx_lock, flags);
	return ret;
}
EXPORT_SYMBOL(qmc_chan_write_submit);

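/*
 * Example Tx submission (illustrative sketch only): the caller owns the
 * DMA mapping and is notified through the completion callback, which can
 * be invoked from the QMC interrupt handler. my_tx_done() is a
 * hypothetical consumer callback.
 *
 *	static void my_tx_done(void *context)
 *	{
 *		struct sk_buff *skb = context;
 *		// unmap and release the buffer
 *	}
 *
 *	addr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
 *	ret = qmc_chan_write_submit(chan, addr, skb->len, my_tx_done, skb);
 *	if (ret == -EBUSY)
 *		// all QMC_NB_TXBDS descriptors are in flight, retry later
 */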
static void qmc_chan_write_done(struct qmc_chan *chan)
{
	struct qmc_xfer_desc *xfer_desc;
	void (*complete)(void *context);
	unsigned long flags;
	void *context;
	cbd_t __iomem *bd;
	u16 ctrl;

	/*
	 * R bit  UB bit
	 *   0      0  : The BD is free
	 *   1      1  : The BD is in use, waiting for transfer
	 *   0      1  : The BD is in use, waiting for completion
	 *   1      0  : Should not happen
	 */

	spin_lock_irqsave(&chan->tx_lock, flags);
	bd = chan->txbd_done;

	ctrl = qmc_read16(&bd->cbd_sc);
	while (!(ctrl & QMC_BD_TX_R)) {
		if (!(ctrl & QMC_BD_TX_UB))
			goto end;

		xfer_desc = &chan->tx_desc[bd - chan->txbds];
		complete = xfer_desc->tx_complete;
		context = xfer_desc->context;
		xfer_desc->tx_complete = NULL;
		xfer_desc->context = NULL;

		qmc_write16(&bd->cbd_sc, ctrl & ~QMC_BD_TX_UB);

		if (ctrl & QMC_BD_TX_W)
			chan->txbd_done = chan->txbds;
		else
			chan->txbd_done++;

		if (complete) {
			spin_unlock_irqrestore(&chan->tx_lock, flags);
			complete(context);
			spin_lock_irqsave(&chan->tx_lock, flags);
		}

		bd = chan->txbd_done;
		ctrl = qmc_read16(&bd->cbd_sc);
	}

end:
	spin_unlock_irqrestore(&chan->tx_lock, flags);
}

int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
			 void (*complete)(void *context, size_t length), void *context)
{
	struct qmc_xfer_desc *xfer_desc;
	unsigned long flags;
	cbd_t __iomem *bd;
	u16 ctrl;
	int ret;

	/*
	 * E bit  UB bit
	 *   0      0  : The BD is free
	 *   1      1  : The BD is in use, waiting for transfer
	 *   0      1  : The BD is in use, waiting for completion
	 *   1      0  : Should not happen
	 */

	spin_lock_irqsave(&chan->rx_lock, flags);
	bd = chan->rxbd_free;

	ctrl = qmc_read16(&bd->cbd_sc);
	if (ctrl & (QMC_BD_RX_E | QMC_BD_RX_UB)) {
		/* We are full ... */
		ret = -EBUSY;
		goto end;
	}

	qmc_write16(&bd->cbd_datlen, 0); /* data length is updated by the QMC */
	qmc_write32(&bd->cbd_bufaddr, addr);

	xfer_desc = &chan->rx_desc[bd - chan->rxbds];
	xfer_desc->rx_complete = complete;
	xfer_desc->context = context;

	/* Activate the descriptor */
	ctrl |= (QMC_BD_RX_E | QMC_BD_RX_UB);
	wmb(); /* Be sure to flush data before descriptor activation */
	qmc_write16(&bd->cbd_sc, ctrl);

	/* Restart the receiver if needed */
	if (chan->is_rx_halted && !chan->is_rx_stopped) {
		/* Restart the receiver */
		if (chan->mode == QMC_TRANSPARENT)
			qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
		else
			qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
		qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
		chan->is_rx_halted = false;
	}
	chan->rx_pending++;

	if (ctrl & QMC_BD_RX_W)
		chan->rxbd_free = chan->rxbds;
	else
		chan->rxbd_free++;

	ret = 0;
end:
	spin_unlock_irqrestore(&chan->rx_lock, flags);
	return ret;
}
EXPORT_SYMBOL(qmc_chan_read_submit);

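/*
 * Example Rx submission (illustrative sketch only): buffers are queued
 * ahead of time and handed back, with the received length, through the
 * completion callback. my_rx_done() is a hypothetical consumer callback.
 *
 *	static void my_rx_done(void *context, size_t length)
 *	{
 *		// pass the first 'length' bytes of 'context' upward
 *	}
 *
 *	addr = dma_map_single(dev, buf, buf_size, DMA_FROM_DEVICE);
 *	ret = qmc_chan_read_submit(chan, addr, buf_size, my_rx_done, buf);
 */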
static void qmc_chan_read_done(struct qmc_chan *chan)
{
	void (*complete)(void *context, size_t size);
	struct qmc_xfer_desc *xfer_desc;
	unsigned long flags;
	cbd_t __iomem *bd;
	void *context;
	u16 datalen;
	u16 ctrl;

	/*
	 * E bit  UB bit
	 *   0      0  : The BD is free
	 *   1      1  : The BD is in use, waiting for transfer
	 *   0      1  : The BD is in use, waiting for completion
	 *   1      0  : Should not happen
	 */

	spin_lock_irqsave(&chan->rx_lock, flags);
	bd = chan->rxbd_done;

	ctrl = qmc_read16(&bd->cbd_sc);
	while (!(ctrl & QMC_BD_RX_E)) {
		if (!(ctrl & QMC_BD_RX_UB))
			goto end;

		xfer_desc = &chan->rx_desc[bd - chan->rxbds];
		complete = xfer_desc->rx_complete;
		context = xfer_desc->context;
		xfer_desc->rx_complete = NULL;
		xfer_desc->context = NULL;

		datalen = qmc_read16(&bd->cbd_datlen);
		qmc_write16(&bd->cbd_sc, ctrl & ~QMC_BD_RX_UB);

		if (ctrl & QMC_BD_RX_W)
			chan->rxbd_done = chan->rxbds;
		else
			chan->rxbd_done++;

		chan->rx_pending--;

		if (complete) {
			spin_unlock_irqrestore(&chan->rx_lock, flags);
			complete(context, datalen);
			spin_lock_irqsave(&chan->rx_lock, flags);
		}

		bd = chan->rxbd_done;
		ctrl = qmc_read16(&bd->cbd_sc);
	}

end:
	spin_unlock_irqrestore(&chan->rx_lock, flags);
}

static int qmc_chan_command(struct qmc_chan *chan, u8 qmc_opcode)
{
	return cpm_command(chan->id << 2, (qmc_opcode << 4) | 0x0E);
}

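/*
 * Illustration (informational only): stopping the receiver of channel 3
 * below results in cpm_command(3 << 2, (0x0 << 4) | 0x0E), i.e. the
 * channel id shifted into the channel field of the command and the QMC
 * opcode (0x0 STOP RECEIVE and 0x1 STOP TRANSMIT, as used by the
 * callers below) in the upper nibble, with 0x0E common to all QMC
 * channel commands.
 */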
static int qmc_chan_stop_rx(struct qmc_chan *chan)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chan->rx_lock, flags);

	/* Send STOP RECEIVE command */
	ret = qmc_chan_command(chan, 0x0);
	if (ret) {
		dev_err(chan->qmc->dev, "chan %u: Send STOP RECEIVE failed (%d)\n",
			chan->id, ret);
		goto end;
	}

	chan->is_rx_stopped = true;

end:
	spin_unlock_irqrestore(&chan->rx_lock, flags);
	return ret;
}

static int qmc_chan_stop_tx(struct qmc_chan *chan)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chan->tx_lock, flags);

	/* Send STOP TRANSMIT command */
	ret = qmc_chan_command(chan, 0x1);
	if (ret) {
		dev_err(chan->qmc->dev, "chan %u: Send STOP TRANSMIT failed (%d)\n",
			chan->id, ret);
		goto end;
	}

	chan->is_tx_stopped = true;

end:
	spin_unlock_irqrestore(&chan->tx_lock, flags);
	return ret;
}

int qmc_chan_stop(struct qmc_chan *chan, int direction)
{
	int ret;

	if (direction & QMC_CHAN_READ) {
		ret = qmc_chan_stop_rx(chan);
		if (ret)
			return ret;
	}

	if (direction & QMC_CHAN_WRITE) {
		ret = qmc_chan_stop_tx(chan);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL(qmc_chan_stop);

static void qmc_chan_start_rx(struct qmc_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->rx_lock, flags);

	/* Restart the receiver */
	if (chan->mode == QMC_TRANSPARENT)
		qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
	else
		qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
	qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
	chan->is_rx_halted = false;

	chan->is_rx_stopped = false;

	spin_unlock_irqrestore(&chan->rx_lock, flags);
}

static void qmc_chan_start_tx(struct qmc_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->tx_lock, flags);

	/*
	 * Re-enable the channel transmitter: it may have been disabled if
	 * qmc_chan_reset() was called.
	 */
	qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_ENT);

	/* Set the POL bit in the channel mode register */
	qmc_setbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_POL);

	chan->is_tx_stopped = false;

	spin_unlock_irqrestore(&chan->tx_lock, flags);
}

int qmc_chan_start(struct qmc_chan *chan, int direction)
{
	if (direction & QMC_CHAN_READ)
		qmc_chan_start_rx(chan);

	if (direction & QMC_CHAN_WRITE)
		qmc_chan_start_tx(chan);

	return 0;
}
EXPORT_SYMBOL(qmc_chan_start);

static void qmc_chan_reset_rx(struct qmc_chan *chan)
{
	struct qmc_xfer_desc *xfer_desc;
	unsigned long flags;
	cbd_t __iomem *bd;
	u16 ctrl;

	spin_lock_irqsave(&chan->rx_lock, flags);
	bd = chan->rxbds;
	do {
		ctrl = qmc_read16(&bd->cbd_sc);
		qmc_write16(&bd->cbd_sc, ctrl & ~(QMC_BD_RX_UB | QMC_BD_RX_E));

		xfer_desc = &chan->rx_desc[bd - chan->rxbds];
		xfer_desc->rx_complete = NULL;
		xfer_desc->context = NULL;

		bd++;
	} while (!(ctrl & QMC_BD_RX_W));

	chan->rxbd_free = chan->rxbds;
	chan->rxbd_done = chan->rxbds;
	qmc_write16(chan->s_param + QMC_SPE_RBPTR,
		    qmc_read16(chan->s_param + QMC_SPE_RBASE));

	chan->rx_pending = 0;
	chan->is_rx_stopped = false;

	spin_unlock_irqrestore(&chan->rx_lock, flags);
}

static void qmc_chan_reset_tx(struct qmc_chan *chan)
{
	struct qmc_xfer_desc *xfer_desc;
	unsigned long flags;
	cbd_t __iomem *bd;
	u16 ctrl;

	spin_lock_irqsave(&chan->tx_lock, flags);

	/* Disable the transmitter. It will be re-enabled by qmc_chan_start() */
	qmc_clrbits16(chan->s_param + QMC_SPE_CHAMR, QMC_SPE_CHAMR_ENT);

	bd = chan->txbds;
	do {
		ctrl = qmc_read16(&bd->cbd_sc);
		qmc_write16(&bd->cbd_sc, ctrl & ~(QMC_BD_TX_UB | QMC_BD_TX_R));

		xfer_desc = &chan->tx_desc[bd - chan->txbds];
		xfer_desc->tx_complete = NULL;
		xfer_desc->context = NULL;

		bd++;
	} while (!(ctrl & QMC_BD_TX_W));

	chan->txbd_free = chan->txbds;
	chan->txbd_done = chan->txbds;
	qmc_write16(chan->s_param + QMC_SPE_TBPTR,
		    qmc_read16(chan->s_param + QMC_SPE_TBASE));

	/* Reset TSTATE and ZISTATE to their initial value */
	qmc_write32(chan->s_param + QMC_SPE_TSTATE, 0x30000000);
	qmc_write32(chan->s_param + QMC_SPE_ZISTATE, 0x00000100);

	spin_unlock_irqrestore(&chan->tx_lock, flags);
}

int qmc_chan_reset(struct qmc_chan *chan, int direction)
{
	if (direction & QMC_CHAN_READ)
		qmc_chan_reset_rx(chan);

	if (direction & QMC_CHAN_WRITE)
		qmc_chan_reset_tx(chan);

	return 0;
}
EXPORT_SYMBOL(qmc_chan_reset);

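/*
 * Typical consumer lifecycle around the stop/start/reset API above
 * (illustrative sketch only):
 *
 *	qmc_chan_stop(chan, QMC_CHAN_ALL);
 *	qmc_chan_reset(chan, QMC_CHAN_ALL);
 *	// re-queue Rx buffers with qmc_chan_read_submit() ...
 *	qmc_chan_start(chan, QMC_CHAN_ALL);
 *
 * Note that qmc_chan_reset() drops all pending descriptors without
 * invoking their completion callbacks, so the caller must reclaim any
 * buffers it still owns.
 */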
static int qmc_check_chans(struct qmc *qmc)
{
	struct tsa_serial_info info;
	bool is_one_table = false;
	struct qmc_chan *chan;
	u64 tx_ts_mask = 0;
	u64 rx_ts_mask = 0;
	u64 tx_ts_assigned_mask;
	u64 rx_ts_assigned_mask;
	int ret;

	/* Retrieve info from the TSA related serial */
	ret = tsa_serial_get_info(qmc->tsa_serial, &info);
	if (ret)
		return ret;

	if ((info.nb_tx_ts > 64) || (info.nb_rx_ts > 64)) {
		dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned not supported\n");
		return -EINVAL;
	}

	/*
	 * If more than 32 TS are assigned to this serial, one common table is
	 * used for Tx and Rx and so masks must be equal for all channels.
	 */
	if ((info.nb_tx_ts > 32) || (info.nb_rx_ts > 32)) {
		if (info.nb_tx_ts != info.nb_rx_ts) {
			dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned are not equal\n");
			return -EINVAL;
		}
		is_one_table = true;
	}

	tx_ts_assigned_mask = info.nb_tx_ts == 64 ? U64_MAX : (((u64)1) << info.nb_tx_ts) - 1;
	rx_ts_assigned_mask = info.nb_rx_ts == 64 ? U64_MAX : (((u64)1) << info.nb_rx_ts) - 1;

	list_for_each_entry(chan, &qmc->chan_head, list) {
		if (chan->tx_ts_mask > tx_ts_assigned_mask) {
			dev_err(qmc->dev, "chan %u uses TSA unassigned Tx TS\n", chan->id);
			return -EINVAL;
		}
		if (tx_ts_mask & chan->tx_ts_mask) {
			dev_err(qmc->dev, "chan %u uses an already used Tx TS\n", chan->id);
			return -EINVAL;
		}

		if (chan->rx_ts_mask > rx_ts_assigned_mask) {
			dev_err(qmc->dev, "chan %u uses TSA unassigned Rx TS\n", chan->id);
			return -EINVAL;
		}
		if (rx_ts_mask & chan->rx_ts_mask) {
			dev_err(qmc->dev, "chan %u uses an already used Rx TS\n", chan->id);
			return -EINVAL;
		}

		if (is_one_table && (chan->tx_ts_mask != chan->rx_ts_mask)) {
			dev_err(qmc->dev, "chan %u uses different Rx and Tx TS\n", chan->id);
			return -EINVAL;
		}

		tx_ts_mask |= chan->tx_ts_mask;
		rx_ts_mask |= chan->rx_ts_mask;
	}

	return 0;
}

static unsigned int qmc_nb_chans(struct qmc *qmc)
{
	unsigned int count = 0;
	struct qmc_chan *chan;

	list_for_each_entry(chan, &qmc->chan_head, list)
		count++;

	return count;
}

static int qmc_of_parse_chans(struct qmc *qmc, struct device_node *np)
{
	struct device_node *chan_np;
	struct qmc_chan *chan;
	const char *mode;
	u32 chan_id;
	u64 ts_mask;
	int ret;

	for_each_available_child_of_node(np, chan_np) {
		ret = of_property_read_u32(chan_np, "reg", &chan_id);
		if (ret) {
			dev_err(qmc->dev, "%pOF: failed to read reg\n", chan_np);
			of_node_put(chan_np);
			return ret;
		}
		if (chan_id > 63) {
			dev_err(qmc->dev, "%pOF: Invalid chan_id\n", chan_np);
			of_node_put(chan_np);
			return -EINVAL;
		}

		chan = devm_kzalloc(qmc->dev, sizeof(*chan), GFP_KERNEL);
		if (!chan) {
			of_node_put(chan_np);
			return -ENOMEM;
		}

		chan->id = chan_id;
		spin_lock_init(&chan->rx_lock);
		spin_lock_init(&chan->tx_lock);

		ret = of_property_read_u64(chan_np, "fsl,tx-ts-mask", &ts_mask);
		if (ret) {
			dev_err(qmc->dev, "%pOF: failed to read fsl,tx-ts-mask\n",
				chan_np);
			of_node_put(chan_np);
			return ret;
		}
		chan->tx_ts_mask = ts_mask;

		ret = of_property_read_u64(chan_np, "fsl,rx-ts-mask", &ts_mask);
		if (ret) {
			dev_err(qmc->dev, "%pOF: failed to read fsl,rx-ts-mask\n",
				chan_np);
			of_node_put(chan_np);
			return ret;
		}
		chan->rx_ts_mask = ts_mask;

		mode = "transparent";
		ret = of_property_read_string(chan_np, "fsl,operational-mode", &mode);
		if (ret && ret != -EINVAL) {
			dev_err(qmc->dev, "%pOF: failed to read fsl,operational-mode\n",
				chan_np);
			of_node_put(chan_np);
			return ret;
		}
		if (!strcmp(mode, "transparent")) {
			chan->mode = QMC_TRANSPARENT;
		} else if (!strcmp(mode, "hdlc")) {
			chan->mode = QMC_HDLC;
		} else {
			dev_err(qmc->dev, "%pOF: Invalid fsl,operational-mode (%s)\n",
				chan_np, mode);
			of_node_put(chan_np);
			return -EINVAL;
		}

		chan->is_reverse_data = of_property_read_bool(chan_np,
							      "fsl,reverse-data");

		list_add_tail(&chan->list, &qmc->chan_head);
		qmc->chans[chan->id] = chan;
	}

	return qmc_check_chans(qmc);
}

static int qmc_setup_tsa_64rxtx(struct qmc *qmc, const struct tsa_serial_info *info)
{
	struct qmc_chan *chan;
	unsigned int i;
	u16 val;

	/*
	 * Use a common Tx/Rx 64-entry table.
	 * Everything was previously checked: the Tx and Rx parameters are
	 * identical, so use the Rx parameters to build the table.
	 */

	/* Invalidate all entries */
	for (i = 0; i < 64; i++)
		qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), 0x0000);

	/* Set entries based on the Rx parameters */
	list_for_each_entry(chan, &qmc->chan_head, list) {
		for (i = 0; i < info->nb_rx_ts; i++) {
			if (!(chan->rx_ts_mask & (((u64)1) << i)))
				continue;

			val = QMC_TSA_VALID | QMC_TSA_MASK |
			      QMC_TSA_CHANNEL(chan->id);
			qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), val);
		}
	}

	/* Set the Wrap bit on the last entry */
	qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATRX + ((info->nb_rx_ts - 1) * 2),
		      QMC_TSA_WRAP);

	/* Init pointers to the table */
	val = qmc->scc_pram_offset + QMC_GBL_TSATRX;
	qmc_write16(qmc->scc_pram + QMC_GBL_RX_S_PTR, val);
	qmc_write16(qmc->scc_pram + QMC_GBL_RXPTR, val);
	qmc_write16(qmc->scc_pram + QMC_GBL_TX_S_PTR, val);
	qmc_write16(qmc->scc_pram + QMC_GBL_TXPTR, val);

	return 0;
}

static int qmc_setup_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info *info)
{
	struct qmc_chan *chan;
	unsigned int i;
	u16 val;

	/*
	 * Use one 32-entry table for Tx and one 32-entry table for Rx.
	 * Everything was previously checked.
	 */

	/* Invalidate all entries */
	for (i = 0; i < 32; i++) {
		qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), 0x0000);
		qmc_write16(qmc->scc_pram + QMC_GBL_TSATTX + (i * 2), 0x0000);
	}

	/* Set entries based on the Rx and Tx parameters */
	list_for_each_entry(chan, &qmc->chan_head, list) {
		/* Rx part */
		for (i = 0; i < info->nb_rx_ts; i++) {
			if (!(chan->rx_ts_mask & (((u64)1) << i)))
				continue;

			val = QMC_TSA_VALID | QMC_TSA_MASK |
			      QMC_TSA_CHANNEL(chan->id);
			qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), val);
		}
		/* Tx part */
		for (i = 0; i < info->nb_tx_ts; i++) {
			if (!(chan->tx_ts_mask & (((u64)1) << i)))
				continue;

			val = QMC_TSA_VALID | QMC_TSA_MASK |
			      QMC_TSA_CHANNEL(chan->id);
			qmc_write16(qmc->scc_pram + QMC_GBL_TSATTX + (i * 2), val);
		}
	}

	/* Set the Wrap bit on the last entries */
	qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATRX + ((info->nb_rx_ts - 1) * 2),
		      QMC_TSA_WRAP);
	qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATTX + ((info->nb_tx_ts - 1) * 2),
		      QMC_TSA_WRAP);

	/* Init the Rx pointers ... */
	val = qmc->scc_pram_offset + QMC_GBL_TSATRX;
	qmc_write16(qmc->scc_pram + QMC_GBL_RX_S_PTR, val);
	qmc_write16(qmc->scc_pram + QMC_GBL_RXPTR, val);

	/* ... and the Tx pointers */
	val = qmc->scc_pram_offset + QMC_GBL_TSATTX;
	qmc_write16(qmc->scc_pram + QMC_GBL_TX_S_PTR, val);
	qmc_write16(qmc->scc_pram + QMC_GBL_TXPTR, val);

	return 0;
}

static int qmc_setup_tsa(struct qmc *qmc)
{
	struct tsa_serial_info info;
	int ret;

	/* Retrieve info from the TSA related serial */
	ret = tsa_serial_get_info(qmc->tsa_serial, &info);
	if (ret)
		return ret;

	/*
	 * Set up one common 64-entry table or two 32-entry tables (one for Tx
	 * and one for Rx) according to the number of assigned TS.
	 */
	return ((info.nb_tx_ts > 32) || (info.nb_rx_ts > 32)) ?
		qmc_setup_tsa_64rxtx(qmc, &info) :
		qmc_setup_tsa_32rx_32tx(qmc, &info);
}

static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan)
{
	struct tsa_serial_info info;
	u16 first_rx, last_tx;
	u16 trnsync;
	int ret;

	/* Retrieve info from the TSA related serial */
	ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
	if (ret)
		return ret;

	/* Find the first Rx TS allocated to the channel */
	first_rx = chan->rx_ts_mask ? __ffs64(chan->rx_ts_mask) + 1 : 0;

	/* Find the last Tx TS allocated to the channel */
	last_tx = fls64(chan->tx_ts_mask);

	trnsync = 0;
	if (info.nb_rx_ts)
		trnsync |= QMC_SPE_TRNSYNC_RX((first_rx % info.nb_rx_ts) * 2);
	if (info.nb_tx_ts)
		trnsync |= QMC_SPE_TRNSYNC_TX((last_tx % info.nb_tx_ts) * 2);

	qmc_write16(chan->s_param + QMC_SPE_TRNSYNC, trnsync);

	dev_dbg(qmc->dev, "chan %u: trnsync=0x%04x, rx %u/%u 0x%llx, tx %u/%u 0x%llx\n",
		chan->id, trnsync,
		first_rx, info.nb_rx_ts, chan->rx_ts_mask,
		last_tx, info.nb_tx_ts, chan->tx_ts_mask);

	return 0;
}

static int qmc_setup_chan(struct qmc *qmc, struct qmc_chan *chan)
{
	unsigned int i;
	cbd_t __iomem *bd;
	int ret;
	u16 val;

	chan->qmc = qmc;

	/* Set the channel specific parameter base address */
	chan->s_param = qmc->dpram + (chan->id * 64);
	/* 16 BDs per channel (8 Rx and 8 Tx) */
	chan->txbds = qmc->bd_table + (chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS));
	chan->rxbds = qmc->bd_table + (chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS)) + QMC_NB_TXBDS;

	chan->txbd_free = chan->txbds;
	chan->txbd_done = chan->txbds;
	chan->rxbd_free = chan->rxbds;
	chan->rxbd_done = chan->rxbds;

	/* TBASE and TBPTR */
	val = chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS) * sizeof(cbd_t);
	qmc_write16(chan->s_param + QMC_SPE_TBASE, val);
	qmc_write16(chan->s_param + QMC_SPE_TBPTR, val);

	/* RBASE and RBPTR */
	val = ((chan->id * (QMC_NB_TXBDS + QMC_NB_RXBDS)) + QMC_NB_TXBDS) * sizeof(cbd_t);
	qmc_write16(chan->s_param + QMC_SPE_RBASE, val);
	qmc_write16(chan->s_param + QMC_SPE_RBPTR, val);
	qmc_write32(chan->s_param + QMC_SPE_TSTATE, 0x30000000);
	qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
	qmc_write32(chan->s_param + QMC_SPE_ZISTATE, 0x00000100);
	if (chan->mode == QMC_TRANSPARENT) {
		qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
		qmc_write16(chan->s_param + QMC_SPE_TMRBLR, 60);
		val = QMC_SPE_CHAMR_MODE_TRANSP | QMC_SPE_CHAMR_TRANSP_SYNC;
		if (chan->is_reverse_data)
			val |= QMC_SPE_CHAMR_TRANSP_RD;
		qmc_write16(chan->s_param + QMC_SPE_CHAMR, val);
		ret = qmc_setup_chan_trnsync(qmc, chan);
		if (ret)
			return ret;
	} else {
		qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
		qmc_write16(chan->s_param + QMC_SPE_MFLR, 60);
		qmc_write16(chan->s_param + QMC_SPE_CHAMR,
			    QMC_SPE_CHAMR_MODE_HDLC | QMC_SPE_CHAMR_HDLC_IDLM);
	}

	/* Do not enable interrupts now. They will be enabled later */
	qmc_write16(chan->s_param + QMC_SPE_INTMSK, 0x0000);

	/* Init Rx BDs and set the Wrap bit on the last descriptor */
	BUILD_BUG_ON(QMC_NB_RXBDS == 0);
	val = QMC_BD_RX_I;
	for (i = 0; i < QMC_NB_RXBDS; i++) {
		bd = chan->rxbds + i;
		qmc_write16(&bd->cbd_sc, val);
	}
	bd = chan->rxbds + QMC_NB_RXBDS - 1;
	qmc_write16(&bd->cbd_sc, val | QMC_BD_RX_W);

	/* Init Tx BDs and set the Wrap bit on the last descriptor */
	BUILD_BUG_ON(QMC_NB_TXBDS == 0);
	val = QMC_BD_TX_I;
	if (chan->mode == QMC_HDLC)
		val |= QMC_BD_TX_L | QMC_BD_TX_TC;
	for (i = 0; i < QMC_NB_TXBDS; i++) {
		bd = chan->txbds + i;
		qmc_write16(&bd->cbd_sc, val);
	}
	bd = chan->txbds + QMC_NB_TXBDS - 1;
	qmc_write16(&bd->cbd_sc, val | QMC_BD_TX_W);

	return 0;
}

static int qmc_setup_chans(struct qmc *qmc)
{
	struct qmc_chan *chan;
	int ret;

	list_for_each_entry(chan, &qmc->chan_head, list) {
		ret = qmc_setup_chan(qmc, chan);
		if (ret)
			return ret;
	}

	return 0;
}

static int qmc_finalize_chans(struct qmc *qmc)
{
	struct qmc_chan *chan;
	int ret;

	list_for_each_entry(chan, &qmc->chan_head, list) {
		/* Unmask channel interrupts */
		if (chan->mode == QMC_HDLC) {
			qmc_write16(chan->s_param + QMC_SPE_INTMSK,
				    QMC_INT_NID | QMC_INT_IDL | QMC_INT_MRF |
				    QMC_INT_UN | QMC_INT_RXF | QMC_INT_BSY |
				    QMC_INT_TXB | QMC_INT_RXB);
		} else {
			qmc_write16(chan->s_param + QMC_SPE_INTMSK,
				    QMC_INT_UN | QMC_INT_BSY |
				    QMC_INT_TXB | QMC_INT_RXB);
		}

		/* Force the channel to stop */
		ret = qmc_chan_stop(chan, QMC_CHAN_ALL);
		if (ret)
			return ret;
	}

	return 0;
}

static int qmc_setup_ints(struct qmc *qmc)
{
	unsigned int i;
	u16 __iomem *last;

	/* Zero out all entries */
	for (i = 0; i < (qmc->int_size / sizeof(u16)); i++)
		qmc_write16(qmc->int_table + i, 0x0000);

	/* Set the Wrap bit on the last entry */
	if (qmc->int_size >= sizeof(u16)) {
		last = qmc->int_table + (qmc->int_size / sizeof(u16)) - 1;
		qmc_write16(last, QMC_INT_W);
	}

	return 0;
}

static void qmc_irq_gint(struct qmc *qmc)
{
	struct qmc_chan *chan;
	unsigned int chan_id;
	unsigned long flags;
	u16 int_entry;

	int_entry = qmc_read16(qmc->int_curr);
	while (int_entry & QMC_INT_V) {
		/* Clear all but the Wrap bit */
		qmc_write16(qmc->int_curr, int_entry & QMC_INT_W);

		chan_id = QMC_INT_GET_CHANNEL(int_entry);
		chan = qmc->chans[chan_id];
		if (!chan) {
			dev_err(qmc->dev, "interrupt on invalid chan %u\n", chan_id);
			goto int_next;
		}

		if (int_entry & QMC_INT_TXB)
			qmc_chan_write_done(chan);

		if (int_entry & QMC_INT_UN) {
			dev_info(qmc->dev, "intr chan %u, 0x%04x (UN)\n", chan_id,
				 int_entry);
			chan->nb_tx_underrun++;
		}

		if (int_entry & QMC_INT_BSY) {
			dev_info(qmc->dev, "intr chan %u, 0x%04x (BSY)\n", chan_id,
				 int_entry);
			chan->nb_rx_busy++;
			/* Restart the receiver if needed */
			spin_lock_irqsave(&chan->rx_lock, flags);
			if (chan->rx_pending && !chan->is_rx_stopped) {
				if (chan->mode == QMC_TRANSPARENT)
					qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
				else
					qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x00000080);
				qmc_write32(chan->s_param + QMC_SPE_RSTATE, 0x31000000);
				chan->is_rx_halted = false;
			} else {
				chan->is_rx_halted = true;
			}
			spin_unlock_irqrestore(&chan->rx_lock, flags);
		}

		if (int_entry & QMC_INT_RXB)
			qmc_chan_read_done(chan);

int_next:
		if (int_entry & QMC_INT_W)
			qmc->int_curr = qmc->int_table;
		else
			qmc->int_curr++;
		int_entry = qmc_read16(qmc->int_curr);
	}
}

static irqreturn_t qmc_irq_handler(int irq, void *priv)
{
	struct qmc *qmc = (struct qmc *)priv;
	u16 scce;

	scce = qmc_read16(qmc->scc_regs + SCC_SCCE);
	qmc_write16(qmc->scc_regs + SCC_SCCE, scce);

	if (unlikely(scce & SCC_SCCE_IQOV))
		dev_info(qmc->dev, "IRQ queue overflow\n");

	if (unlikely(scce & SCC_SCCE_GUN))
		dev_err(qmc->dev, "Global transmitter underrun\n");

	if (unlikely(scce & SCC_SCCE_GOV))
		dev_err(qmc->dev, "Global receiver overrun\n");

	/* Normal interrupt */
	if (likely(scce & SCC_SCCE_GINT))
		qmc_irq_gint(qmc);

	return IRQ_HANDLED;
}

static int qmc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	unsigned int nb_chans;
	struct resource *res;
	struct qmc *qmc;
	int irq;
	int ret;

	qmc = devm_kzalloc(&pdev->dev, sizeof(*qmc), GFP_KERNEL);
	if (!qmc)
		return -ENOMEM;

	qmc->dev = &pdev->dev;
	INIT_LIST_HEAD(&qmc->chan_head);

	qmc->scc_regs = devm_platform_ioremap_resource_byname(pdev, "scc_regs");
	if (IS_ERR(qmc->scc_regs))
		return PTR_ERR(qmc->scc_regs);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "scc_pram");
	if (!res)
		return -EINVAL;
	qmc->scc_pram_offset = res->start - get_immrbase();
	qmc->scc_pram = devm_ioremap_resource(qmc->dev, res);
	if (IS_ERR(qmc->scc_pram))
		return PTR_ERR(qmc->scc_pram);

	qmc->dpram = devm_platform_ioremap_resource_byname(pdev, "dpram");
	if (IS_ERR(qmc->dpram))
		return PTR_ERR(qmc->dpram);

	qmc->tsa_serial = devm_tsa_serial_get_byphandle(qmc->dev, np, "fsl,tsa-serial");
	if (IS_ERR(qmc->tsa_serial)) {
		return dev_err_probe(qmc->dev, PTR_ERR(qmc->tsa_serial),
				     "Failed to get TSA serial\n");
	}

	/* Connect the serial (SCC) to TSA */
	ret = tsa_serial_connect(qmc->tsa_serial);
	if (ret) {
		dev_err(qmc->dev, "Failed to connect TSA serial\n");
		return ret;
	}

	/* Parse channel information */
	ret = qmc_of_parse_chans(qmc, np);
	if (ret)
		goto err_tsa_serial_disconnect;

	nb_chans = qmc_nb_chans(qmc);

	/* Init the GSMRH and GSMRL registers */
	qmc_write32(qmc->scc_regs + SCC_GSMRH,
		    SCC_GSMRH_CDS | SCC_GSMRH_CTSS | SCC_GSMRH_CDP | SCC_GSMRH_CTSP);

	/* Enable QMC mode */
	qmc_write32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_MODE_QMC);

	/*
	 * Allocate the buffer descriptor table
	 * 8 Rx and 8 Tx descriptors per channel
	 */
	qmc->bd_size = (nb_chans * (QMC_NB_TXBDS + QMC_NB_RXBDS)) * sizeof(cbd_t);
	qmc->bd_table = dmam_alloc_coherent(qmc->dev, qmc->bd_size,
					    &qmc->bd_dma_addr, GFP_KERNEL);
	if (!qmc->bd_table) {
		dev_err(qmc->dev, "Failed to allocate bd table\n");
		ret = -ENOMEM;
		goto err_tsa_serial_disconnect;
	}
	memset(qmc->bd_table, 0, qmc->bd_size);

	qmc_write32(qmc->scc_pram + QMC_GBL_MCBASE, qmc->bd_dma_addr);

	/* Allocate the interrupt table */
	qmc->int_size = QMC_NB_INTS * sizeof(u16);
	qmc->int_table = dmam_alloc_coherent(qmc->dev, qmc->int_size,
					     &qmc->int_dma_addr, GFP_KERNEL);
	if (!qmc->int_table) {
		dev_err(qmc->dev, "Failed to allocate interrupt table\n");
		ret = -ENOMEM;
		goto err_tsa_serial_disconnect;
	}
	memset(qmc->int_table, 0, qmc->int_size);

	qmc->int_curr = qmc->int_table;
	qmc_write32(qmc->scc_pram + QMC_GBL_INTBASE, qmc->int_dma_addr);
	qmc_write32(qmc->scc_pram + QMC_GBL_INTPTR, qmc->int_dma_addr);

	/* Set MRBLR (valid for HDLC only) to max MRU + max CRC */
	qmc_write16(qmc->scc_pram + QMC_GBL_MRBLR, HDLC_MAX_MRU + 4);

	qmc_write16(qmc->scc_pram + QMC_GBL_GRFTHR, 1);
	qmc_write16(qmc->scc_pram + QMC_GBL_GRFCNT, 1);

	qmc_write32(qmc->scc_pram + QMC_GBL_C_MASK32, 0xDEBB20E3);
	qmc_write16(qmc->scc_pram + QMC_GBL_C_MASK16, 0xF0B8);

	ret = qmc_setup_tsa(qmc);
	if (ret)
		goto err_tsa_serial_disconnect;

	qmc_write16(qmc->scc_pram + QMC_GBL_QMCSTATE, 0x8000);

	ret = qmc_setup_chans(qmc);
	if (ret)
		goto err_tsa_serial_disconnect;

	/* Init the interrupt table */
	ret = qmc_setup_ints(qmc);
	if (ret)
		goto err_tsa_serial_disconnect;

	/* Disable and clear interrupts, set the irq handler */
	qmc_write16(qmc->scc_regs + SCC_SCCM, 0x0000);
	qmc_write16(qmc->scc_regs + SCC_SCCE, 0x000F);
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err_tsa_serial_disconnect;
	}
	ret = devm_request_irq(qmc->dev, irq, qmc_irq_handler, 0, "qmc", qmc);
	if (ret < 0)
		goto err_tsa_serial_disconnect;

	/* Enable interrupts */
	qmc_write16(qmc->scc_regs + SCC_SCCM,
		    SCC_SCCE_IQOV | SCC_SCCE_GINT | SCC_SCCE_GUN | SCC_SCCE_GOV);

	ret = qmc_finalize_chans(qmc);
	if (ret < 0)
		goto err_disable_intr;

	/* Enable the transmitter and the receiver */
	qmc_setbits32(qmc->scc_regs + SCC_GSMRL, SCC_GSMRL_ENR | SCC_GSMRL_ENT);

	platform_set_drvdata(pdev, qmc);

	return 0;

err_disable_intr:
	qmc_write16(qmc->scc_regs + SCC_SCCM, 0);

err_tsa_serial_disconnect:
	tsa_serial_disconnect(qmc->tsa_serial);
	return ret;
}

static int qmc_remove(struct platform_device *pdev)
{
	struct qmc *qmc = platform_get_drvdata(pdev);

	/* Disable the transmitter and the receiver */
	qmc_write32(qmc->scc_regs + SCC_GSMRL,
		    qmc_read32(qmc->scc_regs + SCC_GSMRL) &
		    ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT));

	/* Disable interrupts */
	qmc_write16(qmc->scc_regs + SCC_SCCM, 0);

	/* Disconnect the serial from TSA */
	tsa_serial_disconnect(qmc->tsa_serial);

	return 0;
}

static const struct of_device_id qmc_id_table[] = {
	{ .compatible = "fsl,cpm1-scc-qmc" },
	{} /* sentinel */
};
MODULE_DEVICE_TABLE(of, qmc_id_table);

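/*
 * A minimal device tree node matching this driver could look like the
 * following (illustrative sketch only; the addresses, sizes, interrupt
 * number and TSA serial phandle argument are placeholders):
 *
 *	qmc@a60 {
 *		compatible = "fsl,cpm1-scc-qmc";
 *		reg = <0xa60 0x20>, <0x3c00 0x200>, <0x2000 0x1000>;
 *		reg-names = "scc_regs", "scc_pram", "dpram";
 *		interrupts = <27>;
 *		fsl,tsa-serial = <&tsa 1>;
 *
 *		channel@16 {
 *			reg = <16>;
 *			fsl,operational-mode = "transparent";
 *			fsl,tx-ts-mask = <0x0 0x4>;
 *			fsl,rx-ts-mask = <0x0 0x4>;
 *		};
 *	};
 */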
static struct platform_driver qmc_driver = {
	.driver = {
		.name = "fsl-qmc",
		.of_match_table = of_match_ptr(qmc_id_table),
	},
	.probe = qmc_probe,
	.remove = qmc_remove,
};
module_platform_driver(qmc_driver);

struct qmc_chan *qmc_chan_get_byphandle(struct device_node *np, const char *phandle_name)
{
	struct of_phandle_args out_args;
	struct platform_device *pdev;
	struct qmc_chan *qmc_chan;
	struct qmc *qmc;
	int ret;

	ret = of_parse_phandle_with_fixed_args(np, phandle_name, 1, 0,
					       &out_args);
	if (ret < 0)
		return ERR_PTR(ret);

	if (!of_match_node(qmc_driver.driver.of_match_table, out_args.np)) {
		of_node_put(out_args.np);
		return ERR_PTR(-EINVAL);
	}

	pdev = of_find_device_by_node(out_args.np);
	of_node_put(out_args.np);
	if (!pdev)
		return ERR_PTR(-ENODEV);

	qmc = platform_get_drvdata(pdev);
	if (!qmc) {
		platform_device_put(pdev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	if (out_args.args_count != 1) {
		platform_device_put(pdev);
		return ERR_PTR(-EINVAL);
	}

	if (out_args.args[0] >= ARRAY_SIZE(qmc->chans)) {
		platform_device_put(pdev);
		return ERR_PTR(-EINVAL);
	}

	qmc_chan = qmc->chans[out_args.args[0]];
	if (!qmc_chan) {
		platform_device_put(pdev);
		return ERR_PTR(-ENOENT);
	}

	return qmc_chan;
}
EXPORT_SYMBOL(qmc_chan_get_byphandle);

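/*
 * Example lookup from a consumer node (illustrative sketch only), given
 * a "fsl,qmc-chan = <&qmc 16>;" property in the consumer device tree
 * node ("fsl,qmc-chan" is an example property name). The reference is
 * dropped with qmc_chan_put(), defined below:
 *
 *	chan = qmc_chan_get_byphandle(dev->of_node, "fsl,qmc-chan");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	...
 *	qmc_chan_put(chan);
 */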
void qmc_chan_put(struct qmc_chan *chan)
{
	put_device(chan->qmc->dev);
}
EXPORT_SYMBOL(qmc_chan_put);

static void devm_qmc_chan_release(struct device *dev, void *res)
{
	struct qmc_chan **qmc_chan = res;

	qmc_chan_put(*qmc_chan);
}

struct qmc_chan *devm_qmc_chan_get_byphandle(struct device *dev,
					     struct device_node *np,
					     const char *phandle_name)
{
	struct qmc_chan *qmc_chan;
	struct qmc_chan **dr;

	dr = devres_alloc(devm_qmc_chan_release, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return ERR_PTR(-ENOMEM);

	qmc_chan = qmc_chan_get_byphandle(np, phandle_name);
	if (!IS_ERR(qmc_chan)) {
		*dr = qmc_chan;
		devres_add(dev, dr);
	} else {
		devres_free(dr);
	}

	return qmc_chan;
}
EXPORT_SYMBOL(devm_qmc_chan_get_byphandle);

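/*
 * The managed variant above ties the channel reference to a struct
 * device lifetime (illustrative sketch only, same example property name
 * as above):
 *
 *	chan = devm_qmc_chan_get_byphandle(&pdev->dev, pdev->dev.of_node,
 *					   "fsl,qmc-chan");
 *	if (IS_ERR(chan))
 *		return dev_err_probe(&pdev->dev, PTR_ERR(chan),
 *				     "Failed to get QMC channel\n");
 */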
MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
MODULE_DESCRIPTION("CPM QMC driver");
MODULE_LICENSE("GPL");