// SPDX-License-Identifier: GPL-2.0
/*
 * TSA driver
 *
 * Copyright 2022 CS GROUP France
 *
 * Author: Herve Codina <herve.codina@bootlin.com>
 */

#include "tsa.h"
#include <dt-bindings/soc/cpm1-fsl,tsa.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* TSA SI RAM routing tables entry */
#define TSA_SIRAM_ENTRY_LAST		(1 << 16)
#define TSA_SIRAM_ENTRY_BYTE		(1 << 17)
#define TSA_SIRAM_ENTRY_CNT(x)		(((x) & 0x0f) << 18)
#define TSA_SIRAM_ENTRY_CSEL_MASK	(0x7 << 22)
#define TSA_SIRAM_ENTRY_CSEL_NU		(0x0 << 22)
#define TSA_SIRAM_ENTRY_CSEL_SCC2	(0x2 << 22)
#define TSA_SIRAM_ENTRY_CSEL_SCC3	(0x3 << 22)
#define TSA_SIRAM_ENTRY_CSEL_SCC4	(0x4 << 22)
#define TSA_SIRAM_ENTRY_CSEL_SMC1	(0x5 << 22)
#define TSA_SIRAM_ENTRY_CSEL_SMC2	(0x6 << 22)

/* SI mode register (32 bits) */
#define TSA_SIMODE			0x00
#define TSA_SIMODE_SMC2			0x80000000
#define TSA_SIMODE_SMC1			0x00008000
#define TSA_SIMODE_TDMA(x)		((x) << 0)
#define TSA_SIMODE_TDMB(x)		((x) << 16)
#define TSA_SIMODE_TDM_MASK		0x0fff
#define TSA_SIMODE_TDM_SDM_MASK		0x0c00
#define TSA_SIMODE_TDM_SDM_NORM		0x0000
#define TSA_SIMODE_TDM_SDM_ECHO		0x0400
#define TSA_SIMODE_TDM_SDM_INTL_LOOP	0x0800
#define TSA_SIMODE_TDM_SDM_LOOP_CTRL	0x0c00
#define TSA_SIMODE_TDM_RFSD(x)		((x) << 8)
#define TSA_SIMODE_TDM_DSC		0x0080
#define TSA_SIMODE_TDM_CRT		0x0040
#define TSA_SIMODE_TDM_STZ		0x0020
#define TSA_SIMODE_TDM_CE		0x0010
#define TSA_SIMODE_TDM_FE		0x0008
#define TSA_SIMODE_TDM_GM		0x0004
#define TSA_SIMODE_TDM_TFSD(x)		((x) << 0)

/* SI global mode register (8 bits) */
#define TSA_SIGMR			0x04
#define TSA_SIGMR_ENB			(1 << 3)
#define TSA_SIGMR_ENA			(1 << 2)
#define TSA_SIGMR_RDM_MASK		0x03
#define TSA_SIGMR_RDM_STATIC_TDMA	0x00
#define TSA_SIGMR_RDM_DYN_TDMA		0x01
#define TSA_SIGMR_RDM_STATIC_TDMAB	0x02
#define TSA_SIGMR_RDM_DYN_TDMAB		0x03

/* SI status register (8 bits) */
#define TSA_SISTR			0x06

/* SI command register (8 bits) */
#define TSA_SICMR			0x07

/* SI clock route register (32 bits) */
#define TSA_SICR			0x0C
#define TSA_SICR_SCC2(x)		((x) << 8)
#define TSA_SICR_SCC3(x)		((x) << 16)
#define TSA_SICR_SCC4(x)		((x) << 24)
#define TSA_SICR_SCC_MASK		0x0ff
#define TSA_SICR_SCC_GRX		(1 << 7)
#define TSA_SICR_SCC_SCX_TSA		(1 << 6)
#define TSA_SICR_SCC_RXCS_MASK		(0x7 << 3)
#define TSA_SICR_SCC_RXCS_BRG1		(0x0 << 3)
#define TSA_SICR_SCC_RXCS_BRG2		(0x1 << 3)
#define TSA_SICR_SCC_RXCS_BRG3		(0x2 << 3)
#define TSA_SICR_SCC_RXCS_BRG4		(0x3 << 3)
#define TSA_SICR_SCC_RXCS_CLK15		(0x4 << 3)
#define TSA_SICR_SCC_RXCS_CLK26		(0x5 << 3)
#define TSA_SICR_SCC_RXCS_CLK37		(0x6 << 3)
#define TSA_SICR_SCC_RXCS_CLK48		(0x7 << 3)
#define TSA_SICR_SCC_TXCS_MASK		(0x7 << 0)
#define TSA_SICR_SCC_TXCS_BRG1		(0x0 << 0)
#define TSA_SICR_SCC_TXCS_BRG2		(0x1 << 0)
#define TSA_SICR_SCC_TXCS_BRG3		(0x2 << 0)
#define TSA_SICR_SCC_TXCS_BRG4		(0x3 << 0)
#define TSA_SICR_SCC_TXCS_CLK15		(0x4 << 0)
#define TSA_SICR_SCC_TXCS_CLK26		(0x5 << 0)
#define TSA_SICR_SCC_TXCS_CLK37		(0x6 << 0)
#define TSA_SICR_SCC_TXCS_CLK48		(0x7 << 0)

/* Serial interface RAM pointer register (32 bits) */
#define TSA_SIRP			0x10

struct tsa_entries_area {
	void __iomem *entries_start;
	void __iomem *entries_next;
	void __iomem *last_entry;
};

struct tsa_tdm {
	bool is_enable;
	struct clk *l1rclk_clk;
	struct clk *l1rsync_clk;
	struct clk *l1tclk_clk;
	struct clk *l1tsync_clk;
	u32 simode_tdm;
};

#define TSA_TDMA	0
#define TSA_TDMB	1

struct tsa {
	struct device *dev;
	void __iomem *si_regs;
	void __iomem *si_ram;
	resource_size_t si_ram_sz;
	spinlock_t lock;
	int tdms; /* TSA_TDMx ORed */
	struct tsa_tdm tdm[2]; /* TDMa and TDMb */
	struct tsa_serial {
		unsigned int id;
		struct tsa_serial_info info;
	} serials[6];
};

static inline struct tsa *tsa_serial_get_tsa(struct tsa_serial *tsa_serial)
{
	/* The serials table is indexed by the serial id */
	return container_of(tsa_serial, struct tsa, serials[tsa_serial->id]);
}

static inline void tsa_write32(void __iomem *addr, u32 val)
{
	iowrite32be(val, addr);
}

static inline void tsa_write8(void __iomem *addr, u8 val)
{
	iowrite8(val, addr);
}

static inline u32 tsa_read32(void __iomem *addr)
{
	return ioread32be(addr);
}

static inline void tsa_clrbits32(void __iomem *addr, u32 clr)
{
	tsa_write32(addr, tsa_read32(addr) & ~clr);
}

static inline void tsa_clrsetbits32(void __iomem *addr, u32 clr, u32 set)
{
	tsa_write32(addr, (tsa_read32(addr) & ~clr) | set);
}

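/**
 * tsa_serial_connect - Connect a serial to the TSA
 * @tsa_serial: The serial to connect
 *
 * Set the 'connect to TSA' routing bit of the given SCC in the SI clock
 * route register (SICR). Only SCC2, SCC3 and SCC4 can be connected.
 *
 * Return: 0 on success, -EINVAL for an unsupported serial id.
 */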
int tsa_serial_connect(struct tsa_serial *tsa_serial)
{
	struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
	unsigned long flags;
	u32 clear;
	u32 set;

	switch (tsa_serial->id) {
	case FSL_CPM_TSA_SCC2:
		clear = TSA_SICR_SCC2(TSA_SICR_SCC_MASK);
		set = TSA_SICR_SCC2(TSA_SICR_SCC_SCX_TSA);
		break;
	case FSL_CPM_TSA_SCC3:
		clear = TSA_SICR_SCC3(TSA_SICR_SCC_MASK);
		set = TSA_SICR_SCC3(TSA_SICR_SCC_SCX_TSA);
		break;
	case FSL_CPM_TSA_SCC4:
		clear = TSA_SICR_SCC4(TSA_SICR_SCC_MASK);
		set = TSA_SICR_SCC4(TSA_SICR_SCC_SCX_TSA);
		break;
	default:
		dev_err(tsa->dev, "Unsupported serial id %u\n", tsa_serial->id);
		return -EINVAL;
	}

	spin_lock_irqsave(&tsa->lock, flags);
	tsa_clrsetbits32(tsa->si_regs + TSA_SICR, clear, set);
	spin_unlock_irqrestore(&tsa->lock, flags);

	return 0;
}
EXPORT_SYMBOL(tsa_serial_connect);

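/**
 * tsa_serial_disconnect - Disconnect a serial from the TSA
 * @tsa_serial: The serial to disconnect
 *
 * Clear the SICR routing field of the given SCC so that the serial is no
 * longer connected to the TSA. Only SCC2, SCC3 and SCC4 are supported.
 *
 * Return: 0 on success, -EINVAL for an unsupported serial id.
 */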
int tsa_serial_disconnect(struct tsa_serial *tsa_serial)
{
	struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);
	unsigned long flags;
	u32 clear;

	switch (tsa_serial->id) {
	case FSL_CPM_TSA_SCC2:
		clear = TSA_SICR_SCC2(TSA_SICR_SCC_MASK);
		break;
	case FSL_CPM_TSA_SCC3:
		clear = TSA_SICR_SCC3(TSA_SICR_SCC_MASK);
		break;
	case FSL_CPM_TSA_SCC4:
		clear = TSA_SICR_SCC4(TSA_SICR_SCC_MASK);
		break;
	default:
		dev_err(tsa->dev, "Unsupported serial id %u\n", tsa_serial->id);
		return -EINVAL;
	}

	spin_lock_irqsave(&tsa->lock, flags);
	tsa_clrsetbits32(tsa->si_regs + TSA_SICR, clear, 0);
	spin_unlock_irqrestore(&tsa->lock, flags);

	return 0;
}
EXPORT_SYMBOL(tsa_serial_disconnect);

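/**
 * tsa_serial_get_info - Get information related to a serial
 * @tsa_serial: The serial to get information from
 * @info: Filled with the serial information (time-slot counts, bit and
 *        frame-sync rates) collected when the TDM routes were parsed.
 *
 * Return: 0 (always succeeds).
 */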
int tsa_serial_get_info(struct tsa_serial *tsa_serial, struct tsa_serial_info *info)
{
	memcpy(info, &tsa_serial->info, sizeof(*info));
	return 0;
}
EXPORT_SYMBOL(tsa_serial_get_info);

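/*
 * Compute the SI RAM window available for one TDM direction.
 *
 * When only TDMA is used, the SI RAM is split in two halves (Rx then Tx).
 * When TDMB is involved, each half is split again in quarters, giving the
 * order: TDMA Rx, TDMB Rx, TDMA Tx, TDMB Tx.
 */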
static void tsa_init_entries_area(struct tsa *tsa, struct tsa_entries_area *area,
				  u32 tdms, u32 tdm_id, bool is_rx)
{
	resource_size_t quarter;
	resource_size_t half;

	quarter = tsa->si_ram_sz / 4;
	half = tsa->si_ram_sz / 2;

	if (tdms == BIT(TSA_TDMA)) {
		/* Only TDMA */
		if (is_rx) {
			/* First half of si_ram */
			area->entries_start = tsa->si_ram;
			area->entries_next = area->entries_start + half;
			area->last_entry = NULL;
		} else {
			/* Second half of si_ram */
			area->entries_start = tsa->si_ram + half;
			area->entries_next = area->entries_start + half;
			area->last_entry = NULL;
		}
	} else {
		/* Only TDMB or both TDMs */
		if (tdm_id == TSA_TDMA) {
			if (is_rx) {
				/* First half of first half of si_ram */
				area->entries_start = tsa->si_ram;
				area->entries_next = area->entries_start + quarter;
				area->last_entry = NULL;
			} else {
				/* First half of second half of si_ram */
				area->entries_start = tsa->si_ram + (2 * quarter);
				area->entries_next = area->entries_start + quarter;
				area->last_entry = NULL;
			}
		} else {
			if (is_rx) {
				/* Second half of first half of si_ram */
				area->entries_start = tsa->si_ram + quarter;
				area->entries_next = area->entries_start + quarter;
				area->last_entry = NULL;
			} else {
				/* Second half of second half of si_ram */
				area->entries_start = tsa->si_ram + (3 * quarter);
				area->entries_next = area->entries_start + quarter;
				area->last_entry = NULL;
			}
		}
	}
}

static const char *tsa_serial_id2name(struct tsa *tsa, u32 serial_id)
{
	switch (serial_id) {
	case FSL_CPM_TSA_NU:	return "Not used";
	case FSL_CPM_TSA_SCC2:	return "SCC2";
	case FSL_CPM_TSA_SCC3:	return "SCC3";
	case FSL_CPM_TSA_SCC4:	return "SCC4";
	case FSL_CPM_TSA_SMC1:	return "SMC1";
	case FSL_CPM_TSA_SMC2:	return "SMC2";
	default:
		break;
	}
	return NULL;
}

static u32 tsa_serial_id2csel(struct tsa *tsa, u32 serial_id)
{
	switch (serial_id) {
	case FSL_CPM_TSA_SCC2:	return TSA_SIRAM_ENTRY_CSEL_SCC2;
	case FSL_CPM_TSA_SCC3:	return TSA_SIRAM_ENTRY_CSEL_SCC3;
	case FSL_CPM_TSA_SCC4:	return TSA_SIRAM_ENTRY_CSEL_SCC4;
	case FSL_CPM_TSA_SMC1:	return TSA_SIRAM_ENTRY_CSEL_SMC1;
	case FSL_CPM_TSA_SMC2:	return TSA_SIRAM_ENTRY_CSEL_SMC2;
	default:
		break;
	}
	return TSA_SIRAM_ENTRY_CSEL_NU;
}

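/*
 * Append a route of 'count' time-slots to 'serial_id' in the given SI RAM
 * area. One 32-bit SI RAM entry describes at most 16 byte-wide time-slots,
 * so the route is split into as many entries as needed and the LAST flag is
 * moved to the newly written final entry.
 */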
static int tsa_add_entry(struct tsa *tsa, struct tsa_entries_area *area,
			 u32 count, u32 serial_id)
{
	void __iomem *addr;
	u32 left;
	u32 val;
	u32 cnt;
	u32 nb;

	addr = area->last_entry ? area->last_entry + 4 : area->entries_start;

	nb = DIV_ROUND_UP(count, 8);
	if ((addr + (nb * 4)) > area->entries_next) {
		dev_err(tsa->dev, "si ram area full\n");
		return -ENOSPC;
	}

	if (area->last_entry) {
		/* Clear last flag */
		tsa_clrbits32(area->last_entry, TSA_SIRAM_ENTRY_LAST);
	}

	left = count;
	while (left) {
		val = TSA_SIRAM_ENTRY_BYTE | tsa_serial_id2csel(tsa, serial_id);

		if (left > 16) {
			cnt = 16;
		} else {
			cnt = left;
			val |= TSA_SIRAM_ENTRY_LAST;
			area->last_entry = addr;
		}
		val |= TSA_SIRAM_ENTRY_CNT(cnt - 1);

		tsa_write32(addr, val);
		addr += 4;
		left -= cnt;
	}

	return 0;
}

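/*
 * Parse one 'fsl,rx-ts-routes' or 'fsl,tx-ts-routes' property. The property
 * is a list of (count, serial) pairs; each pair routes 'count' consecutive
 * time-slots to the given serial and updates that serial's information
 * (time-slot counts, bit and frame-sync rates) accordingly.
 */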
static int tsa_of_parse_tdm_route(struct tsa *tsa, struct device_node *tdm_np,
				  u32 tdms, u32 tdm_id, bool is_rx)
{
	struct tsa_entries_area area;
	const char *route_name;
	u32 serial_id;
	int len, i;
	u32 count;
	const char *serial_name;
	struct tsa_serial_info *serial_info;
	struct tsa_tdm *tdm;
	int ret;
	u32 ts;

	route_name = is_rx ? "fsl,rx-ts-routes" : "fsl,tx-ts-routes";

	len = of_property_count_u32_elems(tdm_np, route_name);
	if (len < 0) {
		dev_err(tsa->dev, "%pOF: failed to read %s\n", tdm_np, route_name);
		return len;
	}
	if (len % 2 != 0) {
		dev_err(tsa->dev, "%pOF: wrong %s format\n", tdm_np, route_name);
		return -EINVAL;
	}

	tsa_init_entries_area(tsa, &area, tdms, tdm_id, is_rx);
	ts = 0;
	for (i = 0; i < len; i += 2) {
		of_property_read_u32_index(tdm_np, route_name, i, &count);
		of_property_read_u32_index(tdm_np, route_name, i + 1, &serial_id);

		if (serial_id >= ARRAY_SIZE(tsa->serials)) {
			dev_err(tsa->dev, "%pOF: invalid serial id (%u)\n",
				tdm_np, serial_id);
			return -EINVAL;
		}

		serial_name = tsa_serial_id2name(tsa, serial_id);
		if (!serial_name) {
			dev_err(tsa->dev, "%pOF: unsupported serial id (%u)\n",
				tdm_np, serial_id);
			return -EINVAL;
		}

		dev_dbg(tsa->dev, "tdm_id=%u, %s ts %u..%u -> %s\n",
			tdm_id, route_name, ts, ts + count - 1, serial_name);
		ts += count;

		ret = tsa_add_entry(tsa, &area, count, serial_id);
		if (ret)
			return ret;

		serial_info = &tsa->serials[serial_id].info;
		tdm = &tsa->tdm[tdm_id];
		if (is_rx) {
			serial_info->rx_fs_rate = clk_get_rate(tdm->l1rsync_clk);
			serial_info->rx_bit_rate = clk_get_rate(tdm->l1rclk_clk);
			serial_info->nb_rx_ts += count;
		} else {
			serial_info->tx_fs_rate = tdm->l1tsync_clk ?
				clk_get_rate(tdm->l1tsync_clk) :
				clk_get_rate(tdm->l1rsync_clk);
			serial_info->tx_bit_rate = tdm->l1tclk_clk ?
				clk_get_rate(tdm->l1tclk_clk) :
				clk_get_rate(tdm->l1rclk_clk);
			serial_info->nb_tx_ts += count;
		}
	}
	return 0;
}

static inline int tsa_of_parse_tdm_rx_route(struct tsa *tsa,
					    struct device_node *tdm_np,
					    u32 tdms, u32 tdm_id)
{
	return tsa_of_parse_tdm_route(tsa, tdm_np, tdms, tdm_id, true);
}

static inline int tsa_of_parse_tdm_tx_route(struct tsa *tsa,
					    struct device_node *tdm_np,
					    u32 tdms, u32 tdm_id)
{
	return tsa_of_parse_tdm_route(tsa, tdm_np, tdms, tdm_id, false);
}

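/*
 * Parse all TDM child nodes: frame-sync delays, clock and frame-sync
 * polarities, common Rx/Tx pins, the L1 clocks and the Rx/Tx time-slot
 * routes. On error, every clock already acquired is released.
 */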
static int tsa_of_parse_tdms(struct tsa *tsa, struct device_node *np)
{
	struct device_node *tdm_np;
	struct tsa_tdm *tdm;
	struct clk *clk;
	u32 tdm_id, val;
	int ret;
	int i;

	tsa->tdms = 0;
	tsa->tdm[0].is_enable = false;
	tsa->tdm[1].is_enable = false;

	for_each_available_child_of_node(np, tdm_np) {
		ret = of_property_read_u32(tdm_np, "reg", &tdm_id);
		if (ret) {
			dev_err(tsa->dev, "%pOF: failed to read reg\n", tdm_np);
			of_node_put(tdm_np);
			return ret;
		}
		switch (tdm_id) {
		case 0:
			tsa->tdms |= BIT(TSA_TDMA);
			break;
		case 1:
			tsa->tdms |= BIT(TSA_TDMB);
			break;
		default:
			dev_err(tsa->dev, "%pOF: Invalid tdm_id (%u)\n", tdm_np,
				tdm_id);
			of_node_put(tdm_np);
			return -EINVAL;
		}
	}

	for_each_available_child_of_node(np, tdm_np) {
		ret = of_property_read_u32(tdm_np, "reg", &tdm_id);
		if (ret) {
			dev_err(tsa->dev, "%pOF: failed to read reg\n", tdm_np);
			of_node_put(tdm_np);
			return ret;
		}

		tdm = &tsa->tdm[tdm_id];
		tdm->simode_tdm = TSA_SIMODE_TDM_SDM_NORM;

		val = 0;
		ret = of_property_read_u32(tdm_np, "fsl,rx-frame-sync-delay-bits",
					   &val);
		if (ret && ret != -EINVAL) {
			dev_err(tsa->dev,
				"%pOF: failed to read fsl,rx-frame-sync-delay-bits\n",
				tdm_np);
			of_node_put(tdm_np);
			return ret;
		}
		if (val > 3) {
			dev_err(tsa->dev,
				"%pOF: Invalid fsl,rx-frame-sync-delay-bits (%u)\n",
				tdm_np, val);
			of_node_put(tdm_np);
			return -EINVAL;
		}
		tdm->simode_tdm |= TSA_SIMODE_TDM_RFSD(val);

		val = 0;
		ret = of_property_read_u32(tdm_np, "fsl,tx-frame-sync-delay-bits",
					   &val);
		if (ret && ret != -EINVAL) {
			dev_err(tsa->dev,
				"%pOF: failed to read fsl,tx-frame-sync-delay-bits\n",
				tdm_np);
			of_node_put(tdm_np);
			return ret;
		}
		if (val > 3) {
			dev_err(tsa->dev,
				"%pOF: Invalid fsl,tx-frame-sync-delay-bits (%u)\n",
				tdm_np, val);
			of_node_put(tdm_np);
			return -EINVAL;
		}
		tdm->simode_tdm |= TSA_SIMODE_TDM_TFSD(val);

		if (of_property_read_bool(tdm_np, "fsl,common-rxtx-pins"))
			tdm->simode_tdm |= TSA_SIMODE_TDM_CRT;

		if (of_property_read_bool(tdm_np, "fsl,clock-falling-edge"))
			tdm->simode_tdm |= TSA_SIMODE_TDM_CE;

		if (of_property_read_bool(tdm_np, "fsl,fsync-rising-edge"))
			tdm->simode_tdm |= TSA_SIMODE_TDM_FE;

		if (of_property_read_bool(tdm_np, "fsl,double-speed-clock"))
			tdm->simode_tdm |= TSA_SIMODE_TDM_DSC;

		clk = of_clk_get_by_name(tdm_np, "l1rsync");
		if (IS_ERR(clk)) {
			ret = PTR_ERR(clk);
			of_node_put(tdm_np);
			goto err;
		}
		ret = clk_prepare_enable(clk);
		if (ret) {
			clk_put(clk);
			of_node_put(tdm_np);
			goto err;
		}
		tdm->l1rsync_clk = clk;

		clk = of_clk_get_by_name(tdm_np, "l1rclk");
		if (IS_ERR(clk)) {
			ret = PTR_ERR(clk);
			of_node_put(tdm_np);
			goto err;
		}
		ret = clk_prepare_enable(clk);
		if (ret) {
			clk_put(clk);
			of_node_put(tdm_np);
			goto err;
		}
		tdm->l1rclk_clk = clk;

		if (!(tdm->simode_tdm & TSA_SIMODE_TDM_CRT)) {
			clk = of_clk_get_by_name(tdm_np, "l1tsync");
			if (IS_ERR(clk)) {
				ret = PTR_ERR(clk);
				of_node_put(tdm_np);
				goto err;
			}
			ret = clk_prepare_enable(clk);
			if (ret) {
				clk_put(clk);
				of_node_put(tdm_np);
				goto err;
			}
			tdm->l1tsync_clk = clk;

			clk = of_clk_get_by_name(tdm_np, "l1tclk");
			if (IS_ERR(clk)) {
				ret = PTR_ERR(clk);
				of_node_put(tdm_np);
				goto err;
			}
			ret = clk_prepare_enable(clk);
			if (ret) {
				clk_put(clk);
				of_node_put(tdm_np);
				goto err;
			}
			tdm->l1tclk_clk = clk;
		}

		ret = tsa_of_parse_tdm_rx_route(tsa, tdm_np, tsa->tdms, tdm_id);
		if (ret) {
			of_node_put(tdm_np);
			goto err;
		}

		ret = tsa_of_parse_tdm_tx_route(tsa, tdm_np, tsa->tdms, tdm_id);
		if (ret) {
			of_node_put(tdm_np);
			goto err;
		}

		tdm->is_enable = true;
	}
	return 0;

err:
	for (i = 0; i < 2; i++) {
		if (tsa->tdm[i].l1rsync_clk) {
			clk_disable_unprepare(tsa->tdm[i].l1rsync_clk);
			clk_put(tsa->tdm[i].l1rsync_clk);
		}
		if (tsa->tdm[i].l1rclk_clk) {
			clk_disable_unprepare(tsa->tdm[i].l1rclk_clk);
			clk_put(tsa->tdm[i].l1rclk_clk);
		}
		if (tsa->tdm[i].l1tsync_clk) {
			clk_disable_unprepare(tsa->tdm[i].l1tsync_clk);
			clk_put(tsa->tdm[i].l1tsync_clk);
		}
		if (tsa->tdm[i].l1tclk_clk) {
			clk_disable_unprepare(tsa->tdm[i].l1tclk_clk);
			clk_put(tsa->tdm[i].l1tclk_clk);
		}
	}
	return ret;
}

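/*
 * Fill the whole SI RAM with 'last entry' words so that any area left
 * unprogrammed closes the routing table.
 */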
static void tsa_init_si_ram(struct tsa *tsa)
{
	resource_size_t i;

	/* Fill all entries as the last one */
	for (i = 0; i < tsa->si_ram_sz; i += 4)
		tsa_write32(tsa->si_ram + i, TSA_SIRAM_ENTRY_LAST);
}

static int tsa_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct resource *res;
	struct tsa *tsa;
	unsigned int i;
	u32 val;
	int ret;

	tsa = devm_kzalloc(&pdev->dev, sizeof(*tsa), GFP_KERNEL);
	if (!tsa)
		return -ENOMEM;

	tsa->dev = &pdev->dev;

	for (i = 0; i < ARRAY_SIZE(tsa->serials); i++)
		tsa->serials[i].id = i;

	spin_lock_init(&tsa->lock);

	tsa->si_regs = devm_platform_ioremap_resource_byname(pdev, "si_regs");
	if (IS_ERR(tsa->si_regs))
		return PTR_ERR(tsa->si_regs);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "si_ram");
	if (!res) {
		dev_err(tsa->dev, "si_ram resource missing\n");
		return -EINVAL;
	}
	tsa->si_ram_sz = resource_size(res);
	tsa->si_ram = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(tsa->si_ram))
		return PTR_ERR(tsa->si_ram);

	tsa_init_si_ram(tsa);

	ret = tsa_of_parse_tdms(tsa, np);
	if (ret)
		return ret;

	/* Set SIMODE */
	val = 0;
	if (tsa->tdm[0].is_enable)
		val |= TSA_SIMODE_TDMA(tsa->tdm[0].simode_tdm);
	if (tsa->tdm[1].is_enable)
		val |= TSA_SIMODE_TDMB(tsa->tdm[1].simode_tdm);

	tsa_clrsetbits32(tsa->si_regs + TSA_SIMODE,
			 TSA_SIMODE_TDMA(TSA_SIMODE_TDM_MASK) |
			 TSA_SIMODE_TDMB(TSA_SIMODE_TDM_MASK),
			 val);

	/* Set SIGMR */
	val = (tsa->tdms == BIT(TSA_TDMA)) ?
		TSA_SIGMR_RDM_STATIC_TDMA : TSA_SIGMR_RDM_STATIC_TDMAB;
	if (tsa->tdms & BIT(TSA_TDMA))
		val |= TSA_SIGMR_ENA;
	if (tsa->tdms & BIT(TSA_TDMB))
		val |= TSA_SIGMR_ENB;
	tsa_write8(tsa->si_regs + TSA_SIGMR, val);

	platform_set_drvdata(pdev, tsa);

	return 0;
}

static int tsa_remove(struct platform_device *pdev)
{
	struct tsa *tsa = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < 2; i++) {
		if (tsa->tdm[i].l1rsync_clk) {
			clk_disable_unprepare(tsa->tdm[i].l1rsync_clk);
			clk_put(tsa->tdm[i].l1rsync_clk);
		}
		if (tsa->tdm[i].l1rclk_clk) {
			clk_disable_unprepare(tsa->tdm[i].l1rclk_clk);
			clk_put(tsa->tdm[i].l1rclk_clk);
		}
		if (tsa->tdm[i].l1tsync_clk) {
			clk_disable_unprepare(tsa->tdm[i].l1tsync_clk);
			clk_put(tsa->tdm[i].l1tsync_clk);
		}
		if (tsa->tdm[i].l1tclk_clk) {
			clk_disable_unprepare(tsa->tdm[i].l1tclk_clk);
			clk_put(tsa->tdm[i].l1tclk_clk);
		}
	}
	return 0;
}

static const struct of_device_id tsa_id_table[] = {
	{ .compatible = "fsl,cpm1-tsa" },
	{} /* sentinel */
};
MODULE_DEVICE_TABLE(of, tsa_id_table);

static struct platform_driver tsa_driver = {
	.driver = {
		.name = "fsl-tsa",
		.of_match_table = of_match_ptr(tsa_id_table),
	},
	.probe = tsa_probe,
	.remove = tsa_remove,
};
module_platform_driver(tsa_driver);

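/**
 * tsa_serial_get_byphandle - Get the TSA serial pointed to by a phandle
 * @np: Device node containing the phandle property
 * @phandle_name: Name of the phandle property, whose single argument is the
 *                serial id (FSL_CPM_TSA_*)
 *
 * A reference is taken on the TSA device; release it with tsa_serial_put().
 *
 * Return: a tsa_serial pointer on success, or an ERR_PTR() value
 * (-EPROBE_DEFER if the TSA device is not yet probed).
 */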
struct tsa_serial *tsa_serial_get_byphandle(struct device_node *np,
					    const char *phandle_name)
{
	struct of_phandle_args out_args;
	struct platform_device *pdev;
	struct tsa_serial *tsa_serial;
	struct tsa *tsa;
	int ret;

	ret = of_parse_phandle_with_fixed_args(np, phandle_name, 1, 0, &out_args);
	if (ret < 0)
		return ERR_PTR(ret);

	if (!of_match_node(tsa_driver.driver.of_match_table, out_args.np)) {
		of_node_put(out_args.np);
		return ERR_PTR(-EINVAL);
	}

	pdev = of_find_device_by_node(out_args.np);
	of_node_put(out_args.np);
	if (!pdev)
		return ERR_PTR(-ENODEV);

	tsa = platform_get_drvdata(pdev);
	if (!tsa) {
		platform_device_put(pdev);
		return ERR_PTR(-EPROBE_DEFER);
	}

	if (out_args.args_count != 1) {
		platform_device_put(pdev);
		return ERR_PTR(-EINVAL);
	}

	if (out_args.args[0] >= ARRAY_SIZE(tsa->serials)) {
		platform_device_put(pdev);
		return ERR_PTR(-EINVAL);
	}

	tsa_serial = &tsa->serials[out_args.args[0]];

	/*
	 * Be sure that the serial id matches the phandle arg.
	 * The tsa_serials table is indexed by serial ids. The serial id is set
	 * during the probe() call and needs to be coherent.
	 */
	if (WARN_ON(tsa_serial->id != out_args.args[0])) {
		platform_device_put(pdev);
		return ERR_PTR(-EINVAL);
	}

	return tsa_serial;
}
EXPORT_SYMBOL(tsa_serial_get_byphandle);

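/**
 * tsa_serial_put - Release a TSA serial obtained with tsa_serial_get_byphandle()
 * @tsa_serial: The serial to release
 *
 * Drop the reference taken on the TSA device when the serial was looked up.
 */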
void tsa_serial_put(struct tsa_serial *tsa_serial)
{
	struct tsa *tsa = tsa_serial_get_tsa(tsa_serial);

	put_device(tsa->dev);
}
EXPORT_SYMBOL(tsa_serial_put);

static void devm_tsa_serial_release(struct device *dev, void *res)
{
	struct tsa_serial **tsa_serial = res;

	tsa_serial_put(*tsa_serial);
}

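/**
 * devm_tsa_serial_get_byphandle - Managed version of tsa_serial_get_byphandle()
 * @dev: Device for which the lookup is managed
 * @np: Device node containing the phandle property
 * @phandle_name: Name of the phandle property
 *
 * The serial is automatically released (tsa_serial_put()) when @dev is
 * unbound.
 */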
struct tsa_serial *devm_tsa_serial_get_byphandle(struct device *dev,
						 struct device_node *np,
						 const char *phandle_name)
{
	struct tsa_serial *tsa_serial;
	struct tsa_serial **dr;

	dr = devres_alloc(devm_tsa_serial_release, sizeof(*dr), GFP_KERNEL);
	if (!dr)
		return ERR_PTR(-ENOMEM);

	tsa_serial = tsa_serial_get_byphandle(np, phandle_name);
	if (!IS_ERR(tsa_serial)) {
		*dr = tsa_serial;
		devres_add(dev, dr);
	} else {
		devres_free(dr);
	}

	return tsa_serial;
}
EXPORT_SYMBOL(devm_tsa_serial_get_byphandle);
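
/*
 * Typical consumer usage (illustrative sketch only; the "fsl,tsa-serial"
 * property name is an example, not mandated by this driver):
 *
 *	struct tsa_serial *serial;
 *	struct tsa_serial_info info;
 *
 *	serial = devm_tsa_serial_get_byphandle(dev, np, "fsl,tsa-serial");
 *	if (IS_ERR(serial))
 *		return PTR_ERR(serial);
 *
 *	tsa_serial_connect(serial);
 *	tsa_serial_get_info(serial, &info);
 *	...
 *	tsa_serial_disconnect(serial);
 */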

MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
MODULE_DESCRIPTION("CPM TSA driver");
MODULE_LICENSE("GPL");