// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2013 Freescale Semiconductor, Inc.
 *
 * clock driver for Freescale QorIQ SoCs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/fsl/guts.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of.h>
#include <linux/slab.h>

#define PLL_DIV1	0
#define PLL_DIV2	1
#define PLL_DIV3	2
#define PLL_DIV4	3

#define PLATFORM_PLL	0
#define CGA_PLL1	1
#define CGA_PLL2	2
#define CGA_PLL3	3
#define CGA_PLL4	4	/* only on clockgen-1.0, which lacks CGB */
#define CGB_PLL1	4
#define CGB_PLL2	5
#define MAX_PLL_DIV	16

struct clockgen_pll_div {
	struct clk *clk;
	char name[32];
};

struct clockgen_pll {
	struct clockgen_pll_div div[MAX_PLL_DIV];
};

#define CLKSEL_VALID	1
#define CLKSEL_80PCT	2	/* Only allowed if PLL <= 80% of max cpu freq */

struct clockgen_sourceinfo {
	u32 flags;	/* CLKSEL_xxx */
	int pll;	/* CGx_PLLn */
	int div;	/* PLL_DIVn */
};

#define NUM_MUX_PARENTS	16

struct clockgen_muxinfo {
	struct clockgen_sourceinfo clksel[NUM_MUX_PARENTS];
};

#define NUM_HWACCEL	5
#define NUM_CMUX	8

struct clockgen;

/*
 * cmux freq must be >= platform pll.
 * If not set, cmux freq must be >= platform pll/2
 */
#define CG_CMUX_GE_PLAT		1

#define CG_PLL_8BIT		2	/* PLLCnGSR[CFG] is 8 bits, not 6 */
#define CG_VER3			4	/* version 3 cg: reg layout different */
#define CG_LITTLE_ENDIAN	8

struct clockgen_chipinfo {
	const char *compat, *guts_compat;
	const struct clockgen_muxinfo *cmux_groups[2];
	const struct clockgen_muxinfo *hwaccel[NUM_HWACCEL];
	void (*init_periph)(struct clockgen *cg);
	int cmux_to_group[NUM_CMUX + 1]; /* array should be -1 terminated */
	u32 pll_mask;	/* 1 << n bit set if PLL n is valid */
	u32 flags;	/* CG_xxx */
};

struct clockgen {
	struct device_node *node;
	void __iomem *regs;
	struct clockgen_chipinfo info; /* mutable copy */
	struct clk *sysclk, *coreclk;
	struct clockgen_pll pll[6];
	struct clk *cmux[NUM_CMUX];
	struct clk *hwaccel[NUM_HWACCEL];
	struct clk *fman[2];
	struct ccsr_guts __iomem *guts;
};

static struct clockgen clockgen;

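/*
 * Endianness-aware register accessors: clockgen registers are accessed
 * big-endian unless the matched chipinfo sets CG_LITTLE_ENDIAN.
 */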
static void cg_out(struct clockgen *cg, u32 val, u32 __iomem *reg)
{
	if (cg->info.flags & CG_LITTLE_ENDIAN)
		iowrite32(val, reg);
	else
		iowrite32be(val, reg);
}

static u32 cg_in(struct clockgen *cg, u32 __iomem *reg)
{
	u32 val;

	if (cg->info.flags & CG_LITTLE_ENDIAN)
		val = ioread32(reg);
	else
		val = ioread32be(reg);

	return val;
}

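/*
 * The tables below describe, per chip, which PLL output each CLKSEL value
 * of a mux selects.  Entries without CLKSEL_VALID are reserved encodings
 * and are never offered as mux parents.
 */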
static const struct clockgen_muxinfo p2041_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p2041_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5020_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p5020_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5040_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p5040_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo p4080_cmux_grp1 = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		[8] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL3, PLL_DIV1 },
	}
};

static const struct clockgen_muxinfo p4080_cmux_grp2 = {
	{
		[0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 },
		[8] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV1 },
		[9] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV2 },
		[12] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV1 },
		[13] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo t1023_cmux = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo t1040_cmux = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		[1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		[4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		[5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo clockgen2_cmux_cga = {
	{
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL3, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo clockgen2_cmux_cga12 = {
	{
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo clockgen2_cmux_cgb = {
	{
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGB_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGB_PLL2, PLL_DIV4 },
	},
};

static const struct clockgen_muxinfo ls1028a_hwa1 = {
	{
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1028a_hwa2 = {
	{
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1028a_hwa3 = {
	{
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1028a_hwa4 = {
	{
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1043a_hwa1 = {
	{
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1043a_hwa2 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1046a_hwa1 = {
	{
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo ls1046a_hwa2 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
		{},
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	},
};

static const struct clockgen_muxinfo ls1012a_cmux = {
	{
		[0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{},
		[2] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	}
};

static const struct clockgen_muxinfo t1023_hwa1 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t1023_hwa2 = {
	{
		[6] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
	},
};

static const struct clockgen_muxinfo t2080_hwa1 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t2080_hwa2 = {
	{
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV4 },
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t4240_hwa1 = {
	{
		{ CLKSEL_VALID, PLATFORM_PLL, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV1 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV3 },
		{ CLKSEL_VALID, CGA_PLL1, PLL_DIV4 },
		{},
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV2 },
		{ CLKSEL_VALID, CGA_PLL2, PLL_DIV3 },
	},
};

static const struct clockgen_muxinfo t4240_hwa4 = {
	{
		[2] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		[3] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 },
		[4] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV4 },
		[5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		[6] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
	},
};

static const struct clockgen_muxinfo t4240_hwa5 = {
	{
		[2] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 },
		[3] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV3 },
		[4] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV4 },
		[5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 },
		[6] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 },
		[7] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 },
	},
};

#define RCWSR7_FM1_CLK_SEL	0x40000000
#define RCWSR7_FM2_CLK_SEL	0x20000000
#define RCWSR7_HWA_ASYNC_DIV	0x04000000

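/*
 * The init_periph callbacks below pick the FMan clock parents from the
 * FMn_CLK_SEL (and, where applicable, HWA_ASYNC_DIV) bits latched in
 * RCWSR[7] of the guts block.
 */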
static void __init p2041_init_periph(struct clockgen *cg)
{
	u32 reg;

	reg = ioread32be(&cg->guts->rcwsr[7]);

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL2].div[PLL_DIV2].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p4080_init_periph(struct clockgen *cg)
{
	u32 reg;

	reg = ioread32be(&cg->guts->rcwsr[7]);

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;

	if (reg & RCWSR7_FM2_CLK_SEL)
		cg->fman[1] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk;
	else
		cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p5020_init_periph(struct clockgen *cg)
{
	u32 reg;
	int div = PLL_DIV2;

	reg = ioread32be(&cg->guts->rcwsr[7]);
	if (reg & RCWSR7_HWA_ASYNC_DIV)
		div = PLL_DIV4;

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL2].div[div].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init p5040_init_periph(struct clockgen *cg)
{
	u32 reg;
	int div = PLL_DIV2;

	reg = ioread32be(&cg->guts->rcwsr[7]);
	if (reg & RCWSR7_HWA_ASYNC_DIV)
		div = PLL_DIV4;

	if (reg & RCWSR7_FM1_CLK_SEL)
		cg->fman[0] = cg->pll[CGA_PLL3].div[div].clk;
	else
		cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;

	if (reg & RCWSR7_FM2_CLK_SEL)
		cg->fman[1] = cg->pll[CGA_PLL3].div[div].clk;
	else
		cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk;
}

static void __init t1023_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[1];
}

static void __init t1040_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk;
}

static void __init t2080_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[0];
}

static void __init t4240_init_periph(struct clockgen *cg)
{
	cg->fman[0] = cg->hwaccel[3];
	cg->fman[1] = cg->hwaccel[4];
}

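/*
 * Per-SoC descriptions, matched against the clockgen node's compatible
 * string in clockgen_init().  cmux_to_group maps each core-cluster mux to
 * an entry in cmux_groups and is -1 terminated; pll_mask has bit n set
 * for each PLL that exists on the chip.
 */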
static const struct clockgen_chipinfo chipinfo[] = {
	{
		.compat = "fsl,b4420-clockgen",
		.guts_compat = "fsl,b4860-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t2080_hwa1
		},
		.cmux_to_group = {
			0, 1, 1, 1, -1
		},
		.pll_mask = 0x3f,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,b4860-clockgen",
		.guts_compat = "fsl,b4860-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t2080_hwa1
		},
		.cmux_to_group = {
			0, 1, 1, 1, -1
		},
		.pll_mask = 0x3f,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,ls1021a-clockgen",
		.cmux_groups = {
			&t1023_cmux
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x03,
	},
	{
		.compat = "fsl,ls1028a-clockgen",
		.cmux_groups = {
			&clockgen2_cmux_cga12
		},
		.hwaccel = {
			&ls1028a_hwa1, &ls1028a_hwa2,
			&ls1028a_hwa3, &ls1028a_hwa4
		},
		.cmux_to_group = {
			0, 0, 0, 0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_VER3 | CG_LITTLE_ENDIAN,
	},
	{
		.compat = "fsl,ls1043a-clockgen",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&t1040_cmux
		},
		.hwaccel = {
			&ls1043a_hwa1, &ls1043a_hwa2
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,ls1046a-clockgen",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&t1040_cmux
		},
		.hwaccel = {
			&ls1046a_hwa1, &ls1046a_hwa2
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,ls1088a-clockgen",
		.cmux_groups = {
			&clockgen2_cmux_cga12
		},
		.cmux_to_group = {
			0, 0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_VER3 | CG_LITTLE_ENDIAN,
	},
	{
		.compat = "fsl,ls1012a-clockgen",
		.cmux_groups = {
			&ls1012a_cmux
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x03,
	},
	{
		.compat = "fsl,ls2080a-clockgen",
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x37,
		.flags = CG_VER3 | CG_LITTLE_ENDIAN,
	},
	{
		.compat = "fsl,lx2160a-clockgen",
		.cmux_groups = {
			&clockgen2_cmux_cga12, &clockgen2_cmux_cgb
		},
		.cmux_to_group = {
			0, 0, 0, 0, 1, 1, 1, 1, -1
		},
		.pll_mask = 0x37,
		.flags = CG_VER3 | CG_LITTLE_ENDIAN,
	},
	{
		.compat = "fsl,p2041-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p2041_init_periph,
		.cmux_groups = {
			&p2041_cmux_grp1, &p2041_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x07,
	},
	{
		.compat = "fsl,p3041-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p2041_init_periph,
		.cmux_groups = {
			&p2041_cmux_grp1, &p2041_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x07,
	},
	{
		.compat = "fsl,p4080-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p4080_init_periph,
		.cmux_groups = {
			&p4080_cmux_grp1, &p4080_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 0, 0, 1, 1, 1, 1, -1
		},
		.pll_mask = 0x1f,
	},
	{
		.compat = "fsl,p5020-clockgen",
		.guts_compat = "fsl,qoriq-device-config-1.0",
		.init_periph = p5020_init_periph,
		.cmux_groups = {
			&p5020_cmux_grp1, &p5020_cmux_grp2
		},
		.cmux_to_group = {
			0, 1, -1
		},
		.pll_mask = 0x07,
	},
	{
		.compat = "fsl,p5040-clockgen",
		.guts_compat = "fsl,p5040-device-config",
		.init_periph = p5040_init_periph,
		.cmux_groups = {
			&p5040_cmux_grp1, &p5040_cmux_grp2
		},
		.cmux_to_group = {
			0, 0, 1, 1, -1
		},
		.pll_mask = 0x0f,
	},
	{
		.compat = "fsl,t1023-clockgen",
		.guts_compat = "fsl,t1023-device-config",
		.init_periph = t1023_init_periph,
		.cmux_groups = {
			&t1023_cmux
		},
		.hwaccel = {
			&t1023_hwa1, &t1023_hwa2
		},
		.cmux_to_group = {
			0, 0, -1
		},
		.pll_mask = 0x03,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t1040-clockgen",
		.guts_compat = "fsl,t1040-device-config",
		.init_periph = t1040_init_periph,
		.cmux_groups = {
			&t1040_cmux
		},
		.cmux_to_group = {
			0, 0, 0, 0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t2080-clockgen",
		.guts_compat = "fsl,t2080-device-config",
		.init_periph = t2080_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga12
		},
		.hwaccel = {
			&t2080_hwa1, &t2080_hwa2
		},
		.cmux_to_group = {
			0, -1
		},
		.pll_mask = 0x07,
		.flags = CG_PLL_8BIT,
	},
	{
		.compat = "fsl,t4240-clockgen",
		.guts_compat = "fsl,t4240-device-config",
		.init_periph = t4240_init_periph,
		.cmux_groups = {
			&clockgen2_cmux_cga, &clockgen2_cmux_cgb
		},
		.hwaccel = {
			&t4240_hwa1, NULL, NULL, &t4240_hwa4, &t4240_hwa5
		},
		.cmux_to_group = {
			0, 0, 1, -1
		},
		.pll_mask = 0x3f,
		.flags = CG_PLL_8BIT,
	},
	{},
};

struct mux_hwclock {
	struct clk_hw hw;
	struct clockgen *cg;
	const struct clockgen_muxinfo *info;
	u32 __iomem *reg;
	u8 parent_to_clksel[NUM_MUX_PARENTS];
	s8 clksel_to_parent[NUM_MUX_PARENTS];
	int num_parents;
};

#define to_mux_hwclock(p)	container_of(p, struct mux_hwclock, hw)
#define CLKSEL_MASK		0x78000000
#define CLKSEL_SHIFT		27

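/*
 * The clock-select field occupies bits 30:27 of each mux register
 * (CLKSEL_MASK/CLKSEL_SHIFT).  parent_to_clksel/clksel_to_parent translate
 * between that raw field and the clk framework's parent indices, since
 * invalid selections are not registered as parents.
 */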
static int mux_set_parent(struct clk_hw *hw, u8 idx)
{
	struct mux_hwclock *hwc = to_mux_hwclock(hw);
	u32 clksel;

	if (idx >= hwc->num_parents)
		return -EINVAL;

	clksel = hwc->parent_to_clksel[idx];
	cg_out(hwc->cg, (clksel << CLKSEL_SHIFT) & CLKSEL_MASK, hwc->reg);

	return 0;
}

static u8 mux_get_parent(struct clk_hw *hw)
{
	struct mux_hwclock *hwc = to_mux_hwclock(hw);
	u32 clksel;
	s8 ret;

	clksel = (cg_in(hwc->cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;

	ret = hwc->clksel_to_parent[clksel];
	if (ret < 0) {
		pr_err("%s: mux at %p has bad clksel\n", __func__, hwc->reg);
		return 0;
	}

	return ret;
}

static const struct clk_ops cmux_ops = {
	.get_parent = mux_get_parent,
	.set_parent = mux_set_parent,
};

/*
 * Don't allow setting for now, as the clock options haven't been
 * sanitized for additional restrictions.
 */
static const struct clk_ops hwaccel_ops = {
	.get_parent = mux_get_parent,
};

static const struct clockgen_pll_div *get_pll_div(struct clockgen *cg,
						  struct mux_hwclock *hwc,
						  int idx)
{
	int pll, div;

	if (!(hwc->info->clksel[idx].flags & CLKSEL_VALID))
		return NULL;

	pll = hwc->info->clksel[idx].pll;
	div = hwc->info->clksel[idx].div;

	return &cg->pll[pll].div[div];
}

static struct clk * __init create_mux_common(struct clockgen *cg,
					     struct mux_hwclock *hwc,
					     const struct clk_ops *ops,
					     unsigned long min_rate,
					     unsigned long max_rate,
					     unsigned long pct80_rate,
					     const char *fmt, int idx)
{
	struct clk_init_data init = {};
	struct clk *clk;
	const struct clockgen_pll_div *div;
	const char *parent_names[NUM_MUX_PARENTS];
	char name[32];
	int i, j;

	snprintf(name, sizeof(name), fmt, idx);

	for (i = 0, j = 0; i < NUM_MUX_PARENTS; i++) {
		unsigned long rate;

		hwc->clksel_to_parent[i] = -1;

		div = get_pll_div(cg, hwc, i);
		if (!div)
			continue;

		rate = clk_get_rate(div->clk);

		if (hwc->info->clksel[i].flags & CLKSEL_80PCT &&
		    rate > pct80_rate)
			continue;
		if (rate < min_rate)
			continue;
		if (rate > max_rate)
			continue;

		parent_names[j] = div->name;
		hwc->parent_to_clksel[j] = i;
		hwc->clksel_to_parent[i] = j;
		j++;
	}

	init.name = name;
	init.ops = ops;
	init.parent_names = parent_names;
	init.num_parents = hwc->num_parents = j;
	init.flags = 0;
	hwc->hw.init = &init;
	hwc->cg = cg;

	clk = clk_register(NULL, &hwc->hw);
	if (IS_ERR(clk)) {
		pr_err("%s: Couldn't register %s: %ld\n", __func__, name,
		       PTR_ERR(clk));
		kfree(hwc);
		return NULL;
	}

	return clk;
}

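/*
 * Core-cluster mux registers sit at 0x20 * idx from the start of the
 * block (0x70000 + 0x20 * idx on CG_VER3 chips).  The rate of the clksel
 * found at boot is treated as the maximum rated core frequency; see the
 * comment inside create_one_cmux() below.
 */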
static struct clk * __init create_one_cmux(struct clockgen *cg, int idx)
{
	struct mux_hwclock *hwc;
	const struct clockgen_pll_div *div;
	unsigned long plat_rate, min_rate;
	u64 max_rate, pct80_rate;
	u32 clksel;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return NULL;

	if (cg->info.flags & CG_VER3)
		hwc->reg = cg->regs + 0x70000 + 0x20 * idx;
	else
		hwc->reg = cg->regs + 0x20 * idx;

	hwc->info = cg->info.cmux_groups[cg->info.cmux_to_group[idx]];

	/*
	 * Find the rate for the default clksel, and treat it as the
	 * maximum rated core frequency.  If this is an incorrect
	 * assumption, certain clock options (possibly including the
	 * default clksel) may be inappropriately excluded on certain
	 * chips.
	 */
	clksel = (cg_in(cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT;
	div = get_pll_div(cg, hwc, clksel);
	if (!div) {
		kfree(hwc);
		return NULL;
	}

	max_rate = clk_get_rate(div->clk);
	pct80_rate = max_rate * 8;
	do_div(pct80_rate, 10);

	plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk);

	if (cg->info.flags & CG_CMUX_GE_PLAT)
		min_rate = plat_rate;
	else
		min_rate = plat_rate / 2;

	return create_mux_common(cg, hwc, &cmux_ops, min_rate, max_rate,
				 pct80_rate, "cg-cmux%d", idx);
}

static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx)
{
	struct mux_hwclock *hwc;

	hwc = kzalloc(sizeof(*hwc), GFP_KERNEL);
	if (!hwc)
		return NULL;

	hwc->reg = cg->regs + 0x20 * idx + 0x10;
	hwc->info = cg->info.hwaccel[idx];

	return create_mux_common(cg, hwc, &hwaccel_ops, 0, ULONG_MAX, 0,
				 "cg-hwaccel%d", idx);
}

static void __init create_muxes(struct clockgen *cg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cg->cmux); i++) {
		if (cg->info.cmux_to_group[i] < 0)
			break;
		if (cg->info.cmux_to_group[i] >=
		    ARRAY_SIZE(cg->info.cmux_groups)) {
			WARN_ON_ONCE(1);
			continue;
		}

		cg->cmux[i] = create_one_cmux(cg, i);
	}

	for (i = 0; i < ARRAY_SIZE(cg->hwaccel); i++) {
		if (!cg->info.hwaccel[i])
			continue;

		cg->hwaccel[i] = create_one_hwaccel(cg, i);
	}
}

static void __init clockgen_init(struct device_node *np);

/*
 * Legacy nodes may get probed before the parent clockgen node.
 * It is assumed that device trees with legacy nodes will not
 * contain a "clocks" property -- otherwise the input clocks may
 * not be initialized at this point.
 */
static void __init legacy_init_clockgen(struct device_node *np)
{
	if (!clockgen.node)
		clockgen_init(of_get_parent(np));
}

/* Legacy node */
static void __init core_mux_init(struct device_node *np)
{
	struct clk *clk;
	struct resource res;
	int idx, rc;

	legacy_init_clockgen(np);

	if (of_address_to_resource(np, 0, &res))
		return;

	idx = (res.start & 0xf0) >> 5;
	clk = clockgen.cmux[idx];

	rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
	if (rc) {
		pr_err("%s: Couldn't register clk provider for node %pOFn: %d\n",
		       __func__, np, rc);
		return;
	}
}

static struct clk __init
*sysclk_from_fixed(struct device_node *node, const char *name)
{
	u32 rate;

	if (of_property_read_u32(node, "clock-frequency", &rate))
		return ERR_PTR(-ENODEV);

	return clk_register_fixed_rate(NULL, name, NULL, 0, rate);
}

static struct clk __init *input_clock(const char *name, struct clk *clk)
{
	const char *input_name;

	/* Register the input clock under the desired name. */
	input_name = __clk_get_name(clk);
	clk = clk_register_fixed_factor(NULL, name, input_name,
					0, 1, 1);
	if (IS_ERR(clk))
		pr_err("%s: Couldn't register %s: %ld\n", __func__, name,
		       PTR_ERR(clk));

	return clk;
}

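/*
 * For example, create_sysclk() below calls
 * input_clock_by_name("cg-sysclk", "sysclk"), which re-registers the DT
 * input clock named "sysclk" as a 1:1 fixed-factor clock under the
 * internal "cg-sysclk" name.
 */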
static struct clk __init *input_clock_by_name(const char *name,
					      const char *dtname)
{
	struct clk *clk;

	clk = of_clk_get_by_name(clockgen.node, dtname);
	if (IS_ERR(clk))
		return clk;

	return input_clock(name, clk);
}

static struct clk __init *input_clock_by_index(const char *name, int idx)
{
	struct clk *clk;

	clk = of_clk_get(clockgen.node, 0);
	if (IS_ERR(clk))
		return clk;

	return input_clock(name, clk);
}

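/*
 * SYSCLK is resolved in order of preference: a "clock-frequency" property
 * on the clockgen node itself, an input clock named "sysclk", the first
 * input clock by index, and finally a legacy "sysclk" child node.
 */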
static struct clk * __init create_sysclk(const char *name)
{
	struct device_node *sysclk;
	struct clk *clk;

	clk = sysclk_from_fixed(clockgen.node, name);
	if (!IS_ERR(clk))
		return clk;

	clk = input_clock_by_name(name, "sysclk");
	if (!IS_ERR(clk))
		return clk;

	clk = input_clock_by_index(name, 0);
	if (!IS_ERR(clk))
		return clk;

	sysclk = of_get_child_by_name(clockgen.node, "sysclk");
	if (sysclk) {
		clk = sysclk_from_fixed(sysclk, name);
		if (!IS_ERR(clk))
			return clk;
	}

	pr_err("%s: No input sysclk\n", __func__);
	return NULL;
}

static struct clk * __init create_coreclk(const char *name)
{
	struct clk *clk;

	clk = input_clock_by_name(name, "coreclk");
	if (!IS_ERR(clk))
		return clk;

	/*
	 * This indicates a mix of legacy nodes with the new coreclk
	 * mechanism, which should never happen.  If this error occurs,
	 * don't use the wrong input clock just because coreclk isn't
	 * ready yet.
	 */
	if (WARN_ON(PTR_ERR(clk) == -EPROBE_DEFER))
		return clk;

	return NULL;
}

/* Legacy node */
static void __init sysclk_init(struct device_node *node)
{
	struct clk *clk;

	legacy_init_clockgen(node);

	clk = clockgen.sysclk;
	if (clk)
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
}

#define PLL_KILL BIT(31)

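/*
 * Read the PLL multiplier from its control/status register and register a
 * fixed-factor clock per supported divider, named "cg-pllN-divM".  A set
 * PLL_KILL bit means the PLL is disabled and no clocks are registered.
 */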
static void __init create_one_pll(struct clockgen *cg, int idx)
{
	u32 __iomem *reg;
	u32 mult;
	struct clockgen_pll *pll = &cg->pll[idx];
	const char *input = "cg-sysclk";
	int i;

	if (!(cg->info.pll_mask & (1 << idx)))
		return;

	if (cg->coreclk && idx != PLATFORM_PLL) {
		if (IS_ERR(cg->coreclk))
			return;

		input = "cg-coreclk";
	}

	if (cg->info.flags & CG_VER3) {
		switch (idx) {
		case PLATFORM_PLL:
			reg = cg->regs + 0x60080;
			break;
		case CGA_PLL1:
			reg = cg->regs + 0x80;
			break;
		case CGA_PLL2:
			reg = cg->regs + 0xa0;
			break;
		case CGB_PLL1:
			reg = cg->regs + 0x10080;
			break;
		case CGB_PLL2:
			reg = cg->regs + 0x100a0;
			break;
		default:
			WARN_ONCE(1, "index %d\n", idx);
			return;
		}
	} else {
		if (idx == PLATFORM_PLL)
			reg = cg->regs + 0xc00;
		else
			reg = cg->regs + 0x800 + 0x20 * (idx - 1);
	}

	/* Get the multiple of PLL */
	mult = cg_in(cg, reg);

	/* Check if this PLL is disabled */
	if (mult & PLL_KILL) {
		pr_debug("%s(): pll %p disabled\n", __func__, reg);
		return;
	}

	if ((cg->info.flags & CG_VER3) ||
	    ((cg->info.flags & CG_PLL_8BIT) && idx != PLATFORM_PLL))
		mult = (mult & GENMASK(8, 1)) >> 1;
	else
		mult = (mult & GENMASK(6, 1)) >> 1;

	for (i = 0; i < ARRAY_SIZE(pll->div); i++) {
		struct clk *clk;
		int ret;

		/*
		 * For platform PLL, there are MAX_PLL_DIV divider clocks.
		 * For core PLL, there are 4 divider clocks at most.
		 */
		if (idx != PLATFORM_PLL && i >= 4)
			break;

		snprintf(pll->div[i].name, sizeof(pll->div[i].name),
			 "cg-pll%d-div%d", idx, i + 1);

		clk = clk_register_fixed_factor(NULL,
				pll->div[i].name, input, 0, mult, i + 1);
		if (IS_ERR(clk)) {
			pr_err("%s: %s: register failed %ld\n",
			       __func__, pll->div[i].name, PTR_ERR(clk));
			continue;
		}

		pll->div[i].clk = clk;
		ret = clk_register_clkdev(clk, pll->div[i].name, NULL);
		if (ret != 0)
			pr_err("%s: %s: register to lookup table failed %d\n",
			       __func__, pll->div[i].name, ret);

	}
}

static void __init create_plls(struct clockgen *cg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cg->pll); i++)
		create_one_pll(cg, i);
}

static void __init legacy_pll_init(struct device_node *np, int idx)
{
	struct clockgen_pll *pll;
	struct clk_onecell_data *onecell_data;
	struct clk **subclks;
	int count, rc;

	legacy_init_clockgen(np);

	pll = &clockgen.pll[idx];
	count = of_property_count_strings(np, "clock-output-names");

	BUILD_BUG_ON(ARRAY_SIZE(pll->div) < 4);
	subclks = kcalloc(4, sizeof(struct clk *), GFP_KERNEL);
	if (!subclks)
		return;

	onecell_data = kmalloc(sizeof(*onecell_data), GFP_KERNEL);
	if (!onecell_data)
		goto err_clks;

	if (count <= 3) {
		subclks[0] = pll->div[0].clk;
		subclks[1] = pll->div[1].clk;
		subclks[2] = pll->div[3].clk;
	} else {
		subclks[0] = pll->div[0].clk;
		subclks[1] = pll->div[1].clk;
		subclks[2] = pll->div[2].clk;
		subclks[3] = pll->div[3].clk;
	}

	onecell_data->clks = subclks;
	onecell_data->clk_num = count;

	rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data);
	if (rc) {
		pr_err("%s: Couldn't register clk provider for node %pOFn: %d\n",
		       __func__, np, rc);
		goto err_cell;
	}

	return;
err_cell:
	kfree(onecell_data);
err_clks:
	kfree(subclks);
}

/* Legacy node */
static void __init pltfrm_pll_init(struct device_node *np)
{
	legacy_pll_init(np, PLATFORM_PLL);
}

/* Legacy node */
static void __init core_pll_init(struct device_node *np)
{
	struct resource res;
	int idx;

	if (of_address_to_resource(np, 0, &res))
		return;

	if ((res.start & 0xfff) == 0xc00) {
		/*
		 * ls1021a devtree labels the platform PLL
		 * with the core PLL compatible
		 */
		pltfrm_pll_init(np);
	} else {
		idx = (res.start & 0xf0) >> 5;
		legacy_pll_init(np, CGA_PLL1 + idx);
	}
}

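/*
 * Clock provider callback.  Consumers use a two-cell specifier
 * <type index>: type 0 is sysclk, 1 a core-cluster mux, 2 a hwaccel mux,
 * 3 an FMan clock, 4 a platform-PLL divider and 5 the optional coreclk.
 * For illustration only (the consumer property below is an example):
 *
 *	clocks = <&clockgen 4 0>;	(platform PLL divided by 1)
 */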
static struct clk *clockgen_clk_get(struct of_phandle_args *clkspec, void *data)
{
	struct clockgen *cg = data;
	struct clk *clk;
	struct clockgen_pll *pll;
	u32 type, idx;

	if (clkspec->args_count < 2) {
		pr_err("%s: insufficient phandle args\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	type = clkspec->args[0];
	idx = clkspec->args[1];

	switch (type) {
	case 0:
		if (idx != 0)
			goto bad_args;
		clk = cg->sysclk;
		break;
	case 1:
		if (idx >= ARRAY_SIZE(cg->cmux))
			goto bad_args;
		clk = cg->cmux[idx];
		break;
	case 2:
		if (idx >= ARRAY_SIZE(cg->hwaccel))
			goto bad_args;
		clk = cg->hwaccel[idx];
		break;
	case 3:
		if (idx >= ARRAY_SIZE(cg->fman))
			goto bad_args;
		clk = cg->fman[idx];
		break;
	case 4:
		pll = &cg->pll[PLATFORM_PLL];
		if (idx >= ARRAY_SIZE(pll->div))
			goto bad_args;
		clk = pll->div[idx].clk;
		break;
	case 5:
		if (idx != 0)
			goto bad_args;
		clk = cg->coreclk;
		if (IS_ERR(clk))
			clk = NULL;
		break;
	default:
		goto bad_args;
	}

	if (!clk)
		return ERR_PTR(-ENOENT);
	return clk;

bad_args:
	pr_err("%s: Bad phandle args %u %u\n", __func__, type, idx);
	return ERR_PTR(-EINVAL);
}

#ifdef CONFIG_PPC
#include <asm/mpc85xx.h>

static const u32 a4510_svrs[] __initconst = {
	(SVR_P2040 << 8) | 0x10,	/* P2040 1.0 */
	(SVR_P2040 << 8) | 0x11,	/* P2040 1.1 */
	(SVR_P2041 << 8) | 0x10,	/* P2041 1.0 */
	(SVR_P2041 << 8) | 0x11,	/* P2041 1.1 */
	(SVR_P3041 << 8) | 0x10,	/* P3041 1.0 */
	(SVR_P3041 << 8) | 0x11,	/* P3041 1.1 */
	(SVR_P4040 << 8) | 0x20,	/* P4040 2.0 */
	(SVR_P4080 << 8) | 0x20,	/* P4080 2.0 */
	(SVR_P5010 << 8) | 0x10,	/* P5010 1.0 */
	(SVR_P5010 << 8) | 0x20,	/* P5010 2.0 */
	(SVR_P5020 << 8) | 0x10,	/* P5020 1.0 */
	(SVR_P5021 << 8) | 0x10,	/* P5021 1.0 */
	(SVR_P5040 << 8) | 0x10,	/* P5040 1.0 */
};

#define SVR_SECURITY	0x80000	/* The Security (E) bit */

static bool __init has_erratum_a4510(void)
{
	u32 svr = mfspr(SPRN_SVR);
	int i;

	svr &= ~SVR_SECURITY;

	for (i = 0; i < ARRAY_SIZE(a4510_svrs); i++) {
		if (svr == a4510_svrs[i])
			return true;
	}

	return false;
}
#else
static bool __init has_erratum_a4510(void)
{
	return false;
}
#endif

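/*
 * Match the compatible against chipinfo[], create the input clocks, PLL
 * dividers and muxes, and register the provider.  On PPC parts listed in
 * a4510_svrs[] (erratum A-004510), CG_CMUX_GE_PLAT is set so that core
 * muxes are never run below the platform PLL rate.
 */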
static void __init clockgen_init(struct device_node *np)
{
	int i, ret;
	bool is_old_ls1021a = false;

	/* May have already been called by a legacy probe */
	if (clockgen.node)
		return;

	clockgen.node = np;
	clockgen.regs = of_iomap(np, 0);
	if (!clockgen.regs &&
	    of_device_is_compatible(of_root, "fsl,ls1021a")) {
		/* Compatibility hack for old, broken device trees */
		clockgen.regs = ioremap(0x1ee1000, 0x1000);
		is_old_ls1021a = true;
	}
	if (!clockgen.regs) {
		pr_err("%s(): %pOFn: of_iomap() failed\n", __func__, np);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(chipinfo); i++) {
		if (of_device_is_compatible(np, chipinfo[i].compat))
			break;
		if (is_old_ls1021a &&
		    !strcmp(chipinfo[i].compat, "fsl,ls1021a-clockgen"))
			break;
	}

	if (i == ARRAY_SIZE(chipinfo)) {
		pr_err("%s: unknown clockgen node %pOF\n", __func__, np);
		goto err;
	}
	clockgen.info = chipinfo[i];

	if (clockgen.info.guts_compat) {
		struct device_node *guts;

		guts = of_find_compatible_node(NULL, NULL,
					       clockgen.info.guts_compat);
		if (guts) {
			clockgen.guts = of_iomap(guts, 0);
			if (!clockgen.guts) {
				pr_err("%s: Couldn't map %pOF regs\n", __func__,
				       guts);
			}
			of_node_put(guts);
		}

	}

	if (has_erratum_a4510())
		clockgen.info.flags |= CG_CMUX_GE_PLAT;

	clockgen.sysclk = create_sysclk("cg-sysclk");
	clockgen.coreclk = create_coreclk("cg-coreclk");
	create_plls(&clockgen);
	create_muxes(&clockgen);

	if (clockgen.info.init_periph)
		clockgen.info.init_periph(&clockgen);

	ret = of_clk_add_provider(np, clockgen_clk_get, &clockgen);
	if (ret) {
		pr_err("%s: Couldn't register clk provider for node %pOFn: %d\n",
		       __func__, np, ret);
	}

	return;
err:
	iounmap(clockgen.regs);
	clockgen.regs = NULL;
}

CLK_OF_DECLARE(qoriq_clockgen_1, "fsl,qoriq-clockgen-1.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_2, "fsl,qoriq-clockgen-2.0", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_b4420, "fsl,b4420-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_b4860, "fsl,b4860-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1012a, "fsl,ls1012a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1021a, "fsl,ls1021a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1028a, "fsl,ls1028a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1043a, "fsl,ls1043a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1046a, "fsl,ls1046a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls1088a, "fsl,ls1088a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_ls2080a, "fsl,ls2080a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_lx2160a, "fsl,lx2160a-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_p2041, "fsl,p2041-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_p3041, "fsl,p3041-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_p4080, "fsl,p4080-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_p5020, "fsl,p5020-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_p5040, "fsl,p5040-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_t1023, "fsl,t1023-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_t1040, "fsl,t1040-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_t2080, "fsl,t2080-clockgen", clockgen_init);
CLK_OF_DECLARE(qoriq_clockgen_t4240, "fsl,t4240-clockgen", clockgen_init);

/* Legacy nodes */
CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init);
CLK_OF_DECLARE(qoriq_sysclk_2, "fsl,qoriq-sysclk-2.0", sysclk_init);
CLK_OF_DECLARE(qoriq_core_pll_1, "fsl,qoriq-core-pll-1.0", core_pll_init);
CLK_OF_DECLARE(qoriq_core_pll_2, "fsl,qoriq-core-pll-2.0", core_pll_init);
CLK_OF_DECLARE(qoriq_core_mux_1, "fsl,qoriq-core-mux-1.0", core_mux_init);
CLK_OF_DECLARE(qoriq_core_mux_2, "fsl,qoriq-core-mux-2.0", core_mux_init);
CLK_OF_DECLARE(qoriq_pltfrm_pll_1, "fsl,qoriq-platform-pll-1.0", pltfrm_pll_init);
CLK_OF_DECLARE(qoriq_pltfrm_pll_2, "fsl,qoriq-platform-pll-2.0", pltfrm_pll_init);