// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/regmap.h>
#include <linux/math64.h>
#include <linux/slab.h>

#include <asm/div64.h>

#include "clk-rcg.h"
#include "common.h"

#define CMD_REG			0x0
#define CMD_UPDATE		BIT(0)
#define CMD_ROOT_EN		BIT(1)
#define CMD_DIRTY_CFG		BIT(4)
#define CMD_DIRTY_N		BIT(5)
#define CMD_DIRTY_M		BIT(6)
#define CMD_DIRTY_D		BIT(7)
#define CMD_ROOT_OFF		BIT(31)

#define CFG_REG			0x4
#define CFG_SRC_DIV_SHIFT	0
#define CFG_SRC_SEL_SHIFT	8
#define CFG_SRC_SEL_MASK	(0x7 << CFG_SRC_SEL_SHIFT)
#define CFG_MODE_SHIFT		12
#define CFG_MODE_MASK		(0x3 << CFG_MODE_SHIFT)
#define CFG_MODE_DUAL_EDGE	(0x2 << CFG_MODE_SHIFT)
#define CFG_HW_CLK_CTRL_MASK	BIT(20)

#define M_REG			0x8
#define N_REG			0xc
#define D_REG			0x10

#define RCG_CFG_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + CFG_REG)
#define RCG_M_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + M_REG)
#define RCG_N_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + N_REG)
#define RCG_D_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + D_REG)

/* Dynamic Frequency Scaling */
#define MAX_PERF_LEVEL		8
#define SE_CMD_DFSR_OFFSET	0x14
#define SE_CMD_DFS_EN		BIT(0)
#define SE_PERF_DFSR(level)	(0x1c + 0x4 * (level))
#define SE_PERF_M_DFSR(level)	(0x5c + 0x4 * (level))
#define SE_PERF_N_DFSR(level)	(0x9c + 0x4 * (level))

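/*
 * Rounding policy when matching a requested rate against the frequency
 * table: FLOOR picks the highest table entry at or below the request,
 * CEIL picks the lowest entry at or above it.
 */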
enum freq_policy {
	FLOOR,
	CEIL,
};

static int clk_rcg2_is_enabled(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cmd;
	int ret;

	ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
	if (ret)
		return ret;

	return (cmd & CMD_ROOT_OFF) == 0;
}

static u8 clk_rcg2_get_parent(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int num_parents = clk_hw_get_num_parents(hw);
	u32 cfg;
	int i, ret;

	ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
	if (ret)
		goto err;

	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++)
		if (cfg == rcg->parent_map[i].cfg)
			return i;

err:
	pr_debug("%s: Clock %s has invalid parent, using default.\n",
		 __func__, clk_hw_get_name(hw));
	return 0;
}

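/*
 * Latch the pending CFG/M/N/D values into the running configuration:
 * set the update bit and poll (for up to ~500 us) until the hardware
 * clears it again.
 */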
static int update_config(struct clk_rcg2 *rcg)
{
	int count, ret;
	u32 cmd;
	struct clk_hw *hw = &rcg->clkr.hw;
	const char *name = clk_hw_get_name(hw);

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				 CMD_UPDATE, CMD_UPDATE);
	if (ret)
		return ret;

	/* Wait for update to take effect */
	for (count = 500; count > 0; count--) {
		ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
		if (ret)
			return ret;
		if (!(cmd & CMD_UPDATE))
			return 0;
		udelay(1);
	}

	WARN(1, "%s: rcg didn't update its configuration.", name);
	return -EBUSY;
}

static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;
	u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;

	ret = regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg),
				 CFG_SRC_SEL_MASK, cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}

/*
 * Calculate m/n:d rate
 *
 *          parent_rate     m
 *   rate = ----------- x  ---
 *            hid_div       n
 */
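/*
 * Worked example (illustrative numbers): with parent_rate = 800 MHz
 * and a hid_div register value of 3, the divided rate is
 * 800 MHz * 2 / (3 + 1) = 400 MHz; applying M/N = 1/2 on top of that
 * yields 200 MHz.
 */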
static unsigned long
calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
{
	if (hid_div) {
		rate *= 2;
		rate /= hid_div + 1;
	}

	if (mode) {
		u64 tmp = rate;
		tmp *= m;
		do_div(tmp, n);
		rate = tmp;
	}

	return rate;
}

static unsigned long
clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask;

	regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);

	if (rcg->mnd_width) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
		m &= mask;
		regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &n);
		n = ~n;
		n &= mask;
		n += m;
		mode = cfg & CFG_MODE_MASK;
		mode >>= CFG_MODE_SHIFT;
	}

	mask = BIT(rcg->hid_width) - 1;
	hid_div = cfg >> CFG_SRC_DIV_SHIFT;
	hid_div &= mask;

	return calc_rate(parent_rate, m, n, mode, hid_div);
}

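/*
 * Look up the frequency table according to the rounding policy and
 * report the matching entry back to the framework.  With
 * CLK_SET_RATE_PARENT the required parent rate is back-computed from
 * the entry's pre-divider and M/N values; otherwise the parent is left
 * at its current rate.
 */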
static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
				    struct clk_rate_request *req,
				    enum freq_policy policy)
{
	unsigned long clk_flags, rate = req->rate;
	struct clk_hw *p;
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int index;

	switch (policy) {
	case FLOOR:
		f = qcom_find_freq_floor(f, rate);
		break;
	case CEIL:
		f = qcom_find_freq(f, rate);
		break;
	default:
		return -EINVAL;
	}

	if (!f)
		return -EINVAL;

	index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	if (index < 0)
		return index;

	clk_flags = clk_hw_get_flags(hw);
	p = clk_hw_get_parent_by_index(hw, index);
	if (clk_flags & CLK_SET_RATE_PARENT) {
		rate = f->freq;
		if (f->pre_div) {
			rate /= 2;
			rate *= f->pre_div + 1;
		}

		if (f->n) {
			u64 tmp = rate;
			tmp = tmp * f->n;
			do_div(tmp, f->m);
			rate = tmp;
		}
	} else {
		rate = clk_hw_get_rate(p);
	}
	req->best_parent_hw = p;
	req->best_parent_rate = rate;
	req->rate = f->freq;

	return 0;
}

static int clk_rcg2_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, CEIL);
}

static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
					 struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
}

static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
	u32 cfg, mask;
	struct clk_hw *hw = &rcg->clkr.hw;
	int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	if (index < 0)
		return index;

	if (rcg->mnd_width && f->n) {
		mask = BIT(rcg->mnd_width) - 1;
		ret = regmap_update_bits(rcg->clkr.regmap,
					 RCG_M_OFFSET(rcg), mask, f->m);
		if (ret)
			return ret;

		ret = regmap_update_bits(rcg->clkr.regmap,
					 RCG_N_OFFSET(rcg), mask, ~(f->n - f->m));
		if (ret)
			return ret;

		ret = regmap_update_bits(rcg->clkr.regmap,
					 RCG_D_OFFSET(rcg), mask, ~f->n);
		if (ret)
			return ret;
	}

	mask = BIT(rcg->hid_width) - 1;
	mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
	cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
	cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	if (rcg->mnd_width && f->n && (f->m != f->n))
		cfg |= CFG_MODE_DUAL_EDGE;
	return regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg),
				  mask, cfg);
}

static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
	int ret;

	ret = __clk_rcg2_configure(rcg, f);
	if (ret)
		return ret;

	return update_config(rcg);
}

static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
			       enum freq_policy policy)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f;

	switch (policy) {
	case FLOOR:
		f = qcom_find_freq_floor(rcg->freq_tbl, rate);
		break;
	case CEIL:
		f = qcom_find_freq(rcg->freq_tbl, rate);
		break;
	default:
		return -EINVAL;
	}

	if (!f)
		return -EINVAL;

	return clk_rcg2_configure(rcg, f);
}

static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}

static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long parent_rate)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}

static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}

static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}

const struct clk_ops clk_rcg2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_set_rate,
	.set_rate_and_parent = clk_rcg2_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_ops);
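
/*
 * Usage sketch (hypothetical clock; the register offset, parent map and
 * table entries below are assumptions, following the idiom used by the
 * qcom clock-controller drivers):
 *
 *	static const struct freq_tbl ftbl_example_clk_src[] = {
 *		F(19200000, P_XO, 1, 0, 0),
 *		F(100000000, P_GPLL0, 6, 0, 0),
 *		{ }
 *	};
 *
 *	static struct clk_rcg2 example_clk_src = {
 *		.cmd_rcgr = 0x1000,
 *		.hid_width = 5,
 *		.parent_map = example_parent_map,
 *		.freq_tbl = ftbl_example_clk_src,
 *		.clkr.hw.init = &(struct clk_init_data){
 *			.name = "example_clk_src",
 *			.parent_names = example_parents,
 *			.num_parents = 2,
 *			.ops = &clk_rcg2_ops,
 *		},
 *	};
 */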

const struct clk_ops clk_rcg2_floor_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_floor_rate,
	.set_rate = clk_rcg2_set_floor_rate,
	.set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);

struct frac_entry {
	int num;
	int den;
};

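/*
 * Each table maps the supported eDP pixel rates to an m/n fraction of
 * the fixed link-clock PLL rate; which table applies is chosen by the
 * parent rate (810 MHz or 675 MHz).
 */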
static const struct frac_entry frac_table_675m[] = {	/* link rate of 270M */
	{ 52, 295 },	/* 119 M */
	{ 11, 57 },	/* 130.25 M */
	{ 63, 307 },	/* 138.50 M */
	{ 11, 50 },	/* 148.50 M */
	{ 47, 206 },	/* 154 M */
	{ 31, 100 },	/* 205.25 M */
	{ 107, 269 },	/* 268.50 M */
	{ },
};

static const struct frac_entry frac_table_810m[] = {	/* link rate of 162M */
	{ 31, 211 },	/* 119 M */
	{ 32, 199 },	/* 130.25 M */
	{ 63, 307 },	/* 138.50 M */
	{ 11, 60 },	/* 148.50 M */
	{ 50, 263 },	/* 154 M */
	{ 31, 120 },	/* 205.25 M */
	{ 119, 359 },	/* 268.50 M */
	{ },
};

static int clk_edp_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = *rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;
	s64 src_rate = parent_rate;
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;

	if (src_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		request = rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((src_rate < (request - delta)) ||
		    (src_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
			    &hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}

	return -EINVAL;
}

static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Parent index is set statically in frequency table */
	return clk_edp_pixel_set_rate(hw, rate, parent_rate);
}

static int clk_edp_pixel_determine_rate(struct clk_hw *hw,
					struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	/* Force the correct parent */
	req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
	req->best_parent_rate = clk_hw_get_rate(req->best_parent_hw);

	if (req->best_parent_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		request = req->rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((req->best_parent_rate < (request - delta)) ||
		    (req->best_parent_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
			    &hid_div);
		hid_div >>= CFG_SRC_DIV_SHIFT;
		hid_div &= mask;

		req->rate = calc_rate(req->best_parent_rate,
				      frac->num, frac->den,
				      !!frac->den, hid_div);
		return 0;
	}

	return -EINVAL;
}

const struct clk_ops clk_edp_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_edp_pixel_set_rate,
	.set_rate_and_parent = clk_edp_pixel_set_rate_and_parent,
	.determine_rate = clk_edp_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);

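/*
 * The byte clock is a plain half-integer divider off its table-defined
 * parent: a register value of div encodes a divisor of (div + 1) / 2,
 * hence the DIV_ROUND_UP(2 * parent_rate, rate) - 1 below.
 */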
static int clk_byte_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;

	if (req->rate == 0)
		return -EINVAL;

	req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, req->rate);

	div = DIV_ROUND_UP((2 * parent_rate), req->rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}

static int clk_byte_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = *rcg->freq_tbl;
	unsigned long div;
	u32 mask = BIT(rcg->hid_width) - 1;

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	f.pre_div = div;

	return clk_rcg2_configure(rcg, &f);
}

static int clk_byte_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Parent index is set statically in frequency table */
	return clk_byte_set_rate(hw, rate, parent_rate);
}

const struct clk_ops clk_byte_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte_set_rate,
	.set_rate_and_parent = clk_byte_set_rate_and_parent,
	.determine_rate = clk_byte_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte_ops);

static int clk_byte2_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;
	unsigned long rate = req->rate;

	if (rate == 0)
		return -EINVAL;

	p = req->best_parent_hw;
	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, rate);

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}

static int clk_byte2_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	unsigned long div;
	int i, num_parents = clk_hw_get_num_parents(hw);
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 cfg;

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	f.pre_div = div;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++) {
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			return clk_rcg2_configure(rcg, &f);
		}
	}

	return -EINVAL;
}

static int clk_byte2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Read the hardware to determine parent during set_rate */
	return clk_byte2_set_rate(hw, rate, parent_rate);
}

const struct clk_ops clk_byte2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte2_set_rate,
	.set_rate_and_parent = clk_byte2_set_rate_and_parent,
	.determine_rate = clk_byte2_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte2_ops);

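/*
 * m/n fractions tried by the pixel clock; the requested pixel rate is
 * scaled by den/num to find a parent rate the upstream PLL can provide.
 */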
static const struct frac_entry frac_table_pixel[] = {
	{ 3, 8 },
	{ 2, 9 },
	{ 4, 9 },
	{ 1, 1 },
	{ }
};

static int clk_pixel_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	unsigned long request, src_rate;
	int delta = 100000;
	const struct frac_entry *frac = frac_table_pixel;

	for (; frac->num; frac++) {
		request = (req->rate * frac->den) / frac->num;

		src_rate = clk_hw_round_rate(req->best_parent_hw, request);
		if ((src_rate < (request - delta)) ||
		    (src_rate > (request + delta)))
			continue;

		req->best_parent_rate = src_rate;
		req->rate = (src_rate * frac->num) / frac->den;
		return 0;
	}

	return -EINVAL;
}

static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	const struct frac_entry *frac = frac_table_pixel;
	unsigned long request;
	int delta = 100000;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div, cfg;
	int i, num_parents = clk_hw_get_num_parents(hw);

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++)
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			break;
		}

	for (; frac->num; frac++) {
		request = (rate * frac->den) / frac->num;

		if ((parent_rate < (request - delta)) ||
		    (parent_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
			    &hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}
	return -EINVAL;
}

static int clk_pixel_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
					 unsigned long parent_rate, u8 index)
{
	return clk_pixel_set_rate(hw, rate, parent_rate);
}

const struct clk_ops clk_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_pixel_set_rate,
	.set_rate_and_parent = clk_pixel_set_rate_and_parent,
	.determine_rate = clk_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_pixel_ops);

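/*
 * The gfx3d clock never reprograms a PLL it is currently running from:
 * requests below the peak rate ping-pong between PLL8 and PLL2 (with
 * PLL9 reserved for its fixed peak rate), so the newly chosen parent
 * can be reprogrammed before the mux switches over to it.
 */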
static int clk_gfx3d_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rate_request parent_req = { };
	struct clk_hw *p2, *p8, *p9, *xo;
	unsigned long p9_rate;
	int ret;

	xo = clk_hw_get_parent_by_index(hw, 0);
	if (req->rate == clk_hw_get_rate(xo)) {
		req->best_parent_hw = xo;
		return 0;
	}

	p9 = clk_hw_get_parent_by_index(hw, 2);
	p2 = clk_hw_get_parent_by_index(hw, 3);
	p8 = clk_hw_get_parent_by_index(hw, 4);

	/* PLL9 is a fixed rate PLL */
	p9_rate = clk_hw_get_rate(p9);

	parent_req.rate = req->rate = min(req->rate, p9_rate);
	if (req->rate == p9_rate) {
		req->rate = req->best_parent_rate = p9_rate;
		req->best_parent_hw = p9;
		return 0;
	}

	if (req->best_parent_hw == p9) {
		/* Are we going back to a previously used rate? */
		if (clk_hw_get_rate(p8) == req->rate)
			req->best_parent_hw = p8;
		else
			req->best_parent_hw = p2;
	} else if (req->best_parent_hw == p8) {
		req->best_parent_hw = p2;
	} else {
		req->best_parent_hw = p8;
	}

	ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
	if (ret)
		return ret;

	req->rate = req->best_parent_rate = parent_req.rate;

	return 0;
}

static int clk_gfx3d_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
					 unsigned long parent_rate, u8 index)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg;
	int ret;

	/* Just mux it, we don't use the division or m/n hardware */
	cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}

static int clk_gfx3d_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	/*
	 * We should never get here; clk_gfx3d_determine_rate() should always
	 * make us use a different parent than what we're currently using, so
	 * clk_gfx3d_set_rate_and_parent() should always be called.
	 */
	return 0;
}

const struct clk_ops clk_gfx3d_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_gfx3d_set_rate,
	.set_rate_and_parent = clk_gfx3d_set_rate_and_parent,
	.determine_rate = clk_gfx3d_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_gfx3d_ops);

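/*
 * Force the RCG root on via CMD_ROOT_EN and poll (for up to ~500 us)
 * until the root reports enabled.
 */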
static int clk_rcg2_set_force_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const char *name = clk_hw_get_name(hw);
	int ret, count;

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				 CMD_ROOT_EN, CMD_ROOT_EN);
	if (ret)
		return ret;

	/* wait for RCG to turn ON */
	for (count = 500; count > 0; count--) {
		if (clk_rcg2_is_enabled(hw))
			return 0;

		udelay(1);
	}

	pr_err("%s: RCG did not turn on\n", name);
	return -ETIMEDOUT;
}

static int clk_rcg2_clear_force_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				  CMD_ROOT_EN, 0);
}

static int
clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, const struct freq_tbl *f)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	ret = clk_rcg2_set_force_enable(hw);
	if (ret)
		return ret;

	ret = clk_rcg2_configure(rcg, f);
	if (ret)
		return ret;

	return clk_rcg2_clear_force_enable(hw);
}

static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f;

	f = qcom_find_freq(rcg->freq_tbl, rate);
	if (!f)
		return -EINVAL;

	/*
	 * In case clock is disabled, update the CFG, M, N and D registers
	 * and don't hit the update bit of CMD register.
	 */
	if (!__clk_is_enabled(hw->clk))
		return __clk_rcg2_configure(rcg, f);

	return clk_rcg2_shared_force_enable_clear(hw, f);
}

static int clk_rcg2_shared_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return clk_rcg2_shared_set_rate(hw, rate, parent_rate);
}

static int clk_rcg2_shared_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	/*
	 * Set the update bit because required configuration has already
	 * been written in clk_rcg2_shared_set_rate()
	 */
	ret = clk_rcg2_set_force_enable(hw);
	if (ret)
		return ret;

	ret = update_config(rcg);
	if (ret)
		return ret;

	return clk_rcg2_clear_force_enable(hw);
}

static void clk_rcg2_shared_disable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg;

	/*
	 * Store current configuration as switching to safe source would clear
	 * the SRC and DIV of CFG register
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);

	/*
	 * Park the RCG at a safe configuration - sourced off of safe source.
	 * Force enable and disable the RCG while configuring it to safeguard
	 * against any update signal coming from the downstream clock.
	 * The current parent is still prepared and enabled at this point, and
	 * the safe source is always on while application processor subsystem
	 * is online. Therefore, the RCG can safely switch its parent.
	 */
	clk_rcg2_set_force_enable(hw);

	regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
		     rcg->safe_src_index << CFG_SRC_SEL_SHIFT);

	update_config(rcg);

	clk_rcg2_clear_force_enable(hw);

	/* Write back the stored configuration corresponding to current rate */
	regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
}

const struct clk_ops clk_rcg2_shared_ops = {
	.enable = clk_rcg2_shared_enable,
	.disable = clk_rcg2_shared_disable,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_shared_set_rate,
	.set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);

/* Common APIs to be used for DFS based RCGR */
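/*
 * With DFS enabled, the hardware switches between up to MAX_PERF_LEVEL
 * pre-programmed performance levels on its own; the driver only reads
 * back the per-level CFG/M/N settings to build a frequency table that
 * the framework can report from.
 */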
static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l,
				       struct freq_tbl *f)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct clk_hw *p;
	unsigned long prate = 0;
	u32 val, mask, cfg, mode, src;
	int i, num_parents;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(l), &cfg);

	mask = BIT(rcg->hid_width) - 1;
	f->pre_div = 1;
	if (cfg & mask)
		f->pre_div = cfg & mask;

	/* Extract the source field separately so cfg survives for the
	 * mode check below. */
	src = cfg & CFG_SRC_SEL_MASK;
	src >>= CFG_SRC_SEL_SHIFT;

	num_parents = clk_hw_get_num_parents(hw);
	for (i = 0; i < num_parents; i++) {
		if (src == rcg->parent_map[i].cfg) {
			f->src = rcg->parent_map[i].src;
			p = clk_hw_get_parent_by_index(&rcg->clkr.hw, i);
			prate = clk_hw_get_rate(p);
		}
	}

	mode = cfg & CFG_MODE_MASK;
	mode >>= CFG_MODE_SHIFT;
	if (mode) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_M_DFSR(l),
			    &val);
		val &= mask;
		f->m = val;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_N_DFSR(l),
			    &val);
		val = ~val;
		val &= mask;
		val += f->m;
		f->n = val;
	}

	f->freq = calc_rate(prate, f->m, f->n, mode, f->pre_div);
}

static int clk_rcg2_dfs_populate_freq_table(struct clk_rcg2 *rcg)
{
	struct freq_tbl *freq_tbl;
	int i;

	/* Allocate space for 1 extra since table is NULL terminated */
	freq_tbl = kcalloc(MAX_PERF_LEVEL + 1, sizeof(*freq_tbl), GFP_KERNEL);
	if (!freq_tbl)
		return -ENOMEM;
	rcg->freq_tbl = freq_tbl;

	for (i = 0; i < MAX_PERF_LEVEL; i++)
		clk_rcg2_dfs_populate_freq(&rcg->clkr.hw, i, freq_tbl + i);

	return 0;
}

static int clk_rcg2_dfs_determine_rate(struct clk_hw *hw,
				       struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	if (!rcg->freq_tbl) {
		ret = clk_rcg2_dfs_populate_freq_table(rcg);
		if (ret) {
			pr_err("Failed to update DFS tables for %s\n",
			       clk_hw_get_name(hw));
			return ret;
		}
	}

	return clk_rcg2_determine_rate(hw, req);
}

static unsigned long
clk_rcg2_dfs_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 level, mask, cfg, m = 0, n = 0, mode, pre_div;

	regmap_read(rcg->clkr.regmap,
		    rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &level);
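	/* The current perf level is encoded in bits [4:1] of the DFSR */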
	level &= GENMASK(4, 1);
	level >>= 1;

	if (rcg->freq_tbl)
		return rcg->freq_tbl[level].freq;

	/*
	 * Assume that parent_rate is actually the parent because
	 * we can't do any better at figuring it out when the table
	 * hasn't been populated yet. We only populate the table
	 * in determine_rate because we can't guarantee the parents
	 * will be registered with the framework until then.
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(level),
		    &cfg);

	mask = BIT(rcg->hid_width) - 1;
	pre_div = 1;
	if (cfg & mask)
		pre_div = cfg & mask;

	mode = cfg & CFG_MODE_MASK;
	mode >>= CFG_MODE_SHIFT;
	if (mode) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_M_DFSR(level), &m);
		m &= mask;

		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_N_DFSR(level), &n);
		n = ~n;
		n &= mask;
		n += m;
	}

	return calc_rate(parent_rate, m, n, mode, pre_div);
}

static const struct clk_ops clk_rcg2_dfs_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.determine_rate = clk_rcg2_dfs_determine_rate,
	.recalc_rate = clk_rcg2_dfs_recalc_rate,
};

static int clk_rcg2_enable_dfs(const struct clk_rcg_dfs_data *data,
			       struct regmap *regmap)
{
	struct clk_rcg2 *rcg = data->rcg;
	struct clk_init_data *init = data->init;
	u32 val;
	int ret;

	ret = regmap_read(regmap, rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &val);
	if (ret)
		return -EINVAL;

	if (!(val & SE_CMD_DFS_EN))
		return 0;

	/*
	 * Rate changes with consumer writing a register in
	 * their own I/O region
	 */
	init->flags |= CLK_GET_RATE_NOCACHE;
	init->ops = &clk_rcg2_dfs_ops;

	rcg->freq_tbl = NULL;

	return 0;
}

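/*
 * Usage sketch (hypothetical driver code; the clock and init names
 * below are assumptions): gather the DFS-capable RCGs and hand them
 * over once the controller's regmap is available, e.g.
 *
 *	static const struct clk_rcg_dfs_data rcg_dfs_data[] = {
 *		{ .rcg = &example_qup_clk_src, .init = &example_init },
 *	};
 *
 *	ret = qcom_cc_register_rcg_dfs(regmap, rcg_dfs_data,
 *				       ARRAY_SIZE(rcg_dfs_data));
 */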
int qcom_cc_register_rcg_dfs(struct regmap *regmap,
			     const struct clk_rcg_dfs_data *rcgs, size_t len)
{
	int i, ret;

	for (i = 0; i < len; i++) {
		ret = clk_rcg2_enable_dfs(&rcgs[i], regmap);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_cc_register_rcg_dfs);