// SPDX-License-Identifier: GPL-2.0-only
/*
 * clk-flexgen.c
 *
 * Copyright (C) ST-Microelectronics SA 2013
 * Author:  Maxime Coquelin <maxime.coquelin@st.com> for ST-Microelectronics.
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/of.h>
#include <linux/of_address.h>

struct clkgen_data {
	unsigned long flags;
	bool mode;
};

struct flexgen {
	struct clk_hw hw;

	/* Crossbar */
	struct clk_mux mux;
	/* Pre-divisor's gate */
	struct clk_gate pgate;
	/* Pre-divisor */
	struct clk_divider pdiv;
	/* Final divisor's gate */
	struct clk_gate fgate;
	/* Final divisor */
	struct clk_divider fdiv;
	/* Asynchronous mode control */
	struct clk_gate sync;
	/* hw control flags */
	bool control_mode;
};

#define to_flexgen(_hw) container_of(_hw, struct flexgen, hw)
#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)

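/*
 * Enabling a flexgen output opens both the pre-divider and the final
 * divider gates; disabling (below) only closes the final gate and
 * leaves the pre-divider stage running.
 */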
static int flexgen_enable(struct clk_hw *hw)
{
	struct flexgen *flexgen = to_flexgen(hw);
	struct clk_hw *pgate_hw = &flexgen->pgate.hw;
	struct clk_hw *fgate_hw = &flexgen->fgate.hw;

	__clk_hw_set_clk(pgate_hw, hw);
	__clk_hw_set_clk(fgate_hw, hw);

	clk_gate_ops.enable(pgate_hw);

	clk_gate_ops.enable(fgate_hw);

	pr_debug("%s: flexgen output enabled\n", clk_hw_get_name(hw));
	return 0;
}

static void flexgen_disable(struct clk_hw *hw)
{
	struct flexgen *flexgen = to_flexgen(hw);
	struct clk_hw *fgate_hw = &flexgen->fgate.hw;

	/* disable only the final gate */
	__clk_hw_set_clk(fgate_hw, hw);

	clk_gate_ops.disable(fgate_hw);

	pr_debug("%s: flexgen output disabled\n", clk_hw_get_name(hw));
}

static int flexgen_is_enabled(struct clk_hw *hw)
{
	struct flexgen *flexgen = to_flexgen(hw);
	struct clk_hw *fgate_hw = &flexgen->fgate.hw;

	__clk_hw_set_clk(fgate_hw, hw);

	if (!clk_gate_ops.is_enabled(fgate_hw))
		return 0;

	return 1;
}

static u8 flexgen_get_parent(struct clk_hw *hw)
{
	struct flexgen *flexgen = to_flexgen(hw);
	struct clk_hw *mux_hw = &flexgen->mux.hw;

	__clk_hw_set_clk(mux_hw, hw);

	return clk_mux_ops.get_parent(mux_hw);
}

static int flexgen_set_parent(struct clk_hw *hw, u8 index)
{
	struct flexgen *flexgen = to_flexgen(hw);
	struct clk_hw *mux_hw = &flexgen->mux.hw;

	__clk_hw_set_clk(mux_hw, hw);

	return clk_mux_ops.set_parent(mux_hw, index);
}

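/*
 * Return the divisor that brings parent_rate closest to the requested
 * rate: integer division rounded to the nearest value, with ties going
 * to the larger divisor.
 */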
static inline unsigned long
clk_best_div(unsigned long parent_rate, unsigned long rate)
{
	return parent_rate / rate + ((rate > (2*(parent_rate % rate))) ? 0 : 1);
}

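/*
 * With CLK_SET_RATE_PARENT, request parent_rate = rate * div so the
 * divided output hits the requested rate exactly; otherwise round to
 * what the dividers can produce from the fixed parent rate.
 */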
static long flexgen_round_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long *prate)
{
	unsigned long div;

	/* Round div according to exact prate and wished rate */
	div = clk_best_div(*prate, rate);

	if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT) {
		*prate = rate * div;
		return rate;
	}

	return *prate / div;
}

static unsigned long flexgen_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct flexgen *flexgen = to_flexgen(hw);
	struct clk_hw *pdiv_hw = &flexgen->pdiv.hw;
	struct clk_hw *fdiv_hw = &flexgen->fdiv.hw;
	unsigned long mid_rate;

	__clk_hw_set_clk(pdiv_hw, hw);
	__clk_hw_set_clk(fdiv_hw, hw);

	mid_rate = clk_divider_ops.recalc_rate(pdiv_hw, parent_rate);

	return clk_divider_ops.recalc_rate(fdiv_hw, mid_rate);
}

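/*
 * If hw control mode is enabled (see clkgen_video below), the sync bit
 * of the final divider register is cleared before the dividers are
 * reprogrammed, switching the output to asynchronous mode (the 'sync'
 * gate declared above).
 */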
static int flexgen_set_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long parent_rate)
{
	struct flexgen *flexgen = to_flexgen(hw);
	struct clk_hw *pdiv_hw = &flexgen->pdiv.hw;
	struct clk_hw *fdiv_hw = &flexgen->fdiv.hw;
	struct clk_hw *sync_hw = &flexgen->sync.hw;
	struct clk_gate *config = to_clk_gate(sync_hw);
	unsigned long div = 0;
	int ret = 0;
	u32 reg;

	__clk_hw_set_clk(pdiv_hw, hw);
	__clk_hw_set_clk(fdiv_hw, hw);

	if (flexgen->control_mode) {
		reg = readl(config->reg);
		reg &= ~BIT(config->bit_idx);
		writel(reg, config->reg);
	}

	div = clk_best_div(parent_rate, rate);

	/*
	 * pdiv is mainly targeted for low freq results, while fdiv
	 * should be used for div <= 64. The other way round can
	 * lead to 'duty cycle' issues.
	 */
	if (div <= 64) {
		clk_divider_ops.set_rate(pdiv_hw, parent_rate, parent_rate);
		ret = clk_divider_ops.set_rate(fdiv_hw, rate, rate * div);
	} else {
		clk_divider_ops.set_rate(fdiv_hw, parent_rate, parent_rate);
		ret = clk_divider_ops.set_rate(pdiv_hw, rate, rate * div);
	}

	return ret;
}

static const struct clk_ops flexgen_ops = {
	.enable = flexgen_enable,
	.disable = flexgen_disable,
	.is_enabled = flexgen_is_enabled,
	.get_parent = flexgen_get_parent,
	.set_parent = flexgen_set_parent,
	.round_rate = flexgen_round_rate,
	.recalc_rate = flexgen_recalc_rate,
	.set_rate = flexgen_set_rate,
};

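/*
 * Register layout, as programmed below: the crossbar uses one byte per
 * output starting at offset 0x18 (mux select in bits [5:0], pre-divider
 * gate in bit 6 of that byte), the pre-dividers sit at 0x58 + 4 * idx,
 * and the final dividers (divide value in bits [5:0], gate bit 6, sync
 * bit 7) at 0x164 + 4 * idx.
 */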
static struct clk *clk_register_flexgen(const char *name,
				const char **parent_names, u8 num_parents,
				void __iomem *reg, spinlock_t *lock, u32 idx,
				unsigned long flexgen_flags, bool mode)
{
	struct flexgen *fgxbar;
	struct clk *clk;
	struct clk_init_data init;
	u32 xbar_shift;
	void __iomem *xbar_reg, *fdiv_reg;

	fgxbar = kzalloc(sizeof(struct flexgen), GFP_KERNEL);
	if (!fgxbar)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &flexgen_ops;
	init.flags = CLK_GET_RATE_NOCACHE | flexgen_flags;
	init.parent_names = parent_names;
	init.num_parents = num_parents;

	xbar_reg = reg + 0x18 + (idx & ~0x3);
	xbar_shift = (idx % 4) * 0x8;
	fdiv_reg = reg + 0x164 + idx * 4;

	/* Crossbar element config */
	fgxbar->mux.lock = lock;
	fgxbar->mux.mask = BIT(6) - 1;
	fgxbar->mux.reg = xbar_reg;
	fgxbar->mux.shift = xbar_shift;
	fgxbar->mux.table = NULL;

	/* Pre-divider's gate config (in xbar register) */
	fgxbar->pgate.lock = lock;
	fgxbar->pgate.reg = xbar_reg;
	fgxbar->pgate.bit_idx = xbar_shift + 6;

	/* Pre-divider config */
	fgxbar->pdiv.lock = lock;
	fgxbar->pdiv.reg = reg + 0x58 + idx * 4;
	fgxbar->pdiv.width = 10;

	/* Final divider's gate config */
	fgxbar->fgate.lock = lock;
	fgxbar->fgate.reg = fdiv_reg;
	fgxbar->fgate.bit_idx = 6;

	/* Final divider config */
	fgxbar->fdiv.lock = lock;
	fgxbar->fdiv.reg = fdiv_reg;
	fgxbar->fdiv.width = 6;

	/* Final divider sync config */
	fgxbar->sync.lock = lock;
	fgxbar->sync.reg = fdiv_reg;
	fgxbar->sync.bit_idx = 7;

	fgxbar->control_mode = mode;

	fgxbar->hw.init = &init;

	clk = clk_register(NULL, &fgxbar->hw);
	if (IS_ERR(clk))
		kfree(fgxbar);
	else
		pr_debug("%s: parent %s rate %u\n",
			__clk_get_name(clk),
			__clk_get_name(clk_get_parent(clk)),
			(unsigned int)clk_get_rate(clk));
	return clk;
}

static const char ** __init flexgen_get_parents(struct device_node *np,
						       int *num_parents)
{
	const char **parents;
	unsigned int nparents;

	nparents = of_clk_get_parent_count(np);
	if (WARN_ON(!nparents))
		return NULL;

	parents = kcalloc(nparents, sizeof(const char *), GFP_KERNEL);
	if (!parents)
		return NULL;

	*num_parents = of_clk_parent_fill(np, parents, nparents);

	return parents;
}

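/*
 * Per-compatible configuration: both audio and video flexgens propagate
 * rate requests to their parent; the video variant additionally enables
 * hw control mode, which makes flexgen_set_rate() clear the sync bit.
 */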
static const struct clkgen_data clkgen_audio = {
	.flags = CLK_SET_RATE_PARENT,
};

static const struct clkgen_data clkgen_video = {
	.flags = CLK_SET_RATE_PARENT,
	.mode = 1,
};

static const struct of_device_id flexgen_of_match[] = {
	{
		.compatible = "st,flexgen-audio",
		.data = &clkgen_audio,
	},
	{
		.compatible = "st,flexgen-video",
		.data = &clkgen_video,
	},
	{}
};

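/*
 * OF setup hook (see CLK_OF_DECLARE below): maps the parent node's
 * register bank and registers one flexgen clock per entry of the
 * "clock-output-names" property; empty names mark unused outputs.
 */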
static void __init st_of_flexgen_setup(struct device_node *np)
{
	struct device_node *pnode;
	void __iomem *reg;
	struct clk_onecell_data *clk_data;
	const char **parents;
	int num_parents, i;
	spinlock_t *rlock = NULL;
	const struct of_device_id *match;
	struct clkgen_data *data = NULL;
	unsigned long flex_flags = 0;
	int ret;
	bool clk_mode = false;

	pnode = of_get_parent(np);
	if (!pnode)
		return;

	reg = of_iomap(pnode, 0);
	of_node_put(pnode);
	if (!reg)
		return;

	parents = flexgen_get_parents(np, &num_parents);
	if (!parents) {
		iounmap(reg);
		return;
	}

	match = of_match_node(flexgen_of_match, np);
	if (match) {
		data = (struct clkgen_data *)match->data;
		flex_flags = data->flags;
		clk_mode = data->mode;
	}

	clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
	if (!clk_data)
		goto err;

	ret = of_property_count_strings(np, "clock-output-names");
	if (ret <= 0) {
		pr_err("%s: Failed to get number of output clocks (%d)\n",
				__func__, ret);
		goto err;
	}
	clk_data->clk_num = ret;

	clk_data->clks = kcalloc(clk_data->clk_num, sizeof(struct clk *),
			GFP_KERNEL);
	if (!clk_data->clks)
		goto err;

	rlock = kzalloc(sizeof(spinlock_t), GFP_KERNEL);
	if (!rlock)
		goto err;

	spin_lock_init(rlock);

	for (i = 0; i < clk_data->clk_num; i++) {
		struct clk *clk;
		const char *clk_name;

		if (of_property_read_string_index(np, "clock-output-names",
						  i, &clk_name)) {
			break;
		}

		of_clk_detect_critical(np, i, &flex_flags);

		/*
		 * If we read an empty clock name then the output is unused
		 */
		if (*clk_name == '\0')
			continue;

		clk = clk_register_flexgen(clk_name, parents, num_parents,
					   reg, rlock, i, flex_flags, clk_mode);

		if (IS_ERR(clk))
			goto err;

		clk_data->clks[i] = clk;
	}

	kfree(parents);
	of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);

	return;

err:
	iounmap(reg);
	if (clk_data)
		kfree(clk_data->clks);
	kfree(clk_data);
	kfree(parents);
	kfree(rlock);
}
CLK_OF_DECLARE(flexgen, "st,flexgen", st_of_flexgen_setup);