// SPDX-License-Identifier: GPL-2.0+
/*
 * Based on drivers/clk/tegra/clk-emc.c
 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
 *
 * Author: Dmitry Osipenko <digetx@gmail.com>
 * Copyright (C) 2019 GRATE-DRIVER project
 */

#define pr_fmt(fmt) "tegra-emc-clk: " fmt

#include <linux/bits.h>
#include <linux/clk-provider.h>
#include <linux/clk/tegra.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#include "clk.h"

#define CLK_SOURCE_EMC_2X_CLK_DIVISOR_MASK	GENMASK(7, 0)
#define CLK_SOURCE_EMC_2X_CLK_SRC_MASK		GENMASK(31, 30)
#define CLK_SOURCE_EMC_2X_CLK_SRC_SHIFT		30

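/*
 * MC_EMC_SAME_FREQ asks the memory controller to run at the full EMC rate
 * instead of half of it, and USE_PLLM_UD selects the undivided ("UD")
 * PLL_M output path, bypassing the 2x clock divider for lower jitter; see
 * the want_low_jitter handling below. (Summary based on the CLK_SOURCE_EMC
 * bit names; check the Tegra20 TRM for the exact semantics.)
 */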
#define MC_EMC_SAME_FREQ	BIT(16)
#define USE_PLLM_UD		BIT(29)

#define EMC_SRC_PLL_M		0
#define EMC_SRC_PLL_C		1
#define EMC_SRC_PLL_P		2
#define EMC_SRC_CLK_M		3

static const char * const emc_parent_clk_names[] = {
	"pll_m", "pll_c", "pll_p", "clk_m",
};

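/*
 * round_cb/cb_arg are filled in later by the EMC memory driver through
 * tegra20_clk_set_emc_round_callback(), presumably so that rate requests
 * can be rounded to a rate for which a validated memory timing exists.
 */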
struct tegra_clk_emc {
	struct clk_hw hw;
	void __iomem *reg;
	bool mc_same_freq;
	bool want_low_jitter;

	tegra20_clk_emc_round_cb *round_cb;
	void *cb_arg;
};

static inline struct tegra_clk_emc *to_tegra_clk_emc(struct clk_hw *hw)
{
	return container_of(hw, struct tegra_clk_emc, hw);
}

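/*
 * The divisor field is a 7.1 fixed-point value (hence the "8, 1" arguments
 * passed to div_frac_get() below), so the resulting rate is
 * parent_rate / (1 + div / 2) = parent_rate * 2 / (div + 2).
 */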
static unsigned long emc_recalc_rate(struct clk_hw *hw,
				     unsigned long parent_rate)
{
	struct tegra_clk_emc *emc = to_tegra_clk_emc(hw);
	u32 val, div;

	val = readl_relaxed(emc->reg);
	div = val & CLK_SOURCE_EMC_2X_CLK_DIVISOR_MASK;

	return DIV_ROUND_UP(parent_rate * 2, div + 2);
}

static u8 emc_get_parent(struct clk_hw *hw)
{
	struct tegra_clk_emc *emc = to_tegra_clk_emc(hw);

	return readl_relaxed(emc->reg) >> CLK_SOURCE_EMC_2X_CLK_SRC_SHIFT;
}

static int emc_set_parent(struct clk_hw *hw, u8 index)
{
	struct tegra_clk_emc *emc = to_tegra_clk_emc(hw);
	u32 val, div;

	val = readl_relaxed(emc->reg);
	val &= ~CLK_SOURCE_EMC_2X_CLK_SRC_MASK;
	val |= index << CLK_SOURCE_EMC_2X_CLK_SRC_SHIFT;

	div = val & CLK_SOURCE_EMC_2X_CLK_DIVISOR_MASK;

	if (index == EMC_SRC_PLL_M && div == 0 && emc->want_low_jitter)
		val |= USE_PLLM_UD;
	else
		val &= ~USE_PLLM_UD;

	if (emc->mc_same_freq)
		val |= MC_EMC_SAME_FREQ;
	else
		val &= ~MC_EMC_SAME_FREQ;

	writel_relaxed(val, emc->reg);

	/* read the register back to flush the write before delaying */
	fence_udelay(1, emc->reg);

	return 0;
}

static int emc_set_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long parent_rate)
{
	struct tegra_clk_emc *emc = to_tegra_clk_emc(hw);
	unsigned int index;
	u32 val, div;

	div = div_frac_get(rate, parent_rate, 8, 1, 0);

	val = readl_relaxed(emc->reg);
	val &= ~CLK_SOURCE_EMC_2X_CLK_DIVISOR_MASK;
	val |= div;

	index = val >> CLK_SOURCE_EMC_2X_CLK_SRC_SHIFT;

	if (index == EMC_SRC_PLL_M && div == 0 && emc->want_low_jitter)
		val |= USE_PLLM_UD;
	else
		val &= ~USE_PLLM_UD;

	if (emc->mc_same_freq)
		val |= MC_EMC_SAME_FREQ;
	else
		val &= ~MC_EMC_SAME_FREQ;

	writel_relaxed(val, emc->reg);

	fence_udelay(1, emc->reg);

	return 0;
}

static int emc_set_rate_and_parent(struct clk_hw *hw,
				   unsigned long rate,
				   unsigned long parent_rate,
				   u8 index)
{
	struct tegra_clk_emc *emc = to_tegra_clk_emc(hw);
	u32 val, div;

	div = div_frac_get(rate, parent_rate, 8, 1, 0);

	val = readl_relaxed(emc->reg);

	val &= ~CLK_SOURCE_EMC_2X_CLK_SRC_MASK;
	val |= index << CLK_SOURCE_EMC_2X_CLK_SRC_SHIFT;

	val &= ~CLK_SOURCE_EMC_2X_CLK_DIVISOR_MASK;
	val |= div;

	if (index == EMC_SRC_PLL_M && div == 0 && emc->want_low_jitter)
		val |= USE_PLLM_UD;
	else
		val &= ~USE_PLLM_UD;

	if (emc->mc_same_freq)
		val |= MC_EMC_SAME_FREQ;
	else
		val &= ~MC_EMC_SAME_FREQ;

	writel_relaxed(val, emc->reg);

	fence_udelay(1, emc->reg);

	return 0;
}

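/*
 * Rate selection is delegated to the EMC memory driver: round_cb() is
 * expected to return a rate it can actually program, and this function
 * then looks for a parent whose divided rate reproduces that value
 * exactly.
 */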
static int emc_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	struct tegra_clk_emc *emc = to_tegra_clk_emc(hw);
	struct clk_hw *parent_hw;
	unsigned long divided_rate;
	unsigned long parent_rate;
	unsigned int i;
	long emc_rate;
	int div;

	emc_rate = emc->round_cb(req->rate, req->min_rate, req->max_rate,
				 emc->cb_arg);
	if (emc_rate < 0)
		return emc_rate;

	for (i = 0; i < ARRAY_SIZE(emc_parent_clk_names); i++) {
		parent_hw = clk_hw_get_parent_by_index(hw, i);

		if (req->best_parent_hw == parent_hw)
			parent_rate = req->best_parent_rate;
		else
			parent_rate = clk_hw_get_rate(parent_hw);

		if (emc_rate > parent_rate)
			continue;

		div = div_frac_get(emc_rate, parent_rate, 8, 1, 0);
		divided_rate = DIV_ROUND_UP(parent_rate * 2, div + 2);

		if (divided_rate != emc_rate)
			continue;

		req->best_parent_rate = parent_rate;
		req->best_parent_hw = parent_hw;
		req->rate = emc_rate;
		break;
	}

	if (i == ARRAY_SIZE(emc_parent_clk_names)) {
		pr_err_once("can't find parent for rate %lu emc_rate %lu\n",
			    req->rate, emc_rate);
		return -EINVAL;
	}

	return 0;
}

static const struct clk_ops tegra_clk_emc_ops = {
	.recalc_rate = emc_recalc_rate,
	.get_parent = emc_get_parent,
	.set_parent = emc_set_parent,
	.set_rate = emc_set_rate,
	.set_rate_and_parent = emc_set_rate_and_parent,
	.determine_rate = emc_determine_rate,
};

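/*
 * Usage sketch (names hypothetical): the EMC memory driver registers its
 * rounding helper once it knows the supported memory timings, e.g.
 *
 *	static long emc_round_rate(unsigned long rate, unsigned long min_rate,
 *				   unsigned long max_rate, void *arg)
 *	{
 *		... return the closest rate with a valid timing, or a
 *		    negative error code ...
 *	}
 *
 *	tegra20_clk_set_emc_round_callback(emc_round_rate, emc);
 */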
void tegra20_clk_set_emc_round_callback(tegra20_clk_emc_round_cb *round_cb,
					void *cb_arg)
{
	struct clk *clk = __clk_lookup("emc");
	struct tegra_clk_emc *emc;
	struct clk_hw *hw;

	if (clk) {
		hw = __clk_get_hw(clk);
		emc = to_tegra_clk_emc(hw);

		emc->round_cb = round_cb;
		emc->cb_arg = cb_arg;
	}
}

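/*
 * Reports whether the EMC memory driver has hooked up its rate-rounding
 * callback; presumably the platform clock code uses this to decide whether
 * EMC rate changes can be allowed.
 */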
bool tegra20_clk_emc_driver_available(struct clk_hw *emc_hw)
{
	return to_tegra_clk_emc(emc_hw)->round_cb != NULL;
}

struct clk *tegra20_clk_register_emc(void __iomem *ioaddr, bool low_jitter)
{
	struct tegra_clk_emc *emc;
	struct clk_init_data init;
	struct clk *clk;

	emc = kzalloc(sizeof(*emc), GFP_KERNEL);
	if (!emc)
		return NULL;

	/*
	 * EMC stands for External Memory Controller.
	 *
	 * We don't ever want the EMC clock to be disabled by gating its
	 * parent or the like, because the system breaks immediately in
	 * that case, hence the clock is marked as critical.
	 */
	init.name = "emc";
	init.ops = &tegra_clk_emc_ops;
	init.flags = CLK_IS_CRITICAL;
	init.parent_names = emc_parent_clk_names;
	init.num_parents = ARRAY_SIZE(emc_parent_clk_names);

	emc->reg = ioaddr;
	emc->hw.init = &init;
	emc->want_low_jitter = low_jitter;

	clk = clk_register(NULL, &emc->hw);
	if (IS_ERR(clk)) {
		kfree(emc);
		return NULL;
	}

	return clk;
}

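/*
 * Callers are expected to set this flag before initiating a rate change;
 * the MC_EMC_SAME_FREQ bit is only latched into the register by the next
 * set_rate/set_parent call, together with the new divisor setting.
 */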
int tegra20_clk_prepare_emc_mc_same_freq(struct clk *emc_clk, bool same)
{
	struct tegra_clk_emc *emc;
	struct clk_hw *hw;

	if (!emc_clk)
		return -EINVAL;

	hw = __clk_get_hw(emc_clk);
	emc = to_tegra_clk_emc(hw);
	emc->mc_same_freq = same;

	return 0;
}