// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2020 Linaro Ltd.
 */

#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interconnect.h>

#include "ipa.h"
#include "ipa_clock.h"
#include "ipa_modem.h"

/**
 * DOC: IPA Clocking
 *
 * The "IPA Clock" manages both the IPA core clock and the interconnects
 * (buses) the IPA depends on as a single logical entity.  A reference count
 * is incremented by "get" operations and decremented by "put" operations.
 * Transitions of that count from 0 to 1 result in the clock and interconnects
 * being enabled, and transitions of the count from 1 to 0 cause them to be
 * disabled.  We currently operate the core clock at a fixed clock rate, and
 * all buses at a fixed average and peak bandwidth.  As more advanced IPA
 * features are enabled, we can make better use of clock and bus scaling.
 *
 * An IPA clock reference must be held for any access to IPA hardware.
 */
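
/* An illustrative sketch of the reference model described above (the
 * caller and the hardware access shown are hypothetical, not from this
 * file):
 *
 *	ipa_clock_get(ipa);	0 -> 1 transition enables clock and buses
 *	... access IPA hardware ...
 *	ipa_clock_put(ipa);	1 -> 0 transition disables them again
 */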

#define IPA_CORE_CLOCK_RATE		(75UL * 1000 * 1000)	/* Hz */

/* Interconnect path bandwidths (each times 1000 bytes per second) */
#define IPA_MEMORY_AVG			(80 * 1000)	/* 80 MBps */
#define IPA_MEMORY_PEAK			(600 * 1000)

#define IPA_IMEM_AVG			(80 * 1000)
#define IPA_IMEM_PEAK			(350 * 1000)

#define IPA_CONFIG_AVG			(40 * 1000)
#define IPA_CONFIG_PEAK			(40 * 1000)
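
/* icc_set_bw() expresses bandwidth in kBps, so the values above are
 * passed to it as-is: e.g. IPA_MEMORY_AVG is 80 * 1000 kBps == 80 MBps.
 */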

/**
 * struct ipa_clock - IPA clocking information
 * @count:		Clocking reference count
 * @mutex:		Protects clock enable/disable
 * @core:		IPA core clock
 * @memory_path:	Memory interconnect
 * @imem_path:		Internal memory interconnect
 * @config_path:	Configuration space interconnect
 */
struct ipa_clock {
	refcount_t count;
	struct mutex mutex; /* protects clock enable/disable */
	struct clk *core;
	struct icc_path *memory_path;
	struct icc_path *imem_path;
	struct icc_path *config_path;
};

static struct icc_path *
ipa_interconnect_init_one(struct device *dev, const char *name)
{
	struct icc_path *path;

	path = of_icc_get(dev, name);
	if (IS_ERR(path))
		dev_err(dev, "error %ld getting %s interconnect\n",
			PTR_ERR(path), name);

	return path;
}

/* Initialize interconnects required for IPA operation */
static int ipa_interconnect_init(struct ipa_clock *clock, struct device *dev)
{
	struct icc_path *path;

	path = ipa_interconnect_init_one(dev, "memory");
	if (IS_ERR(path))
		goto err_return;
	clock->memory_path = path;

	path = ipa_interconnect_init_one(dev, "imem");
	if (IS_ERR(path))
		goto err_memory_path_put;
	clock->imem_path = path;

	path = ipa_interconnect_init_one(dev, "config");
	if (IS_ERR(path))
		goto err_imem_path_put;
	clock->config_path = path;

	return 0;

err_imem_path_put:
	icc_put(clock->imem_path);
err_memory_path_put:
	icc_put(clock->memory_path);
err_return:
	return PTR_ERR(path);
}

/* Inverse of ipa_interconnect_init() */
static void ipa_interconnect_exit(struct ipa_clock *clock)
{
	icc_put(clock->config_path);
	icc_put(clock->imem_path);
	icc_put(clock->memory_path);
}

/* Currently we only use one bandwidth level, so just "enable" interconnects */
static int ipa_interconnect_enable(struct ipa *ipa)
{
	struct ipa_clock *clock = ipa->clock;
	int ret;

	ret = icc_set_bw(clock->memory_path, IPA_MEMORY_AVG, IPA_MEMORY_PEAK);
	if (ret)
		return ret;

	ret = icc_set_bw(clock->imem_path, IPA_IMEM_AVG, IPA_IMEM_PEAK);
	if (ret)
		goto err_memory_path_disable;

	ret = icc_set_bw(clock->config_path, IPA_CONFIG_AVG, IPA_CONFIG_PEAK);
	if (ret)
		goto err_imem_path_disable;

	return 0;

err_imem_path_disable:
	(void)icc_set_bw(clock->imem_path, 0, 0);
err_memory_path_disable:
	(void)icc_set_bw(clock->memory_path, 0, 0);

	return ret;
}
/* To disable an interconnect, we just set its bandwidth to 0 */
static int ipa_interconnect_disable(struct ipa *ipa)
{
	struct ipa_clock *clock = ipa->clock;
	int ret;

	ret = icc_set_bw(clock->memory_path, 0, 0);
	if (ret)
		return ret;

	ret = icc_set_bw(clock->imem_path, 0, 0);
	if (ret)
		goto err_memory_path_reenable;

	ret = icc_set_bw(clock->config_path, 0, 0);
	if (ret)
		goto err_imem_path_reenable;

	return 0;

err_imem_path_reenable:
	(void)icc_set_bw(clock->imem_path, IPA_IMEM_AVG, IPA_IMEM_PEAK);
err_memory_path_reenable:
	(void)icc_set_bw(clock->memory_path, IPA_MEMORY_AVG, IPA_MEMORY_PEAK);

	return ret;
}

/* Turn on IPA clocks, including interconnects */
static int ipa_clock_enable(struct ipa *ipa)
{
	int ret;

	ret = ipa_interconnect_enable(ipa);
	if (ret)
		return ret;

	ret = clk_prepare_enable(ipa->clock->core);
	if (ret)
		ipa_interconnect_disable(ipa);

	return ret;
}

/* Inverse of ipa_clock_enable() */
static void ipa_clock_disable(struct ipa *ipa)
{
	clk_disable_unprepare(ipa->clock->core);
	(void)ipa_interconnect_disable(ipa);
}

/* Get an IPA clock reference, but only if the reference count is
 * already non-zero.  Returns true if the additional reference was
 * added successfully, or false otherwise.
 */
bool ipa_clock_get_additional(struct ipa *ipa)
{
	return refcount_inc_not_zero(&ipa->clock->count);
}
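
/* Unlike ipa_clock_get() below, ipa_clock_get_additional() never sleeps;
 * it only attempts an atomic increment.  A sketch of a hypothetical
 * caller that depends on that (not taken from this file):
 *
 *	if (!ipa_clock_get_additional(ipa))
 *		return;		(can't block here to start the clock)
 */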

/* Get an IPA clock reference.  If the reference count is non-zero, it is
 * incremented and return is immediate.  Otherwise it is checked again
 * under protection of the mutex, and if appropriate the IPA clock
 * is enabled.
 *
 * Incrementing the reference count is intentionally deferred until
 * after the clock is running.
 */
void ipa_clock_get(struct ipa *ipa)
{
	struct ipa_clock *clock = ipa->clock;
	int ret;

	/* If the clock is running, just bump the reference count */
	if (ipa_clock_get_additional(ipa))
		return;

	/* Otherwise get the mutex and check again */
	mutex_lock(&clock->mutex);

	/* A reference might have been added before we got the mutex. */
	if (ipa_clock_get_additional(ipa))
		goto out_mutex_unlock;

	ret = ipa_clock_enable(ipa);
	if (ret) {
		dev_err(&ipa->pdev->dev, "error %d enabling IPA clock\n", ret);
		goto out_mutex_unlock;
	}

	refcount_set(&clock->count, 1);

out_mutex_unlock:
	mutex_unlock(&clock->mutex);
}

/* Attempt to remove an IPA clock reference.  If this represents the
 * last reference, disable the IPA clock under protection of the mutex.
 */
void ipa_clock_put(struct ipa *ipa)
{
	struct ipa_clock *clock = ipa->clock;

	/* If this is not the last reference there's nothing more to do */
	if (!refcount_dec_and_mutex_lock(&clock->count, &clock->mutex))
		return;

	ipa_clock_disable(ipa);

	mutex_unlock(&clock->mutex);
}

/* Return the current IPA core clock rate */
u32 ipa_clock_rate(struct ipa *ipa)
{
	return ipa->clock ? (u32)clk_get_rate(ipa->clock->core) : 0;
}

/* Initialize IPA clocking */
struct ipa_clock *ipa_clock_init(struct device *dev)
{
	struct ipa_clock *clock;
	struct clk *clk;
	int ret;

	clk = clk_get(dev, "core");
	if (IS_ERR(clk)) {
		dev_err(dev, "error %ld getting core clock\n", PTR_ERR(clk));
		return ERR_CAST(clk);
	}

	ret = clk_set_rate(clk, IPA_CORE_CLOCK_RATE);
	if (ret) {
		dev_err(dev, "error %d setting core clock rate to %lu\n",
			ret, IPA_CORE_CLOCK_RATE);
		goto err_clk_put;
	}

	clock = kzalloc(sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		ret = -ENOMEM;
		goto err_clk_put;
	}
	clock->core = clk;

	ret = ipa_interconnect_init(clock, dev);
	if (ret)
		goto err_kfree;

	mutex_init(&clock->mutex);
	refcount_set(&clock->count, 0);

	return clock;

err_kfree:
	kfree(clock);
err_clk_put:
	clk_put(clk);

	return ERR_PTR(ret);
}

/* Inverse of ipa_clock_init() */
void ipa_clock_exit(struct ipa_clock *clock)
{
	struct clk *clk = clock->core;

	WARN_ON(refcount_read(&clock->count) != 0);
	mutex_destroy(&clock->mutex);
	ipa_interconnect_exit(clock);
	kfree(clock);
	clk_put(clk);
}