/*
 * linux/drivers/thermal/cpu_cooling.c
 *
 * Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
 * Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org>
 *
 * Copyright (C) 2014 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/module.h>
#include <linux/thermal.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>

#include <trace/events/thermal.h>

/*
 * Cooling state <-> CPUFreq frequency
 *
 * Cooling states are translated to frequencies throughout this driver and this
 * is the relation between them.
 *
 * Highest cooling state corresponds to lowest possible frequency.
 *
 * i.e.
 *	level 0 --> 1st Max Freq
 *	level 1 --> 2nd Max Freq
 *	...
 */
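
/*
 * For example, with a (hypothetical) policy exposing 1800 MHz, 1400 MHz and
 * 1000 MHz OPPs, the descending table built below would map:
 *
 *	level 0 --> 1800000 KHz
 *	level 1 --> 1400000 KHz
 *	level 2 --> 1000000 KHz		(max_level == 2)
 */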

/**
 * struct freq_table - frequency table along with power entries
 * @frequency: frequency in KHz
 * @power: power in mW
 *
 * This structure is built when the cooling device registers and helps
 * in translating frequency to power and vice versa.
 */
struct freq_table {
	u32 frequency;
	u32 power;
};

/**
 * struct time_in_idle - Idle time stats
 * @time: previous reading of the absolute time that this cpu was idle
 * @timestamp: wall time of the last invocation of get_cpu_idle_time_us()
 */
struct time_in_idle {
	u64 time;
	u64 timestamp;
};

/**
 * struct cpufreq_cooling_device - data for cooling device with cpufreq
 * @id: unique integer value corresponding to each cpufreq_cooling_device
 *	registered.
 * @last_load: load measured by the latest call to cpufreq_get_requested_power()
 * @cpufreq_state: integer value representing the current state of cpufreq
 *	cooling devices.
 * @clipped_freq: integer value representing the absolute value of the clipped
 *	frequency.
 * @max_level: maximum cooling level. One less than total number of valid
 *	cpufreq frequencies.
 * @freq_table: Freq table in descending order of frequencies
 * @cdev: thermal_cooling_device pointer to keep track of the
 *	registered cooling device.
 * @policy: cpufreq policy.
 * @node: list_head to link all cpufreq_cooling_device together.
 * @idle_time: idle time stats
 *
 * This structure is required for keeping information of each registered
 * cpufreq_cooling_device.
 */
struct cpufreq_cooling_device {
	int id;
	u32 last_load;
	unsigned int cpufreq_state;
	unsigned int clipped_freq;
	unsigned int max_level;
	struct freq_table *freq_table;	/* In descending order */
	struct thermal_cooling_device *cdev;
	struct cpufreq_policy *policy;
	struct list_head node;
	struct time_in_idle *idle_time;
};

static DEFINE_IDA(cpufreq_ida);
static DEFINE_MUTEX(cooling_list_lock);
static LIST_HEAD(cpufreq_cdev_list);

/* Below code defines functions to be used for cpufreq as cooling device */

/**
 * get_level: Find the level for a particular frequency
 * @cpufreq_cdev: cpufreq_cdev for which the property is required
 * @freq: Frequency
 *
 * Return: level corresponding to the frequency.
 */
static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_cdev,
			       unsigned int freq)
{
	struct freq_table *freq_table = cpufreq_cdev->freq_table;
	unsigned long level;

	for (level = 1; level <= cpufreq_cdev->max_level; level++)
		if (freq > freq_table[level].frequency)
			break;

	return level - 1;
}
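
/*
 * Illustration (hypothetical table): with the descending freq_table
 * {1800000, 1400000, 1000000} KHz, get_level(cdev, 1400000) stops at
 * level 2 (1400000 > 1000000) and returns 1, i.e. the 2nd max freq.
 */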

/**
 * cpufreq_thermal_notifier - notifier callback for cpufreq policy change.
 * @nb: struct notifier_block * with callback info.
 * @event: value showing cpufreq event for which this function is invoked.
 * @data: callback-specific data
 *
 * Callback to hijack the notification on cpufreq policy transition.
 * Every time there is a change in policy, we will intercept and
 * update the cpufreq policy with thermal constraints.
 *
 * Return: NOTIFY_DONE if the event is not CPUFREQ_ADJUST, NOTIFY_OK otherwise.
 */
static int cpufreq_thermal_notifier(struct notifier_block *nb,
				    unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	unsigned long clipped_freq;
	struct cpufreq_cooling_device *cpufreq_cdev;

	if (event != CPUFREQ_ADJUST)
		return NOTIFY_DONE;

	mutex_lock(&cooling_list_lock);
	list_for_each_entry(cpufreq_cdev, &cpufreq_cdev_list, node) {
		/*
		 * A new copy of the policy is sent to the notifier, so we
		 * can't compare the policy pointers directly. Match on the
		 * policy CPU instead.
		 */
		if (policy->cpu != cpufreq_cdev->policy->cpu)
			continue;

		/*
		 * policy->max is the maximum allowed frequency set by user
		 * space and clipped_freq is the maximum that thermal
		 * constraints allow.
		 *
		 * If clipped_freq is lower than policy->max, then we need to
		 * readjust policy->max.
		 *
		 * But, if clipped_freq is greater than policy->max, we don't
		 * need to do anything.
		 */
		clipped_freq = cpufreq_cdev->clipped_freq;

		if (policy->max > clipped_freq)
			cpufreq_verify_within_limits(policy, 0, clipped_freq);
		break;
	}
	mutex_unlock(&cooling_list_lock);

	return NOTIFY_OK;
}

/**
 * update_freq_table() - Update the freq table with power numbers
 * @cpufreq_cdev: the cpufreq cooling device in which to update the table
 * @capacitance: dynamic power coefficient for these cpus
 *
 * Update the freq table with power numbers. This table will be used in
 * cpu_power_to_freq() and cpu_freq_to_power() to convert between power and
 * frequency efficiently. Power is stored in mW, frequency in KHz. The
 * resulting table is in descending order.
 *
 * Return: 0 on success, -ENODEV if there is no cpu device, -EINVAL if the
 * OPP count doesn't match the cpufreq table or an OPP lookup fails, or
 * another -E* error code from the OPP layer.
 */
static int update_freq_table(struct cpufreq_cooling_device *cpufreq_cdev,
			     u32 capacitance)
{
	struct freq_table *freq_table = cpufreq_cdev->freq_table;
	struct dev_pm_opp *opp;
	struct device *dev = NULL;
	int num_opps = 0, cpu = cpufreq_cdev->policy->cpu, i;

	dev = get_cpu_device(cpu);
	if (unlikely(!dev)) {
		/*
		 * The cooling device isn't registered yet at this point
		 * (cpufreq_cdev->cdev is still NULL), so don't use it for
		 * logging.
		 */
		pr_warn("No cpu device for cpu %d\n", cpu);
		return -ENODEV;
	}

	num_opps = dev_pm_opp_get_opp_count(dev);
	if (num_opps < 0)
		return num_opps;

	/*
	 * The cpufreq table is also built from the OPP table and so the count
	 * should match.
	 */
	if (num_opps != cpufreq_cdev->max_level + 1) {
		dev_warn(dev, "Number of OPPs not matching with max_levels\n");
		return -EINVAL;
	}

	for (i = 0; i <= cpufreq_cdev->max_level; i++) {
		unsigned long freq = freq_table[i].frequency * 1000;
		u32 freq_mhz = freq_table[i].frequency / 1000;
		u64 power;
		u32 voltage_mv;

		/*
		 * Find ceil frequency as 'freq' may be slightly lower than OPP
		 * freq due to truncation while converting to kHz.
		 */
		opp = dev_pm_opp_find_freq_ceil(dev, &freq);
		if (IS_ERR(opp)) {
			dev_err(dev, "failed to get opp for %lu frequency\n",
				freq);
			return -EINVAL;
		}

		voltage_mv = dev_pm_opp_get_voltage(opp) / 1000;
		dev_pm_opp_put(opp);

		/*
		 * Do the multiplication with MHz and millivolt so as
		 * to not overflow.
		 */
		power = (u64)capacitance * freq_mhz * voltage_mv * voltage_mv;
		do_div(power, 1000000000);

		/* power is stored in mW */
		freq_table[i].power = power;
	}

	return 0;
}
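
/*
 * Worked example for the formula above (hypothetical numbers): with a
 * dynamic-power-coefficient (capacitance) of 100, an OPP at 1000 MHz and
 * 900 mV gives
 *
 *	power = 100 * 1000 * 900 * 900 / 10^9 = 81 mW
 *
 * which is what gets stored in freq_table[i].power for that level.
 */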

static u32 cpu_freq_to_power(struct cpufreq_cooling_device *cpufreq_cdev,
			     u32 freq)
{
	int i;
	struct freq_table *freq_table = cpufreq_cdev->freq_table;

	for (i = 1; i <= cpufreq_cdev->max_level; i++)
		if (freq > freq_table[i].frequency)
			break;

	return freq_table[i - 1].power;
}

static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev,
			     u32 power)
{
	int i;
	struct freq_table *freq_table = cpufreq_cdev->freq_table;

	for (i = 1; i <= cpufreq_cdev->max_level; i++)
		if (power > freq_table[i].power)
			break;

	return freq_table[i - 1].frequency;
}

/**
 * get_load() - get load for a cpu since last updated
 * @cpufreq_cdev: &struct cpufreq_cooling_device for this cpu
 * @cpu: cpu number
 * @cpu_idx: index of the cpu in time_in_idle*
 *
 * Return: The average load of cpu @cpu in percentage since this
 * function was last called.
 */
static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu,
		    int cpu_idx)
{
	u32 load;
	u64 now, now_idle, delta_time, delta_idle;
	struct time_in_idle *idle_time = &cpufreq_cdev->idle_time[cpu_idx];

	now_idle = get_cpu_idle_time(cpu, &now, 0);
	delta_idle = now_idle - idle_time->time;
	delta_time = now - idle_time->timestamp;

	if (delta_time <= delta_idle)
		load = 0;
	else
		load = div64_u64(100 * (delta_time - delta_idle), delta_time);

	idle_time->time = now_idle;
	idle_time->timestamp = now;

	return load;
}
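
/*
 * Illustration (hypothetical numbers): if 100 ms of wall time and 25 ms of
 * idle time elapsed since the previous call, get_load() reports
 * 100 * (100 - 25) / 100 = 75 (percent busy).
 */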

/**
 * get_dynamic_power() - calculate the dynamic power
 * @cpufreq_cdev: &cpufreq_cooling_device for this cdev
 * @freq: current frequency
 *
 * Return: the dynamic power consumed by the cpus described by
 * @cpufreq_cdev.
 */
static u32 get_dynamic_power(struct cpufreq_cooling_device *cpufreq_cdev,
			     unsigned long freq)
{
	u32 raw_cpu_power;

	raw_cpu_power = cpu_freq_to_power(cpufreq_cdev, freq);
	return (raw_cpu_power * cpufreq_cdev->last_load) / 100;
}
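
/*
 * For instance (hypothetical numbers), if the current frequency maps to
 * 81 mW in the power table and the last measured load was 50%, the dynamic
 * power reported is 81 * 50 / 100 = 40 mW (integer arithmetic).
 */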

/* cpufreq cooling device callback functions are defined below */

/**
 * cpufreq_get_max_state - callback function to get the max cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: fill this variable with the max cooling state.
 *
 * Callback for the thermal cooling device to return the cpufreq
 * max cooling state.
 *
 * Return: 0 on success, an error code otherwise.
 */
static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
				 unsigned long *state)
{
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;

	*state = cpufreq_cdev->max_level;
	return 0;
}

/**
 * cpufreq_get_cur_state - callback function to get the current cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: fill this variable with the current cooling state.
 *
 * Callback for the thermal cooling device to return the cpufreq
 * current cooling state.
 *
 * Return: 0 on success, an error code otherwise.
 */
static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
				 unsigned long *state)
{
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;

	*state = cpufreq_cdev->cpufreq_state;

	return 0;
}

/**
 * cpufreq_set_cur_state - callback function to set the current cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: the cooling state to set.
 *
 * Callback for the thermal cooling device to change the cpufreq
 * current cooling state.
 *
 * Return: 0 on success, an error code otherwise.
 */
static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
				 unsigned long state)
{
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
	unsigned int clip_freq;

	/* Requested state should not exceed max_level */
	if (WARN_ON(state > cpufreq_cdev->max_level))
		return -EINVAL;

	/* Check if the old cooling action is same as new cooling action */
	if (cpufreq_cdev->cpufreq_state == state)
		return 0;

	clip_freq = cpufreq_cdev->freq_table[state].frequency;
	cpufreq_cdev->cpufreq_state = state;
	cpufreq_cdev->clipped_freq = clip_freq;

	cpufreq_update_policy(cpufreq_cdev->policy->cpu);

	return 0;
}

/**
 * cpufreq_get_requested_power() - get the current power
 * @cdev: &thermal_cooling_device pointer
 * @tz: a valid thermal zone device pointer
 * @power: pointer in which to store the resulting power
 *
 * Calculate the current power consumption of the cpus in milliwatts
 * and store it in @power. This function should actually calculate
 * the requested power, but it's hard to get the frequency that
 * cpufreq would have assigned if there were no thermal limits.
 * Instead, we calculate the current power on the assumption that the
 * immediate future will look like the immediate past.
 *
 * We use the current frequency and the average load since this
 * function was last called. In reality, there could have been
 * multiple opps since this function was last called and that affects
 * the load calculation. While it's not perfectly accurate, this
 * simplification is good enough and works. REVISIT this, as more
 * complex code may be needed if experiments show that it's not
 * accurate enough.
 *
 * Return: always 0.
 */
static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
				       struct thermal_zone_device *tz,
				       u32 *power)
{
	unsigned long freq;
	int i = 0, cpu;
	u32 total_load = 0;
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
	struct cpufreq_policy *policy = cpufreq_cdev->policy;
	u32 *load_cpu = NULL;

	freq = cpufreq_quick_get(policy->cpu);

	if (trace_thermal_power_cpu_get_power_enabled()) {
		u32 ncpus = cpumask_weight(policy->related_cpus);

		load_cpu = kcalloc(ncpus, sizeof(*load_cpu), GFP_KERNEL);
	}

	for_each_cpu(cpu, policy->related_cpus) {
		u32 load;

		if (cpu_online(cpu))
			load = get_load(cpufreq_cdev, cpu, i);
		else
			load = 0;

		total_load += load;
		if (trace_thermal_power_cpu_limit_enabled() && load_cpu)
			load_cpu[i] = load;

		i++;
	}

	cpufreq_cdev->last_load = total_load;

	*power = get_dynamic_power(cpufreq_cdev, freq);

	if (load_cpu) {
		trace_thermal_power_cpu_get_power(policy->related_cpus, freq,
						  load_cpu, i, *power);

		kfree(load_cpu);
	}

	return 0;
}

/**
 * cpufreq_state2power() - convert a cpu cdev state to power consumed
 * @cdev: &thermal_cooling_device pointer
 * @tz: a valid thermal zone device pointer
 * @state: cooling device state to be converted
 * @power: pointer in which to store the resulting power
 *
 * Convert cooling device state @state into power consumption in
 * milliwatts assuming 100% load. Store the calculated power in
 * @power.
 *
 * Return: 0 on success, -EINVAL if the cooling device state exceeds the
 * maximum cooling level.
 */
static int cpufreq_state2power(struct thermal_cooling_device *cdev,
			       struct thermal_zone_device *tz,
			       unsigned long state, u32 *power)
{
	unsigned int freq, num_cpus;
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;

	/* Requested state should not exceed max_level */
	if (WARN_ON(state > cpufreq_cdev->max_level))
		return -EINVAL;

	num_cpus = cpumask_weight(cpufreq_cdev->policy->cpus);

	freq = cpufreq_cdev->freq_table[state].frequency;
	*power = cpu_freq_to_power(cpufreq_cdev, freq) * num_cpus;

	return 0;
}
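
/*
 * Example (hypothetical numbers): if state 1 maps to 1400 MHz, the power
 * table reports, say, 160 mW per CPU at that frequency, and the policy
 * currently spans 4 online CPUs, cpufreq_state2power() reports
 * 4 * 160 = 640 mW.
 */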

/**
 * cpufreq_power2state() - convert power to a cooling device state
 * @cdev: &thermal_cooling_device pointer
 * @tz: a valid thermal zone device pointer
 * @power: power in milliwatts to be converted
 * @state: pointer in which to store the resulting state
 *
 * Calculate a cooling device state for the cpus described by @cdev
 * that would allow them to consume at most @power mW and store it in
 * @state. Note that this calculation depends on external factors
 * such as the cpu load. Calling this function with the same power
 * as input can yield different cooling device states depending on
 * those external factors.
 *
 * Return: always 0.
 */
static int cpufreq_power2state(struct thermal_cooling_device *cdev,
			       struct thermal_zone_device *tz, u32 power,
			       unsigned long *state)
{
	unsigned int cur_freq, target_freq;
	u32 last_load, normalised_power;
	struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
	struct cpufreq_policy *policy = cpufreq_cdev->policy;

	cur_freq = cpufreq_quick_get(policy->cpu);
	power = power > 0 ? power : 0;
	last_load = cpufreq_cdev->last_load ?: 1;
	normalised_power = (power * 100) / last_load;
	target_freq = cpu_power_to_freq(cpufreq_cdev, normalised_power);

	*state = get_level(cpufreq_cdev, target_freq);
	trace_thermal_power_cpu_limit(policy->related_cpus, target_freq, *state,
				      power);
	return 0;
}
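
/*
 * Illustration for a single-CPU policy (hypothetical numbers): given a
 * 40 mW budget and a last measured load of 50%, the normalised (100% load)
 * power is 40 * 100 / 50 = 80 mW. With per-CPU table powers of
 * {160, 81, 40} mW for levels {0, 1, 2}, cpu_power_to_freq() picks the
 * level 1 frequency (81 mW >= 80 mW), so *state is set to 1.
 */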

/* Bind cpufreq callbacks to thermal cooling device ops */

static struct thermal_cooling_device_ops cpufreq_cooling_ops = {
	.get_max_state = cpufreq_get_max_state,
	.get_cur_state = cpufreq_get_cur_state,
	.set_cur_state = cpufreq_set_cur_state,
};

static struct thermal_cooling_device_ops cpufreq_power_cooling_ops = {
	.get_max_state = cpufreq_get_max_state,
	.get_cur_state = cpufreq_get_cur_state,
	.set_cur_state = cpufreq_set_cur_state,
	.get_requested_power = cpufreq_get_requested_power,
	.state2power = cpufreq_state2power,
	.power2state = cpufreq_power2state,
};

/* Notifier for cpufreq policy change */
static struct notifier_block thermal_cpufreq_notifier_block = {
	.notifier_call = cpufreq_thermal_notifier,
};

static unsigned int find_next_max(struct cpufreq_frequency_table *table,
				  unsigned int prev_max)
{
	struct cpufreq_frequency_table *pos;
	unsigned int max = 0;

	cpufreq_for_each_valid_entry(pos, table) {
		if (pos->frequency > max && pos->frequency < prev_max)
			max = pos->frequency;
	}

	return max;
}

/**
 * __cpufreq_cooling_register - helper function to create cpufreq cooling device
 * @np: a valid struct device_node to the cooling device device tree node
 * @policy: cpufreq policy
 * @capacitance: dynamic power coefficient for these cpus
 *
 * This interface function registers the cpufreq cooling device with the name
 * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
 * cooling devices. It also gives the opportunity to link the cooling device
 * with a device tree node, in order to bind it via the thermal DT code.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * on failure, it returns a corresponding ERR_PTR().
 */
static struct thermal_cooling_device *
__cpufreq_cooling_register(struct device_node *np,
			   struct cpufreq_policy *policy, u32 capacitance)
{
	struct thermal_cooling_device *cdev;
	struct cpufreq_cooling_device *cpufreq_cdev;
	char dev_name[THERMAL_NAME_LENGTH];
	unsigned int freq, i, num_cpus;
	int ret;
	struct thermal_cooling_device_ops *cooling_ops;
	bool first;

	if (IS_ERR_OR_NULL(policy)) {
		pr_err("%s: cpufreq policy isn't valid: %p\n", __func__, policy);
		return ERR_PTR(-EINVAL);
	}

	i = cpufreq_table_count_valid_entries(policy);
	if (!i) {
		pr_debug("%s: CPUFreq table not found or has no valid entries\n",
			 __func__);
		return ERR_PTR(-ENODEV);
	}

	cpufreq_cdev = kzalloc(sizeof(*cpufreq_cdev), GFP_KERNEL);
	if (!cpufreq_cdev)
		return ERR_PTR(-ENOMEM);

	cpufreq_cdev->policy = policy;
	num_cpus = cpumask_weight(policy->related_cpus);
	cpufreq_cdev->idle_time = kcalloc(num_cpus,
					  sizeof(*cpufreq_cdev->idle_time),
					  GFP_KERNEL);
	if (!cpufreq_cdev->idle_time) {
		cdev = ERR_PTR(-ENOMEM);
		goto free_cdev;
	}

	/* max_level is an index, not a counter */
	cpufreq_cdev->max_level = i - 1;

	cpufreq_cdev->freq_table = kmalloc_array(i,
					sizeof(*cpufreq_cdev->freq_table),
					GFP_KERNEL);
	if (!cpufreq_cdev->freq_table) {
		cdev = ERR_PTR(-ENOMEM);
		goto free_idle_time;
	}

	ret = ida_simple_get(&cpufreq_ida, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		cdev = ERR_PTR(ret);
		goto free_table;
	}
	cpufreq_cdev->id = ret;

	snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
		 cpufreq_cdev->id);

	/* Fill freq-table in descending order of frequencies */
	for (i = 0, freq = -1; i <= cpufreq_cdev->max_level; i++) {
		freq = find_next_max(policy->freq_table, freq);
		cpufreq_cdev->freq_table[i].frequency = freq;

		/* Warn for duplicate entries */
		if (!freq)
			pr_warn("%s: table has duplicate entries\n", __func__);
		else
			pr_debug("%s: freq:%u KHz\n", __func__, freq);
	}

	if (capacitance) {
		ret = update_freq_table(cpufreq_cdev, capacitance);
		if (ret) {
			cdev = ERR_PTR(ret);
			goto remove_ida;
		}

		cooling_ops = &cpufreq_power_cooling_ops;
	} else {
		cooling_ops = &cpufreq_cooling_ops;
	}

	cdev = thermal_of_cooling_device_register(np, dev_name, cpufreq_cdev,
						  cooling_ops);
	if (IS_ERR(cdev))
		goto remove_ida;

	cpufreq_cdev->clipped_freq = cpufreq_cdev->freq_table[0].frequency;
	cpufreq_cdev->cdev = cdev;

	mutex_lock(&cooling_list_lock);
	/* Register the notifier for first cpufreq cooling device */
	first = list_empty(&cpufreq_cdev_list);
	list_add(&cpufreq_cdev->node, &cpufreq_cdev_list);
	mutex_unlock(&cooling_list_lock);

	if (first)
		cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
					  CPUFREQ_POLICY_NOTIFIER);

	return cdev;

remove_ida:
	ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
free_table:
	kfree(cpufreq_cdev->freq_table);
free_idle_time:
	kfree(cpufreq_cdev->idle_time);
free_cdev:
	kfree(cpufreq_cdev);
	return cdev;
}

/**
 * cpufreq_cooling_register - function to create cpufreq cooling device.
 * @policy: cpufreq policy
 *
 * This interface function registers the cpufreq cooling device with the name
 * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
 * cooling devices.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * on failure, it returns a corresponding ERR_PTR().
 */
struct thermal_cooling_device *
cpufreq_cooling_register(struct cpufreq_policy *policy)
{
	return __cpufreq_cooling_register(NULL, policy, 0);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_register);

/**
 * of_cpufreq_cooling_register - function to create cpufreq cooling device.
 * @policy: cpufreq policy
 *
 * This interface function registers the cpufreq cooling device with the name
 * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
 * cooling devices. Using this API, the cpufreq cooling device will be
 * linked to the device tree node of the policy's CPU.
 *
 * If the CPU node carries a "dynamic-power-coefficient" property, the
 * cooling device will also implement the power extensions by using a simple
 * cpu power model. The cpus must have registered their OPPs using the OPP
 * library.
 *
 * Return: a valid struct thermal_cooling_device pointer on success,
 * and NULL on failure.
 */
struct thermal_cooling_device *
of_cpufreq_cooling_register(struct cpufreq_policy *policy)
{
	struct device_node *np = of_get_cpu_node(policy->cpu, NULL);
	struct thermal_cooling_device *cdev = NULL;
	u32 capacitance = 0;

	if (!np) {
		pr_err("cpu_cooling: OF node not available for cpu%d\n",
		       policy->cpu);
		return NULL;
	}

	if (of_find_property(np, "#cooling-cells", NULL)) {
		of_property_read_u32(np, "dynamic-power-coefficient",
				     &capacitance);

		cdev = __cpufreq_cooling_register(np, policy, capacitance);
		if (IS_ERR(cdev)) {
			pr_err("cpu_cooling: cpu%d is not running as cooling device: %ld\n",
			       policy->cpu, PTR_ERR(cdev));
			cdev = NULL;
		}
	}

	of_node_put(np);
	return cdev;
}
EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register);
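
/*
 * Typical usage (illustrative sketch, not part of this file): a cpufreq
 * driver registers the cooling device once its policy is fully set up and
 * unregisters it when the policy goes away, e.g.:
 *
 *	static struct thermal_cooling_device *cdev;
 *
 *	static void my_cpufreq_ready(struct cpufreq_policy *policy)
 *	{
 *		cdev = of_cpufreq_cooling_register(policy);
 *	}
 *
 *	static int my_cpufreq_exit(struct cpufreq_policy *policy)
 *	{
 *		cpufreq_cooling_unregister(cdev);
 *		return 0;
 *	}
 *
 * "my_cpufreq_ready"/"my_cpufreq_exit" are hypothetical ->ready/->exit
 * callbacks of a cpufreq driver; real drivers keep the cooling device
 * pointer in their own private data.
 */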

/**
 * cpufreq_cooling_unregister - function to remove cpufreq cooling device.
 * @cdev: thermal cooling device pointer.
 *
 * This interface function unregisters the "thermal-cpufreq-%x" cooling device.
 */
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
	struct cpufreq_cooling_device *cpufreq_cdev;
	bool last;

	if (!cdev)
		return;

	cpufreq_cdev = cdev->devdata;

	mutex_lock(&cooling_list_lock);
	list_del(&cpufreq_cdev->node);
	/* Unregister the notifier for the last cpufreq cooling device */
	last = list_empty(&cpufreq_cdev_list);
	mutex_unlock(&cooling_list_lock);

	if (last)
		cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
					    CPUFREQ_POLICY_NOTIFIER);

	thermal_cooling_device_unregister(cpufreq_cdev->cdev);
	ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
	kfree(cpufreq_cdev->idle_time);
	kfree(cpufreq_cdev->freq_table);
	kfree(cpufreq_cdev);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister);