1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * A devfreq driver for NVIDIA Tegra SoCs
4 *
5 * Copyright (c) 2014 NVIDIA CORPORATION. All rights reserved.
6 * Copyright (C) 2014 Google, Inc
7 */
8
9 #include <linux/clk.h>
10 #include <linux/cpufreq.h>
11 #include <linux/devfreq.h>
12 #include <linux/interrupt.h>
13 #include <linux/io.h>
14 #include <linux/irq.h>
15 #include <linux/module.h>
16 #include <linux/of_device.h>
17 #include <linux/platform_device.h>
18 #include <linux/pm_opp.h>
19 #include <linux/reset.h>
20 #include <linux/workqueue.h>
21
22 #include <soc/tegra/fuse.h>
23
24 #include "governor.h"
25
26 #define ACTMON_GLB_STATUS 0x0
27 #define ACTMON_GLB_PERIOD_CTRL 0x4
28
29 #define ACTMON_DEV_CTRL 0x0
30 #define ACTMON_DEV_CTRL_K_VAL_SHIFT 10
31 #define ACTMON_DEV_CTRL_ENB_PERIODIC BIT(18)
32 #define ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN BIT(20)
33 #define ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN BIT(21)
34 #define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT 23
35 #define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT 26
36 #define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN BIT(29)
37 #define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN BIT(30)
38 #define ACTMON_DEV_CTRL_ENB BIT(31)
39
40 #define ACTMON_DEV_CTRL_STOP 0x00000000
41
42 #define ACTMON_DEV_UPPER_WMARK 0x4
43 #define ACTMON_DEV_LOWER_WMARK 0x8
44 #define ACTMON_DEV_INIT_AVG 0xc
45 #define ACTMON_DEV_AVG_UPPER_WMARK 0x10
46 #define ACTMON_DEV_AVG_LOWER_WMARK 0x14
47 #define ACTMON_DEV_COUNT_WEIGHT 0x18
48 #define ACTMON_DEV_AVG_COUNT 0x20
49 #define ACTMON_DEV_INTR_STATUS 0x24
50
51 #define ACTMON_INTR_STATUS_CLEAR 0xffffffff
52
53 #define ACTMON_DEV_INTR_CONSECUTIVE_UPPER BIT(31)
54 #define ACTMON_DEV_INTR_CONSECUTIVE_LOWER BIT(30)
55
56 #define ACTMON_ABOVE_WMARK_WINDOW 1
57 #define ACTMON_BELOW_WMARK_WINDOW 3
58 #define ACTMON_BOOST_FREQ_STEP 16000
59
60 /*
61 * ACTMON_AVERAGE_WINDOW_LOG2: default value for @DEV_CTRL_K_VAL, which
62 * translates to 2 ^ (K_VAL + 1). ex: 2 ^ (6 + 1) = 128
63 */
64 #define ACTMON_AVERAGE_WINDOW_LOG2 6
65 #define ACTMON_SAMPLING_PERIOD 12 /* ms */
66 #define ACTMON_DEFAULT_AVG_BAND 6 /* 1/10 of % */
67
68 #define KHZ 1000
69
70 #define KHZ_MAX (ULONG_MAX / KHZ)
71
72 /* Assume that the bus is saturated if the utilization is 25% */
73 #define BUS_SATURATION_RATIO 25
74
75 /**
76 * struct tegra_devfreq_device_config - configuration specific to an ACTMON
77 * device
78 *
79 * Coefficients and thresholds are percentages unless otherwise noted
80 */
/**
 * struct tegra_devfreq_device_config - configuration specific to an ACTMON
 * device
 *
 * Coefficients and thresholds are percentages unless otherwise noted
 */
struct tegra_devfreq_device_config {
	/* Offset of this device's register window within the ACTMON block */
	u32 offset;
	/* Bit of ACTMON_GLB_STATUS signalling an interrupt from this device */
	u32 irq_mask;

	/* Factors applied to boost_freq every consecutive watermark breach */
	unsigned int boost_up_coeff;
	unsigned int boost_down_coeff;

	/* Define the watermark bounds when applied to the current avg */
	unsigned int boost_up_threshold;
	unsigned int boost_down_threshold;

	/*
	 * Threshold of activity (cycles translated to kHz) below which the
	 * CPU frequency isn't to be taken into account. This is to avoid
	 * increasing the EMC frequency when the CPU is very busy but not
	 * accessing the bus often.
	 */
	u32 avg_dependency_threshold;
};
101
/* Indices into tegra_devfreq.devices[] and the per-SoC config arrays */
enum tegra_actmon_device {
	MCALL = 0,	/* monitors all memory accesses */
	MCCPU,		/* monitors memory accesses originating from the CPUs */
};
106
/* Tegra124 ACTMON device parameters, indexed by enum tegra_actmon_device */
static const struct tegra_devfreq_device_config tegra124_device_configs[] = {
	{
		/* MCALL: All memory accesses (including from the CPUs) */
		.offset = 0x1c0,
		.irq_mask = 1 << 26,
		.boost_up_coeff = 200,
		.boost_down_coeff = 50,
		.boost_up_threshold = 60,
		.boost_down_threshold = 40,
	},
	{
		/* MCCPU: memory accesses from the CPUs */
		.offset = 0x200,
		.irq_mask = 1 << 25,
		.boost_up_coeff = 800,
		.boost_down_coeff = 40,
		.boost_up_threshold = 27,
		.boost_down_threshold = 10,
		.avg_dependency_threshold = 16000, /* 16MHz in kHz units */
	},
};
128
/* Tegra30 ACTMON device parameters, indexed by enum tegra_actmon_device */
static const struct tegra_devfreq_device_config tegra30_device_configs[] = {
	{
		/* MCALL: All memory accesses (including from the CPUs) */
		.offset = 0x1c0,
		.irq_mask = 1 << 26,
		.boost_up_coeff = 200,
		.boost_down_coeff = 50,
		.boost_up_threshold = 20,
		.boost_down_threshold = 10,
	},
	{
		/* MCCPU: memory accesses from the CPUs */
		.offset = 0x200,
		.irq_mask = 1 << 25,
		.boost_up_coeff = 800,
		.boost_down_coeff = 40,
		.boost_up_threshold = 27,
		.boost_down_threshold = 10,
		.avg_dependency_threshold = 16000, /* 16MHz in kHz units */
	},
};
150
151 /**
152 * struct tegra_devfreq_device - state specific to an ACTMON device
153 *
154 * Frequencies are in kHz.
155 */
struct tegra_devfreq_device {
	/* Static per-device parameters (see tegra*_device_configs) */
	const struct tegra_devfreq_device_config *config;
	/* Base of this device's register window (tegra->regs + config->offset) */
	void __iomem *regs;

	/* Average event count sampled in the last interrupt */
	u32 avg_count;

	/*
	 * Extra frequency to increase the target by due to consecutive
	 * watermark breaches.
	 */
	unsigned long boost_freq;

	/* Optimal frequency calculated from the stats for this device */
	unsigned long target_freq;
};
172
/* Per-SoC ACTMON parameters, selected through the OF match table */
struct tegra_devfreq_soc_data {
	const struct tegra_devfreq_device_config *configs;
	/* Weight value for count measurements */
	unsigned int count_weight;
};
178
/* Driver-wide state shared by both monitored ACTMON devices */
struct tegra_devfreq {
	struct devfreq *devfreq;
	struct opp_table *opp_table;

	struct reset_control *reset;
	/* ACTMON interface clock */
	struct clk *clock;
	void __iomem *regs;

	/* EMC (memory bus) clock whose rate is being monitored */
	struct clk *emc_clock;
	unsigned long max_freq;	/* kHz */
	unsigned long cur_freq;	/* kHz, updated from EMC clk notifier */
	struct notifier_block clk_rate_change_nb;

	/* Deferred devfreq update triggered by CPU frequency transitions */
	struct delayed_work cpufreq_update_work;
	struct notifier_block cpu_rate_change_nb;

	/* Indexed by enum tegra_actmon_device */
	struct tegra_devfreq_device devices[2];

	unsigned int irq;

	/* True while the governor is active, see tegra_actmon_start/stop() */
	bool started;

	const struct tegra_devfreq_soc_data *soc;
};
203
/* One row of the CPU-frequency to EMC-frequency translation table */
struct tegra_actmon_emc_ratio {
	unsigned long cpu_freq;
	unsigned long emc_freq;
};

/*
 * Mapping of CPU frequency floors to the EMC frequency assumed necessary
 * for the CPU's memory traffic (both in kHz). Ordered from the highest
 * CPU frequency down; first matching row wins (see actmon_cpu_to_emc_rate).
 */
static const struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
	{ 1400000,    KHZ_MAX },
	{ 1200000,     750000 },
	{ 1100000,     600000 },
	{ 1000000,     500000 },
	{  800000,     375000 },
	{  500000,     200000 },
	{  250000,     100000 },
};
218
/* Read a 32-bit register from the ACTMON global register space. */
static u32 actmon_readl(struct tegra_devfreq *tegra, u32 offset)
{
	void __iomem *addr = tegra->regs + offset;

	return readl_relaxed(addr);
}
223
/* Write a 32-bit register in the ACTMON global register space. */
static void actmon_writel(struct tegra_devfreq *tegra, u32 val, u32 offset)
{
	void __iomem *addr = tegra->regs + offset;

	writel_relaxed(val, addr);
}
228
/* Read a 32-bit register from a monitored device's register window. */
static u32 device_readl(struct tegra_devfreq_device *dev, u32 offset)
{
	void __iomem *addr = dev->regs + offset;

	return readl_relaxed(addr);
}
233
/* Write a 32-bit register in a monitored device's register window. */
static void device_writel(struct tegra_devfreq_device *dev, u32 val,
			  u32 offset)
{
	void __iomem *addr = dev->regs + offset;

	writel_relaxed(val, addr);
}
239
do_percent(unsigned long long val,unsigned int pct)240 static unsigned long do_percent(unsigned long long val, unsigned int pct)
241 {
242 val = val * pct;
243 do_div(val, 100);
244
245 /*
246 * High freq + high boosting percent + large polling interval are
247 * resulting in integer overflow when watermarks are calculated.
248 */
249 return min_t(u64, val, U32_MAX);
250 }
251
tegra_devfreq_update_avg_wmark(struct tegra_devfreq * tegra,struct tegra_devfreq_device * dev)252 static void tegra_devfreq_update_avg_wmark(struct tegra_devfreq *tegra,
253 struct tegra_devfreq_device *dev)
254 {
255 u32 avg_band_freq = tegra->max_freq * ACTMON_DEFAULT_AVG_BAND / KHZ;
256 u32 band = avg_band_freq * tegra->devfreq->profile->polling_ms;
257 u32 avg;
258
259 avg = min(dev->avg_count, U32_MAX - band);
260 device_writel(dev, avg + band, ACTMON_DEV_AVG_UPPER_WMARK);
261
262 avg = max(dev->avg_count, band);
263 device_writel(dev, avg - band, ACTMON_DEV_AVG_LOWER_WMARK);
264 }
265
tegra_devfreq_update_wmark(struct tegra_devfreq * tegra,struct tegra_devfreq_device * dev)266 static void tegra_devfreq_update_wmark(struct tegra_devfreq *tegra,
267 struct tegra_devfreq_device *dev)
268 {
269 u32 val = tegra->cur_freq * tegra->devfreq->profile->polling_ms;
270
271 device_writel(dev, do_percent(val, dev->config->boost_up_threshold),
272 ACTMON_DEV_UPPER_WMARK);
273
274 device_writel(dev, do_percent(val, dev->config->boost_down_threshold),
275 ACTMON_DEV_LOWER_WMARK);
276 }
277
/*
 * Per-device interrupt handling: resample the average count, refresh the
 * average watermarks, and grow/shrink boost_freq according to which
 * consecutive-watermark interrupt fired. Runs with devfreq->lock held
 * (called from actmon_thread_isr).
 */
static void actmon_isr_device(struct tegra_devfreq *tegra,
			      struct tegra_devfreq_device *dev)
{
	u32 intr_status, dev_ctrl;

	dev->avg_count = device_readl(dev, ACTMON_DEV_AVG_COUNT);
	tegra_devfreq_update_avg_wmark(tegra, dev);

	intr_status = device_readl(dev, ACTMON_DEV_INTR_STATUS);
	dev_ctrl = device_readl(dev, ACTMON_DEV_CTRL);

	if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_UPPER) {
		/*
		 * new_boost = min(old_boost * up_coef + step, max_freq)
		 */
		dev->boost_freq = do_percent(dev->boost_freq,
					     dev->config->boost_up_coeff);
		dev->boost_freq += ACTMON_BOOST_FREQ_STEP;

		/* re-arm lower-watermark events so boost can decay again */
		dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;

		if (dev->boost_freq >= tegra->max_freq) {
			/* boost saturated: further upper events are useless */
			dev_ctrl &= ~ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
			dev->boost_freq = tegra->max_freq;
		}
	} else if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_LOWER) {
		/*
		 * new_boost = old_boost * down_coef
		 * or 0 if (old_boost * down_coef < step / 2)
		 */
		dev->boost_freq = do_percent(dev->boost_freq,
					     dev->config->boost_down_coeff);

		/* re-arm upper-watermark events so boost can grow again */
		dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;

		if (dev->boost_freq < (ACTMON_BOOST_FREQ_STEP >> 1)) {
			/* boost decayed away: stop the lower events */
			dev_ctrl &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
			dev->boost_freq = 0;
		}
	}

	device_writel(dev, dev_ctrl, ACTMON_DEV_CTRL);

	device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);
}
323
actmon_cpu_to_emc_rate(struct tegra_devfreq * tegra,unsigned long cpu_freq)324 static unsigned long actmon_cpu_to_emc_rate(struct tegra_devfreq *tegra,
325 unsigned long cpu_freq)
326 {
327 unsigned int i;
328 const struct tegra_actmon_emc_ratio *ratio = actmon_emc_ratios;
329
330 for (i = 0; i < ARRAY_SIZE(actmon_emc_ratios); i++, ratio++) {
331 if (cpu_freq >= ratio->cpu_freq) {
332 if (ratio->emc_freq >= tegra->max_freq)
333 return tegra->max_freq;
334 else
335 return ratio->emc_freq;
336 }
337 }
338
339 return 0;
340 }
341
actmon_device_target_freq(struct tegra_devfreq * tegra,struct tegra_devfreq_device * dev)342 static unsigned long actmon_device_target_freq(struct tegra_devfreq *tegra,
343 struct tegra_devfreq_device *dev)
344 {
345 unsigned int avg_sustain_coef;
346 unsigned long target_freq;
347
348 target_freq = dev->avg_count / tegra->devfreq->profile->polling_ms;
349 avg_sustain_coef = 100 * 100 / dev->config->boost_up_threshold;
350 target_freq = do_percent(target_freq, avg_sustain_coef);
351
352 return target_freq;
353 }
354
actmon_update_target(struct tegra_devfreq * tegra,struct tegra_devfreq_device * dev)355 static void actmon_update_target(struct tegra_devfreq *tegra,
356 struct tegra_devfreq_device *dev)
357 {
358 unsigned long cpu_freq = 0;
359 unsigned long static_cpu_emc_freq = 0;
360
361 dev->target_freq = actmon_device_target_freq(tegra, dev);
362
363 if (dev->config->avg_dependency_threshold &&
364 dev->config->avg_dependency_threshold <= dev->target_freq) {
365 cpu_freq = cpufreq_quick_get(0);
366 static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
367
368 dev->target_freq += dev->boost_freq;
369 dev->target_freq = max(dev->target_freq, static_cpu_emc_freq);
370 } else {
371 dev->target_freq += dev->boost_freq;
372 }
373 }
374
actmon_thread_isr(int irq,void * data)375 static irqreturn_t actmon_thread_isr(int irq, void *data)
376 {
377 struct tegra_devfreq *tegra = data;
378 bool handled = false;
379 unsigned int i;
380 u32 val;
381
382 mutex_lock(&tegra->devfreq->lock);
383
384 val = actmon_readl(tegra, ACTMON_GLB_STATUS);
385 for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
386 if (val & tegra->devices[i].config->irq_mask) {
387 actmon_isr_device(tegra, tegra->devices + i);
388 handled = true;
389 }
390 }
391
392 if (handled)
393 update_devfreq(tegra->devfreq);
394
395 mutex_unlock(&tegra->devfreq->lock);
396
397 return handled ? IRQ_HANDLED : IRQ_NONE;
398 }
399
tegra_actmon_clk_notify_cb(struct notifier_block * nb,unsigned long action,void * ptr)400 static int tegra_actmon_clk_notify_cb(struct notifier_block *nb,
401 unsigned long action, void *ptr)
402 {
403 struct clk_notifier_data *data = ptr;
404 struct tegra_devfreq *tegra;
405 struct tegra_devfreq_device *dev;
406 unsigned int i;
407
408 if (action != POST_RATE_CHANGE)
409 return NOTIFY_OK;
410
411 tegra = container_of(nb, struct tegra_devfreq, clk_rate_change_nb);
412
413 tegra->cur_freq = data->new_rate / KHZ;
414
415 for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
416 dev = &tegra->devices[i];
417
418 tegra_devfreq_update_wmark(tegra, dev);
419 }
420
421 return NOTIFY_OK;
422 }
423
tegra_actmon_delayed_update(struct work_struct * work)424 static void tegra_actmon_delayed_update(struct work_struct *work)
425 {
426 struct tegra_devfreq *tegra = container_of(work, struct tegra_devfreq,
427 cpufreq_update_work.work);
428
429 mutex_lock(&tegra->devfreq->lock);
430 update_devfreq(tegra->devfreq);
431 mutex_unlock(&tegra->devfreq->lock);
432 }
433
434 static unsigned long
tegra_actmon_cpufreq_contribution(struct tegra_devfreq * tegra,unsigned int cpu_freq)435 tegra_actmon_cpufreq_contribution(struct tegra_devfreq *tegra,
436 unsigned int cpu_freq)
437 {
438 struct tegra_devfreq_device *actmon_dev = &tegra->devices[MCCPU];
439 unsigned long static_cpu_emc_freq, dev_freq;
440
441 dev_freq = actmon_device_target_freq(tegra, actmon_dev);
442
443 /* check whether CPU's freq is taken into account at all */
444 if (dev_freq < actmon_dev->config->avg_dependency_threshold)
445 return 0;
446
447 static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
448
449 if (dev_freq + actmon_dev->boost_freq >= static_cpu_emc_freq)
450 return 0;
451
452 return static_cpu_emc_freq;
453 }
454
/*
 * CPUFreq transition notifier: decide whether the CPU frequency change
 * affects the EMC target and, if it might, schedule a deferred devfreq
 * update rather than re-evaluating synchronously.
 */
static int tegra_actmon_cpu_notify_cb(struct notifier_block *nb,
				      unsigned long action, void *ptr)
{
	struct cpufreq_freqs *freqs = ptr;
	struct tegra_devfreq *tegra;
	unsigned long old, new, delay;

	if (action != CPUFREQ_POSTCHANGE)
		return NOTIFY_OK;

	tegra = container_of(nb, struct tegra_devfreq, cpu_rate_change_nb);

	/*
	 * Quickly check whether CPU frequency should be taken into account
	 * at all, without blocking CPUFreq's core.
	 */
	if (mutex_trylock(&tegra->devfreq->lock)) {
		old = tegra_actmon_cpufreq_contribution(tegra, freqs->old);
		new = tegra_actmon_cpufreq_contribution(tegra, freqs->new);
		mutex_unlock(&tegra->devfreq->lock);

		/*
		 * If CPU's frequency shouldn't be taken into account at
		 * the moment, then there is no need to update the devfreq's
		 * state because ISR will re-check CPU's frequency on the
		 * next interrupt.
		 */
		if (old == new)
			return NOTIFY_OK;
	}

	/*
	 * CPUFreq driver should support CPUFREQ_ASYNC_NOTIFICATION in order
	 * to allow asynchronous notifications. This means we can't block
	 * here for too long, otherwise CPUFreq's core will complain with a
	 * warning splat.
	 */
	delay = msecs_to_jiffies(ACTMON_SAMPLING_PERIOD);
	schedule_delayed_work(&tegra->cpufreq_update_work, delay);

	return NOTIFY_OK;
}
497
/*
 * Initialize one ACTMON device for monitoring: reset its soft state,
 * seed the hardware averaging machinery, program all watermarks, and
 * finally enable the device with interrupts armed.
 */
static void tegra_actmon_configure_device(struct tegra_devfreq *tegra,
					  struct tegra_devfreq_device *dev)
{
	u32 val = 0;

	/* reset boosting on governor's restart */
	dev->boost_freq = 0;

	dev->target_freq = tegra->cur_freq;

	/* seed the hardware average with the expected cycles per period */
	dev->avg_count = tegra->cur_freq * tegra->devfreq->profile->polling_ms;
	device_writel(dev, dev->avg_count, ACTMON_DEV_INIT_AVG);

	tegra_devfreq_update_avg_wmark(tegra, dev);
	tegra_devfreq_update_wmark(tegra, dev);

	device_writel(dev, tegra->soc->count_weight, ACTMON_DEV_COUNT_WEIGHT);
	/* drop any interrupt left over from a previous run */
	device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);

	val |= ACTMON_DEV_CTRL_ENB_PERIODIC;
	val |= (ACTMON_AVERAGE_WINDOW_LOG2 - 1)
		<< ACTMON_DEV_CTRL_K_VAL_SHIFT;
	val |= (ACTMON_BELOW_WMARK_WINDOW - 1)
		<< ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT;
	val |= (ACTMON_ABOVE_WMARK_WINDOW - 1)
		<< ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT;
	val |= ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN;
	val |= ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
	val |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
	val |= ACTMON_DEV_CTRL_ENB;

	/* the control write enables the device as the last step */
	device_writel(dev, val, ACTMON_DEV_CTRL);
}
531
tegra_actmon_stop_devices(struct tegra_devfreq * tegra)532 static void tegra_actmon_stop_devices(struct tegra_devfreq *tegra)
533 {
534 struct tegra_devfreq_device *dev = tegra->devices;
535 unsigned int i;
536
537 for (i = 0; i < ARRAY_SIZE(tegra->devices); i++, dev++) {
538 device_writel(dev, ACTMON_DEV_CTRL_STOP, ACTMON_DEV_CTRL);
539 device_writel(dev, ACTMON_INTR_STATUS_CLEAR,
540 ACTMON_DEV_INTR_STATUS);
541 }
542 }
543
/*
 * (Re)arm activity monitoring: program the sampling period, register the
 * clk and cpufreq notifiers, configure every device and enable the IRQ.
 * A no-op when polling is disabled or the governor was never started.
 * The registration order matters: the clk notifier must be in place
 * before the devices are configured so the watermarks track rate changes.
 */
static int tegra_actmon_resume(struct tegra_devfreq *tegra)
{
	unsigned int i;
	int err;

	if (!tegra->devfreq->profile->polling_ms || !tegra->started)
		return 0;

	/* hardware takes the period minus one */
	actmon_writel(tegra, tegra->devfreq->profile->polling_ms - 1,
		      ACTMON_GLB_PERIOD_CTRL);

	/*
	 * CLK notifications are needed in order to reconfigure the upper
	 * consecutive watermark in accordance to the actual clock rate
	 * to avoid unnecessary upper interrupts.
	 */
	err = clk_notifier_register(tegra->emc_clock,
				    &tegra->clk_rate_change_nb);
	if (err) {
		dev_err(tegra->devfreq->dev.parent,
			"Failed to register rate change notifier\n");
		return err;
	}

	tegra->cur_freq = clk_get_rate(tegra->emc_clock) / KHZ;

	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++)
		tegra_actmon_configure_device(tegra, &tegra->devices[i]);

	/*
	 * We are estimating CPU's memory bandwidth requirement based on
	 * amount of memory accesses and system's load, judging by CPU's
	 * frequency. We also don't want to receive events about CPU's
	 * frequency transaction when governor is stopped, hence notifier
	 * is registered dynamically.
	 */
	err = cpufreq_register_notifier(&tegra->cpu_rate_change_nb,
					CPUFREQ_TRANSITION_NOTIFIER);
	if (err) {
		dev_err(tegra->devfreq->dev.parent,
			"Failed to register rate change notifier: %d\n", err);
		goto err_stop;
	}

	enable_irq(tegra->irq);

	return 0;

err_stop:
	/* undo everything done above, in reverse order */
	tegra_actmon_stop_devices(tegra);

	clk_notifier_unregister(tegra->emc_clock, &tegra->clk_rate_change_nb);

	return err;
}
599
tegra_actmon_start(struct tegra_devfreq * tegra)600 static int tegra_actmon_start(struct tegra_devfreq *tegra)
601 {
602 int ret = 0;
603
604 if (!tegra->started) {
605 tegra->started = true;
606
607 ret = tegra_actmon_resume(tegra);
608 if (ret)
609 tegra->started = false;
610 }
611
612 return ret;
613 }
614
/*
 * Quiesce activity monitoring without clearing the started flag, so a
 * later tegra_actmon_resume() restarts it. Teardown order mirrors
 * tegra_actmon_resume(): IRQ off first so no handler races the notifier
 * removal, cpufreq notifier and its pending work next, then the devices
 * and finally the clk notifier.
 */
static void tegra_actmon_pause(struct tegra_devfreq *tegra)
{
	if (!tegra->devfreq->profile->polling_ms || !tegra->started)
		return;

	disable_irq(tegra->irq);

	cpufreq_unregister_notifier(&tegra->cpu_rate_change_nb,
				    CPUFREQ_TRANSITION_NOTIFIER);

	cancel_delayed_work_sync(&tegra->cpufreq_update_work);

	tegra_actmon_stop_devices(tegra);

	clk_notifier_unregister(tegra->emc_clock, &tegra->clk_rate_change_nb);
}
631
/* Fully stop activity monitoring; a subsequent resume will be a no-op. */
static void tegra_actmon_stop(struct tegra_devfreq *tegra)
{
	tegra_actmon_pause(tegra);
	tegra->started = false;
}
637
/*
 * devfreq .target callback: pick the OPP recommended for *freq (Hz) and
 * apply it. *freq is updated by devfreq_recommended_opp() to the chosen
 * rate.
 */
static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
				u32 flags)
{
	struct dev_pm_opp *opp;
	int err;

	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp)) {
		dev_err(dev, "Failed to find opp for %lu Hz\n", *freq);
		return PTR_ERR(opp);
	}

	err = dev_pm_opp_set_opp(dev, opp);

	/* drop the reference taken by devfreq_recommended_opp() */
	dev_pm_opp_put(opp);

	return err;
}
655
tegra_devfreq_get_dev_status(struct device * dev,struct devfreq_dev_status * stat)656 static int tegra_devfreq_get_dev_status(struct device *dev,
657 struct devfreq_dev_status *stat)
658 {
659 struct tegra_devfreq *tegra = dev_get_drvdata(dev);
660 struct tegra_devfreq_device *actmon_dev;
661 unsigned long cur_freq;
662
663 cur_freq = READ_ONCE(tegra->cur_freq);
664
665 /* To be used by the tegra governor */
666 stat->private_data = tegra;
667
668 /* The below are to be used by the other governors */
669 stat->current_frequency = cur_freq * KHZ;
670
671 actmon_dev = &tegra->devices[MCALL];
672
673 /* Number of cycles spent on memory access */
674 stat->busy_time = device_readl(actmon_dev, ACTMON_DEV_AVG_COUNT);
675
676 /* The bus can be considered to be saturated way before 100% */
677 stat->busy_time *= 100 / BUS_SATURATION_RATIO;
678
679 /* Number of cycles in a sampling period */
680 stat->total_time = tegra->devfreq->profile->polling_ms * cur_freq;
681
682 stat->busy_time = min(stat->busy_time, stat->total_time);
683
684 return 0;
685 }
686
/* devfreq profile; initial_freq is filled in at probe time */
static struct devfreq_dev_profile tegra_devfreq_profile = {
	.polling_ms = ACTMON_SAMPLING_PERIOD,
	.target = tegra_devfreq_target,
	.get_dev_status = tegra_devfreq_get_dev_status,
	.is_cooling_device = true,
};
693
tegra_governor_get_target(struct devfreq * devfreq,unsigned long * freq)694 static int tegra_governor_get_target(struct devfreq *devfreq,
695 unsigned long *freq)
696 {
697 struct devfreq_dev_status *stat;
698 struct tegra_devfreq *tegra;
699 struct tegra_devfreq_device *dev;
700 unsigned long target_freq = 0;
701 unsigned int i;
702 int err;
703
704 err = devfreq_update_stats(devfreq);
705 if (err)
706 return err;
707
708 stat = &devfreq->last_status;
709
710 tegra = stat->private_data;
711
712 for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
713 dev = &tegra->devices[i];
714
715 actmon_update_target(tegra, dev);
716
717 target_freq = max(target_freq, dev->target_freq);
718 }
719
720 /*
721 * tegra-devfreq driver operates with KHz units, while OPP table
722 * entries use Hz units. Hence we need to convert the units for the
723 * devfreq core.
724 */
725 *freq = target_freq * KHZ;
726
727 return 0;
728 }
729
/*
 * Governor .event_handler: start/stop/suspend/resume the ACTMON hardware
 * in lockstep with the devfreq monitor, and validate polling-interval
 * updates against the hardware limit.
 */
static int tegra_governor_event_handler(struct devfreq *devfreq,
					unsigned int event, void *data)
{
	struct tegra_devfreq *tegra = dev_get_drvdata(devfreq->dev.parent);
	unsigned int *new_delay = data;
	int ret = 0;

	/*
	 * Couple devfreq-device with the governor early because it is
	 * needed at the moment of governor's start (used by ISR).
	 */
	tegra->devfreq = devfreq;

	switch (event) {
	case DEVFREQ_GOV_START:
		devfreq_monitor_start(devfreq);
		ret = tegra_actmon_start(tegra);
		break;

	case DEVFREQ_GOV_STOP:
		tegra_actmon_stop(tegra);
		devfreq_monitor_stop(devfreq);
		break;

	case DEVFREQ_GOV_UPDATE_INTERVAL:
		/*
		 * ACTMON hardware supports up to 256 milliseconds for the
		 * sampling period.
		 */
		if (*new_delay > 256) {
			ret = -EINVAL;
			break;
		}

		/* pause monitoring while the period register is rewritten */
		tegra_actmon_pause(tegra);
		devfreq_update_interval(devfreq, new_delay);
		ret = tegra_actmon_resume(tegra);
		break;

	case DEVFREQ_GOV_SUSPEND:
		tegra_actmon_stop(tegra);
		devfreq_monitor_suspend(devfreq);
		break;

	case DEVFREQ_GOV_RESUME:
		devfreq_monitor_resume(devfreq);
		ret = tegra_actmon_start(tegra);
		break;
	}

	return ret;
}
782
/* IRQ-driven, Tegra-only governor backing the "tegra_actmon" policy */
static struct devfreq_governor tegra_devfreq_governor = {
	.name = "tegra_actmon",
	.attrs = DEVFREQ_GOV_ATTR_POLLING_INTERVAL,
	.flags = DEVFREQ_GOV_FLAG_IMMUTABLE
	       | DEVFREQ_GOV_FLAG_IRQ_DRIVEN,
	.get_target_freq = tegra_governor_get_target,
	.event_handler = tegra_governor_event_handler,
};
791
/*
 * Probe: acquire resources (regs, reset, clocks, IRQ), populate the OPP
 * table filtered by the SoC speedo ID, reset and clock the ACTMON block,
 * then register the governor and the devfreq device. Failures unwind in
 * reverse order through the goto chain.
 */
static int tegra_devfreq_probe(struct platform_device *pdev)
{
	/* OPPs are filtered by speedo ID via the supported-hw bitmap */
	u32 hw_version = BIT(tegra_sku_info.soc_speedo_id);
	struct tegra_devfreq_device *dev;
	struct tegra_devfreq *tegra;
	struct devfreq *devfreq;
	unsigned int i;
	long rate;
	int err;

	tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
	if (!tegra)
		return -ENOMEM;

	tegra->soc = of_device_get_match_data(&pdev->dev);

	tegra->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(tegra->regs))
		return PTR_ERR(tegra->regs);

	tegra->reset = devm_reset_control_get(&pdev->dev, "actmon");
	if (IS_ERR(tegra->reset)) {
		dev_err(&pdev->dev, "Failed to get reset\n");
		return PTR_ERR(tegra->reset);
	}

	tegra->clock = devm_clk_get(&pdev->dev, "actmon");
	if (IS_ERR(tegra->clock)) {
		dev_err(&pdev->dev, "Failed to get actmon clock\n");
		return PTR_ERR(tegra->clock);
	}

	tegra->emc_clock = devm_clk_get(&pdev->dev, "emc");
	if (IS_ERR(tegra->emc_clock))
		return dev_err_probe(&pdev->dev, PTR_ERR(tegra->emc_clock),
				     "Failed to get emc clock\n");

	err = platform_get_irq(pdev, 0);
	if (err < 0)
		return err;

	tegra->irq = err;

	/* the IRQ stays masked until the governor starts (enable_irq) */
	irq_set_status_flags(tegra->irq, IRQ_NOAUTOEN);

	err = devm_request_threaded_irq(&pdev->dev, tegra->irq, NULL,
					actmon_thread_isr, IRQF_ONESHOT,
					"tegra-devfreq", tegra);
	if (err) {
		dev_err(&pdev->dev, "Interrupt request failed: %d\n", err);
		return err;
	}

	tegra->opp_table = dev_pm_opp_set_supported_hw(&pdev->dev,
						       &hw_version, 1);
	err = PTR_ERR_OR_ZERO(tegra->opp_table);
	if (err) {
		dev_err(&pdev->dev, "Failed to set supported HW: %d\n", err);
		return err;
	}

	err = dev_pm_opp_of_add_table_noclk(&pdev->dev, 0);
	if (err) {
		dev_err(&pdev->dev, "Failed to add OPP table: %d\n", err);
		goto put_hw;
	}

	err = clk_prepare_enable(tegra->clock);
	if (err) {
		dev_err(&pdev->dev,
			"Failed to prepare and enable ACTMON clock\n");
		goto remove_table;
	}

	err = reset_control_reset(tegra->reset);
	if (err) {
		dev_err(&pdev->dev, "Failed to reset hardware: %d\n", err);
		goto disable_clk;
	}

	/* highest reachable EMC rate bounds the governor's target */
	rate = clk_round_rate(tegra->emc_clock, ULONG_MAX);
	if (rate < 0) {
		dev_err(&pdev->dev, "Failed to round clock rate: %ld\n", rate);
		err = rate;
		goto disable_clk;
	}

	tegra->max_freq = rate / KHZ;

	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
		dev = tegra->devices + i;
		dev->config = tegra->soc->configs + i;
		dev->regs = tegra->regs + dev->config->offset;
	}

	platform_set_drvdata(pdev, tegra);

	tegra->clk_rate_change_nb.notifier_call = tegra_actmon_clk_notify_cb;
	tegra->cpu_rate_change_nb.notifier_call = tegra_actmon_cpu_notify_cb;

	INIT_DELAYED_WORK(&tegra->cpufreq_update_work,
			  tegra_actmon_delayed_update);

	err = devfreq_add_governor(&tegra_devfreq_governor);
	if (err) {
		dev_err(&pdev->dev, "Failed to add governor: %d\n", err);
		goto remove_opps;
	}

	tegra_devfreq_profile.initial_freq = clk_get_rate(tegra->emc_clock);

	devfreq = devfreq_add_device(&pdev->dev, &tegra_devfreq_profile,
				     "tegra_actmon", NULL);
	if (IS_ERR(devfreq)) {
		err = PTR_ERR(devfreq);
		goto remove_governor;
	}

	return 0;

remove_governor:
	devfreq_remove_governor(&tegra_devfreq_governor);

remove_opps:
	dev_pm_opp_remove_all_dynamic(&pdev->dev);

	reset_control_reset(tegra->reset);
disable_clk:
	clk_disable_unprepare(tegra->clock);
remove_table:
	dev_pm_opp_of_remove_table(&pdev->dev);
put_hw:
	dev_pm_opp_put_supported_hw(tegra->opp_table);

	return err;
}
928
/* Remove: undo probe in reverse order. */
static int tegra_devfreq_remove(struct platform_device *pdev)
{
	struct tegra_devfreq *tegra = platform_get_drvdata(pdev);

	/* the devfreq device must go before its governor */
	devfreq_remove_device(tegra->devfreq);
	devfreq_remove_governor(&tegra_devfreq_governor);

	reset_control_reset(tegra->reset);
	clk_disable_unprepare(tegra->clock);

	dev_pm_opp_of_remove_table(&pdev->dev);
	dev_pm_opp_put_supported_hw(tegra->opp_table);

	return 0;
}
944
/* Tegra124 SoC parameters */
static const struct tegra_devfreq_soc_data tegra124_soc = {
	.configs = tegra124_device_configs,

	/*
	 * Activity counter is incremented every 256 memory transactions,
	 * and each transaction takes 4 EMC clocks.
	 */
	.count_weight = 4 * 256,
};
954
/* Tegra30 SoC parameters; presumably 2 EMC clocks per transaction here */
static const struct tegra_devfreq_soc_data tegra30_soc = {
	.configs = tegra30_device_configs,
	.count_weight = 2 * 256,
};
959
/* DT match table selecting the per-SoC ACTMON parameters */
static const struct of_device_id tegra_devfreq_of_match[] = {
	{ .compatible = "nvidia,tegra30-actmon", .data = &tegra30_soc, },
	{ .compatible = "nvidia,tegra124-actmon", .data = &tegra124_soc, },
	{ },
};

MODULE_DEVICE_TABLE(of, tegra_devfreq_of_match);
967
/* Platform driver glue */
static struct platform_driver tegra_devfreq_driver = {
	.probe = tegra_devfreq_probe,
	.remove = tegra_devfreq_remove,
	.driver = {
		.name = "tegra-devfreq",
		.of_match_table = tegra_devfreq_of_match,
	},
};
module_platform_driver(tegra_devfreq_driver);
977
978 MODULE_LICENSE("GPL v2");
979 MODULE_DESCRIPTION("Tegra devfreq driver");
980 MODULE_AUTHOR("Tomeu Vizoso <tomeu.vizoso@collabora.com>");
981