// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Collabora ltd. */
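/*
 * Devfreq integration for Panfrost: reports per-job-slot busy/idle time to
 * the devfreq core and scales the GPU clock and regulator between the OPPs
 * described in the device tree.
 */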
#include <linux/devfreq.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/clk.h>
#include <linux/regulator/consumer.h>

#include "panfrost_device.h"
#include "panfrost_devfreq.h"
#include "panfrost_features.h"
#include "panfrost_issues.h"
#include "panfrost_gpu.h"
#include "panfrost_regs.h"

static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, int slot);

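/*
 * devfreq ->target callback: pick the OPP recommended for the requested
 * frequency, then reprogram the regulator and the clock in an order that
 * keeps the voltage valid for the rate (raise the voltage before scaling
 * up, lower it only after scaling down).
 */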
static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
				   u32 flags)
{
	struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
	struct dev_pm_opp *opp;
	unsigned long old_clk_rate = pfdev->devfreq.cur_freq;
	unsigned long target_volt, target_rate;
	int err;

	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	target_rate = dev_pm_opp_get_freq(opp);
	target_volt = dev_pm_opp_get_voltage(opp);
	dev_pm_opp_put(opp);

	if (old_clk_rate == target_rate)
		return 0;

	/*
	 * If frequency scaling from low to high, adjust voltage first.
	 * If frequency scaling from high to low, adjust frequency first.
	 */
	if (old_clk_rate < target_rate) {
		err = regulator_set_voltage(pfdev->regulator, target_volt,
					    target_volt);
		if (err) {
			dev_err(dev, "Cannot set voltage %lu uV\n",
				target_volt);
			return err;
		}
	}

	err = clk_set_rate(pfdev->clock, target_rate);
	if (err) {
		dev_err(dev, "Cannot set frequency %lu (%d)\n", target_rate,
			err);
		regulator_set_voltage(pfdev->regulator, pfdev->devfreq.cur_volt,
				      pfdev->devfreq.cur_volt);
		return err;
	}

	if (old_clk_rate > target_rate) {
		err = regulator_set_voltage(pfdev->regulator, target_volt,
					    target_volt);
		if (err)
			dev_err(dev, "Cannot set voltage %lu uV\n", target_volt);
	}

	pfdev->devfreq.cur_freq = target_rate;
	pfdev->devfreq.cur_volt = target_volt;

	return 0;
}

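/* Restart busy/idle accounting for every job slot from the current time. */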
static void panfrost_devfreq_reset(struct panfrost_device *pfdev)
{
	ktime_t now = ktime_get();
	int i;

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		pfdev->devfreq.slot[i].busy_time = 0;
		pfdev->devfreq.slot[i].idle_time = 0;
		pfdev->devfreq.slot[i].time_last_update = now;
	}
}

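/*
 * devfreq ->get_dev_status callback: flush the utilization counters of every
 * job slot, report the aggregate busy time against slot 0's total window,
 * then restart the accounting for the next polling interval.
 */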
static int panfrost_devfreq_get_dev_status(struct device *dev,
					   struct devfreq_dev_status *status)
{
	struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
	int i;

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		panfrost_devfreq_update_utilization(pfdev, i);
	}

	status->current_frequency = clk_get_rate(pfdev->clock);
	status->total_time = ktime_to_ns(ktime_add(pfdev->devfreq.slot[0].busy_time,
						   pfdev->devfreq.slot[0].idle_time));

	status->busy_time = 0;
	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		status->busy_time += ktime_to_ns(pfdev->devfreq.slot[i].busy_time);
	}

	/* We're scheduling only to one core atm, so don't divide for now */
	/* status->busy_time /= NUM_JOB_SLOTS; */

	panfrost_devfreq_reset(pfdev);

	dev_dbg(pfdev->dev, "busy %lu total %lu %lu %% freq %lu MHz\n", status->busy_time,
		status->total_time,
		status->busy_time / (status->total_time / 100),
		status->current_frequency / 1000 / 1000);

	return 0;
}

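/* devfreq ->get_cur_freq callback: return the rate cached by the last OPP switch. */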
static int panfrost_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
{
	struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));

	*freq = pfdev->devfreq.cur_freq;

	return 0;
}

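/*
 * Device profile shared by every Panfrost GPU; the simple_ondemand governor
 * samples get_dev_status() once per polling interval.
 */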
static struct devfreq_dev_profile panfrost_devfreq_profile = {
	.polling_ms = 50, /* ~3 frames */
	.target = panfrost_devfreq_target,
	.get_dev_status = panfrost_devfreq_get_dev_status,
	.get_cur_freq = panfrost_devfreq_get_cur_freq,
};

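/*
 * Register devfreq for the GPU. The OPPs come from an operating-points-v2
 * table in the GPU's device-tree node; if none is present, devfreq is simply
 * skipped. A minimal table would look roughly like this (frequencies and
 * voltages below are illustrative, not taken from any real platform):
 *
 *	gpu_opp_table: opp-table {
 *		compatible = "operating-points-v2";
 *
 *		opp-200000000 {
 *			opp-hz = /bits/ 64 <200000000>;
 *			opp-microvolt = <825000>;
 *		};
 *		opp-400000000 {
 *			opp-hz = /bits/ 64 <400000000>;
 *			opp-microvolt = <925000>;
 *		};
 *	};
 *
 * with "operating-points-v2 = <&gpu_opp_table>;" in the GPU node itself.
 */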
int panfrost_devfreq_init(struct panfrost_device *pfdev)
{
	int ret;
	struct dev_pm_opp *opp;

	ret = dev_pm_opp_of_add_table(&pfdev->pdev->dev);
	if (ret == -ENODEV) /* Optional, continue without devfreq */
		return 0;
	else if (ret)
		return ret;

	panfrost_devfreq_reset(pfdev);

	pfdev->devfreq.cur_freq = clk_get_rate(pfdev->clock);

	opp = devfreq_recommended_opp(&pfdev->pdev->dev, &pfdev->devfreq.cur_freq, 0);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	panfrost_devfreq_profile.initial_freq = pfdev->devfreq.cur_freq;
	dev_pm_opp_put(opp);

	pfdev->devfreq.devfreq = devm_devfreq_add_device(&pfdev->pdev->dev,
			&panfrost_devfreq_profile, DEVFREQ_GOV_SIMPLE_ONDEMAND,
			NULL);
	if (IS_ERR(pfdev->devfreq.devfreq)) {
		DRM_DEV_ERROR(&pfdev->pdev->dev, "Couldn't initialize GPU devfreq\n");
		ret = PTR_ERR(pfdev->devfreq.devfreq);
		pfdev->devfreq.devfreq = NULL;
		dev_pm_opp_of_remove_table(&pfdev->pdev->dev);
		return ret;
	}

	return 0;
}

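/* Undo panfrost_devfreq_init(): drop the OPP table loaded from the device tree. */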
void panfrost_devfreq_fini(struct panfrost_device *pfdev)
{
	dev_pm_opp_of_remove_table(&pfdev->pdev->dev);
}

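/*
 * Resume: start from a clean utilization window, mark every slot idle, and
 * let the devfreq core resume polling.
 */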
void panfrost_devfreq_resume(struct panfrost_device *pfdev)
{
	int i;

	if (!pfdev->devfreq.devfreq)
		return;

	panfrost_devfreq_reset(pfdev);
	for (i = 0; i < NUM_JOB_SLOTS; i++)
		pfdev->devfreq.slot[i].busy = false;

	devfreq_resume_device(pfdev->devfreq.devfreq);
}

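/* Stop devfreq polling while the GPU is suspended. */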
void panfrost_devfreq_suspend(struct panfrost_device *pfdev)
{
	if (!pfdev->devfreq.devfreq)
		return;

	devfreq_suspend_device(pfdev->devfreq.devfreq);
}

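/*
 * Charge the time elapsed since the last update to either the busy or the
 * idle counter of the slot, depending on the state recorded at the last
 * transition.
 */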
static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, int slot)
{
	struct panfrost_devfreq_slot *devfreq_slot = &pfdev->devfreq.slot[slot];
	ktime_t now;
	ktime_t last;

	if (!pfdev->devfreq.devfreq)
		return;

	now = ktime_get();
	last = pfdev->devfreq.slot[slot].time_last_update;

	/* The slot has stayed in the state set at the last recorded transition */
	if (devfreq_slot->busy)
		pfdev->devfreq.slot[slot].busy_time += ktime_sub(now, last);
	else
		pfdev->devfreq.slot[slot].idle_time += ktime_sub(now, last);

	pfdev->devfreq.slot[slot].time_last_update = now;
}

/* The job scheduler is expected to call this at every busy <-> idle transition */
void panfrost_devfreq_record_transition(struct panfrost_device *pfdev, int slot)
{
	struct panfrost_devfreq_slot *devfreq_slot = &pfdev->devfreq.slot[slot];

	panfrost_devfreq_update_utilization(pfdev, slot);
	devfreq_slot->busy = !devfreq_slot->busy;
}