1 /* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13 #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
14
15 #include <linux/debugfs.h>
16 #include <linux/errno.h>
17 #include <linux/mutex.h>
18 #include <linux/sort.h>
19 #include <linux/clk.h>
20 #include <linux/bitmap.h>
21
22 #include "dpu_kms.h"
23 #include "dpu_trace.h"
24 #include "dpu_crtc.h"
25 #include "dpu_core_perf.h"
26
27 #define DPU_PERF_MODE_STRING_SIZE 128
28
/**
 * enum dpu_perf_mode - performance tuning mode
 * @DPU_PERF_MODE_NORMAL:  performance controlled by user mode client
 * @DPU_PERF_MODE_MINIMUM: performance bounded by minimum setting
 * @DPU_PERF_MODE_FIXED:   performance bounded by fixed setting
 * @DPU_PERF_MODE_MAX:     sentinel value, number of valid modes
 */
enum dpu_perf_mode {
	DPU_PERF_MODE_NORMAL,
	DPU_PERF_MODE_MINIMUM,
	DPU_PERF_MODE_FIXED,
	DPU_PERF_MODE_MAX
};
41
_dpu_crtc_get_kms(struct drm_crtc * crtc)42 static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
43 {
44 struct msm_drm_private *priv;
45
46 if (!crtc->dev || !crtc->dev->dev_private) {
47 DPU_ERROR("invalid device\n");
48 return NULL;
49 }
50
51 priv = crtc->dev->dev_private;
52 if (!priv || !priv->kms) {
53 DPU_ERROR("invalid kms\n");
54 return NULL;
55 }
56
57 return to_dpu_kms(priv->kms);
58 }
59
_dpu_core_perf_crtc_is_power_on(struct drm_crtc * crtc)60 static bool _dpu_core_perf_crtc_is_power_on(struct drm_crtc *crtc)
61 {
62 return dpu_crtc_is_enabled(crtc);
63 }
64
_dpu_core_video_mode_intf_connected(struct drm_crtc * crtc)65 static bool _dpu_core_video_mode_intf_connected(struct drm_crtc *crtc)
66 {
67 struct drm_crtc *tmp_crtc;
68 bool intf_connected = false;
69
70 if (!crtc)
71 goto end;
72
73 drm_for_each_crtc(tmp_crtc, crtc->dev) {
74 if ((dpu_crtc_get_intf_mode(tmp_crtc) == INTF_MODE_VIDEO) &&
75 _dpu_core_perf_crtc_is_power_on(tmp_crtc)) {
76 DPU_DEBUG("video interface connected crtc:%d\n",
77 tmp_crtc->base.id);
78 intf_connected = true;
79 goto end;
80 }
81 }
82
83 end:
84 return intf_connected;
85 }
86
_dpu_core_perf_calc_crtc(struct dpu_kms * kms,struct drm_crtc * crtc,struct drm_crtc_state * state,struct dpu_core_perf_params * perf)87 static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
88 struct drm_crtc *crtc,
89 struct drm_crtc_state *state,
90 struct dpu_core_perf_params *perf)
91 {
92 struct dpu_crtc_state *dpu_cstate;
93 int i;
94
95 if (!kms || !kms->catalog || !crtc || !state || !perf) {
96 DPU_ERROR("invalid parameters\n");
97 return;
98 }
99
100 dpu_cstate = to_dpu_crtc_state(state);
101 memset(perf, 0, sizeof(struct dpu_core_perf_params));
102
103 if (!dpu_cstate->bw_control) {
104 for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
105 perf->bw_ctl[i] = kms->catalog->perf.max_bw_high *
106 1000ULL;
107 perf->max_per_pipe_ib[i] = perf->bw_ctl[i];
108 }
109 perf->core_clk_rate = kms->perf.max_core_clk_rate;
110 } else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
111 for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
112 perf->bw_ctl[i] = 0;
113 perf->max_per_pipe_ib[i] = 0;
114 }
115 perf->core_clk_rate = 0;
116 } else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) {
117 for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
118 perf->bw_ctl[i] = kms->perf.fix_core_ab_vote;
119 perf->max_per_pipe_ib[i] = kms->perf.fix_core_ib_vote;
120 }
121 perf->core_clk_rate = kms->perf.fix_core_clk_rate;
122 }
123
124 DPU_DEBUG(
125 "crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu llcc_ib=%llu llcc_ab=%llu mem_ib=%llu mem_ab=%llu\n",
126 crtc->base.id, perf->core_clk_rate,
127 perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MNOC],
128 perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
129 perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_LLCC],
130 perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
131 perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_EBI],
132 perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI]);
133 }
134
dpu_core_perf_crtc_check(struct drm_crtc * crtc,struct drm_crtc_state * state)135 int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
136 struct drm_crtc_state *state)
137 {
138 u32 bw, threshold;
139 u64 bw_sum_of_intfs = 0;
140 enum dpu_crtc_client_type curr_client_type;
141 bool is_video_mode;
142 struct dpu_crtc_state *dpu_cstate;
143 struct drm_crtc *tmp_crtc;
144 struct dpu_kms *kms;
145 int i;
146
147 if (!crtc || !state) {
148 DPU_ERROR("invalid crtc\n");
149 return -EINVAL;
150 }
151
152 kms = _dpu_crtc_get_kms(crtc);
153 if (!kms || !kms->catalog) {
154 DPU_ERROR("invalid parameters\n");
155 return 0;
156 }
157
158 /* we only need bandwidth check on real-time clients (interfaces) */
159 if (dpu_crtc_get_client_type(crtc) == NRT_CLIENT)
160 return 0;
161
162 dpu_cstate = to_dpu_crtc_state(state);
163
164 /* obtain new values */
165 _dpu_core_perf_calc_crtc(kms, crtc, state, &dpu_cstate->new_perf);
166
167 for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
168 i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
169 bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl[i];
170 curr_client_type = dpu_crtc_get_client_type(crtc);
171
172 drm_for_each_crtc(tmp_crtc, crtc->dev) {
173 if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
174 (dpu_crtc_get_client_type(tmp_crtc) ==
175 curr_client_type) &&
176 (tmp_crtc != crtc)) {
177 struct dpu_crtc_state *tmp_cstate =
178 to_dpu_crtc_state(tmp_crtc->state);
179
180 DPU_DEBUG("crtc:%d bw:%llu ctrl:%d\n",
181 tmp_crtc->base.id,
182 tmp_cstate->new_perf.bw_ctl[i],
183 tmp_cstate->bw_control);
184 /*
185 * For bw check only use the bw if the
186 * atomic property has been already set
187 */
188 if (tmp_cstate->bw_control)
189 bw_sum_of_intfs +=
190 tmp_cstate->new_perf.bw_ctl[i];
191 }
192 }
193
194 /* convert bandwidth to kb */
195 bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
196 DPU_DEBUG("calculated bandwidth=%uk\n", bw);
197
198 is_video_mode = dpu_crtc_get_intf_mode(crtc) == INTF_MODE_VIDEO;
199 threshold = (is_video_mode ||
200 _dpu_core_video_mode_intf_connected(crtc)) ?
201 kms->catalog->perf.max_bw_low :
202 kms->catalog->perf.max_bw_high;
203
204 DPU_DEBUG("final threshold bw limit = %d\n", threshold);
205
206 if (!dpu_cstate->bw_control) {
207 DPU_DEBUG("bypass bandwidth check\n");
208 } else if (!threshold) {
209 DPU_ERROR("no bandwidth limits specified\n");
210 return -E2BIG;
211 } else if (bw > threshold) {
212 DPU_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw,
213 threshold);
214 return -E2BIG;
215 }
216 }
217
218 return 0;
219 }
220
/*
 * Aggregate the per-pipe instantaneous-bandwidth vote for @bus_id over
 * all enabled CRTCs that share @crtc's client type.
 *
 * NOTE(review): the aggregated vote is only computed and logged here;
 * no bus-scaling request is issued, so the function always returns 0.
 */
static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
		struct drm_crtc *crtc, u32 bus_id)
{
	struct dpu_core_perf_params perf = { { 0 } };
	enum dpu_crtc_client_type curr_client_type
		= dpu_crtc_get_client_type(crtc);
	struct drm_crtc *tmp_crtc;
	struct dpu_crtc_state *dpu_cstate;

	drm_for_each_crtc(tmp_crtc, crtc->dev) {
		if (!_dpu_core_perf_crtc_is_power_on(tmp_crtc) ||
		    curr_client_type != dpu_crtc_get_client_type(tmp_crtc))
			continue;

		dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);

		perf.max_per_pipe_ib[bus_id] =
			max(perf.max_per_pipe_ib[bus_id],
			dpu_cstate->new_perf.max_per_pipe_ib[bus_id]);

		DPU_DEBUG("crtc=%d bus_id=%d bw=%llu\n",
				tmp_crtc->base.id, bus_id,
				dpu_cstate->new_perf.bw_ctl[bus_id]);
	}
	return 0;
}
248
249 /**
250 * @dpu_core_perf_crtc_release_bw() - request zero bandwidth
251 * @crtc - pointer to a crtc
252 *
253 * Function checks a state variable for the crtc, if all pending commit
254 * requests are done, meaning no more bandwidth is needed, release
255 * bandwidth request.
256 */
dpu_core_perf_crtc_release_bw(struct drm_crtc * crtc)257 void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
258 {
259 struct drm_crtc *tmp_crtc;
260 struct dpu_crtc *dpu_crtc;
261 struct dpu_crtc_state *dpu_cstate;
262 struct dpu_kms *kms;
263 int i;
264
265 if (!crtc) {
266 DPU_ERROR("invalid crtc\n");
267 return;
268 }
269
270 kms = _dpu_crtc_get_kms(crtc);
271 if (!kms || !kms->catalog) {
272 DPU_ERROR("invalid kms\n");
273 return;
274 }
275
276 dpu_crtc = to_dpu_crtc(crtc);
277 dpu_cstate = to_dpu_crtc_state(crtc->state);
278
279 /* only do this for command mode rt client */
280 if (dpu_crtc_get_intf_mode(crtc) != INTF_MODE_CMD)
281 return;
282
283 /*
284 * If video interface present, cmd panel bandwidth cannot be
285 * released.
286 */
287 if (dpu_crtc_get_intf_mode(crtc) == INTF_MODE_CMD)
288 drm_for_each_crtc(tmp_crtc, crtc->dev) {
289 if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
290 dpu_crtc_get_intf_mode(tmp_crtc) ==
291 INTF_MODE_VIDEO)
292 return;
293 }
294
295 /* Release the bandwidth */
296 if (kms->perf.enable_bw_release) {
297 trace_dpu_cmd_release_bw(crtc->base.id);
298 DPU_DEBUG("Release BW crtc=%d\n", crtc->base.id);
299 for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
300 dpu_crtc->cur_perf.bw_ctl[i] = 0;
301 _dpu_core_perf_crtc_update_bus(kms, crtc, i);
302 }
303 }
304 }
305
/* Clamp @rate to the core clock's maximum and program it. */
static int _dpu_core_perf_set_core_clk_rate(struct dpu_kms *kms, u64 rate)
{
	struct dss_clk *core_clk = kms->perf.core_clk;

	/* a zero max_rate means "no limit configured" */
	if (core_clk->max_rate && rate > core_clk->max_rate)
		rate = core_clk->max_rate;

	core_clk->rate = rate;
	return msm_dss_clk_set_rate(core_clk, 1);
}
316
_dpu_core_perf_get_core_clk_rate(struct dpu_kms * kms)317 static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms)
318 {
319 u64 clk_rate = kms->perf.perf_tune.min_core_clk;
320 struct drm_crtc *crtc;
321 struct dpu_crtc_state *dpu_cstate;
322
323 drm_for_each_crtc(crtc, kms->dev) {
324 if (_dpu_core_perf_crtc_is_power_on(crtc)) {
325 dpu_cstate = to_dpu_crtc_state(crtc->state);
326 clk_rate = max(dpu_cstate->new_perf.core_clk_rate,
327 clk_rate);
328 clk_rate = clk_round_rate(kms->perf.core_clk->clk,
329 clk_rate);
330 }
331 }
332
333 if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED)
334 clk_rate = kms->perf.fix_core_clk_rate;
335
336 DPU_DEBUG("clk:%llu\n", clk_rate);
337
338 return clk_rate;
339 }
340
/**
 * dpu_core_perf_crtc_update - flush bandwidth and clock votes for a crtc
 * @crtc: target crtc
 * @params_changed: non-zero when called for a new commit request
 * @stop_req: true when the crtc is being stopped
 *
 * On a commit request, votes are raised immediately when the new
 * request is higher than the current one; at end of commit or stop,
 * votes are lowered when the new request is smaller.  The bus vote is
 * flushed before the clock so bandwidth is in place before the core
 * clock rate increases.
 *
 * Return: 0 on success, -EINVAL on invalid arguments, or the error
 * from the bus/clock update helpers.
 */
int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
		int params_changed, bool stop_req)
{
	struct dpu_core_perf_params *new, *old;
	int update_bus = 0, update_clk = 0;
	u64 clk_rate = 0;
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *dpu_cstate;
	int i;
	struct msm_drm_private *priv;
	struct dpu_kms *kms;
	int ret;

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return -EINVAL;
	}

	kms = _dpu_crtc_get_kms(crtc);
	if (!kms || !kms->catalog) {
		DPU_ERROR("invalid kms\n");
		return -EINVAL;
	}
	priv = kms->dev->dev_private;

	dpu_crtc = to_dpu_crtc(crtc);
	dpu_cstate = to_dpu_crtc_state(crtc->state);

	DPU_DEBUG("crtc:%d stop_req:%d core_clk:%llu\n",
			crtc->base.id, stop_req, kms->perf.core_clk_rate);

	old = &dpu_crtc->cur_perf;
	new = &dpu_cstate->new_perf;

	if (_dpu_core_perf_crtc_is_power_on(crtc) && !stop_req) {
		for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
			/*
			 * cases for bus bandwidth update.
			 * 1. new bandwidth vote - "ab or ib vote" is higher
			 *    than current vote for update request.
			 * 2. new bandwidth vote - "ab or ib vote" is lower
			 *    than current vote at end of commit or stop.
			 */
			bool vote_up = new->bw_ctl[i] > old->bw_ctl[i] ||
					new->max_per_pipe_ib[i] >
					old->max_per_pipe_ib[i];
			bool vote_down = new->bw_ctl[i] < old->bw_ctl[i] ||
					new->max_per_pipe_ib[i] <
					old->max_per_pipe_ib[i];

			if ((params_changed && vote_up) ||
			    (!params_changed && vote_down)) {
				DPU_DEBUG(
					"crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
					crtc->base.id, params_changed,
					new->bw_ctl[i], old->bw_ctl[i]);
				old->bw_ctl[i] = new->bw_ctl[i];
				old->max_per_pipe_ib[i] =
						new->max_per_pipe_ib[i];
				update_bus |= BIT(i);
			}
		}

		if ((params_changed &&
		     new->core_clk_rate > old->core_clk_rate) ||
		    (!params_changed &&
		     new->core_clk_rate < old->core_clk_rate)) {
			old->core_clk_rate = new->core_clk_rate;
			update_clk = 1;
		}
	} else {
		/* crtc disabled or stopping: drop every vote */
		DPU_DEBUG("crtc=%d disable\n", crtc->base.id);
		memset(old, 0, sizeof(*old));
		memset(new, 0, sizeof(*new));
		update_bus = ~0;
		update_clk = 1;
	}
	trace_dpu_perf_crtc_update(crtc->base.id,
			new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
			new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
			new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI],
			new->core_clk_rate, stop_req,
			update_bus, update_clk);

	for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
		if (!(update_bus & BIT(i)))
			continue;

		ret = _dpu_core_perf_crtc_update_bus(kms, crtc, i);
		if (ret) {
			DPU_ERROR("crtc-%d: failed to update bw vote for bus-%d\n",
					crtc->base.id, i);
			return ret;
		}
	}

	/*
	 * Update the clock after bandwidth vote to ensure
	 * bandwidth is available before clock rate is increased.
	 */
	if (update_clk) {
		clk_rate = _dpu_core_perf_get_core_clk_rate(kms);

		trace_dpu_core_perf_update_clk(kms->dev, stop_req, clk_rate);

		ret = _dpu_core_perf_set_core_clk_rate(kms, clk_rate);
		if (ret) {
			DPU_ERROR("failed to set %s clock rate %llu\n",
					kms->perf.core_clk->clk_name,
					clk_rate);
			return ret;
		}

		kms->perf.core_clk_rate = clk_rate;
		DPU_DEBUG("update clk rate = %lld HZ\n", clk_rate);
	}
	return 0;
}
456
457 #ifdef CONFIG_DEBUG_FS
458
_dpu_core_perf_mode_write(struct file * file,const char __user * user_buf,size_t count,loff_t * ppos)459 static ssize_t _dpu_core_perf_mode_write(struct file *file,
460 const char __user *user_buf, size_t count, loff_t *ppos)
461 {
462 struct dpu_core_perf *perf = file->private_data;
463 struct dpu_perf_cfg *cfg = &perf->catalog->perf;
464 u32 perf_mode = 0;
465 char buf[10];
466
467 if (!perf)
468 return -ENODEV;
469
470 if (count >= sizeof(buf))
471 return -EFAULT;
472
473 if (copy_from_user(buf, user_buf, count))
474 return -EFAULT;
475
476 buf[count] = 0; /* end of string */
477
478 if (kstrtouint(buf, 0, &perf_mode))
479 return -EFAULT;
480
481 if (perf_mode >= DPU_PERF_MODE_MAX)
482 return -EFAULT;
483
484 if (perf_mode == DPU_PERF_MODE_FIXED) {
485 DRM_INFO("fix performance mode\n");
486 } else if (perf_mode == DPU_PERF_MODE_MINIMUM) {
487 /* run the driver with max clk and BW vote */
488 perf->perf_tune.min_core_clk = perf->max_core_clk_rate;
489 perf->perf_tune.min_bus_vote =
490 (u64) cfg->max_bw_high * 1000;
491 DRM_INFO("minimum performance mode\n");
492 } else if (perf_mode == DPU_PERF_MODE_NORMAL) {
493 /* reset the perf tune params to 0 */
494 perf->perf_tune.min_core_clk = 0;
495 perf->perf_tune.min_bus_vote = 0;
496 DRM_INFO("normal performance mode\n");
497 }
498 perf->perf_tune.mode = perf_mode;
499
500 return count;
501 }
502
_dpu_core_perf_mode_read(struct file * file,char __user * buff,size_t count,loff_t * ppos)503 static ssize_t _dpu_core_perf_mode_read(struct file *file,
504 char __user *buff, size_t count, loff_t *ppos)
505 {
506 struct dpu_core_perf *perf = file->private_data;
507 int len = 0;
508 char buf[DPU_PERF_MODE_STRING_SIZE] = {'\0'};
509
510 if (!perf)
511 return -ENODEV;
512
513 if (*ppos)
514 return 0; /* the end */
515
516 len = snprintf(buf, sizeof(buf),
517 "mode %d min_mdp_clk %llu min_bus_vote %llu\n",
518 perf->perf_tune.mode,
519 perf->perf_tune.min_core_clk,
520 perf->perf_tune.min_bus_vote);
521 if (len < 0 || len >= sizeof(buf))
522 return 0;
523
524 if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
525 return -EFAULT;
526
527 *ppos += len; /* increase offset */
528
529 return len;
530 }
531
532 static const struct file_operations dpu_core_perf_mode_fops = {
533 .open = simple_open,
534 .read = _dpu_core_perf_mode_read,
535 .write = _dpu_core_perf_mode_write,
536 };
537
dpu_core_perf_debugfs_destroy(struct dpu_core_perf * perf)538 static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
539 {
540 debugfs_remove_recursive(perf->debugfs_root);
541 perf->debugfs_root = NULL;
542 }
543
dpu_core_perf_debugfs_init(struct dpu_core_perf * perf,struct dentry * parent)544 int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
545 struct dentry *parent)
546 {
547 struct dpu_mdss_cfg *catalog = perf->catalog;
548 struct msm_drm_private *priv;
549 struct dpu_kms *dpu_kms;
550
551 priv = perf->dev->dev_private;
552 if (!priv || !priv->kms) {
553 DPU_ERROR("invalid KMS reference\n");
554 return -EINVAL;
555 }
556
557 dpu_kms = to_dpu_kms(priv->kms);
558
559 perf->debugfs_root = debugfs_create_dir("core_perf", parent);
560 if (!perf->debugfs_root) {
561 DPU_ERROR("failed to create core perf debugfs\n");
562 return -EINVAL;
563 }
564
565 debugfs_create_u64("max_core_clk_rate", 0600, perf->debugfs_root,
566 &perf->max_core_clk_rate);
567 debugfs_create_u64("core_clk_rate", 0600, perf->debugfs_root,
568 &perf->core_clk_rate);
569 debugfs_create_u32("enable_bw_release", 0600, perf->debugfs_root,
570 (u32 *)&perf->enable_bw_release);
571 debugfs_create_u32("threshold_low", 0600, perf->debugfs_root,
572 (u32 *)&catalog->perf.max_bw_low);
573 debugfs_create_u32("threshold_high", 0600, perf->debugfs_root,
574 (u32 *)&catalog->perf.max_bw_high);
575 debugfs_create_u32("min_core_ib", 0600, perf->debugfs_root,
576 (u32 *)&catalog->perf.min_core_ib);
577 debugfs_create_u32("min_llcc_ib", 0600, perf->debugfs_root,
578 (u32 *)&catalog->perf.min_llcc_ib);
579 debugfs_create_u32("min_dram_ib", 0600, perf->debugfs_root,
580 (u32 *)&catalog->perf.min_dram_ib);
581 debugfs_create_file("perf_mode", 0600, perf->debugfs_root,
582 (u32 *)perf, &dpu_core_perf_mode_fops);
583 debugfs_create_u64("fix_core_clk_rate", 0600, perf->debugfs_root,
584 &perf->fix_core_clk_rate);
585 debugfs_create_u64("fix_core_ib_vote", 0600, perf->debugfs_root,
586 &perf->fix_core_ib_vote);
587 debugfs_create_u64("fix_core_ab_vote", 0600, perf->debugfs_root,
588 &perf->fix_core_ab_vote);
589
590 return 0;
591 }
592 #else
/* Stub when CONFIG_DEBUG_FS is disabled. */
static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
{
}
596
/* Stub when CONFIG_DEBUG_FS is disabled; always succeeds. */
int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
		struct dentry *parent)
{
	return 0;
}
602 #endif
603
dpu_core_perf_destroy(struct dpu_core_perf * perf)604 void dpu_core_perf_destroy(struct dpu_core_perf *perf)
605 {
606 if (!perf) {
607 DPU_ERROR("invalid parameters\n");
608 return;
609 }
610
611 dpu_core_perf_debugfs_destroy(perf);
612 perf->max_core_clk_rate = 0;
613 perf->core_clk = NULL;
614 perf->phandle = NULL;
615 perf->catalog = NULL;
616 perf->dev = NULL;
617 }
618
dpu_core_perf_init(struct dpu_core_perf * perf,struct drm_device * dev,struct dpu_mdss_cfg * catalog,struct dpu_power_handle * phandle,struct dss_clk * core_clk)619 int dpu_core_perf_init(struct dpu_core_perf *perf,
620 struct drm_device *dev,
621 struct dpu_mdss_cfg *catalog,
622 struct dpu_power_handle *phandle,
623 struct dss_clk *core_clk)
624 {
625 perf->dev = dev;
626 perf->catalog = catalog;
627 perf->phandle = phandle;
628 perf->core_clk = core_clk;
629
630 perf->max_core_clk_rate = core_clk->max_rate;
631 if (!perf->max_core_clk_rate) {
632 DPU_DEBUG("optional max core clk rate, use default\n");
633 perf->max_core_clk_rate = DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE;
634 }
635
636 return 0;
637 }
638