// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 */

#include <linux/delay.h>
#include "dpu_hwio.h"
#include "dpu_hw_ctl.h"
#include "dpu_kms.h"
#include "dpu_trace.h"

#define   CTL_LAYER(lm) \
	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT(lm) \
	(0x40 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT2(lm) \
	(0x70 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT3(lm) \
	(0xA0 + (((lm) - LM_0) * 0x004))
#define   CTL_TOP                       0x014
#define   CTL_FLUSH                     0x018
#define   CTL_START                     0x01C
#define   CTL_PREPARE                   0x0d0
#define   CTL_SW_RESET                  0x030
#define   CTL_LAYER_EXTN_OFFSET         0x40
#define   CTL_INTF_ACTIVE               0x0F4
#define   CTL_INTF_FLUSH                0x110
#define   CTL_INTF_MASTER               0x134

#define CTL_MIXER_BORDER_OUT            BIT(24)
#define CTL_FLUSH_MASK_CTL              BIT(17)

#define DPU_REG_RESET_TIMEOUT_US        2000
#define INTF_IDX                        31

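/*
 * Look up the catalog entry for @ctl in @m and fill in the register map @b.
 * Returns the matching dpu_ctl_cfg, or ERR_PTR(-ENOMEM) if @ctl is not
 * present in the catalog.
 */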
static const struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
		const struct dpu_mdss_cfg *m,
		void __iomem *addr,
		struct dpu_hw_blk_reg_map *b)
{
	int i;

	for (i = 0; i < m->ctl_count; i++) {
		if (ctl == m->ctl[i].id) {
			b->base_off = addr;
			b->blk_off = m->ctl[i].base;
			b->length = m->ctl[i].len;
			b->hwversion = m->hwversion;
			b->log_mask = DPU_DBG_MASK_CTL;
			return &m->ctl[i];
		}
	}
	return ERR_PTR(-ENOMEM);
}

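/*
 * Return the maximum number of blend stages supported by layer mixer @lm,
 * or -EINVAL if @lm is not found in the @mixer array.
 */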
static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
		enum dpu_lm lm)
{
	int i;
	int stages = -EINVAL;

	for (i = 0; i < count; i++) {
		if (lm == mixer[i].id) {
			stages = mixer[i].sblk->maxblendstages;
			break;
		}
	}

	return stages;
}

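/*
 * Read back CTL_FLUSH; used by the trace points and by callers checking
 * whether a previously requested flush is still outstanding in hardware.
 */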
static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	return DPU_REG_READ(c, CTL_FLUSH);
}

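/* Write CTL_START to kick off the programmed double-buffered update */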
static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
				       dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}

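/* Write CTL_PREPARE to signal that the programmed configuration is ready */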
static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
					 dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
}

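/* Drop flush bits accumulated in software without writing them to hardware */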
static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
				     dpu_hw_ctl_get_flush_register(ctx));
	ctx->pending_flush_mask = 0x0;
}

static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
		u32 flushbits)
{
	trace_dpu_hw_ctl_update_pending_flush(flushbits,
					      ctx->pending_flush_mask);
	ctx->pending_flush_mask |= flushbits;
}

static inline void dpu_hw_ctl_update_pending_intf_flush(struct dpu_hw_ctl *ctx,
		u32 flushbits)
{
	ctx->pending_intf_flush_mask |= flushbits;
}

static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
{
	return ctx->pending_flush_mask;
}

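/*
 * Active-CTL (v1) flush: the per-interface flush bits live in their own
 * CTL_INTF_FLUSH register, gated by the single INTF bit in CTL_FLUSH.
 */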
static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
{
	if (ctx->pending_flush_mask & BIT(INTF_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
			      ctx->pending_intf_flush_mask);

	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
					dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

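/* Map a source pipe (SSPP) to its bit in CTL_FLUSH; 0 for unknown pipes */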
static uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
	enum dpu_sspp sspp)
{
	uint32_t flushbits = 0;

	switch (sspp) {
	case SSPP_VIG0:
		flushbits = BIT(0);
		break;
	case SSPP_VIG1:
		flushbits = BIT(1);
		break;
	case SSPP_VIG2:
		flushbits = BIT(2);
		break;
	case SSPP_VIG3:
		flushbits = BIT(18);
		break;
	case SSPP_RGB0:
		flushbits = BIT(3);
		break;
	case SSPP_RGB1:
		flushbits = BIT(4);
		break;
	case SSPP_RGB2:
		flushbits = BIT(5);
		break;
	case SSPP_RGB3:
		flushbits = BIT(19);
		break;
	case SSPP_DMA0:
		flushbits = BIT(11);
		break;
	case SSPP_DMA1:
		flushbits = BIT(12);
		break;
	case SSPP_DMA2:
		flushbits = BIT(24);
		break;
	case SSPP_DMA3:
		flushbits = BIT(25);
		break;
	case SSPP_CURSOR0:
		flushbits = BIT(22);
		break;
	case SSPP_CURSOR1:
		flushbits = BIT(23);
		break;
	default:
		break;
	}

	return flushbits;
}

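/*
 * Map a layer mixer to its CTL_FLUSH bit. Flushing a mixer also requires
 * flushing the CTL path itself, so CTL_FLUSH_MASK_CTL is OR'd in.
 */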
static uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm)
{
	uint32_t flushbits = 0;

	switch (lm) {
	case LM_0:
		flushbits = BIT(6);
		break;
	case LM_1:
		flushbits = BIT(7);
		break;
	case LM_2:
		flushbits = BIT(8);
		break;
	case LM_3:
		flushbits = BIT(9);
		break;
	case LM_4:
		flushbits = BIT(10);
		break;
	case LM_5:
		flushbits = BIT(20);
		break;
	default:
		return -EINVAL;
	}

	flushbits |= CTL_FLUSH_MASK_CTL;

	return flushbits;
}

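/* Legacy (pre-active-CTL) mapping: each interface has its own bit in the
 * top nibble of CTL_FLUSH.
 */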
static int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
		u32 *flushbits, enum dpu_intf intf)
{
	switch (intf) {
	case INTF_0:
		*flushbits |= BIT(31);
		break;
	case INTF_1:
		*flushbits |= BIT(30);
		break;
	case INTF_2:
		*flushbits |= BIT(29);
		break;
	case INTF_3:
		*flushbits |= BIT(28);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

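/*
 * On active-CTL hardware there is a single interface bit in CTL_FLUSH
 * regardless of @intf; the per-interface bits go into CTL_INTF_FLUSH
 * (see dpu_hw_ctl_trigger_flush_v1()).
 */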
static int dpu_hw_ctl_get_bitmask_intf_v1(struct dpu_hw_ctl *ctx,
		u32 *flushbits, enum dpu_intf intf)
{
	*flushbits |= BIT(INTF_IDX);
	return 0;
}

static int dpu_hw_ctl_active_get_bitmask_intf(struct dpu_hw_ctl *ctx,
		u32 *flushbits, enum dpu_intf intf)
{
	*flushbits |= BIT(intf - INTF_0);
	return 0;
}

static uint32_t dpu_hw_ctl_get_bitmask_dspp(struct dpu_hw_ctl *ctx,
	enum dpu_dspp dspp)
{
	uint32_t flushbits = 0;

	switch (dspp) {
	case DSPP_0:
		flushbits = BIT(13);
		break;
	case DSPP_1:
		flushbits = BIT(14);
		break;
	case DSPP_2:
		flushbits = BIT(15);
		break;
	case DSPP_3:
		flushbits = BIT(21);
		break;
	default:
		return 0;
	}

	return flushbits;
}

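/*
 * Poll CTL_SW_RESET until its self-clearing reset bit drops or @timeout_us
 * elapses. Returns the final bit state: 0 on success, 1 if still resetting.
 */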
static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	ktime_t timeout;
	u32 status;

	timeout = ktime_add_us(ktime_get(), timeout_us);

	/*
	 * The MDP takes around 30 us to finish resetting its CTL path, so
	 * sleeping 20-50 us between reads means the reset should normally
	 * be observed complete on the first poll after the write.
	 */
	do {
		status = DPU_REG_READ(c, CTL_SW_RESET);
		status &= 0x1;
		if (status)
			usleep_range(20, 50);
	} while (status && ktime_compare_safe(ktime_get(), timeout) < 0);

	return status;
}

static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
	DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
		return -EINVAL;

	return 0;
}

static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 status;

	status = DPU_REG_READ(c, CTL_SW_RESET);
	status &= 0x01;
	if (!status)
		return 0;

	pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
		pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
		return -EINVAL;
	}

	return 0;
}

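/* Detach every pipe from every mixer by zeroing all CTL_LAYER registers */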
static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	int i;

	for (i = 0; i < ctx->mixer_count; i++) {
		DPU_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0);
	}
}

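/*
 * Program the blend-stage assignment for mixer @lm. Each pipe's stage index
 * is packed into a small per-pipe field spread across CTL_LAYER and its
 * EXT/EXT2/EXT3 extensions: most pipes use a 3-bit value plus an overflow
 * bit in EXT, while DMA2/DMA3, the cursors, and RECT_1 of a multirect pipe
 * use 4-bit fields in EXT/EXT2/EXT3. A NULL @stage_cfg programs only
 * BORDER_OUT, detaching all pipes from the mixer.
 */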
static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
	u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
	int i, j;
	int stages;
	int pipes_per_stage;

	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
	if (stages < 0)
		return;

	if (test_bit(DPU_MIXER_SOURCESPLIT,
		&ctx->mixer_hw_caps->features))
		pipes_per_stage = PIPES_PER_STAGE;
	else
		pipes_per_stage = 1;

	mixercfg = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */

	if (!stage_cfg)
		goto exit;

	for (i = 0; i <= stages; i++) {
		/* overflow to ext register if 'i + 1 > 7' */
		mix = (i + 1) & 0x7;
		ext = i >= 7;

		for (j = 0; j < pipes_per_stage; j++) {
			enum dpu_sspp_multirect_index rect_index =
				stage_cfg->multirect_index[i][j];

			switch (stage_cfg->stage[i][j]) {
			case SSPP_VIG0:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
				} else {
					mixercfg |= mix << 0;
					mixercfg_ext |= ext << 0;
				}
				break;
			case SSPP_VIG1:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 4;
				} else {
					mixercfg |= mix << 3;
					mixercfg_ext |= ext << 2;
				}
				break;
			case SSPP_VIG2:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 6;
					mixercfg_ext |= ext << 4;
				}
				break;
			case SSPP_VIG3:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 26;
					mixercfg_ext |= ext << 6;
				}
				break;
			case SSPP_RGB0:
				mixercfg |= mix << 9;
				mixercfg_ext |= ext << 8;
				break;
			case SSPP_RGB1:
				mixercfg |= mix << 12;
				mixercfg_ext |= ext << 10;
				break;
			case SSPP_RGB2:
				mixercfg |= mix << 15;
				mixercfg_ext |= ext << 12;
				break;
			case SSPP_RGB3:
				mixercfg |= mix << 29;
				mixercfg_ext |= ext << 14;
				break;
			case SSPP_DMA0:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 18;
					mixercfg_ext |= ext << 16;
				}
				break;
			case SSPP_DMA1:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 21;
					mixercfg_ext |= ext << 18;
				}
				break;
			case SSPP_DMA2:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 16;
				} else {
					mix |= (i + 1) & 0xF;
					mixercfg_ext2 |= mix << 0;
				}
				break;
			case SSPP_DMA3:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 20;
				} else {
					mix |= (i + 1) & 0xF;
					mixercfg_ext2 |= mix << 4;
				}
				break;
			case SSPP_CURSOR0:
				mixercfg_ext |= ((i + 1) & 0xF) << 20;
				break;
			case SSPP_CURSOR1:
				mixercfg_ext |= ((i + 1) & 0xF) << 26;
				break;
			default:
				break;
			}
		}
	}

exit:
	DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
	DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
	DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
	DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
}

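/*
 * Active-CTL (v1) interface setup: mark the interface active in
 * CTL_INTF_ACTIVE and select video vs. command mode via CTL_TOP.
 */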
static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_active = 0;
	u32 mode_sel = 0;

	if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
		mode_sel |= BIT(17);

	intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
	intf_active |= BIT(cfg->intf - INTF_0);

	DPU_REG_WRITE(c, CTL_TOP, mode_sel);
	DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
}

static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_cfg = 0;

	intf_cfg |= (cfg->intf & 0xF) << 4;

	if (cfg->mode_3d) {
		intf_cfg |= BIT(19);
		intf_cfg |= (cfg->mode_3d - 0x1) << 20;
	}

	switch (cfg->intf_mode_sel) {
	case DPU_CTL_MODE_SEL_VID:
		intf_cfg &= ~BIT(17);
		intf_cfg &= ~(0x3 << 15);
		break;
	case DPU_CTL_MODE_SEL_CMD:
		intf_cfg |= BIT(17);
		intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
		break;
	default:
		pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
		return;
	}

	DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
}

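/*
 * Populate the ops table; hardware with DPU_CTL_ACTIVE_CFG gets the v1
 * (active-CTL) flush and interface-configuration variants.
 */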
static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
		unsigned long cap)
{
	if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
		ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
		ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf_v1;
		ops->get_bitmask_active_intf =
			dpu_hw_ctl_active_get_bitmask_intf;
		ops->update_pending_intf_flush =
			dpu_hw_ctl_update_pending_intf_flush;
	} else {
		ops->trigger_flush = dpu_hw_ctl_trigger_flush;
		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
		ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
	}
	ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
	ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
	ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
	ops->get_flush_register = dpu_hw_ctl_get_flush_register;
	ops->trigger_start = dpu_hw_ctl_trigger_start;
	ops->trigger_pending = dpu_hw_ctl_trigger_pending;
	ops->reset = dpu_hw_ctl_reset_control;
	ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
	ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
	ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
	ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
	ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
	ops->get_bitmask_dspp = dpu_hw_ctl_get_bitmask_dspp;
}

static struct dpu_hw_blk_ops dpu_hw_ops;

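/*
 * Allocate and initialize the CTL block driver object for @idx, mapping
 * its registers via the catalog @m. Returns an ERR_PTR on failure.
 */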
struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
		void __iomem *addr,
		const struct dpu_mdss_cfg *m)
{
	struct dpu_hw_ctl *c;
	const struct dpu_ctl_cfg *cfg;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	cfg = _ctl_offset(idx, m, addr, &c->hw);
	if (IS_ERR_OR_NULL(cfg)) {
		kfree(c);
		pr_err("failed to create dpu_hw_ctl %d\n", idx);
		return ERR_PTR(-EINVAL);
	}

	c->caps = cfg;
	_setup_ctl_ops(&c->ops, c->caps->features);
	c->idx = idx;
	c->mixer_count = m->mixer_count;
	c->mixer_hw_caps = m->mixer;

	dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);

	return c;
}

void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
{
	if (ctx)
		dpu_hw_blk_destroy(&ctx->base);
	kfree(ctx);
}