// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022 MediaTek Inc.
 * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
 */

#include <linux/clk.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/pm_runtime.h>
#include "mtk-mdp3-comp.h"
#include "mtk-mdp3-core.h"
#include "mtk-mdp3-regs.h"

#include "mdp_reg_rdma.h"
#include "mdp_reg_ccorr.h"
#include "mdp_reg_rsz.h"
#include "mdp_reg_wrot.h"
#include "mdp_reg_wdma.h"

static u32 mdp_comp_alias_id[MDP_COMP_TYPE_COUNT];

static inline const struct mdp_platform_config *
__get_plat_cfg(const struct mdp_comp_ctx *ctx)
{
        if (!ctx)
                return NULL;

        return ctx->comp->mdp_dev->mdp_data->mdp_cfg;
}

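/*
 * Report which components a context occupies as a bitmask. On platforms
 * where RDMA0 shares SRAM with RSZ1 (rdma_rsz1_sram_sharing), claiming
 * RDMA0 implicitly claims RSZ1 as well, so both bits are returned.
 */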
static s64 get_comp_flag(const struct mdp_comp_ctx *ctx)
{
        const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);

        if (mdp_cfg && mdp_cfg->rdma_rsz1_sram_sharing)
                if (ctx->comp->id == MDP_COMP_RDMA0)
                        return BIT(MDP_COMP_RDMA0) | BIT(MDP_COMP_RSZ1);

        return BIT(ctx->comp->id);
}

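/*
 * On 10-bit capable platforms RDMA0 may borrow the SRAM of RSZ1, so RSZ1
 * is switched off first; the engine is then soft-reset by asserting
 * MDP_RDMA_RESET and polling MDP_RDMA_MON_STA_1 bit 8.
 */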
static int init_rdma(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
        const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
        phys_addr_t base = ctx->comp->reg_base;
        u8 subsys_id = ctx->comp->subsys_id;

        if (mdp_cfg && mdp_cfg->rdma_support_10bit) {
                struct mdp_comp *prz1 = ctx->comp->mdp_dev->comp[MDP_COMP_RSZ1];

                /* Disable RSZ1 */
                if (ctx->comp->id == MDP_COMP_RDMA0 && prz1)
                        MM_REG_WRITE(cmd, subsys_id, prz1->reg_base, PRZ_ENABLE,
                                     0x0, BIT(0));
        }

        /* Reset RDMA */
        MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_RESET, BIT(0), BIT(0));
        MM_REG_POLL(cmd, subsys_id, base, MDP_RDMA_MON_STA_1, BIT(8), BIT(8));
        MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_RESET, 0x0, BIT(0));
        return 0;
}

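/*
 * Frame-level RDMA programming. Each MM_REG_WRITE() below is presumably
 * value-then-mask: only the masked bits of the register are updated,
 * which is why full-width fields pass 0xFFFFFFFF.
 */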
static int config_rdma_frame(struct mdp_comp_ctx *ctx,
                             struct mdp_cmdq_cmd *cmd,
                             const struct v4l2_rect *compose)
{
        const struct mdp_rdma_data *rdma = &ctx->param->rdma;
        const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
        u32 colorformat = ctx->input->buffer.format.colorformat;
        bool block10bit = MDP_COLOR_IS_10BIT_PACKED(colorformat);
        bool en_ufo = MDP_COLOR_IS_UFP(colorformat);
        phys_addr_t base = ctx->comp->reg_base;
        u8 subsys_id = ctx->comp->subsys_id;

        if (mdp_cfg && mdp_cfg->rdma_support_10bit) {
                if (block10bit)
                        MM_REG_WRITE(cmd, subsys_id, base,
                                     MDP_RDMA_RESV_DUMMY_0, 0x7, 0x7);
                else
                        MM_REG_WRITE(cmd, subsys_id, base,
                                     MDP_RDMA_RESV_DUMMY_0, 0x0, 0x7);
        }

        /* Setup smi control */
        MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_GMCIF_CON,
                     (7 << 4) +  /* burst type to 8 */
                     (1 << 16),  /* enable pre-ultra */
                     0x00030071);

        /* Setup source frame info */
        MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_CON, rdma->src_ctrl,
                     0x03C8FE0F);

        if (mdp_cfg)
                if (mdp_cfg->rdma_support_10bit && en_ufo) {
                        /* Setup source buffer base */
                        MM_REG_WRITE(cmd, subsys_id,
                                     base, MDP_RDMA_UFO_DEC_LENGTH_BASE_Y,
                                     rdma->ufo_dec_y, 0xFFFFFFFF);
                        MM_REG_WRITE(cmd, subsys_id,
                                     base, MDP_RDMA_UFO_DEC_LENGTH_BASE_C,
                                     rdma->ufo_dec_c, 0xFFFFFFFF);
                        /* Set 10bit source frame pitch */
                        if (block10bit)
                                MM_REG_WRITE(cmd, subsys_id,
                                             base, MDP_RDMA_MF_BKGD_SIZE_IN_PXL,
                                             rdma->mf_bkgd_in_pxl, 0x001FFFFF);
                }

        MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_CON, rdma->control,
                     0x1110);
        /* Setup source buffer base */
        MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_0, rdma->iova[0],
                     0xFFFFFFFF);
        MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_1, rdma->iova[1],
                     0xFFFFFFFF);
        MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_2, rdma->iova[2],
                     0xFFFFFFFF);
        /* Setup source buffer end */
        MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_0,
                     rdma->iova_end[0], 0xFFFFFFFF);
        MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_1,
                     rdma->iova_end[1], 0xFFFFFFFF);
        MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_2,
                     rdma->iova_end[2], 0xFFFFFFFF);
        /* Setup source frame pitch */
        MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_BKGD_SIZE_IN_BYTE,
                     rdma->mf_bkgd, 0x001FFFFF);
        MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SF_BKGD_SIZE_IN_BYTE,
                     rdma->sf_bkgd, 0x001FFFFF);
        /* Setup color transform */
        MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_TRANSFORM_0,
                     rdma->transform, 0x0F110000);

        return 0;
}

static int config_rdma_subfrm(struct mdp_comp_ctx *ctx,
                              struct mdp_cmdq_cmd *cmd, u32 index)
{
        const struct mdp_rdma_subfrm *subfrm = &ctx->param->rdma.subfrms[index];
        const struct img_comp_subfrm *csf = &ctx->param->subfrms[index];
        const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
        u32 colorformat = ctx->input->buffer.format.colorformat;
        bool block10bit = MDP_COLOR_IS_10BIT_PACKED(colorformat);
        bool en_ufo = MDP_COLOR_IS_UFP(colorformat);
        phys_addr_t base = ctx->comp->reg_base;
        u8 subsys_id = ctx->comp->subsys_id;

        /* Enable RDMA */
        MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_EN, BIT(0), BIT(0));

        /* Set Y pixel offset */
        MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_0,
                     subfrm->offset[0], 0xFFFFFFFF);

        /* Set 10bit UFO mode */
        if (mdp_cfg)
                if (mdp_cfg->rdma_support_10bit && block10bit && en_ufo)
                        MM_REG_WRITE(cmd, subsys_id, base,
                                     MDP_RDMA_SRC_OFFSET_0_P,
                                     subfrm->offset_0_p, 0xFFFFFFFF);

        /* Set U pixel offset */
        MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_1,
                     subfrm->offset[1], 0xFFFFFFFF);
        /* Set V pixel offset */
        MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_2,
                     subfrm->offset[2], 0xFFFFFFFF);
        /* Set source size */
        MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_SRC_SIZE, subfrm->src,
                     0x1FFF1FFF);
        /* Set target size */
        MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_CLIP_SIZE,
                     subfrm->clip, 0x1FFF1FFF);
        /* Set crop offset */
        MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_OFFSET_1,
                     subfrm->clip_ofst, 0x003F001F);

        if (mdp_cfg && mdp_cfg->rdma_upsample_repeat_only)
                if ((csf->in.right - csf->in.left + 1) > 320)
                        MM_REG_WRITE(cmd, subsys_id, base,
                                     MDP_RDMA_RESV_DUMMY_0, BIT(2), BIT(2));

        return 0;
}

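/*
 * Only the first RDMA instance (alias_id 0) has a usable frame-done GCE
 * event here; other instances log an error instead of waiting.
 */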
static int wait_rdma_event(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
        struct device *dev = &ctx->comp->mdp_dev->pdev->dev;
        phys_addr_t base = ctx->comp->reg_base;
        u8 subsys_id = ctx->comp->subsys_id;

        if (ctx->comp->alias_id == 0)
                MM_REG_WAIT(cmd, ctx->comp->gce_event[MDP_GCE_EVENT_EOF]);
        else
                dev_err(dev, "Do not support RDMA1_DONE event\n");

        /* Disable RDMA */
        MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_EN, 0x0, BIT(0));
        return 0;
}

static const struct mdp_comp_ops rdma_ops = {
        .get_comp_flag = get_comp_flag,
        .init_comp = init_rdma,
        .config_frame = config_rdma_frame,
        .config_subfrm = config_rdma_subfrm,
        .wait_comp_event = wait_rdma_event,
};

static int init_rsz(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
        phys_addr_t base = ctx->comp->reg_base;
        u8 subsys_id = ctx->comp->subsys_id;

        /* Reset RSZ */
        MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, 0x10000, BIT(16));
        MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, 0x0, BIT(16));
        /* Enable RSZ */
        MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, BIT(0), BIT(0));
        return 0;
}

static int config_rsz_frame(struct mdp_comp_ctx *ctx,
                            struct mdp_cmdq_cmd *cmd,
                            const struct v4l2_rect *compose)
{
        const struct mdp_rsz_data *rsz = &ctx->param->rsz;
        phys_addr_t base = ctx->comp->reg_base;
        u8 subsys_id = ctx->comp->subsys_id;

        if (ctx->param->frame.bypass) {
                /* Disable RSZ */
                MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, 0x0, BIT(0));
                return 0;
        }

        MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_1, rsz->control1,
                     0x03FFFDF3);
        MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_2, rsz->control2,
                     0x0FFFC290);
        MM_REG_WRITE(cmd, subsys_id, base, PRZ_HORIZONTAL_COEFF_STEP,
                     rsz->coeff_step_x, 0x007FFFFF);
        MM_REG_WRITE(cmd, subsys_id, base, PRZ_VERTICAL_COEFF_STEP,
                     rsz->coeff_step_y, 0x007FFFFF);
        return 0;
}

static int config_rsz_subfrm(struct mdp_comp_ctx *ctx,
                             struct mdp_cmdq_cmd *cmd, u32 index)
{
        const struct mdp_rsz_subfrm *subfrm = &ctx->param->rsz.subfrms[index];
        const struct img_comp_subfrm *csf = &ctx->param->subfrms[index];
        const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
        phys_addr_t base = ctx->comp->reg_base;
        u8 subsys_id = ctx->comp->subsys_id;

        MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_2, subfrm->control2,
                     0x00003800);
        MM_REG_WRITE(cmd, subsys_id, base, PRZ_INPUT_IMAGE, subfrm->src,
                     0xFFFFFFFF);

        if (mdp_cfg && mdp_cfg->rsz_disable_dcm_small_sample)
                if ((csf->in.right - csf->in.left + 1) <= 16)
                        MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_1,
                                     BIT(27), BIT(27));

        MM_REG_WRITE(cmd, subsys_id, base, PRZ_LUMA_HORIZONTAL_INTEGER_OFFSET,
                     csf->luma.left, 0xFFFF);
        MM_REG_WRITE(cmd, subsys_id,
                     base, PRZ_LUMA_HORIZONTAL_SUBPIXEL_OFFSET,
                     csf->luma.left_subpix, 0x1FFFFF);
        MM_REG_WRITE(cmd, subsys_id, base, PRZ_LUMA_VERTICAL_INTEGER_OFFSET,
                     csf->luma.top, 0xFFFF);
        MM_REG_WRITE(cmd, subsys_id, base, PRZ_LUMA_VERTICAL_SUBPIXEL_OFFSET,
                     csf->luma.top_subpix, 0x1FFFFF);
        MM_REG_WRITE(cmd, subsys_id,
                     base, PRZ_CHROMA_HORIZONTAL_INTEGER_OFFSET,
                     csf->chroma.left, 0xFFFF);
        MM_REG_WRITE(cmd, subsys_id,
                     base, PRZ_CHROMA_HORIZONTAL_SUBPIXEL_OFFSET,
                     csf->chroma.left_subpix, 0x1FFFFF);

        MM_REG_WRITE(cmd, subsys_id, base, PRZ_OUTPUT_IMAGE, subfrm->clip,
                     0xFFFFFFFF);

        return 0;
}

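/*
 * Counterpart of the workaround in config_rsz_subfrm(): after a tile of
 * 16 pixels or fewer has been processed, PRZ_CONTROL_1 bit 27 is cleared
 * again, apparently re-enabling the dynamic clock management that was
 * turned off for small samples.
 */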
static int advance_rsz_subfrm(struct mdp_comp_ctx *ctx,
                              struct mdp_cmdq_cmd *cmd, u32 index)
{
        const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);

        if (mdp_cfg && mdp_cfg->rsz_disable_dcm_small_sample) {
                const struct img_comp_subfrm *csf = &ctx->param->subfrms[index];
                phys_addr_t base = ctx->comp->reg_base;
                u8 subsys_id = ctx->comp->subsys_id;

                if ((csf->in.right - csf->in.left + 1) <= 16)
                        MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_1, 0x0,
                                     BIT(27));
        }

        return 0;
}

static const struct mdp_comp_ops rsz_ops = {
        .get_comp_flag = get_comp_flag,
        .init_comp = init_rsz,
        .config_frame = config_rsz_frame,
        .config_subfrm = config_rsz_subfrm,
        .advance_subfrm = advance_rsz_subfrm,
};

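/*
 * WROT soft reset is a four-step handshake: assert VIDO_SOFT_RST, poll
 * the status bit high, de-assert, then poll the status bit low again.
 */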
static int init_wrot(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
        phys_addr_t base = ctx->comp->reg_base;
        u8 subsys_id = ctx->comp->subsys_id;

        /* Reset WROT */
        MM_REG_WRITE(cmd, subsys_id, base, VIDO_SOFT_RST, BIT(0), BIT(0));
        MM_REG_POLL(cmd, subsys_id, base, VIDO_SOFT_RST_STAT, BIT(0), BIT(0));
        MM_REG_WRITE(cmd, subsys_id, base, VIDO_SOFT_RST, 0x0, BIT(0));
        MM_REG_POLL(cmd, subsys_id, base, VIDO_SOFT_RST_STAT, 0x0, BIT(0));
        return 0;
}

static int config_wrot_frame(struct mdp_comp_ctx *ctx,
                             struct mdp_cmdq_cmd *cmd,
                             const struct v4l2_rect *compose)
{
        const struct mdp_wrot_data *wrot = &ctx->param->wrot;
        const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
        phys_addr_t base = ctx->comp->reg_base;
        u8 subsys_id = ctx->comp->subsys_id;

        /* Write frame base address */
        MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR, wrot->iova[0],
                     0xFFFFFFFF);
        MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR_C, wrot->iova[1],
                     0xFFFFFFFF);
        MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR_V, wrot->iova[2],
                     0xFFFFFFFF);
        /* Write frame related registers */
        MM_REG_WRITE(cmd, subsys_id, base, VIDO_CTRL, wrot->control,
                     0xF131510F);
        /* Write frame Y pitch */
        MM_REG_WRITE(cmd, subsys_id, base, VIDO_STRIDE, wrot->stride[0],
                     0x0000FFFF);
        /* Write frame UV pitch */
        MM_REG_WRITE(cmd, subsys_id, base, VIDO_STRIDE_C, wrot->stride[1],
                     0xFFFF);
        MM_REG_WRITE(cmd, subsys_id, base, VIDO_STRIDE_V, wrot->stride[2],
                     0xFFFF);
        /* Write matrix control */
        MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAT_CTRL, wrot->mat_ctrl, 0xF3);

        /* Set the fixed ALPHA as 0xFF */
        MM_REG_WRITE(cmd, subsys_id, base, VIDO_DITHER, 0xFF000000,
                     0xFF000000);
        /* Set VIDO_EOL_SEL */
        MM_REG_WRITE(cmd, subsys_id, base, VIDO_RSV_1, BIT(31), BIT(31));
        /* Set VIDO_FIFO_TEST */
        if (wrot->fifo_test != 0)
                MM_REG_WRITE(cmd, subsys_id, base, VIDO_FIFO_TEST,
                             wrot->fifo_test, 0xFFF);
        /* Filter enable */
        if (mdp_cfg && mdp_cfg->wrot_filter_constraint)
                MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE,
                             wrot->filter, 0x77);

        return 0;
}

static int config_wrot_subfrm(struct mdp_comp_ctx *ctx,
                              struct mdp_cmdq_cmd *cmd, u32 index)
{
        const struct mdp_wrot_subfrm *subfrm = &ctx->param->wrot.subfrms[index];
        phys_addr_t base = ctx->comp->reg_base;
        u8 subsys_id = ctx->comp->subsys_id;

        /* Write Y pixel offset */
        MM_REG_WRITE(cmd, subsys_id, base, VIDO_OFST_ADDR,
                     subfrm->offset[0], 0x0FFFFFFF);
        /* Write U pixel offset */
        MM_REG_WRITE(cmd, subsys_id, base, VIDO_OFST_ADDR_C,
                     subfrm->offset[1], 0x0FFFFFFF);
        /* Write V pixel offset */
        MM_REG_WRITE(cmd, subsys_id, base, VIDO_OFST_ADDR_V,
                     subfrm->offset[2], 0x0FFFFFFF);
        /* Write source size */
        MM_REG_WRITE(cmd, subsys_id, base, VIDO_IN_SIZE, subfrm->src,
                     0x1FFF1FFF);
        /* Write target size */
        MM_REG_WRITE(cmd, subsys_id, base, VIDO_TAR_SIZE, subfrm->clip,
                     0x1FFF1FFF);
        MM_REG_WRITE(cmd, subsys_id, base, VIDO_CROP_OFST, subfrm->clip_ofst,
                     0x1FFF1FFF);

        MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE,
                     subfrm->main_buf, 0x1FFF7F00);

        /* Enable WROT */
        MM_REG_WRITE(cmd, subsys_id, base, VIDO_ROT_EN, BIT(0), BIT(0));

        return 0;
}

static int wait_wrot_event(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
        const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
        struct device *dev = &ctx->comp->mdp_dev->pdev->dev;
        phys_addr_t base = ctx->comp->reg_base;
        u8 subsys_id = ctx->comp->subsys_id;

        if (ctx->comp->alias_id == 0)
                MM_REG_WAIT(cmd, ctx->comp->gce_event[MDP_GCE_EVENT_EOF]);
        else
                dev_err(dev, "Do not support WROT1_DONE event\n");

        if (mdp_cfg && mdp_cfg->wrot_filter_constraint)
                MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE, 0x0,
                             0x77);

        /* Disable WROT */
        MM_REG_WRITE(cmd, subsys_id, base, VIDO_ROT_EN, 0x0, BIT(0));

        return 0;
}

static const struct mdp_comp_ops wrot_ops = {
        .get_comp_flag = get_comp_flag,
        .init_comp = init_wrot,
        .config_frame = config_wrot_frame,
        .config_subfrm = config_wrot_subfrm,
        .wait_comp_event = wait_wrot_event,
};

static int init_wdma(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
        phys_addr_t base = ctx->comp->reg_base;
        u8 subsys_id = ctx->comp->subsys_id;

        /* Reset WDMA */
        MM_REG_WRITE(cmd, subsys_id, base, WDMA_RST, BIT(0), BIT(0));
        MM_REG_POLL(cmd, subsys_id, base, WDMA_FLOW_CTRL_DBG, BIT(0), BIT(0));
        MM_REG_WRITE(cmd, subsys_id, base, WDMA_RST, 0x0, BIT(0));
        return 0;
}

static int config_wdma_frame(struct mdp_comp_ctx *ctx,
                             struct mdp_cmdq_cmd *cmd,
                             const struct v4l2_rect *compose)
{
        const struct mdp_wdma_data *wdma = &ctx->param->wdma;
        phys_addr_t base = ctx->comp->reg_base;
        u8 subsys_id = ctx->comp->subsys_id;

        MM_REG_WRITE(cmd, subsys_id, base, WDMA_BUF_CON2, 0x10101050,
                     0xFFFFFFFF);

        /* Setup frame information */
        MM_REG_WRITE(cmd, subsys_id, base, WDMA_CFG, wdma->wdma_cfg,
                     0x0F01B8F0);
        /* Setup frame base address */
        MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_ADDR, wdma->iova[0],
                     0xFFFFFFFF);
        MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_U_ADDR, wdma->iova[1],
                     0xFFFFFFFF);
        MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_V_ADDR, wdma->iova[2],
                     0xFFFFFFFF);
        /* Setup Y pitch */
        MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_W_IN_BYTE,
                     wdma->w_in_byte, 0x0000FFFF);
        /* Setup UV pitch */
        MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_UV_PITCH,
                     wdma->uv_stride, 0x0000FFFF);
        /* Set the fixed ALPHA as 0xFF */
        MM_REG_WRITE(cmd, subsys_id, base, WDMA_ALPHA, 0x800000FF,
                     0x800000FF);

        return 0;
}

static int config_wdma_subfrm(struct mdp_comp_ctx *ctx,
                              struct mdp_cmdq_cmd *cmd, u32 index)
{
        const struct mdp_wdma_subfrm *subfrm = &ctx->param->wdma.subfrms[index];
        phys_addr_t base = ctx->comp->reg_base;
        u8 subsys_id = ctx->comp->subsys_id;

        /* Write Y pixel offset */
        MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_ADDR_OFFSET,
                     subfrm->offset[0], 0x0FFFFFFF);
        /* Write U pixel offset */
        MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_U_ADDR_OFFSET,
                     subfrm->offset[1], 0x0FFFFFFF);
        /* Write V pixel offset */
        MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_V_ADDR_OFFSET,
                     subfrm->offset[2], 0x0FFFFFFF);
        /* Write source size */
        MM_REG_WRITE(cmd, subsys_id, base, WDMA_SRC_SIZE, subfrm->src,
                     0x3FFF3FFF);
        /* Write target size */
        MM_REG_WRITE(cmd, subsys_id, base, WDMA_CLIP_SIZE, subfrm->clip,
                     0x3FFF3FFF);
        /* Write clip offset */
        MM_REG_WRITE(cmd, subsys_id, base, WDMA_CLIP_COORD, subfrm->clip_ofst,
                     0x3FFF3FFF);

        /* Enable WDMA */
        MM_REG_WRITE(cmd, subsys_id, base, WDMA_EN, BIT(0), BIT(0));

        return 0;
}

static int wait_wdma_event(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
        phys_addr_t base = ctx->comp->reg_base;
        u8 subsys_id = ctx->comp->subsys_id;

        MM_REG_WAIT(cmd, ctx->comp->gce_event[MDP_GCE_EVENT_EOF]);
        /* Disable WDMA */
        MM_REG_WRITE(cmd, subsys_id, base, WDMA_EN, 0x0, BIT(0));
        return 0;
}

static const struct mdp_comp_ops wdma_ops = {
        .get_comp_flag = get_comp_flag,
        .init_comp = init_wdma,
        .config_frame = config_wdma_frame,
        .config_subfrm = config_wdma_subfrm,
        .wait_comp_event = wait_wdma_event,
};

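/*
 * CCORR comes up enabled but in relay mode (MDP_CCORR_CFG bit 0), which
 * on MediaTek blocks normally means pixels pass through unmodified.
 */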
static int init_ccorr(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
{
        phys_addr_t base = ctx->comp->reg_base;
        u8 subsys_id = ctx->comp->subsys_id;

        /* CCORR enable */
        MM_REG_WRITE(cmd, subsys_id, base, MDP_CCORR_EN, BIT(0), BIT(0));
        /* Relay mode */
        MM_REG_WRITE(cmd, subsys_id, base, MDP_CCORR_CFG, BIT(0), BIT(0));
        return 0;
}

static int config_ccorr_subfrm(struct mdp_comp_ctx *ctx,
                               struct mdp_cmdq_cmd *cmd, u32 index)
{
        const struct img_comp_subfrm *csf = &ctx->param->subfrms[index];
        phys_addr_t base = ctx->comp->reg_base;
        u8 subsys_id = ctx->comp->subsys_id;
        u32 hsize, vsize;

        /* Rectangle edges are inclusive, hence the +1 for width/height */
        hsize = csf->in.right - csf->in.left + 1;
        vsize = csf->in.bottom - csf->in.top + 1;
        MM_REG_WRITE(cmd, subsys_id, base, MDP_CCORR_SIZE,
                     (hsize << 16) + (vsize << 0), 0x1FFF1FFF);
        return 0;
}

static const struct mdp_comp_ops ccorr_ops = {
        .get_comp_flag = get_comp_flag,
        .init_comp = init_ccorr,
        .config_subfrm = config_ccorr_subfrm,
};

static const struct mdp_comp_ops *mdp_comp_ops[MDP_COMP_TYPE_COUNT] = {
        [MDP_COMP_TYPE_RDMA] = &rdma_ops,
        [MDP_COMP_TYPE_RSZ] = &rsz_ops,
        [MDP_COMP_TYPE_WROT] = &wrot_ops,
        [MDP_COMP_TYPE_WDMA] = &wdma_ops,
        [MDP_COMP_TYPE_CCORR] = &ccorr_ops,
};

struct mdp_comp_match {
        enum mdp_comp_type type;
        u32 alias_id;
};

static const struct mdp_comp_match mdp_comp_matches[MDP_MAX_COMP_COUNT] = {
        [MDP_COMP_WPEI] = { MDP_COMP_TYPE_WPEI, 0 },
        [MDP_COMP_WPEO] = { MDP_COMP_TYPE_EXTO, 2 },
        [MDP_COMP_WPEI2] = { MDP_COMP_TYPE_WPEI, 1 },
        [MDP_COMP_WPEO2] = { MDP_COMP_TYPE_EXTO, 3 },
        [MDP_COMP_ISP_IMGI] = { MDP_COMP_TYPE_IMGI, 0 },
        [MDP_COMP_ISP_IMGO] = { MDP_COMP_TYPE_EXTO, 0 },
        [MDP_COMP_ISP_IMG2O] = { MDP_COMP_TYPE_EXTO, 1 },

        [MDP_COMP_CAMIN] = { MDP_COMP_TYPE_DL_PATH, 0 },
        [MDP_COMP_CAMIN2] = { MDP_COMP_TYPE_DL_PATH, 1 },
        [MDP_COMP_RDMA0] = { MDP_COMP_TYPE_RDMA, 0 },
        [MDP_COMP_CCORR0] = { MDP_COMP_TYPE_CCORR, 0 },
        [MDP_COMP_RSZ0] = { MDP_COMP_TYPE_RSZ, 0 },
        [MDP_COMP_RSZ1] = { MDP_COMP_TYPE_RSZ, 1 },
        [MDP_COMP_PATH0_SOUT] = { MDP_COMP_TYPE_PATH, 0 },
        [MDP_COMP_PATH1_SOUT] = { MDP_COMP_TYPE_PATH, 1 },
        [MDP_COMP_WROT0] = { MDP_COMP_TYPE_WROT, 0 },
        [MDP_COMP_WDMA] = { MDP_COMP_TYPE_WDMA, 0 },
};

static const struct of_device_id mdp_comp_dt_ids[] = {
        {
                .compatible = "mediatek,mt8183-mdp3-rdma",
                .data = (void *)MDP_COMP_TYPE_RDMA,
        }, {
                .compatible = "mediatek,mt8183-mdp3-ccorr",
                .data = (void *)MDP_COMP_TYPE_CCORR,
        }, {
                .compatible = "mediatek,mt8183-mdp3-rsz",
                .data = (void *)MDP_COMP_TYPE_RSZ,
        }, {
                .compatible = "mediatek,mt8183-mdp3-wrot",
                .data = (void *)MDP_COMP_TYPE_WROT,
        }, {
                .compatible = "mediatek,mt8183-mdp3-wdma",
                .data = (void *)MDP_COMP_TYPE_WDMA,
        },
        {}
};

static const struct of_device_id mdp_sub_comp_dt_ids[] = {
        {
                .compatible = "mediatek,mt8183-mdp3-wdma",
                .data = (void *)MDP_COMP_TYPE_PATH,
        }, {
                .compatible = "mediatek,mt8183-mdp3-wrot",
                .data = (void *)MDP_COMP_TYPE_PATH,
        },
        {}
};

/* Used to describe the item order in MDP property */
struct mdp_comp_info {
        u32 clk_num;
        u32 clk_ofst;
        u32 dts_reg_ofst;
};

static const struct mdp_comp_info mdp_comp_dt_info[MDP_MAX_COMP_COUNT] = {
        [MDP_COMP_RDMA0] = {2, 0, 0},
        [MDP_COMP_RSZ0] = {1, 0, 0},
        [MDP_COMP_WROT0] = {1, 0, 0},
        [MDP_COMP_WDMA] = {1, 0, 0},
        [MDP_COMP_CCORR0] = {1, 0, 0},
};

static inline bool is_dma_capable(const enum mdp_comp_type type)
{
        return (type == MDP_COMP_TYPE_RDMA ||
                type == MDP_COMP_TYPE_WROT ||
                type == MDP_COMP_TYPE_WDMA);
}

static inline bool is_bypass_gce_event(const enum mdp_comp_type type)
{
        /*
         * Subcomponent PATH is only used for the direction of data flow and
         * does not need to wait for a GCE event.
         */
        return (type == MDP_COMP_TYPE_PATH);
}

static int mdp_comp_get_id(enum mdp_comp_type type, int alias_id)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(mdp_comp_matches); i++)
                if (mdp_comp_matches[i].type == type &&
                    mdp_comp_matches[i].alias_id == alias_id)
                        return i;
        return -ENODEV;
}

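/*
 * Power on one component: take a runtime-PM reference on its own device
 * (only DMA-capable components have comp_dev set) and enable all of its
 * clocks. On any failure the function unwinds what it already enabled.
 */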
int mdp_comp_clock_on(struct device *dev, struct mdp_comp *comp)
{
        int i, ret;

        if (comp->comp_dev) {
                ret = pm_runtime_resume_and_get(comp->comp_dev);
                if (ret < 0) {
                        dev_err(dev,
                                "Failed to get power, err %d. type:%d id:%d\n",
                                ret, comp->type, comp->id);
                        return ret;
                }
        }

        for (i = 0; i < ARRAY_SIZE(comp->clks); i++) {
                if (IS_ERR_OR_NULL(comp->clks[i]))
                        continue;
                ret = clk_prepare_enable(comp->clks[i]);
                if (ret) {
                        dev_err(dev,
                                "Failed to enable clk %d. type:%d id:%d\n",
                                i, comp->type, comp->id);
                        goto err_revert;
                }
        }

        return 0;

err_revert:
        /* Roll back the clocks enabled before the failure */
        while (--i >= 0) {
                if (IS_ERR_OR_NULL(comp->clks[i]))
                        continue;
                clk_disable_unprepare(comp->clks[i]);
        }
        if (comp->comp_dev)
                pm_runtime_put(comp->comp_dev);
        return ret;
}

void mdp_comp_clock_off(struct device *dev, struct mdp_comp *comp)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(comp->clks); i++) {
                if (IS_ERR_OR_NULL(comp->clks[i]))
                        continue;
                clk_disable_unprepare(comp->clks[i]);
        }

        if (comp->comp_dev)
                pm_runtime_put(comp->comp_dev);
}

/*
 * Returns 0 on success; on failure, returns the 1-based index of the
 * component whose clocks could not be enabled, so callers treat any
 * nonzero value as an error.
 */
int mdp_comp_clocks_on(struct device *dev, struct mdp_comp *comps, int num)
{
        int i;

        for (i = 0; i < num; i++)
                if (mdp_comp_clock_on(dev, &comps[i]) != 0)
                        return ++i;

        return 0;
}

void mdp_comp_clocks_off(struct device *dev, struct mdp_comp *comps, int num)
{
        int i;

        for (i = 0; i < num; i++)
                mdp_comp_clock_off(dev, &comps[i]);
}

static int mdp_get_subsys_id(struct device *dev, struct device_node *node,
                             struct mdp_comp *comp)
{
        struct platform_device *comp_pdev;
        struct cmdq_client_reg cmdq_reg;
        int ret = 0;
        int index = 0;

        if (!dev || !node || !comp)
                return -EINVAL;

        comp_pdev = of_find_device_by_node(node);
        if (!comp_pdev) {
                dev_err(dev, "get comp_pdev fail! comp id=%d type=%d\n",
                        comp->id, comp->type);
                return -ENODEV;
        }

        index = mdp_comp_dt_info[comp->id].dts_reg_ofst;
        ret = cmdq_dev_get_client_reg(&comp_pdev->dev, &cmdq_reg, index);
        if (ret != 0) {
                dev_err(&comp_pdev->dev, "cmdq_dev_get_subsys fail!\n");
                put_device(&comp_pdev->dev);
                return -EINVAL;
        }

        comp->subsys_id = cmdq_reg.subsys;
        dev_dbg(&comp_pdev->dev, "subsys id=%d\n", cmdq_reg.subsys);
        /* Drop the reference taken by of_find_device_by_node() */
        put_device(&comp_pdev->dev);

        return 0;
}

static void __mdp_comp_init(struct mdp_dev *mdp, struct device_node *node,
                            struct mdp_comp *comp)
{
        struct resource res;
        phys_addr_t base;
        int index = mdp_comp_dt_info[comp->id].dts_reg_ofst;

        if (of_address_to_resource(node, index, &res) < 0)
                base = 0L;
        else
                base = res.start;

        comp->mdp_dev = mdp;
        comp->regs = of_iomap(node, 0);
        comp->reg_base = base;
}

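/*
 * Bind a component to its DT node: resolve type/alias from the match
 * table, map its registers, fetch the clocks described by
 * mdp_comp_dt_info, then look up the GCE SOF/EOF events from the
 * "mediatek,gce-events" property. Only DMA-capable components must
 * provide an EOF event; for the rest it defaults to MDP_GCE_NO_EVENT.
 */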
static int mdp_comp_init(struct mdp_dev *mdp, struct device_node *node,
                         struct mdp_comp *comp, enum mtk_mdp_comp_id id)
{
        struct device *dev = &mdp->pdev->dev;
        int clk_num;
        int clk_ofst;
        int i;
        s32 event;

        if (id < 0 || id >= MDP_MAX_COMP_COUNT) {
                dev_err(dev, "Invalid component id %d\n", id);
                return -EINVAL;
        }

        comp->id = id;
        comp->type = mdp_comp_matches[id].type;
        comp->alias_id = mdp_comp_matches[id].alias_id;
        comp->ops = mdp_comp_ops[comp->type];
        __mdp_comp_init(mdp, node, comp);

        clk_num = mdp_comp_dt_info[id].clk_num;
        clk_ofst = mdp_comp_dt_info[id].clk_ofst;

        for (i = 0; i < clk_num; i++) {
                comp->clks[i] = of_clk_get(node, i + clk_ofst);
                if (IS_ERR(comp->clks[i]))
                        break;
        }

        mdp_get_subsys_id(dev, node, comp);

        /* Set GCE SOF event */
        if (is_bypass_gce_event(comp->type) ||
            of_property_read_u32_index(node, "mediatek,gce-events",
                                       MDP_GCE_EVENT_SOF, &event))
                event = MDP_GCE_NO_EVENT;

        comp->gce_event[MDP_GCE_EVENT_SOF] = event;

        /* Set GCE EOF event */
        if (is_dma_capable(comp->type)) {
                if (of_property_read_u32_index(node, "mediatek,gce-events",
                                               MDP_GCE_EVENT_EOF, &event)) {
                        dev_err(dev, "Component id %d has no EOF\n", id);
                        return -EINVAL;
                }
        } else {
                event = MDP_GCE_NO_EVENT;
        }

        comp->gce_event[MDP_GCE_EVENT_EOF] = event;

        return 0;
}

static void mdp_comp_deinit(struct mdp_comp *comp)
{
        if (!comp)
                return;

        if (comp->regs)
                iounmap(comp->regs);
}

static struct mdp_comp *mdp_comp_create(struct mdp_dev *mdp,
                                        struct device_node *node,
                                        enum mtk_mdp_comp_id id)
{
        struct device *dev = &mdp->pdev->dev;
        struct mdp_comp *comp;
        int ret;

        if (mdp->comp[id])
                return ERR_PTR(-EEXIST);

        comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
        if (!comp)
                return ERR_PTR(-ENOMEM);

        ret = mdp_comp_init(mdp, node, comp, id);
        if (ret) {
                devm_kfree(dev, comp);
                return ERR_PTR(ret);
        }
        mdp->comp[id] = comp;
        mdp->comp[id]->mdp_dev = mdp;

        dev_dbg(dev, "%s type:%d alias:%d id:%d base:%#x regs:%p\n",
                dev->of_node->name, comp->type, comp->alias_id, id,
                (u32)comp->reg_base, comp->regs);
        return comp;
}

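/*
 * Create the PATH sub-components. They have no device nodes of their
 * own: mdp_sub_comp_dt_ids maps the WROT/WDMA compatibles to
 * MDP_COMP_TYPE_PATH, so this walks the same sibling nodes again after
 * mdp_comp_config() has created the primary components.
 */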
static int mdp_comp_sub_create(struct mdp_dev *mdp)
{
        struct device *dev = &mdp->pdev->dev;
        struct device_node *node, *parent;

        parent = dev->of_node->parent;

        for_each_child_of_node(parent, node) {
                const struct of_device_id *of_id;
                enum mdp_comp_type type;
                int id, alias_id;
                struct mdp_comp *comp;

                of_id = of_match_node(mdp_sub_comp_dt_ids, node);
                if (!of_id)
                        continue;
                if (!of_device_is_available(node)) {
                        dev_dbg(dev, "Skipping disabled sub comp. %pOF\n",
                                node);
                        continue;
                }

                type = (enum mdp_comp_type)(uintptr_t)of_id->data;
                alias_id = mdp_comp_alias_id[type];
                id = mdp_comp_get_id(type, alias_id);
                if (id < 0) {
                        dev_err(dev,
                                "Fail to get sub comp. id: type %d alias %d\n",
                                type, alias_id);
                        /* Release the reference held by the node iterator */
                        of_node_put(node);
                        return -EINVAL;
                }
                mdp_comp_alias_id[type]++;

                comp = mdp_comp_create(mdp, node, id);
                if (IS_ERR(comp)) {
                        of_node_put(node);
                        return PTR_ERR(comp);
                }
        }

        return 0;
}

void mdp_comp_destroy(struct mdp_dev *mdp)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(mdp->comp); i++) {
                if (mdp->comp[i]) {
                        pm_runtime_disable(mdp->comp[i]->comp_dev);
                        mdp_comp_deinit(mdp->comp[i]);
                        devm_kfree(mdp->comp[i]->comp_dev, mdp->comp[i]);
                        mdp->comp[i] = NULL;
                }
        }
}

int mdp_comp_config(struct mdp_dev *mdp)
{
        struct device *dev = &mdp->pdev->dev;
        struct device_node *node, *parent;
        struct platform_device *pdev;
        int ret;

        memset(mdp_comp_alias_id, 0, sizeof(mdp_comp_alias_id));

        parent = dev->of_node->parent;
        /* Iterate over sibling MDP function blocks */
        for_each_child_of_node(parent, node) {
                const struct of_device_id *of_id;
                enum mdp_comp_type type;
                int id, alias_id;
                struct mdp_comp *comp;

                of_id = of_match_node(mdp_comp_dt_ids, node);
                if (!of_id)
                        continue;

                if (!of_device_is_available(node)) {
                        dev_dbg(dev, "Skipping disabled component %pOF\n",
                                node);
                        continue;
                }

                type = (enum mdp_comp_type)(uintptr_t)of_id->data;
                alias_id = mdp_comp_alias_id[type];
                id = mdp_comp_get_id(type, alias_id);
                if (id < 0) {
                        dev_err(dev,
                                "Fail to get component id: type %d alias %d\n",
                                type, alias_id);
                        continue;
                }
                mdp_comp_alias_id[type]++;

                comp = mdp_comp_create(mdp, node, id);
                if (IS_ERR(comp)) {
                        ret = PTR_ERR(comp);
                        of_node_put(node);
                        goto err_init_comps;
                }

                /* Only DMA capable components need the pm control */
                comp->comp_dev = NULL;
                if (!is_dma_capable(comp->type))
                        continue;

                pdev = of_find_device_by_node(node);
                if (!pdev) {
                        dev_warn(dev, "can't find platform device of node:%s\n",
                                 node->name);
                        /* Release the reference held by the node iterator */
                        of_node_put(node);
                        return -ENODEV;
                }

                comp->comp_dev = &pdev->dev;
                pm_runtime_enable(comp->comp_dev);
        }

        ret = mdp_comp_sub_create(mdp);
        if (ret)
                goto err_init_comps;

        return 0;

err_init_comps:
        mdp_comp_destroy(mdp);
        return ret;
}

int mdp_comp_ctx_config(struct mdp_dev *mdp, struct mdp_comp_ctx *ctx,
                        const struct img_compparam *param,
                        const struct img_ipi_frameparam *frame)
{
        struct device *dev = &mdp->pdev->dev;
        int i;

        if (param->type < 0 || param->type >= MDP_MAX_COMP_COUNT) {
                dev_err(dev, "Invalid component id %d\n", param->type);
                return -EINVAL;
        }

        ctx->comp = mdp->comp[param->type];
        if (!ctx->comp) {
                dev_err(dev, "Uninit component id %d\n", param->type);
                return -EINVAL;
        }

        ctx->param = param;
        ctx->input = &frame->inputs[param->input];
        for (i = 0; i < param->num_outputs; i++)
                ctx->outputs[i] = &frame->outputs[param->outputs[i]];
        return 0;
}