// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014-2015 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <drm/drm_atomic.h>
#include <drm/drm_blend.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_print.h>

#include "mdp5_kms.h"

struct mdp5_plane {
	struct drm_plane base;

	uint32_t nformats;
	uint32_t formats[32];
};
#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)

static int mdp5_plane_mode_set(struct drm_plane *plane,
		struct drm_crtc *crtc, struct drm_framebuffer *fb,
		struct drm_rect *src, struct drm_rect *dest);

static struct mdp5_kms *get_kms(struct drm_plane *plane)
{
	struct msm_drm_private *priv = plane->dev->dev_private;
	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

static bool plane_enabled(struct drm_plane_state *state)
{
	return state->visible;
}

static void mdp5_plane_destroy(struct drm_plane *plane)
{
	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);

	drm_plane_cleanup(plane);

	kfree(mdp5_plane);
}

/* helper to install properties which are common to planes and crtcs */
static void mdp5_plane_install_properties(struct drm_plane *plane,
		struct drm_mode_object *obj)
{
	unsigned int zpos;

	drm_plane_create_rotation_property(plane,
					   DRM_MODE_ROTATE_0,
					   DRM_MODE_ROTATE_0 |
					   DRM_MODE_ROTATE_180 |
					   DRM_MODE_REFLECT_X |
					   DRM_MODE_REFLECT_Y);
	drm_plane_create_alpha_property(plane);
	drm_plane_create_blend_mode_property(plane,
			BIT(DRM_MODE_BLEND_PIXEL_NONE) |
			BIT(DRM_MODE_BLEND_PREMULTI) |
			BIT(DRM_MODE_BLEND_COVERAGE));

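	/*
	 * Default zpos: the primary plane sits at the base blend stage,
	 * while other planes get an initial stage derived from their plane
	 * index. Userspace may still reassign it through the zpos property
	 * (range 1..255).
	 */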
	if (plane->type == DRM_PLANE_TYPE_PRIMARY)
		zpos = STAGE_BASE;
	else
		zpos = STAGE0 + drm_plane_index(plane);
	drm_plane_create_zpos_property(plane, zpos, 1, 255);
}

static void
mdp5_plane_atomic_print_state(struct drm_printer *p,
		const struct drm_plane_state *state)
{
	struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
	struct mdp5_kms *mdp5_kms = get_kms(state->plane);

	drm_printf(p, "\thwpipe=%s\n", pstate->hwpipe ?
			pstate->hwpipe->name : "(null)");
	if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
		drm_printf(p, "\tright-hwpipe=%s\n",
			   pstate->r_hwpipe ? pstate->r_hwpipe->name :
					      "(null)");
	drm_printf(p, "\tblend_mode=%u\n", pstate->base.pixel_blend_mode);
	drm_printf(p, "\tzpos=%u\n", pstate->base.zpos);
	drm_printf(p, "\tnormalized_zpos=%u\n", pstate->base.normalized_zpos);
	drm_printf(p, "\talpha=%u\n", pstate->base.alpha);
	drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage));
}

static void mdp5_plane_reset(struct drm_plane *plane)
{
	struct mdp5_plane_state *mdp5_state;

	if (plane->state)
		__drm_atomic_helper_plane_destroy_state(plane->state);

	kfree(to_mdp5_plane_state(plane->state));
	plane->state = NULL;
	mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
	if (!mdp5_state)
		return;
	__drm_atomic_helper_plane_reset(plane, &mdp5_state->base);
}

static struct drm_plane_state *
mdp5_plane_duplicate_state(struct drm_plane *plane)
{
	struct mdp5_plane_state *mdp5_state;

	if (WARN_ON(!plane->state))
		return NULL;

	mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
			     sizeof(*mdp5_state), GFP_KERNEL);
	if (!mdp5_state)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &mdp5_state->base);

	return &mdp5_state->base;
}

static void mdp5_plane_destroy_state(struct drm_plane *plane,
		struct drm_plane_state *state)
{
	struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);

	if (state->fb)
		drm_framebuffer_put(state->fb);

	kfree(pstate);
}

static const struct drm_plane_funcs mdp5_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = mdp5_plane_destroy,
	.reset = mdp5_plane_reset,
	.atomic_duplicate_state = mdp5_plane_duplicate_state,
	.atomic_destroy_state = mdp5_plane_destroy_state,
	.atomic_print_state = mdp5_plane_atomic_print_state,
};

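/*
 * prepare_fb pins the framebuffer's backing memory into the KMS address
 * space (and lets drm_gem_plane_helper_prepare_fb() hook up the implicit
 * fences); cleanup_fb drops that pin once the plane no longer scans out
 * the old framebuffer.
 */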
static int mdp5_plane_prepare_fb(struct drm_plane *plane,
				 struct drm_plane_state *new_state)
{
	struct msm_drm_private *priv = plane->dev->dev_private;
	struct msm_kms *kms = priv->kms;
	bool needs_dirtyfb = to_mdp5_plane_state(new_state)->needs_dirtyfb;

	if (!new_state->fb)
		return 0;

	drm_gem_plane_helper_prepare_fb(plane, new_state);

	return msm_framebuffer_prepare(new_state->fb, kms->aspace, needs_dirtyfb);
}

static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
				  struct drm_plane_state *old_state)
{
	struct mdp5_kms *mdp5_kms = get_kms(plane);
	struct msm_kms *kms = &mdp5_kms->base.base;
	struct drm_framebuffer *fb = old_state->fb;
	bool needed_dirtyfb = to_mdp5_plane_state(old_state)->needs_dirtyfb;

	if (!fb)
		return;

	DBG("%s: cleanup: FB[%u]", plane->name, fb->base.id);
	msm_framebuffer_cleanup(fb, kms->aspace, needed_dirtyfb);
}

static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
					      struct drm_plane_state *state)
{
	struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
	struct drm_plane *plane = state->plane;
	struct drm_plane_state *old_state = plane->state;
	struct mdp5_cfg *config = mdp5_cfg_get_config(get_kms(plane)->cfg);
	bool new_hwpipe = false;
	bool need_right_hwpipe = false;
	uint32_t max_width, max_height;
	bool out_of_bounds = false;
	uint32_t caps = 0;
	int min_scale, max_scale;
	int ret;

	DBG("%s: check (%d -> %d)", plane->name,
	    plane_enabled(old_state), plane_enabled(state));

	max_width = config->hw->lm.max_width << 16;
	max_height = config->hw->lm.max_height << 16;

	/* Make sure source dimensions are within bounds. */
	if (state->src_h > max_height)
		out_of_bounds = true;

	if (state->src_w > max_width) {
		/* If source split is supported, we can go up to 2x
		 * the max LM width, but we'd need to stage another
		 * hwpipe to the right LM. So, the drm_plane would
		 * consist of 2 hwpipes.
		 */
		if (config->hw->mdp.caps & MDP_CAP_SRC_SPLIT &&
		    (state->src_w <= 2 * max_width))
			need_right_hwpipe = true;
		else
			out_of_bounds = true;
	}

	if (out_of_bounds) {
		struct drm_rect src = drm_plane_state_src(state);
		DBG("Invalid source size "DRM_RECT_FP_FMT,
		    DRM_RECT_FP_ARG(&src));
		return -ERANGE;
	}

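	/*
	 * The scale limits are src:dst ratios in 16.16 fixed point, as
	 * expected by drm_atomic_helper_check_plane_state(): FRAC_16_16(1, 8)
	 * and FRAC_16_16(8, 1) allow up to 8x upscaling and 8x downscaling,
	 * the range this driver assumes the pipe scalers can handle.
	 */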
	min_scale = FRAC_16_16(1, 8);
	max_scale = FRAC_16_16(8, 1);

	ret = drm_atomic_helper_check_plane_state(state, crtc_state,
						  min_scale, max_scale,
						  true, true);
	if (ret)
		return ret;

	if (plane_enabled(state)) {
		unsigned int rotation;
		const struct mdp_format *format;
		struct mdp5_kms *mdp5_kms = get_kms(plane);
		uint32_t blkcfg = 0;

		format = to_mdp_format(msm_framebuffer_format(state->fb));
		if (MDP_FORMAT_IS_YUV(format))
			caps |= MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC;

		if (((state->src_w >> 16) != state->crtc_w) ||
		    ((state->src_h >> 16) != state->crtc_h))
			caps |= MDP_PIPE_CAP_SCALE;

		rotation = drm_rotation_simplify(state->rotation,
						 DRM_MODE_ROTATE_0 |
						 DRM_MODE_REFLECT_X |
						 DRM_MODE_REFLECT_Y);

		if (rotation & DRM_MODE_REFLECT_X)
			caps |= MDP_PIPE_CAP_HFLIP;

		if (rotation & DRM_MODE_REFLECT_Y)
			caps |= MDP_PIPE_CAP_VFLIP;

		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			caps |= MDP_PIPE_CAP_CURSOR;

		/* (re)allocate hw pipe if we don't have one or caps-mismatch: */
		if (!mdp5_state->hwpipe || (caps & ~mdp5_state->hwpipe->caps))
			new_hwpipe = true;

		/*
		 * (re)allocate hw pipe if we're either requesting 2 hw pipes
		 * or we're switching from 2 hw pipes to 1 hw pipe because the
		 * new src_w can be supported by 1 hw pipe itself.
		 */
		if ((need_right_hwpipe && !mdp5_state->r_hwpipe) ||
		    (!need_right_hwpipe && mdp5_state->r_hwpipe))
			new_hwpipe = true;

		if (mdp5_kms->smp) {
			const struct mdp_format *format =
				to_mdp_format(msm_framebuffer_format(state->fb));

			blkcfg = mdp5_smp_calculate(mdp5_kms->smp, format,
						    state->src_w >> 16, false);

			if (mdp5_state->hwpipe && (mdp5_state->hwpipe->blkcfg != blkcfg))
				new_hwpipe = true;
		}

		/* (re)assign hwpipe if needed, otherwise keep old one: */
		if (new_hwpipe) {
			/* TODO maybe we want to re-assign hwpipe sometimes
			 * in cases when we no longer need some caps, to make
			 * it available for other planes?
			 */
			struct mdp5_hw_pipe *old_hwpipe = mdp5_state->hwpipe;
			struct mdp5_hw_pipe *old_right_hwpipe =
							mdp5_state->r_hwpipe;
			struct mdp5_hw_pipe *new_hwpipe = NULL;
			struct mdp5_hw_pipe *new_right_hwpipe = NULL;

			ret = mdp5_pipe_assign(state->state, plane, caps,
					       blkcfg, &new_hwpipe,
					       need_right_hwpipe ?
					       &new_right_hwpipe : NULL);
			if (ret) {
				DBG("%s: failed to assign hwpipe(s)!",
				    plane->name);
				return ret;
			}

			mdp5_state->hwpipe = new_hwpipe;
			if (need_right_hwpipe)
				mdp5_state->r_hwpipe = new_right_hwpipe;
			else
				/*
				 * set it to NULL so that the driver knows we
				 * don't have a right hwpipe when committing a
				 * new state
				 */
				mdp5_state->r_hwpipe = NULL;

			ret = mdp5_pipe_release(state->state, old_hwpipe);
			if (ret)
				return ret;

			ret = mdp5_pipe_release(state->state, old_right_hwpipe);
			if (ret)
				return ret;
		}
	} else {
		ret = mdp5_pipe_release(state->state, mdp5_state->hwpipe);
		if (ret)
			return ret;

		ret = mdp5_pipe_release(state->state, mdp5_state->r_hwpipe);
		if (ret)
			return ret;

		mdp5_state->hwpipe = mdp5_state->r_hwpipe = NULL;
	}

	return 0;
}

static int mdp5_plane_atomic_check(struct drm_plane *plane,
				   struct drm_atomic_state *state)
{
	struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
										  plane);
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										  plane);
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;

	crtc = new_plane_state->crtc ? new_plane_state->crtc : old_plane_state->crtc;
	if (!crtc)
		return 0;

	crtc_state = drm_atomic_get_existing_crtc_state(state,
							crtc);
	if (WARN_ON(!crtc_state))
		return -EINVAL;

	return mdp5_plane_atomic_check_with_state(crtc_state, new_plane_state);
}

static void mdp5_plane_atomic_update(struct drm_plane *plane,
				     struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									    plane);

	DBG("%s: update", plane->name);

	if (plane_enabled(new_state)) {
		int ret;

		ret = mdp5_plane_mode_set(plane,
					  new_state->crtc, new_state->fb,
					  &new_state->src, &new_state->dst);
		/* atomic_check should have ensured that this doesn't fail */
		WARN_ON(ret < 0);
	}
}

static int mdp5_plane_atomic_async_check(struct drm_plane *plane,
					 struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										  plane);
	struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(new_plane_state);
	struct drm_crtc_state *crtc_state;
	int min_scale, max_scale;
	int ret;

	crtc_state = drm_atomic_get_existing_crtc_state(state,
							new_plane_state->crtc);
	if (WARN_ON(!crtc_state))
		return -EINVAL;

	if (!crtc_state->active)
		return -EINVAL;

	/* don't use fast path if we don't have a hwpipe allocated yet */
	if (!mdp5_state->hwpipe)
		return -EINVAL;

	/* only allow changing of position (crtc x/y or src x/y) in fast path */
	if (plane->state->crtc != new_plane_state->crtc ||
	    plane->state->src_w != new_plane_state->src_w ||
	    plane->state->src_h != new_plane_state->src_h ||
	    plane->state->crtc_w != new_plane_state->crtc_w ||
	    plane->state->crtc_h != new_plane_state->crtc_h ||
	    !plane->state->fb ||
	    plane->state->fb != new_plane_state->fb)
		return -EINVAL;

	min_scale = FRAC_16_16(1, 8);
	max_scale = FRAC_16_16(8, 1);

	ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
						  min_scale, max_scale,
						  true, true);
	if (ret)
		return ret;

	/*
	 * If the visibility of the plane changes (i.e., if the cursor is
	 * clipped out completely), we can't take the async path because
	 * we need to stage/unstage the plane from the Layer Mixer(s). We
	 * also assign/unassign the hwpipe(s) tied to the plane. We avoid
	 * taking the fast path for both these reasons.
	 */
	if (new_plane_state->visible != plane->state->visible)
		return -EINVAL;

	return 0;
}

static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
					   struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									    plane);
	struct drm_framebuffer *old_fb = plane->state->fb;

	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;

	if (plane_enabled(new_state)) {
		struct mdp5_ctl *ctl;
		struct mdp5_pipeline *pipeline =
					mdp5_crtc_get_pipeline(new_state->crtc);
		int ret;

		ret = mdp5_plane_mode_set(plane, new_state->crtc, new_state->fb,
					  &new_state->src, &new_state->dst);
		WARN_ON(ret < 0);

		ctl = mdp5_crtc_get_ctl(new_state->crtc);

		mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane), true);
	}

	*to_mdp5_plane_state(plane->state) =
		*to_mdp5_plane_state(new_state);

	new_state->fb = old_fb;
}

static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
	.prepare_fb = mdp5_plane_prepare_fb,
	.cleanup_fb = mdp5_plane_cleanup_fb,
	.atomic_check = mdp5_plane_atomic_check,
	.atomic_update = mdp5_plane_atomic_update,
	.atomic_async_check = mdp5_plane_atomic_async_check,
	.atomic_async_update = mdp5_plane_atomic_async_update,
};

static void set_scanout_locked(struct mdp5_kms *mdp5_kms,
			       enum mdp5_pipe pipe,
			       struct drm_framebuffer *fb)
{
	struct msm_kms *kms = &mdp5_kms->base.base;

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
		   MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
		   MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
		   MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
		   MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
		   msm_framebuffer_iova(fb, kms->aspace, 0));
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
		   msm_framebuffer_iova(fb, kms->aspace, 1));
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
		   msm_framebuffer_iova(fb, kms->aspace, 2));
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
		   msm_framebuffer_iova(fb, kms->aspace, 3));
}

/* Note: mdp5_plane->pipe_lock must be locked */
static void csc_disable(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe)
{
	uint32_t value = mdp5_read(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe)) &
			 ~MDP5_PIPE_OP_MODE_CSC_1_EN;

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe), value);
}

/* Note: mdp5_plane->pipe_lock must be locked */
static void csc_enable(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe,
		       struct csc_cfg *csc)
{
	uint32_t i, mode = 0; /* RGB, no CSC */
	uint32_t *matrix;

	if (unlikely(!csc))
		return;

	if ((csc->type == CSC_YUV2RGB) || (CSC_YUV2YUV == csc->type))
		mode |= MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT(DATA_FORMAT_YUV);
	if ((csc->type == CSC_RGB2YUV) || (CSC_YUV2YUV == csc->type))
		mode |= MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT(DATA_FORMAT_YUV);
	mode |= MDP5_PIPE_OP_MODE_CSC_1_EN;
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe), mode);

	matrix = csc->matrix;
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_0(pipe),
		   MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11(matrix[0]) |
		   MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12(matrix[1]));
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_1(pipe),
		   MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13(matrix[2]) |
		   MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21(matrix[3]));
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_2(pipe),
		   MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22(matrix[4]) |
		   MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23(matrix[5]));
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_3(pipe),
		   MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31(matrix[6]) |
		   MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32(matrix[7]));
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_4(pipe),
		   MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33(matrix[8]));

	for (i = 0; i < ARRAY_SIZE(csc->pre_bias); i++) {
		uint32_t *pre_clamp = csc->pre_clamp;
		uint32_t *post_clamp = csc->post_clamp;

		mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_PRE_CLAMP(pipe, i),
			   MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH(pre_clamp[2*i+1]) |
			   MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW(pre_clamp[2*i]));

		mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_POST_CLAMP(pipe, i),
			   MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH(post_clamp[2*i+1]) |
			   MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW(post_clamp[2*i]));

		mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_PRE_BIAS(pipe, i),
			   MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE(csc->pre_bias[i]));

		mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_POST_BIAS(pipe, i),
			   MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE(csc->post_bias[i]));
	}
}

#define PHASE_STEP_SHIFT	21
#define DOWN_SCALE_RATIO_MAX	32	/* 2^(26-21) */

static int calc_phase_step(uint32_t src, uint32_t dst, uint32_t *out_phase)
{
	uint32_t unit;

	if (src == 0 || dst == 0)
		return -EINVAL;

	/*
	 * PHASE_STEP_X/Y is coded on 26 bits (25:0),
	 * where 2^21 represents the unity "1" in fixed-point hardware design.
	 * This leaves 5 bits for the integer part (downscale case):
	 * -> maximum downscale ratio = 0b1_1111 = 31
	 */
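	/*
	 * For example, downscaling src = 1920 to dst = 960 gives
	 * phase_step = (1 << 21) * 1920 / 960 = 0x400000, i.e. 2.0 in
	 * this fixed-point format.
	 */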
	if (src > (dst * DOWN_SCALE_RATIO_MAX))
		return -EOVERFLOW;

	unit = 1 << PHASE_STEP_SHIFT;
	*out_phase = mult_frac(unit, src, dst);

	return 0;
}

static int calc_scalex_steps(struct drm_plane *plane,
		uint32_t pixel_format, uint32_t src, uint32_t dest,
		uint32_t phasex_steps[COMP_MAX])
{
	const struct drm_format_info *info = drm_format_info(pixel_format);
	struct mdp5_kms *mdp5_kms = get_kms(plane);
	struct device *dev = mdp5_kms->dev->dev;
	uint32_t phasex_step;
	int ret;

	ret = calc_phase_step(src, dest, &phasex_step);
	if (ret) {
		DRM_DEV_ERROR(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret);
		return ret;
	}

	phasex_steps[COMP_0] = phasex_step;
	phasex_steps[COMP_3] = phasex_step;
	phasex_steps[COMP_1_2] = phasex_step / info->hsub;

	return 0;
}

static int calc_scaley_steps(struct drm_plane *plane,
		uint32_t pixel_format, uint32_t src, uint32_t dest,
		uint32_t phasey_steps[COMP_MAX])
{
	const struct drm_format_info *info = drm_format_info(pixel_format);
	struct mdp5_kms *mdp5_kms = get_kms(plane);
	struct device *dev = mdp5_kms->dev->dev;
	uint32_t phasey_step;
	int ret;

	ret = calc_phase_step(src, dest, &phasey_step);
	if (ret) {
		DRM_DEV_ERROR(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret);
		return ret;
	}

	phasey_steps[COMP_0] = phasey_step;
	phasey_steps[COMP_3] = phasey_step;
	phasey_steps[COMP_1_2] = phasey_step / info->vsub;

	return 0;
}

static uint32_t get_scale_config(const struct mdp_format *format,
		uint32_t src, uint32_t dst, bool horz)
{
	const struct drm_format_info *info = drm_format_info(format->base.pixel_format);
	bool scaling = format->is_yuv ? true : (src != dst);
	uint32_t sub;
	uint32_t ya_filter, uv_filter;
	bool yuv = format->is_yuv;

	if (!scaling)
		return 0;

	if (yuv) {
		sub = horz ? info->hsub : info->vsub;
		uv_filter = ((src / sub) <= dst) ?
			    SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
	}
	ya_filter = (src <= dst) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN;

	if (horz)
		return  MDP5_PIPE_SCALE_CONFIG_SCALEX_EN |
			MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(ya_filter) |
			MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(ya_filter) |
			COND(yuv, MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(uv_filter));
	else
		return  MDP5_PIPE_SCALE_CONFIG_SCALEY_EN |
			MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(ya_filter) |
			MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(ya_filter) |
			COND(yuv, MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(uv_filter));
}

static void calc_pixel_ext(const struct mdp_format *format,
		uint32_t src, uint32_t dst, uint32_t phase_step[2],
		int pix_ext_edge1[COMP_MAX], int pix_ext_edge2[COMP_MAX],
		bool horz)
{
	bool scaling = format->is_yuv ? true : (src != dst);
	int i;

	/*
	 * Note:
	 * We assume here that:
	 *     1. PCMN filter is used for downscale
	 *     2. bilinear filter is used for upscale
	 *     3. we are in a single pipe configuration
	 */

	for (i = 0; i < COMP_MAX; i++) {
		pix_ext_edge1[i] = 0;
		pix_ext_edge2[i] = scaling ? 1 : 0;
	}
}

static void mdp5_write_pixel_ext(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe,
	const struct mdp_format *format,
	uint32_t src_w, int pe_left[COMP_MAX], int pe_right[COMP_MAX],
	uint32_t src_h, int pe_top[COMP_MAX], int pe_bottom[COMP_MAX])
{
	const struct drm_format_info *info = drm_format_info(format->base.pixel_format);
	uint32_t lr, tb, req;
	int i;

	for (i = 0; i < COMP_MAX; i++) {
		uint32_t roi_w = src_w;
		uint32_t roi_h = src_h;

		if (format->is_yuv && i == COMP_1_2) {
			roi_w /= info->hsub;
			roi_h /= info->vsub;
		}

		lr  = (pe_left[i] >= 0) ?
			MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT(pe_left[i]) :
			MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF(pe_left[i]);

		lr |= (pe_right[i] >= 0) ?
			MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT(pe_right[i]) :
			MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF(pe_right[i]);

		tb  = (pe_top[i] >= 0) ?
			MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT(pe_top[i]) :
			MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF(pe_top[i]);

		tb |= (pe_bottom[i] >= 0) ?
			MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT(pe_bottom[i]) :
			MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF(pe_bottom[i]);

		req  = MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT(roi_w +
				pe_left[i] + pe_right[i]);

		req |= MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM(roi_h +
				pe_top[i] + pe_bottom[i]);

		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_LR(pipe, i), lr);
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_TB(pipe, i), tb);
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS(pipe, i), req);

		DBG("comp-%d (L/R): rpt=%d/%d, ovf=%d/%d, req=%d", i,
		    FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT),
		    FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT),
		    FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF),
		    FIELD(lr, MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF),
		    FIELD(req, MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT));

		DBG("comp-%d (T/B): rpt=%d/%d, ovf=%d/%d, req=%d", i,
		    FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT),
		    FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT),
		    FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF),
		    FIELD(tb, MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF),
		    FIELD(req, MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM));
	}
}

struct pixel_ext {
	int left[COMP_MAX];
	int right[COMP_MAX];
	int top[COMP_MAX];
	int bottom[COMP_MAX];
};

struct phase_step {
	u32 x[COMP_MAX];
	u32 y[COMP_MAX];
};

static void mdp5_hwpipe_mode_set(struct mdp5_kms *mdp5_kms,
				 struct mdp5_hw_pipe *hwpipe,
				 struct drm_framebuffer *fb,
				 struct phase_step *step,
				 struct pixel_ext *pe,
				 u32 scale_config, u32 hdecm, u32 vdecm,
				 bool hflip, bool vflip,
				 int crtc_x, int crtc_y,
				 unsigned int crtc_w, unsigned int crtc_h,
				 u32 src_img_w, u32 src_img_h,
				 u32 src_x, u32 src_y,
				 u32 src_w, u32 src_h)
{
	enum mdp5_pipe pipe = hwpipe->pipe;
	bool has_pe = hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT;
	const struct mdp_format *format =
			to_mdp_format(msm_framebuffer_format(fb));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
		   MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_img_w) |
		   MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_img_h));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe),
		   MDP5_PIPE_SRC_SIZE_WIDTH(src_w) |
		   MDP5_PIPE_SRC_SIZE_HEIGHT(src_h));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_XY(pipe),
		   MDP5_PIPE_SRC_XY_X(src_x) |
		   MDP5_PIPE_SRC_XY_Y(src_y));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_SIZE(pipe),
		   MDP5_PIPE_OUT_SIZE_WIDTH(crtc_w) |
		   MDP5_PIPE_OUT_SIZE_HEIGHT(crtc_h));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_XY(pipe),
		   MDP5_PIPE_OUT_XY_X(crtc_x) |
		   MDP5_PIPE_OUT_XY_Y(crtc_y));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
		   MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
		   MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
		   MDP5_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
		   MDP5_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
		   COND(format->alpha_enable, MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
		   MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
		   MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
		   COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) |
		   MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(format->fetch_type) |
		   MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe),
		   MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
		   MDP5_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
		   MDP5_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
		   MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe),
		   (hflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_LR : 0) |
		   (vflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_UD : 0) |
		   COND(has_pe, MDP5_PIPE_SRC_OP_MODE_SW_PIX_EXT_OVERRIDE) |
		   MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS));

	/* not using secure mode: */
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0);

	if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT)
		mdp5_write_pixel_ext(mdp5_kms, pipe, format,
				     src_w, pe->left, pe->right,
				     src_h, pe->top, pe->bottom);

	if (hwpipe->caps & MDP_PIPE_CAP_SCALE) {
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe),
			   step->x[COMP_0]);
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe),
			   step->y[COMP_0]);
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe),
			   step->x[COMP_1_2]);
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe),
			   step->y[COMP_1_2]);
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
			   MDP5_PIPE_DECIMATION_VERT(vdecm) |
			   MDP5_PIPE_DECIMATION_HORZ(hdecm));
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe),
			   scale_config);
	}

	if (hwpipe->caps & MDP_PIPE_CAP_CSC) {
		if (MDP_FORMAT_IS_YUV(format))
			csc_enable(mdp5_kms, pipe,
				   mdp_get_default_csc_cfg(CSC_YUV2RGB));
		else
			csc_disable(mdp5_kms, pipe);
	}

	set_scanout_locked(mdp5_kms, pipe, fb);
}

static int mdp5_plane_mode_set(struct drm_plane *plane,
		struct drm_crtc *crtc, struct drm_framebuffer *fb,
		struct drm_rect *src, struct drm_rect *dest)
{
	struct drm_plane_state *pstate = plane->state;
	struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe;
	struct mdp5_kms *mdp5_kms = get_kms(plane);
	enum mdp5_pipe pipe = hwpipe->pipe;
	struct mdp5_hw_pipe *right_hwpipe;
	const struct mdp_format *format;
	uint32_t nplanes, config = 0;
	struct phase_step step = { { 0 } };
	struct pixel_ext pe = { { 0 } };
	uint32_t hdecm = 0, vdecm = 0;
	uint32_t pix_format;
	unsigned int rotation;
	bool vflip, hflip;
	int crtc_x, crtc_y;
	unsigned int crtc_w, crtc_h;
	uint32_t src_x, src_y;
	uint32_t src_w, src_h;
	uint32_t src_img_w, src_img_h;
	int ret;

	nplanes = fb->format->num_planes;

	/* bad formats should already be rejected: */
	if (WARN_ON(nplanes > pipe2nclients(pipe)))
		return -EINVAL;

	format = to_mdp_format(msm_framebuffer_format(fb));
	pix_format = format->base.pixel_format;

	src_x = src->x1;
	src_y = src->y1;
	src_w = drm_rect_width(src);
	src_h = drm_rect_height(src);

	crtc_x = dest->x1;
	crtc_y = dest->y1;
	crtc_w = drm_rect_width(dest);
	crtc_h = drm_rect_height(dest);

	/* src values are in Q16 fixed point, convert to integer: */
	src_x = src_x >> 16;
	src_y = src_y >> 16;
	src_w = src_w >> 16;
	src_h = src_h >> 16;

	src_img_w = min(fb->width, src_w);
	src_img_h = min(fb->height, src_h);

	DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", plane->name,
	    fb->base.id, src_x, src_y, src_w, src_h,
	    crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);

	right_hwpipe = to_mdp5_plane_state(pstate)->r_hwpipe;
	if (right_hwpipe) {
		/*
		 * If the plane comprises 2 hw pipes, assume that the width
		 * is split equally across them. The only parameters that vary
		 * between the 2 pipes are src_x and crtc_x.
		 */
		crtc_w /= 2;
		src_w /= 2;
		src_img_w /= 2;
	}
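	/*
	 * Example (illustrative numbers): a 4096-pixel-wide plane on hardware
	 * with a 2048-pixel LM limit is handled as two 2048-pixel halves; the
	 * right hwpipe fetches from src_x + src_w and writes to
	 * crtc_x + crtc_w in the mode-set calls below.
	 */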

	ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, step.x);
	if (ret)
		return ret;

	ret = calc_scaley_steps(plane, pix_format, src_h, crtc_h, step.y);
	if (ret)
		return ret;

	if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT) {
		calc_pixel_ext(format, src_w, crtc_w, step.x,
			       pe.left, pe.right, true);
		calc_pixel_ext(format, src_h, crtc_h, step.y,
			       pe.top, pe.bottom, false);
	}

	/* TODO calc hdecm, vdecm */

	/* SCALE is used to both scale and up-sample chroma components */
	config |= get_scale_config(format, src_w, crtc_w, true);
	config |= get_scale_config(format, src_h, crtc_h, false);
	DBG("scale config = %x", config);

	rotation = drm_rotation_simplify(pstate->rotation,
					 DRM_MODE_ROTATE_0 |
					 DRM_MODE_REFLECT_X |
					 DRM_MODE_REFLECT_Y);
	hflip = !!(rotation & DRM_MODE_REFLECT_X);
	vflip = !!(rotation & DRM_MODE_REFLECT_Y);

	mdp5_hwpipe_mode_set(mdp5_kms, hwpipe, fb, &step, &pe,
			     config, hdecm, vdecm, hflip, vflip,
			     crtc_x, crtc_y, crtc_w, crtc_h,
			     src_img_w, src_img_h,
			     src_x, src_y, src_w, src_h);
	if (right_hwpipe)
		mdp5_hwpipe_mode_set(mdp5_kms, right_hwpipe, fb, &step, &pe,
				     config, hdecm, vdecm, hflip, vflip,
				     crtc_x + crtc_w, crtc_y, crtc_w, crtc_h,
				     src_img_w, src_img_h,
				     src_x + src_w, src_y, src_w, src_h);

	return ret;
}

/*
 * Use this func and the one below only after the atomic state has been
 * successfully swapped
 */
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
{
	struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);

	if (WARN_ON(!pstate->hwpipe))
		return SSPP_NONE;

	return pstate->hwpipe->pipe;
}

enum mdp5_pipe mdp5_plane_right_pipe(struct drm_plane *plane)
{
	struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);

	if (!pstate->r_hwpipe)
		return SSPP_NONE;

	return pstate->r_hwpipe->pipe;
}

uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
{
	struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
	u32 mask;

	if (WARN_ON(!pstate->hwpipe))
		return 0;

	mask = pstate->hwpipe->flush_mask;

	if (pstate->r_hwpipe)
		mask |= pstate->r_hwpipe->flush_mask;

	return mask;
}

/* initialize plane */
struct drm_plane *mdp5_plane_init(struct drm_device *dev,
				  enum drm_plane_type type)
{
	struct drm_plane *plane = NULL;
	struct mdp5_plane *mdp5_plane;
	int ret;

	mdp5_plane = kzalloc(sizeof(*mdp5_plane), GFP_KERNEL);
	if (!mdp5_plane) {
		ret = -ENOMEM;
		goto fail;
	}

	plane = &mdp5_plane->base;

	mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats,
					       ARRAY_SIZE(mdp5_plane->formats), false);

	ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
				       mdp5_plane->formats, mdp5_plane->nformats,
				       NULL, type, NULL);
	if (ret)
		goto fail;

	drm_plane_helper_add(plane, &mdp5_plane_helper_funcs);

	mdp5_plane_install_properties(plane, &plane->base);

	drm_plane_enable_fb_damage_clips(plane);

	return plane;

fail:
	if (plane)
		mdp5_plane_destroy(plane);

	return ERR_PTR(ret);
}