// SPDX-License-Identifier: GPL-2.0+
/*
 * rcar_du_crtc.c  --  R-Car Display Unit CRTCs
 *
 * Copyright (C) 2013-2015 Renesas Electronics Corporation
 *
 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 */

#include <linux/clk.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/sys_soc.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank.h>

#include "rcar_du_crtc.h"
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
#include "rcar_du_kms.h"
#include "rcar_du_plane.h"
#include "rcar_du_regs.h"
#include "rcar_du_vsp.h"
#include "rcar_lvds.h"

static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg)
{
	struct rcar_du_device *rcdu = rcrtc->dev;

	return rcar_du_read(rcdu, rcrtc->mmio_offset + reg);
}

static void rcar_du_crtc_write(struct rcar_du_crtc *rcrtc, u32 reg, u32 data)
{
	struct rcar_du_device *rcdu = rcrtc->dev;

	rcar_du_write(rcdu, rcrtc->mmio_offset + reg, data);
}

static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr)
{
	struct rcar_du_device *rcdu = rcrtc->dev;

	rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
		      rcar_du_read(rcdu, rcrtc->mmio_offset + reg) & ~clr);
}

static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set)
{
	struct rcar_du_device *rcdu = rcrtc->dev;

	rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
		      rcar_du_read(rcdu, rcrtc->mmio_offset + reg) | set);
}

void rcar_du_crtc_dsysr_clr_set(struct rcar_du_crtc *rcrtc, u32 clr, u32 set)
{
	struct rcar_du_device *rcdu = rcrtc->dev;

	rcrtc->dsysr = (rcrtc->dsysr & ~clr) | set;
	rcar_du_write(rcdu, rcrtc->mmio_offset + DSYSR, rcrtc->dsysr);
}

/* -----------------------------------------------------------------------------
 * Hardware Setup
 */

struct dpll_info {
	unsigned int output;
	unsigned int fdpll;
	unsigned int n;
	unsigned int m;
};

static void rcar_du_dpll_divider(struct rcar_du_crtc *rcrtc,
				 struct dpll_info *dpll,
				 unsigned long input,
				 unsigned long target)
{
	unsigned long best_diff = (unsigned long)-1;
	unsigned long diff;
	unsigned int fdpll;
	unsigned int m;
	unsigned int n;

	/*
	 *   fin                                 fvco        fout       fclkout
	 * in --> [1/M] --> |PD| -> [LPF] -> [VCO] -> [1/P] -+-> [1/FDPLL] -> out
	 *              +-> |  |                             |
	 *              |                                    |
	 *              +---------------- [1/N] <------------+
	 *
	 *	fclkout = fvco / P / FDPLL -- (1)
	 *
	 * fin/M = fvco/P/N
	 *
	 *	fvco = fin * P *  N / M -- (2)
	 *
	 * (1) + (2) indicates
	 *
	 *	fclkout = fin * N / M / FDPLL
	 *
	 * NOTES
	 *	N	: (n + 1)
	 *	M	: (m + 1)
	 *	FDPLL	: (fdpll + 1)
	 *	P	: 2
	 *	2kHz < fvco < 4096MHz
	 *
	 * To minimize the jitter,
	 * N : as large as possible
	 * M : as small as possible
	 */
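	/*
	 * Worked example (hypothetical numbers): with fin = 30 MHz, M = 1
	 * (m = 0), N = 60 (n = 59) and FDPLL = 12 (fdpll = 11), the
	 * intermediate fout = 30 MHz * 60 / 1 = 1800 MHz and the resulting
	 * fclkout = 1800 MHz / 12 = 150 MHz.
	 */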
	for (m = 0; m < 4; m++) {
		for (n = 119; n > 38; n--) {
			/*
			 * This code only runs on 64-bit architectures, the
			 * unsigned long type can thus be used for 64-bit
			 * computation. It will still compile without any
			 * warning on 32-bit architectures.
			 *
			 * To optimize calculations, use fout instead of fvco
			 * to verify the VCO frequency constraint.
			 */
			unsigned long fout = input * (n + 1) / (m + 1);

			if (fout < 1000 || fout > 2048 * 1000 * 1000U)
				continue;

			for (fdpll = 1; fdpll < 32; fdpll++) {
				unsigned long output;

				output = fout / (fdpll + 1);
				if (output >= 400 * 1000 * 1000)
					continue;

				diff = abs((long)output - (long)target);
				if (best_diff > diff) {
					best_diff = diff;
					dpll->n = n;
					dpll->m = m;
					dpll->fdpll = fdpll;
					dpll->output = output;
				}

				if (diff == 0)
					goto done;
			}
		}
	}

done:
	dev_dbg(rcrtc->dev->dev,
		"output:%u, fdpll:%u, n:%u, m:%u, diff:%lu\n",
		 dpll->output, dpll->fdpll, dpll->n, dpll->m, best_diff);
}

struct du_clk_params {
	struct clk *clk;
	unsigned long rate;
	unsigned long diff;
	u32 escr;
};

static void rcar_du_escr_divider(struct clk *clk, unsigned long target,
				 u32 escr, struct du_clk_params *params)
{
	unsigned long rate;
	unsigned long diff;
	u32 div;

	/*
	 * If the target rate has already been achieved perfectly we can't do
	 * better.
	 */
	if (params->diff == 0)
		return;

	/*
	 * Compute the input clock rate and internal divisor values to obtain
	 * the clock rate closest to the target frequency.
	 */
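	/*
	 * The divisor value stored in the ESCR register is the division ratio
	 * minus one, so ratios from 1 to 64 are supported.
	 */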
	rate = clk_round_rate(clk, target);
	div = clamp(DIV_ROUND_CLOSEST(rate, target), 1UL, 64UL) - 1;
	diff = abs(rate / (div + 1) - target);

	/*
	 * Store the parameters if the resulting frequency is better than any
	 * previously calculated value.
	 */
	if (diff < params->diff) {
		params->clk = clk;
		params->rate = rate;
		params->diff = diff;
		params->escr = escr | div;
	}
}

static const struct soc_device_attribute rcar_du_r8a7795_es1[] = {
	{ .soc_id = "r8a7795", .revision = "ES1.*" },
	{ /* sentinel */ }
};

static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
{
	const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode;
	struct rcar_du_device *rcdu = rcrtc->dev;
	unsigned long mode_clock = mode->clock * 1000;
	u32 dsmr;
	u32 escr;

	if (rcdu->info->dpll_mask & (1 << rcrtc->index)) {
		unsigned long target = mode_clock;
		struct dpll_info dpll = { 0 };
		unsigned long extclk;
		u32 dpllcr;
		u32 div = 0;

		/*
		 * DU channels that have a display PLL can't use the internal
		 * system clock, and have no internal clock divider.
		 */

		/*
		 * The H3 ES1.x exhibits dot clock duty cycle stability issues.
		 * We can work around them by configuring the DPLL to twice the
		 * desired frequency, coupled with a /2 post-divider. Restrict
		 * the workaround to H3 ES1.x as ES2.0 and all other SoCs have
		 * no post-divider when a display PLL is present (as shown by
		 * the workaround breaking HDMI output on M3-W during testing).
		 */
		if (soc_device_match(rcar_du_r8a7795_es1)) {
			target *= 2;
			div = 1;
		}

		extclk = clk_get_rate(rcrtc->extclock);
		rcar_du_dpll_divider(rcrtc, &dpll, extclk, target);

		dpllcr = DPLLCR_CODE | DPLLCR_CLKE
		       | DPLLCR_FDPLL(dpll.fdpll)
		       | DPLLCR_N(dpll.n) | DPLLCR_M(dpll.m)
		       | DPLLCR_STBY;

		if (rcrtc->index == 1)
			dpllcr |= DPLLCR_PLCS1
			       |  DPLLCR_INCS_DOTCLKIN1;
		else
			dpllcr |= DPLLCR_PLCS0
			       |  DPLLCR_INCS_DOTCLKIN0;

		rcar_du_group_write(rcrtc->group, DPLLCR, dpllcr);

		escr = ESCR_DCLKSEL_DCLKIN | div;
	} else if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index)) {
		/*
		 * Use the LVDS PLL output as the dot clock when outputting to
		 * the LVDS encoder on an SoC that supports this clock routing
		 * option. We use the clock directly in that case, without any
		 * additional divider.
		 */
		escr = ESCR_DCLKSEL_DCLKIN;
	} else {
		struct du_clk_params params = { .diff = (unsigned long)-1 };

		rcar_du_escr_divider(rcrtc->clock, mode_clock,
				     ESCR_DCLKSEL_CLKS, &params);
		if (rcrtc->extclock)
			rcar_du_escr_divider(rcrtc->extclock, mode_clock,
					     ESCR_DCLKSEL_DCLKIN, &params);

		dev_dbg(rcrtc->dev->dev, "mode clock %lu %s rate %lu\n",
			mode_clock, params.clk == rcrtc->clock ? "cpg" : "ext",
			params.rate);

		clk_set_rate(params.clk, params.rate);
		escr = params.escr;
	}

	dev_dbg(rcrtc->dev->dev, "%s: ESCR 0x%08x\n", __func__, escr);

	rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? ESCR13 : ESCR02, escr);
	rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? OTAR13 : OTAR02, 0);

	/* Signal polarities */
	dsmr = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DSMR_VSL : 0)
	     | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? DSMR_HSL : 0)
	     | ((mode->flags & DRM_MODE_FLAG_INTERLACE) ? DSMR_ODEV : 0)
	     | DSMR_DIPM_DISP | DSMR_CSPM;
	rcar_du_crtc_write(rcrtc, DSMR, dsmr);

	/* Display timings */
	rcar_du_crtc_write(rcrtc, HDSR, mode->htotal - mode->hsync_start - 19);
	rcar_du_crtc_write(rcrtc, HDER, mode->htotal - mode->hsync_start +
					mode->hdisplay - 19);
	rcar_du_crtc_write(rcrtc, HSWR, mode->hsync_end -
					mode->hsync_start - 1);
	rcar_du_crtc_write(rcrtc, HCR,  mode->htotal - 1);

	rcar_du_crtc_write(rcrtc, VDSR, mode->crtc_vtotal -
					mode->crtc_vsync_end - 2);
	rcar_du_crtc_write(rcrtc, VDER, mode->crtc_vtotal -
					mode->crtc_vsync_end +
					mode->crtc_vdisplay - 2);
	rcar_du_crtc_write(rcrtc, VSPR, mode->crtc_vtotal -
					mode->crtc_vsync_end +
					mode->crtc_vsync_start - 1);
	rcar_du_crtc_write(rcrtc, VCR,  mode->crtc_vtotal - 1);

	rcar_du_crtc_write(rcrtc, DESR,  mode->htotal - mode->hsync_start - 1);
	rcar_du_crtc_write(rcrtc, DEWR,  mode->hdisplay);
}

static unsigned int plane_zpos(struct rcar_du_plane *plane)
{
	return plane->plane.state->normalized_zpos;
}

static const struct rcar_du_format_info *
plane_format(struct rcar_du_plane *plane)
{
	return to_rcar_plane_state(plane->plane.state)->format;
}

static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc)
{
	struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES];
	struct rcar_du_device *rcdu = rcrtc->dev;
	unsigned int num_planes = 0;
	unsigned int dptsr_planes;
	unsigned int hwplanes = 0;
	unsigned int prio = 0;
	unsigned int i;
	u32 dspr = 0;

	for (i = 0; i < rcrtc->group->num_planes; ++i) {
		struct rcar_du_plane *plane = &rcrtc->group->planes[i];
		unsigned int j;

		if (plane->plane.state->crtc != &rcrtc->crtc ||
		    !plane->plane.state->visible)
			continue;

		/* Insert the plane in the sorted planes array. */
		for (j = num_planes++; j > 0; --j) {
			if (plane_zpos(planes[j-1]) <= plane_zpos(plane))
				break;
			planes[j] = planes[j-1];
		}

		planes[j] = plane;
		prio += plane_format(plane)->planes * 4;
	}

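	/*
	 * Build the DSnPR value: walk the planes in ascending zpos order and
	 * pack each enabled hardware plane index into a 4-bit slot, from the
	 * most significant used slot down to bits [3:0]. Formats with two
	 * planes consume two consecutive hardware planes.
	 */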
	for (i = 0; i < num_planes; ++i) {
		struct rcar_du_plane *plane = planes[i];
		struct drm_plane_state *state = plane->plane.state;
		unsigned int index = to_rcar_plane_state(state)->hwindex;

		prio -= 4;
		dspr |= (index + 1) << prio;
		hwplanes |= 1 << index;

		if (plane_format(plane)->planes == 2) {
			index = (index + 1) % 8;

			prio -= 4;
			dspr |= (index + 1) << prio;
			hwplanes |= 1 << index;
		}
	}

	/* If VSP+DU integration is enabled the plane assignment is fixed. */
	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) {
		if (rcdu->info->gen < 3) {
			dspr = (rcrtc->index % 2) + 1;
			hwplanes = 1 << (rcrtc->index % 2);
		} else {
			dspr = (rcrtc->index % 2) ? 3 : 1;
			hwplanes = 1 << ((rcrtc->index % 2) ? 2 : 0);
		}
	}

	/*
	 * Update the planes to display timing and dot clock generator
	 * associations.
	 *
	 * Updating the DPTSR register requires restarting the CRTC group,
	 * resulting in visible flicker. To mitigate the issue only update the
	 * association if needed by enabled planes. Planes being disabled will
	 * keep their current association.
	 */
	mutex_lock(&rcrtc->group->lock);

	dptsr_planes = rcrtc->index % 2 ? rcrtc->group->dptsr_planes | hwplanes
		     : rcrtc->group->dptsr_planes & ~hwplanes;

	if (dptsr_planes != rcrtc->group->dptsr_planes) {
		rcar_du_group_write(rcrtc->group, DPTSR,
				    (dptsr_planes << 16) | dptsr_planes);
		rcrtc->group->dptsr_planes = dptsr_planes;

		if (rcrtc->group->used_crtcs)
			rcar_du_group_restart(rcrtc->group);
	}

	/* Restart the group if plane sources have changed. */
	if (rcrtc->group->need_restart)
		rcar_du_group_restart(rcrtc->group);

	mutex_unlock(&rcrtc->group->lock);

	rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR,
			    dspr);
}

/* -----------------------------------------------------------------------------
 * Page Flip
 */

void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
{
	struct drm_pending_vblank_event *event;
	struct drm_device *dev = rcrtc->crtc.dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = rcrtc->event;
	rcrtc->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (event == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	drm_crtc_send_vblank_event(&rcrtc->crtc, event);
	wake_up(&rcrtc->flip_wait);
	spin_unlock_irqrestore(&dev->event_lock, flags);

	drm_crtc_vblank_put(&rcrtc->crtc);
}

static bool rcar_du_crtc_page_flip_pending(struct rcar_du_crtc *rcrtc)
{
	struct drm_device *dev = rcrtc->crtc.dev;
	unsigned long flags;
	bool pending;

	spin_lock_irqsave(&dev->event_lock, flags);
	pending = rcrtc->event != NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	return pending;
}

static void rcar_du_crtc_wait_page_flip(struct rcar_du_crtc *rcrtc)
{
	struct rcar_du_device *rcdu = rcrtc->dev;

	if (wait_event_timeout(rcrtc->flip_wait,
			       !rcar_du_crtc_page_flip_pending(rcrtc),
			       msecs_to_jiffies(50)))
		return;

	dev_warn(rcdu->dev, "page flip timeout\n");

	rcar_du_crtc_finish_page_flip(rcrtc);
}

/* -----------------------------------------------------------------------------
 * Start/Stop and Suspend/Resume
 */

static void rcar_du_crtc_setup(struct rcar_du_crtc *rcrtc)
{
	/* Set display off and background to black */
	rcar_du_crtc_write(rcrtc, DOOR, DOOR_RGB(0, 0, 0));
	rcar_du_crtc_write(rcrtc, BPOR, BPOR_RGB(0, 0, 0));

	/* Configure display timings and output routing */
	rcar_du_crtc_set_display_timing(rcrtc);
	rcar_du_group_set_routing(rcrtc->group);

	/* Start with all planes disabled. */
	rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);

	/* Enable the VSP compositor. */
	if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
		rcar_du_vsp_enable(rcrtc);

	/* Turn vertical blanking interrupt reporting on. */
	drm_crtc_vblank_on(&rcrtc->crtc);
}

static int rcar_du_crtc_get(struct rcar_du_crtc *rcrtc)
{
	int ret;

	/*
	 * Guard against double-get, as the function is called from both the
	 * .atomic_enable() and .atomic_begin() handlers.
	 */
	if (rcrtc->initialized)
		return 0;

	ret = clk_prepare_enable(rcrtc->clock);
	if (ret < 0)
		return ret;

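	/*
	 * The external clock is optional; when it is absent rcrtc->extclock is
	 * NULL and clk_prepare_enable() treats the NULL clock as a no-op.
	 */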
	ret = clk_prepare_enable(rcrtc->extclock);
	if (ret < 0)
		goto error_clock;

	ret = rcar_du_group_get(rcrtc->group);
	if (ret < 0)
		goto error_group;

	rcar_du_crtc_setup(rcrtc);
	rcrtc->initialized = true;

	return 0;

error_group:
	clk_disable_unprepare(rcrtc->extclock);
error_clock:
	clk_disable_unprepare(rcrtc->clock);
	return ret;
}

static void rcar_du_crtc_put(struct rcar_du_crtc *rcrtc)
{
	rcar_du_group_put(rcrtc->group);

	clk_disable_unprepare(rcrtc->extclock);
	clk_disable_unprepare(rcrtc->clock);

	rcrtc->initialized = false;
}

static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
{
	bool interlaced;

	/*
	 * Select master sync mode. This enables display operation in master
	 * sync mode (with the HSYNC and VSYNC signals configured as outputs and
	 * actively driven).
	 */
	interlaced = rcrtc->crtc.mode.flags & DRM_MODE_FLAG_INTERLACE;
	rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_TVM_MASK | DSYSR_SCM_MASK,
				   (interlaced ? DSYSR_SCM_INT_VIDEO : 0) |
				   DSYSR_TVM_MASTER);

	rcar_du_group_start_stop(rcrtc->group, true);
}

static void rcar_du_crtc_disable_planes(struct rcar_du_crtc *rcrtc)
{
	struct rcar_du_device *rcdu = rcrtc->dev;
	struct drm_crtc *crtc = &rcrtc->crtc;
	u32 status;

	/* Make sure vblank interrupts are enabled. */
	drm_crtc_vblank_get(crtc);

	/*
	 * Disable planes and calculate how many vertical blanking interrupts we
	 * have to wait for. If a vertical blanking interrupt has been triggered
	 * but not processed yet, we don't know whether it occurred before or
	 * after the planes got disabled. We thus have to wait for two vblank
	 * interrupts in that case.
	 */
	spin_lock_irq(&rcrtc->vblank_lock);
	rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);
	status = rcar_du_crtc_read(rcrtc, DSSR);
	rcrtc->vblank_count = status & DSSR_VBK ? 2 : 1;
	spin_unlock_irq(&rcrtc->vblank_lock);

	if (!wait_event_timeout(rcrtc->vblank_wait, rcrtc->vblank_count == 0,
				msecs_to_jiffies(100)))
		dev_warn(rcdu->dev, "vertical blanking timeout\n");

	drm_crtc_vblank_put(crtc);
}

static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
{
	struct drm_crtc *crtc = &rcrtc->crtc;

	/*
	 * Disable all planes and wait for the change to take effect. This is
	 * required as the plane enable registers are updated on vblank, and no
	 * vblank will occur once the CRTC is stopped. Disabling planes when
	 * starting the CRTC thus wouldn't be enough as it would start scanning
	 * out immediately from old frame buffers until the next vblank.
	 *
	 * This increases the CRTC stop delay, especially when multiple CRTCs
	 * are stopped in one operation as we now wait for one vblank per CRTC.
	 * Whether this can be improved needs to be researched.
	 */
	rcar_du_crtc_disable_planes(rcrtc);

	/*
	 * Disable vertical blanking interrupt reporting. We first need to wait
	 * for page flip completion before stopping the CRTC as userspace
	 * expects page flips to eventually complete.
	 */
	rcar_du_crtc_wait_page_flip(rcrtc);
	drm_crtc_vblank_off(crtc);

	/* Disable the VSP compositor. */
	if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
		rcar_du_vsp_disable(rcrtc);

	/*
	 * Select switch sync mode. This stops display operation and configures
	 * the HSYNC and VSYNC signals as inputs.
	 *
	 * TODO: Find another way to stop the display for DUs that don't support
	 * TVM sync.
	 */
	if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_TVM_SYNC))
		rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_TVM_MASK,
					   DSYSR_TVM_SWITCH);

	rcar_du_group_start_stop(rcrtc->group, false);
}

/* -----------------------------------------------------------------------------
 * CRTC Functions
 */

static int rcar_du_crtc_atomic_check(struct drm_crtc *crtc,
				     struct drm_crtc_state *state)
{
	struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(state);
	struct drm_encoder *encoder;

	/* Store the routes from the CRTC output to the DU outputs. */
	rstate->outputs = 0;

	drm_for_each_encoder_mask(encoder, crtc->dev, state->encoder_mask) {
		struct rcar_du_encoder *renc;

		/* Skip the writeback encoder. */
		if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
			continue;

		renc = to_rcar_encoder(encoder);
		rstate->outputs |= BIT(renc->output);
	}

	return 0;
}

static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
				       struct drm_crtc_state *old_state)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(crtc->state);
	struct rcar_du_device *rcdu = rcrtc->dev;

	rcar_du_crtc_get(rcrtc);

	/*
	 * On D3/E3 the dot clock is provided by the LVDS encoder attached to
	 * the DU channel. We need to enable its clock output explicitly if
	 * the LVDS output is disabled.
	 */
	if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) &&
	    rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0)) {
		struct rcar_du_encoder *encoder =
			rcdu->encoders[RCAR_DU_OUTPUT_LVDS0 + rcrtc->index];
		const struct drm_display_mode *mode =
			&crtc->state->adjusted_mode;

		rcar_lvds_clk_enable(encoder->base.bridge,
				     mode->clock * 1000);
	}

	rcar_du_crtc_start(rcrtc);
}

static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
					struct drm_crtc_state *old_state)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(old_state);
	struct rcar_du_device *rcdu = rcrtc->dev;

	rcar_du_crtc_stop(rcrtc);
	rcar_du_crtc_put(rcrtc);

	if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) &&
	    rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0)) {
		struct rcar_du_encoder *encoder =
			rcdu->encoders[RCAR_DU_OUTPUT_LVDS0 + rcrtc->index];

		/*
		 * Disable the LVDS clock output, see
		 * rcar_du_crtc_atomic_enable().
		 */
		rcar_lvds_clk_disable(encoder->base.bridge);
	}

	spin_lock_irq(&crtc->dev->event_lock);
	if (crtc->state->event) {
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
	}
	spin_unlock_irq(&crtc->dev->event_lock);
}

static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
				      struct drm_crtc_state *old_crtc_state)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

	WARN_ON(!crtc->state->enable);

	/*
	 * If a mode set is in progress we can be called with the CRTC disabled.
	 * We thus need to first get and setup the CRTC in order to configure
	 * planes. We must *not* put the CRTC in .atomic_flush(), as it must be
	 * kept awake until the .atomic_enable() call that will follow. The get
	 * operation in .atomic_enable() will in that case be a no-op, and the
	 * CRTC will be put later in .atomic_disable().
	 *
	 * If a mode set is not in progress the CRTC is enabled, and the
	 * following get call will be a no-op. There is thus no need to balance
	 * it in .atomic_flush() either.
	 */
	rcar_du_crtc_get(rcrtc);

	if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
		rcar_du_vsp_atomic_begin(rcrtc);
}

static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc,
				      struct drm_crtc_state *old_crtc_state)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	struct drm_device *dev = rcrtc->crtc.dev;
	unsigned long flags;

	rcar_du_crtc_update_planes(rcrtc);

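	/*
	 * Arm the pending vblank event; it will be sent from
	 * rcar_du_crtc_finish_page_flip() once the page flip completes.
	 */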
	if (crtc->state->event) {
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);

		spin_lock_irqsave(&dev->event_lock, flags);
		rcrtc->event = crtc->state->event;
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
		rcar_du_vsp_atomic_flush(rcrtc);
}

static enum drm_mode_status
rcar_du_crtc_mode_valid(struct drm_crtc *crtc,
			const struct drm_display_mode *mode)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	struct rcar_du_device *rcdu = rcrtc->dev;
	bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
	unsigned int vbp;

	if (interlaced && !rcar_du_has(rcdu, RCAR_DU_FEATURE_INTERLACED))
		return MODE_NO_INTERLACE;

	/*
	 * The hardware requires a minimum combined horizontal sync and back
	 * porch of 20 pixels and a minimum vertical back porch of 3 lines.
	 */
	if (mode->htotal - mode->hsync_start < 20)
		return MODE_HBLANK_NARROW;

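	/*
	 * For interlaced modes the vertical timings cover the full frame, so
	 * convert the back porch to a per-field value before checking it
	 * against the minimum.
	 */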
	vbp = (mode->vtotal - mode->vsync_end) / (interlaced ? 2 : 1);
	if (vbp < 3)
		return MODE_VBLANK_NARROW;

	return MODE_OK;
}

static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
	.atomic_check = rcar_du_crtc_atomic_check,
	.atomic_begin = rcar_du_crtc_atomic_begin,
	.atomic_flush = rcar_du_crtc_atomic_flush,
	.atomic_enable = rcar_du_crtc_atomic_enable,
	.atomic_disable = rcar_du_crtc_atomic_disable,
	.mode_valid = rcar_du_crtc_mode_valid,
};

static void rcar_du_crtc_crc_init(struct rcar_du_crtc *rcrtc)
{
	struct rcar_du_device *rcdu = rcrtc->dev;
	const char **sources;
	unsigned int count;
	int i = -1;

	/* CRC available only on Gen3 HW. */
	if (rcdu->info->gen < 3)
		return;

	/* Reserve 1 for "auto" source. */
	count = rcrtc->vsp->num_planes + 1;

	sources = kmalloc_array(count, sizeof(*sources), GFP_KERNEL);
	if (!sources)
		return;

	sources[0] = kstrdup("auto", GFP_KERNEL);
	if (!sources[0])
		goto error;

	for (i = 0; i < rcrtc->vsp->num_planes; ++i) {
		struct drm_plane *plane = &rcrtc->vsp->planes[i].plane;
		char name[16];

		sprintf(name, "plane%u", plane->base.id);
		sources[i + 1] = kstrdup(name, GFP_KERNEL);
		if (!sources[i + 1])
			goto error;
	}

	rcrtc->sources = sources;
	rcrtc->sources_count = count;
	return;

error:
	while (i >= 0) {
		kfree(sources[i]);
		i--;
	}
	kfree(sources);
}

static void rcar_du_crtc_crc_cleanup(struct rcar_du_crtc *rcrtc)
{
	unsigned int i;

	if (!rcrtc->sources)
		return;

	for (i = 0; i < rcrtc->sources_count; i++)
		kfree(rcrtc->sources[i]);
	kfree(rcrtc->sources);

	rcrtc->sources = NULL;
	rcrtc->sources_count = 0;
}

static struct drm_crtc_state *
rcar_du_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
{
	struct rcar_du_crtc_state *state;
	struct rcar_du_crtc_state *copy;

	if (WARN_ON(!crtc->state))
		return NULL;

	state = to_rcar_crtc_state(crtc->state);
	copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
	if (copy == NULL)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &copy->state);

	return &copy->state;
}

static void rcar_du_crtc_atomic_destroy_state(struct drm_crtc *crtc,
					      struct drm_crtc_state *state)
{
	__drm_atomic_helper_crtc_destroy_state(state);
	kfree(to_rcar_crtc_state(state));
}

static void rcar_du_crtc_cleanup(struct drm_crtc *crtc)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

	rcar_du_crtc_crc_cleanup(rcrtc);

	return drm_crtc_cleanup(crtc);
}

static void rcar_du_crtc_reset(struct drm_crtc *crtc)
{
	struct rcar_du_crtc_state *state;

	if (crtc->state) {
		rcar_du_crtc_atomic_destroy_state(crtc, crtc->state);
		crtc->state = NULL;
	}

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state == NULL)
		return;

	state->crc.source = VSP1_DU_CRC_NONE;
	state->crc.index = 0;

	crtc->state = &state->state;
	crtc->state->crtc = crtc;
}

static int rcar_du_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

	rcar_du_crtc_write(rcrtc, DSRCR, DSRCR_VBCL);
	rcar_du_crtc_set(rcrtc, DIER, DIER_VBE);
	rcrtc->vblank_enable = true;

	return 0;
}

static void rcar_du_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

	rcar_du_crtc_clr(rcrtc, DIER, DIER_VBE);
	rcrtc->vblank_enable = false;
}

static int rcar_du_crtc_parse_crc_source(struct rcar_du_crtc *rcrtc,
					 const char *source_name,
					 enum vsp1_du_crc_source *source)
{
	unsigned int index;
	int ret;

	/*
	 * Parse the source name. Supported values are "plane%u" to compute the
	 * CRC on an input plane (%u is the plane ID), and "auto" to compute the
	 * CRC on the composer (VSP) output.
	 */

	if (!source_name) {
		*source = VSP1_DU_CRC_NONE;
		return 0;
	} else if (!strcmp(source_name, "auto")) {
		*source = VSP1_DU_CRC_OUTPUT;
		return 0;
	} else if (strstarts(source_name, "plane")) {
		unsigned int i;

		*source = VSP1_DU_CRC_PLANE;

		ret = kstrtouint(source_name + strlen("plane"), 10, &index);
		if (ret < 0)
			return ret;

		for (i = 0; i < rcrtc->vsp->num_planes; ++i) {
			if (index == rcrtc->vsp->planes[i].plane.base.id)
				return i;
		}
	}

	return -EINVAL;
}

static int rcar_du_crtc_verify_crc_source(struct drm_crtc *crtc,
					  const char *source_name,
					  size_t *values_cnt)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	enum vsp1_du_crc_source source;

	if (rcar_du_crtc_parse_crc_source(rcrtc, source_name, &source) < 0) {
		DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
		return -EINVAL;
	}

	*values_cnt = 1;
	return 0;
}

static const char *const *
rcar_du_crtc_get_crc_sources(struct drm_crtc *crtc, size_t *count)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

	*count = rcrtc->sources_count;
	return rcrtc->sources;
}

static int rcar_du_crtc_set_crc_source(struct drm_crtc *crtc,
				       const char *source_name)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	struct drm_modeset_acquire_ctx ctx;
	struct drm_crtc_state *crtc_state;
	struct drm_atomic_state *state;
	enum vsp1_du_crc_source source;
	unsigned int index;
	int ret;

	ret = rcar_du_crtc_parse_crc_source(rcrtc, source_name, &source);
	if (ret < 0)
		return ret;

	index = ret;

	/* Perform an atomic commit to set the CRC source. */
	drm_modeset_acquire_init(&ctx, 0);

	state = drm_atomic_state_alloc(crtc->dev);
	if (!state) {
		ret = -ENOMEM;
		goto unlock;
	}

	state->acquire_ctx = &ctx;

retry:
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (!IS_ERR(crtc_state)) {
		struct rcar_du_crtc_state *rcrtc_state;

		rcrtc_state = to_rcar_crtc_state(crtc_state);
		rcrtc_state->crc.source = source;
		rcrtc_state->crc.index = index;

		ret = drm_atomic_commit(state);
	} else {
		ret = PTR_ERR(crtc_state);
	}

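	/*
	 * -EDEADLK indicates modeset lock contention: clear the atomic state,
	 * back off and retry the whole operation.
	 */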
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

unlock:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

static const struct drm_crtc_funcs crtc_funcs_gen2 = {
	.reset = rcar_du_crtc_reset,
	.destroy = drm_crtc_cleanup,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = rcar_du_crtc_atomic_duplicate_state,
	.atomic_destroy_state = rcar_du_crtc_atomic_destroy_state,
	.enable_vblank = rcar_du_crtc_enable_vblank,
	.disable_vblank = rcar_du_crtc_disable_vblank,
};

static const struct drm_crtc_funcs crtc_funcs_gen3 = {
	.reset = rcar_du_crtc_reset,
	.destroy = rcar_du_crtc_cleanup,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = rcar_du_crtc_atomic_duplicate_state,
	.atomic_destroy_state = rcar_du_crtc_atomic_destroy_state,
	.enable_vblank = rcar_du_crtc_enable_vblank,
	.disable_vblank = rcar_du_crtc_disable_vblank,
	.set_crc_source = rcar_du_crtc_set_crc_source,
	.verify_crc_source = rcar_du_crtc_verify_crc_source,
	.get_crc_sources = rcar_du_crtc_get_crc_sources,
};

/* -----------------------------------------------------------------------------
 * Interrupt Handling
 */

static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
{
	struct rcar_du_crtc *rcrtc = arg;
	struct rcar_du_device *rcdu = rcrtc->dev;
	irqreturn_t ret = IRQ_NONE;
	u32 status;

	spin_lock(&rcrtc->vblank_lock);

	status = rcar_du_crtc_read(rcrtc, DSSR);
	rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);

	if (status & DSSR_VBK) {
		/*
		 * Wake up the vblank wait if the counter reaches 0. This must
		 * be protected by the vblank_lock to avoid races in
		 * rcar_du_crtc_disable_planes().
		 */
		if (rcrtc->vblank_count) {
			if (--rcrtc->vblank_count == 0)
				wake_up(&rcrtc->vblank_wait);
		}
	}

	spin_unlock(&rcrtc->vblank_lock);

	if (status & DSSR_VBK) {
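		/*
		 * When the VSP handles compositing (Gen3), vblank reporting
		 * and page flip completion are driven from the VSP frame end
		 * handler instead, so only perform them here on Gen1/Gen2.
		 */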
		if (rcdu->info->gen < 3) {
			drm_crtc_handle_vblank(&rcrtc->crtc);
			rcar_du_crtc_finish_page_flip(rcrtc);
		}

		ret = IRQ_HANDLED;
	}

	return ret;
}

/* -----------------------------------------------------------------------------
 * Initialization
 */

int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
			unsigned int hwindex)
{
	static const unsigned int mmio_offsets[] = {
		DU0_REG_OFFSET, DU1_REG_OFFSET, DU2_REG_OFFSET, DU3_REG_OFFSET
	};

	struct rcar_du_device *rcdu = rgrp->dev;
	struct platform_device *pdev = to_platform_device(rcdu->dev);
	struct rcar_du_crtc *rcrtc = &rcdu->crtcs[swindex];
	struct drm_crtc *crtc = &rcrtc->crtc;
	struct drm_plane *primary;
	unsigned int irqflags;
	struct clk *clk;
	char clk_name[9];
	char *name;
	int irq;
	int ret;

	/* Get the CRTC clock and the optional external clock. */
	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
		sprintf(clk_name, "du.%u", hwindex);
		name = clk_name;
	} else {
		name = NULL;
	}

	rcrtc->clock = devm_clk_get(rcdu->dev, name);
	if (IS_ERR(rcrtc->clock)) {
		dev_err(rcdu->dev, "no clock for DU channel %u\n", hwindex);
		return PTR_ERR(rcrtc->clock);
	}

	sprintf(clk_name, "dclkin.%u", hwindex);
	clk = devm_clk_get(rcdu->dev, clk_name);
	if (!IS_ERR(clk)) {
		rcrtc->extclock = clk;
	} else if (PTR_ERR(clk) == -EPROBE_DEFER) {
		return -EPROBE_DEFER;
	} else if (rcdu->info->dpll_mask & BIT(hwindex)) {
		/*
		 * DU channels that have a display PLL can't use the internal
		 * system clock and thus require an external clock.
		 */
		ret = PTR_ERR(clk);
		dev_err(rcdu->dev, "can't get dclkin.%u: %d\n", hwindex, ret);
		return ret;
	}

	init_waitqueue_head(&rcrtc->flip_wait);
	init_waitqueue_head(&rcrtc->vblank_wait);
	spin_lock_init(&rcrtc->vblank_lock);

	rcrtc->dev = rcdu;
	rcrtc->group = rgrp;
	rcrtc->mmio_offset = mmio_offsets[hwindex];
	rcrtc->index = hwindex;
	rcrtc->dsysr = (rcrtc->index % 2 ? 0 : DSYSR_DRES) | DSYSR_TVM_TVSYNC;

	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE))
		primary = &rcrtc->vsp->planes[rcrtc->vsp_pipe].plane;
	else
		primary = &rgrp->planes[swindex % 2].plane;

	ret = drm_crtc_init_with_planes(rcdu->ddev, crtc, primary, NULL,
					rcdu->info->gen <= 2 ?
					&crtc_funcs_gen2 : &crtc_funcs_gen3,
					NULL);
	if (ret < 0)
		return ret;

	drm_crtc_helper_add(crtc, &crtc_helper_funcs);

	/* Start with vertical blanking interrupt reporting disabled. */
	drm_crtc_vblank_off(crtc);

	/* Register the interrupt handler. */
	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
		/* The IRQs are associated with the CRTC (sw)index. */
		irq = platform_get_irq(pdev, swindex);
		irqflags = 0;
	} else {
		irq = platform_get_irq(pdev, 0);
		irqflags = IRQF_SHARED;
	}

	if (irq < 0) {
		dev_err(rcdu->dev, "no IRQ for CRTC %u\n", swindex);
		return irq;
	}

	ret = devm_request_irq(rcdu->dev, irq, rcar_du_crtc_irq, irqflags,
			       dev_name(rcdu->dev), rcrtc);
	if (ret < 0) {
		dev_err(rcdu->dev,
			"failed to register IRQ for CRTC %u\n", swindex);
		return ret;
	}

	rcar_du_crtc_crc_init(rcrtc);

	return 0;
}