// SPDX-License-Identifier: GPL-2.0-or-later
/* drivers/gpu/drm/exynos/exynos7_drm_decon.c
 *
 * Copyright (C) 2014 Samsung Electronics Co.Ltd
 * Authors:
 *	Akshu Agarwal <akshua@gmail.com>
 *	Ajay Kumar <ajaykumar.rs@samsung.com>
 */

#include <linux/clk.h>
#include <linux/component.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include <video/of_display_timing.h>
#include <video/of_videomode.h>

#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_vblank.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_plane.h"
#include "regs-decon7.h"

/*
 * DECON stands for Display and Enhancement controller.
 */

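/*
 * Framebuffers narrower than this (in pixels, line padding included) are
 * fetched with 8-word DMA bursts instead of 16-word bursts; see
 * decon_win_set_pixfmt().
 */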
#define MIN_FB_WIDTH_FOR_16WORD_BURST 128

#define WINDOWS_NR 2

struct decon_context {
	struct device *dev;
	struct drm_device *drm_dev;
	void *dma_priv;
	struct exynos_drm_crtc *crtc;
	struct exynos_drm_plane planes[WINDOWS_NR];
	struct exynos_drm_plane_config configs[WINDOWS_NR];
	struct clk *pclk;
	struct clk *aclk;
	struct clk *eclk;
	struct clk *vclk;
	void __iomem *regs;
	unsigned long irq_flags;
	bool i80_if;
	bool suspended;
	wait_queue_head_t wait_vsync_queue;
	atomic_t wait_vsync_event;

	struct drm_encoder *encoder;
};

static const struct of_device_id decon_driver_dt_match[] = {
	{.compatible = "samsung,exynos7-decon"},
	{},
};
MODULE_DEVICE_TABLE(of, decon_driver_dt_match);

static const uint32_t decon_formats[] = {
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_RGBX8888,
	DRM_FORMAT_BGRX8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_BGRA8888,
};

static const enum drm_plane_type decon_win_types[WINDOWS_NR] = {
	DRM_PLANE_TYPE_PRIMARY,
	DRM_PLANE_TYPE_CURSOR,
};

static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc)
{
	struct decon_context *ctx = crtc->ctx;

	if (ctx->suspended)
		return;

	atomic_set(&ctx->wait_vsync_event, 1);

	/*
	 * Wait for DECON to signal the VSYNC interrupt, or give up after
	 * a 50ms timeout (i.e. a 20Hz refresh rate).
	 */
	if (!wait_event_timeout(ctx->wait_vsync_queue,
				!atomic_read(&ctx->wait_vsync_event),
				HZ/20))
		DRM_DEV_DEBUG_KMS(ctx->dev, "vblank wait timed out.\n");
}

static void decon_clear_channels(struct exynos_drm_crtc *crtc)
{
	struct decon_context *ctx = crtc->ctx;
	unsigned int win, ch_enabled = 0;

	/* Check if any channel is enabled. */
	for (win = 0; win < WINDOWS_NR; win++) {
		u32 val = readl(ctx->regs + WINCON(win));

		if (val & WINCONx_ENWIN) {
			val &= ~WINCONx_ENWIN;
			writel(val, ctx->regs + WINCON(win));
			ch_enabled = 1;
		}
	}

	/* Wait for vsync, as disabling a channel only takes effect at the next vsync */
	if (ch_enabled)
		decon_wait_for_vblank(ctx->crtc);
}

static int decon_ctx_initialize(struct decon_context *ctx,
				struct drm_device *drm_dev)
{
	ctx->drm_dev = drm_dev;

	decon_clear_channels(ctx->crtc);

	return exynos_drm_register_dma(drm_dev, ctx->dev, &ctx->dma_priv);
}

static void decon_ctx_remove(struct decon_context *ctx)
{
	/* detach this sub driver from iommu mapping if supported. */
	exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
}

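/*
 * Derive the VCLK divider from the VCLK parent rate and the mode clock.
 * The hardware divider field is 8 bits wide, so the result is clamped
 * to 0xff.
 */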
static u32 decon_calc_clkdiv(struct decon_context *ctx,
			     const struct drm_display_mode *mode)
{
	unsigned long ideal_clk = mode->clock;
	u32 clkdiv;

	/* Find the clock divider value that gets us closest to ideal_clk */
	clkdiv = DIV_ROUND_UP(clk_get_rate(ctx->vclk), ideal_clk);

	return (clkdiv < 0x100) ? clkdiv : 0xff;
}

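/*
 * Program the mode set on the CRTC: sync/porch timings (RGB interface
 * only), the active display size, the video output enable bits and the
 * VCLK divider, then request a standalone shadow-register update.
 */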
static void decon_commit(struct exynos_drm_crtc *crtc)
{
	struct decon_context *ctx = crtc->ctx;
	struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
	u32 val, clkdiv;

	if (ctx->suspended)
		return;

	/* nothing to do if we haven't set the mode yet */
	if (mode->htotal == 0 || mode->vtotal == 0)
		return;

	if (!ctx->i80_if) {
		int vsync_len, vbpd, vfpd, hsync_len, hbpd, hfpd;

		/* setup vertical timing values. */
		vsync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
		vbpd = mode->crtc_vtotal - mode->crtc_vsync_end;
		vfpd = mode->crtc_vsync_start - mode->crtc_vdisplay;

		val = VIDTCON0_VBPD(vbpd - 1) | VIDTCON0_VFPD(vfpd - 1);
		writel(val, ctx->regs + VIDTCON0);

		val = VIDTCON1_VSPW(vsync_len - 1);
		writel(val, ctx->regs + VIDTCON1);

		/* setup horizontal timing values. */
		hsync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
		hbpd = mode->crtc_htotal - mode->crtc_hsync_end;
		hfpd = mode->crtc_hsync_start - mode->crtc_hdisplay;

		val = VIDTCON2_HBPD(hbpd - 1) | VIDTCON2_HFPD(hfpd - 1);
		writel(val, ctx->regs + VIDTCON2);

		val = VIDTCON3_HSPW(hsync_len - 1);
		writel(val, ctx->regs + VIDTCON3);
	}

	/* setup horizontal and vertical display size. */
	val = VIDTCON4_LINEVAL(mode->vdisplay - 1) |
	      VIDTCON4_HOZVAL(mode->hdisplay - 1);
	writel(val, ctx->regs + VIDTCON4);

	writel(mode->vdisplay - 1, ctx->regs + LINECNT_OP_THRESHOLD);

	/*
	 * Register fields with the '_F' suffix are shadow fields; they are
	 * only applied at vsync (i.e. when DMA starts).
	 */
	val = VIDCON0_ENVID | VIDCON0_ENVID_F;
	writel(val, ctx->regs + VIDCON0);

	clkdiv = decon_calc_clkdiv(ctx, mode);
	if (clkdiv > 1) {
		val = VCLKCON1_CLKVAL_NUM_VCLK(clkdiv - 1);
		writel(val, ctx->regs + VCLKCON1);
		writel(val, ctx->regs + VCLKCON2);
	}

	val = readl(ctx->regs + DECON_UPDATE);
	val |= DECON_UPDATE_STANDALONE_F;
	writel(val, ctx->regs + DECON_UPDATE);
}

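/*
 * Enable the DECON frame interrupt (triggered on VSYNC for the RGB
 * interface). Bit 0 of irq_flags records the enable state so that
 * decon_atomic_enable() can restore it after runtime resume.
 */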
static int decon_enable_vblank(struct exynos_drm_crtc *crtc)
{
	struct decon_context *ctx = crtc->ctx;
	u32 val;

	if (ctx->suspended)
		return -EPERM;

	if (!test_and_set_bit(0, &ctx->irq_flags)) {
		val = readl(ctx->regs + VIDINTCON0);

		val |= VIDINTCON0_INT_ENABLE;

		if (!ctx->i80_if) {
			val |= VIDINTCON0_INT_FRAME;
			val &= ~VIDINTCON0_FRAMESEL0_MASK;
			val |= VIDINTCON0_FRAMESEL0_VSYNC;
		}

		writel(val, ctx->regs + VIDINTCON0);
	}

	return 0;
}

static void decon_disable_vblank(struct exynos_drm_crtc *crtc)
{
	struct decon_context *ctx = crtc->ctx;
	u32 val;

	if (ctx->suspended)
		return;

	if (test_and_clear_bit(0, &ctx->irq_flags)) {
		val = readl(ctx->regs + VIDINTCON0);

		val &= ~VIDINTCON0_INT_ENABLE;
		if (!ctx->i80_if)
			val &= ~VIDINTCON0_INT_FRAME;

		writel(val, ctx->regs + VIDINTCON0);
	}
}

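/*
 * Configure the window's pixel format: select the BPP mode, enable
 * per-pixel alpha blending for formats with an alpha channel and pick
 * the DMA burst length.
 */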
static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
				 struct drm_framebuffer *fb)
{
	unsigned long val;
	int padding;

	val = readl(ctx->regs + WINCON(win));
	val &= ~WINCONx_BPPMODE_MASK;

	switch (fb->format->format) {
	case DRM_FORMAT_RGB565:
		val |= WINCONx_BPPMODE_16BPP_565;
		val |= WINCONx_BURSTLEN_16WORD;
		break;
	case DRM_FORMAT_XRGB8888:
		val |= WINCONx_BPPMODE_24BPP_xRGB;
		val |= WINCONx_BURSTLEN_16WORD;
		break;
	case DRM_FORMAT_XBGR8888:
		val |= WINCONx_BPPMODE_24BPP_xBGR;
		val |= WINCONx_BURSTLEN_16WORD;
		break;
	case DRM_FORMAT_RGBX8888:
		val |= WINCONx_BPPMODE_24BPP_RGBx;
		val |= WINCONx_BURSTLEN_16WORD;
		break;
	case DRM_FORMAT_BGRX8888:
		val |= WINCONx_BPPMODE_24BPP_BGRx;
		val |= WINCONx_BURSTLEN_16WORD;
		break;
	case DRM_FORMAT_ARGB8888:
		val |= WINCONx_BPPMODE_32BPP_ARGB | WINCONx_BLD_PIX |
			WINCONx_ALPHA_SEL;
		val |= WINCONx_BURSTLEN_16WORD;
		break;
	case DRM_FORMAT_ABGR8888:
		val |= WINCONx_BPPMODE_32BPP_ABGR | WINCONx_BLD_PIX |
			WINCONx_ALPHA_SEL;
		val |= WINCONx_BURSTLEN_16WORD;
		break;
	case DRM_FORMAT_RGBA8888:
		val |= WINCONx_BPPMODE_32BPP_RGBA | WINCONx_BLD_PIX |
			WINCONx_ALPHA_SEL;
		val |= WINCONx_BURSTLEN_16WORD;
		break;
	case DRM_FORMAT_BGRA8888:
	default:
		val |= WINCONx_BPPMODE_32BPP_BGRA | WINCONx_BLD_PIX |
			WINCONx_ALPHA_SEL;
		val |= WINCONx_BURSTLEN_16WORD;
		break;
	}

	DRM_DEV_DEBUG_KMS(ctx->dev, "cpp = %d\n", fb->format->cpp[0]);

	/*
	 * On Exynos, a 16-word DMA burst causes permanent tearing for very
	 * small buffers, e.g. the cursor buffer. Switching the burst mode
	 * based on the plane size is not recommended, as the plane size
	 * varies a lot towards the edges of the screen and rapid movement
	 * makes DMA unstable, which results in IOMMU faults and tearing.
	 */
	padding = (fb->pitches[0] / fb->format->cpp[0]) - fb->width;
	if (fb->width + padding < MIN_FB_WIDTH_FOR_16WORD_BURST) {
		val &= ~WINCONx_BURSTLEN_MASK;
		val |= WINCONx_BURSTLEN_8WORD;
	}

	writel(val, ctx->regs + WINCON(win));
}

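/*
 * Program the color-key registers of a window with a zero compare key
 * and an all-ones color value. Hardware window 0 has no color-key
 * support, so callers skip it.
 */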
static void decon_win_set_colkey(struct decon_context *ctx, unsigned int win)
{
	unsigned int keycon0 = 0, keycon1 = 0;

	keycon0 = ~(WxKEYCON0_KEYBL_EN | WxKEYCON0_KEYEN_F |
			WxKEYCON0_DIRCON) | WxKEYCON0_COMPKEY(0);

	keycon1 = WxKEYCON1_COLVAL(0xffffffff);

	writel(keycon0, ctx->regs + WKEYCON0_BASE(win));
	writel(keycon1, ctx->regs + WKEYCON1_BASE(win));
}

/**
 * decon_shadow_protect_win() - disable updating values from shadow registers at vsync
 *
 * @ctx: display and enhancement controller context
 * @win: window to protect registers for
 * @protect: true to protect (disable updates)
 */
static void decon_shadow_protect_win(struct decon_context *ctx,
				     unsigned int win, bool protect)
{
	u32 bits, val;

	bits = SHADOWCON_WINx_PROTECT(win);

	val = readl(ctx->regs + SHADOWCON);
	if (protect)
		val |= bits;
	else
		val &= ~bits;
	writel(val, ctx->regs + SHADOWCON);
}

static void decon_atomic_begin(struct exynos_drm_crtc *crtc)
{
	struct decon_context *ctx = crtc->ctx;
	int i;

	if (ctx->suspended)
		return;

	for (i = 0; i < WINDOWS_NR; i++)
		decon_shadow_protect_win(ctx, i, true);
}

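/*
 * Program one hardware window from its plane state: buffer address,
 * buffer and source geometry, on-screen (OSD) position, alpha values,
 * pixel format and color key, then enable the window and kick a
 * standalone update once the shadow registers are unprotected.
 */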
static void decon_update_plane(struct exynos_drm_crtc *crtc,
			       struct exynos_drm_plane *plane)
{
	struct exynos_drm_plane_state *state =
				to_exynos_plane_state(plane->base.state);
	struct decon_context *ctx = crtc->ctx;
	struct drm_framebuffer *fb = state->base.fb;
	int padding;
	unsigned long val, alpha;
	unsigned int last_x;
	unsigned int last_y;
	unsigned int win = plane->index;
	unsigned int cpp = fb->format->cpp[0];
	unsigned int pitch = fb->pitches[0];

	if (ctx->suspended)
		return;

	/*
	 * The SHADOWCON/PRTCON register controls when programming takes
	 * effect. For example, if only the width field of a register had
	 * been set when DMA starts, the DECON hardware could malfunction.
	 * While a window is protected, register fields with the '_F'
	 * suffix are not latched at vsync; they are applied in one go once
	 * the window is unprotected again.
	 */

	/* buffer start address */
	val = (unsigned long)exynos_drm_fb_dma_addr(fb, 0);
	writel(val, ctx->regs + VIDW_BUF_START(win));

	padding = (pitch / cpp) - fb->width;

	/* buffer size */
	writel(fb->width + padding, ctx->regs + VIDW_WHOLE_X(win));
	writel(fb->height, ctx->regs + VIDW_WHOLE_Y(win));

	/* offset from the start of the buffer to read */
	writel(state->src.x, ctx->regs + VIDW_OFFSET_X(win));
	writel(state->src.y, ctx->regs + VIDW_OFFSET_Y(win));

	DRM_DEV_DEBUG_KMS(ctx->dev, "start addr = 0x%lx\n",
			  (unsigned long)val);
	DRM_DEV_DEBUG_KMS(ctx->dev, "ovl_width = %d, ovl_height = %d\n",
			  state->crtc.w, state->crtc.h);

	val = VIDOSDxA_TOPLEFT_X(state->crtc.x) |
		VIDOSDxA_TOPLEFT_Y(state->crtc.y);
	writel(val, ctx->regs + VIDOSD_A(win));

	last_x = state->crtc.x + state->crtc.w;
	if (last_x)
		last_x--;
	last_y = state->crtc.y + state->crtc.h;
	if (last_y)
		last_y--;

	val = VIDOSDxB_BOTRIGHT_X(last_x) | VIDOSDxB_BOTRIGHT_Y(last_y);

	writel(val, ctx->regs + VIDOSD_B(win));

	DRM_DEV_DEBUG_KMS(ctx->dev, "osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
			  state->crtc.x, state->crtc.y, last_x, last_y);

	/* OSD alpha */
	alpha = VIDOSDxC_ALPHA0_R_F(0x0) |
		VIDOSDxC_ALPHA0_G_F(0x0) |
		VIDOSDxC_ALPHA0_B_F(0x0);

	writel(alpha, ctx->regs + VIDOSD_C(win));

	alpha = VIDOSDxD_ALPHA1_R_F(0xff) |
		VIDOSDxD_ALPHA1_G_F(0xff) |
		VIDOSDxD_ALPHA1_B_F(0xff);

	writel(alpha, ctx->regs + VIDOSD_D(win));

	decon_win_set_pixfmt(ctx, win, fb);

	/* hardware window 0 doesn't support color key. */
	if (win != 0)
		decon_win_set_colkey(ctx, win);

	/* wincon */
	val = readl(ctx->regs + WINCON(win));
	val |= WINCONx_TRIPLE_BUF_MODE;
	val |= WINCONx_ENWIN;
	writel(val, ctx->regs + WINCON(win));

	/* Enable DMA channel and unprotect windows */
	decon_shadow_protect_win(ctx, win, false);

	val = readl(ctx->regs + DECON_UPDATE);
	val |= DECON_UPDATE_STANDALONE_F;
	writel(val, ctx->regs + DECON_UPDATE);
}

static void decon_disable_plane(struct exynos_drm_crtc *crtc,
				struct exynos_drm_plane *plane)
{
	struct decon_context *ctx = crtc->ctx;
	unsigned int win = plane->index;
	u32 val;

	if (ctx->suspended)
		return;

	/* protect windows */
	decon_shadow_protect_win(ctx, win, true);

	/* wincon */
	val = readl(ctx->regs + WINCON(win));
	val &= ~WINCONx_ENWIN;
	writel(val, ctx->regs + WINCON(win));

	val = readl(ctx->regs + DECON_UPDATE);
	val |= DECON_UPDATE_STANDALONE_F;
	writel(val, ctx->regs + DECON_UPDATE);
}

static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
{
	struct decon_context *ctx = crtc->ctx;
	int i;

	if (ctx->suspended)
		return;

	for (i = 0; i < WINDOWS_NR; i++)
		decon_shadow_protect_win(ctx, i, false);
	exynos_crtc_handle_event(crtc);
}

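/*
 * Reset the controller, route the output to display interface 0 (RGB or
 * i80 as per the DT configuration) and set up the VCLK control register.
 * For the RGB interface, VCLK is held between frames (VIDCON1_VCLK_HOLD).
 */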
static void decon_init(struct decon_context *ctx)
{
	u32 val;

	writel(VIDCON0_SWRESET, ctx->regs + VIDCON0);

	val = VIDOUTCON0_DISP_IF_0_ON;
	if (!ctx->i80_if)
		val |= VIDOUTCON0_RGBIF;
	writel(val, ctx->regs + VIDOUTCON0);

	writel(VCLKCON0_CLKVALUP | VCLKCON0_VCLKFREE, ctx->regs + VCLKCON0);

	if (!ctx->i80_if)
		writel(VIDCON1_VCLK_HOLD, ctx->regs + VIDCON1(0));
}

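/*
 * Power up the device via runtime PM, reinitialize the controller,
 * restore the vblank interrupt if it was enabled before the last
 * suspend and re-commit the current mode.
 */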
static void decon_atomic_enable(struct exynos_drm_crtc *crtc)
{
	struct decon_context *ctx = crtc->ctx;
	int ret;

	if (!ctx->suspended)
		return;

	ret = pm_runtime_resume_and_get(ctx->dev);
	if (ret < 0) {
		DRM_DEV_ERROR(ctx->dev, "failed to enable DECON device.\n");
		return;
	}

	decon_init(ctx);

	/* if vblank was enabled before suspend, re-enable it. */
	if (test_and_clear_bit(0, &ctx->irq_flags))
		decon_enable_vblank(ctx->crtc);

	decon_commit(ctx->crtc);

	ctx->suspended = false;
}

static void decon_atomic_disable(struct exynos_drm_crtc *crtc)
{
	struct decon_context *ctx = crtc->ctx;
	int i;

	if (ctx->suspended)
		return;

	/*
	 * We need to make sure that all windows are disabled before we
	 * suspend that connector. Otherwise we might try to scan from
	 * a destroyed buffer later.
	 */
	for (i = 0; i < WINDOWS_NR; i++)
		decon_disable_plane(crtc, &ctx->planes[i]);

	pm_runtime_put_sync(ctx->dev);

	ctx->suspended = true;
}

static const struct exynos_drm_crtc_ops decon_crtc_ops = {
	.atomic_enable = decon_atomic_enable,
	.atomic_disable = decon_atomic_disable,
	.enable_vblank = decon_enable_vblank,
	.disable_vblank = decon_disable_vblank,
	.atomic_begin = decon_atomic_begin,
	.update_plane = decon_update_plane,
	.disable_plane = decon_disable_plane,
	.atomic_flush = decon_atomic_flush,
};

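/*
 * Acknowledge the frame (or i80) interrupt, forward the vblank event to
 * DRM and wake up anyone sleeping in decon_wait_for_vblank().
 */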
static irqreturn_t decon_irq_handler(int irq, void *dev_id)
{
	struct decon_context *ctx = (struct decon_context *)dev_id;
	u32 val, clear_bit;

	val = readl(ctx->regs + VIDINTCON1);

	clear_bit = ctx->i80_if ? VIDINTCON1_INT_I80 : VIDINTCON1_INT_FRAME;
	if (val & clear_bit)
		writel(clear_bit, ctx->regs + VIDINTCON1);

	/* check whether the crtc has already been detached from the encoder */
	if (!ctx->drm_dev)
		goto out;

	if (!ctx->i80_if) {
		drm_crtc_handle_vblank(&ctx->crtc->base);

		/* set wait vsync event to zero and wake up queue. */
		if (atomic_read(&ctx->wait_vsync_event)) {
			atomic_set(&ctx->wait_vsync_event, 0);
			wake_up(&ctx->wait_vsync_queue);
		}
	}
out:
	return IRQ_HANDLED;
}

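/*
 * Component bind: register the DMA/IOMMU mapping, create one plane per
 * hardware window, create the CRTC and bind the optional DPI encoder.
 */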
static int decon_bind(struct device *dev, struct device *master, void *data)
{
	struct decon_context *ctx = dev_get_drvdata(dev);
	struct drm_device *drm_dev = data;
	struct exynos_drm_plane *exynos_plane;
	unsigned int i;
	int ret;

	ret = decon_ctx_initialize(ctx, drm_dev);
	if (ret) {
		DRM_DEV_ERROR(dev, "decon_ctx_initialize failed.\n");
		return ret;
	}

	for (i = 0; i < WINDOWS_NR; i++) {
		ctx->configs[i].pixel_formats = decon_formats;
		ctx->configs[i].num_pixel_formats = ARRAY_SIZE(decon_formats);
		ctx->configs[i].zpos = i;
		ctx->configs[i].type = decon_win_types[i];

		ret = exynos_plane_init(drm_dev, &ctx->planes[i], i,
					&ctx->configs[i]);
		if (ret)
			return ret;
	}

	exynos_plane = &ctx->planes[DEFAULT_WIN];
	ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
					   EXYNOS_DISPLAY_TYPE_LCD,
					   &decon_crtc_ops, ctx);
	if (IS_ERR(ctx->crtc)) {
		decon_ctx_remove(ctx);
		return PTR_ERR(ctx->crtc);
	}

	if (ctx->encoder)
		exynos_dpi_bind(drm_dev, ctx->encoder);

	return 0;
}

static void decon_unbind(struct device *dev, struct device *master,
			 void *data)
{
	struct decon_context *ctx = dev_get_drvdata(dev);

	decon_atomic_disable(ctx->crtc);

	if (ctx->encoder)
		exynos_dpi_remove(ctx->encoder);

	decon_ctx_remove(ctx);
}

static const struct component_ops decon_component_ops = {
	.bind = decon_bind,
	.unbind = decon_unbind,
};

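/*
 * Map the registers, look up the four DECON clocks, request the vsync
 * (or lcd_sys) interrupt, probe the DPI output and register with the
 * component framework. The device stays runtime-suspended until the
 * CRTC is enabled.
 */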
static int decon_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct decon_context *ctx;
	struct device_node *i80_if_timings;
	int ret;

	if (!dev->of_node)
		return -ENODEV;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = dev;
	ctx->suspended = true;

	i80_if_timings = of_get_child_by_name(dev->of_node, "i80-if-timings");
	if (i80_if_timings)
		ctx->i80_if = true;
	of_node_put(i80_if_timings);

	ctx->regs = of_iomap(dev->of_node, 0);
	if (!ctx->regs)
		return -ENOMEM;

	ctx->pclk = devm_clk_get(dev, "pclk_decon0");
	if (IS_ERR(ctx->pclk)) {
		dev_err(dev, "failed to get bus clock pclk\n");
		ret = PTR_ERR(ctx->pclk);
		goto err_iounmap;
	}

	ctx->aclk = devm_clk_get(dev, "aclk_decon0");
	if (IS_ERR(ctx->aclk)) {
		dev_err(dev, "failed to get bus clock aclk\n");
		ret = PTR_ERR(ctx->aclk);
		goto err_iounmap;
	}

	ctx->eclk = devm_clk_get(dev, "decon0_eclk");
	if (IS_ERR(ctx->eclk)) {
		dev_err(dev, "failed to get eclock\n");
		ret = PTR_ERR(ctx->eclk);
		goto err_iounmap;
	}

	ctx->vclk = devm_clk_get(dev, "decon0_vclk");
	if (IS_ERR(ctx->vclk)) {
		dev_err(dev, "failed to get vclock\n");
		ret = PTR_ERR(ctx->vclk);
		goto err_iounmap;
	}

	ret = platform_get_irq_byname(pdev, ctx->i80_if ? "lcd_sys" : "vsync");
	if (ret < 0)
		goto err_iounmap;

	ret = devm_request_irq(dev, ret, decon_irq_handler, 0, "drm_decon", ctx);
	if (ret) {
		dev_err(dev, "irq request failed.\n");
		goto err_iounmap;
	}

	init_waitqueue_head(&ctx->wait_vsync_queue);
	atomic_set(&ctx->wait_vsync_event, 0);

	platform_set_drvdata(pdev, ctx);

	ctx->encoder = exynos_dpi_probe(dev);
	if (IS_ERR(ctx->encoder)) {
		ret = PTR_ERR(ctx->encoder);
		goto err_iounmap;
	}

	pm_runtime_enable(dev);

	ret = component_add(dev, &decon_component_ops);
	if (ret)
		goto err_disable_pm_runtime;

	return ret;

err_disable_pm_runtime:
	pm_runtime_disable(dev);

err_iounmap:
	iounmap(ctx->regs);

	return ret;
}

static int decon_remove(struct platform_device *pdev)
{
	struct decon_context *ctx = dev_get_drvdata(&pdev->dev);

	pm_runtime_disable(&pdev->dev);

	iounmap(ctx->regs);

	component_del(&pdev->dev, &decon_component_ops);

	return 0;
}

#ifdef CONFIG_PM
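/*
 * Runtime PM callbacks: suspend simply gates the four DECON clocks;
 * resume ungates them in pclk -> aclk -> eclk -> vclk order and unwinds
 * on failure.
 */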
static int exynos7_decon_suspend(struct device *dev)
{
	struct decon_context *ctx = dev_get_drvdata(dev);

	clk_disable_unprepare(ctx->vclk);
	clk_disable_unprepare(ctx->eclk);
	clk_disable_unprepare(ctx->aclk);
	clk_disable_unprepare(ctx->pclk);

	return 0;
}

static int exynos7_decon_resume(struct device *dev)
{
	struct decon_context *ctx = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(ctx->pclk);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "Failed to prepare_enable the pclk [%d]\n",
			      ret);
		goto err_pclk_enable;
	}

	ret = clk_prepare_enable(ctx->aclk);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "Failed to prepare_enable the aclk [%d]\n",
			      ret);
		goto err_aclk_enable;
	}

	ret = clk_prepare_enable(ctx->eclk);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "Failed to prepare_enable the eclk [%d]\n",
			      ret);
		goto err_eclk_enable;
	}

	ret = clk_prepare_enable(ctx->vclk);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "Failed to prepare_enable the vclk [%d]\n",
			      ret);
		goto err_vclk_enable;
	}

	return 0;

err_vclk_enable:
	clk_disable_unprepare(ctx->eclk);
err_eclk_enable:
	clk_disable_unprepare(ctx->aclk);
err_aclk_enable:
	clk_disable_unprepare(ctx->pclk);
err_pclk_enable:
	return ret;
}
#endif

static const struct dev_pm_ops exynos7_decon_pm_ops = {
	SET_RUNTIME_PM_OPS(exynos7_decon_suspend, exynos7_decon_resume,
			   NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

struct platform_driver decon_driver = {
	.probe = decon_probe,
	.remove = decon_remove,
	.driver = {
		.name = "exynos-decon",
		.pm = &exynos7_decon_pm_ops,
		.of_match_table = decon_driver_dt_match,
	},
};