1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
3 */
4
5 #include <linux/delay.h>
6 #include "dpu_hwio.h"
7 #include "dpu_hw_ctl.h"
8 #include "dpu_kms.h"
9 #include "dpu_trace.h"
10
11 #define CTL_LAYER(lm) \
12 (((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
13 #define CTL_LAYER_EXT(lm) \
14 (0x40 + (((lm) - LM_0) * 0x004))
15 #define CTL_LAYER_EXT2(lm) \
16 (0x70 + (((lm) - LM_0) * 0x004))
17 #define CTL_LAYER_EXT3(lm) \
18 (0xA0 + (((lm) - LM_0) * 0x004))
19 #define CTL_TOP 0x014
20 #define CTL_FLUSH 0x018
21 #define CTL_START 0x01C
22 #define CTL_PREPARE 0x0d0
23 #define CTL_SW_RESET 0x030
24 #define CTL_LAYER_EXTN_OFFSET 0x40
25
26 #define CTL_MIXER_BORDER_OUT BIT(24)
27 #define CTL_FLUSH_MASK_CTL BIT(17)
28
29 #define DPU_REG_RESET_TIMEOUT_US 2000
30
/*
 * Look up the catalog entry for @ctl and populate the register map @b
 * (base address, block offset, length, hw version, debug mask).
 * Returns the matching dpu_ctl_cfg, or ERR_PTR(-ENOMEM) when @ctl is
 * not present in the catalog.
 */
static struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
		struct dpu_mdss_cfg *m,
		void __iomem *addr,
		struct dpu_hw_blk_reg_map *b)
{
	int i;

	for (i = 0; i < m->ctl_count; i++) {
		if (ctl != m->ctl[i].id)
			continue;

		b->base_off = addr;
		b->blk_off = m->ctl[i].base;
		b->length = m->ctl[i].len;
		b->hwversion = m->hwversion;
		b->log_mask = DPU_DBG_MASK_CTL;
		return &m->ctl[i];
	}

	return ERR_PTR(-ENOMEM);
}
50
_mixer_stages(const struct dpu_lm_cfg * mixer,int count,enum dpu_lm lm)51 static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
52 enum dpu_lm lm)
53 {
54 int i;
55 int stages = -EINVAL;
56
57 for (i = 0; i < count; i++) {
58 if (lm == mixer[i].id) {
59 stages = mixer[i].sblk->maxblendstages;
60 break;
61 }
62 }
63
64 return stages;
65 }
66
dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl * ctx)67 static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
68 {
69 struct dpu_hw_blk_reg_map *c = &ctx->hw;
70
71 return DPU_REG_READ(c, CTL_FLUSH);
72 }
73
dpu_hw_ctl_trigger_start(struct dpu_hw_ctl * ctx)74 static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
75 {
76 trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
77 dpu_hw_ctl_get_flush_register(ctx));
78 DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
79 }
80
dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl * ctx)81 static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
82 {
83 trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
84 dpu_hw_ctl_get_flush_register(ctx));
85 DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
86 }
87
dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl * ctx)88 static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
89 {
90 trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
91 dpu_hw_ctl_get_flush_register(ctx));
92 ctx->pending_flush_mask = 0x0;
93 }
94
/* Accumulate @flushbits into the software-side pending flush mask. */
static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
		u32 flushbits)
{
	trace_dpu_hw_ctl_update_pending_flush(flushbits,
					      ctx->pending_flush_mask);

	ctx->pending_flush_mask |= flushbits;
}
102
dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl * ctx)103 static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
104 {
105 return ctx->pending_flush_mask;
106 }
107
dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl * ctx)108 static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
109 {
110 trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
111 dpu_hw_ctl_get_flush_register(ctx));
112 DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
113 }
114
dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl * ctx,enum dpu_sspp sspp)115 static uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
116 enum dpu_sspp sspp)
117 {
118 uint32_t flushbits = 0;
119
120 switch (sspp) {
121 case SSPP_VIG0:
122 flushbits = BIT(0);
123 break;
124 case SSPP_VIG1:
125 flushbits = BIT(1);
126 break;
127 case SSPP_VIG2:
128 flushbits = BIT(2);
129 break;
130 case SSPP_VIG3:
131 flushbits = BIT(18);
132 break;
133 case SSPP_RGB0:
134 flushbits = BIT(3);
135 break;
136 case SSPP_RGB1:
137 flushbits = BIT(4);
138 break;
139 case SSPP_RGB2:
140 flushbits = BIT(5);
141 break;
142 case SSPP_RGB3:
143 flushbits = BIT(19);
144 break;
145 case SSPP_DMA0:
146 flushbits = BIT(11);
147 break;
148 case SSPP_DMA1:
149 flushbits = BIT(12);
150 break;
151 case SSPP_DMA2:
152 flushbits = BIT(24);
153 break;
154 case SSPP_DMA3:
155 flushbits = BIT(25);
156 break;
157 case SSPP_CURSOR0:
158 flushbits = BIT(22);
159 break;
160 case SSPP_CURSOR1:
161 flushbits = BIT(23);
162 break;
163 default:
164 break;
165 }
166
167 return flushbits;
168 }
169
dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl * ctx,enum dpu_lm lm)170 static uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
171 enum dpu_lm lm)
172 {
173 uint32_t flushbits = 0;
174
175 switch (lm) {
176 case LM_0:
177 flushbits = BIT(6);
178 break;
179 case LM_1:
180 flushbits = BIT(7);
181 break;
182 case LM_2:
183 flushbits = BIT(8);
184 break;
185 case LM_3:
186 flushbits = BIT(9);
187 break;
188 case LM_4:
189 flushbits = BIT(10);
190 break;
191 case LM_5:
192 flushbits = BIT(20);
193 break;
194 default:
195 return -EINVAL;
196 }
197
198 flushbits |= CTL_FLUSH_MASK_CTL;
199
200 return flushbits;
201 }
202
/*
 * OR the CTL_FLUSH bit for interface @intf into *@flushbits.
 * Returns 0 on success, -EINVAL for an unsupported interface
 * (in which case *@flushbits is left untouched).
 */
static int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
		u32 *flushbits, enum dpu_intf intf)
{
	u32 bit;

	switch (intf) {
	case INTF_0:
		bit = BIT(31);
		break;
	case INTF_1:
		bit = BIT(30);
		break;
	case INTF_2:
		bit = BIT(29);
		break;
	case INTF_3:
		bit = BIT(28);
		break;
	default:
		return -EINVAL;
	}

	*flushbits |= bit;
	return 0;
}
224
/*
 * Poll CTL_SW_RESET bit 0 until the hardware clears it or @timeout_us
 * elapses. Returns 0 when the reset completed, non-zero (the busy bit)
 * on timeout.
 */
static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	ktime_t deadline = ktime_add_us(ktime_get(), timeout_us);
	u32 busy;

	/*
	 * MDP takes around 30us to finish resetting its ctl path, so
	 * a single short sleep should normally cover completion.
	 */
	do {
		busy = DPU_REG_READ(c, CTL_SW_RESET) & 0x1;
		if (busy)
			usleep_range(20, 50);
	} while (busy && ktime_compare_safe(ktime_get(), deadline) < 0);

	return busy;
}
246
dpu_hw_ctl_reset_control(struct dpu_hw_ctl * ctx)247 static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
248 {
249 struct dpu_hw_blk_reg_map *c = &ctx->hw;
250
251 pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
252 DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
253 if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
254 return -EINVAL;
255
256 return 0;
257 }
258
dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl * ctx)259 static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
260 {
261 struct dpu_hw_blk_reg_map *c = &ctx->hw;
262 u32 status;
263
264 status = DPU_REG_READ(c, CTL_SW_RESET);
265 status &= 0x01;
266 if (!status)
267 return 0;
268
269 pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
270 if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
271 pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
272 return -EINVAL;
273 }
274
275 return 0;
276 }
277
dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl * ctx)278 static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
279 {
280 struct dpu_hw_blk_reg_map *c = &ctx->hw;
281 int i;
282
283 for (i = 0; i < ctx->mixer_count; i++) {
284 DPU_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
285 DPU_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
286 DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
287 DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0);
288 }
289 }
290
/*
 * Program the blend-stage configuration of layer mixer @lm.
 *
 * Each staged pipe is encoded across four registers: CTL_LAYER carries a
 * 3-bit stage value ('mix' = stage index + 1, masked to 3 bits), CTL_LAYER_EXT
 * carries per-pipe overflow bits for stage indices >= 7 ('ext'), and
 * CTL_LAYER_EXT2 / CTL_LAYER_EXT3 carry 4-bit stage values for DMA and
 * multirect RECT_1 pipes. Passing a NULL @stage_cfg programs only the
 * border-out color (all stage fields zero).
 */
static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
	u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
	int i, j;
	int stages;
	int pipes_per_stage;

	/* highest usable blend stage for this mixer, from the catalog */
	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
	if (stages < 0)
		return;

	/* source-split capable mixers allow multiple pipes per stage */
	if (test_bit(DPU_MIXER_SOURCESPLIT,
		&ctx->mixer_hw_caps->features))
		pipes_per_stage = PIPES_PER_STAGE;
	else
		pipes_per_stage = 1;

	mixercfg = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */

	if (!stage_cfg)
		goto exit;

	for (i = 0; i <= stages; i++) {
		/* overflow to ext register if 'i + 1 > 7' */
		mix = (i + 1) & 0x7;
		ext = i >= 7;

		for (j = 0 ; j < pipes_per_stage; j++) {
			/* RECT_1 pipes use the 4-bit EXT2/EXT3 encoding */
			enum dpu_sspp_multirect_index rect_index =
				stage_cfg->multirect_index[i][j];

			switch (stage_cfg->stage[i][j]) {
			case SSPP_VIG0:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
				} else {
					mixercfg |= mix << 0;
					mixercfg_ext |= ext << 0;
				}
				break;
			case SSPP_VIG1:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 4;
				} else {
					mixercfg |= mix << 3;
					mixercfg_ext |= ext << 2;
				}
				break;
			case SSPP_VIG2:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 6;
					mixercfg_ext |= ext << 4;
				}
				break;
			case SSPP_VIG3:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext3 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 26;
					mixercfg_ext |= ext << 6;
				}
				break;
			case SSPP_RGB0:
				mixercfg |= mix << 9;
				mixercfg_ext |= ext << 8;
				break;
			case SSPP_RGB1:
				mixercfg |= mix << 12;
				mixercfg_ext |= ext << 10;
				break;
			case SSPP_RGB2:
				mixercfg |= mix << 15;
				mixercfg_ext |= ext << 12;
				break;
			case SSPP_RGB3:
				mixercfg |= mix << 29;
				mixercfg_ext |= ext << 14;
				break;
			case SSPP_DMA0:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 8;
				} else {
					mixercfg |= mix << 18;
					mixercfg_ext |= ext << 16;
				}
				break;
			case SSPP_DMA1:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 12;
				} else {
					mixercfg |= mix << 21;
					mixercfg_ext |= ext << 18;
				}
				break;
			case SSPP_DMA2:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 16;
				} else {
					/*
					 * NOTE(review): 'mix' (masked to 3
					 * bits above) is OR'ed with the 4-bit
					 * stage value and the widened value is
					 * reused by the next pipe iteration —
					 * verify against the HW programming
					 * guide that this is intentional.
					 */
					mix |= (i + 1) & 0xF;
					mixercfg_ext2 |= mix << 0;
				}
				break;
			case SSPP_DMA3:
				if (rect_index == DPU_SSPP_RECT_1) {
					mixercfg_ext2 |= ((i + 1) & 0xF) << 20;
				} else {
					/* same widened-'mix' pattern as DMA2 */
					mix |= (i + 1) & 0xF;
					mixercfg_ext2 |= mix << 4;
				}
				break;
			case SSPP_CURSOR0:
				mixercfg_ext |= ((i + 1) & 0xF) << 20;
				break;
			case SSPP_CURSOR1:
				mixercfg_ext |= ((i + 1) & 0xF) << 26;
				break;
			default:
				break;
			}
		}
	}

exit:
	DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
	DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
	DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
	DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
}
424
dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl * ctx,struct dpu_hw_intf_cfg * cfg)425 static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
426 struct dpu_hw_intf_cfg *cfg)
427 {
428 struct dpu_hw_blk_reg_map *c = &ctx->hw;
429 u32 intf_cfg = 0;
430
431 intf_cfg |= (cfg->intf & 0xF) << 4;
432
433 if (cfg->mode_3d) {
434 intf_cfg |= BIT(19);
435 intf_cfg |= (cfg->mode_3d - 0x1) << 20;
436 }
437
438 switch (cfg->intf_mode_sel) {
439 case DPU_CTL_MODE_SEL_VID:
440 intf_cfg &= ~BIT(17);
441 intf_cfg &= ~(0x3 << 15);
442 break;
443 case DPU_CTL_MODE_SEL_CMD:
444 intf_cfg |= BIT(17);
445 intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
446 break;
447 default:
448 pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
449 return;
450 }
451
452 DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
453 }
454
_setup_ctl_ops(struct dpu_hw_ctl_ops * ops,unsigned long cap)455 static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
456 unsigned long cap)
457 {
458 ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
459 ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
460 ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
461 ops->trigger_flush = dpu_hw_ctl_trigger_flush;
462 ops->get_flush_register = dpu_hw_ctl_get_flush_register;
463 ops->trigger_start = dpu_hw_ctl_trigger_start;
464 ops->trigger_pending = dpu_hw_ctl_trigger_pending;
465 ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
466 ops->reset = dpu_hw_ctl_reset_control;
467 ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
468 ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
469 ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
470 ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
471 ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
472 ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
473 };
474
475 static struct dpu_hw_blk_ops dpu_hw_ops;
476
/*
 * Allocate and initialize a CTL hardware block context for @idx using
 * the catalog @m and mapped register base @addr.
 *
 * Returns the new context on success (caller owns it and must release
 * it with dpu_hw_ctl_destroy()), or an ERR_PTR on failure.
 */
struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
		void __iomem *addr,
		struct dpu_mdss_cfg *m)
{
	struct dpu_ctl_cfg *cfg;
	struct dpu_hw_ctl *c;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	/* locate the catalog entry and fill in c->hw */
	cfg = _ctl_offset(idx, m, addr, &c->hw);
	if (IS_ERR_OR_NULL(cfg)) {
		pr_err("failed to create dpu_hw_ctl %d\n", idx);
		kfree(c);
		return ERR_PTR(-EINVAL);
	}

	c->caps = cfg;
	c->idx = idx;
	c->mixer_count = m->mixer_count;
	c->mixer_hw_caps = m->mixer;
	_setup_ctl_ops(&c->ops, c->caps->features);

	dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);

	return c;
}
505
dpu_hw_ctl_destroy(struct dpu_hw_ctl * ctx)506 void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
507 {
508 if (ctx)
509 dpu_hw_blk_destroy(&ctx->base);
510 kfree(ctx);
511 }
512