// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2018, 2020-2021 The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/delay.h>
#include "dpu_encoder_phys.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_pingpong.h"
#include "dpu_core_irq.h"
#include "dpu_formats.h"
#include "dpu_trace.h"
#include "disp/msm_disp_snapshot.h"

#define DPU_DEBUG_CMDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
		(e) && (e)->base.parent ? \
		(e)->base.parent->base.id : -1, \
		(e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)

#define DPU_ERROR_CMDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
		(e) && (e)->base.parent ? \
		(e)->base.parent->base.id : -1, \
		(e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)

#define to_dpu_encoder_phys_cmd(x) \
	container_of(x, struct dpu_encoder_phys_cmd, base)

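/* consecutive pp_done timeouts tolerated before declaring the panel dead */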
#define PP_TIMEOUT_MAX_TRIALS	10

/*
 * Tearcheck sync start and continue thresholds are empirically found
 * based on common panels. In the future, we may want to allow panels to
 * override these default values.
 */
#define DEFAULT_TEARCHECK_SYNC_THRESH_START	4
#define DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE	4

#define DPU_ENC_WR_PTR_START_TIMEOUT_US 20000

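/* polling interval, in microseconds, used while waiting for frame transfer to stop */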
#define DPU_ENC_MAX_POLL_TIMEOUT_US	2000

static bool dpu_encoder_phys_cmd_is_master(struct dpu_encoder_phys *phys_enc)
{
	return (phys_enc->split_role != ENC_ROLE_SLAVE);
}

static void _dpu_encoder_phys_cmd_update_intf_cfg(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_intf_cfg intf_cfg = { 0 };

	ctl = phys_enc->hw_ctl;
	if (!ctl->ops.setup_intf_cfg)
		return;

	intf_cfg.intf = phys_enc->intf_idx;
	intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_CMD;
	intf_cfg.stream_sel = cmd_enc->stream_sel;
	intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
	ctl->ops.setup_intf_cfg(ctl, &intf_cfg);

	/* setup which pp blk will connect to this intf */
	if (test_bit(DPU_CTL_ACTIVE_CFG, &ctl->caps->features) &&
	    phys_enc->hw_intf->ops.bind_pingpong_blk)
		phys_enc->hw_intf->ops.bind_pingpong_blk(
				phys_enc->hw_intf,
				true,
				phys_enc->hw_pp->idx);
}

static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
{
	struct dpu_encoder_phys *phys_enc = arg;
	unsigned long lock_flags;
	int new_cnt;
	u32 event = DPU_ENCODER_FRAME_EVENT_DONE;

	if (!phys_enc->hw_pp)
		return;

	DPU_ATRACE_BEGIN("pp_done_irq");
	/* notify all synchronous clients first, then asynchronous clients */
	if (phys_enc->parent_ops->handle_frame_done)
		phys_enc->parent_ops->handle_frame_done(phys_enc->parent,
				phys_enc, event);

	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
	new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);

	trace_dpu_enc_phys_cmd_pp_tx_done(DRMID(phys_enc->parent),
					  phys_enc->hw_pp->idx - PINGPONG_0,
					  new_cnt, event);

	/* Signal any waiting atomic commit thread */
	wake_up_all(&phys_enc->pending_kickoff_wq);
	DPU_ATRACE_END("pp_done_irq");
}

static void dpu_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
{
	struct dpu_encoder_phys *phys_enc = arg;
	struct dpu_encoder_phys_cmd *cmd_enc;

	if (!phys_enc->hw_pp)
		return;

	DPU_ATRACE_BEGIN("rd_ptr_irq");
	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);

	if (phys_enc->parent_ops->handle_vblank_virt)
		phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent,
				phys_enc);

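	/* release one waiter parked in dpu_encoder_phys_cmd_wait_for_vblank() */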
	atomic_add_unless(&cmd_enc->pending_vblank_cnt, -1, 0);
	wake_up_all(&cmd_enc->pending_vblank_wq);
	DPU_ATRACE_END("rd_ptr_irq");
}

static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
{
	struct dpu_encoder_phys *phys_enc = arg;

	DPU_ATRACE_BEGIN("ctl_start_irq");

	atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);

	/* Signal any waiting ctl start interrupt */
	wake_up_all(&phys_enc->pending_kickoff_wq);
	DPU_ATRACE_END("ctl_start_irq");
}

static void dpu_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
{
	struct dpu_encoder_phys *phys_enc = arg;

	if (phys_enc->parent_ops->handle_underrun_virt)
		phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent,
			phys_enc);
}

static void dpu_encoder_phys_cmd_atomic_mode_set(
		struct dpu_encoder_phys *phys_enc,
		struct drm_crtc_state *crtc_state,
		struct drm_connector_state *conn_state)
{
	phys_enc->irq[INTR_IDX_CTL_START] = phys_enc->hw_ctl->caps->intr_start;

	phys_enc->irq[INTR_IDX_PINGPONG] = phys_enc->hw_pp->caps->intr_done;

	phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_pp->caps->intr_rdptr;

	phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;
}

static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR;
	bool do_log = false;
	struct drm_encoder *drm_enc;

	if (!phys_enc->hw_pp)
		return -EINVAL;

	drm_enc = phys_enc->parent;

	cmd_enc->pp_timeout_report_cnt++;
	if (cmd_enc->pp_timeout_report_cnt == PP_TIMEOUT_MAX_TRIALS) {
		frame_event |= DPU_ENCODER_FRAME_EVENT_PANEL_DEAD;
		do_log = true;
	} else if (cmd_enc->pp_timeout_report_cnt == 1) {
		do_log = true;
	}

	trace_dpu_enc_phys_cmd_pdone_timeout(DRMID(drm_enc),
		     phys_enc->hw_pp->idx - PINGPONG_0,
		     cmd_enc->pp_timeout_report_cnt,
		     atomic_read(&phys_enc->pending_kickoff_cnt),
		     frame_event);

	/* to avoid flooding, only log first time, and "dead" time */
	if (do_log) {
		DRM_ERROR("id:%d pp:%d ctl:%d kickoff timeout cnt:%d koff_cnt:%d\n",
			  DRMID(drm_enc),
			  phys_enc->hw_pp->idx - PINGPONG_0,
			  phys_enc->hw_ctl->idx - CTL_0,
			  cmd_enc->pp_timeout_report_cnt,
			  atomic_read(&phys_enc->pending_kickoff_cnt));
		msm_disp_snapshot_state(drm_enc->dev);
		dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
				phys_enc->irq[INTR_IDX_RDPTR]);
	}

	atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);

	/* request a ctl reset before the next kickoff */
	phys_enc->enable_state = DPU_ENC_ERR_NEEDS_HW_RESET;

	if (phys_enc->parent_ops->handle_frame_done)
		phys_enc->parent_ops->handle_frame_done(
				drm_enc, phys_enc, frame_event);

	return -ETIMEDOUT;
}

static int _dpu_encoder_phys_cmd_wait_for_idle(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_encoder_wait_info wait_info;
	int ret;

	wait_info.wq = &phys_enc->pending_kickoff_wq;
	wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;

	ret = dpu_encoder_helper_wait_for_irq(phys_enc,
			phys_enc->irq[INTR_IDX_PINGPONG],
			dpu_encoder_phys_cmd_pp_tx_done_irq,
			&wait_info);
	if (ret == -ETIMEDOUT)
		_dpu_encoder_phys_cmd_handle_ppdone_timeout(phys_enc);
	else if (!ret)
		cmd_enc->pp_timeout_report_cnt = 0;

	return ret;
}

static int dpu_encoder_phys_cmd_control_vblank_irq(
		struct dpu_encoder_phys *phys_enc,
		bool enable)
{
	int ret = 0;
	int refcount;

	if (!phys_enc->hw_pp) {
		DPU_ERROR("invalid encoder\n");
		return -EINVAL;
	}

	refcount = atomic_read(&phys_enc->vblank_refcount);

	/* Slave encoders don't report vblank */
	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		goto end;

	/* protect against negative */
	if (!enable && refcount == 0) {
		ret = -EINVAL;
		goto end;
	}

	DRM_DEBUG_KMS("id:%u pp:%d enable=%s/%d\n", DRMID(phys_enc->parent),
		      phys_enc->hw_pp->idx - PINGPONG_0,
		      enable ? "true" : "false", refcount);

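	/*
	 * Refcounted registration: hook the RD_PTR irq on the first enable
	 * reference and release it when the last reference is dropped.
	 */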
	if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
		ret = dpu_core_irq_register_callback(phys_enc->dpu_kms,
				phys_enc->irq[INTR_IDX_RDPTR],
				dpu_encoder_phys_cmd_pp_rd_ptr_irq,
				phys_enc);
	else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
		ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
				phys_enc->irq[INTR_IDX_RDPTR]);

end:
	if (ret) {
		DRM_ERROR("vblank irq err id:%u pp:%d ret:%d, enable %s/%d\n",
			  DRMID(phys_enc->parent),
			  phys_enc->hw_pp->idx - PINGPONG_0, ret,
			  enable ? "true" : "false", refcount);
	}

	return ret;
}

static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc,
		bool enable)
{
	trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent),
			phys_enc->hw_pp->idx - PINGPONG_0,
			enable, atomic_read(&phys_enc->vblank_refcount));

	if (enable) {
		dpu_core_irq_register_callback(phys_enc->dpu_kms,
				phys_enc->irq[INTR_IDX_PINGPONG],
				dpu_encoder_phys_cmd_pp_tx_done_irq,
				phys_enc);
		dpu_core_irq_register_callback(phys_enc->dpu_kms,
				phys_enc->irq[INTR_IDX_UNDERRUN],
				dpu_encoder_phys_cmd_underrun_irq,
				phys_enc);
		dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, true);

		if (dpu_encoder_phys_cmd_is_master(phys_enc))
			dpu_core_irq_register_callback(phys_enc->dpu_kms,
					phys_enc->irq[INTR_IDX_CTL_START],
					dpu_encoder_phys_cmd_ctl_start_irq,
					phys_enc);
	} else {
		if (dpu_encoder_phys_cmd_is_master(phys_enc))
			dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
					phys_enc->irq[INTR_IDX_CTL_START]);

		dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
				phys_enc->irq[INTR_IDX_UNDERRUN]);
		dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
		dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
				phys_enc->irq[INTR_IDX_PINGPONG]);
	}
}

static void dpu_encoder_phys_cmd_tearcheck_config(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_hw_tear_check tc_cfg = { 0 };
	struct drm_display_mode *mode;
	bool tc_enable = true;
	u32 vsync_hz;
	struct dpu_kms *dpu_kms;

	if (!phys_enc->hw_pp) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	mode = &phys_enc->cached_mode;

	DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);

	if (!phys_enc->hw_pp->ops.setup_tearcheck ||
	    !phys_enc->hw_pp->ops.enable_tearcheck) {
		DPU_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
		return;
	}

	dpu_kms = phys_enc->dpu_kms;

	/*
	 * TE default: DSI byte clock calculated based on 70 fps;
	 * around 14 ms to complete a kickoff cycle if TE is disabled;
	 * vclk_line based on 60 fps; write is faster than read;
	 * init == start == rdptr.
	 *
	 * vsync_count is the ratio of the MDP VSYNC clock frequency to
	 * the LCD panel refresh rate, divided by the number of rows
	 * (lines) in the panel.
	 */
	vsync_hz = dpu_kms_get_clk_rate(dpu_kms, "vsync");
	if (!vsync_hz) {
		DPU_DEBUG_CMDENC(cmd_enc, "invalid - vsync_hz %u\n",
				 vsync_hz);
		return;
	}

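	/*
	 * Illustrative arithmetic (assumed numbers, not from any datasheet):
	 * with a 19.2 MHz vsync clock, vtotal = 2000 and a 60 Hz refresh,
	 * vsync_count = 19200000 / (2000 * 60) = 160 clock ticks per line.
	 */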
	tc_cfg.vsync_count = vsync_hz /
				(mode->vtotal * drm_mode_vrefresh(mode));

	/*
	 * Set the sync_cfg_height to twice vtotal so that if we lose a
	 * TE event coming from the display TE pin we won't stall immediately
	 */
	tc_cfg.hw_vsync_mode = 1;
	tc_cfg.sync_cfg_height = mode->vtotal * 2;
	tc_cfg.vsync_init_val = mode->vdisplay;
	tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START;
	tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
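	/*
	 * Start at the first line past the active region and raise the
	 * read-pointer interrupt one line later, i.e. once the frame has
	 * been fully read out (our reading of these defaults, not a
	 * documented requirement).
	 */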
	tc_cfg.start_pos = mode->vdisplay;
	tc_cfg.rd_ptr_irq = mode->vdisplay + 1;

	DPU_DEBUG_CMDENC(cmd_enc,
		"tc %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n",
		phys_enc->hw_pp->idx - PINGPONG_0, vsync_hz,
		mode->vtotal, drm_mode_vrefresh(mode));
	DPU_DEBUG_CMDENC(cmd_enc,
		"tc %d enable %u start_pos %u rd_ptr_irq %u\n",
		phys_enc->hw_pp->idx - PINGPONG_0, tc_enable, tc_cfg.start_pos,
		tc_cfg.rd_ptr_irq);
	DPU_DEBUG_CMDENC(cmd_enc,
		"tc %d hw_vsync_mode %u vsync_count %u vsync_init_val %u\n",
		phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.hw_vsync_mode,
		tc_cfg.vsync_count, tc_cfg.vsync_init_val);
	DPU_DEBUG_CMDENC(cmd_enc,
		"tc %d cfgheight %u thresh_start %u thresh_cont %u\n",
		phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.sync_cfg_height,
		tc_cfg.sync_threshold_start, tc_cfg.sync_threshold_continue);

	phys_enc->hw_pp->ops.setup_tearcheck(phys_enc->hw_pp, &tc_cfg);
	phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, tc_enable);
}

static void _dpu_encoder_phys_cmd_pingpong_config(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);

	if (!phys_enc->hw_pp || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
		DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != NULL);
		return;
	}

	DPU_DEBUG_CMDENC(cmd_enc, "pp %d, enabling mode:\n",
			 phys_enc->hw_pp->idx - PINGPONG_0);
	drm_mode_debug_printmodeline(&phys_enc->cached_mode);

	_dpu_encoder_phys_cmd_update_intf_cfg(phys_enc);
	dpu_encoder_phys_cmd_tearcheck_config(phys_enc);
}

static bool dpu_encoder_phys_cmd_needs_single_flush(
		struct dpu_encoder_phys *phys_enc)
{
	/*
	 * we do separate flush for each CTL and let
	 * CTL_START synchronize them
	 */
	return false;
}

static void dpu_encoder_phys_cmd_enable_helper(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_ctl *ctl;

	if (!phys_enc->hw_pp) {
		DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
		return;
	}

	dpu_encoder_helper_split_config(phys_enc, phys_enc->intf_idx);

	_dpu_encoder_phys_cmd_pingpong_config(phys_enc);

	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		return;

	ctl = phys_enc->hw_ctl;
	ctl->ops.update_pending_flush_intf(ctl, phys_enc->intf_idx);
}

static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);

	if (!phys_enc->hw_pp) {
		DPU_ERROR("invalid phys encoder\n");
		return;
	}

	DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);

	if (phys_enc->enable_state == DPU_ENC_ENABLED) {
		DPU_ERROR("already enabled\n");
		return;
	}

	dpu_encoder_phys_cmd_enable_helper(phys_enc);
	phys_enc->enable_state = DPU_ENC_ENABLED;
}

static void _dpu_encoder_phys_cmd_connect_te(
		struct dpu_encoder_phys *phys_enc, bool enable)
{
	if (!phys_enc->hw_pp || !phys_enc->hw_pp->ops.connect_external_te)
		return;

	trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
	phys_enc->hw_pp->ops.connect_external_te(phys_enc->hw_pp, enable);
}

static void dpu_encoder_phys_cmd_prepare_idle_pc(
		struct dpu_encoder_phys *phys_enc)
{
	_dpu_encoder_phys_cmd_connect_te(phys_enc, false);
}

static int dpu_encoder_phys_cmd_get_line_count(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_pingpong *hw_pp;

	if (!phys_enc->hw_pp)
		return -EINVAL;

	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		return -EINVAL;

	hw_pp = phys_enc->hw_pp;
	if (!hw_pp->ops.get_line_count)
		return -EINVAL;

	return hw_pp->ops.get_line_count(hw_pp);
}

static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_hw_ctl *ctl;

	if (!phys_enc->hw_pp) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	DRM_DEBUG_KMS("id:%u pp:%d state:%d\n", DRMID(phys_enc->parent),
		      phys_enc->hw_pp->idx - PINGPONG_0,
		      phys_enc->enable_state);

	if (phys_enc->enable_state == DPU_ENC_DISABLED) {
		DPU_ERROR_CMDENC(cmd_enc, "already disabled\n");
		return;
	}

	if (phys_enc->hw_pp->ops.enable_tearcheck)
		phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, false);

	if (phys_enc->hw_intf->ops.bind_pingpong_blk) {
		phys_enc->hw_intf->ops.bind_pingpong_blk(
				phys_enc->hw_intf,
				false,
				phys_enc->hw_pp->idx);

		ctl = phys_enc->hw_ctl;
		ctl->ops.update_pending_flush_intf(ctl, phys_enc->intf_idx);
	}

	phys_enc->enable_state = DPU_ENC_DISABLED;
}

static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);

	kfree(cmd_enc);
}

static void dpu_encoder_phys_cmd_prepare_for_kickoff(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	int ret;

	if (!phys_enc->hw_pp) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	DRM_DEBUG_KMS("id:%u pp:%d pending_cnt:%d\n", DRMID(phys_enc->parent),
		      phys_enc->hw_pp->idx - PINGPONG_0,
		      atomic_read(&phys_enc->pending_kickoff_cnt));

	/*
	 * Mark the kickoff request as outstanding. If more than one is
	 * outstanding, we must wait for the previous kickoff to complete.
	 */
	ret = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
	if (ret) {
		/* force pending_kickoff_cnt 0 to discard failed kickoff */
		atomic_set(&phys_enc->pending_kickoff_cnt, 0);
		DRM_ERROR("failed wait_for_idle: id:%u ret:%d pp:%d\n",
			  DRMID(phys_enc->parent), ret,
			  phys_enc->hw_pp->idx - PINGPONG_0);
	}

	DPU_DEBUG_CMDENC(cmd_enc, "pp:%d pending_cnt %d\n",
			 phys_enc->hw_pp->idx - PINGPONG_0,
			 atomic_read(&phys_enc->pending_kickoff_cnt));
}

static bool dpu_encoder_phys_cmd_is_ongoing_pptx(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_pp_vsync_info info;

	if (!phys_enc)
		return false;

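	/*
	 * A frame transfer is still in flight while the write pointer sits
	 * inside the active region (0 < wr_ptr_line_count < vdisplay).
	 */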
	phys_enc->hw_pp->ops.get_vsync_info(phys_enc->hw_pp, &info);
	if (info.wr_ptr_line_count > 0 &&
	    info.wr_ptr_line_count < phys_enc->cached_mode.vdisplay)
		return true;

	return false;
}

static void dpu_encoder_phys_cmd_prepare_commit(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	int trial = 0;

	if (!phys_enc)
		return;
	if (!phys_enc->hw_pp)
		return;
	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		return;

	/* If autorefresh is already disabled, we have nothing to do */
	if (!phys_enc->hw_pp->ops.get_autorefresh(phys_enc->hw_pp, NULL))
		return;

	/*
	 * If autorefresh is enabled, disable it and make sure it is safe
	 * to proceed with the current frame commit/push. The sequence
	 * followed is:
	 * 1. Disable TE
	 * 2. Disable autorefresh config
	 * 3. Poll until the frame transfer is no longer ongoing
	 * 4. Re-enable TE
	 */
	_dpu_encoder_phys_cmd_connect_te(phys_enc, false);
	phys_enc->hw_pp->ops.setup_autorefresh(phys_enc->hw_pp, 0, false);

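	/*
	 * Poll in DPU_ENC_MAX_POLL_TIMEOUT_US steps and give up once the
	 * accumulated wait exceeds the KICKOFF_TIMEOUT_MS budget.
	 */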
	do {
		udelay(DPU_ENC_MAX_POLL_TIMEOUT_US);
		if ((trial * DPU_ENC_MAX_POLL_TIMEOUT_US)
				> (KICKOFF_TIMEOUT_MS * USEC_PER_MSEC)) {
			DPU_ERROR_CMDENC(cmd_enc,
					"disable autorefresh failed\n");
			break;
		}

		trial++;
	} while (dpu_encoder_phys_cmd_is_ongoing_pptx(phys_enc));

	_dpu_encoder_phys_cmd_connect_te(phys_enc, true);

	DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc),
			 "disabled autorefresh\n");
}

static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_encoder_wait_info wait_info;
	int ret;

	wait_info.wq = &phys_enc->pending_kickoff_wq;
	wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;

	ret = dpu_encoder_helper_wait_for_irq(phys_enc,
			phys_enc->irq[INTR_IDX_CTL_START],
			dpu_encoder_phys_cmd_ctl_start_irq,
			&wait_info);
	if (ret == -ETIMEDOUT) {
		DPU_ERROR_CMDENC(cmd_enc, "ctl start interrupt wait failed\n");
		ret = -EINVAL;
	} else if (!ret)
		ret = 0;

	return ret;
}

static int dpu_encoder_phys_cmd_wait_for_tx_complete(
		struct dpu_encoder_phys *phys_enc)
{
	int rc;

	rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
	if (rc) {
		DRM_ERROR("failed wait_for_idle: id:%u ret:%d intf:%d\n",
			  DRMID(phys_enc->parent), rc,
			  phys_enc->intf_idx - INTF_0);
	}

	return rc;
}

static int dpu_encoder_phys_cmd_wait_for_commit_done(
		struct dpu_encoder_phys *phys_enc)
{
	/* only required for master controller */
	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		return 0;

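	/*
	 * Once the CTL has been started, a frame is in flight and we must
	 * wait for its transfer to complete; before the first start it is
	 * enough to wait for CTL_START itself.
	 */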
	if (phys_enc->hw_ctl->ops.is_started(phys_enc->hw_ctl))
		return dpu_encoder_phys_cmd_wait_for_tx_complete(phys_enc);

	return _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
}

static int dpu_encoder_phys_cmd_wait_for_vblank(
		struct dpu_encoder_phys *phys_enc)
{
	int rc = 0;
	struct dpu_encoder_phys_cmd *cmd_enc;
	struct dpu_encoder_wait_info wait_info;

	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);

	/* only required for master controller */
	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		return rc;

	wait_info.wq = &cmd_enc->pending_vblank_wq;
	wait_info.atomic_cnt = &cmd_enc->pending_vblank_cnt;
	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;

	atomic_inc(&cmd_enc->pending_vblank_cnt);

	rc = dpu_encoder_helper_wait_for_irq(phys_enc,
			phys_enc->irq[INTR_IDX_RDPTR],
			dpu_encoder_phys_cmd_pp_rd_ptr_irq,
			&wait_info);

	return rc;
}

static void dpu_encoder_phys_cmd_handle_post_kickoff(
		struct dpu_encoder_phys *phys_enc)
{
	/*
	 * re-enable external TE, either for the first time after enabling
	 * or if disabled for autorefresh
	 */
	_dpu_encoder_phys_cmd_connect_te(phys_enc, true);
}

static void dpu_encoder_phys_cmd_trigger_start(
		struct dpu_encoder_phys *phys_enc)
{
	dpu_encoder_helper_trigger_start(phys_enc);
}

static void dpu_encoder_phys_cmd_init_ops(
		struct dpu_encoder_phys_ops *ops)
{
	ops->prepare_commit = dpu_encoder_phys_cmd_prepare_commit;
	ops->is_master = dpu_encoder_phys_cmd_is_master;
	ops->atomic_mode_set = dpu_encoder_phys_cmd_atomic_mode_set;
	ops->enable = dpu_encoder_phys_cmd_enable;
	ops->disable = dpu_encoder_phys_cmd_disable;
	ops->destroy = dpu_encoder_phys_cmd_destroy;
	ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq;
	ops->wait_for_commit_done = dpu_encoder_phys_cmd_wait_for_commit_done;
	ops->prepare_for_kickoff = dpu_encoder_phys_cmd_prepare_for_kickoff;
	ops->wait_for_tx_complete = dpu_encoder_phys_cmd_wait_for_tx_complete;
	ops->wait_for_vblank = dpu_encoder_phys_cmd_wait_for_vblank;
	ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
	ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
	ops->irq_control = dpu_encoder_phys_cmd_irq_control;
	ops->restore = dpu_encoder_phys_cmd_enable_helper;
	ops->prepare_idle_pc = dpu_encoder_phys_cmd_prepare_idle_pc;
	ops->handle_post_kickoff = dpu_encoder_phys_cmd_handle_post_kickoff;
	ops->get_line_count = dpu_encoder_phys_cmd_get_line_count;
}

struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
		struct dpu_enc_phys_init_params *p)
{
	struct dpu_encoder_phys *phys_enc = NULL;
	struct dpu_encoder_phys_cmd *cmd_enc = NULL;
	int i, ret = 0;

	DPU_DEBUG("intf %d\n", p->intf_idx - INTF_0);

	cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL);
	if (!cmd_enc) {
		ret = -ENOMEM;
		DPU_ERROR("failed to allocate\n");
		return ERR_PTR(ret);
	}
	phys_enc = &cmd_enc->base;
	phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
	phys_enc->intf_idx = p->intf_idx;

	dpu_encoder_phys_cmd_init_ops(&phys_enc->ops);
	phys_enc->parent = p->parent;
	phys_enc->parent_ops = p->parent_ops;
	phys_enc->dpu_kms = p->dpu_kms;
	phys_enc->split_role = p->split_role;
	phys_enc->intf_mode = INTF_MODE_CMD;
	phys_enc->enc_spinlock = p->enc_spinlock;
	cmd_enc->stream_sel = 0;
	phys_enc->enable_state = DPU_ENC_DISABLED;
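	/* mark every irq slot unassigned until atomic_mode_set() fills it in */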
	for (i = 0; i < ARRAY_SIZE(phys_enc->irq); i++)
		phys_enc->irq[i] = -EINVAL;

	atomic_set(&phys_enc->vblank_refcount, 0);
	atomic_set(&phys_enc->pending_kickoff_cnt, 0);
	atomic_set(&phys_enc->pending_ctlstart_cnt, 0);
	atomic_set(&cmd_enc->pending_vblank_cnt, 0);
	init_waitqueue_head(&phys_enc->pending_kickoff_wq);
	init_waitqueue_head(&cmd_enc->pending_vblank_wq);

	DPU_DEBUG_CMDENC(cmd_enc, "created\n");

	return phys_enc;
}