// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2018, 2020-2021 The Linux Foundation. All rights reserved.
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/delay.h>
#include "dpu_encoder_phys.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_pingpong.h"
#include "dpu_core_irq.h"
#include "dpu_formats.h"
#include "dpu_trace.h"
#include "disp/msm_disp_snapshot.h"

#define DPU_DEBUG_CMDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
		(e) && (e)->base.parent ? \
		(e)->base.parent->base.id : -1, \
		(e) ? (e)->base.hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)

#define DPU_ERROR_CMDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
		(e) && (e)->base.parent ? \
		(e)->base.parent->base.id : -1, \
		(e) ? (e)->base.hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)

#define to_dpu_encoder_phys_cmd(x) \
	container_of(x, struct dpu_encoder_phys_cmd, base)

#define PP_TIMEOUT_MAX_TRIALS	10
/*
 * Tearcheck sync start and continue thresholds are empirically found
 * based on common panels. In the future, we may want to allow panels
 * to override these default values.
 */
#define DEFAULT_TEARCHECK_SYNC_THRESH_START	4
#define DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE	4

static void dpu_encoder_phys_cmd_enable_te(struct dpu_encoder_phys *phys_enc);

static bool dpu_encoder_phys_cmd_is_master(struct dpu_encoder_phys *phys_enc)
{
	return (phys_enc->split_role != ENC_ROLE_SLAVE);
}

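/*
 * Program the CTL block for command mode on this interface. On hardware
 * with active-CTL support the pingpong block is also bound to the
 * interface here, and data compression is enabled when DSC is in use.
 */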
static void _dpu_encoder_phys_cmd_update_intf_cfg(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_intf_cfg intf_cfg = { 0 };
	struct dpu_hw_intf_cmd_mode_cfg cmd_mode_cfg = {};

	ctl = phys_enc->hw_ctl;
	if (!ctl->ops.setup_intf_cfg)
		return;

	intf_cfg.intf = phys_enc->hw_intf->idx;
	intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_CMD;
	intf_cfg.stream_sel = cmd_enc->stream_sel;
	intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
	intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);
	ctl->ops.setup_intf_cfg(ctl, &intf_cfg);

	/* setup which pp blk will connect to this intf */
	if (test_bit(DPU_CTL_ACTIVE_CFG, &ctl->caps->features) && phys_enc->hw_intf->ops.bind_pingpong_blk)
		phys_enc->hw_intf->ops.bind_pingpong_blk(
				phys_enc->hw_intf,
				phys_enc->hw_pp->idx);

	if (intf_cfg.dsc != 0)
		cmd_mode_cfg.data_compress = true;

	if (phys_enc->hw_intf->ops.program_intf_cmd_cfg)
		phys_enc->hw_intf->ops.program_intf_cmd_cfg(phys_enc->hw_intf, &cmd_mode_cfg);
}

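/*
 * PP_DONE interrupt handler: signal frame done to the parent encoder,
 * decrement the pending kickoff count and wake up any thread sleeping
 * on pending_kickoff_wq.
 */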
static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
{
	struct dpu_encoder_phys *phys_enc = arg;
	unsigned long lock_flags;
	int new_cnt;
	u32 event = DPU_ENCODER_FRAME_EVENT_DONE;

	if (!phys_enc->hw_pp)
		return;

	DPU_ATRACE_BEGIN("pp_done_irq");
	/* notify all synchronous clients first, then asynchronous clients */
	dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc, event);

	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
	new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);

	trace_dpu_enc_phys_cmd_pp_tx_done(DRMID(phys_enc->parent),
					  phys_enc->hw_pp->idx - PINGPONG_0,
					  new_cnt, event);

	/* Signal any waiting atomic commit thread */
	wake_up_all(&phys_enc->pending_kickoff_wq);
	DPU_ATRACE_END("pp_done_irq");
}

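/*
 * Read-pointer (TE) interrupt handler: forward the vblank event to the
 * parent encoder and release any waiter on pending_vblank_wq.
 */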
static void dpu_encoder_phys_cmd_te_rd_ptr_irq(void *arg, int irq_idx)
{
	struct dpu_encoder_phys *phys_enc = arg;
	struct dpu_encoder_phys_cmd *cmd_enc;

	if (phys_enc->has_intf_te) {
		if (!phys_enc->hw_intf)
			return;
	} else {
		if (!phys_enc->hw_pp)
			return;
	}

	DPU_ATRACE_BEGIN("rd_ptr_irq");
	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);

	dpu_encoder_vblank_callback(phys_enc->parent, phys_enc);

	atomic_add_unless(&cmd_enc->pending_vblank_cnt, -1, 0);
	wake_up_all(&cmd_enc->pending_vblank_wq);
	DPU_ATRACE_END("rd_ptr_irq");
}

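/*
 * CTL_START interrupt handler: decrement the pending ctl-start count
 * and wake up any thread waiting on pending_kickoff_wq.
 */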
static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
{
	struct dpu_encoder_phys *phys_enc = arg;

	DPU_ATRACE_BEGIN("ctl_start_irq");

	atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);

	/* Signal any waiting ctl start interrupt */
	wake_up_all(&phys_enc->pending_kickoff_wq);
	DPU_ATRACE_END("ctl_start_irq");
}

static void dpu_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
{
	struct dpu_encoder_phys *phys_enc = arg;

	dpu_encoder_underrun_callback(phys_enc->parent, phys_enc);
}

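/*
 * Cache the interrupt indices for the current mode: CTL_START, pingpong
 * done, underrun, and the read-pointer interrupt, which comes from the
 * INTF when it owns tearcheck and from the pingpong block otherwise.
 */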
static void dpu_encoder_phys_cmd_atomic_mode_set(
		struct dpu_encoder_phys *phys_enc,
		struct drm_crtc_state *crtc_state,
		struct drm_connector_state *conn_state)
{
	phys_enc->irq[INTR_IDX_CTL_START] = phys_enc->hw_ctl->caps->intr_start;

	phys_enc->irq[INTR_IDX_PINGPONG] = phys_enc->hw_pp->caps->intr_done;

	if (phys_enc->has_intf_te)
		phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_intf->cap->intr_tear_rd_ptr;
	else
		phys_enc->irq[INTR_IDX_RDPTR] = phys_enc->hw_pp->caps->intr_rdptr;

	phys_enc->irq[INTR_IDX_UNDERRUN] = phys_enc->hw_intf->cap->intr_underrun;
}

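/*
 * Handle a pingpong-done timeout: log only on the first timeout and
 * when PP_TIMEOUT_MAX_TRIALS is reached ("panel dead"), capture a
 * display snapshot, drop the read-pointer interrupt callback, request
 * a CTL reset before the next kickoff and report a frame error.
 */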
static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR;
	bool do_log = false;
	struct drm_encoder *drm_enc;

	if (!phys_enc->hw_pp)
		return -EINVAL;

	drm_enc = phys_enc->parent;

	cmd_enc->pp_timeout_report_cnt++;
	if (cmd_enc->pp_timeout_report_cnt == PP_TIMEOUT_MAX_TRIALS) {
		frame_event |= DPU_ENCODER_FRAME_EVENT_PANEL_DEAD;
		do_log = true;
	} else if (cmd_enc->pp_timeout_report_cnt == 1) {
		do_log = true;
	}

	trace_dpu_enc_phys_cmd_pdone_timeout(DRMID(drm_enc),
		     phys_enc->hw_pp->idx - PINGPONG_0,
		     cmd_enc->pp_timeout_report_cnt,
		     atomic_read(&phys_enc->pending_kickoff_cnt),
		     frame_event);

	/* to avoid flooding, only log first time, and "dead" time */
	if (do_log) {
		DRM_ERROR("id:%d pp:%d kickoff timeout %d cnt %d koff_cnt %d\n",
			  DRMID(drm_enc),
			  phys_enc->hw_pp->idx - PINGPONG_0,
			  phys_enc->hw_ctl->idx - CTL_0,
			  cmd_enc->pp_timeout_report_cnt,
			  atomic_read(&phys_enc->pending_kickoff_cnt));
		msm_disp_snapshot_state(drm_enc->dev);
		dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
				phys_enc->irq[INTR_IDX_RDPTR]);
	}

	atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);

	/* request a ctl reset before the next kickoff */
	phys_enc->enable_state = DPU_ENC_ERR_NEEDS_HW_RESET;

	dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc, frame_event);

	return -ETIMEDOUT;
}

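/*
 * Wait for the pending kickoff count to drop to zero (pingpong done);
 * a timeout is escalated to _dpu_encoder_phys_cmd_handle_ppdone_timeout().
 */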
static int _dpu_encoder_phys_cmd_wait_for_idle(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_encoder_wait_info wait_info;
	int ret;

	wait_info.wq = &phys_enc->pending_kickoff_wq;
	wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;

	ret = dpu_encoder_helper_wait_for_irq(phys_enc,
			phys_enc->irq[INTR_IDX_PINGPONG],
			dpu_encoder_phys_cmd_pp_tx_done_irq,
			&wait_info);
	if (ret == -ETIMEDOUT)
		_dpu_encoder_phys_cmd_handle_ppdone_timeout(phys_enc);
	else if (!ret)
		cmd_enc->pp_timeout_report_cnt = 0;

	return ret;
}

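/*
 * Reference-counted enable/disable of the read-pointer (vblank)
 * interrupt. Only the master encoder registers the callback, since
 * slave encoders don't report vblank.
 */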
static int dpu_encoder_phys_cmd_control_vblank_irq(
		struct dpu_encoder_phys *phys_enc,
		bool enable)
{
	int ret = 0;
	int refcount;

	if (!phys_enc->hw_pp) {
		DPU_ERROR("invalid encoder\n");
		return -EINVAL;
	}

	refcount = atomic_read(&phys_enc->vblank_refcount);

	/* Slave encoders don't report vblank */
	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		goto end;

	/* protect against negative */
	if (!enable && refcount == 0) {
		ret = -EINVAL;
		goto end;
	}

	DRM_DEBUG_KMS("id:%u pp:%d enable=%s/%d\n", DRMID(phys_enc->parent),
		      phys_enc->hw_pp->idx - PINGPONG_0,
		      enable ? "true" : "false", refcount);

	if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
		ret = dpu_core_irq_register_callback(phys_enc->dpu_kms,
				phys_enc->irq[INTR_IDX_RDPTR],
				dpu_encoder_phys_cmd_te_rd_ptr_irq,
				phys_enc);
	else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
		ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
				phys_enc->irq[INTR_IDX_RDPTR]);

end:
	if (ret) {
		DRM_ERROR("vblank irq err id:%u pp:%d ret:%d, enable %s/%d\n",
			  DRMID(phys_enc->parent),
			  phys_enc->hw_pp->idx - PINGPONG_0, ret,
			  enable ? "true" : "false", refcount);
	}

	return ret;
}

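/*
 * Register or unregister the pingpong done, underrun, vblank and, on
 * the master encoder, CTL_START interrupt callbacks.
 */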
static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc,
		bool enable)
{
	trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent),
			phys_enc->hw_pp->idx - PINGPONG_0,
			enable, atomic_read(&phys_enc->vblank_refcount));

	if (enable) {
		dpu_core_irq_register_callback(phys_enc->dpu_kms,
				phys_enc->irq[INTR_IDX_PINGPONG],
				dpu_encoder_phys_cmd_pp_tx_done_irq,
				phys_enc);
		dpu_core_irq_register_callback(phys_enc->dpu_kms,
				phys_enc->irq[INTR_IDX_UNDERRUN],
				dpu_encoder_phys_cmd_underrun_irq,
				phys_enc);
		dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, true);

		if (dpu_encoder_phys_cmd_is_master(phys_enc))
			dpu_core_irq_register_callback(phys_enc->dpu_kms,
					phys_enc->irq[INTR_IDX_CTL_START],
					dpu_encoder_phys_cmd_ctl_start_irq,
					phys_enc);
	} else {
		if (dpu_encoder_phys_cmd_is_master(phys_enc))
			dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
					phys_enc->irq[INTR_IDX_CTL_START]);

		dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
				phys_enc->irq[INTR_IDX_UNDERRUN]);
		dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
		dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
				phys_enc->irq[INTR_IDX_PINGPONG]);
	}
}

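/*
 * Program the tearcheck block (on the INTF or the pingpong block,
 * depending on the hardware) from the cached mode: vsync counter rate,
 * sync thresholds, start position and read-pointer interrupt line.
 */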
static void dpu_encoder_phys_cmd_tearcheck_config(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_hw_tear_check tc_cfg = { 0 };
	struct drm_display_mode *mode;
	bool tc_enable = true;
	unsigned long vsync_hz;
	struct dpu_kms *dpu_kms;

	if (phys_enc->has_intf_te) {
		if (!phys_enc->hw_intf ||
		    !phys_enc->hw_intf->ops.enable_tearcheck) {
			DPU_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
			return;
		}

		DPU_DEBUG_CMDENC(cmd_enc, "");
	} else {
		if (!phys_enc->hw_pp ||
		    !phys_enc->hw_pp->ops.enable_tearcheck) {
			DPU_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
			return;
		}

		DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
	}

	mode = &phys_enc->cached_mode;

	dpu_kms = phys_enc->dpu_kms;

	/*
	 * TE default: DSI byte clock calculated based on 70 fps;
	 * around 14 ms to complete a kickoff cycle if TE is disabled;
	 * vclk_line based on 60 fps; write is faster than read;
	 * init == start == rdptr.
	 *
	 * vsync_count is the ratio of the MDP VSYNC clock frequency to
	 * the LCD panel frequency, divided by the number of rows (lines)
	 * in the LCD panel.
	 */
	vsync_hz = dpu_kms_get_clk_rate(dpu_kms, "vsync");
	if (!vsync_hz) {
		DPU_DEBUG_CMDENC(cmd_enc, "invalid - no vsync clock\n");
		return;
	}

	tc_cfg.vsync_count = vsync_hz /
				(mode->vtotal * drm_mode_vrefresh(mode));

	/*
	 * Set the sync_cfg_height to twice vtotal so that if we lose a
	 * TE event coming from the display TE pin we won't stall immediately
	 */
	tc_cfg.hw_vsync_mode = 1;
	tc_cfg.sync_cfg_height = mode->vtotal * 2;
	tc_cfg.vsync_init_val = mode->vdisplay;
	tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START;
	tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
	tc_cfg.start_pos = mode->vdisplay;
	tc_cfg.rd_ptr_irq = mode->vdisplay + 1;

	DPU_DEBUG_CMDENC(cmd_enc,
		"tc vsync_clk_speed_hz %lu vtotal %u vrefresh %u\n",
		vsync_hz, mode->vtotal, drm_mode_vrefresh(mode));
	DPU_DEBUG_CMDENC(cmd_enc,
		"tc enable %u start_pos %u rd_ptr_irq %u\n",
		tc_enable, tc_cfg.start_pos, tc_cfg.rd_ptr_irq);
	DPU_DEBUG_CMDENC(cmd_enc,
		"tc hw_vsync_mode %u vsync_count %u vsync_init_val %u\n",
		tc_cfg.hw_vsync_mode, tc_cfg.vsync_count,
		tc_cfg.vsync_init_val);
	DPU_DEBUG_CMDENC(cmd_enc,
		"tc cfgheight %u thresh_start %u thresh_cont %u\n",
		tc_cfg.sync_cfg_height, tc_cfg.sync_threshold_start,
		tc_cfg.sync_threshold_continue);

	if (phys_enc->has_intf_te)
		phys_enc->hw_intf->ops.enable_tearcheck(phys_enc->hw_intf, &tc_cfg);
	else
		phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, &tc_cfg);
}

static void _dpu_encoder_phys_cmd_pingpong_config(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);

	if (!phys_enc->hw_pp || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
		DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != NULL);
		return;
	}

	DPU_DEBUG_CMDENC(cmd_enc, "pp %d, enabling mode:\n",
			 phys_enc->hw_pp->idx - PINGPONG_0);
	drm_mode_debug_printmodeline(&phys_enc->cached_mode);

	_dpu_encoder_phys_cmd_update_intf_cfg(phys_enc);
	dpu_encoder_phys_cmd_tearcheck_config(phys_enc);
}

static bool dpu_encoder_phys_cmd_needs_single_flush(
		struct dpu_encoder_phys *phys_enc)
{
	/*
	 * we do separate flush for each CTL and let
	 * CTL_START synchronize them
	 */
	return false;
}

static void dpu_encoder_phys_cmd_enable_helper(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_ctl *ctl;

	if (!phys_enc->hw_pp) {
		DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
		return;
	}

	dpu_encoder_helper_split_config(phys_enc, phys_enc->hw_intf->idx);

	_dpu_encoder_phys_cmd_pingpong_config(phys_enc);

	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		return;

	ctl = phys_enc->hw_ctl;
	ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx);
}

static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);

	if (!phys_enc->hw_pp) {
		DPU_ERROR("invalid phys encoder\n");
		return;
	}

	DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);

	if (phys_enc->enable_state == DPU_ENC_ENABLED) {
		DPU_ERROR("already enabled\n");
		return;
	}

	dpu_encoder_phys_cmd_enable_helper(phys_enc);
	phys_enc->enable_state = DPU_ENC_ENABLED;
}

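/*
 * Connect or disconnect the external TE pin from the tearcheck logic,
 * on the INTF or the pingpong block depending on where tearcheck lives.
 */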
static void _dpu_encoder_phys_cmd_connect_te(
		struct dpu_encoder_phys *phys_enc, bool enable)
{
	if (phys_enc->has_intf_te) {
		if (!phys_enc->hw_intf || !phys_enc->hw_intf->ops.connect_external_te)
			return;

		trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
		phys_enc->hw_intf->ops.connect_external_te(phys_enc->hw_intf, enable);
	} else {
		if (!phys_enc->hw_pp || !phys_enc->hw_pp->ops.connect_external_te)
			return;

		trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
		phys_enc->hw_pp->ops.connect_external_te(phys_enc->hw_pp, enable);
	}
}

static void dpu_encoder_phys_cmd_prepare_idle_pc(
		struct dpu_encoder_phys *phys_enc)
{
	_dpu_encoder_phys_cmd_connect_te(phys_enc, false);
}

static int dpu_encoder_phys_cmd_get_line_count(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_pingpong *hw_pp;
	struct dpu_hw_intf *hw_intf;

	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		return -EINVAL;

	if (phys_enc->has_intf_te) {
		hw_intf = phys_enc->hw_intf;
		if (!hw_intf || !hw_intf->ops.get_line_count)
			return -EINVAL;
		return hw_intf->ops.get_line_count(hw_intf);
	}

	hw_pp = phys_enc->hw_pp;
	if (!hw_pp || !hw_pp->ops.get_line_count)
		return -EINVAL;
	return hw_pp->ops.get_line_count(hw_pp);
}

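/*
 * Disable tearcheck, unbind the pingpong block from the interface and
 * flush the interface so the unbind takes effect.
 */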
static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_hw_ctl *ctl;

	if (phys_enc->enable_state == DPU_ENC_DISABLED) {
		DPU_ERROR_CMDENC(cmd_enc, "already disabled\n");
		return;
	}

	if (phys_enc->has_intf_te) {
		DRM_DEBUG_KMS("id:%u intf:%d state:%d\n", DRMID(phys_enc->parent),
			      phys_enc->hw_intf->idx - INTF_0,
			      phys_enc->enable_state);

		if (phys_enc->hw_intf->ops.disable_tearcheck)
			phys_enc->hw_intf->ops.disable_tearcheck(phys_enc->hw_intf);
	} else {
		if (!phys_enc->hw_pp) {
			DPU_ERROR("invalid encoder\n");
			return;
		}

		DRM_DEBUG_KMS("id:%u pp:%d state:%d\n", DRMID(phys_enc->parent),
			      phys_enc->hw_pp->idx - PINGPONG_0,
			      phys_enc->enable_state);

		if (phys_enc->hw_pp->ops.disable_tearcheck)
			phys_enc->hw_pp->ops.disable_tearcheck(phys_enc->hw_pp);
	}

	if (phys_enc->hw_intf->ops.bind_pingpong_blk) {
		phys_enc->hw_intf->ops.bind_pingpong_blk(
				phys_enc->hw_intf,
				PINGPONG_NONE);

		ctl = phys_enc->hw_ctl;
		ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx);
	}

	phys_enc->enable_state = DPU_ENC_DISABLED;
}

static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);

	kfree(cmd_enc);
}

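/*
 * Called before a new kickoff is triggered: if the previous kickoff is
 * still outstanding, wait for it to complete first (discarding it on
 * timeout), then make sure autorefresh is disabled on the master.
 */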
static void dpu_encoder_phys_cmd_prepare_for_kickoff(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	int ret;

	if (!phys_enc->hw_pp) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	DRM_DEBUG_KMS("id:%u pp:%d pending_cnt:%d\n", DRMID(phys_enc->parent),
		      phys_enc->hw_pp->idx - PINGPONG_0,
		      atomic_read(&phys_enc->pending_kickoff_cnt));

	/*
	 * Mark kickoff request as outstanding. If there is more than one
	 * outstanding, then we have to wait for the previous one to complete.
	 */
	ret = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
	if (ret) {
		/* force pending_kickoff_cnt 0 to discard failed kickoff */
		atomic_set(&phys_enc->pending_kickoff_cnt, 0);
		DRM_ERROR("failed wait_for_idle: id:%u ret:%d pp:%d\n",
			  DRMID(phys_enc->parent), ret,
			  phys_enc->hw_pp->idx - PINGPONG_0);
	}

	dpu_encoder_phys_cmd_enable_te(phys_enc);

	DPU_DEBUG_CMDENC(cmd_enc, "pp:%d pending_cnt %d\n",
			 phys_enc->hw_pp->idx - PINGPONG_0,
			 atomic_read(&phys_enc->pending_kickoff_cnt));
}

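/*
 * Despite its name, on the master encoder this disables hardware
 * autorefresh (on the INTF or the pingpong block), leaving frame
 * transfer under software kickoff control.
 */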
static void dpu_encoder_phys_cmd_enable_te(struct dpu_encoder_phys *phys_enc)
{
	if (!phys_enc)
		return;
	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		return;

	if (phys_enc->has_intf_te) {
		if (!phys_enc->hw_intf->ops.disable_autorefresh)
			return;

		phys_enc->hw_intf->ops.disable_autorefresh(
				phys_enc->hw_intf,
				DRMID(phys_enc->parent),
				phys_enc->cached_mode.vdisplay);
	} else {
		if (!phys_enc->hw_pp ||
		    !phys_enc->hw_pp->ops.disable_autorefresh)
			return;

		phys_enc->hw_pp->ops.disable_autorefresh(
				phys_enc->hw_pp,
				DRMID(phys_enc->parent),
				phys_enc->cached_mode.vdisplay);
	}
}

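/*
 * Wait for the CTL_START interrupt, i.e. for the pending ctl-start
 * count to drop to zero.
 */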
static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
		struct dpu_encoder_phys *phys_enc)
{
	struct dpu_encoder_phys_cmd *cmd_enc =
			to_dpu_encoder_phys_cmd(phys_enc);
	struct dpu_encoder_wait_info wait_info;
	int ret;

	wait_info.wq = &phys_enc->pending_kickoff_wq;
	wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;

	ret = dpu_encoder_helper_wait_for_irq(phys_enc,
			phys_enc->irq[INTR_IDX_CTL_START],
			dpu_encoder_phys_cmd_ctl_start_irq,
			&wait_info);
	if (ret == -ETIMEDOUT) {
		DPU_ERROR_CMDENC(cmd_enc, "ctl start interrupt wait failed\n");
		ret = -EINVAL;
	} else if (!ret)
		ret = 0;

	return ret;
}

static int dpu_encoder_phys_cmd_wait_for_tx_complete(
		struct dpu_encoder_phys *phys_enc)
{
	int rc;

	rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
	if (rc) {
		DRM_ERROR("failed wait_for_idle: id:%u ret:%d intf:%d\n",
			  DRMID(phys_enc->parent), rc,
			  phys_enc->hw_intf->idx - INTF_0);
	}

	return rc;
}

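/*
 * On the master, wait for frame transfer completion if the CTL is
 * already started, otherwise wait for CTL_START; slave encoders return
 * immediately.
 */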
static int dpu_encoder_phys_cmd_wait_for_commit_done(
		struct dpu_encoder_phys *phys_enc)
{
	/* only required for master controller */
	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		return 0;

	if (phys_enc->hw_ctl->ops.is_started(phys_enc->hw_ctl))
		return dpu_encoder_phys_cmd_wait_for_tx_complete(phys_enc);

	return _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
}

static int dpu_encoder_phys_cmd_wait_for_vblank(
		struct dpu_encoder_phys *phys_enc)
{
	int rc = 0;
	struct dpu_encoder_phys_cmd *cmd_enc;
	struct dpu_encoder_wait_info wait_info;

	cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);

	/* only required for master controller */
	if (!dpu_encoder_phys_cmd_is_master(phys_enc))
		return rc;

	wait_info.wq = &cmd_enc->pending_vblank_wq;
	wait_info.atomic_cnt = &cmd_enc->pending_vblank_cnt;
	wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;

	atomic_inc(&cmd_enc->pending_vblank_cnt);

	rc = dpu_encoder_helper_wait_for_irq(phys_enc,
			phys_enc->irq[INTR_IDX_RDPTR],
			dpu_encoder_phys_cmd_te_rd_ptr_irq,
			&wait_info);

	return rc;
}

static void dpu_encoder_phys_cmd_handle_post_kickoff(
		struct dpu_encoder_phys *phys_enc)
{
	/*
	 * re-enable external TE, either for the first time after enabling
	 * or if disabled for Autorefresh
	 */
	_dpu_encoder_phys_cmd_connect_te(phys_enc, true);
}

static void dpu_encoder_phys_cmd_trigger_start(
		struct dpu_encoder_phys *phys_enc)
{
	dpu_encoder_helper_trigger_start(phys_enc);
}

static void dpu_encoder_phys_cmd_init_ops(
		struct dpu_encoder_phys_ops *ops)
{
	ops->is_master = dpu_encoder_phys_cmd_is_master;
	ops->atomic_mode_set = dpu_encoder_phys_cmd_atomic_mode_set;
	ops->enable = dpu_encoder_phys_cmd_enable;
	ops->disable = dpu_encoder_phys_cmd_disable;
	ops->destroy = dpu_encoder_phys_cmd_destroy;
	ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq;
	ops->wait_for_commit_done = dpu_encoder_phys_cmd_wait_for_commit_done;
	ops->prepare_for_kickoff = dpu_encoder_phys_cmd_prepare_for_kickoff;
	ops->wait_for_tx_complete = dpu_encoder_phys_cmd_wait_for_tx_complete;
	ops->wait_for_vblank = dpu_encoder_phys_cmd_wait_for_vblank;
	ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
	ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
	ops->irq_control = dpu_encoder_phys_cmd_irq_control;
	ops->restore = dpu_encoder_phys_cmd_enable_helper;
	ops->prepare_idle_pc = dpu_encoder_phys_cmd_prepare_idle_pc;
	ops->handle_post_kickoff = dpu_encoder_phys_cmd_handle_post_kickoff;
	ops->get_line_count = dpu_encoder_phys_cmd_get_line_count;
}

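/**
 * dpu_encoder_phys_cmd_init - construct a command-mode physical encoder
 * @p:	init parameters shared with the parent virtual encoder
 *
 * Return: pointer to the new physical encoder, or an ERR_PTR on
 * allocation failure.
 */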
struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
		struct dpu_enc_phys_init_params *p)
{
	struct dpu_encoder_phys *phys_enc = NULL;
	struct dpu_encoder_phys_cmd *cmd_enc = NULL;

	DPU_DEBUG("intf\n");

	cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL);
	if (!cmd_enc) {
		DPU_ERROR("failed to allocate\n");
		return ERR_PTR(-ENOMEM);
	}
	phys_enc = &cmd_enc->base;

	dpu_encoder_phys_init(phys_enc, p);

	dpu_encoder_phys_cmd_init_ops(&phys_enc->ops);
	phys_enc->intf_mode = INTF_MODE_CMD;
	cmd_enc->stream_sel = 0;

	phys_enc->has_intf_te = test_bit(DPU_INTF_TE,
					 &phys_enc->hw_intf->cap->features);

	atomic_set(&cmd_enc->pending_vblank_cnt, 0);
	init_waitqueue_head(&cmd_enc->pending_vblank_wq);

	DPU_DEBUG_CMDENC(cmd_enc, "created\n");

	return phys_enc;
}