1 /*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
30 #include "resource.h"
31 #include "custom_float.h"
32 #include "dcn10_hw_sequencer.h"
33 #include "dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
35 #include "abm.h"
36 #include "dmcu.h"
37 #include "dcn10_optc.h"
38 #include "dcn10_dpp.h"
39 #include "dcn10_mpc.h"
40 #include "timing_generator.h"
41 #include "opp.h"
42 #include "ipp.h"
43 #include "mpc.h"
44 #include "reg_helper.h"
45 #include "dcn10_hubp.h"
46 #include "dcn10_hubbub.h"
47 #include "dcn10_cm_common.h"
48 #include "dccg.h"
49 #include "clk_mgr.h"
50 #include "link_hwss.h"
51 #include "dpcd_defs.h"
52 #include "dsc.h"
53 #include "dce/dmub_psr.h"
54 #include "dc_dmub_srv.h"
55 #include "dce/dmub_hw_lock_mgr.h"
56 #include "dc_trace.h"
57 #include "dce/dmub_outbox.h"
58 #include "link.h"
59
60 #define DC_LOGGER_INIT(logger)
61
62 #define CTX \
63 hws->ctx
64 #define REG(reg)\
65 hws->regs->reg
66
67 #undef FN
68 #define FN(reg_name, field_name) \
69 hws->shifts->field_name, hws->masks->field_name
70
71 /*print is 17 wide, first two characters are spaces*/
72 #define DTN_INFO_MICRO_SEC(ref_cycle) \
73 print_microsec(dc_ctx, log_ctx, ref_cycle)
74
75 #define GAMMA_HW_POINTS_NUM 256
76
77 #define PGFSM_POWER_ON 0
78 #define PGFSM_POWER_OFF 2
79
/*
 * Convert a DCHUB reference-clock cycle count to microseconds and append
 * it to the DTN log with three fractional digits (17-wide field, two
 * leading spaces).
 */
static void print_microsec(struct dc_context *dc_ctx,
	struct dc_log_buffer_ctx *log_ctx,
	uint32_t ref_cycle)
{
	static const unsigned int frac = 1000;
	const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
	/* microseconds scaled by 'frac' so the remainder gives the decimals */
	uint32_t scaled_us = (ref_cycle * frac) / ref_clk_mhz;

	DTN_INFO("  %11d.%03d", scaled_us / frac, scaled_us % frac);
}
92
/*
 * dcn10_lock_all_pipes - Lock or unlock the OTG of every active top pipe.
 *
 * @dc:      display core instance
 * @context: state whose pipes are (un)locked
 * @lock:    true to lock, false to unlock
 *
 * Only the top pipe of each tree is touched (locking a shared tg twice
 * would be redundant); disabled pipes and pipes with no plane in either
 * the old or new state are skipped.
 */
void dcn10_lock_all_pipes(struct dc *dc,
	struct dc_state *context,
	bool lock)
{
	struct pipe_ctx *pipe_ctx;
	struct pipe_ctx *old_pipe_ctx;
	struct timing_generator *tg;
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
		pipe_ctx = &context->res_ctx.pipe_ctx[i];
		tg = pipe_ctx->stream_res.tg;

		/*
		 * Only lock the top pipe's tg to prevent redundant
		 * (un)locking. Also skip if pipe is disabled.
		 */
		if (pipe_ctx->top_pipe ||
		    !pipe_ctx->stream ||
		    (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) ||
		    !tg->funcs->is_tg_enabled(tg))
			continue;

		/* Both former branches called the same hook with a literal
		 * equal to 'lock'; pass the flag straight through.
		 */
		dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
	}
}
123
/*
 * Dump the MPC and DPP CRC result registers to the DTN log, if this ASIC
 * defines them (REG() yields 0 for registers absent from the reg list).
 */
static void log_mpc_crc(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct dce_hwseq *hws = dc->hwseq;

	if (REG(MPC_CRC_RESULT_GB))
		DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
		REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
	if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
		DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
		REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
}
137
dcn10_log_hubbub_state(struct dc * dc,struct dc_log_buffer_ctx * log_ctx)138 static void dcn10_log_hubbub_state(struct dc *dc,
139 struct dc_log_buffer_ctx *log_ctx)
140 {
141 struct dc_context *dc_ctx = dc->ctx;
142 struct dcn_hubbub_wm wm;
143 int i;
144
145 memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
146 dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
147
148 DTN_INFO("HUBBUB WM: data_urgent pte_meta_urgent"
149 " sr_enter sr_exit dram_clk_change\n");
150
151 for (i = 0; i < 4; i++) {
152 struct dcn_hubbub_wm_set *s;
153
154 s = &wm.sets[i];
155 DTN_INFO("WM_Set[%d]:", s->wm_set);
156 DTN_INFO_MICRO_SEC(s->data_urgent);
157 DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
158 DTN_INFO_MICRO_SEC(s->sr_enter);
159 DTN_INFO_MICRO_SEC(s->sr_exit);
160 DTN_INFO_MICRO_SEC(s->dram_clk_change);
161 DTN_INFO("\n");
162 }
163
164 DTN_INFO("\n");
165 }
166
/*
 * Dump the state of every HUBP in the pool to the DTN log: basic surface
 * state, then the RQ (request), DLG (delay generator) and TTU (time to
 * underflow) register groups. Pipes that are blanked are skipped.
 */
static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	int i;

	DTN_INFO(
		"HUBP: format addr_hi width height rot mir sw_mode dcc_en blank_en clock_en ttu_dis underflow min_ttu_vblank qos_low_wm qos_high_wm\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct hubp *hubp = pool->hubps[i];
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);

		/* latch current register values into the cached state */
		hubp->funcs->hubp_read_state(hubp);

		if (!s->blank_en) {
			DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh %6d %8d %8d %7d %8xh",
					hubp->inst,
					s->pixel_format,
					s->inuse_addr_hi,
					s->viewport_width,
					s->viewport_height,
					s->rotation_angle,
					s->h_mirror_en,
					s->sw_mode,
					s->dcc_en,
					s->blank_en,
					s->clock_en,
					s->ttu_disable,
					s->underflow_status);
			DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
			DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
			DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
			DTN_INFO("\n");
		}
	}

	/* RQ: request-generation parameters, luma (L:) and chroma (C:) */
	DTN_INFO("\n=========RQ========\n");
	DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
		" min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
		" min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;

		if (!s->blank_en)
			DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
				pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
				rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
				rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
				rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
				rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
				rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
				rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
				rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
				rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
	}

	/* DLG: delay-generator / prefetch scheduling parameters */
	DTN_INFO("========DLG========\n");
	DTN_INFO("HUBP: rc_hbe dlg_vbe min_d_y_n rc_per_ht rc_x_a_s "
			" dst_y_a_s dst_y_pf dst_y_vvb dst_y_rvb dst_y_vfl dst_y_rfl rf_pix_fq"
			" vratio_pf vrat_pf_c rc_pg_vbl rc_pg_vbc rc_mc_vbl rc_mc_vbc rc_pg_fll"
			" rc_pg_flc rc_mc_fll rc_mc_flc pr_nom_l pr_nom_c rc_pg_nl rc_pg_nc "
			" mr_nom_l mr_nom_c rc_mc_nl rc_mc_nc rc_ld_pl rc_ld_pc rc_ld_l "
			" rc_ld_c cha_cur0 ofst_cur1 cha_cur1 vr_af_vc0 ddrq_limt x_rt_dlay"
			" x_rp_dlay x_rr_sfl\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;

		if (!s->blank_en)
			DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
				" %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
				" %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
				pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
				dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
				dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
				dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
				dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
				dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
				dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
				dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
				dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
				dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
				dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
				dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
				dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
				dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
				dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
				dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
				dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
				dlg_regs->xfc_reg_remote_surface_flip_latency);
	}

	/* TTU: time-to-underflow QoS parameters */
	DTN_INFO("========TTU========\n");
	DTN_INFO("HUBP: qos_ll_wm qos_lh_wm mn_ttu_vb qos_l_flp rc_rd_p_l rc_rd_l rc_rd_p_c"
			" rc_rd_c rc_rd_c0 rc_rd_pc0 rc_rd_c1 rc_rd_pc1 qos_lf_l qos_rds_l"
			" qos_lf_c qos_rds_c qos_lf_c0 qos_rds_c0 qos_lf_c1 qos_rds_c1\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;

		if (!s->blank_en)
			DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
				pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
				ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
				ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
				ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
				ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
				ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
				ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
	}
	DTN_INFO("\n");
}
280
/*
 * dcn10_log_hw_state - Dump overall DCN hardware state to the DTN log.
 *
 * @dc:      display core instance
 * @log_ctx: destination log buffer (may be NULL depending on DTN macros)
 *
 * Logs HUBBUB watermarks, HUBP, DPP, MPCC, OTG, DSC, stream/link encoder
 * and HPO DP encoder state plus the calculated clocks. Only enabled or
 * active instances are printed. As a side effect, the sticky OTG
 * underflow status of each enabled OTG is cleared.
 */
void dcn10_log_hw_state(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	int i;

	DTN_INFO_BEGIN();

	dcn10_log_hubbub_state(dc, log_ctx);

	dcn10_log_hubp_states(dc, log_ctx);

	/* DPP gamma/gamut-remap state, one row per enabled DPP */
	DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
			" GAMUT mode C11 C12 C13 C14 C21 C22 C23 C24 "
			"C31 C32 C33 C34\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dpp *dpp = pool->dpps[i];
		struct dcn_dpp_state s = {0};

		dpp->funcs->dpp_read_state(dpp, &s);

		if (!s.is_enabled)
			continue;

		DTN_INFO("[%2d]: %11xh %-11s %-11s %-11s"
				"%8x %08xh %08xh %08xh %08xh %08xh %08xh",
				dpp->inst,
				s.igam_input_format,
				(s.igam_lut_mode == 0) ? "BypassFixed" :
					((s.igam_lut_mode == 1) ? "BypassFloat" :
					((s.igam_lut_mode == 2) ? "RAM" :
					((s.igam_lut_mode == 3) ? "RAM" :
								 "Unknown"))),
				(s.dgam_lut_mode == 0) ? "Bypass" :
					((s.dgam_lut_mode == 1) ? "sRGB" :
					((s.dgam_lut_mode == 2) ? "Ycc" :
					((s.dgam_lut_mode == 3) ? "RAM" :
					((s.dgam_lut_mode == 4) ? "RAM" :
								 "Unknown")))),
				(s.rgam_lut_mode == 0) ? "Bypass" :
					((s.rgam_lut_mode == 1) ? "sRGB" :
					((s.rgam_lut_mode == 2) ? "Ycc" :
					((s.rgam_lut_mode == 3) ? "RAM" :
					((s.rgam_lut_mode == 4) ? "RAM" :
								 "Unknown")))),
				s.gamut_remap_mode,
				s.gamut_remap_c11_c12,
				s.gamut_remap_c13_c14,
				s.gamut_remap_c21_c22,
				s.gamut_remap_c23_c24,
				s.gamut_remap_c31_c32,
				s.gamut_remap_c33_c34);
		DTN_INFO("\n");
	}
	DTN_INFO("\n");

	DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct mpcc_state s = {0};

		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
		/* opp_id == 0xf means the MPCC is unattached/unused */
		if (s.opp_id != 0xf)
			DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d\n",
				i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
				s.idle);
	}
	DTN_INFO("\n");

	DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel h_bs h_be h_ss h_se hpol htot vtot underflow blank_en\n");

	for (i = 0; i < pool->timing_generator_count; i++) {
		struct timing_generator *tg = pool->timing_generators[i];
		struct dcn_otg_state s = {0};
		/* Read shared OTG state registers for all DCNx */
		optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);

		/*
		 * For DCN2 and greater, a register on the OPP is used to
		 * determine if the CRTC is blanked instead of the OTG. So use
		 * dpg_is_blanked() if exists, otherwise fallback on otg.
		 *
		 * TODO: Implement DCN-specific read_otg_state hooks.
		 */
		if (pool->opps[i]->funcs->dpg_is_blanked)
			s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
		else
			s.blank_enabled = tg->funcs->is_blanked(tg);

		//only print if OTG master is enabled
		if ((s.otg_enabled & 1) == 0)
			continue;

		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d %9d %8d\n",
				tg->inst,
				s.v_blank_start,
				s.v_blank_end,
				s.v_sync_a_start,
				s.v_sync_a_end,
				s.v_sync_a_pol,
				s.v_total_max,
				s.v_total_min,
				s.v_total_max_sel,
				s.v_total_min_sel,
				s.h_blank_start,
				s.h_blank_end,
				s.h_sync_a_start,
				s.h_sync_a_end,
				s.h_sync_a_pol,
				s.h_total,
				s.v_total,
				s.underflow_occurred_status,
				s.blank_enabled);

		// Clear underflow for debug purposes
		// We want to keep underflow sticky bit on for the longevity tests outside of test environment.
		// This function is called only from Windows or Diags test environment, hence it's safe to clear
		// it from here without affecting the original intent.
		tg->funcs->clear_optc_underflow(tg);
	}
	DTN_INFO("\n");

	// dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
	// TODO: Update golden log header to reflect this name change
	DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n");
	for (i = 0; i < pool->res_cap->num_dsc; i++) {
		struct display_stream_compressor *dsc = pool->dscs[i];
		struct dcn_dsc_state s = {0};

		dsc->funcs->dsc_read_state(dsc, &s);
		DTN_INFO("[%d]: %-9d %-12d %-10d\n",
		dsc->inst,
			s.dsc_clock_en,
			s.dsc_slice_width,
			s.dsc_bits_per_pixel);
		DTN_INFO("\n");
	}
	DTN_INFO("\n");

	DTN_INFO("S_ENC: DSC_MODE SEC_GSP7_LINE_NUM"
			" VBID6_LINE_REFERENCE VBID6_LINE_NUM SEC_GSP7_ENABLE SEC_STREAM_ENABLE\n");
	for (i = 0; i < pool->stream_enc_count; i++) {
		struct stream_encoder *enc = pool->stream_enc[i];
		struct enc_state s = {0};

		/* enc_read_state is optional; skip encoders without it */
		if (enc->funcs->enc_read_state) {
			enc->funcs->enc_read_state(enc, &s);
			DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
				enc->id,
				s.dsc_mode,
				s.sec_gsp_pps_line_num,
				s.vbid6_line_reference,
				s.vbid6_line_num,
				s.sec_gsp_pps_enable,
				s.sec_stream_enable);
			DTN_INFO("\n");
		}
	}
	DTN_INFO("\n");

	DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS DP_LINK_TRAINING_COMPLETE\n");
	for (i = 0; i < dc->link_count; i++) {
		struct link_encoder *lenc = dc->links[i]->link_enc;

		struct link_enc_state s = {0};

		/* link_enc may be unassigned; read_state is optional */
		if (lenc && lenc->funcs->read_state) {
			lenc->funcs->read_state(lenc, &s);
			DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
				i,
				s.dphy_fec_en,
				s.dphy_fec_ready_shadow,
				s.dphy_fec_active_status,
				s.dp_link_training_complete);
			DTN_INFO("\n");
		}
	}
	DTN_INFO("\n");

	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
		"dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);

	log_mpc_crc(dc, log_ctx);

	{
		/* HPO (128b/132b DP2.x) stream encoders, if any exist */
		if (pool->hpo_dp_stream_enc_count > 0) {
			DTN_INFO("DP HPO S_ENC: Enabled OTG Format Depth Vid SDP Compressed Link\n");
			for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
				struct hpo_dp_stream_encoder_state hpo_dp_se_state = {0};
				struct hpo_dp_stream_encoder *hpo_dp_stream_enc = pool->hpo_dp_stream_enc[i];

				if (hpo_dp_stream_enc && hpo_dp_stream_enc->funcs->read_state) {
					hpo_dp_stream_enc->funcs->read_state(hpo_dp_stream_enc, &hpo_dp_se_state);

					DTN_INFO("[%d]: %d %d %6s %d %d %d %d %d\n",
							hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0,
							hpo_dp_se_state.stream_enc_enabled,
							hpo_dp_se_state.otg_inst,
							(hpo_dp_se_state.pixel_encoding == 0) ? "4:4:4" :
									((hpo_dp_se_state.pixel_encoding == 1) ? "4:2:2" :
									(hpo_dp_se_state.pixel_encoding == 2) ? "4:2:0" : "Y-Only"),
							(hpo_dp_se_state.component_depth == 0) ? 6 :
									((hpo_dp_se_state.component_depth == 1) ? 8 :
									(hpo_dp_se_state.component_depth == 2) ? 10 : 12),
							hpo_dp_se_state.vid_stream_enabled,
							hpo_dp_se_state.sdp_enabled,
							hpo_dp_se_state.compressed_format,
							hpo_dp_se_state.mapped_to_link_enc);
				}
			}

			DTN_INFO("\n");
		}

		/* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
		if (pool->hpo_dp_link_enc_count) {
			DTN_INFO("DP HPO L_ENC: Enabled Mode Lanes Stream Slots VC Rate X VC Rate Y\n");

			for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
				struct hpo_dp_link_encoder *hpo_dp_link_enc = pool->hpo_dp_link_enc[i];
				struct hpo_dp_link_enc_state hpo_dp_le_state = {0};

				if (hpo_dp_link_enc->funcs->read_state) {
					hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);
					DTN_INFO("[%d]: %d %6s %d %d %d %d %d\n",
							hpo_dp_link_enc->inst,
							hpo_dp_le_state.link_enc_enabled,
							(hpo_dp_le_state.link_mode == 0) ? "TPS1" :
									(hpo_dp_le_state.link_mode == 1) ? "TPS2" :
									(hpo_dp_le_state.link_mode == 2) ? "ACTIVE" : "TEST",
							hpo_dp_le_state.lane_count,
							hpo_dp_le_state.stream_src[0],
							hpo_dp_le_state.slot_count[0],
							hpo_dp_le_state.vc_rate_x[0],
							hpo_dp_le_state.vc_rate_y[0]);
					DTN_INFO("\n");
				}
			}

			DTN_INFO("\n");
		}
	}

	DTN_INFO_END();
}
534
dcn10_did_underflow_occur(struct dc * dc,struct pipe_ctx * pipe_ctx)535 bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
536 {
537 struct hubp *hubp = pipe_ctx->plane_res.hubp;
538 struct timing_generator *tg = pipe_ctx->stream_res.tg;
539
540 if (tg->funcs->is_optc_underflow_occurred(tg)) {
541 tg->funcs->clear_optc_underflow(tg);
542 return true;
543 }
544
545 if (hubp->funcs->hubp_get_underflow_status(hubp)) {
546 hubp->funcs->hubp_clear_underflow(hubp);
547 return true;
548 }
549 return false;
550 }
551
/*
 * Enable or disable plane power gating for all HUBP and DPP domains by
 * driving the FORCEON bits: forcing a domain on defeats power gating,
 * so force-on is simply the inverse of the enable request.
 */
void dcn10_enable_power_gating_plane(
	struct dce_hwseq *hws,
	bool enable)
{
	bool force_on = !enable;

	/* DCHUBP0/1/2/3 */
	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);

	/* DPP0/1/2/3 */
	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
}
573
/*
 * Take the display controllers out of legacy VGA mode. No-op if none of
 * the four VGA engines is currently enabled.
 */
void dcn10_disable_vga(
	struct dce_hwseq *hws)
{
	unsigned int in_vga1_mode = 0;
	unsigned int in_vga2_mode = 0;
	unsigned int in_vga3_mode = 0;
	unsigned int in_vga4_mode = 0;

	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
	REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
	REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
	REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);

	/* nothing to do if VGA mode is already off everywhere */
	if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
			in_vga3_mode == 0 && in_vga4_mode == 0)
		return;

	REG_WRITE(D1VGA_CONTROL, 0);
	REG_WRITE(D2VGA_CONTROL, 0);
	REG_WRITE(D3VGA_CONTROL, 0);
	REG_WRITE(D4VGA_CONTROL, 0);

	/* HW Engineer's Notes:
	 *  During switch from vga->extended, if we set the VGA_TEST_ENABLE and
	 *  then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
	 *
	 *  Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
	 *  VGA_TEST_ENABLE, to leave it in the same state as before.
	 */
	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
}
606
607 /**
608 * dcn10_dpp_pg_control - DPP power gate control.
609 *
610 * @hws: dce_hwseq reference.
611 * @dpp_inst: DPP instance reference.
612 * @power_on: true if we want to enable power gate, false otherwise.
613 *
614 * Enable or disable power gate in the specific DPP instance.
615 */
dcn10_dpp_pg_control(struct dce_hwseq * hws,unsigned int dpp_inst,bool power_on)616 void dcn10_dpp_pg_control(
617 struct dce_hwseq *hws,
618 unsigned int dpp_inst,
619 bool power_on)
620 {
621 uint32_t power_gate = power_on ? 0 : 1;
622 uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
623
624 if (hws->ctx->dc->debug.disable_dpp_power_gate)
625 return;
626 if (REG(DOMAIN1_PG_CONFIG) == 0)
627 return;
628
629 switch (dpp_inst) {
630 case 0: /* DPP0 */
631 REG_UPDATE(DOMAIN1_PG_CONFIG,
632 DOMAIN1_POWER_GATE, power_gate);
633
634 REG_WAIT(DOMAIN1_PG_STATUS,
635 DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
636 1, 1000);
637 break;
638 case 1: /* DPP1 */
639 REG_UPDATE(DOMAIN3_PG_CONFIG,
640 DOMAIN3_POWER_GATE, power_gate);
641
642 REG_WAIT(DOMAIN3_PG_STATUS,
643 DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
644 1, 1000);
645 break;
646 case 2: /* DPP2 */
647 REG_UPDATE(DOMAIN5_PG_CONFIG,
648 DOMAIN5_POWER_GATE, power_gate);
649
650 REG_WAIT(DOMAIN5_PG_STATUS,
651 DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
652 1, 1000);
653 break;
654 case 3: /* DPP3 */
655 REG_UPDATE(DOMAIN7_PG_CONFIG,
656 DOMAIN7_POWER_GATE, power_gate);
657
658 REG_WAIT(DOMAIN7_PG_STATUS,
659 DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
660 1, 1000);
661 break;
662 default:
663 BREAK_TO_DEBUGGER();
664 break;
665 }
666 }
667
668 /**
669 * dcn10_hubp_pg_control - HUBP power gate control.
670 *
671 * @hws: dce_hwseq reference.
672 * @hubp_inst: DPP instance reference.
673 * @power_on: true if we want to enable power gate, false otherwise.
674 *
675 * Enable or disable power gate in the specific HUBP instance.
676 */
dcn10_hubp_pg_control(struct dce_hwseq * hws,unsigned int hubp_inst,bool power_on)677 void dcn10_hubp_pg_control(
678 struct dce_hwseq *hws,
679 unsigned int hubp_inst,
680 bool power_on)
681 {
682 uint32_t power_gate = power_on ? 0 : 1;
683 uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
684
685 if (hws->ctx->dc->debug.disable_hubp_power_gate)
686 return;
687 if (REG(DOMAIN0_PG_CONFIG) == 0)
688 return;
689
690 switch (hubp_inst) {
691 case 0: /* DCHUBP0 */
692 REG_UPDATE(DOMAIN0_PG_CONFIG,
693 DOMAIN0_POWER_GATE, power_gate);
694
695 REG_WAIT(DOMAIN0_PG_STATUS,
696 DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
697 1, 1000);
698 break;
699 case 1: /* DCHUBP1 */
700 REG_UPDATE(DOMAIN2_PG_CONFIG,
701 DOMAIN2_POWER_GATE, power_gate);
702
703 REG_WAIT(DOMAIN2_PG_STATUS,
704 DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
705 1, 1000);
706 break;
707 case 2: /* DCHUBP2 */
708 REG_UPDATE(DOMAIN4_PG_CONFIG,
709 DOMAIN4_POWER_GATE, power_gate);
710
711 REG_WAIT(DOMAIN4_PG_STATUS,
712 DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
713 1, 1000);
714 break;
715 case 3: /* DCHUBP3 */
716 REG_UPDATE(DOMAIN6_PG_CONFIG,
717 DOMAIN6_POWER_GATE, power_gate);
718
719 REG_WAIT(DOMAIN6_PG_STATUS,
720 DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
721 1, 1000);
722 break;
723 default:
724 BREAK_TO_DEBUGGER();
725 break;
726 }
727 }
728
/*
 * Power up the front-end (DPP + HUBP) resources for the given plane:
 * first ungate the DPP root clock, then raise IP_REQUEST_EN around the
 * per-domain power-gate hooks so the PGFSM accepts the request.
 */
static void power_on_plane_resources(
	struct dce_hwseq *hws,
	int plane_id)
{
	DC_LOGGER_INIT(hws->ctx->logger);

	if (hws->funcs.dpp_root_clock_control)
		hws->funcs.dpp_root_clock_control(hws, plane_id, true);

	if (REG(DC_IP_REQUEST_CNTL)) {
		/* IP_REQUEST_EN must bracket the power-gate programming */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 1);

		if (hws->funcs.dpp_pg_control)
			hws->funcs.dpp_pg_control(hws, plane_id, true);

		if (hws->funcs.hubp_pg_control)
			hws->funcs.hubp_pg_control(hws, plane_id, true);

		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 0);
		DC_LOG_DEBUG(
				"Un-gated front end for pipe %d\n", plane_id);
	}
}
754
/*
 * Revert the DEGVIDCN10_253 stutter workaround: blank HUBP0 again and
 * power-gate it back down. No-op if the workaround was never applied.
 */
static void undo_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];

	if (!hws->wa_state.DEGVIDCN10_253_applied)
		return;

	hubp->funcs->set_blank(hubp, true);

	/* IP_REQUEST_EN must bracket the power-gate programming */
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	hws->funcs.hubp_pg_control(hws, 0, false);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hws->wa_state.DEGVIDCN10_253_applied = false;
}
774
/*
 * Apply the DEGVIDCN10_253 workaround: when every HUBP is power gated,
 * power HUBP0 back up (left blanked) so stutter can still engage. Only
 * runs when the WA is flagged for this ASIC and stutter isn't disabled.
 */
static void apply_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];
	int i;

	if (dc->debug.disable_stutter)
		return;

	if (!hws->wa.DEGVIDCN10_253)
		return;

	/* bail out unless every pipe's HUBP is power gated */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (!dc->res_pool->hubps[i]->power_gated)
			return;
	}

	/* all pipe power gated, apply work around to enable stutter. */

	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	hws->funcs.hubp_pg_control(hws, 0, true);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hubp->funcs->set_hubp_blank_en(hubp, false);
	hws->wa_state.DEGVIDCN10_253_applied = true;
}
804
/*
 * dcn10_bios_golden_init - Run the VBIOS golden init sequence for DCN.
 *
 * Initializes DCN globally and per pipe via the BIOS command table, then
 * restores DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE if the command
 * table flipped it (DF-sleep workaround for resume from S0i3). Skipped
 * entirely when the platform's s0i3 golden-init WA handles it instead.
 */
void dcn10_bios_golden_init(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *bp = dc->ctx->dc_bios;
	int i;
	bool allow_self_fresh_force_enable = true;

	if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
		return;

	/* snapshot the self-refresh force-enable state before the command table runs */
	if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
		allow_self_fresh_force_enable =
				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);


	/* WA for making DF sleep when idle after resume from S0i3.
	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
	 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
	 * before calling command table and it changed to 1 after,
	 * it should be set back to 0.
	 */

	/* initialize dcn global */
	bp->funcs->enable_disp_power_gating(bp,
			CONTROLLER_ID_D0, ASIC_PIPE_INIT);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		/* initialize dcn per pipe */
		bp->funcs->enable_disp_power_gating(bp,
				CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
	}

	if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
		if (allow_self_fresh_force_enable == false &&
				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
										!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);

}
844
/*
 * Workaround for a spurious OTG underflow indication: wait for MPCC
 * disconnects on all pipes of this stream, re-enable double buffering of
 * blank data, and clear the underflow bit only if it was newly raised
 * during the workaround (a pre-existing underflow is left sticky).
 */
static void false_optc_underflow_wa(
		struct dc *dc,
		const struct dc_stream_state *stream,
		struct timing_generator *tg)
{
	int i;
	bool underflow;

	if (!dc->hwseq->wa.false_optc_underflow)
		return;

	/* remember whether an underflow was already pending */
	underflow = tg->funcs->is_optc_underflow_occurred(tg);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		if (old_pipe_ctx->stream != stream)
			continue;

		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
	}

	if (tg->funcs->set_blank_data_double_buffer)
		tg->funcs->set_blank_data_double_buffer(tg, true);

	/* only clear underflows introduced by the workaround itself */
	if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
		tg->funcs->clear_optc_underflow(tg);
}
873
calculate_vready_offset_for_group(struct pipe_ctx * pipe)874 static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
875 {
876 struct pipe_ctx *other_pipe;
877 int vready_offset = pipe->pipe_dlg_param.vready_offset;
878
879 /* Always use the largest vready_offset of all connected pipes */
880 for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
881 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
882 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
883 }
884 for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
885 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
886 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
887 }
888 for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
889 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
890 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
891 }
892 for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
893 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
894 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
895 }
896
897 return vready_offset;
898 }
899
/*
 * dcn10_enable_stream_timing - Program and start OTG timing for a stream.
 *
 * Runs only for the head (top) pipe; child pipes share its back end.
 * Sequence: enable OPTC clock, program the pixel clock, program OTG
 * timing, set the OTG blank color, then enable the CRTC.
 *
 * Returns DC_OK on success, DC_ERROR_UNEXPECTED if pixel-clock
 * programming or CRTC enable fails.
 */
enum dc_status dcn10_enable_stream_timing(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	enum dc_color_space color_space;
	struct tg_color black_color = {0};

	/* by upper caller loop, pipe0 is parent pipe and be called first.
	 * back end is set up by for pipe0. Other children pipe share back end
	 * with pipe 0. No program is needed.
	 */
	if (pipe_ctx->top_pipe != NULL)
		return DC_OK;

	/* TODO check if timing_changed, disable stream if timing changed */

	/* HW program guide assume display already disable
	 * by unplug sequence. OTG assume stop.
	 */
	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);

	/* Pixel clock must be up before the OTG is programmed. */
	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
			pipe_ctx->clock_source,
			&pipe_ctx->stream_res.pix_clk_params,
			dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
			&pipe_ctx->pll_settings)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* HDMI TMDS: record that this OTG now holds a symclk reference. */
	if (dc_is_hdmi_tmds_signal(stream->signal)) {
		stream->link->phy_state.symclk_ref_cnts.otg = 1;
		if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
		else
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
	}

	/* Group-wide vready offset keeps blended/ODM pipes in sync. */
	pipe_ctx->stream_res.tg->funcs->program_timing(
			pipe_ctx->stream_res.tg,
			&stream->timing,
			calculate_vready_offset_for_group(pipe_ctx),
			pipe_ctx->pipe_dlg_param.vstartup_start,
			pipe_ctx->pipe_dlg_param.vupdate_offset,
			pipe_ctx->pipe_dlg_param.vupdate_width,
			pipe_ctx->stream->signal,
			true);

#if 0 /* move to after enable_crtc */
	/* TODO: OPP FMT, ABM. etc. should be done here. */
	/* or FPGA now. instance 0 only. TODO: move to opp.c */

	inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;

	pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
				pipe_ctx->stream_res.opp,
				&stream->bit_depth_params,
				&stream->clamping);
#endif
	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;

	if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
		pipe_ctx->stream_res.tg->funcs->set_blank_color(
				pipe_ctx->stream_res.tg,
				&black_color);

	/* If the OTG is running unblanked, blank it and wait before
	 * enabling the CRTC, applying the false-underflow workaround.
	 */
	if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
			!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
		pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
		hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
		false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
	}

	/* VTG is within DCHUB command block. DCFCLK is always on */
	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* TODO program crtc source select for non-virtual signal*/
	/* TODO program FMT */
	/* TODO setup link_enc */
	/* TODO set stream attributes */
	/* TODO program audio */
	/* TODO enable stream if timing changed */
	/* TODO unblank stream if DP */

	return DC_OK;
}
1001
/*
 * dcn10_reset_back_end_for_pipe - Tear down the back end used by a pipe.
 *
 * Disables the stream/link (or just the audio stream), releases the audio
 * endpoint, and — for the head pipe only — stops the OTG. The stream
 * pointer is cleared only when the pipe belongs to the current state.
 */
static void dcn10_reset_back_end_for_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	int i;
	struct dc_link *link;
	DC_LOGGER_INIT(dc->ctx->logger);
	if (pipe_ctx->stream_res.stream_enc == NULL) {
		/* No stream encoder: back end was never set up for this pipe. */
		pipe_ctx->stream = NULL;
		return;
	}

	link = pipe_ctx->stream->link;
	/* DPMS may already disable or */
	/* dpms_off status is incorrect due to fastboot
	 * feature. When system resume from S4 with second
	 * screen only, the dpms_off would be true but
	 * VBIOS lit up eDP, so check link status too.
	 */
	if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
		dc->link_srv->set_dpms_off(pipe_ctx);
	else if (pipe_ctx->stream_res.audio)
		dc->hwss.disable_audio_stream(pipe_ctx);

	if (pipe_ctx->stream_res.audio) {
		/*disable az_endpoint*/
		pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

		/*free audio*/
		if (dc->caps.dynamic_audio == true) {
			/*we have to dynamic arbitrate the audio endpoints*/
			/*we free the resource, need reset is_audio_acquired*/
			update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
					pipe_ctx->stream_res.audio, false);
			pipe_ctx->stream_res.audio = NULL;
		}
	}

	/* by upper caller loop, parent pipe: pipe0, will be reset last.
	 * back end share by all pipes and will be disable only when disable
	 * parent pipe.
	 */
	if (pipe_ctx->top_pipe == NULL) {

		if (pipe_ctx->stream_res.abm)
			dc->hwss.set_abm_immediate_disable(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);

		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
		/* Clear any DRR (variable refresh) settings left on the OTG. */
		if (pipe_ctx->stream_res.tg->funcs->set_drr)
			pipe_ctx->stream_res.tg->funcs->set_drr(
					pipe_ctx->stream_res.tg, NULL);
		pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
	}

	/* Only detach the stream if this pipe_ctx is part of the current
	 * state; otherwise leave it untouched.
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
			break;

	if (i == dc->res_pool->pipe_count)
		return;

	pipe_ctx->stream = NULL;
	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
1070
dcn10_hw_wa_force_recovery(struct dc * dc)1071 static bool dcn10_hw_wa_force_recovery(struct dc *dc)
1072 {
1073 struct hubp *hubp ;
1074 unsigned int i;
1075 bool need_recover = true;
1076
1077 if (!dc->debug.recovery_enabled)
1078 return false;
1079
1080 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1081 struct pipe_ctx *pipe_ctx =
1082 &dc->current_state->res_ctx.pipe_ctx[i];
1083 if (pipe_ctx != NULL) {
1084 hubp = pipe_ctx->plane_res.hubp;
1085 if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
1086 if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
1087 /* one pipe underflow, we will reset all the pipes*/
1088 need_recover = true;
1089 }
1090 }
1091 }
1092 }
1093 if (!need_recover)
1094 return false;
1095 /*
1096 DCHUBP_CNTL:HUBP_BLANK_EN=1
1097 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
1098 DCHUBP_CNTL:HUBP_DISABLE=1
1099 DCHUBP_CNTL:HUBP_DISABLE=0
1100 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
1101 DCSURF_PRIMARY_SURFACE_ADDRESS
1102 DCHUBP_CNTL:HUBP_BLANK_EN=0
1103 */
1104
1105 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1106 struct pipe_ctx *pipe_ctx =
1107 &dc->current_state->res_ctx.pipe_ctx[i];
1108 if (pipe_ctx != NULL) {
1109 hubp = pipe_ctx->plane_res.hubp;
1110 /*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
1111 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1112 hubp->funcs->set_hubp_blank_en(hubp, true);
1113 }
1114 }
1115 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
1116 hubbub1_soft_reset(dc->res_pool->hubbub, true);
1117
1118 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1119 struct pipe_ctx *pipe_ctx =
1120 &dc->current_state->res_ctx.pipe_ctx[i];
1121 if (pipe_ctx != NULL) {
1122 hubp = pipe_ctx->plane_res.hubp;
1123 /*DCHUBP_CNTL:HUBP_DISABLE=1*/
1124 if (hubp != NULL && hubp->funcs->hubp_disable_control)
1125 hubp->funcs->hubp_disable_control(hubp, true);
1126 }
1127 }
1128 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1129 struct pipe_ctx *pipe_ctx =
1130 &dc->current_state->res_ctx.pipe_ctx[i];
1131 if (pipe_ctx != NULL) {
1132 hubp = pipe_ctx->plane_res.hubp;
1133 /*DCHUBP_CNTL:HUBP_DISABLE=0*/
1134 if (hubp != NULL && hubp->funcs->hubp_disable_control)
1135 hubp->funcs->hubp_disable_control(hubp, true);
1136 }
1137 }
1138 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
1139 hubbub1_soft_reset(dc->res_pool->hubbub, false);
1140 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1141 struct pipe_ctx *pipe_ctx =
1142 &dc->current_state->res_ctx.pipe_ctx[i];
1143 if (pipe_ctx != NULL) {
1144 hubp = pipe_ctx->plane_res.hubp;
1145 /*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
1146 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1147 hubp->funcs->set_hubp_blank_en(hubp, true);
1148 }
1149 }
1150 return true;
1151
1152 }
1153
/*
 * dcn10_verify_allow_pstate_change_high - Sanity-check pstate allow status.
 *
 * If HUBBUB reports that pstate change is not allowed, optionally dump HW
 * state, trace pipe state, break to debugger, attempt the forced-recovery
 * workaround, and re-verify.
 */
void dcn10_verify_allow_pstate_change_high(struct dc *dc)
{
	struct hubbub *hubbub = dc->res_pool->hubbub;
	static bool should_log_hw_state; /* prevent hw state log by default */

	if (!hubbub->funcs->verify_allow_pstate_change_high)
		return;

	if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
		int i = 0;

		if (should_log_hw_state)
			dcn10_log_hw_state(dc, NULL);

		TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
		BREAK_TO_DEBUGGER();
		if (dcn10_hw_wa_force_recovery(dc)) {
			/*check again*/
			if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub))
				BREAK_TO_DEBUGGER();
		}
	}
}
1177
1178 /* trigger HW to start disconnect plane from stream on the next vsync */
void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	int dpp_id = pipe_ctx->plane_res.dpp->inst;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params;
	struct mpcc *mpcc_to_remove = NULL;
	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;

	/* Find the MPCC that blends this pipe's DPP output in the OPP tree. */
	mpc_tree_params = &(opp->mpc_tree_params);
	mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);

	/*Already reset*/
	if (mpcc_to_remove == NULL)
		return;

	mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
	// Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
	// so don't wait for MPCC_IDLE in the programming sequence
	if (opp != NULL && !pipe_ctx->plane_state->is_phantom)
		opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;

	/* Bandwidth can be re-optimized once the plane is gone. */
	dc->optimized_required = true;

	if (hubp->funcs->hubp_disconnect)
		hubp->funcs->hubp_disconnect(hubp);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
1210
1211 /**
1212 * dcn10_plane_atomic_power_down - Power down plane components.
1213 *
1214 * @dc: dc struct reference. used for grab hwseq.
1215 * @dpp: dpp struct reference.
1216 * @hubp: hubp struct reference.
1217 *
1218 * Keep in mind that this operation requires a power gate configuration;
1219 * however, requests for switch power gate are precisely controlled to avoid
1220 * problems. For this reason, power gate request is usually disabled. This
1221 * function first needs to enable the power gate request before disabling DPP
1222 * and HUBP. Finally, it disables the power gate request again.
1223 */
void dcn10_plane_atomic_power_down(struct dc *dc,
		struct dpp *dpp,
		struct hubp *hubp)
{
	struct dce_hwseq *hws = dc->hwseq;
	DC_LOGGER_INIT(dc->ctx->logger);

	if (REG(DC_IP_REQUEST_CNTL)) {
		/* Enable power gating requests before toggling the gates. */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 1);

		if (hws->funcs.dpp_pg_control)
			hws->funcs.dpp_pg_control(hws, dpp->inst, false);

		if (hws->funcs.hubp_pg_control)
			hws->funcs.hubp_pg_control(hws, hubp->inst, false);

		/* Reset DPP state as it goes down. */
		dpp->funcs->dpp_reset(dpp);

		/* Disable power gating requests again once both are gated. */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 0);
		DC_LOG_DEBUG(
				"Power gated front end %d\n", hubp->inst);
	}

	/* Gate the DPP root clock where the hw supports it. */
	if (hws->funcs.dpp_root_clock_control)
		hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
}
1252
1253 /* disable HW used by plane.
1254 * note: cannot disable until disconnect is complete
1255 */
void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	int opp_id = hubp->opp_id;

	/* Disconnect must have completed before clocks can be gated. */
	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);

	hubp->funcs->hubp_clk_cntl(hubp, false);

	dpp->funcs->dpp_dppclk_control(dpp, false, false);

	/* Gate the OPP pipe clock only if no MPCC still feeds this OPP. */
	if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
		pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
				pipe_ctx->stream_res.opp,
				false);

	hubp->power_gated = true;
	dc->optimized_required = false; /* We're powering off, no need to optimize */

	hws->funcs.plane_atomic_power_down(dc,
			pipe_ctx->plane_res.dpp,
			pipe_ctx->plane_res.hubp);

	/* Clear the pipe's resource bindings now that hw is down. */
	pipe_ctx->stream = NULL;
	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
	pipe_ctx->top_pipe = NULL;
	pipe_ctx->bottom_pipe = NULL;
	pipe_ctx->plane_state = NULL;
}
1288
dcn10_disable_plane(struct dc * dc,struct pipe_ctx * pipe_ctx)1289 void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
1290 {
1291 struct dce_hwseq *hws = dc->hwseq;
1292 DC_LOGGER_INIT(dc->ctx->logger);
1293
1294 if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1295 return;
1296
1297 hws->funcs.plane_atomic_disable(dc, pipe_ctx);
1298
1299 apply_DEGVIDCN10_253_wa(dc);
1300
1301 DC_LOG_DC("Power down front end %d\n",
1302 pipe_ctx->pipe_idx);
1303 }
1304
/*
 * dcn10_init_pipes - Put all pipes into a known-disabled state at init.
 *
 * Skips pipes carrying a stream when seamless boot optimization is
 * active. Otherwise: blanks any running OTG, resets DET sizes, resets
 * MPC muxes, disconnects and powers down each front end, and finally
 * power gates DSCs not in use by a running OPTC.
 */
void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	bool can_apply_seamless_boot = false;

	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization) {
			can_apply_seamless_boot = true;
			break;
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		/* Blank controller using driver code instead of
		 * command table.
		 */
		if (tg->funcs->is_tg_enabled(tg)) {
			if (hws->funcs.init_blank != NULL) {
				hws->funcs.init_blank(dc, tg);
				tg->funcs->lock(tg);
			} else {
				tg->funcs->lock(tg);
				tg->funcs->set_blank(tg, true);
				hwss_wait_for_blank_complete(tg);
			}
		}
	}

	/* Reset det size */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		struct hubp *hubp = dc->res_pool->hubps[i];

		/* Do not need to reset for seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		if (hubbub && hubp) {
			if (hubbub->funcs->program_det_size)
				hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
		}
	}

	/* num_opp will be equal to number of mpcc */
	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Cannot reset the MPC mux if seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		dc->res_pool->mpc->funcs->mpc_init_single_inst(
				dc->res_pool->mpc, i);
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct hubp *hubp = dc->res_pool->hubps[i];
		struct dpp *dpp = dc->res_pool->dpps[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (can_apply_seamless_boot &&
			pipe_ctx->stream != NULL &&
			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
				pipe_ctx->stream_res.tg)) {
			// Enable double buffering for OTG_BLANK no matter if
			// seamless boot is enabled or not to suppress global sync
			// signals when OTG blanked. This is to prevent pipe from
			// requesting data while in PSR.
			tg->funcs->tg_init(tg);
			hubp->power_gated = true;
			continue;
		}

		/* Disable on the current state so the new one isn't cleared. */
		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		dpp->funcs->dpp_reset(dpp);

		/* Bind default resources to the pipe so that the generic
		 * disconnect/disable paths below can operate on it.
		 */
		pipe_ctx->stream_res.tg = tg;
		pipe_ctx->pipe_idx = i;

		pipe_ctx->plane_res.hubp = hubp;
		pipe_ctx->plane_res.dpp = dpp;
		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
		hubp->mpcc_id = dpp->inst;
		hubp->opp_id = OPP_ID_INVALID;
		hubp->power_gated = false;

		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];

		hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);

		if (tg->funcs->is_tg_enabled(tg))
			tg->funcs->unlock(tg);

		dc->hwss.disable_plane(dc, pipe_ctx);

		pipe_ctx->stream_res.tg = NULL;
		pipe_ctx->plane_res.hubp = NULL;

		if (tg->funcs->is_tg_enabled(tg)) {
			if (tg->funcs->init_odm)
				tg->funcs->init_odm(tg);
		}

		tg->funcs->tg_init(tg);
	}

	/* Power gate DSCs */
	if (hws->funcs.dsc_pg_control != NULL) {
		uint32_t num_opps = 0;
		uint32_t opp_id_src0 = OPP_ID_INVALID;
		uint32_t opp_id_src1 = OPP_ID_INVALID;

		// Step 1: To find out which OPTC is running & OPTC DSC is ON
		// We can't use res_pool->res_cap->num_timing_generator to check
		// Because it records display pipes default setting built in driver,
		// not display pipes of the current chip.
		// Some ASICs would be fused display pipes less than the default setting.
		// In dcnxx_resource_construct function, driver would obtain real information.
		for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
			uint32_t optc_dsc_state = 0;
			struct timing_generator *tg = dc->res_pool->timing_generators[i];

			if (tg->funcs->is_tg_enabled(tg)) {
				if (tg->funcs->get_dsc_status)
					tg->funcs->get_dsc_status(tg, &optc_dsc_state);
				// Only one OPTC with DSC is ON, so if we got one result, we would exit this block.
				// non-zero value is DSC enabled
				if (optc_dsc_state != 0) {
					tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
					break;
				}
			}
		}

		// Step 2: To power down DSC but skip DSC of running OPTC
		for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
			struct dcn_dsc_state s = {0};

			dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);

			if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
				s.dsc_clock_en && s.dsc_fw_en)
				continue;

			hws->funcs.dsc_pg_control(hws, dc->res_pool->dscs[i]->inst, false);
		}
	}
}
1475
/*
 * dcn10_init_hw - One-time hardware initialization at driver load/resume.
 *
 * Initializes clocks, DCCG, reference clocks, link encoders, audio, ABM,
 * DMCU and clock gating; powers down unused pipes unless a seamless-boot
 * optimization requires keeping VBIOS state alive.
 */
void dcn10_init_hw(struct dc *dc)
{
	int i;
	struct abm *abm = dc->res_pool->abm;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	bool is_optimized_init_done = false;

	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

	/* Align bw context with hw config when system resume. */
	if (dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
		dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
		dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz;
	}

	// Initialize the dccg
	if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
		dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	if (!dcb->funcs->is_accelerated_mode(dcb))
		hws->funcs.disable_vga(dc->hwseq);

	if (!dc_dmub_srv_optimized_init_done(dc->ctx->dmub_srv))
		hws->funcs.bios_golden_init(dc);


	/* Derive DCCG/DCHUB reference clocks from the BIOS crystal
	 * frequency; fall back to xtalin when no DCCG sw component exists.
	 */
	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (res_pool->dccg && res_pool->hubbub) {

			(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
					dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
					&res_pool->ref_clocks.dccg_ref_clock_inKhz);

			(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
					res_pool->ref_clocks.dccg_ref_clock_inKhz,
					&res_pool->ref_clocks.dchub_ref_clock_inKhz);
		} else {
			// Not all ASICs have DCCG sw component
			res_pool->ref_clocks.dccg_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
			res_pool->ref_clocks.dchub_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
		}
	} else
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		if (!is_optimized_init_done)
			link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			link->link_status.link_active = true;
			if (link->link_enc->funcs->fec_is_active &&
					link->link_enc->funcs->fec_is_active(link->link_enc))
				link->fec_state = dc_link_fec_enabled;
		}
	}

	/* we want to turn off all dp displays before doing detection */
	dc->link_srv->blank_all_dp_displays(dc);

	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);

	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
		if (!is_optimized_init_done) {
			hws->funcs.init_pipes(dc, dc->current_state);
			if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
				dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
						!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
		}
	}

	if (!is_optimized_init_done) {

		for (i = 0; i < res_pool->audio_count; i++) {
			struct audio *audio = res_pool->audios[i];

			audio->funcs->hw_init(audio);
		}

		/* Panel controllers report the current backlight level. */
		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			if (link->panel_cntl)
				backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
		}

		if (abm != NULL)
			abm->funcs->abm_init(abm, backlight);

		if (dmcu != NULL && !dmcu->auto_load_dmcu)
			dmcu->funcs->dmcu_init(dmcu);
	}

	if (abm != NULL && dmcu != NULL)
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);

	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
	if (!is_optimized_init_done)
		REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
	}

	if (dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
}
1612
1613 /* In headless boot cases, DIG may be turned
1614 * on which causes HW/SW discrepancies.
1615 * To avoid this, power down hardware on boot
1616 * if DIG is turned on
1617 */
void dcn10_power_down_on_boot(struct dc *dc)
{
	struct dc_link *edp_links[MAX_NUM_EDP];
	struct dc_link *edp_link = NULL;
	int edp_num;
	int i = 0;

	dc_get_edp_links(dc, edp_links, &edp_num);
	if (edp_num)
		edp_link = edp_links[0];

	/* Prefer the eDP path: backlight off, power down, panel power off. */
	if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
			edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
			dc->hwseq->funcs.edp_backlight_control &&
			dc->hwss.power_down &&
			dc->hwss.edp_power_control) {
		dc->hwseq->funcs.edp_backlight_control(edp_link, false);
		dc->hwss.power_down(dc);
		dc->hwss.edp_power_control(edp_link, false);
	} else {
		/* Otherwise power down once if any DIG is found enabled. */
		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
					link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
					dc->hwss.power_down) {
				dc->hwss.power_down(dc);
				break;
			}

		}
	}

	/*
	 * Call update_clocks with empty context
	 * to send DISPLAY_OFF
	 * Otherwise DISPLAY_OFF may not be asserted
	 */
	if (dc->clk_mgr->funcs->set_low_power_state)
		dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
}
1659
/*
 * dcn10_reset_hw_ctx_wrap - Reset back ends no longer used by the new state.
 *
 * Walks current-state head pipes in reverse order and tears down any whose
 * stream is gone or needs reprogramming in the incoming context, then
 * powers down their clock source.
 */
void dcn10_reset_hw_ctx_wrap(
		struct dc *dc,
		struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;

	/* Reset Back End*/
	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
		struct pipe_ctx *pipe_ctx_old =
			&dc->current_state->res_ctx.pipe_ctx[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (!pipe_ctx_old->stream)
			continue;

		/* Only head pipes own a back end. */
		if (pipe_ctx_old->top_pipe)
			continue;

		if (!pipe_ctx->stream ||
				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
			struct clock_source *old_clk = pipe_ctx_old->clock_source;

			dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
			if (hws->funcs.enable_stream_gating)
				hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
			if (old_clk)
				old_clk->funcs->cs_power_down(old_clk);
		}
	}
}
1691
patch_address_for_sbs_tb_stereo(struct pipe_ctx * pipe_ctx,PHYSICAL_ADDRESS_LOC * addr)1692 static bool patch_address_for_sbs_tb_stereo(
1693 struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1694 {
1695 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1696 bool sec_split = pipe_ctx->top_pipe &&
1697 pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
1698 if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1699 (pipe_ctx->stream->timing.timing_3d_format ==
1700 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1701 pipe_ctx->stream->timing.timing_3d_format ==
1702 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
1703 *addr = plane_state->address.grph_stereo.left_addr;
1704 plane_state->address.grph_stereo.left_addr =
1705 plane_state->address.grph_stereo.right_addr;
1706 return true;
1707 } else {
1708 if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1709 plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1710 plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1711 plane_state->address.grph_stereo.right_addr =
1712 plane_state->address.grph_stereo.left_addr;
1713 plane_state->address.grph_stereo.right_meta_addr =
1714 plane_state->address.grph_stereo.left_meta_addr;
1715 }
1716 }
1717 return false;
1718 }
1719
void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	bool addr_patched = false;
	PHYSICAL_ADDRESS_LOC addr;
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;

	if (plane_state == NULL)
		return;

	/* May swap left/right addresses for SBS/TAB stereo; the saved
	 * left address in `addr` is only valid when addr_patched is true.
	 */
	addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);

	pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
			pipe_ctx->plane_res.hubp,
			&plane_state->address,
			plane_state->flip_immediate);

	plane_state->status.requested_address = plane_state->address;

	/* Immediate flips take effect without waiting for vsync. */
	if (plane_state->flip_immediate)
		plane_state->status.current_address = plane_state->address;

	/* Restore the original left address after programming. */
	if (addr_patched)
		pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
}
1744
/*
 * dcn10_set_input_transfer_func - Program DPP input LUT and degamma.
 *
 * Programs the input LUT from the plane's gamma correction (when usable)
 * and selects the degamma mode from the plane's input transfer function:
 * predefined curves map to hw modes, PQ and arbitrary curves are
 * translated to a PWL. Returns false on missing DPP or an unsupported
 * predefined transfer function.
 */
bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
			const struct dc_plane_state *plane_state)
{
	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
	const struct dc_transfer_func *tf = NULL;
	bool result = true;

	if (dpp_base == NULL)
		return false;

	if (plane_state->in_transfer_func)
		tf = plane_state->in_transfer_func;

	/* Input LUT is skipped for identity gamma or formats that bypass it. */
	if (plane_state->gamma_correction &&
		!dpp_base->ctx->dc->debug.always_use_regamma
		&& !plane_state->gamma_correction->is_identity
			&& dce_use_lut(plane_state->format))
		dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);

	if (tf == NULL)
		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
	else if (tf->type == TF_TYPE_PREDEFINED) {
		switch (tf->tf) {
		case TRANSFER_FUNCTION_SRGB:
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
			break;
		case TRANSFER_FUNCTION_BT709:
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
			break;
		case TRANSFER_FUNCTION_LINEAR:
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
			break;
		case TRANSFER_FUNCTION_PQ:
			/* No hw PQ degamma: translate the curve to a PWL. */
			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
			cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
			result = true;
			break;
		default:
			result = false;
			break;
		}
	} else if (tf->type == TF_TYPE_BYPASS) {
		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
	} else {
		/* Arbitrary curve: translate to PWL and program it. */
		cm_helper_translate_curve_to_degamma_hw_format(tf,
					&dpp_base->degamma_params);
		dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
				&dpp_base->degamma_params);
		result = true;
	}

	return result;
}
1799
1800 #define MAX_NUM_HW_POINTS 0x200
1801
/*
 * log_tf - Log transfer function points for debugging.
 *
 * Logs the first hw_points_num points at the GAMMA/TF-channel log levels,
 * then the remaining points up to MAX_NUM_HW_POINTS at the all-points
 * levels.
 */
static void log_tf(struct dc_context *ctx,
		struct dc_transfer_func *tf, uint32_t hw_points_num)
{
	// DC_LOG_GAMMA is default logging of all hw points
	// DC_LOG_ALL_GAMMA logs all points, not only hw points
	// DC_LOG_ALL_TF_POINTS logs all channels of the tf
	int i = 0;

	DC_LOGGER_INIT(ctx->logger);
	DC_LOG_GAMMA("Gamma Correction TF");
	DC_LOG_ALL_GAMMA("Logging all tf points...");
	DC_LOG_ALL_TF_CHANNELS("Logging all channels...");

	for (i = 0; i < hw_points_num; i++) {
		DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
	}

	for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
		DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
	}
}
1827
dcn10_set_output_transfer_func(struct dc * dc,struct pipe_ctx * pipe_ctx,const struct dc_stream_state * stream)1828 bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1829 const struct dc_stream_state *stream)
1830 {
1831 struct dpp *dpp = pipe_ctx->plane_res.dpp;
1832
1833 if (dpp == NULL)
1834 return false;
1835
1836 dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
1837
1838 if (stream->out_transfer_func &&
1839 stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
1840 stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
1841 dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
1842
1843 /* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
1844 * update.
1845 */
1846 else if (cm_helper_translate_curve_to_hw_format(dc->ctx,
1847 stream->out_transfer_func,
1848 &dpp->regamma_params, false)) {
1849 dpp->funcs->dpp_program_regamma_pwl(
1850 dpp,
1851 &dpp->regamma_params, OPP_REGAMMA_USER);
1852 } else
1853 dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
1854
1855 if (stream != NULL && stream->ctx != NULL &&
1856 stream->out_transfer_func != NULL) {
1857 log_tf(stream->ctx,
1858 stream->out_transfer_func,
1859 dpp->regamma_params.hw_points_num);
1860 }
1861
1862 return true;
1863 }
1864
/*
 * Take or release the TG master update lock for the pipe's tree.
 * The lock covers every pipe on the timing generator, so only the
 * top pipe needs to (and may) take it.
 */
void dcn10_pipe_control_lock(
	struct dc *dc,
	struct pipe_ctx *pipe,
	bool lock)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct timing_generator *tg;

	if (!pipe || pipe->top_pipe)
		return;

	tg = pipe->stream_res.tg;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	if (lock)
		tg->funcs->lock(tg);
	else
		tg->funcs->unlock(tg);

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
1889
/**
 * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
 *
 * Software keepout workaround to prevent cursor update locking from stalling
 * out cursor updates indefinitely or from old values from being retained in
 * the case where the viewport changes in the same frame as the cursor.
 *
 * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
 * too close to VUPDATE, then stall out until VUPDATE finishes.
 *
 * TODO: Optimize cursor programming to be once per frame before VUPDATE
 * to avoid the need for this workaround.
 *
 * @dc: Current DC state
 * @pipe_ctx: Pipe_ctx pointer for delayed cursor update
 *
 * Return: void
 */
static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct crtc_position position;
	uint32_t vupdate_start, vupdate_end;
	unsigned int lines_to_vupdate, us_to_vupdate, vpos;
	unsigned int us_per_line, us_vupdate;

	/* Both hooks are required to locate VUPDATE and the current scanline. */
	if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
		return;

	/* Nothing to guard if the stream resources are not present. */
	if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
		return;

	dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
			&vupdate_end);

	dc->hwss.get_position(&pipe_ctx, 1, &position);
	vpos = position.vertical_count;

	/* Avoid wraparound calculation issues by biasing all three values
	 * into the second frame.
	 */
	vupdate_start += stream->timing.v_total;
	vupdate_end += stream->timing.v_total;
	vpos += stream->timing.v_total;

	if (vpos <= vupdate_start) {
		/* VPOS is in VACTIVE or back porch. */
		lines_to_vupdate = vupdate_start - vpos;
	} else if (vpos > vupdate_end) {
		/* VPOS is in the front porch. */
		return;
	} else {
		/* VPOS is in VUPDATE. */
		lines_to_vupdate = 0;
	}

	/* Calculate time until VUPDATE in microseconds.
	 * h_total * 10000 / pix_clk_100hz == line period in us.
	 */
	us_per_line =
		stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
	us_to_vupdate = lines_to_vupdate * us_per_line;

	/* 70 us is a conservative estimate of cursor update time*/
	if (us_to_vupdate > 70)
		return;

	/* Stall out until the cursor update completes. */
	if (vupdate_end < vupdate_start)
		vupdate_end += stream->timing.v_total;
	us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
	udelay(us_to_vupdate + us_vupdate);
}
1959
/*
 * Lock/unlock cursor programming for the pipe's MPCC tree.
 * The lock is per tree, so only the top pipe of each stream takes it.
 */
void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
{
	if (!pipe || pipe->top_pipe)
		return;

	/* Keep the lock itself from stalling out cursor updates. */
	if (lock)
		delay_cursor_until_vupdate(dc, pipe);

	if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
		/* DMUB-managed lock path. */
		union dmub_hw_lock_flags hw_locks = { 0 };
		struct dmub_hw_lock_inst_flags inst_flags = { 0 };

		hw_locks.bits.lock_cursor = 1;
		inst_flags.opp_inst = pipe->stream_res.opp->inst;

		dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
					lock,
					&hw_locks,
					&inst_flags);
		return;
	}

	/* Driver-managed lock path via the MPC. */
	dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
			pipe->stream_res.opp->inst, lock);
}
1985
wait_for_reset_trigger_to_occur(struct dc_context * dc_ctx,struct timing_generator * tg)1986 static bool wait_for_reset_trigger_to_occur(
1987 struct dc_context *dc_ctx,
1988 struct timing_generator *tg)
1989 {
1990 bool rc = false;
1991
1992 /* To avoid endless loop we wait at most
1993 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
1994 const uint32_t frames_to_wait_on_triggered_reset = 10;
1995 int i;
1996
1997 for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
1998
1999 if (!tg->funcs->is_counter_moving(tg)) {
2000 DC_ERROR("TG counter is not moving!\n");
2001 break;
2002 }
2003
2004 if (tg->funcs->did_triggered_reset_occur(tg)) {
2005 rc = true;
2006 /* usually occurs at i=1 */
2007 DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
2008 i);
2009 break;
2010 }
2011
2012 /* Wait for one frame. */
2013 tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
2014 tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
2015 }
2016
2017 if (false == rc)
2018 DC_ERROR("GSL: Timeout on reset trigger!\n");
2019
2020 return rc;
2021 }
2022
/*
 * reduceSizeAndFraction() - strip common prime factors from a 64-bit fraction.
 *
 * @numerator: in/out numerator of the fraction
 * @denominator: in/out denominator of the fraction
 * @checkUint32Bounary: when true, stop early once both values fit in 32 bits
 *	and report whether that bound was reached
 *
 * Divides numerator and denominator by every prime up to 997 for as long as
 * the prime divides both, reducing the fraction in place.
 *
 * Return: when @checkUint32Bounary is false, always true. When it is true,
 * true only if both reduced values fit in a uint32_t.
 *
 * Note: the function computes and returns a boolean; the previous uint64_t
 * return type was misleading (the caller already treats it as bool).
 */
static bool reduceSizeAndFraction(uint64_t *numerator,
				  uint64_t *denominator,
				  bool checkUint32Bounary)
{
	int i;
	bool ret = checkUint32Bounary == false;
	uint64_t max_int32 = 0xffffffff;
	uint64_t num, denom;
	static const uint16_t prime_numbers[] = {
		2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43,
		47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103,
		107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,
		167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227,
		229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
		283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353,
		359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421,
		431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487,
		491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
		571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
		641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
		709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
		787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
		859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
		941, 947, 953, 967, 971, 977, 983, 991, 997};
	int count = ARRAY_SIZE(prime_numbers);

	num = *numerator;
	denom = *denominator;
	for (i = 0; i < count; i++) {
		uint32_t num_remainder, denom_remainder;
		uint64_t num_result, denom_result;

		/* Early out once both values fit into 32 bits. */
		if (checkUint32Bounary &&
		    num <= max_int32 && denom <= max_int32) {
			ret = true;
			break;
		}

		/* Divide out this prime for as long as it divides both. */
		do {
			num_result = div_u64_rem(num, prime_numbers[i], &num_remainder);
			denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder);
			if (num_remainder == 0 && denom_remainder == 0) {
				num = num_result;
				denom = denom_result;
			}
		} while (num_remainder == 0 && denom_remainder == 0);
	}
	*numerator = num;
	*denominator = denom;
	return ret;
}
2072
is_low_refresh_rate(struct pipe_ctx * pipe)2073 static bool is_low_refresh_rate(struct pipe_ctx *pipe)
2074 {
2075 uint32_t master_pipe_refresh_rate =
2076 pipe->stream->timing.pix_clk_100hz * 100 /
2077 pipe->stream->timing.h_total /
2078 pipe->stream->timing.v_total;
2079 return master_pipe_refresh_rate <= 30;
2080 }
2081
get_clock_divider(struct pipe_ctx * pipe,bool account_low_refresh_rate)2082 static uint8_t get_clock_divider(struct pipe_ctx *pipe,
2083 bool account_low_refresh_rate)
2084 {
2085 uint32_t clock_divider = 1;
2086 uint32_t numpipes = 1;
2087
2088 if (account_low_refresh_rate && is_low_refresh_rate(pipe))
2089 clock_divider *= 2;
2090
2091 if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2092 clock_divider *= 2;
2093
2094 while (pipe->next_odm_pipe) {
2095 pipe = pipe->next_odm_pipe;
2096 numpipes++;
2097 }
2098 clock_divider *= numpipes;
2099
2100 return clock_divider;
2101 }
2102
/*
 * dcn10_align_pixel_clocks - retune the DP DTOs of a sync group so all pipes
 * can be vblank-aligned against an embedded (eDP) panel's pixel clock.
 *
 * Returns the index of the selected master pipe, or -1 when no master was
 * chosen (missing DTO params / override hook, or allocation failure).
 */
static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
		struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	int i, master = -1, embedded = -1;
	struct dc_crtc_timing *hw_crtc_timing;
	uint64_t phase[MAX_PIPES];
	uint64_t modulo[MAX_PIPES];
	unsigned int pclk;

	uint32_t embedded_pix_clk_100hz;
	uint16_t embedded_h_total;
	uint16_t embedded_v_total;
	uint32_t dp_ref_clk_100hz =
		dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;

	hw_crtc_timing = kcalloc(MAX_PIPES, sizeof(*hw_crtc_timing), GFP_KERNEL);
	if (!hw_crtc_timing)
		return master;

	if (dc->config.vblank_alignment_dto_params &&
		dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
		/* vblank_alignment_dto_params packs the embedded panel timing:
		 * bits [46:32] h_total, [62:48] v_total, [31:0] pixel clock
		 * in 100Hz units.
		 */
		embedded_h_total =
			(dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF;
		embedded_v_total =
			(dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF;
		embedded_pix_clk_100hz =
			dc->config.vblank_alignment_dto_params & 0xFFFFFFFF;

		/* Pass 1: read back each pipe's hw timing and derive the DTO
		 * phase/modulo needed to lock it to the embedded clock.
		 */
		for (i = 0; i < group_size; i++) {
			grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing(
					grouped_pipes[i]->stream_res.tg,
					&hw_crtc_timing[i]);
			dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
				dc->res_pool->dp_clock_source,
				grouped_pipes[i]->stream_res.tg->inst,
				&pclk);
			hw_crtc_timing[i].pix_clk_100hz = pclk;
			if (dc_is_embedded_signal(
					grouped_pipes[i]->stream->signal)) {
				/* The embedded panel is the reference and the master. */
				embedded = i;
				master = i;
				phase[i] = embedded_pix_clk_100hz*100;
				modulo[i] = dp_ref_clk_100hz*100;
			} else {

				phase[i] = (uint64_t)embedded_pix_clk_100hz*
					hw_crtc_timing[i].h_total*
					hw_crtc_timing[i].v_total;
				phase[i] = div_u64(phase[i], get_clock_divider(grouped_pipes[i], true));
				modulo[i] = (uint64_t)dp_ref_clk_100hz*
					embedded_h_total*
					embedded_v_total;

				/* DTO registers are 32-bit: the fraction must be
				 * reduced to fit, otherwise the pipe cannot be
				 * synchronized.
				 */
				if (reduceSizeAndFraction(&phase[i],
						&modulo[i], true) == false) {
					/*
					 * this will help to stop reporting
					 * this timing synchronizable
					 */
					DC_SYNC_INFO("Failed to reduce DTO parameters\n");
					grouped_pipes[i]->stream->has_non_synchronizable_pclk = true;
				}
			}
		}

		/* Pass 2: program the overridden DTOs and read back the
		 * resulting pixel clocks.
		 */
		for (i = 0; i < group_size; i++) {
			if (i != embedded && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) {
				dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst,
					phase[i], modulo[i]);
				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst, &pclk);
				grouped_pipes[i]->stream->timing.pix_clk_100hz =
					pclk*get_clock_divider(grouped_pipes[i], false);
				/* Fall back to the first synchronizable pipe as
				 * master when there is no embedded panel.
				 */
				if (master == -1)
					master = i;
			}
		}

	}

	kfree(hw_crtc_timing);
	return master;
}
2190
/*
 * dcn10_enable_vblanks_synchronization - align the vblanks of every pipe in
 * the group to a master pipe after retuning their DP DTOs.
 *
 * @group_index is currently unused; @grouped_pipes[0..group_size-1] are the
 * pipes to synchronize.
 */
void dcn10_enable_vblanks_synchronization(
	struct dc *dc,
	int group_index,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
	int i, width, height, master;

	/* Oversize the DPG (2*height + 1) on each slave while the sync runs;
	 * the real dimensions are restored at the end.
	 * NOTE(review): presumably this masks artifacts during realignment --
	 * confirm against opp_program_dpg_dimensions semantics.
	 */
	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);

		if (!tg->funcs->is_tg_enabled(tg)) {
			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
			return;
		}

		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
	}

	/* Reset the per-stream sync flags before realigning. */
	for (i = 0; i < group_size; i++) {
		if (grouped_pipes[i]->stream == NULL)
			continue;
		grouped_pipes[i]->stream->vblank_synchronized = false;
		grouped_pipes[i]->stream->has_non_synchronizable_pclk = false;
	}

	DC_SYNC_INFO("Aligning DP DTOs\n");

	master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes);

	DC_SYNC_INFO("Synchronizing VBlanks\n");

	/* master < 0 means pixel clock alignment failed; skip vblank sync. */
	if (master >= 0) {
		for (i = 0; i < group_size; i++) {
			if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
				grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
					grouped_pipes[master]->stream_res.tg,
					grouped_pipes[i]->stream_res.tg,
					grouped_pipes[master]->stream->timing.pix_clk_100hz,
					grouped_pipes[i]->stream->timing.pix_clk_100hz,
					get_clock_divider(grouped_pipes[master], false),
					get_clock_divider(grouped_pipes[i], false));
			/* Note: set for every pipe, master included (the if
			 * above is a single-statement body).
			 */
			grouped_pipes[i]->stream->vblank_synchronized = true;
		}
		grouped_pipes[master]->stream->vblank_synchronized = true;
		DC_SYNC_INFO("Sync complete\n");
	}

	/* Restore the real DPG dimensions on the slave pipes. */
	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
	}
}
2253
/*
 * dcn10_enable_timing_synchronization - synchronize the OTGs of a pipe group
 * by arming every slave TG to reset against pipe 0's TG and waiting for the
 * trigger to fire. SubVP phantom pipes are excluded throughout.
 *
 * @group_index is currently unused.
 */
void dcn10_enable_timing_synchronization(
	struct dc *dc,
	int group_index,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
	int i, width, height;

	DC_SYNC_INFO("Setting up OTG reset trigger\n");

	/* Oversize the DPG (2*height + 1) on each slave while the sync runs;
	 * restored at the end.
	 */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);

		if (!tg->funcs->is_tg_enabled(tg)) {
			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
			return;
		}

		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
	}

	/* Clear the sync flag on every real (non-phantom) stream. */
	for (i = 0; i < group_size; i++) {
		if (grouped_pipes[i]->stream == NULL)
			continue;

		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream->vblank_synchronized = false;
	}

	/* Arm every slave TG to reset against pipe 0's TG instance. */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
				grouped_pipes[i]->stream_res.tg,
				grouped_pipes[0]->stream_res.tg->inst);
	}

	DC_SYNC_INFO("Waiting for trigger\n");

	/* Need to get only check 1 pipe for having reset as all the others are
	 * synchronized. Look at last pipe programmed to reset.
	 */

	if (grouped_pipes[1]->stream && grouped_pipes[1]->stream->mall_stream_config.type != SUBVP_PHANTOM)
		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);

	/* Disarm the reset triggers again. */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
				grouped_pipes[i]->stream_res.tg);
	}

	/* Restore the real DPG dimensions. */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
	}

	DC_SYNC_INFO("Sync complete\n");
}
2333
dcn10_enable_per_frame_crtc_position_reset(struct dc * dc,int group_size,struct pipe_ctx * grouped_pipes[])2334 void dcn10_enable_per_frame_crtc_position_reset(
2335 struct dc *dc,
2336 int group_size,
2337 struct pipe_ctx *grouped_pipes[])
2338 {
2339 struct dc_context *dc_ctx = dc->ctx;
2340 int i;
2341
2342 DC_SYNC_INFO("Setting up\n");
2343 for (i = 0; i < group_size; i++)
2344 if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
2345 grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
2346 grouped_pipes[i]->stream_res.tg,
2347 0,
2348 &grouped_pipes[i]->stream->triggered_crtc_reset);
2349
2350 DC_SYNC_INFO("Waiting for trigger\n");
2351
2352 for (i = 0; i < group_size; i++)
2353 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
2354
2355 DC_SYNC_INFO("Multi-display sync is complete\n");
2356 }
2357
/*
 * Read the MC system aperture (default/low/high addresses) out of MMHUB and
 * convert the register-unit values into byte addresses for HUBP programming.
 */
static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
		struct vm_system_aperture_param *apt,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC physical_page_number;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;

	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);

	REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			LOGICAL_ADDR, &logical_addr_low);

	REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			LOGICAL_ADDR, &logical_addr_high);

	/* Page number -> byte address (<< 12 implies 4K pages). */
	apt->sys_default.quad_part =  physical_page_number.quad_part << 12;
	/* NOTE(review): << 18 implies the LOW/HIGH registers hold 256K-unit
	 * logical addresses -- confirm against the register spec.
	 */
	apt->sys_low.quad_part =  (int64_t)logical_addr_low << 18;
	apt->sys_high.quad_part =  (int64_t)logical_addr_high << 18;
}
2381
/* Temporary read settings, future will get values from kmd directly */
/*
 * Read VM context 0 page-table setup (base/start/end/fault-default) out of
 * MMHUB and rebase the page-table base from UMA space into DCN's view.
 */
static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
		struct vm_context0_param *vm0,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC fb_base;
	PHYSICAL_ADDRESS_LOC fb_offset;
	uint32_t fb_base_value;
	uint32_t fb_offset_value;

	REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
	REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);

	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
			PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
			PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);

	/*
	 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
	 * Therefore we need to do
	 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
	 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
	 */
	/* NOTE(review): << 24 implies SDPIF FB base/offset registers are in
	 * 16MB units -- confirm against the register spec.
	 */
	fb_base.quad_part = (uint64_t)fb_base_value << 24;
	fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
	vm0->pte_base.quad_part += fb_base.quad_part;
	vm0->pte_base.quad_part -= fb_offset.quad_part;
}
2426
2427
dcn10_program_pte_vm(struct dce_hwseq * hws,struct hubp * hubp)2428 static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
2429 {
2430 struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
2431 struct vm_system_aperture_param apt = {0};
2432 struct vm_context0_param vm0 = {0};
2433
2434 mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
2435 mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
2436
2437 hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
2438 hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
2439 }
2440
/*
 * Power up and clock-gate-enable the hardware blocks a plane needs
 * (HUBP, OPP pipe clock), program VM settings when GPU VM is in use,
 * and re-enable the flip interrupt for the top pipe.
 */
static void dcn10_enable_plane(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	/* Undo the DEGVIDCN10-253 workaround before bringing the plane up. */
	undo_DEGVIDCN10_253_wa(dc);

	power_on_plane_resources(dc->hwseq,
		pipe_ctx->plane_res.hubp->inst);

	/* enable DCFCLK current DCHUB */
	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);

	/* make sure OPP_PIPE_CLOCK_EN = 1 */
	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
			pipe_ctx->stream_res.opp,
			true);

	if (dc->config.gpu_vm_support)
		dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	/* Only the top pipe owns the flip interrupt. */
	if (!pipe_ctx->top_pipe
		&& pipe_ctx->plane_state
		&& pipe_ctx->plane_state->flip_int_enabled
		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
			pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);

}
2479
dcn10_program_gamut_remap(struct pipe_ctx * pipe_ctx)2480 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
2481 {
2482 int i = 0;
2483 struct dpp_grph_csc_adjustment adjust;
2484 memset(&adjust, 0, sizeof(adjust));
2485 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2486
2487
2488 if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2489 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2490 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2491 adjust.temperature_matrix[i] =
2492 pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2493 } else if (pipe_ctx->plane_state &&
2494 pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
2495 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2496 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2497 adjust.temperature_matrix[i] =
2498 pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
2499 }
2500
2501 pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
2502 }
2503
2504
dcn10_is_rear_mpo_fix_required(struct pipe_ctx * pipe_ctx,enum dc_color_space colorspace)2505 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
2506 {
2507 if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
2508 if (pipe_ctx->top_pipe) {
2509 struct pipe_ctx *top = pipe_ctx->top_pipe;
2510
2511 while (top->top_pipe)
2512 top = top->top_pipe; // Traverse to top pipe_ctx
2513 if (top->plane_state && top->plane_state->layer_index == 0)
2514 return true; // Front MPO plane not hidden
2515 }
2516 }
2517 return false;
2518 }
2519
/*
 * Program the output CSC with the RGB bias zeroed out, then restore the
 * caller's matrix. Fixes rear-plane MPO brightness on DCN1.
 * matrix[3], [7] and [11] all carry the same bias offset.
 */
static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
{
	static const int bias_idx[] = {3, 7, 11};
	const uint16_t saved_bias = matrix[3];
	int k;

	/* Zero the bias for the rear plane's programming pass. */
	for (k = 0; k < (int)ARRAY_SIZE(bias_idx); k++)
		matrix[bias_idx[k]] = 0;

	pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);

	/* Put the caller's matrix back the way we found it. */
	for (k = 0; k < (int)ARRAY_SIZE(bias_idx); k++)
		matrix[bias_idx[k]] = saved_bias;
}
2533
/*
 * Program the output color space conversion for the pipe: either the
 * stream's adjustment matrix (with the DCN1 rear-MPO bias workaround when
 * required) or the default matrix for @colorspace.
 */
void dcn10_program_output_csc(struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		enum dc_color_space colorspace,
		uint16_t *matrix,
		int opp_id)
{
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	int16_t rgb_bias;

	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment != true) {
		if (dpp->funcs->dpp_set_csc_default != NULL)
			dpp->funcs->dpp_set_csc_default(dpp, colorspace);
		return;
	}

	if (dpp->funcs->dpp_set_csc_adjustment == NULL)
		return;

	/* MPO is broken with RGB colorspaces when OCSC matrix
	 * brightness offset >= 0 on DCN1 due to OCSC before MPC
	 * Blending adds offsets from front + rear to rear plane
	 *
	 * Fix is to set RGB bias to 0 on rear plane, top plane
	 * black value pixels add offset instead of rear + front
	 */

	rgb_bias = matrix[3];
	/* matrix[3/7/11] are all the same offset value */

	if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace))
		dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
	else
		dpp->funcs->dpp_set_csc_adjustment(dpp, matrix);
}
2565
dcn10_update_dpp(struct dpp * dpp,struct dc_plane_state * plane_state)2566 static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2567 {
2568 struct dc_bias_and_scale bns_params = {0};
2569
2570 // program the input csc
2571 dpp->funcs->dpp_setup(dpp,
2572 plane_state->format,
2573 EXPANSION_MODE_ZERO,
2574 plane_state->input_csc_color_matrix,
2575 plane_state->color_space,
2576 NULL);
2577
2578 //set scale and bias registers
2579 build_prescale_params(&bns_params, plane_state);
2580 if (dpp->funcs->dpp_program_bias_and_scale)
2581 dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
2582 }
2583
dcn10_update_visual_confirm_color(struct dc * dc,struct pipe_ctx * pipe_ctx,int mpcc_id)2584 void dcn10_update_visual_confirm_color(struct dc *dc,
2585 struct pipe_ctx *pipe_ctx,
2586 int mpcc_id)
2587 {
2588 struct mpc *mpc = dc->res_pool->mpc;
2589
2590 if (mpc->funcs->set_bg_color) {
2591 memcpy(&pipe_ctx->plane_state->visual_confirm_color, &(pipe_ctx->visual_confirm_color), sizeof(struct tg_color));
2592 mpc->funcs->set_bg_color(mpc, &(pipe_ctx->visual_confirm_color), mpcc_id);
2593 }
2594 }
2595
/*
 * Build the blend configuration for the pipe's plane and (re)insert it into
 * the MPC tree. On partial updates only the blend settings are refreshed;
 * full updates remove and re-insert the MPCC.
 */
void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct mpcc_blnd_cfg blnd_cfg = {0};
	/* Per-pixel alpha only matters when another pipe blends below us. */
	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
	int mpcc_id;
	struct mpcc *new_mpcc;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);

	blnd_cfg.overlap_only = false;
	blnd_cfg.global_gain = 0xff;

	if (per_pixel_alpha) {
		/* DCN1.0 has output CM before MPC which seems to screw with
		 * pre-multiplied alpha.
		 */
		blnd_cfg.pre_multiplied_alpha = (is_rgb_cspace(
				pipe_ctx->stream->output_color_space)
						&& pipe_ctx->plane_state->pre_multiplied_alpha);
		if (pipe_ctx->plane_state->global_alpha) {
			/* Per-pixel alpha modulated by a global gain. */
			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
			blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
		} else {
			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
		}
	} else {
		blnd_cfg.pre_multiplied_alpha = false;
		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
	}

	if (pipe_ctx->plane_state->global_alpha)
		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
	else
		blnd_cfg.global_alpha = 0xff;

	/*
	 * TODO: remove hack
	 * Note: currently there is a bug in init_hw such that
	 * on resume from hibernate, BIOS sets up MPCC0, and
	 * we do mpcc_remove but the mpcc cannot go to idle
	 * after remove. This cause us to pick mpcc1 here,
	 * which causes a pstate hang for yet unknown reason.
	 */
	mpcc_id = hubp->inst;

	/* If there is no full update, don't need to touch MPC tree*/
	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
		dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);
		return;
	}

	/* check if this MPCC is already being used */
	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
	/* remove MPCC if being used */
	if (new_mpcc != NULL)
		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
	else
		if (dc->debug.sanity_checks)
			mpc->funcs->assert_mpcc_idle_before_connect(
					dc->res_pool->mpc, mpcc_id);

	/* Call MPC to insert new plane */
	new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
			mpc_tree_params,
			&blnd_cfg,
			NULL,
			NULL,
			hubp->inst,
			mpcc_id);
	dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);

	ASSERT(new_mpcc != NULL);
	/* Record the OPP/MPCC connection on the HUBP for later teardown. */
	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
	hubp->mpcc_id = mpcc_id;
}
2673
update_scaler(struct pipe_ctx * pipe_ctx)2674 static void update_scaler(struct pipe_ctx *pipe_ctx)
2675 {
2676 bool per_pixel_alpha =
2677 pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2678
2679 pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2680 pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
2681 /* scaler configuration */
2682 pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2683 pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
2684 }
2685
/*
 * dcn10_update_dchubp_dpp - reprogram the HUBP and DPP of one pipe,
 * gated by the plane's update flags.
 *
 * On a full update this additionally handles DPP clock divide/DTO
 * programming, VTG selection and DLG/TTU request registers. Lighter
 * flag-driven paths only touch the affected stage (pixel format,
 * MPC blending, scaler, viewport, surface config). The function always
 * finishes by re-committing the surface address and unblanking the HUBP
 * if the pipe tree is visible.
 */
static void dcn10_update_dchubp_dpp(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	struct plane_size size = plane_state->plane_size;
	unsigned int compat_level = 0;
	bool should_divided_by_2 = false;

	/* depends on DML calculation, DPP clock value may change dynamically */
	/* If request max dpp clk is lower than current dispclk, no need to
	 * divided by 2
	 */
	if (plane_state->update_flags.bits.full_update) {

		/* new calculated dispclk, dppclk are stored in
		 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current
		 * dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz.
		 * dcn10_validate_bandwidth compute new dispclk, dppclk.
		 * dispclk will put in use after optimize_bandwidth when
		 * ramp_up_dispclk_with_dpp is called.
		 * there are two places for dppclk be put in use. One location
		 * is the same as the location as dispclk. Another is within
		 * update_dchubp_dpp which happens between pre_bandwidth and
		 * optimize_bandwidth.
		 * dppclk updated within update_dchubp_dpp will cause new
		 * clock values of dispclk and dppclk not be in use at the same
		 * time. when clocks are decreased, this may cause dppclk is
		 * lower than previous configuration and let pipe stuck.
		 * for example, eDP + external dp, change resolution of DP from
		 * 1920x1080x144hz to 1280x960x60hz.
		 * before change: dispclk = 337889 dppclk = 337889
		 * change mode, dcn10_validate_bandwidth calculate
		 * dispclk = 143122 dppclk = 143122
		 * update_dchubp_dpp be executed before dispclk be updated,
		 * dispclk = 337889, but dppclk use new value dispclk /2 =
		 * 168944. this will cause pipe pstate warning issue.
		 * solution: between pre_bandwidth and optimize_bandwidth, while
		 * dispclk is going to be decreased, keep dppclk = dispclk
		 **/
		if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
				dc->clk_mgr->clks.dispclk_khz)
			should_divided_by_2 = false;
		else
			should_divided_by_2 =
					context->bw_ctx.bw.dcn.clk.dppclk_khz <=
					dc->clk_mgr->clks.dispclk_khz / 2;

		dpp->funcs->dpp_dppclk_control(
				dpp,
				should_divided_by_2,
				true);

		/* Prefer a per-DPP DTO when the DCCG exposes one; otherwise
		 * fall back to tracking dppclk as dispclk (or dispclk/2).
		 */
		if (dc->res_pool->dccg)
			dc->res_pool->dccg->funcs->update_dpp_dto(
					dc->res_pool->dccg,
					dpp->inst,
					pipe_ctx->plane_res.bw.dppclk_khz);
		else
			dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
						dc->clk_mgr->clks.dispclk_khz / 2 :
							dc->clk_mgr->clks.dispclk_khz;
	}

	/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
	 * VTG is within DCHUBBUB which is commond block share by each pipe HUBP.
	 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
	 */
	if (plane_state->update_flags.bits.full_update) {
		hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);

		/* Commit DLG/TTU/RQ request parameters computed by DML */
		hubp->funcs->hubp_setup(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs,
			&pipe_ctx->rq_regs,
			&pipe_ctx->pipe_dlg_param);
		hubp->funcs->hubp_setup_interdependent(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs);
	}

	/* The active surface size is the post-scaler viewport */
	size.surface_size = pipe_ctx->plane_res.scl_data.viewport;

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.bpp_change)
		dcn10_update_dpp(dpp, plane_state);

	/* Re-program MPC blending when alpha configuration changed */
	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.per_pixel_alpha_change ||
		plane_state->update_flags.bits.global_alpha_change)
		hws->funcs.update_mpcc(dc, pipe_ctx);

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.per_pixel_alpha_change ||
		plane_state->update_flags.bits.global_alpha_change ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.position_change) {
		update_scaler(pipe_ctx);
	}

	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.position_change) {
		hubp->funcs->mem_program_viewport(
			hubp,
			&pipe_ctx->plane_res.scl_data.viewport,
			&pipe_ctx->plane_res.scl_data.viewport_c);
	}

	/* Re-apply the cursor if the stream has one configured (non-zero
	 * surface address), since HUBP/DPP reprogramming may affect it.
	 */
	if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
		dc->hwss.set_cursor_position(pipe_ctx);
		dc->hwss.set_cursor_attribute(pipe_ctx);

		if (dc->hwss.set_cursor_sdr_white_level)
			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
	}

	if (plane_state->update_flags.bits.full_update) {
		/*gamut remap*/
		dc->hwss.program_gamut_remap(pipe_ctx);

		dc->hwss.program_output_csc(dc,
				pipe_ctx,
				pipe_ctx->stream->output_color_space,
				pipe_ctx->stream->csc_color_matrix.matrix,
				pipe_ctx->stream_res.opp->inst);
	}

	/* Surface config covers anything that changes the surface's memory
	 * layout or interpretation: format, tiling, rotation, DCC, size.
	 */
	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.pixel_format_change ||
		plane_state->update_flags.bits.horizontal_mirror_change ||
		plane_state->update_flags.bits.rotation_change ||
		plane_state->update_flags.bits.swizzle_change ||
		plane_state->update_flags.bits.dcc_change ||
		plane_state->update_flags.bits.bpp_change ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.plane_size_change) {
		hubp->funcs->hubp_program_surface_config(
			hubp,
			plane_state->format,
			&plane_state->tiling_info,
			&size,
			plane_state->rotation,
			&plane_state->dcc,
			plane_state->horizontal_mirror,
			compat_level);
	}

	hubp->power_gated = false;

	hws->funcs.update_plane_addr(dc, pipe_ctx);

	if (is_pipe_tree_visible(pipe_ctx))
		hubp->funcs->set_blank(hubp, false);
}
2847
/*
 * dcn10_blank_pixel_data - blank or unblank the pipe's OTG output.
 *
 * The OTG blank color is always (re)programmed first from the stream's
 * output color space, so a blanked output shows black rather than garbage.
 * Unblank path: deassert blank, then restore the stream's ABM level.
 * Blank path: disable ABM immediately, wait for VBLANK, then assert blank
 * (the wait keeps the blank transition off-screen).
 */
void dcn10_blank_pixel_data(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		bool blank)
{
	enum dc_color_space color_space;
	struct tg_color black_color = {0};
	struct stream_resource *stream_res = &pipe_ctx->stream_res;
	struct dc_stream_state *stream = pipe_ctx->stream;

	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;


	if (stream_res->tg->funcs->set_blank_color)
		stream_res->tg->funcs->set_blank_color(
				stream_res->tg,
				&black_color);

	if (!blank) {
		if (stream_res->tg->funcs->set_blank)
			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
		if (stream_res->abm) {
			dc->hwss.set_pipe(pipe_ctx);
			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
		}
	} else {
		dc->hwss.set_abm_immediate_disable(pipe_ctx);
		if (stream_res->tg->funcs->set_blank) {
			stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
		}
	}
}
2891
dcn10_set_hdr_multiplier(struct pipe_ctx * pipe_ctx)2892 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2893 {
2894 struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
2895 uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
2896 struct custom_float_format fmt;
2897
2898 fmt.exponenta_bits = 6;
2899 fmt.mantissa_bits = 12;
2900 fmt.sign = true;
2901
2902
2903 if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
2904 convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2905
2906 pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2907 pipe_ctx->plane_res.dpp, hw_mult);
2908 }
2909
/*
 * dcn10_program_pipe - program one pipe for the given state.
 *
 * For the top pipe of a tree this first sets up OTG global sync
 * (VREADY/VSTARTUP/VUPDATE positions), VTG parameters, the vupdate
 * interrupt (if the hwseq provides it) and blanking. Every pipe then
 * gets HUBP/DPP programming, the HDR multiplier, and flag-gated
 * input/output transfer function (gamma) programming.
 *
 * NOTE(review): pipe_ctx->plane_state is dereferenced unconditionally
 * below; callers appear to guarantee a plane is attached — confirm
 * against the call sites.
 */
void dcn10_program_pipe(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	if (pipe_ctx->top_pipe == NULL) {
		bool blank = !is_pipe_tree_visible(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->program_global_sync(
				pipe_ctx->stream_res.tg,
				calculate_vready_offset_for_group(pipe_ctx),
				pipe_ctx->pipe_dlg_param.vstartup_start,
				pipe_ctx->pipe_dlg_param.vupdate_offset,
				pipe_ctx->pipe_dlg_param.vupdate_width);

		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);

		if (hws->funcs.setup_vupdate_interrupt)
			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);

		hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
	}

	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		dcn10_enable_plane(dc, pipe_ctx, context);

	dcn10_update_dchubp_dpp(dc, pipe_ctx, context);

	hws->funcs.set_hdr_multiplier(pipe_ctx);

	if (pipe_ctx->plane_state->update_flags.bits.full_update ||
			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
			pipe_ctx->plane_state->update_flags.bits.gamma_change)
		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);

	/* dcn10_translate_regamma_to_hw_format takes 750us to finish
	 * only do gamma programming for full update.
	 * TODO: This can be further optimized/cleaned up
	 * Always call this for now since it does memcmp inside before
	 * doing heavy calculation and programming
	 */
	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
}
2957
/*
 * dcn10_wait_for_pending_cleared - wait until every enabled top pipe has
 * passed through VBLANK and back into VACTIVE, guaranteeing a VUPDATE
 * occurred on each before the caller proceeds.
 */
void dcn10_wait_for_pending_cleared(struct dc *dc,
		struct dc_state *context)
{
	struct pipe_ctx *pipe_ctx;
	struct timing_generator *tg;
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe_ctx = &context->res_ctx.pipe_ctx[i];
		tg = pipe_ctx->stream_res.tg;

		/*
		 * Only wait for top pipe's tg pending bit
		 * Also skip if pipe is disabled.
		 */
		if (pipe_ctx->top_pipe ||
		    !pipe_ctx->stream || !pipe_ctx->plane_state ||
		    !tg->funcs->is_tg_enabled(tg))
			continue;

		/*
		 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
		 * For some reason waiting for OTG_UPDATE_PENDING cleared
		 * seems to not trigger the update right away, and if we
		 * lock again before VUPDATE then we don't get a separated
		 * operation.
		 */
		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
		pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
	}
}
2989
/*
 * dcn10_post_unlock_program_front_end - front-end work that must run after
 * the pipe locks are released: apply the OPTC false-underflow WA to
 * plane-less streams, tear down pipes flagged for disable, run
 * optimize_bandwidth once if anything was disabled, and apply the
 * DEGVIDCN10-254 watermark-change WA if the ASIC needs it.
 *
 * NOTE(review): context->stream_status[i] is indexed with the pipe index
 * i while iterating pipes; this presumably relies on top pipes aligning
 * with stream slots — confirm against how stream_status is populated.
 */
void dcn10_post_unlock_program_front_end(
		struct dc *dc,
		struct dc_state *context)
{
	int i;

	DC_LOGGER_INIT(dc->ctx->logger);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (!pipe_ctx->top_pipe &&
			!pipe_ctx->prev_odm_pipe &&
			pipe_ctx->stream) {
			struct timing_generator *tg = pipe_ctx->stream_res.tg;

			if (context->stream_status[i].plane_count == 0)
				false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
		}
	}

	/* Disable planes on the *current* state's pipes flagged in the new context */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
			dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);

	/* One optimize_bandwidth pass is enough if any pipe was disabled */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
			dc->hwss.optimize_bandwidth(dc, context);
			break;
		}

	if (dc->hwseq->wa.DEGVIDCN10_254)
		hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
}
3024
dcn10_stereo_hw_frame_pack_wa(struct dc * dc,struct dc_state * context)3025 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
3026 {
3027 uint8_t i;
3028
3029 for (i = 0; i < context->stream_count; i++) {
3030 if (context->streams[i]->timing.timing_3d_format
3031 == TIMING_3D_FORMAT_HW_FRAME_PACKING) {
3032 /*
3033 * Disable stutter
3034 */
3035 hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
3036 break;
3037 }
3038 }
3039 }
3040
/*
 * dcn10_prepare_bandwidth - raise clocks and program watermarks ahead of a
 * mode/surface programming sequence.
 *
 * Calls update_clocks with safe_to_lower=false (clocks may only go up
 * here; lowering happens later in optimize_bandwidth) and records in
 * dc->wm_optimized_required whether the watermark programming left
 * further optimization to do. Also applies the HW frame-pack stutter WA
 * and, in WM_REPORT_OVERRIDE mode, notifies pplib of the SOC clock
 * ranges.
 */
void dcn10_prepare_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	int min_fclk_khz, min_dcfclk_khz, socclk_khz;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	/* No streams: PHY clock can be reported as zero */
	if (context->stream_count == 0)
		context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;

	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			false);

	dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);
	dcn10_stereo_hw_frame_pack_wa(dc, context);

	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
		DC_FP_START();
		dcn_get_soc_clks(
			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
		DC_FP_END();
		dcn_bw_notify_pplib_of_wm_ranges(
			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
	}

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
3078
/*
 * dcn10_optimize_bandwidth - counterpart to dcn10_prepare_bandwidth, run
 * after programming completes: update_clocks with safe_to_lower=true so
 * clocks may now be reduced, then reprogram watermarks for the final
 * state. Mirrors prepare_bandwidth's stutter WA and pplib notification.
 */
void dcn10_optimize_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	int min_fclk_khz, min_dcfclk_khz, socclk_khz;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	/* No streams: PHY clock can be reported as zero */
	if (context->stream_count == 0)
		context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;

	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			true);

	hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);

	dcn10_stereo_hw_frame_pack_wa(dc, context);

	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
		DC_FP_START();
		dcn_get_soc_clks(
			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
		DC_FP_END();
		dcn_bw_notify_pplib_of_wm_ranges(
			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
	}

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
3117
dcn10_set_drr(struct pipe_ctx ** pipe_ctx,int num_pipes,struct dc_crtc_timing_adjust adjust)3118 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
3119 int num_pipes, struct dc_crtc_timing_adjust adjust)
3120 {
3121 int i = 0;
3122 struct drr_params params = {0};
3123 // DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
3124 unsigned int event_triggers = 0x800;
3125 // Note DRR trigger events are generated regardless of whether num frames met.
3126 unsigned int num_frames = 2;
3127
3128 params.vertical_total_max = adjust.v_total_max;
3129 params.vertical_total_min = adjust.v_total_min;
3130 params.vertical_total_mid = adjust.v_total_mid;
3131 params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
3132 /* TODO: If multiple pipes are to be supported, you need
3133 * some GSL stuff. Static screen triggers may be programmed differently
3134 * as well.
3135 */
3136 for (i = 0; i < num_pipes; i++) {
3137 if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) {
3138 if (pipe_ctx[i]->stream_res.tg->funcs->set_drr)
3139 pipe_ctx[i]->stream_res.tg->funcs->set_drr(
3140 pipe_ctx[i]->stream_res.tg, ¶ms);
3141 if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
3142 if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control)
3143 pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
3144 pipe_ctx[i]->stream_res.tg,
3145 event_triggers, num_frames);
3146 }
3147 }
3148 }
3149
dcn10_get_position(struct pipe_ctx ** pipe_ctx,int num_pipes,struct crtc_position * position)3150 void dcn10_get_position(struct pipe_ctx **pipe_ctx,
3151 int num_pipes,
3152 struct crtc_position *position)
3153 {
3154 int i = 0;
3155
3156 /* TODO: handle pipes > 1
3157 */
3158 for (i = 0; i < num_pipes; i++)
3159 pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
3160 }
3161
dcn10_set_static_screen_control(struct pipe_ctx ** pipe_ctx,int num_pipes,const struct dc_static_screen_params * params)3162 void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
3163 int num_pipes, const struct dc_static_screen_params *params)
3164 {
3165 unsigned int i;
3166 unsigned int triggers = 0;
3167
3168 if (params->triggers.surface_update)
3169 triggers |= 0x80;
3170 if (params->triggers.cursor_update)
3171 triggers |= 0x2;
3172 if (params->triggers.force_trigger)
3173 triggers |= 0x1;
3174
3175 for (i = 0; i < num_pipes; i++)
3176 pipe_ctx[i]->stream_res.tg->funcs->
3177 set_static_screen_control(pipe_ctx[i]->stream_res.tg,
3178 triggers, params->num_frames);
3179 }
3180
dcn10_config_stereo_parameters(struct dc_stream_state * stream,struct crtc_stereo_flags * flags)3181 static void dcn10_config_stereo_parameters(
3182 struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
3183 {
3184 enum view_3d_format view_format = stream->view_format;
3185 enum dc_timing_3d_format timing_3d_format =\
3186 stream->timing.timing_3d_format;
3187 bool non_stereo_timing = false;
3188
3189 if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
3190 timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
3191 timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
3192 non_stereo_timing = true;
3193
3194 if (non_stereo_timing == false &&
3195 view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
3196
3197 flags->PROGRAM_STEREO = 1;
3198 flags->PROGRAM_POLARITY = 1;
3199 if (timing_3d_format == TIMING_3D_FORMAT_FRAME_ALTERNATE ||
3200 timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
3201 timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
3202 timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3203
3204 if (stream->link && stream->link->ddc) {
3205 enum display_dongle_type dongle = \
3206 stream->link->ddc->dongle_type;
3207
3208 if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
3209 dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
3210 dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
3211 flags->DISABLE_STEREO_DP_SYNC = 1;
3212 }
3213 }
3214 flags->RIGHT_EYE_POLARITY =\
3215 stream->timing.flags.RIGHT_EYE_3D_POLARITY;
3216 if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
3217 flags->FRAME_PACKED = 1;
3218 }
3219
3220 return;
3221 }
3222
dcn10_setup_stereo(struct pipe_ctx * pipe_ctx,struct dc * dc)3223 void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
3224 {
3225 struct crtc_stereo_flags flags = { 0 };
3226 struct dc_stream_state *stream = pipe_ctx->stream;
3227
3228 dcn10_config_stereo_parameters(stream, &flags);
3229
3230 if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3231 if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
3232 dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3233 } else {
3234 dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3235 }
3236
3237 pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
3238 pipe_ctx->stream_res.opp,
3239 flags.PROGRAM_STEREO == 1,
3240 &stream->timing);
3241
3242 pipe_ctx->stream_res.tg->funcs->program_stereo(
3243 pipe_ctx->stream_res.tg,
3244 &stream->timing,
3245 &flags);
3246
3247 return;
3248 }
3249
get_hubp_by_inst(struct resource_pool * res_pool,int mpcc_inst)3250 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
3251 {
3252 int i;
3253
3254 for (i = 0; i < res_pool->pipe_count; i++) {
3255 if (res_pool->hubps[i]->inst == mpcc_inst)
3256 return res_pool->hubps[i];
3257 }
3258 ASSERT(false);
3259 return NULL;
3260 }
3261
dcn10_wait_for_mpcc_disconnect(struct dc * dc,struct resource_pool * res_pool,struct pipe_ctx * pipe_ctx)3262 void dcn10_wait_for_mpcc_disconnect(
3263 struct dc *dc,
3264 struct resource_pool *res_pool,
3265 struct pipe_ctx *pipe_ctx)
3266 {
3267 struct dce_hwseq *hws = dc->hwseq;
3268 int mpcc_inst;
3269
3270 if (dc->debug.sanity_checks) {
3271 hws->funcs.verify_allow_pstate_change_high(dc);
3272 }
3273
3274 if (!pipe_ctx->stream_res.opp)
3275 return;
3276
3277 for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
3278 if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
3279 struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
3280
3281 if (pipe_ctx->stream_res.tg &&
3282 pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
3283 res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
3284 pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
3285 hubp->funcs->set_blank(hubp, true);
3286 }
3287 }
3288
3289 if (dc->debug.sanity_checks) {
3290 hws->funcs.verify_allow_pstate_change_high(dc);
3291 }
3292
3293 }
3294
/*
 * Stub display power gating handler: performs no programming and always
 * reports success. All parameters are accepted but unused.
 */
bool dcn10_dummy_display_power_gating(
	struct dc *dc,
	uint8_t controller_id,
	struct dc_bios *dcb,
	enum pipe_gating_control power_gating)
{
	return true;
}
3303
dcn10_update_pending_status(struct pipe_ctx * pipe_ctx)3304 void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
3305 {
3306 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
3307 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3308 bool flip_pending;
3309 struct dc *dc = pipe_ctx->stream->ctx->dc;
3310
3311 if (plane_state == NULL)
3312 return;
3313
3314 flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
3315 pipe_ctx->plane_res.hubp);
3316
3317 plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
3318
3319 if (!flip_pending)
3320 plane_state->status.current_address = plane_state->status.requested_address;
3321
3322 if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
3323 tg->funcs->is_stereo_left_eye) {
3324 plane_state->status.is_right_eye =
3325 !tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
3326 }
3327
3328 if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
3329 struct dce_hwseq *hwseq = dc->hwseq;
3330 struct timing_generator *tg = dc->res_pool->timing_generators[0];
3331 unsigned int cur_frame = tg->funcs->get_frame_count(tg);
3332
3333 if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
3334 struct hubbub *hubbub = dc->res_pool->hubbub;
3335
3336 hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
3337 hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
3338 }
3339 }
3340 }
3341
dcn10_update_dchub(struct dce_hwseq * hws,struct dchub_init_data * dh_data)3342 void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
3343 {
3344 struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
3345
3346 /* In DCN, this programming sequence is owned by the hubbub */
3347 hubbub->funcs->update_dchub(hubbub, dh_data);
3348 }
3349
dcn10_can_pipe_disable_cursor(struct pipe_ctx * pipe_ctx)3350 static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
3351 {
3352 struct pipe_ctx *test_pipe, *split_pipe;
3353 const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
3354 struct rect r1 = scl_data->recout, r2, r2_half;
3355 int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
3356 int cur_layer = pipe_ctx->plane_state->layer_index;
3357
3358 /**
3359 * Disable the cursor if there's another pipe above this with a
3360 * plane that contains this pipe's viewport to prevent double cursor
3361 * and incorrect scaling artifacts.
3362 */
3363 for (test_pipe = pipe_ctx->top_pipe; test_pipe;
3364 test_pipe = test_pipe->top_pipe) {
3365 // Skip invisible layer and pipe-split plane on same layer
3366 if (!test_pipe->plane_state ||
3367 !test_pipe->plane_state->visible ||
3368 test_pipe->plane_state->layer_index == cur_layer)
3369 continue;
3370
3371 r2 = test_pipe->plane_res.scl_data.recout;
3372 r2_r = r2.x + r2.width;
3373 r2_b = r2.y + r2.height;
3374 split_pipe = test_pipe;
3375
3376 /**
3377 * There is another half plane on same layer because of
3378 * pipe-split, merge together per same height.
3379 */
3380 for (split_pipe = pipe_ctx->top_pipe; split_pipe;
3381 split_pipe = split_pipe->top_pipe)
3382 if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
3383 r2_half = split_pipe->plane_res.scl_data.recout;
3384 r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
3385 r2.width = r2.width + r2_half.width;
3386 r2_r = r2.x + r2.width;
3387 break;
3388 }
3389
3390 if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
3391 return true;
3392 }
3393
3394 return false;
3395 }
3396
dcn10_set_cursor_position(struct pipe_ctx * pipe_ctx)3397 void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
3398 {
3399 struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
3400 struct hubp *hubp = pipe_ctx->plane_res.hubp;
3401 struct dpp *dpp = pipe_ctx->plane_res.dpp;
3402 struct dc_cursor_mi_param param = {
3403 .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
3404 .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
3405 .viewport = pipe_ctx->plane_res.scl_data.viewport,
3406 .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
3407 .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
3408 .rotation = pipe_ctx->plane_state->rotation,
3409 .mirror = pipe_ctx->plane_state->horizontal_mirror
3410 };
3411 bool pipe_split_on = false;
3412 bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
3413 (pipe_ctx->prev_odm_pipe != NULL);
3414
3415 int x_plane = pipe_ctx->plane_state->dst_rect.x;
3416 int y_plane = pipe_ctx->plane_state->dst_rect.y;
3417 int x_pos = pos_cpy.x;
3418 int y_pos = pos_cpy.y;
3419
3420 if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
3421 if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
3422 (pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) {
3423 pipe_split_on = true;
3424 }
3425 }
3426
3427 /**
3428 * DC cursor is stream space, HW cursor is plane space and drawn
3429 * as part of the framebuffer.
3430 *
3431 * Cursor position can't be negative, but hotspot can be used to
3432 * shift cursor out of the plane bounds. Hotspot must be smaller
3433 * than the cursor size.
3434 */
3435
3436 /**
3437 * Translate cursor from stream space to plane space.
3438 *
3439 * If the cursor is scaled then we need to scale the position
3440 * to be in the approximately correct place. We can't do anything
3441 * about the actual size being incorrect, that's a limitation of
3442 * the hardware.
3443 */
3444 if (param.rotation == ROTATION_ANGLE_90 || param.rotation == ROTATION_ANGLE_270) {
3445 x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.height /
3446 pipe_ctx->plane_state->dst_rect.width;
3447 y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.width /
3448 pipe_ctx->plane_state->dst_rect.height;
3449 } else {
3450 x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
3451 pipe_ctx->plane_state->dst_rect.width;
3452 y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
3453 pipe_ctx->plane_state->dst_rect.height;
3454 }
3455
3456 /**
3457 * If the cursor's source viewport is clipped then we need to
3458 * translate the cursor to appear in the correct position on
3459 * the screen.
3460 *
3461 * This translation isn't affected by scaling so it needs to be
3462 * done *after* we adjust the position for the scale factor.
3463 *
3464 * This is only done by opt-in for now since there are still
3465 * some usecases like tiled display that might enable the
3466 * cursor on both streams while expecting dc to clip it.
3467 */
3468 if (pos_cpy.translate_by_source) {
3469 x_pos += pipe_ctx->plane_state->src_rect.x;
3470 y_pos += pipe_ctx->plane_state->src_rect.y;
3471 }
3472
3473 /**
3474 * If the position is negative then we need to add to the hotspot
3475 * to shift the cursor outside the plane.
3476 */
3477
3478 if (x_pos < 0) {
3479 pos_cpy.x_hotspot -= x_pos;
3480 x_pos = 0;
3481 }
3482
3483 if (y_pos < 0) {
3484 pos_cpy.y_hotspot -= y_pos;
3485 y_pos = 0;
3486 }
3487
3488 pos_cpy.x = (uint32_t)x_pos;
3489 pos_cpy.y = (uint32_t)y_pos;
3490
3491 if (pipe_ctx->plane_state->address.type
3492 == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
3493 pos_cpy.enable = false;
3494
3495 if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
3496 pos_cpy.enable = false;
3497
3498
3499 if (param.rotation == ROTATION_ANGLE_0) {
3500 int viewport_width =
3501 pipe_ctx->plane_res.scl_data.viewport.width;
3502 int viewport_x =
3503 pipe_ctx->plane_res.scl_data.viewport.x;
3504
3505 if (param.mirror) {
3506 if (pipe_split_on || odm_combine_on) {
3507 if (pos_cpy.x >= viewport_width + viewport_x) {
3508 pos_cpy.x = 2 * viewport_width
3509 - pos_cpy.x + 2 * viewport_x;
3510 } else {
3511 uint32_t temp_x = pos_cpy.x;
3512
3513 pos_cpy.x = 2 * viewport_x - pos_cpy.x;
3514 if (temp_x >= viewport_x +
3515 (int)hubp->curs_attr.width || pos_cpy.x
3516 <= (int)hubp->curs_attr.width +
3517 pipe_ctx->plane_state->src_rect.x) {
3518 pos_cpy.x = temp_x + viewport_width;
3519 }
3520 }
3521 } else {
3522 pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3523 }
3524 }
3525 }
3526 // Swap axis and mirror horizontally
3527 else if (param.rotation == ROTATION_ANGLE_90) {
3528 uint32_t temp_x = pos_cpy.x;
3529
3530 pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
3531 (pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
3532 pos_cpy.y = temp_x;
3533 }
3534 // Swap axis and mirror vertically
3535 else if (param.rotation == ROTATION_ANGLE_270) {
3536 uint32_t temp_y = pos_cpy.y;
3537 int viewport_height =
3538 pipe_ctx->plane_res.scl_data.viewport.height;
3539 int viewport_y =
3540 pipe_ctx->plane_res.scl_data.viewport.y;
3541
3542 /**
3543 * Display groups that are 1xnY, have pos_cpy.x > 2 * viewport.height
3544 * For pipe split cases:
3545 * - apply offset of viewport.y to normalize pos_cpy.x
3546 * - calculate the pos_cpy.y as before
3547 * - shift pos_cpy.y back by same offset to get final value
3548 * - since we iterate through both pipes, use the lower
3549 * viewport.y for offset
3550 * For non pipe split cases, use the same calculation for
3551 * pos_cpy.y as the 180 degree rotation case below,
3552 * but use pos_cpy.x as our input because we are rotating
3553 * 270 degrees
3554 */
3555 if (pipe_split_on || odm_combine_on) {
3556 int pos_cpy_x_offset;
3557 int other_pipe_viewport_y;
3558
3559 if (pipe_split_on) {
3560 if (pipe_ctx->bottom_pipe) {
3561 other_pipe_viewport_y =
3562 pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
3563 } else {
3564 other_pipe_viewport_y =
3565 pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
3566 }
3567 } else {
3568 if (pipe_ctx->next_odm_pipe) {
3569 other_pipe_viewport_y =
3570 pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
3571 } else {
3572 other_pipe_viewport_y =
3573 pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
3574 }
3575 }
3576 pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
3577 other_pipe_viewport_y : viewport_y;
3578 pos_cpy.x -= pos_cpy_x_offset;
3579 if (pos_cpy.x > viewport_height) {
3580 pos_cpy.x = pos_cpy.x - viewport_height;
3581 pos_cpy.y = viewport_height - pos_cpy.x;
3582 } else {
3583 pos_cpy.y = 2 * viewport_height - pos_cpy.x;
3584 }
3585 pos_cpy.y += pos_cpy_x_offset;
3586 } else {
3587 pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;
3588 }
3589 pos_cpy.x = temp_y;
3590 }
3591 // Mirror horizontally and vertically
3592 else if (param.rotation == ROTATION_ANGLE_180) {
3593 int viewport_width =
3594 pipe_ctx->plane_res.scl_data.viewport.width;
3595 int viewport_x =
3596 pipe_ctx->plane_res.scl_data.viewport.x;
3597
3598 if (!param.mirror) {
3599 if (pipe_split_on || odm_combine_on) {
3600 if (pos_cpy.x >= viewport_width + viewport_x) {
3601 pos_cpy.x = 2 * viewport_width
3602 - pos_cpy.x + 2 * viewport_x;
3603 } else {
3604 uint32_t temp_x = pos_cpy.x;
3605
3606 pos_cpy.x = 2 * viewport_x - pos_cpy.x;
3607 if (temp_x >= viewport_x +
3608 (int)hubp->curs_attr.width || pos_cpy.x
3609 <= (int)hubp->curs_attr.width +
3610 pipe_ctx->plane_state->src_rect.x) {
3611 pos_cpy.x = 2 * viewport_width - temp_x;
3612 }
3613 }
3614 } else {
3615 pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3616 }
3617 }
3618
3619 /**
3620 * Display groups that are 1xnY, have pos_cpy.y > viewport.height
3621 * Calculation:
3622 * delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
3623 * pos_cpy.y_new = viewport.y + delta_from_bottom
3624 * Simplify it as:
3625 * pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
3626 */
3627 pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
3628 pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
3629 }
3630
3631 hubp->funcs->set_cursor_position(hubp, &pos_cpy, ¶m);
3632 dpp->funcs->set_cursor_position(dpp, &pos_cpy, ¶m, hubp->curs_attr.width, hubp->curs_attr.height);
3633 }
3634
dcn10_set_cursor_attribute(struct pipe_ctx * pipe_ctx)3635 void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3636 {
3637 struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3638
3639 pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3640 pipe_ctx->plane_res.hubp, attributes);
3641 pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3642 pipe_ctx->plane_res.dpp, attributes);
3643 }
3644
dcn10_set_cursor_sdr_white_level(struct pipe_ctx * pipe_ctx)3645 void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
3646 {
3647 uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3648 struct fixed31_32 multiplier;
3649 struct dpp_cursor_attributes opt_attr = { 0 };
3650 uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3651 struct custom_float_format fmt;
3652
3653 if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
3654 return;
3655
3656 fmt.exponenta_bits = 5;
3657 fmt.mantissa_bits = 10;
3658 fmt.sign = true;
3659
3660 if (sdr_white_level > 80) {
3661 multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
3662 convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
3663 }
3664
3665 opt_attr.scale = hw_scale;
3666 opt_attr.bias = 0;
3667
3668 pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3669 pipe_ctx->plane_res.dpp, &opt_attr);
3670 }
3671
3672 /*
3673 * apply_front_porch_workaround TODO FPGA still need?
3674 *
3675 * This is a workaround for a bug that has existed since R5xx and has not been
3676 * fixed keep Front porch at minimum 2 for Interlaced mode or 1 for progressive.
3677 */
apply_front_porch_workaround(struct dc_crtc_timing * timing)3678 static void apply_front_porch_workaround(
3679 struct dc_crtc_timing *timing)
3680 {
3681 if (timing->flags.INTERLACE == 1) {
3682 if (timing->v_front_porch < 2)
3683 timing->v_front_porch = 2;
3684 } else {
3685 if (timing->v_front_porch < 1)
3686 timing->v_front_porch = 1;
3687 }
3688 }
3689
dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx * pipe_ctx)3690 int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
3691 {
3692 const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3693 struct dc_crtc_timing patched_crtc_timing;
3694 int vesa_sync_start;
3695 int asic_blank_end;
3696 int interlace_factor;
3697
3698 patched_crtc_timing = *dc_crtc_timing;
3699 apply_front_porch_workaround(&patched_crtc_timing);
3700
3701 interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
3702
3703 vesa_sync_start = patched_crtc_timing.v_addressable +
3704 patched_crtc_timing.v_border_bottom +
3705 patched_crtc_timing.v_front_porch;
3706
3707 asic_blank_end = (patched_crtc_timing.v_total -
3708 vesa_sync_start -
3709 patched_crtc_timing.v_border_top)
3710 * interlace_factor;
3711
3712 return asic_blank_end -
3713 pipe_ctx->pipe_dlg_param.vstartup_start + 1;
3714 }
3715
void dcn10_calc_vupdate_position(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		uint32_t *start_line,
		uint32_t *end_line)
{
	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
	int vupdate_pos = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);

	/* Wrap the (possibly negative) vupdate offset into [0, v_total). */
	if (vupdate_pos >= 0)
		*start_line = vupdate_pos % timing->v_total;
	else
		*start_line = vupdate_pos + ((-vupdate_pos / timing->v_total) + 1) * timing->v_total - 1;

	/* Interrupt window is two lines wide, wrapping at frame end. */
	*end_line = (*start_line + 2) % timing->v_total;
}
3731
static void dcn10_cal_vline_position(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		uint32_t *start_line,
		uint32_t *end_line)
{
	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
	int vline_pos = pipe_ctx->stream->periodic_interrupt.lines_offset;

	switch (pipe_ctx->stream->periodic_interrupt.ref_point) {
	case START_V_UPDATE:
		/* Pull the requested offset one line towards zero, then make
		 * it absolute by adding the vupdate-from-vsync offset.
		 */
		if (vline_pos > 0)
			vline_pos--;
		else if (vline_pos < 0)
			vline_pos++;

		vline_pos += dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
		/* Wrap the (possibly negative) line into [0, v_total). */
		if (vline_pos >= 0)
			*start_line = vline_pos - ((vline_pos / timing->v_total) * timing->v_total);
		else
			*start_line = vline_pos + ((-vline_pos / timing->v_total) + 1) * timing->v_total - 1;
		*end_line = (*start_line + 2) % timing->v_total;
		break;
	case START_V_SYNC:
		/* vsync is line 0 so start_line is just the requested line offset */
		*start_line = vline_pos;
		*end_line = (*start_line + 2) % timing->v_total;
		break;
	default:
		ASSERT(0);
		break;
	}
}
3760
dcn10_setup_periodic_interrupt(struct dc * dc,struct pipe_ctx * pipe_ctx)3761 void dcn10_setup_periodic_interrupt(
3762 struct dc *dc,
3763 struct pipe_ctx *pipe_ctx)
3764 {
3765 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3766 uint32_t start_line = 0;
3767 uint32_t end_line = 0;
3768
3769 dcn10_cal_vline_position(dc, pipe_ctx, &start_line, &end_line);
3770
3771 tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3772 }
3773
dcn10_setup_vupdate_interrupt(struct dc * dc,struct pipe_ctx * pipe_ctx)3774 void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
3775 {
3776 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3777 int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3778
3779 if (start_line < 0) {
3780 ASSERT(0);
3781 start_line = 0;
3782 }
3783
3784 if (tg->funcs->setup_vertical_interrupt2)
3785 tg->funcs->setup_vertical_interrupt2(tg, start_line);
3786 }
3787
dcn10_unblank_stream(struct pipe_ctx * pipe_ctx,struct dc_link_settings * link_settings)3788 void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3789 struct dc_link_settings *link_settings)
3790 {
3791 struct encoder_unblank_param params = {0};
3792 struct dc_stream_state *stream = pipe_ctx->stream;
3793 struct dc_link *link = stream->link;
3794 struct dce_hwseq *hws = link->dc->hwseq;
3795
3796 /* only 3 items below are used by unblank */
3797 params.timing = pipe_ctx->stream->timing;
3798
3799 params.link_settings.link_rate = link_settings->link_rate;
3800
3801 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3802 if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3803 params.timing.pix_clk_100hz /= 2;
3804 pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, ¶ms);
3805 }
3806
3807 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3808 hws->funcs.edp_backlight_control(link, true);
3809 }
3810 }
3811
dcn10_send_immediate_sdp_message(struct pipe_ctx * pipe_ctx,const uint8_t * custom_sdp_message,unsigned int sdp_message_size)3812 void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
3813 const uint8_t *custom_sdp_message,
3814 unsigned int sdp_message_size)
3815 {
3816 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3817 pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
3818 pipe_ctx->stream_res.stream_enc,
3819 custom_sdp_message,
3820 sdp_message_size);
3821 }
3822 }
enum dc_status dcn10_set_clock(struct dc *dc,
		enum dc_clock_type clock_type,
		uint32_t clk_khz,
		uint32_t stepping)
{
	struct dc_state *context = dc->current_state;
	struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
	struct dc_clock_config clock_cfg = {0};

	if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
		return DC_FAIL_UNSUPPORTED_1;

	/* Fetch the valid range for this clock and validate the request. */
	dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
			context, clock_type, &clock_cfg);

	if (clk_khz > clock_cfg.max_clock_khz)
		return DC_FAIL_CLK_EXCEED_MAX;

	if (clk_khz < clock_cfg.min_clock_khz)
		return DC_FAIL_CLK_BELOW_MIN;

	if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
		return DC_FAIL_CLK_BELOW_CFG_REQUIRED;

	/* Record the requested clock internally for later clock updates. */
	switch (clock_type) {
	case DC_CLOCK_TYPE_DISPCLK:
		current_clocks->dispclk_khz = clk_khz;
		break;
	case DC_CLOCK_TYPE_DPPCLK:
		current_clocks->dppclk_khz = clk_khz;
		break;
	default:
		return DC_ERROR_UNEXPECTED;
	}

	if (dc->clk_mgr->funcs->update_clocks)
		dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
				context, true);

	return DC_OK;
}
3861
dcn10_get_clock(struct dc * dc,enum dc_clock_type clock_type,struct dc_clock_config * clock_cfg)3862 void dcn10_get_clock(struct dc *dc,
3863 enum dc_clock_type clock_type,
3864 struct dc_clock_config *clock_cfg)
3865 {
3866 struct dc_state *context = dc->current_state;
3867
3868 if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3869 dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
3870
3871 }
3872
dcn10_get_dcc_en_bits(struct dc * dc,int * dcc_en_bits)3873 void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
3874 {
3875 struct resource_pool *pool = dc->res_pool;
3876 int i;
3877
3878 for (i = 0; i < pool->pipe_count; i++) {
3879 struct hubp *hubp = pool->hubps[i];
3880 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
3881
3882 hubp->funcs->hubp_read_state(hubp);
3883
3884 if (!s->blank_en)
3885 dcc_en_bits[i] = s->dcc_en ? 1 : 0;
3886 }
3887 }
3888