1 /*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
30 #include "resource.h"
31 #include "custom_float.h"
32 #include "dcn10_hw_sequencer.h"
33 #include "dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
35 #include "abm.h"
36 #include "dmcu.h"
37 #include "dcn10_optc.h"
38 #include "dcn10_dpp.h"
39 #include "dcn10_mpc.h"
40 #include "timing_generator.h"
41 #include "opp.h"
42 #include "ipp.h"
43 #include "mpc.h"
44 #include "reg_helper.h"
45 #include "dcn10_hubp.h"
46 #include "dcn10_hubbub.h"
47 #include "dcn10_cm_common.h"
48 #include "dc_link_dp.h"
49 #include "dccg.h"
50 #include "clk_mgr.h"
51 #include "link_hwss.h"
52 #include "dpcd_defs.h"
53 #include "dsc.h"
54 #include "dce/dmub_hw_lock_mgr.h"
55 #include "dc_trace.h"
56 #include "dce/dmub_outbox.h"
57 #include "inc/dc_link_dp.h"
58 #include "inc/link_dpcd.h"
59
60 #define DC_LOGGER_INIT(logger)
61
62 #define CTX \
63 hws->ctx
64 #define REG(reg)\
65 hws->regs->reg
66
67 #undef FN
68 #define FN(reg_name, field_name) \
69 hws->shifts->field_name, hws->masks->field_name
70
71 /*print is 17 wide, first two characters are spaces*/
72 #define DTN_INFO_MICRO_SEC(ref_cycle) \
73 print_microsec(dc_ctx, log_ctx, ref_cycle)
74
75 #define GAMMA_HW_POINTS_NUM 256
76
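/*
 * PGFSM (power gate finite state machine) status values reported in the
 * DOMAINx_PGFSM_PWR_STATUS fields: 0 means powered on, 2 means powered off.
 */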
77 #define PGFSM_POWER_ON 0
78 #define PGFSM_POWER_OFF 2
79
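/*
 * Helper for the DTN log: converts a cycle count of the DCHUB reference
 * clock into microseconds (us = ref_cycle / ref_clk_mhz) and prints it
 * with three fractional digits.
 */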
80 void print_microsec(struct dc_context *dc_ctx,
81 struct dc_log_buffer_ctx *log_ctx,
82 uint32_t ref_cycle)
83 {
84 const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
85 static const unsigned int frac = 1000;
86 uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
87
88 DTN_INFO(" %11d.%03d",
89 us_x10 / frac,
90 us_x10 % frac);
91 }
92
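/*
 * Lock or unlock the timing generator (OTG) of every enabled top pipe.
 * Bottom pipes and disabled pipes are skipped so each OTG is only
 * (un)locked once.
 */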
93 void dcn10_lock_all_pipes(struct dc *dc,
94 struct dc_state *context,
95 bool lock)
96 {
97 struct pipe_ctx *pipe_ctx;
98 struct timing_generator *tg;
99 int i;
100
101 for (i = 0; i < dc->res_pool->pipe_count; i++) {
102 pipe_ctx = &context->res_ctx.pipe_ctx[i];
103 tg = pipe_ctx->stream_res.tg;
104
105 /*
106 * Only lock the top pipe's tg to prevent redundant
107 * (un)locking. Also skip if pipe is disabled.
108 */
109 if (pipe_ctx->top_pipe ||
110 !pipe_ctx->stream || !pipe_ctx->plane_state ||
111 !tg->funcs->is_tg_enabled(tg))
112 continue;
113
114 if (lock)
115 dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
116 else
117 dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
118 }
119 }
120
121 static void log_mpc_crc(struct dc *dc,
122 struct dc_log_buffer_ctx *log_ctx)
123 {
124 struct dc_context *dc_ctx = dc->ctx;
125 struct dce_hwseq *hws = dc->hwseq;
126
127 if (REG(MPC_CRC_RESULT_GB))
128 DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
129 REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
130 if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
131 DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
132 REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
133 }
134
135 void dcn10_log_hubbub_state(struct dc *dc, struct dc_log_buffer_ctx *log_ctx)
136 {
137 struct dc_context *dc_ctx = dc->ctx;
138 struct dcn_hubbub_wm wm;
139 int i;
140
141 memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
142 dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
143
144 DTN_INFO("HUBBUB WM: data_urgent pte_meta_urgent"
145 " sr_enter sr_exit dram_clk_change\n");
146
147 for (i = 0; i < 4; i++) {
148 struct dcn_hubbub_wm_set *s;
149
150 s = &wm.sets[i];
151 DTN_INFO("WM_Set[%d]:", s->wm_set);
152 DTN_INFO_MICRO_SEC(s->data_urgent);
153 DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
154 DTN_INFO_MICRO_SEC(s->sr_enter);
155 DTN_INFO_MICRO_SEC(s->sr_exit);
156 DTN_INFO_MICRO_SEC(s->dram_clk_chanage);
157 DTN_INFO("\n");
158 }
159
160 DTN_INFO("\n");
161 }
162
163 static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
164 {
165 struct dc_context *dc_ctx = dc->ctx;
166 struct resource_pool *pool = dc->res_pool;
167 int i;
168
169 DTN_INFO(
170 "HUBP: format addr_hi width height rot mir sw_mode dcc_en blank_en clock_en ttu_dis underflow min_ttu_vblank qos_low_wm qos_high_wm\n");
171 for (i = 0; i < pool->pipe_count; i++) {
172 struct hubp *hubp = pool->hubps[i];
173 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
174
175 hubp->funcs->hubp_read_state(hubp);
176
177 if (!s->blank_en) {
178 DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh %6d %8d %8d %7d %8xh",
179 hubp->inst,
180 s->pixel_format,
181 s->inuse_addr_hi,
182 s->viewport_width,
183 s->viewport_height,
184 s->rotation_angle,
185 s->h_mirror_en,
186 s->sw_mode,
187 s->dcc_en,
188 s->blank_en,
189 s->clock_en,
190 s->ttu_disable,
191 s->underflow_status);
192 DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
193 DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
194 DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
195 DTN_INFO("\n");
196 }
197 }
198
199 DTN_INFO("\n=========RQ========\n");
200 DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
201 " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
202 " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h\n");
203 for (i = 0; i < pool->pipe_count; i++) {
204 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
205 struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
206
207 if (!s->blank_en)
208 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
209 pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
210 rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
211 rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
212 rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
213 rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
214 rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
215 rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
216 rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
217 rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
218 }
219
220 DTN_INFO("========DLG========\n");
221 DTN_INFO("HUBP: rc_hbe dlg_vbe min_d_y_n rc_per_ht rc_x_a_s "
222 " dst_y_a_s dst_y_pf dst_y_vvb dst_y_rvb dst_y_vfl dst_y_rfl rf_pix_fq"
223 " vratio_pf vrat_pf_c rc_pg_vbl rc_pg_vbc rc_mc_vbl rc_mc_vbc rc_pg_fll"
224 " rc_pg_flc rc_mc_fll rc_mc_flc pr_nom_l pr_nom_c rc_pg_nl rc_pg_nc "
225 " mr_nom_l mr_nom_c rc_mc_nl rc_mc_nc rc_ld_pl rc_ld_pc rc_ld_l "
226 " rc_ld_c cha_cur0 ofst_cur1 cha_cur1 vr_af_vc0 ddrq_limt x_rt_dlay"
227 " x_rp_dlay x_rr_sfl\n");
228 for (i = 0; i < pool->pipe_count; i++) {
229 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
230 struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
231
232 if (!s->blank_en)
233 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
234 "% 8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
235 " %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
236 pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
237 dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
238 dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
239 dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
240 dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
241 dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
242 dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
243 dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
244 dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
245 dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
246 dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
247 dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
248 dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
249 dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
250 dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
251 dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
252 dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
253 dlg_regs->xfc_reg_remote_surface_flip_latency);
254 }
255
256 DTN_INFO("========TTU========\n");
257 DTN_INFO("HUBP: qos_ll_wm qos_lh_wm mn_ttu_vb qos_l_flp rc_rd_p_l rc_rd_l rc_rd_p_c"
258 " rc_rd_c rc_rd_c0 rc_rd_pc0 rc_rd_c1 rc_rd_pc1 qos_lf_l qos_rds_l"
259 " qos_lf_c qos_rds_c qos_lf_c0 qos_rds_c0 qos_lf_c1 qos_rds_c1\n");
260 for (i = 0; i < pool->pipe_count; i++) {
261 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
262 struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
263
264 if (!s->blank_en)
265 DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
266 pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
267 ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
268 ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
269 ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
270 ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
271 ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
272 ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
273 }
274 DTN_INFO("\n");
275 }
276
277 void dcn10_log_hw_state(struct dc *dc,
278 struct dc_log_buffer_ctx *log_ctx)
279 {
280 struct dc_context *dc_ctx = dc->ctx;
281 struct resource_pool *pool = dc->res_pool;
282 int i;
283
284 DTN_INFO_BEGIN();
285
286 dcn10_log_hubbub_state(dc, log_ctx);
287
288 dcn10_log_hubp_states(dc, log_ctx);
289
290 DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
291 " GAMUT mode C11 C12 C13 C14 C21 C22 C23 C24 "
292 "C31 C32 C33 C34\n");
293 for (i = 0; i < pool->pipe_count; i++) {
294 struct dpp *dpp = pool->dpps[i];
295 struct dcn_dpp_state s = {0};
296
297 dpp->funcs->dpp_read_state(dpp, &s);
298
299 if (!s.is_enabled)
300 continue;
301
302 DTN_INFO("[%2d]: %11xh %-11s %-11s %-11s"
303 "%8x %08xh %08xh %08xh %08xh %08xh %08xh",
304 dpp->inst,
305 s.igam_input_format,
306 (s.igam_lut_mode == 0) ? "BypassFixed" :
307 ((s.igam_lut_mode == 1) ? "BypassFloat" :
308 ((s.igam_lut_mode == 2) ? "RAM" :
309 ((s.igam_lut_mode == 3) ? "RAM" :
310 "Unknown"))),
311 (s.dgam_lut_mode == 0) ? "Bypass" :
312 ((s.dgam_lut_mode == 1) ? "sRGB" :
313 ((s.dgam_lut_mode == 2) ? "Ycc" :
314 ((s.dgam_lut_mode == 3) ? "RAM" :
315 ((s.dgam_lut_mode == 4) ? "RAM" :
316 "Unknown")))),
317 (s.rgam_lut_mode == 0) ? "Bypass" :
318 ((s.rgam_lut_mode == 1) ? "sRGB" :
319 ((s.rgam_lut_mode == 2) ? "Ycc" :
320 ((s.rgam_lut_mode == 3) ? "RAM" :
321 ((s.rgam_lut_mode == 4) ? "RAM" :
322 "Unknown")))),
323 s.gamut_remap_mode,
324 s.gamut_remap_c11_c12,
325 s.gamut_remap_c13_c14,
326 s.gamut_remap_c21_c22,
327 s.gamut_remap_c23_c24,
328 s.gamut_remap_c31_c32,
329 s.gamut_remap_c33_c34);
330 DTN_INFO("\n");
331 }
332 DTN_INFO("\n");
333
334 DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n");
335 for (i = 0; i < pool->pipe_count; i++) {
336 struct mpcc_state s = {0};
337
338 pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
339 if (s.opp_id != 0xf)
340 DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d\n",
341 i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
342 s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
343 s.idle);
344 }
345 DTN_INFO("\n");
346
347 DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel h_bs h_be h_ss h_se hpol htot vtot underflow blank_en\n");
348
349 for (i = 0; i < pool->timing_generator_count; i++) {
350 struct timing_generator *tg = pool->timing_generators[i];
351 struct dcn_otg_state s = {0};
352 /* Read shared OTG state registers for all DCNx */
353 optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
354
355 /*
356 * For DCN2 and greater, a register on the OPP is used to
357 * determine if the CRTC is blanked instead of the OTG. So use
358 * dpg_is_blanked() if exists, otherwise fallback on otg.
359 *
360 * TODO: Implement DCN-specific read_otg_state hooks.
361 */
362 if (pool->opps[i]->funcs->dpg_is_blanked)
363 s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
364 else
365 s.blank_enabled = tg->funcs->is_blanked(tg);
366
367 //only print if OTG master is enabled
368 if ((s.otg_enabled & 1) == 0)
369 continue;
370
371 DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d %9d %8d\n",
372 tg->inst,
373 s.v_blank_start,
374 s.v_blank_end,
375 s.v_sync_a_start,
376 s.v_sync_a_end,
377 s.v_sync_a_pol,
378 s.v_total_max,
379 s.v_total_min,
380 s.v_total_max_sel,
381 s.v_total_min_sel,
382 s.h_blank_start,
383 s.h_blank_end,
384 s.h_sync_a_start,
385 s.h_sync_a_end,
386 s.h_sync_a_pol,
387 s.h_total,
388 s.v_total,
389 s.underflow_occurred_status,
390 s.blank_enabled);
391
392 // Clear underflow for debug purposes
393 // We want to keep underflow sticky bit on for the longevity tests outside of test environment.
394 // This function is called only from Windows or Diags test environment, hence it's safe to clear
395 // it from here without affecting the original intent.
396 tg->funcs->clear_optc_underflow(tg);
397 }
398 DTN_INFO("\n");
399
400 // dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
401 // TODO: Update golden log header to reflect this name change
402 DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n");
403 for (i = 0; i < pool->res_cap->num_dsc; i++) {
404 struct display_stream_compressor *dsc = pool->dscs[i];
405 struct dcn_dsc_state s = {0};
406
407 dsc->funcs->dsc_read_state(dsc, &s);
408 DTN_INFO("[%d]: %-9d %-12d %-10d\n",
409 dsc->inst,
410 s.dsc_clock_en,
411 s.dsc_slice_width,
412 s.dsc_bits_per_pixel);
413 DTN_INFO("\n");
414 }
415 DTN_INFO("\n");
416
417 DTN_INFO("S_ENC: DSC_MODE SEC_GSP7_LINE_NUM"
418 " VBID6_LINE_REFERENCE VBID6_LINE_NUM SEC_GSP7_ENABLE SEC_STREAM_ENABLE\n");
419 for (i = 0; i < pool->stream_enc_count; i++) {
420 struct stream_encoder *enc = pool->stream_enc[i];
421 struct enc_state s = {0};
422
423 if (enc->funcs->enc_read_state) {
424 enc->funcs->enc_read_state(enc, &s);
425 DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
426 enc->id,
427 s.dsc_mode,
428 s.sec_gsp_pps_line_num,
429 s.vbid6_line_reference,
430 s.vbid6_line_num,
431 s.sec_gsp_pps_enable,
432 s.sec_stream_enable);
433 DTN_INFO("\n");
434 }
435 }
436 DTN_INFO("\n");
437
438 DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS DP_LINK_TRAINING_COMPLETE\n");
439 for (i = 0; i < dc->link_count; i++) {
440 struct link_encoder *lenc = dc->links[i]->link_enc;
441
442 struct link_enc_state s = {0};
443
444 if (lenc->funcs->read_state) {
445 lenc->funcs->read_state(lenc, &s);
446 DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
447 i,
448 s.dphy_fec_en,
449 s.dphy_fec_ready_shadow,
450 s.dphy_fec_active_status,
451 s.dp_link_training_complete);
452 DTN_INFO("\n");
453 }
454 }
455 DTN_INFO("\n");
456
457 DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
458 "dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
459 dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
460 dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
461 dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
462 dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
463 dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
464 dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
465 dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
466
467 log_mpc_crc(dc, log_ctx);
468
469 DTN_INFO_END();
470 }
471
472 bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
473 {
474 struct hubp *hubp = pipe_ctx->plane_res.hubp;
475 struct timing_generator *tg = pipe_ctx->stream_res.tg;
476
477 if (tg->funcs->is_optc_underflow_occurred(tg)) {
478 tg->funcs->clear_optc_underflow(tg);
479 return true;
480 }
481
482 if (hubp->funcs->hubp_get_underflow_status(hubp)) {
483 hubp->funcs->hubp_clear_underflow(hubp);
484 return true;
485 }
486 return false;
487 }
488
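/*
 * The PG domains alternate between HUBP and DPP instances: even domains
 * (0/2/4/6) cover DCHUBP0-3 and odd domains (1/3/5/7) cover DPP0-3.
 * Forcing a domain on effectively disables power gating for it.
 */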
489 void dcn10_enable_power_gating_plane(
490 struct dce_hwseq *hws,
491 bool enable)
492 {
493 bool force_on = true; /* disable power gating */
494
495 if (enable)
496 force_on = false;
497
498 /* DCHUBP0/1/2/3 */
499 REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
500 REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
501 REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
502 REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
503
504 /* DPP0/1/2/3 */
505 REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
506 REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
507 REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
508 REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
509 }
510
511 void dcn10_disable_vga(
512 struct dce_hwseq *hws)
513 {
514 unsigned int in_vga1_mode = 0;
515 unsigned int in_vga2_mode = 0;
516 unsigned int in_vga3_mode = 0;
517 unsigned int in_vga4_mode = 0;
518
519 REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
520 REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
521 REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
522 REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
523
524 if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
525 in_vga3_mode == 0 && in_vga4_mode == 0)
526 return;
527
528 REG_WRITE(D1VGA_CONTROL, 0);
529 REG_WRITE(D2VGA_CONTROL, 0);
530 REG_WRITE(D3VGA_CONTROL, 0);
531 REG_WRITE(D4VGA_CONTROL, 0);
532
533 /* HW Engineer's Notes:
534 * During switch from vga->extended, if we set the VGA_TEST_ENABLE and
535 * then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
536 *
537 * Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
538 * VGA_TEST_ENABLE, to leave it in the same state as before.
539 */
540 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
541 REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
542 }
543
544 /**
545 * dcn10_dpp_pg_control - DPP power gate control.
546 *
547 * @hws: dce_hwseq reference.
548 * @dpp_inst: DPP instance reference.
549 * @power_on: true to power the DPP on (gating released), false to power it off.
550 *
551 * Enable or disable power gating for the specified DPP instance.
552 */
553 void dcn10_dpp_pg_control(
554 struct dce_hwseq *hws,
555 unsigned int dpp_inst,
556 bool power_on)
557 {
558 uint32_t power_gate = power_on ? 0 : 1;
559 uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
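/* DPP instance n is controlled by power domain 2n + 1 (DOMAIN1/3/5/7). */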
560
561 if (hws->ctx->dc->debug.disable_dpp_power_gate)
562 return;
563 if (REG(DOMAIN1_PG_CONFIG) == 0)
564 return;
565
566 switch (dpp_inst) {
567 case 0: /* DPP0 */
568 REG_UPDATE(DOMAIN1_PG_CONFIG,
569 DOMAIN1_POWER_GATE, power_gate);
570
571 REG_WAIT(DOMAIN1_PG_STATUS,
572 DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
573 1, 1000);
574 break;
575 case 1: /* DPP1 */
576 REG_UPDATE(DOMAIN3_PG_CONFIG,
577 DOMAIN3_POWER_GATE, power_gate);
578
579 REG_WAIT(DOMAIN3_PG_STATUS,
580 DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
581 1, 1000);
582 break;
583 case 2: /* DPP2 */
584 REG_UPDATE(DOMAIN5_PG_CONFIG,
585 DOMAIN5_POWER_GATE, power_gate);
586
587 REG_WAIT(DOMAIN5_PG_STATUS,
588 DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
589 1, 1000);
590 break;
591 case 3: /* DPP3 */
592 REG_UPDATE(DOMAIN7_PG_CONFIG,
593 DOMAIN7_POWER_GATE, power_gate);
594
595 REG_WAIT(DOMAIN7_PG_STATUS,
596 DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
597 1, 1000);
598 break;
599 default:
600 BREAK_TO_DEBUGGER();
601 break;
602 }
603 }
604
605 /**
606 * dcn10_hubp_pg_control - HUBP power gate control.
607 *
608 * @hws: dce_hwseq reference.
609 * @hubp_inst: HUBP instance reference.
610 * @power_on: true to power the HUBP on (gating released), false to power it off.
611 *
612 * Enable or disable power gating for the specified HUBP instance.
613 */
614 void dcn10_hubp_pg_control(
615 struct dce_hwseq *hws,
616 unsigned int hubp_inst,
617 bool power_on)
618 {
619 uint32_t power_gate = power_on ? 0 : 1;
620 uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
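/* HUBP instance n is controlled by power domain 2n (DOMAIN0/2/4/6). */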
621
622 if (hws->ctx->dc->debug.disable_hubp_power_gate)
623 return;
624 if (REG(DOMAIN0_PG_CONFIG) == 0)
625 return;
626
627 switch (hubp_inst) {
628 case 0: /* DCHUBP0 */
629 REG_UPDATE(DOMAIN0_PG_CONFIG,
630 DOMAIN0_POWER_GATE, power_gate);
631
632 REG_WAIT(DOMAIN0_PG_STATUS,
633 DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
634 1, 1000);
635 break;
636 case 1: /* DCHUBP1 */
637 REG_UPDATE(DOMAIN2_PG_CONFIG,
638 DOMAIN2_POWER_GATE, power_gate);
639
640 REG_WAIT(DOMAIN2_PG_STATUS,
641 DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
642 1, 1000);
643 break;
644 case 2: /* DCHUBP2 */
645 REG_UPDATE(DOMAIN4_PG_CONFIG,
646 DOMAIN4_POWER_GATE, power_gate);
647
648 REG_WAIT(DOMAIN4_PG_STATUS,
649 DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
650 1, 1000);
651 break;
652 case 3: /* DCHUBP3 */
653 REG_UPDATE(DOMAIN6_PG_CONFIG,
654 DOMAIN6_POWER_GATE, power_gate);
655
656 REG_WAIT(DOMAIN6_PG_STATUS,
657 DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
658 1, 1000);
659 break;
660 default:
661 BREAK_TO_DEBUGGER();
662 break;
663 }
664 }
665
666 static void power_on_plane(
667 struct dce_hwseq *hws,
668 int plane_id)
669 {
670 DC_LOGGER_INIT(hws->ctx->logger);
671 if (REG(DC_IP_REQUEST_CNTL)) {
672 REG_SET(DC_IP_REQUEST_CNTL, 0,
673 IP_REQUEST_EN, 1);
674
675 if (hws->funcs.dpp_pg_control)
676 hws->funcs.dpp_pg_control(hws, plane_id, true);
677
678 if (hws->funcs.hubp_pg_control)
679 hws->funcs.hubp_pg_control(hws, plane_id, true);
680
681 REG_SET(DC_IP_REQUEST_CNTL, 0,
682 IP_REQUEST_EN, 0);
683 DC_LOG_DEBUG(
684 "Un-gated front end for pipe %d\n", plane_id);
685 }
686 }
687
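/*
 * DEGVIDCN10_253 workaround: when every HUBP is power gated, stutter can
 * no longer be entered. apply_DEGVIDCN10_253_wa() keeps HUBP0 powered on
 * in that case so stutter stays available; undo_DEGVIDCN10_253_wa()
 * reverses this before HUBP0 is brought back into normal use.
 */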
688 static void undo_DEGVIDCN10_253_wa(struct dc *dc)
689 {
690 struct dce_hwseq *hws = dc->hwseq;
691 struct hubp *hubp = dc->res_pool->hubps[0];
692
693 if (!hws->wa_state.DEGVIDCN10_253_applied)
694 return;
695
696 hubp->funcs->set_blank(hubp, true);
697
698 REG_SET(DC_IP_REQUEST_CNTL, 0,
699 IP_REQUEST_EN, 1);
700
701 hws->funcs.hubp_pg_control(hws, 0, false);
702 REG_SET(DC_IP_REQUEST_CNTL, 0,
703 IP_REQUEST_EN, 0);
704
705 hws->wa_state.DEGVIDCN10_253_applied = false;
706 }
707
708 static void apply_DEGVIDCN10_253_wa(struct dc *dc)
709 {
710 struct dce_hwseq *hws = dc->hwseq;
711 struct hubp *hubp = dc->res_pool->hubps[0];
712 int i;
713
714 if (dc->debug.disable_stutter)
715 return;
716
717 if (!hws->wa.DEGVIDCN10_253)
718 return;
719
720 for (i = 0; i < dc->res_pool->pipe_count; i++) {
721 if (!dc->res_pool->hubps[i]->power_gated)
722 return;
723 }
724
725 /* all pipes are power gated, apply the workaround to enable stutter. */
726
727 REG_SET(DC_IP_REQUEST_CNTL, 0,
728 IP_REQUEST_EN, 1);
729
730 hws->funcs.hubp_pg_control(hws, 0, true);
731 REG_SET(DC_IP_REQUEST_CNTL, 0,
732 IP_REQUEST_EN, 0);
733
734 hubp->funcs->set_hubp_blank_en(hubp, false);
735 hws->wa_state.DEGVIDCN10_253_applied = true;
736 }
737
738 void dcn10_bios_golden_init(struct dc *dc)
739 {
740 struct dce_hwseq *hws = dc->hwseq;
741 struct dc_bios *bp = dc->ctx->dc_bios;
742 int i;
743 bool allow_self_fresh_force_enable = true;
744
745 if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
746 return;
747
748 if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
749 allow_self_fresh_force_enable =
750 dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
751
752
753 /* WA to let the DF sleep when idle after resume from S0i3.
754 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by the
755 * command table; if it was 0 before the command table was called
756 * and changed to 1 afterwards, it should be set back to 0 so the
757 * original setting is restored.
758 */
759
760 /* initialize dcn global */
761 bp->funcs->enable_disp_power_gating(bp,
762 CONTROLLER_ID_D0, ASIC_PIPE_INIT);
763
764 for (i = 0; i < dc->res_pool->pipe_count; i++) {
765 /* initialize dcn per pipe */
766 bp->funcs->enable_disp_power_gating(bp,
767 CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
768 }
769
770 if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
771 if (allow_self_fresh_force_enable == false &&
772 dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
773 dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
774 !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
775
776 }
777
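/*
 * Workaround for spurious OPTC underflow: wait for pending MPCC
 * disconnects on the stream's pipes, re-enable OTG_BLANK double
 * buffering, and clear the underflow status only if it was newly
 * raised while this function ran.
 */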
778 static void false_optc_underflow_wa(
779 struct dc *dc,
780 const struct dc_stream_state *stream,
781 struct timing_generator *tg)
782 {
783 int i;
784 bool underflow;
785
786 if (!dc->hwseq->wa.false_optc_underflow)
787 return;
788
789 underflow = tg->funcs->is_optc_underflow_occurred(tg);
790
791 for (i = 0; i < dc->res_pool->pipe_count; i++) {
792 struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
793
794 if (old_pipe_ctx->stream != stream)
795 continue;
796
797 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
798 }
799
800 if (tg->funcs->set_blank_data_double_buffer)
801 tg->funcs->set_blank_data_double_buffer(tg, true);
802
803 if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
804 tg->funcs->clear_optc_underflow(tg);
805 }
806
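/*
 * Bring up the stream timing on the parent pipe: enable the OPTC clock,
 * program the pixel clock PLL, program the OTG timing and blank color,
 * blank the CRTC if needed and finally enable it.
 */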
807 enum dc_status dcn10_enable_stream_timing(
808 struct pipe_ctx *pipe_ctx,
809 struct dc_state *context,
810 struct dc *dc)
811 {
812 struct dc_stream_state *stream = pipe_ctx->stream;
813 enum dc_color_space color_space;
814 struct tg_color black_color = {0};
815
816 /* The caller's loop visits pipe0 (the parent pipe) first, and the back
817 * end is set up for pipe0. Child pipes share the back end with pipe0,
818 * so no programming is needed for them.
819 */
820 if (pipe_ctx->top_pipe != NULL)
821 return DC_OK;
822
823 /* TODO check if timing_changed, disable stream if timing changed */
824
825 /* The HW programming guide assumes the display was already disabled by
826 * the unplug sequence and that the OTG is stopped.
827 */
828 pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);
829
830 if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
831 pipe_ctx->clock_source,
832 &pipe_ctx->stream_res.pix_clk_params,
833 &pipe_ctx->pll_settings)) {
834 BREAK_TO_DEBUGGER();
835 return DC_ERROR_UNEXPECTED;
836 }
837
838 pipe_ctx->stream_res.tg->funcs->program_timing(
839 pipe_ctx->stream_res.tg,
840 &stream->timing,
841 pipe_ctx->pipe_dlg_param.vready_offset,
842 pipe_ctx->pipe_dlg_param.vstartup_start,
843 pipe_ctx->pipe_dlg_param.vupdate_offset,
844 pipe_ctx->pipe_dlg_param.vupdate_width,
845 pipe_ctx->stream->signal,
846 true);
847
848 #if 0 /* move to after enable_crtc */
849 /* TODO: OPP FMT, ABM. etc. should be done here. */
850 /* or FPGA now. instance 0 only. TODO: move to opp.c */
851
852 inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;
853
854 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
855 pipe_ctx->stream_res.opp,
856 &stream->bit_depth_params,
857 &stream->clamping);
858 #endif
859 /* program otg blank color */
860 color_space = stream->output_color_space;
861 color_space_to_black_color(dc, color_space, &black_color);
862
863 /*
864 * The way 4:2:0 is packed, two channels carry the Y component and one
865 * channel alternates between Cb and Cr, so both channels need the
866 * pixel value for Y.
867 */
868 if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
869 black_color.color_r_cr = black_color.color_g_y;
870
871 if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
872 pipe_ctx->stream_res.tg->funcs->set_blank_color(
873 pipe_ctx->stream_res.tg,
874 &black_color);
875
876 if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
877 !pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
878 pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
879 hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
880 false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
881 }
882
883 /* VTG is within DCHUB command block. DCFCLK is always on */
884 if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
885 BREAK_TO_DEBUGGER();
886 return DC_ERROR_UNEXPECTED;
887 }
888
889 /* TODO program crtc source select for non-virtual signal*/
890 /* TODO program FMT */
891 /* TODO setup link_enc */
892 /* TODO set stream attributes */
893 /* TODO program audio */
894 /* TODO enable stream if timing changed */
895 /* TODO unblank stream if DP */
896
897 return DC_OK;
898 }
899
900 static void dcn10_reset_back_end_for_pipe(
901 struct dc *dc,
902 struct pipe_ctx *pipe_ctx,
903 struct dc_state *context)
904 {
905 int i;
906 struct dc_link *link;
907 DC_LOGGER_INIT(dc->ctx->logger);
908 if (pipe_ctx->stream_res.stream_enc == NULL) {
909 pipe_ctx->stream = NULL;
910 return;
911 }
912
913 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
914 link = pipe_ctx->stream->link;
915 /* DPMS may already have disabled the stream, or the dpms_off
916 * status may be incorrect due to the fastboot feature. When the
917 * system resumes from S4 with only the second screen active,
918 * dpms_off is true but VBIOS has lit up eDP, so check the link
919 * status as well.
920 */
921 if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
922 core_link_disable_stream(pipe_ctx);
923 else if (pipe_ctx->stream_res.audio)
924 dc->hwss.disable_audio_stream(pipe_ctx);
925
926 if (pipe_ctx->stream_res.audio) {
927 /*disable az_endpoint*/
928 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
929
930 /*free audio*/
931 if (dc->caps.dynamic_audio == true) {
932 /* audio endpoints are arbitrated dynamically, so free the
933 * resource; is_audio_acquired needs to be reset */
934 update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
935 pipe_ctx->stream_res.audio, false);
936 pipe_ctx->stream_res.audio = NULL;
937 }
938 }
939 }
940
941 /* The caller's loop resets the parent pipe (pipe0) last. The back end
942 * is shared by all pipes and is disabled only when the parent pipe is
943 * disabled.
944 */
945 if (pipe_ctx->top_pipe == NULL) {
946
947 if (pipe_ctx->stream_res.abm)
948 dc->hwss.set_abm_immediate_disable(pipe_ctx);
949
950 pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
951
952 pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
953 if (pipe_ctx->stream_res.tg->funcs->set_drr)
954 pipe_ctx->stream_res.tg->funcs->set_drr(
955 pipe_ctx->stream_res.tg, NULL);
956 }
957
958 for (i = 0; i < dc->res_pool->pipe_count; i++)
959 if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
960 break;
961
962 if (i == dc->res_pool->pipe_count)
963 return;
964
965 pipe_ctx->stream = NULL;
966 DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
967 pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
968 }
969
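/*
 * Forced recovery used when the p-state change check fails (see
 * dcn10_verify_allow_pstate_change_high): blank all HUBPs, soft-reset
 * DCHUBBUB, toggle HUBP_DISABLE and release the reset again. Only runs
 * when dc->debug.recovery_enabled is set.
 */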
970 static bool dcn10_hw_wa_force_recovery(struct dc *dc)
971 {
972 struct hubp *hubp;
973 unsigned int i;
974 bool need_recover = true;
975
976 if (!dc->debug.recovery_enabled)
977 return false;
978
979 for (i = 0; i < dc->res_pool->pipe_count; i++) {
980 struct pipe_ctx *pipe_ctx =
981 &dc->current_state->res_ctx.pipe_ctx[i];
982 if (pipe_ctx != NULL) {
983 hubp = pipe_ctx->plane_res.hubp;
984 if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
985 if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
986 /* one pipe underflow, we will reset all the pipes*/
987 need_recover = true;
988 }
989 }
990 }
991 }
992 if (!need_recover)
993 return false;
994 /*
995 DCHUBP_CNTL:HUBP_BLANK_EN=1
996 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
997 DCHUBP_CNTL:HUBP_DISABLE=1
998 DCHUBP_CNTL:HUBP_DISABLE=0
999 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
1000 DCSURF_PRIMARY_SURFACE_ADDRESS
1001 DCHUBP_CNTL:HUBP_BLANK_EN=0
1002 */
1003
1004 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1005 struct pipe_ctx *pipe_ctx =
1006 &dc->current_state->res_ctx.pipe_ctx[i];
1007 if (pipe_ctx != NULL) {
1008 hubp = pipe_ctx->plane_res.hubp;
1009 /*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
1010 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1011 hubp->funcs->set_hubp_blank_en(hubp, true);
1012 }
1013 }
1014 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
1015 hubbub1_soft_reset(dc->res_pool->hubbub, true);
1016
1017 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1018 struct pipe_ctx *pipe_ctx =
1019 &dc->current_state->res_ctx.pipe_ctx[i];
1020 if (pipe_ctx != NULL) {
1021 hubp = pipe_ctx->plane_res.hubp;
1022 /*DCHUBP_CNTL:HUBP_DISABLE=1*/
1023 if (hubp != NULL && hubp->funcs->hubp_disable_control)
1024 hubp->funcs->hubp_disable_control(hubp, true);
1025 }
1026 }
1027 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1028 struct pipe_ctx *pipe_ctx =
1029 &dc->current_state->res_ctx.pipe_ctx[i];
1030 if (pipe_ctx != NULL) {
1031 hubp = pipe_ctx->plane_res.hubp;
1032 /*DCHUBP_CNTL:HUBP_DISABLE=0*/
1033 if (hubp != NULL && hubp->funcs->hubp_disable_control)
1034 hubp->funcs->hubp_disable_control(hubp, true);
1035 }
1036 }
1037 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
1038 hubbub1_soft_reset(dc->res_pool->hubbub, false);
1039 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1040 struct pipe_ctx *pipe_ctx =
1041 &dc->current_state->res_ctx.pipe_ctx[i];
1042 if (pipe_ctx != NULL) {
1043 hubp = pipe_ctx->plane_res.hubp;
1044 /*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
1045 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1046 hubp->funcs->set_hubp_blank_en(hubp, true);
1047 }
1048 }
1049 return true;
1050
1051 }
1052
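/*
 * Sanity check that DCHUBBUB still allows p-state changes. On failure,
 * optionally dump the HW state, break to the debugger and attempt the
 * forced recovery sequence above, then re-check.
 */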
1053 void dcn10_verify_allow_pstate_change_high(struct dc *dc)
1054 {
1055 static bool should_log_hw_state; /* prevent hw state log by default */
1056
1057 if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub)) {
1058 int i = 0;
1059
1060 if (should_log_hw_state)
1061 dcn10_log_hw_state(dc, NULL);
1062
1063 TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
1064 BREAK_TO_DEBUGGER();
1065 if (dcn10_hw_wa_force_recovery(dc)) {
1066 /*check again*/
1067 if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub))
1068 BREAK_TO_DEBUGGER();
1069 }
1070 }
1071 }
1072
1073 /* trigger HW to start disconnect plane from stream on the next vsync */
1074 void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
1075 {
1076 struct dce_hwseq *hws = dc->hwseq;
1077 struct hubp *hubp = pipe_ctx->plane_res.hubp;
1078 int dpp_id = pipe_ctx->plane_res.dpp->inst;
1079 struct mpc *mpc = dc->res_pool->mpc;
1080 struct mpc_tree *mpc_tree_params;
1081 struct mpcc *mpcc_to_remove = NULL;
1082 struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
1083
1084 mpc_tree_params = &(opp->mpc_tree_params);
1085 mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
1086
1087 /*Already reset*/
1088 if (mpcc_to_remove == NULL)
1089 return;
1090
1091 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
1092 if (opp != NULL)
1093 opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1094
1095 dc->optimized_required = true;
1096
1097 if (hubp->funcs->hubp_disconnect)
1098 hubp->funcs->hubp_disconnect(hubp);
1099
1100 if (dc->debug.sanity_checks)
1101 hws->funcs.verify_allow_pstate_change_high(dc);
1102 }
1103
1104 /**
1105 * dcn10_plane_atomic_power_down - Power down plane components.
1106 *
1107 * @dc: dc struct reference, used to retrieve the hwseq.
1108 * @dpp: dpp struct reference.
1109 * @hubp: hubp struct reference.
1110 *
1111 * Keep in mind that this operation requires a power gate configuration;
1112 * however, power gate requests are tightly controlled to avoid problems,
1113 * so the power gate request is normally disabled. This function therefore
1114 * first enables the power gate request, then powers down the DPP and HUBP,
1115 * and finally disables the power gate request again.
1116 */
1117 void dcn10_plane_atomic_power_down(struct dc *dc,
1118 struct dpp *dpp,
1119 struct hubp *hubp)
1120 {
1121 struct dce_hwseq *hws = dc->hwseq;
1122 DC_LOGGER_INIT(dc->ctx->logger);
1123
1124 if (REG(DC_IP_REQUEST_CNTL)) {
1125 REG_SET(DC_IP_REQUEST_CNTL, 0,
1126 IP_REQUEST_EN, 1);
1127
1128 if (hws->funcs.dpp_pg_control)
1129 hws->funcs.dpp_pg_control(hws, dpp->inst, false);
1130
1131 if (hws->funcs.hubp_pg_control)
1132 hws->funcs.hubp_pg_control(hws, hubp->inst, false);
1133
1134 dpp->funcs->dpp_reset(dpp);
1135 REG_SET(DC_IP_REQUEST_CNTL, 0,
1136 IP_REQUEST_EN, 0);
1137 DC_LOG_DEBUG(
1138 "Power gated front end %d\n", hubp->inst);
1139 }
1140 }
1141
1142 /* disable HW used by plane.
1143 * note: cannot disable until disconnect is complete
1144 */
1145 void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
1146 {
1147 struct dce_hwseq *hws = dc->hwseq;
1148 struct hubp *hubp = pipe_ctx->plane_res.hubp;
1149 struct dpp *dpp = pipe_ctx->plane_res.dpp;
1150 int opp_id = hubp->opp_id;
1151
1152 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
1153
1154 hubp->funcs->hubp_clk_cntl(hubp, false);
1155
1156 dpp->funcs->dpp_dppclk_control(dpp, false, false);
1157
1158 if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
1159 pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
1160 pipe_ctx->stream_res.opp,
1161 false);
1162
1163 hubp->power_gated = true;
1164 dc->optimized_required = false; /* We're powering off, no need to optimize */
1165
1166 hws->funcs.plane_atomic_power_down(dc,
1167 pipe_ctx->plane_res.dpp,
1168 pipe_ctx->plane_res.hubp);
1169
1170 pipe_ctx->stream = NULL;
1171 memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
1172 memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
1173 pipe_ctx->top_pipe = NULL;
1174 pipe_ctx->bottom_pipe = NULL;
1175 pipe_ctx->plane_state = NULL;
1176 }
1177
1178 void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
1179 {
1180 struct dce_hwseq *hws = dc->hwseq;
1181 DC_LOGGER_INIT(dc->ctx->logger);
1182
1183 if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1184 return;
1185
1186 hws->funcs.plane_atomic_disable(dc, pipe_ctx);
1187
1188 apply_DEGVIDCN10_253_wa(dc);
1189
1190 DC_LOG_DC("Power down front end %d\n",
1191 pipe_ctx->pipe_idx);
1192 }
1193
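/*
 * Put every pipe into a known disabled state at init: blank enabled OTGs,
 * reset the MPC muxes, then disconnect and power down each front end.
 * Pipes carrying a seamless-boot stream are left untouched.
 */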
1194 void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
1195 {
1196 int i;
1197 struct dce_hwseq *hws = dc->hwseq;
1198 bool can_apply_seamless_boot = false;
1199
1200 for (i = 0; i < context->stream_count; i++) {
1201 if (context->streams[i]->apply_seamless_boot_optimization) {
1202 can_apply_seamless_boot = true;
1203 break;
1204 }
1205 }
1206
1207 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1208 struct timing_generator *tg = dc->res_pool->timing_generators[i];
1209 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1210
1211 /* We assume pipe_ctx does not map irregularly to a non-preferred
1212 * front end. If pipe_ctx->stream is not NULL, the pipe will be
1213 * used, so don't disable it.
1214 */
1215 if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1216 continue;
1217
1218 /* Blank controller using driver code instead of
1219 * command table.
1220 */
1221 if (tg->funcs->is_tg_enabled(tg)) {
1222 if (hws->funcs.init_blank != NULL) {
1223 hws->funcs.init_blank(dc, tg);
1224 tg->funcs->lock(tg);
1225 } else {
1226 tg->funcs->lock(tg);
1227 tg->funcs->set_blank(tg, true);
1228 hwss_wait_for_blank_complete(tg);
1229 }
1230 }
1231 }
1232
1233 /* num_opp will be equal to number of mpcc */
1234 for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
1235 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1236
1237 /* Cannot reset the MPC mux if seamless boot */
1238 if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1239 continue;
1240
1241 dc->res_pool->mpc->funcs->mpc_init_single_inst(
1242 dc->res_pool->mpc, i);
1243 }
1244
1245 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1246 struct timing_generator *tg = dc->res_pool->timing_generators[i];
1247 struct hubp *hubp = dc->res_pool->hubps[i];
1248 struct dpp *dpp = dc->res_pool->dpps[i];
1249 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1250
1251 /* We assume pipe_ctx does not map irregularly to a non-preferred
1252 * front end. If pipe_ctx->stream is not NULL, the pipe will be
1253 * used, so don't disable it.
1254 */
1255 if (can_apply_seamless_boot &&
1256 pipe_ctx->stream != NULL &&
1257 pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
1258 pipe_ctx->stream_res.tg)) {
1259 // Enable double buffering for OTG_BLANK no matter if
1260 // seamless boot is enabled or not to suppress global sync
1261 // signals when OTG blanked. This is to prevent pipe from
1262 // requesting data while in PSR.
1263 tg->funcs->tg_init(tg);
1264 hubp->power_gated = true;
1265 continue;
1266 }
1267
1268 /* Disable on the current state so the new one isn't cleared. */
1269 pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1270
1271 dpp->funcs->dpp_reset(dpp);
1272
1273 pipe_ctx->stream_res.tg = tg;
1274 pipe_ctx->pipe_idx = i;
1275
1276 pipe_ctx->plane_res.hubp = hubp;
1277 pipe_ctx->plane_res.dpp = dpp;
1278 pipe_ctx->plane_res.mpcc_inst = dpp->inst;
1279 hubp->mpcc_id = dpp->inst;
1280 hubp->opp_id = OPP_ID_INVALID;
1281 hubp->power_gated = false;
1282
1283 dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
1284 dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
1285 dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1286 pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
1287
1288 hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
1289
1290 if (tg->funcs->is_tg_enabled(tg))
1291 tg->funcs->unlock(tg);
1292
1293 dc->hwss.disable_plane(dc, pipe_ctx);
1294
1295 pipe_ctx->stream_res.tg = NULL;
1296 pipe_ctx->plane_res.hubp = NULL;
1297
1298 tg->funcs->tg_init(tg);
1299 }
1300 }
1301
1302 void dcn10_init_hw(struct dc *dc)
1303 {
1304 int i, j;
1305 struct abm *abm = dc->res_pool->abm;
1306 struct dmcu *dmcu = dc->res_pool->dmcu;
1307 struct dce_hwseq *hws = dc->hwseq;
1308 struct dc_bios *dcb = dc->ctx->dc_bios;
1309 struct resource_pool *res_pool = dc->res_pool;
1310 uint32_t backlight = MAX_BACKLIGHT_LEVEL;
1311 bool is_optimized_init_done = false;
1312
1313 if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
1314 dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
1315
1316 // Initialize the dccg
1317 if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
1318 dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);
1319
1320 if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1321
1322 REG_WRITE(REFCLK_CNTL, 0);
1323 REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
1324 REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1325
1326 if (!dc->debug.disable_clock_gate) {
1327 /* enable all DCN clock gating */
1328 REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1329
1330 REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1331
1332 REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1333 }
1334
1335 //Enable ability to power gate / don't force power on permanently
1336 if (hws->funcs.enable_power_gating_plane)
1337 hws->funcs.enable_power_gating_plane(hws, true);
1338
1339 return;
1340 }
1341
1342 if (!dcb->funcs->is_accelerated_mode(dcb))
1343 hws->funcs.disable_vga(dc->hwseq);
1344
1345 hws->funcs.bios_golden_init(dc);
1346
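/*
 * Derive the DCCG and DCHUB reference clocks from the crystal frequency
 * reported by the VBIOS firmware info; if no DCCG sw component exists,
 * fall back to the xtalin clock for both.
 */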
1347 if (dc->ctx->dc_bios->fw_info_valid) {
1348 res_pool->ref_clocks.xtalin_clock_inKhz =
1349 dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
1350
1351 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1352 if (res_pool->dccg && res_pool->hubbub) {
1353
1354 (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
1355 dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
1356 &res_pool->ref_clocks.dccg_ref_clock_inKhz);
1357
1358 (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
1359 res_pool->ref_clocks.dccg_ref_clock_inKhz,
1360 &res_pool->ref_clocks.dchub_ref_clock_inKhz);
1361 } else {
1362 // Not all ASICs have DCCG sw component
1363 res_pool->ref_clocks.dccg_ref_clock_inKhz =
1364 res_pool->ref_clocks.xtalin_clock_inKhz;
1365 res_pool->ref_clocks.dchub_ref_clock_inKhz =
1366 res_pool->ref_clocks.xtalin_clock_inKhz;
1367 }
1368 }
1369 } else
1370 ASSERT_CRITICAL(false);
1371
1372 for (i = 0; i < dc->link_count; i++) {
1373 /* Power up AND update implementation according to the
1374 * required signal (which may be different from the
1375 * default signal on connector).
1376 */
1377 struct dc_link *link = dc->links[i];
1378
1379 if (!is_optimized_init_done)
1380 link->link_enc->funcs->hw_init(link->link_enc);
1381
1382 /* Check for enabled DIG to identify enabled display */
1383 if (link->link_enc->funcs->is_dig_enabled &&
1384 link->link_enc->funcs->is_dig_enabled(link->link_enc))
1385 link->link_status.link_active = true;
1386 }
1387
1388 /* Power gate DSCs */
1389 if (!is_optimized_init_done) {
1390 for (i = 0; i < res_pool->res_cap->num_dsc; i++)
1391 if (hws->funcs.dsc_pg_control != NULL)
1392 hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
1393 }
1394
1395 /* Enable outbox notification feature of dmub */
1396 if (dc->debug.enable_dmub_aux_for_legacy_ddc)
1397 dmub_enable_outbox_notification(dc);
1398
1399 /* we want to turn off all dp displays before doing detection */
1400 if (dc->config.power_down_display_on_boot) {
1401 uint8_t dpcd_power_state = '\0';
1402 enum dc_status status = DC_ERROR_UNEXPECTED;
1403
1404 for (i = 0; i < dc->link_count; i++) {
1405 if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
1406 continue;
1407
1408 /* DP 2.0 requires that LTTPR Caps be read first */
1409 dp_retrieve_lttpr_cap(dc->links[i]);
1410
1411 /*
1412 * If any of the displays are lit up turn them off.
1413 * The reason is that some MST hubs cannot be turned off
1414 * completely until we tell them to do so.
1415 * If not turned off, then displays connected to MST hub
1416 * won't light up.
1417 */
1418 status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
1419 &dpcd_power_state, sizeof(dpcd_power_state));
1420 if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) {
1421 /* blank dp stream before power off receiver*/
1422 if (dc->links[i]->link_enc->funcs->get_dig_frontend) {
1423 unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend(dc->links[i]->link_enc);
1424
1425 for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
1426 if (fe == dc->res_pool->stream_enc[j]->id) {
1427 dc->res_pool->stream_enc[j]->funcs->dp_blank(
1428 dc->res_pool->stream_enc[j]);
1429 break;
1430 }
1431 }
1432 }
1433 dp_receiver_power_ctrl(dc->links[i], false);
1434 }
1435 }
1436 }
1437
1438 /* If taking control over from VBIOS, we may want to optimize our first
1439 * mode set, so we need to skip powering down pipes until we know which
1440 * pipes we want to use.
1441 * Otherwise, if taking control is not possible, we need to power
1442 * everything down.
1443 */
1444 if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
1445 if (!is_optimized_init_done) {
1446 hws->funcs.init_pipes(dc, dc->current_state);
1447 if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
1448 dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
1449 !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
1450 }
1451 }
1452
1453 if (!is_optimized_init_done) {
1454
1455 for (i = 0; i < res_pool->audio_count; i++) {
1456 struct audio *audio = res_pool->audios[i];
1457
1458 audio->funcs->hw_init(audio);
1459 }
1460
1461 for (i = 0; i < dc->link_count; i++) {
1462 struct dc_link *link = dc->links[i];
1463
1464 if (link->panel_cntl)
1465 backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
1466 }
1467
1468 if (abm != NULL)
1469 abm->funcs->abm_init(abm, backlight);
1470
1471 if (dmcu != NULL && !dmcu->auto_load_dmcu)
1472 dmcu->funcs->dmcu_init(dmcu);
1473 }
1474
1475 if (abm != NULL && dmcu != NULL)
1476 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1477
1478 /* power AFMT HDMI memory TODO: may move to dis/en output save power*/
1479 if (!is_optimized_init_done)
1480 REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1481
1482 if (!dc->debug.disable_clock_gate) {
1483 /* enable all DCN clock gating */
1484 REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1485
1486 REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1487
1488 REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1489 }
1490 if (hws->funcs.enable_power_gating_plane)
1491 hws->funcs.enable_power_gating_plane(dc->hwseq, true);
1492
1493 if (dc->clk_mgr->funcs->notify_wm_ranges)
1494 dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
1495 }
1496
1497 /* In headless boot cases, DIG may be turned
1498 * on which causes HW/SW discrepancies.
1499 * To avoid this, power down hardware on boot
1500 * if DIG is turned on
1501 */
1502 void dcn10_power_down_on_boot(struct dc *dc)
1503 {
1504 struct dc_link *edp_links[MAX_NUM_EDP];
1505 struct dc_link *edp_link = NULL;
1506 int edp_num;
1507 int i = 0;
1508
1509 get_edp_links(dc, edp_links, &edp_num);
1510 if (edp_num)
1511 edp_link = edp_links[0];
1512
1513 if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
1514 edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
1515 dc->hwseq->funcs.edp_backlight_control &&
1516 dc->hwss.power_down &&
1517 dc->hwss.edp_power_control) {
1518 dc->hwseq->funcs.edp_backlight_control(edp_link, false);
1519 dc->hwss.power_down(dc);
1520 dc->hwss.edp_power_control(edp_link, false);
1521 } else {
1522 for (i = 0; i < dc->link_count; i++) {
1523 struct dc_link *link = dc->links[i];
1524
1525 if (link->link_enc->funcs->is_dig_enabled &&
1526 link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
1527 dc->hwss.power_down) {
1528 dc->hwss.power_down(dc);
1529 break;
1530 }
1531
1532 }
1533 }
1534
1535 /*
1536 * Call update_clocks with empty context
1537 * to send DISPLAY_OFF
1538 * Otherwise DISPLAY_OFF may not be asserted
1539 */
1540 if (dc->clk_mgr->funcs->set_low_power_state)
1541 dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
1542 }
1543
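/*
 * Walk the current state from the last pipe down and reset the back end
 * of every top pipe whose stream is gone or needs reprogramming in the
 * new context, powering down its clock source afterwards.
 */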
1544 void dcn10_reset_hw_ctx_wrap(
1545 struct dc *dc,
1546 struct dc_state *context)
1547 {
1548 int i;
1549 struct dce_hwseq *hws = dc->hwseq;
1550
1551 /* Reset Back End*/
1552 for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
1553 struct pipe_ctx *pipe_ctx_old =
1554 &dc->current_state->res_ctx.pipe_ctx[i];
1555 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1556
1557 if (!pipe_ctx_old->stream)
1558 continue;
1559
1560 if (pipe_ctx_old->top_pipe)
1561 continue;
1562
1563 if (!pipe_ctx->stream ||
1564 pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
1565 struct clock_source *old_clk = pipe_ctx_old->clock_source;
1566
1567 dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
1568 if (hws->funcs.enable_stream_gating)
1569 hws->funcs.enable_stream_gating(dc, pipe_ctx);
1570 if (old_clk)
1571 old_clk->funcs->cs_power_down(old_clk);
1572 }
1573 }
1574 }
1575
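/*
 * For side-by-side / top-and-bottom stereo on the secondary split pipe,
 * temporarily swap the right-eye address into the left slot so that pipe
 * scans out the right eye. The original left address is returned through
 * *addr so dcn10_update_plane_addr() can restore it after programming.
 */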
1576 static bool patch_address_for_sbs_tb_stereo(
1577 struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1578 {
1579 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1580 bool sec_split = pipe_ctx->top_pipe &&
1581 pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
1582 if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1583 (pipe_ctx->stream->timing.timing_3d_format ==
1584 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1585 pipe_ctx->stream->timing.timing_3d_format ==
1586 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
1587 *addr = plane_state->address.grph_stereo.left_addr;
1588 plane_state->address.grph_stereo.left_addr =
1589 plane_state->address.grph_stereo.right_addr;
1590 return true;
1591 } else {
1592 if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1593 plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1594 plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1595 plane_state->address.grph_stereo.right_addr =
1596 plane_state->address.grph_stereo.left_addr;
1597 plane_state->address.grph_stereo.right_meta_addr =
1598 plane_state->address.grph_stereo.left_meta_addr;
1599 }
1600 }
1601 return false;
1602 }
1603
1604 void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
1605 {
1606 bool addr_patched = false;
1607 PHYSICAL_ADDRESS_LOC addr;
1608 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1609
1610 if (plane_state == NULL)
1611 return;
1612
1613 addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
1614
1615 pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
1616 pipe_ctx->plane_res.hubp,
1617 &plane_state->address,
1618 plane_state->flip_immediate);
1619
1620 plane_state->status.requested_address = plane_state->address;
1621
1622 if (plane_state->flip_immediate)
1623 plane_state->status.current_address = plane_state->address;
1624
1625 if (addr_patched)
1626 pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
1627 }
1628
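/*
 * Program the DPP input (degamma) path from the plane's input transfer
 * function: predefined sRGB/BT.709 use the fixed HW curves, PQ and
 * arbitrary curves are translated into a PWL, and linear or bypass
 * transfer functions bypass degamma entirely.
 */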
1629 bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1630 const struct dc_plane_state *plane_state)
1631 {
1632 struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
1633 const struct dc_transfer_func *tf = NULL;
1634 bool result = true;
1635
1636 if (dpp_base == NULL)
1637 return false;
1638
1639 if (plane_state->in_transfer_func)
1640 tf = plane_state->in_transfer_func;
1641
1642 if (plane_state->gamma_correction &&
1643 !dpp_base->ctx->dc->debug.always_use_regamma
1644 && !plane_state->gamma_correction->is_identity
1645 && dce_use_lut(plane_state->format))
1646 dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
1647
1648 if (tf == NULL)
1649 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1650 else if (tf->type == TF_TYPE_PREDEFINED) {
1651 switch (tf->tf) {
1652 case TRANSFER_FUNCTION_SRGB:
1653 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
1654 break;
1655 case TRANSFER_FUNCTION_BT709:
1656 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
1657 break;
1658 case TRANSFER_FUNCTION_LINEAR:
1659 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1660 break;
1661 case TRANSFER_FUNCTION_PQ:
1662 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
1663 cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
1664 dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
1665 result = true;
1666 break;
1667 default:
1668 result = false;
1669 break;
1670 }
1671 } else if (tf->type == TF_TYPE_BYPASS) {
1672 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1673 } else {
1674 cm_helper_translate_curve_to_degamma_hw_format(tf,
1675 &dpp_base->degamma_params);
1676 dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
1677 &dpp_base->degamma_params);
1678 result = true;
1679 }
1680
1681 return result;
1682 }
1683
1684 #define MAX_NUM_HW_POINTS 0x200
1685
1686 static void log_tf(struct dc_context *ctx,
1687 struct dc_transfer_func *tf, uint32_t hw_points_num)
1688 {
1689 	// DC_LOG_GAMMA logs the hw points by default
1690 	// DC_LOG_ALL_GAMMA logs all points, not only hw points
1691 	// DC_LOG_ALL_TF_CHANNELS logs all channels of the tf
1692 int i = 0;
1693
1694 DC_LOGGER_INIT(ctx->logger);
1695 DC_LOG_GAMMA("Gamma Correction TF");
1696 DC_LOG_ALL_GAMMA("Logging all tf points...");
1697 DC_LOG_ALL_TF_CHANNELS("Logging all channels...");
1698
1699 for (i = 0; i < hw_points_num; i++) {
1700 DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1701 DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1702 DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1703 }
1704
1705 for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
1706 DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1707 DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1708 DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1709 }
1710 }
1711
1712 bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1713 const struct dc_stream_state *stream)
1714 {
1715 struct dpp *dpp = pipe_ctx->plane_res.dpp;
1716
1717 if (dpp == NULL)
1718 return false;
1719
1720 dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
1721
1722 if (stream->out_transfer_func &&
1723 stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
1724 stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
1725 dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
1726
1727 	/* dcn10_translate_regamma_to_hw_format takes 750us, so only do it on a
1728 	 * full update.
1729 */
1730 else if (cm_helper_translate_curve_to_hw_format(
1731 stream->out_transfer_func,
1732 &dpp->regamma_params, false)) {
1733 dpp->funcs->dpp_program_regamma_pwl(
1734 dpp,
1735 &dpp->regamma_params, OPP_REGAMMA_USER);
1736 } else
1737 dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
1738
1739 if (stream != NULL && stream->ctx != NULL &&
1740 stream->out_transfer_func != NULL) {
1741 log_tf(stream->ctx,
1742 stream->out_transfer_func,
1743 dpp->regamma_params.hw_points_num);
1744 }
1745
1746 return true;
1747 }
1748
1749 void dcn10_pipe_control_lock(
1750 struct dc *dc,
1751 struct pipe_ctx *pipe,
1752 bool lock)
1753 {
1754 struct dce_hwseq *hws = dc->hwseq;
1755
1756 	/* use TG master update lock to lock everything on the TG,
1757 	 * so only the top pipe needs to take the lock
1758 	 */
1759 if (!pipe || pipe->top_pipe)
1760 return;
1761
1762 if (dc->debug.sanity_checks)
1763 hws->funcs.verify_allow_pstate_change_high(dc);
1764
1765 if (lock)
1766 pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
1767 else
1768 pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
1769
1770 if (dc->debug.sanity_checks)
1771 hws->funcs.verify_allow_pstate_change_high(dc);
1772 }
1773
1774 /**
1775 * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
1776 *
1777  * Software keepout workaround to prevent cursor update locking from stalling
1778  * out cursor updates indefinitely, or old values from being retained in the
1779  * case where the viewport changes in the same frame as the cursor.
1780 *
1781 * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
1782 * too close to VUPDATE, then stall out until VUPDATE finishes.
1783 *
1784 * TODO: Optimize cursor programming to be once per frame before VUPDATE
1785 * to avoid the need for this workaround.
1786 */
1787 static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
1788 {
1789 struct dc_stream_state *stream = pipe_ctx->stream;
1790 struct crtc_position position;
1791 uint32_t vupdate_start, vupdate_end;
1792 unsigned int lines_to_vupdate, us_to_vupdate, vpos;
1793 unsigned int us_per_line, us_vupdate;
1794
1795 if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
1796 return;
1797
1798 if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
1799 return;
1800
1801 dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
1802 &vupdate_end);
1803
1804 dc->hwss.get_position(&pipe_ctx, 1, &position);
1805 vpos = position.vertical_count;
1806
1807 /* Avoid wraparound calculation issues */
1808 vupdate_start += stream->timing.v_total;
1809 vupdate_end += stream->timing.v_total;
1810 vpos += stream->timing.v_total;
1811
1812 if (vpos <= vupdate_start) {
1813 /* VPOS is in VACTIVE or back porch. */
1814 lines_to_vupdate = vupdate_start - vpos;
1815 } else if (vpos > vupdate_end) {
1816 /* VPOS is in the front porch. */
1817 return;
1818 } else {
1819 /* VPOS is in VUPDATE. */
1820 lines_to_vupdate = 0;
1821 }
1822
1823 /* Calculate time until VUPDATE in microseconds. */
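	/*
	 * pix_clk_100hz is in units of 100 Hz, so:
	 *   us_per_line = h_total / pixel_clock_hz * 1e6
	 *               = h_total * 10000 / pix_clk_100hz
	 * e.g. h_total = 2200 at 148.5 MHz (pix_clk_100hz = 1485000) gives
	 * roughly 14 us per line.
	 */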
1824 us_per_line =
1825 stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
1826 us_to_vupdate = lines_to_vupdate * us_per_line;
1827
1828 	/* 70 us is a conservative estimate of cursor update time */
1829 if (us_to_vupdate > 70)
1830 return;
1831
1832 /* Stall out until the cursor update completes. */
1833 if (vupdate_end < vupdate_start)
1834 vupdate_end += stream->timing.v_total;
1835 us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
1836 udelay(us_to_vupdate + us_vupdate);
1837 }
1838
1839 void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
1840 {
1841 /* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
1842 if (!pipe || pipe->top_pipe)
1843 return;
1844
1845 /* Prevent cursor lock from stalling out cursor updates. */
1846 if (lock)
1847 delay_cursor_until_vupdate(dc, pipe);
1848
1849 if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
1850 union dmub_hw_lock_flags hw_locks = { 0 };
1851 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
1852
1853 hw_locks.bits.lock_cursor = 1;
1854 inst_flags.opp_inst = pipe->stream_res.opp->inst;
1855
1856 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
1857 lock,
1858 &hw_locks,
1859 &inst_flags);
1860 } else
1861 dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
1862 pipe->stream_res.opp->inst, lock);
1863 }
1864
1865 static bool wait_for_reset_trigger_to_occur(
1866 struct dc_context *dc_ctx,
1867 struct timing_generator *tg)
1868 {
1869 bool rc = false;
1870
1871 	/* To avoid an endless loop we wait at most
1872 	 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
1873 const uint32_t frames_to_wait_on_triggered_reset = 10;
1874 int i;
1875
1876 for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
1877
1878 if (!tg->funcs->is_counter_moving(tg)) {
1879 DC_ERROR("TG counter is not moving!\n");
1880 break;
1881 }
1882
1883 if (tg->funcs->did_triggered_reset_occur(tg)) {
1884 rc = true;
1885 /* usually occurs at i=1 */
1886 DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
1887 i);
1888 break;
1889 }
1890
1891 /* Wait for one frame. */
1892 tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
1893 tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
1894 }
1895
1896 	if (!rc)
1897 DC_ERROR("GSL: Timeout on reset trigger!\n");
1898
1899 return rc;
1900 }
1901
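/*
 * Reduce the numerator/denominator pair by repeatedly dividing out common
 * prime factors (primes up to 997). When checkUint32Bounary is set, stop as
 * soon as both values fit in 32 bits and report whether that was achieved;
 * otherwise just reduce the fraction as far as the prime table allows.
 * A small worked example: 84/126 -> divide by 2 -> 42/63 -> divide by 3 ->
 * 14/21 -> divide by 7 -> 2/3.
 */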
1902 uint64_t reduceSizeAndFraction(
1903 uint64_t *numerator,
1904 uint64_t *denominator,
1905 bool checkUint32Bounary)
1906 {
1907 int i;
1908 bool ret = checkUint32Bounary == false;
1909 uint64_t max_int32 = 0xffffffff;
1910 uint64_t num, denom;
1911 static const uint16_t prime_numbers[] = {
1912 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43,
1913 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103,
1914 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,
1915 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227,
1916 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
1917 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353,
1918 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421,
1919 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487,
1920 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
1921 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
1922 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
1923 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
1924 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
1925 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
1926 941, 947, 953, 967, 971, 977, 983, 991, 997};
1927 int count = ARRAY_SIZE(prime_numbers);
1928
1929 num = *numerator;
1930 denom = *denominator;
1931 for (i = 0; i < count; i++) {
1932 uint32_t num_remainder, denom_remainder;
1933 uint64_t num_result, denom_result;
1934 if (checkUint32Bounary &&
1935 num <= max_int32 && denom <= max_int32) {
1936 ret = true;
1937 break;
1938 }
1939 do {
1940 num_result = div_u64_rem(num, prime_numbers[i], &num_remainder);
1941 denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder);
1942 if (num_remainder == 0 && denom_remainder == 0) {
1943 num = num_result;
1944 denom = denom_result;
1945 }
1946 } while (num_remainder == 0 && denom_remainder == 0);
1947 }
1948 *numerator = num;
1949 *denominator = denom;
1950 return ret;
1951 }
1952
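/*
 * The refresh rate in Hz is pix_clk_100hz * 100 / (h_total * v_total); for
 * example a 148.5 MHz pixel clock with a 2200 x 1125 total raster gives
 * 148500000 / 2475000 = 60 Hz, which is not considered a low refresh rate.
 */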
1953 bool is_low_refresh_rate(struct pipe_ctx *pipe)
1954 {
1955 uint32_t master_pipe_refresh_rate =
1956 pipe->stream->timing.pix_clk_100hz * 100 /
1957 pipe->stream->timing.h_total /
1958 pipe->stream->timing.v_total;
1959 return master_pipe_refresh_rate <= 30;
1960 }
1961
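/*
 * The effective DTO clock divider accounts for YCbCr 4:2:0 output (x2), an
 * optional x2 for low refresh rate timings, and one factor per ODM pipe in
 * the chain. For example, a two-pipe ODM combine driving 4:2:0 output would
 * return 2 * 2 = 4 (with account_low_refresh_rate == false).
 */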
1962 uint8_t get_clock_divider(struct pipe_ctx *pipe, bool account_low_refresh_rate)
1963 {
1964 uint32_t clock_divider = 1;
1965 uint32_t numpipes = 1;
1966
1967 if (account_low_refresh_rate && is_low_refresh_rate(pipe))
1968 clock_divider *= 2;
1969
1970 if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420)
1971 clock_divider *= 2;
1972
1973 while (pipe->next_odm_pipe) {
1974 pipe = pipe->next_odm_pipe;
1975 numpipes++;
1976 }
1977 clock_divider *= numpipes;
1978
1979 return clock_divider;
1980 }
1981
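/*
 * Align every non-embedded pipe's DP DTO so that all timings in the group
 * produce the same frame rate as the embedded panel. The programmed ratio is
 *   phase / modulo = (embedded_pclk * h_total * v_total) /
 *                    (dp_ref_clk * embedded_h_total * embedded_v_total)
 * (with phase further divided by the pipe's clock divider), i.e. the pixel
 * clock that matches the embedded frame period, divided by the DP reference
 * clock. Returns the index of the chosen master pipe, or -1 if none.
 */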
1982 int dcn10_align_pixel_clocks(
1983 struct dc *dc,
1984 int group_size,
1985 struct pipe_ctx *grouped_pipes[])
1986 {
1987 struct dc_context *dc_ctx = dc->ctx;
1988 int i, master = -1, embedded = -1;
1989 struct dc_crtc_timing hw_crtc_timing[MAX_PIPES] = {0};
1990 uint64_t phase[MAX_PIPES];
1991 uint64_t modulo[MAX_PIPES];
1992 unsigned int pclk;
1993
1994 uint32_t embedded_pix_clk_100hz;
1995 uint16_t embedded_h_total;
1996 uint16_t embedded_v_total;
1997 bool clamshell_closed = false;
1998 uint32_t dp_ref_clk_100hz =
1999 dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;
2000
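	/*
	 * vblank_alignment_dto_params packs the embedded panel timing as:
	 * bit 63 = clamshell closed, bits 62:48 = v_total, bits 46:32 = h_total,
	 * bits 31:0 = pixel clock in units of 100 Hz.
	 */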
2001 if (dc->config.vblank_alignment_dto_params &&
2002 dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
2003 clamshell_closed =
2004 (dc->config.vblank_alignment_dto_params >> 63);
2005 embedded_h_total =
2006 (dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF;
2007 embedded_v_total =
2008 (dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF;
2009 embedded_pix_clk_100hz =
2010 dc->config.vblank_alignment_dto_params & 0xFFFFFFFF;
2011
2012 for (i = 0; i < group_size; i++) {
2013 grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing(
2014 grouped_pipes[i]->stream_res.tg,
2015 &hw_crtc_timing[i]);
2016 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
2017 dc->res_pool->dp_clock_source,
2018 grouped_pipes[i]->stream_res.tg->inst,
2019 &pclk);
2020 hw_crtc_timing[i].pix_clk_100hz = pclk;
2021 if (dc_is_embedded_signal(
2022 grouped_pipes[i]->stream->signal)) {
2023 embedded = i;
2024 master = i;
2025 phase[i] = embedded_pix_clk_100hz*100;
2026 modulo[i] = dp_ref_clk_100hz*100;
2027 } else {
2028
2029 phase[i] = (uint64_t)embedded_pix_clk_100hz*
2030 hw_crtc_timing[i].h_total*
2031 hw_crtc_timing[i].v_total;
2032 phase[i] = div_u64(phase[i], get_clock_divider(grouped_pipes[i], true));
2033 modulo[i] = (uint64_t)dp_ref_clk_100hz*
2034 embedded_h_total*
2035 embedded_v_total;
2036
2037 if (reduceSizeAndFraction(&phase[i],
2038 &modulo[i], true) == false) {
2039 				/*
2040 				 * This stops this timing from being reported
2041 				 * as synchronizable.
2042 				 */
2043 DC_SYNC_INFO("Failed to reduce DTO parameters\n");
2044 grouped_pipes[i]->stream->has_non_synchronizable_pclk = true;
2045 }
2046 }
2047 }
2048
2049 for (i = 0; i < group_size; i++) {
2050 if (i != embedded && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) {
2051 dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk(
2052 dc->res_pool->dp_clock_source,
2053 grouped_pipes[i]->stream_res.tg->inst,
2054 phase[i], modulo[i]);
2055 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
2056 dc->res_pool->dp_clock_source,
2057 grouped_pipes[i]->stream_res.tg->inst, &pclk);
2058 grouped_pipes[i]->stream->timing.pix_clk_100hz =
2059 pclk*get_clock_divider(grouped_pipes[i], false);
2060 if (master == -1)
2061 master = i;
2062 }
2063 }
2064
2065 }
2066 return master;
2067 }
2068
2069 void dcn10_enable_vblanks_synchronization(
2070 struct dc *dc,
2071 int group_index,
2072 int group_size,
2073 struct pipe_ctx *grouped_pipes[])
2074 {
2075 struct dc_context *dc_ctx = dc->ctx;
2076 struct output_pixel_processor *opp;
2077 struct timing_generator *tg;
2078 int i, width, height, master;
2079
2080 for (i = 1; i < group_size; i++) {
2081 opp = grouped_pipes[i]->stream_res.opp;
2082 tg = grouped_pipes[i]->stream_res.tg;
2083 tg->funcs->get_otg_active_size(tg, &width, &height);
2084 if (opp->funcs->opp_program_dpg_dimensions)
2085 opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
2086 }
2087
2088 for (i = 0; i < group_size; i++) {
2089 if (grouped_pipes[i]->stream == NULL)
2090 continue;
2091 grouped_pipes[i]->stream->vblank_synchronized = false;
2092 grouped_pipes[i]->stream->has_non_synchronizable_pclk = false;
2093 }
2094
2095 DC_SYNC_INFO("Aligning DP DTOs\n");
2096
2097 master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes);
2098
2099 DC_SYNC_INFO("Synchronizing VBlanks\n");
2100
2101 if (master >= 0) {
2102 for (i = 0; i < group_size; i++) {
2103 if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
2104 grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
2105 grouped_pipes[master]->stream_res.tg,
2106 grouped_pipes[i]->stream_res.tg,
2107 grouped_pipes[master]->stream->timing.pix_clk_100hz,
2108 grouped_pipes[i]->stream->timing.pix_clk_100hz,
2109 get_clock_divider(grouped_pipes[master], false),
2110 get_clock_divider(grouped_pipes[i], false));
2111 grouped_pipes[i]->stream->vblank_synchronized = true;
2112 }
2113 grouped_pipes[master]->stream->vblank_synchronized = true;
2114 DC_SYNC_INFO("Sync complete\n");
2115 }
2116
2117 for (i = 1; i < group_size; i++) {
2118 opp = grouped_pipes[i]->stream_res.opp;
2119 tg = grouped_pipes[i]->stream_res.tg;
2120 tg->funcs->get_otg_active_size(tg, &width, &height);
2121 if (opp->funcs->opp_program_dpg_dimensions)
2122 opp->funcs->opp_program_dpg_dimensions(opp, width, height);
2123 }
2124 }
2125
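/*
 * Synchronize the OTGs in a group: temporarily reprogram each follower's DPG
 * dimensions to 2 * height + 1, clear the streams' vblank_synchronized flags,
 * arm the reset trigger on every follower against pipe 0's OTG, wait for the
 * trigger to fire (checking one follower is enough since they are all
 * synchronized), then disarm the triggers and restore the DPG dimensions.
 */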
2126 void dcn10_enable_timing_synchronization(
2127 struct dc *dc,
2128 int group_index,
2129 int group_size,
2130 struct pipe_ctx *grouped_pipes[])
2131 {
2132 struct dc_context *dc_ctx = dc->ctx;
2133 struct output_pixel_processor *opp;
2134 struct timing_generator *tg;
2135 int i, width, height;
2136
2137 DC_SYNC_INFO("Setting up OTG reset trigger\n");
2138
2139 for (i = 1; i < group_size; i++) {
2140 opp = grouped_pipes[i]->stream_res.opp;
2141 tg = grouped_pipes[i]->stream_res.tg;
2142 tg->funcs->get_otg_active_size(tg, &width, &height);
2143 if (opp->funcs->opp_program_dpg_dimensions)
2144 opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
2145 }
2146
2147 for (i = 0; i < group_size; i++) {
2148 if (grouped_pipes[i]->stream == NULL)
2149 continue;
2150 grouped_pipes[i]->stream->vblank_synchronized = false;
2151 }
2152
2153 for (i = 1; i < group_size; i++)
2154 grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
2155 grouped_pipes[i]->stream_res.tg,
2156 grouped_pipes[0]->stream_res.tg->inst);
2157
2158 DC_SYNC_INFO("Waiting for trigger\n");
2159
2160 	/* Only need to check one pipe for the reset, as all the others are
2161 	 * synchronized. Look at the last pipe programmed to reset.
2162 	 */
2163
2164 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
2165 for (i = 1; i < group_size; i++)
2166 grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
2167 grouped_pipes[i]->stream_res.tg);
2168
2169 for (i = 1; i < group_size; i++) {
2170 opp = grouped_pipes[i]->stream_res.opp;
2171 tg = grouped_pipes[i]->stream_res.tg;
2172 tg->funcs->get_otg_active_size(tg, &width, &height);
2173 if (opp->funcs->opp_program_dpg_dimensions)
2174 opp->funcs->opp_program_dpg_dimensions(opp, width, height);
2175 }
2176
2177 DC_SYNC_INFO("Sync complete\n");
2178 }
2179
2180 void dcn10_enable_per_frame_crtc_position_reset(
2181 struct dc *dc,
2182 int group_size,
2183 struct pipe_ctx *grouped_pipes[])
2184 {
2185 struct dc_context *dc_ctx = dc->ctx;
2186 int i;
2187
2188 DC_SYNC_INFO("Setting up\n");
2189 for (i = 0; i < group_size; i++)
2190 if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
2191 grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
2192 grouped_pipes[i]->stream_res.tg,
2193 0,
2194 &grouped_pipes[i]->stream->triggered_crtc_reset);
2195
2196 DC_SYNC_INFO("Waiting for trigger\n");
2197
2198 for (i = 0; i < group_size; i++)
2199 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
2200
2201 DC_SYNC_INFO("Multi-display sync is complete\n");
2202 }
2203
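/*
 * Read the system aperture settings from the MC registers. The physical page
 * number is in 4 KB units (hence the << 12); the aperture low/high logical
 * addresses are expanded with a << 18 shift before being handed to HUBP.
 */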
2204 static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
2205 struct vm_system_aperture_param *apt,
2206 struct dce_hwseq *hws)
2207 {
2208 PHYSICAL_ADDRESS_LOC physical_page_number;
2209 uint32_t logical_addr_low;
2210 uint32_t logical_addr_high;
2211
2212 REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
2213 PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
2214 REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
2215 PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);
2216
2217 REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
2218 LOGICAL_ADDR, &logical_addr_low);
2219
2220 REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
2221 LOGICAL_ADDR, &logical_addr_high);
2222
2223 apt->sys_default.quad_part = physical_page_number.quad_part << 12;
2224 apt->sys_low.quad_part = (int64_t)logical_addr_low << 18;
2225 apt->sys_high.quad_part = (int64_t)logical_addr_high << 18;
2226 }
2227
2228 /* Temporary read settings, future will get values from kmd directly */
2229 static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
2230 struct vm_context0_param *vm0,
2231 struct dce_hwseq *hws)
2232 {
2233 PHYSICAL_ADDRESS_LOC fb_base;
2234 PHYSICAL_ADDRESS_LOC fb_offset;
2235 uint32_t fb_base_value;
2236 uint32_t fb_offset_value;
2237
2238 REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
2239 REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);
2240
2241 REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
2242 PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
2243 REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
2244 PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);
2245
2246 REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
2247 LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
2248 REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
2249 LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);
2250
2251 REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
2252 LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
2253 REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
2254 LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);
2255
2256 REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
2257 PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
2258 REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
2259 PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);
2260
2261 /*
2262 	 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR are in UMA space.
2263 * Therefore we need to do
2264 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
2265 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
2266 */
2267 fb_base.quad_part = (uint64_t)fb_base_value << 24;
2268 fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
2269 vm0->pte_base.quad_part += fb_base.quad_part;
2270 vm0->pte_base.quad_part -= fb_offset.quad_part;
2271 }
2272
2273
2274 void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
2275 {
2276 struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
2277 struct vm_system_aperture_param apt = { {{ 0 } } };
2278 struct vm_context0_param vm0 = { { { 0 } } };
2279
2280 mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
2281 mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
2282
2283 hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
2284 hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
2285 }
2286
2287 static void dcn10_enable_plane(
2288 struct dc *dc,
2289 struct pipe_ctx *pipe_ctx,
2290 struct dc_state *context)
2291 {
2292 struct dce_hwseq *hws = dc->hwseq;
2293
2294 if (dc->debug.sanity_checks) {
2295 hws->funcs.verify_allow_pstate_change_high(dc);
2296 }
2297
2298 undo_DEGVIDCN10_253_wa(dc);
2299
2300 power_on_plane(dc->hwseq,
2301 pipe_ctx->plane_res.hubp->inst);
2302
2303 	/* enable DCFCLK for the current DCHUB */
2304 pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
2305
2306 /* make sure OPP_PIPE_CLOCK_EN = 1 */
2307 pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
2308 pipe_ctx->stream_res.opp,
2309 true);
2310
2311 if (dc->config.gpu_vm_support)
2312 dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);
2313
2314 if (dc->debug.sanity_checks) {
2315 hws->funcs.verify_allow_pstate_change_high(dc);
2316 }
2317
2318 if (!pipe_ctx->top_pipe
2319 && pipe_ctx->plane_state
2320 && pipe_ctx->plane_state->flip_int_enabled
2321 && pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
2322 pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
2323
2324 }
2325
2326 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
2327 {
2328 int i = 0;
2329 struct dpp_grph_csc_adjustment adjust;
2330 memset(&adjust, 0, sizeof(adjust));
2331 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2332
2333
2334 if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2335 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2336 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2337 adjust.temperature_matrix[i] =
2338 pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2339 } else if (pipe_ctx->plane_state &&
2340 pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
2341 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2342 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2343 adjust.temperature_matrix[i] =
2344 pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
2345 }
2346
2347 pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
2348 }
2349
2350
2351 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
2352 {
2353 if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
2354 if (pipe_ctx->top_pipe) {
2355 struct pipe_ctx *top = pipe_ctx->top_pipe;
2356
2357 while (top->top_pipe)
2358 top = top->top_pipe; // Traverse to top pipe_ctx
2359 if (top->plane_state && top->plane_state->layer_index == 0)
2360 return true; // Front MPO plane not hidden
2361 }
2362 }
2363 return false;
2364 }
2365
2366 static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
2367 {
2368 // Override rear plane RGB bias to fix MPO brightness
2369 uint16_t rgb_bias = matrix[3];
2370
2371 matrix[3] = 0;
2372 matrix[7] = 0;
2373 matrix[11] = 0;
2374 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2375 matrix[3] = rgb_bias;
2376 matrix[7] = rgb_bias;
2377 matrix[11] = rgb_bias;
2378 }
2379
2380 void dcn10_program_output_csc(struct dc *dc,
2381 struct pipe_ctx *pipe_ctx,
2382 enum dc_color_space colorspace,
2383 uint16_t *matrix,
2384 int opp_id)
2385 {
2386 if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
2387 if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {
2388
2389 /* MPO is broken with RGB colorspaces when OCSC matrix
2390 * brightness offset >= 0 on DCN1 due to OCSC before MPC
2391 * Blending adds offsets from front + rear to rear plane
2392 *
2393 * Fix is to set RGB bias to 0 on rear plane, top plane
2394 * black value pixels add offset instead of rear + front
2395 */
2396
2397 int16_t rgb_bias = matrix[3];
2398 // matrix[3/7/11] are all the same offset value
2399
2400 if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
2401 dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
2402 } else {
2403 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2404 }
2405 }
2406 } else {
2407 if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
2408 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
2409 }
2410 }
2411
2412 static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2413 {
2414 struct dc_bias_and_scale bns_params = {0};
2415
2416 // program the input csc
2417 dpp->funcs->dpp_setup(dpp,
2418 plane_state->format,
2419 EXPANSION_MODE_ZERO,
2420 plane_state->input_csc_color_matrix,
2421 plane_state->color_space,
2422 NULL);
2423
2424 //set scale and bias registers
2425 build_prescale_params(&bns_params, plane_state);
2426 if (dpp->funcs->dpp_program_bias_and_scale)
2427 dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
2428 }
2429
2430 void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id)
2431 {
2432 struct mpc *mpc = dc->res_pool->mpc;
2433
2434 if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
2435 get_hdr_visual_confirm_color(pipe_ctx, color);
2436 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
2437 get_surface_visual_confirm_color(pipe_ctx, color);
2438 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
2439 get_surface_tile_visual_confirm_color(pipe_ctx, color);
2440 else
2441 color_space_to_black_color(
2442 dc, pipe_ctx->stream->output_color_space, color);
2443
2444 if (mpc->funcs->set_bg_color)
2445 mpc->funcs->set_bg_color(mpc, color, mpcc_id);
2446 }
2447
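/*
 * Build the MPCC blend configuration for this plane (per-pixel vs. global
 * alpha, pre-multiplied alpha only for RGB output spaces on DCN1.0) and
 * insert it into the stream's MPC tree. The MPCC instance is tied to the
 * HUBP instance; the tree is only rebuilt on a full update, otherwise just
 * the blend settings are refreshed.
 */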
2448 void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
2449 {
2450 struct hubp *hubp = pipe_ctx->plane_res.hubp;
2451 struct mpcc_blnd_cfg blnd_cfg = {{0}};
2452 bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2453 int mpcc_id;
2454 struct mpcc *new_mpcc;
2455 struct mpc *mpc = dc->res_pool->mpc;
2456 struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
2457
2458 if (per_pixel_alpha)
2459 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
2460 else
2461 blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
2462
2463 blnd_cfg.overlap_only = false;
2464 blnd_cfg.global_gain = 0xff;
2465
2466 if (pipe_ctx->plane_state->global_alpha)
2467 blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
2468 else
2469 blnd_cfg.global_alpha = 0xff;
2470
2471 /* DCN1.0 has output CM before MPC which seems to screw with
2472 * pre-multiplied alpha.
2473 */
2474 blnd_cfg.pre_multiplied_alpha = is_rgb_cspace(
2475 pipe_ctx->stream->output_color_space)
2476 && per_pixel_alpha;
2477
2478
2479 /*
2480 * TODO: remove hack
2481 * Note: currently there is a bug in init_hw such that
2482 * on resume from hibernate, BIOS sets up MPCC0, and
2483 * we do mpcc_remove but the mpcc cannot go to idle
2484 	 * after remove. This causes us to pick mpcc1 here,
2485 	 * which causes a pstate hang for an as-yet unknown reason.
2486 */
2487 mpcc_id = hubp->inst;
2488
2489 	/* If there is no full update, we don't need to touch the MPC tree */
2490 if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
2491 mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
2492 dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
2493 return;
2494 }
2495
2496 /* check if this MPCC is already being used */
2497 new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
2498 /* remove MPCC if being used */
2499 if (new_mpcc != NULL)
2500 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
2501 else
2502 if (dc->debug.sanity_checks)
2503 mpc->funcs->assert_mpcc_idle_before_connect(
2504 dc->res_pool->mpc, mpcc_id);
2505
2506 /* Call MPC to insert new plane */
2507 new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
2508 mpc_tree_params,
2509 &blnd_cfg,
2510 NULL,
2511 NULL,
2512 hubp->inst,
2513 mpcc_id);
2514 dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
2515
2516 ASSERT(new_mpcc != NULL);
2517
2518 hubp->opp_id = pipe_ctx->stream_res.opp->inst;
2519 hubp->mpcc_id = mpcc_id;
2520 }
2521
2522 static void update_scaler(struct pipe_ctx *pipe_ctx)
2523 {
2524 bool per_pixel_alpha =
2525 pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2526
2527 pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2528 pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
2529 /* scaler configuration */
2530 pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2531 pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
2532 }
2533
2534 static void dcn10_update_dchubp_dpp(
2535 struct dc *dc,
2536 struct pipe_ctx *pipe_ctx,
2537 struct dc_state *context)
2538 {
2539 struct dce_hwseq *hws = dc->hwseq;
2540 struct hubp *hubp = pipe_ctx->plane_res.hubp;
2541 struct dpp *dpp = pipe_ctx->plane_res.dpp;
2542 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2543 struct plane_size size = plane_state->plane_size;
2544 unsigned int compat_level = 0;
2545 bool should_divided_by_2 = false;
2546
2547 	/* Depending on the DML calculation, the DPP clock value may change dynamically. */
2548 	/* If the requested max DPP clock is lower than the current dispclk, there is
2549 	 * no need to divide by 2.
2550 	 */
2551 if (plane_state->update_flags.bits.full_update) {
2552
2553 		/* The new calculated dispclk and dppclk are stored in
2554 		 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. The current
2555 		 * dispclk and dppclk come from dc->clk_mgr->clks.dispclk_khz.
2556 		 * dcn_validate_bandwidth computes the new dispclk and dppclk.
2557 		 * dispclk is put in use after optimize_bandwidth, when
2558 		 * ramp_up_dispclk_with_dpp is called.
2559 		 * There are two places where dppclk is put in use. One is the
2560 		 * same location as dispclk; the other is within
2561 		 * update_dchubp_dpp, which happens between prepare_bandwidth and
2562 		 * optimize_bandwidth.
2563 		 * A dppclk update within update_dchubp_dpp means the new dispclk
2564 		 * and dppclk values are not in use at the same time. When the
2565 		 * clocks are decreased, this may leave dppclk lower than the
2566 		 * previous configuration and get the pipe stuck.
2567 		 * For example, with eDP + external DP, change the DP resolution
2568 		 * from 1920x1080x144hz to 1280x960x60hz.
2569 		 * Before the change: dispclk = 337889, dppclk = 337889.
2570 		 * After the mode change, dcn_validate_bandwidth calculates
2571 		 * dispclk = 143122, dppclk = 143122.
2572 		 * If update_dchubp_dpp is executed before dispclk is updated,
2573 		 * dispclk = 337889, but dppclk uses the new value dispclk / 2 =
2574 		 * 168944. This causes a pipe pstate warning.
2575 		 * Solution: between prepare_bandwidth and optimize_bandwidth,
2576 		 * while dispclk is going to be decreased, keep dppclk = dispclk.
2577 		 */
2578 if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
2579 dc->clk_mgr->clks.dispclk_khz)
2580 should_divided_by_2 = false;
2581 else
2582 should_divided_by_2 =
2583 context->bw_ctx.bw.dcn.clk.dppclk_khz <=
2584 dc->clk_mgr->clks.dispclk_khz / 2;
2585
2586 dpp->funcs->dpp_dppclk_control(
2587 dpp,
2588 should_divided_by_2,
2589 true);
2590
2591 if (dc->res_pool->dccg)
2592 dc->res_pool->dccg->funcs->update_dpp_dto(
2593 dc->res_pool->dccg,
2594 dpp->inst,
2595 pipe_ctx->plane_res.bw.dppclk_khz);
2596 else
2597 dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
2598 dc->clk_mgr->clks.dispclk_khz / 2 :
2599 dc->clk_mgr->clks.dispclk_khz;
2600 }
2601
2602 	/* TODO: Need an input parameter to tell which OTG the current DCHUB pipe is tied to.
2603 	 * VTG is within DCHUBBUB, which is a common block shared by each pipe's HUBP.
2604 	 * VTG has a 1:1 mapping with OTG. Each pipe's HUBP selects which VTG to use.
2605 	 */
2606 if (plane_state->update_flags.bits.full_update) {
2607 hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
2608
2609 hubp->funcs->hubp_setup(
2610 hubp,
2611 &pipe_ctx->dlg_regs,
2612 &pipe_ctx->ttu_regs,
2613 &pipe_ctx->rq_regs,
2614 &pipe_ctx->pipe_dlg_param);
2615 hubp->funcs->hubp_setup_interdependent(
2616 hubp,
2617 &pipe_ctx->dlg_regs,
2618 &pipe_ctx->ttu_regs);
2619 }
2620
2621 size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
2622
2623 if (plane_state->update_flags.bits.full_update ||
2624 plane_state->update_flags.bits.bpp_change)
2625 dcn10_update_dpp(dpp, plane_state);
2626
2627 if (plane_state->update_flags.bits.full_update ||
2628 plane_state->update_flags.bits.per_pixel_alpha_change ||
2629 plane_state->update_flags.bits.global_alpha_change)
2630 hws->funcs.update_mpcc(dc, pipe_ctx);
2631
2632 if (plane_state->update_flags.bits.full_update ||
2633 plane_state->update_flags.bits.per_pixel_alpha_change ||
2634 plane_state->update_flags.bits.global_alpha_change ||
2635 plane_state->update_flags.bits.scaling_change ||
2636 plane_state->update_flags.bits.position_change) {
2637 update_scaler(pipe_ctx);
2638 }
2639
2640 if (plane_state->update_flags.bits.full_update ||
2641 plane_state->update_flags.bits.scaling_change ||
2642 plane_state->update_flags.bits.position_change) {
2643 hubp->funcs->mem_program_viewport(
2644 hubp,
2645 &pipe_ctx->plane_res.scl_data.viewport,
2646 &pipe_ctx->plane_res.scl_data.viewport_c);
2647 }
2648
2649 if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
2650 dc->hwss.set_cursor_position(pipe_ctx);
2651 dc->hwss.set_cursor_attribute(pipe_ctx);
2652
2653 if (dc->hwss.set_cursor_sdr_white_level)
2654 dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
2655 }
2656
2657 if (plane_state->update_flags.bits.full_update) {
2658 /*gamut remap*/
2659 dc->hwss.program_gamut_remap(pipe_ctx);
2660
2661 dc->hwss.program_output_csc(dc,
2662 pipe_ctx,
2663 pipe_ctx->stream->output_color_space,
2664 pipe_ctx->stream->csc_color_matrix.matrix,
2665 pipe_ctx->stream_res.opp->inst);
2666 }
2667
2668 if (plane_state->update_flags.bits.full_update ||
2669 plane_state->update_flags.bits.pixel_format_change ||
2670 plane_state->update_flags.bits.horizontal_mirror_change ||
2671 plane_state->update_flags.bits.rotation_change ||
2672 plane_state->update_flags.bits.swizzle_change ||
2673 plane_state->update_flags.bits.dcc_change ||
2674 plane_state->update_flags.bits.bpp_change ||
2675 plane_state->update_flags.bits.scaling_change ||
2676 plane_state->update_flags.bits.plane_size_change) {
2677 hubp->funcs->hubp_program_surface_config(
2678 hubp,
2679 plane_state->format,
2680 &plane_state->tiling_info,
2681 &size,
2682 plane_state->rotation,
2683 &plane_state->dcc,
2684 plane_state->horizontal_mirror,
2685 compat_level);
2686 }
2687
2688 hubp->power_gated = false;
2689
2690 hws->funcs.update_plane_addr(dc, pipe_ctx);
2691
2692 if (is_pipe_tree_visible(pipe_ctx))
2693 hubp->funcs->set_blank(hubp, false);
2694 }
2695
2696 void dcn10_blank_pixel_data(
2697 struct dc *dc,
2698 struct pipe_ctx *pipe_ctx,
2699 bool blank)
2700 {
2701 enum dc_color_space color_space;
2702 struct tg_color black_color = {0};
2703 struct stream_resource *stream_res = &pipe_ctx->stream_res;
2704 struct dc_stream_state *stream = pipe_ctx->stream;
2705
2706 /* program otg blank color */
2707 color_space = stream->output_color_space;
2708 color_space_to_black_color(dc, color_space, &black_color);
2709
2710 /*
2711 	 * The way 420 is packed, 2 channels carry the Y component and 1 channel
2712 	 * alternates between Cb and Cr, so both channels need the pixel
2713 	 * value for Y
2714 */
2715 if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2716 black_color.color_r_cr = black_color.color_g_y;
2717
2718
2719 if (stream_res->tg->funcs->set_blank_color)
2720 stream_res->tg->funcs->set_blank_color(
2721 stream_res->tg,
2722 &black_color);
2723
2724 if (!blank) {
2725 if (stream_res->tg->funcs->set_blank)
2726 stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2727 if (stream_res->abm) {
2728 dc->hwss.set_pipe(pipe_ctx);
2729 stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
2730 }
2731 } else if (blank) {
2732 dc->hwss.set_abm_immediate_disable(pipe_ctx);
2733 if (stream_res->tg->funcs->set_blank) {
2734 stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
2735 stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2736 }
2737 }
2738 }
2739
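/*
 * The HDR multiplier is programmed as a custom float with a sign bit, 6
 * exponent bits and 12 mantissa bits. The 0x1f000 default corresponds to 1.0,
 * assuming the usual sign|exponent|mantissa packing: exponent field 0x1f
 * (bias 31, i.e. 2^0) shifted past the 12 mantissa bits with a zero mantissa.
 */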
2740 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2741 {
2742 struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
2743 uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
2744 struct custom_float_format fmt;
2745
2746 fmt.exponenta_bits = 6;
2747 fmt.mantissa_bits = 12;
2748 fmt.sign = true;
2749
2750
2751 if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
2752 convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2753
2754 pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2755 pipe_ctx->plane_res.dpp, hw_mult);
2756 }
2757
2758 void dcn10_program_pipe(
2759 struct dc *dc,
2760 struct pipe_ctx *pipe_ctx,
2761 struct dc_state *context)
2762 {
2763 struct dce_hwseq *hws = dc->hwseq;
2764
2765 if (pipe_ctx->top_pipe == NULL) {
2766 bool blank = !is_pipe_tree_visible(pipe_ctx);
2767
2768 pipe_ctx->stream_res.tg->funcs->program_global_sync(
2769 pipe_ctx->stream_res.tg,
2770 pipe_ctx->pipe_dlg_param.vready_offset,
2771 pipe_ctx->pipe_dlg_param.vstartup_start,
2772 pipe_ctx->pipe_dlg_param.vupdate_offset,
2773 pipe_ctx->pipe_dlg_param.vupdate_width);
2774
2775 pipe_ctx->stream_res.tg->funcs->set_vtg_params(
2776 pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
2777
2778 if (hws->funcs.setup_vupdate_interrupt)
2779 hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
2780
2781 hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
2782 }
2783
2784 if (pipe_ctx->plane_state->update_flags.bits.full_update)
2785 dcn10_enable_plane(dc, pipe_ctx, context);
2786
2787 dcn10_update_dchubp_dpp(dc, pipe_ctx, context);
2788
2789 hws->funcs.set_hdr_multiplier(pipe_ctx);
2790
2791 if (pipe_ctx->plane_state->update_flags.bits.full_update ||
2792 pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
2793 pipe_ctx->plane_state->update_flags.bits.gamma_change)
2794 hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
2795
2796 	/* dcn10_translate_regamma_to_hw_format takes 750us to finish,
2797 	 * so only do gamma programming for a full update.
2798 	 * TODO: This can be further optimized/cleaned up.
2799 	 * Always call this for now, since it does a memcmp inside before
2800 	 * doing the heavy calculation and programming
2801 */
2802 if (pipe_ctx->plane_state->update_flags.bits.full_update)
2803 hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
2804 }
2805
2806 void dcn10_wait_for_pending_cleared(struct dc *dc,
2807 struct dc_state *context)
2808 {
2809 struct pipe_ctx *pipe_ctx;
2810 struct timing_generator *tg;
2811 int i;
2812
2813 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2814 pipe_ctx = &context->res_ctx.pipe_ctx[i];
2815 tg = pipe_ctx->stream_res.tg;
2816
2817 /*
2818 		 * Only wait for the top pipe's tg pending bit.
2819 		 * Also skip if the pipe is disabled.
2820 */
2821 if (pipe_ctx->top_pipe ||
2822 !pipe_ctx->stream || !pipe_ctx->plane_state ||
2823 !tg->funcs->is_tg_enabled(tg))
2824 continue;
2825
2826 /*
2827 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
2828 * For some reason waiting for OTG_UPDATE_PENDING cleared
2829 * seems to not trigger the update right away, and if we
2830 		 * lock again before VUPDATE then we don't get a separate
2831 		 * operation.
2832 */
2833 pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
2834 pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
2835 }
2836 }
2837
2838 void dcn10_post_unlock_program_front_end(
2839 struct dc *dc,
2840 struct dc_state *context)
2841 {
2842 int i;
2843
2844 DC_LOGGER_INIT(dc->ctx->logger);
2845
2846 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2847 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2848
2849 if (!pipe_ctx->top_pipe &&
2850 !pipe_ctx->prev_odm_pipe &&
2851 pipe_ctx->stream) {
2852 struct timing_generator *tg = pipe_ctx->stream_res.tg;
2853
2854 if (context->stream_status[i].plane_count == 0)
2855 false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
2856 }
2857 }
2858
2859 for (i = 0; i < dc->res_pool->pipe_count; i++)
2860 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
2861 dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
2862
2863 for (i = 0; i < dc->res_pool->pipe_count; i++)
2864 if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
2865 dc->hwss.optimize_bandwidth(dc, context);
2866 break;
2867 }
2868
2869 if (dc->hwseq->wa.DEGVIDCN10_254)
2870 hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
2871 }
2872
2873 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
2874 {
2875 uint8_t i;
2876
2877 for (i = 0; i < context->stream_count; i++) {
2878 if (context->streams[i]->timing.timing_3d_format
2879 == TIMING_3D_FORMAT_HW_FRAME_PACKING) {
2880 /*
2881 * Disable stutter
2882 */
2883 hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
2884 break;
2885 }
2886 }
2887 }
2888
2889 void dcn10_prepare_bandwidth(
2890 struct dc *dc,
2891 struct dc_state *context)
2892 {
2893 struct dce_hwseq *hws = dc->hwseq;
2894 struct hubbub *hubbub = dc->res_pool->hubbub;
2895
2896 if (dc->debug.sanity_checks)
2897 hws->funcs.verify_allow_pstate_change_high(dc);
2898
2899 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
2900 if (context->stream_count == 0)
2901 context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
2902
2903 dc->clk_mgr->funcs->update_clocks(
2904 dc->clk_mgr,
2905 context,
2906 false);
2907 }
2908
2909 dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
2910 &context->bw_ctx.bw.dcn.watermarks,
2911 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
2912 true);
2913 dcn10_stereo_hw_frame_pack_wa(dc, context);
2914
2915 if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
2916 dcn_bw_notify_pplib_of_wm_ranges(dc);
2917
2918 if (dc->debug.sanity_checks)
2919 hws->funcs.verify_allow_pstate_change_high(dc);
2920 }
2921
2922 void dcn10_optimize_bandwidth(
2923 struct dc *dc,
2924 struct dc_state *context)
2925 {
2926 struct dce_hwseq *hws = dc->hwseq;
2927 struct hubbub *hubbub = dc->res_pool->hubbub;
2928
2929 if (dc->debug.sanity_checks)
2930 hws->funcs.verify_allow_pstate_change_high(dc);
2931
2932 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
2933 if (context->stream_count == 0)
2934 context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
2935
2936 dc->clk_mgr->funcs->update_clocks(
2937 dc->clk_mgr,
2938 context,
2939 true);
2940 }
2941
2942 hubbub->funcs->program_watermarks(hubbub,
2943 &context->bw_ctx.bw.dcn.watermarks,
2944 dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
2945 true);
2946
2947 dcn10_stereo_hw_frame_pack_wa(dc, context);
2948
2949 if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
2950 dcn_bw_notify_pplib_of_wm_ranges(dc);
2951
2952 if (dc->debug.sanity_checks)
2953 hws->funcs.verify_allow_pstate_change_high(dc);
2954 }
2955
2956 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
2957 int num_pipes, struct dc_crtc_timing_adjust adjust)
2958 {
2959 int i = 0;
2960 struct drr_params params = {0};
2961 // DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
2962 unsigned int event_triggers = 0x800;
2963 	// Note: DRR trigger events are generated regardless of whether num_frames is met.
2964 unsigned int num_frames = 2;
2965
2966 params.vertical_total_max = adjust.v_total_max;
2967 params.vertical_total_min = adjust.v_total_min;
2968 params.vertical_total_mid = adjust.v_total_mid;
2969 params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
2970 /* TODO: If multiple pipes are to be supported, you need
2971 * some GSL stuff. Static screen triggers may be programmed differently
2972 * as well.
2973 */
2974 for (i = 0; i < num_pipes; i++) {
2975 pipe_ctx[i]->stream_res.tg->funcs->set_drr(
2976 			pipe_ctx[i]->stream_res.tg, &params);
2977 if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
2978 pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
2979 pipe_ctx[i]->stream_res.tg,
2980 event_triggers, num_frames);
2981 }
2982 }
2983
2984 void dcn10_get_position(struct pipe_ctx **pipe_ctx,
2985 int num_pipes,
2986 struct crtc_position *position)
2987 {
2988 int i = 0;
2989
2990 /* TODO: handle pipes > 1
2991 */
2992 for (i = 0; i < num_pipes; i++)
2993 pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
2994 }
2995
2996 void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
2997 int num_pipes, const struct dc_static_screen_params *params)
2998 {
2999 unsigned int i;
3000 unsigned int triggers = 0;
3001
3002 if (params->triggers.surface_update)
3003 triggers |= 0x80;
3004 if (params->triggers.cursor_update)
3005 triggers |= 0x2;
3006 if (params->triggers.force_trigger)
3007 triggers |= 0x1;
3008
3009 for (i = 0; i < num_pipes; i++)
3010 pipe_ctx[i]->stream_res.tg->funcs->
3011 set_static_screen_control(pipe_ctx[i]->stream_res.tg,
3012 triggers, params->num_frames);
3013 }
3014
3015 static void dcn10_config_stereo_parameters(
3016 struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
3017 {
3018 enum view_3d_format view_format = stream->view_format;
3019 enum dc_timing_3d_format timing_3d_format =\
3020 stream->timing.timing_3d_format;
3021 bool non_stereo_timing = false;
3022
3023 if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
3024 timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
3025 timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
3026 non_stereo_timing = true;
3027
3028 if (non_stereo_timing == false &&
3029 view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
3030
3031 flags->PROGRAM_STEREO = 1;
3032 flags->PROGRAM_POLARITY = 1;
3033 if (timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
3034 timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
3035 timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3036 enum display_dongle_type dongle = \
3037 stream->link->ddc->dongle_type;
3038 if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
3039 dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
3040 dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
3041 flags->DISABLE_STEREO_DP_SYNC = 1;
3042 }
3043 flags->RIGHT_EYE_POLARITY =\
3044 stream->timing.flags.RIGHT_EYE_3D_POLARITY;
3045 if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
3046 flags->FRAME_PACKED = 1;
3047 }
3048
3049 return;
3050 }
3051
3052 void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
3053 {
3054 struct crtc_stereo_flags flags = { 0 };
3055 struct dc_stream_state *stream = pipe_ctx->stream;
3056
3057 dcn10_config_stereo_parameters(stream, &flags);
3058
3059 if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3060 if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
3061 dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3062 } else {
3063 dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3064 }
3065
3066 pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
3067 pipe_ctx->stream_res.opp,
3068 flags.PROGRAM_STEREO == 1,
3069 &stream->timing);
3070
3071 pipe_ctx->stream_res.tg->funcs->program_stereo(
3072 pipe_ctx->stream_res.tg,
3073 &stream->timing,
3074 &flags);
3075
3076 return;
3077 }
3078
3079 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
3080 {
3081 int i;
3082
3083 for (i = 0; i < res_pool->pipe_count; i++) {
3084 if (res_pool->hubps[i]->inst == mpcc_inst)
3085 return res_pool->hubps[i];
3086 }
3087 ASSERT(false);
3088 return NULL;
3089 }
3090
3091 void dcn10_wait_for_mpcc_disconnect(
3092 struct dc *dc,
3093 struct resource_pool *res_pool,
3094 struct pipe_ctx *pipe_ctx)
3095 {
3096 struct dce_hwseq *hws = dc->hwseq;
3097 int mpcc_inst;
3098
3099 if (dc->debug.sanity_checks) {
3100 hws->funcs.verify_allow_pstate_change_high(dc);
3101 }
3102
3103 if (!pipe_ctx->stream_res.opp)
3104 return;
3105
3106 for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
3107 if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
3108 struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
3109
3110 res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
3111 pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
3112 hubp->funcs->set_blank(hubp, true);
3113 }
3114 }
3115
3116 if (dc->debug.sanity_checks) {
3117 hws->funcs.verify_allow_pstate_change_high(dc);
3118 }
3119
3120 }
3121
3122 bool dcn10_dummy_display_power_gating(
3123 struct dc *dc,
3124 uint8_t controller_id,
3125 struct dc_bios *dcb,
3126 enum pipe_gating_control power_gating)
3127 {
3128 return true;
3129 }
3130
3131 void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
3132 {
3133 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
3134 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3135 bool flip_pending;
3136 	struct dc *dc;
3137 
3138 	if (plane_state == NULL)
3139 		return;
3140 	dc = plane_state->ctx->dc;
3141 	flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
3142 pipe_ctx->plane_res.hubp);
3143
3144 plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
3145
3146 if (!flip_pending)
3147 plane_state->status.current_address = plane_state->status.requested_address;
3148
3149 if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
3150 tg->funcs->is_stereo_left_eye) {
3151 plane_state->status.is_right_eye =
3152 !tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
3153 }
3154
3155 if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
3156 struct dce_hwseq *hwseq = dc->hwseq;
3157 struct timing_generator *tg = dc->res_pool->timing_generators[0];
3158 unsigned int cur_frame = tg->funcs->get_frame_count(tg);
3159
3160 if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
3161 struct hubbub *hubbub = dc->res_pool->hubbub;
3162
3163 hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
3164 hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
3165 }
3166 }
3167 }
3168
3169 void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
3170 {
3171 struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
3172
3173 /* In DCN, this programming sequence is owned by the hubbub */
3174 hubbub->funcs->update_dchub(hubbub, dh_data);
3175 }
3176
3177 static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
3178 {
3179 struct pipe_ctx *test_pipe;
3180 const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
3181 const struct rect *r1 = &scl_data->recout, *r2;
3182 int r1_r = r1->x + r1->width, r1_b = r1->y + r1->height, r2_r, r2_b;
3183 int cur_layer = pipe_ctx->plane_state->layer_index;
3184 bool upper_pipe_exists = false;
3185 struct fixed31_32 one = dc_fixpt_from_int(1);
3186
3187 /**
3188 * Disable the cursor if there's another pipe above this with a
3189 * plane that contains this pipe's viewport to prevent double cursor
3190 * and incorrect scaling artifacts.
3191 */
3192 for (test_pipe = pipe_ctx->top_pipe; test_pipe;
3193 test_pipe = test_pipe->top_pipe) {
3194 if (!test_pipe->plane_state->visible)
3195 continue;
3196
3197 r2 = &test_pipe->plane_res.scl_data.recout;
3198 r2_r = r2->x + r2->width;
3199 r2_b = r2->y + r2->height;
3200
3201 if (r1->x >= r2->x && r1->y >= r2->y && r1_r <= r2_r && r1_b <= r2_b)
3202 return true;
3203
3204 if (test_pipe->plane_state->layer_index < cur_layer)
3205 upper_pipe_exists = true;
3206 }
3207
3208 // if plane scaled, assume an upper plane can handle cursor if it exists.
3209 if (upper_pipe_exists &&
3210 (scl_data->ratios.horz.value != one.value ||
3211 scl_data->ratios.vert.value != one.value))
3212 return true;
3213
3214 return false;
3215 }
3216
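/*
 * Program the hardware cursor position for this pipe. Translates the
 * stream-space position requested by DC into plane space, accounting for
 * plane scaling, clipped source viewports, rotation (90/180/270 degrees)
 * and pipe-split/ODM-combine layouts, then hands the result to HUBP and DPP.
 */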
void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
{
	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_cursor_mi_param param = {
		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
		.viewport = pipe_ctx->plane_res.scl_data.viewport,
		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
		.rotation = pipe_ctx->plane_state->rotation,
		.mirror = pipe_ctx->plane_state->horizontal_mirror
	};
	bool pipe_split_on = (pipe_ctx->top_pipe != NULL) ||
		(pipe_ctx->bottom_pipe != NULL);
	bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
		(pipe_ctx->prev_odm_pipe != NULL);

	int x_plane = pipe_ctx->plane_state->dst_rect.x;
	int y_plane = pipe_ctx->plane_state->dst_rect.y;
	int x_pos = pos_cpy.x;
	int y_pos = pos_cpy.y;

	/**
	 * DC cursor is stream space, HW cursor is plane space and drawn
	 * as part of the framebuffer.
	 *
	 * Cursor position can't be negative, but hotspot can be used to
	 * shift cursor out of the plane bounds. Hotspot must be smaller
	 * than the cursor size.
	 */

	/**
	 * Translate cursor from stream space to plane space.
	 *
	 * If the cursor is scaled then we need to scale the position
	 * to be in the approximately correct place. We can't do anything
	 * about the actual size being incorrect, that's a limitation of
	 * the hardware.
	 */
	if (param.rotation == ROTATION_ANGLE_90 || param.rotation == ROTATION_ANGLE_270) {
		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.height /
				pipe_ctx->plane_state->dst_rect.width;
		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.width /
				pipe_ctx->plane_state->dst_rect.height;
	} else {
		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
				pipe_ctx->plane_state->dst_rect.width;
		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
				pipe_ctx->plane_state->dst_rect.height;
	}

	/**
	 * If the cursor's source viewport is clipped then we need to
	 * translate the cursor to appear in the correct position on
	 * the screen.
	 *
	 * This translation isn't affected by scaling so it needs to be
	 * done *after* we adjust the position for the scale factor.
	 *
	 * This is only done by opt-in for now since there are still
	 * some usecases like tiled display that might enable the
	 * cursor on both streams while expecting dc to clip it.
	 */
	if (pos_cpy.translate_by_source) {
		x_pos += pipe_ctx->plane_state->src_rect.x;
		y_pos += pipe_ctx->plane_state->src_rect.y;
	}

	/**
	 * If the position is negative then we need to add to the hotspot
	 * to shift the cursor outside the plane.
	 */

	if (x_pos < 0) {
		pos_cpy.x_hotspot -= x_pos;
		x_pos = 0;
	}

	if (y_pos < 0) {
		pos_cpy.y_hotspot -= y_pos;
		y_pos = 0;
	}

	pos_cpy.x = (uint32_t)x_pos;
	pos_cpy.y = (uint32_t)y_pos;

	if (pipe_ctx->plane_state->address.type
			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
		pos_cpy.enable = false;

	if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
		pos_cpy.enable = false;

	// Swap axis and mirror horizontally
	if (param.rotation == ROTATION_ANGLE_90) {
		uint32_t temp_x = pos_cpy.x;

		pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
				(pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
		pos_cpy.y = temp_x;
	}
	// Swap axis and mirror vertically
	else if (param.rotation == ROTATION_ANGLE_270) {
		uint32_t temp_y = pos_cpy.y;
		int viewport_height =
			pipe_ctx->plane_res.scl_data.viewport.height;
		int viewport_y =
			pipe_ctx->plane_res.scl_data.viewport.y;

		/**
		 * Display groups that are 1xnY, have pos_cpy.x > 2 * viewport.height
		 * For pipe split cases:
		 * - apply offset of viewport.y to normalize pos_cpy.x
		 * - calculate the pos_cpy.y as before
		 * - shift pos_cpy.y back by same offset to get final value
		 * - since we iterate through both pipes, use the lower
		 *   viewport.y for offset
		 * For non pipe split cases, use the same calculation for
		 * pos_cpy.y as the 180 degree rotation case below,
		 * but use pos_cpy.x as our input because we are rotating
		 * 270 degrees
		 */
		if (pipe_split_on || odm_combine_on) {
			int pos_cpy_x_offset;
			int other_pipe_viewport_y;

			if (pipe_split_on) {
				if (pipe_ctx->bottom_pipe) {
					other_pipe_viewport_y =
						pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
				} else {
					other_pipe_viewport_y =
						pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
				}
			} else {
				if (pipe_ctx->next_odm_pipe) {
					other_pipe_viewport_y =
						pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
				} else {
					other_pipe_viewport_y =
						pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
				}
			}
			pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
				other_pipe_viewport_y : viewport_y;
			pos_cpy.x -= pos_cpy_x_offset;
			if (pos_cpy.x > viewport_height) {
				pos_cpy.x = pos_cpy.x - viewport_height;
				pos_cpy.y = viewport_height - pos_cpy.x;
			} else {
				pos_cpy.y = 2 * viewport_height - pos_cpy.x;
			}
			pos_cpy.y += pos_cpy_x_offset;
		} else {
			pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;
		}
		pos_cpy.x = temp_y;
	}
	// Mirror horizontally and vertically
	else if (param.rotation == ROTATION_ANGLE_180) {
		int viewport_width =
			pipe_ctx->plane_res.scl_data.viewport.width;
		int viewport_x =
			pipe_ctx->plane_res.scl_data.viewport.x;

		if (pipe_split_on || odm_combine_on) {
			if (pos_cpy.x >= viewport_width + viewport_x) {
				pos_cpy.x = 2 * viewport_width
						- pos_cpy.x + 2 * viewport_x;
			} else {
				uint32_t temp_x = pos_cpy.x;

				pos_cpy.x = 2 * viewport_x - pos_cpy.x;
				if (temp_x >= viewport_x +
					(int)hubp->curs_attr.width || pos_cpy.x
					<= (int)hubp->curs_attr.width +
					pipe_ctx->plane_state->src_rect.x) {
					pos_cpy.x = temp_x + viewport_width;
				}
			}
		} else {
			pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
		}

		/**
		 * Display groups that are 1xnY, have pos_cpy.y > viewport.height
		 * Calculation:
		 *   delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
		 *   pos_cpy.y_new = viewport.y + delta_from_bottom
		 * Simplify it as:
		 *   pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
		 */
		pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
			pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
	}

	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
}

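/*
 * Push the stream's cursor attributes (address, size, color format) to the
 * HUBP and DPP blocks that make up this pipe.
 */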
void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
{
	struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;

	pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
			pipe_ctx->plane_res.hubp, attributes);
	pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
			pipe_ctx->plane_res.dpp, attributes);
}

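/*
 * Scale the SDR cursor for HDR output: convert the stream's SDR white level
 * (relative to the 80 nit reference used here) into a custom-float multiplier
 * and program it as an optional DPP cursor attribute.
 */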
void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
{
	uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
	struct fixed31_32 multiplier;
	struct dpp_cursor_attributes opt_attr = { 0 };
	uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
	struct custom_float_format fmt;

	if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
		return;

	fmt.exponenta_bits = 5;
	fmt.mantissa_bits = 10;
	fmt.sign = true;

	if (sdr_white_level > 80) {
		multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
		convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
	}

	opt_attr.scale = hw_scale;
	opt_attr.bias = 0;

	pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
			pipe_ctx->plane_res.dpp, &opt_attr);
}

/*
 * apply_front_porch_workaround TODO FPGA still need?
 *
 * This is a workaround for a bug that has existed since R5xx and has not
 * been fixed: keep the front porch at a minimum of 2 for interlaced mode
 * and 1 for progressive.
 */
static void apply_front_porch_workaround(
	struct dc_crtc_timing *timing)
{
	if (timing->flags.INTERLACE == 1) {
		if (timing->v_front_porch < 2)
			timing->v_front_porch = 2;
	} else {
		if (timing->v_front_porch < 1)
			timing->v_front_porch = 1;
	}
}

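/*
 * Return the VUPDATE position as a line offset from VSYNC, derived from the
 * stream timing (after the front porch workaround) and the pipe's
 * vstartup_start DLG parameter. The result may be negative.
 */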
int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
{
	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
	struct dc_crtc_timing patched_crtc_timing;
	int vesa_sync_start;
	int asic_blank_end;
	int interlace_factor;
	int vertical_line_start;

	patched_crtc_timing = *dc_crtc_timing;
	apply_front_porch_workaround(&patched_crtc_timing);

	interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;

	vesa_sync_start = patched_crtc_timing.v_addressable +
			patched_crtc_timing.v_border_bottom +
			patched_crtc_timing.v_front_porch;

	asic_blank_end = (patched_crtc_timing.v_total -
			vesa_sync_start -
			patched_crtc_timing.v_border_top)
			* interlace_factor;

	vertical_line_start = asic_blank_end -
			pipe_ctx->pipe_dlg_param.vstartup_start + 1;

	return vertical_line_start;
}

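/*
 * Compute the start/end lines of the two-line window for a periodic
 * interrupt referenced to VUPDATE, wrapping the start into the previous
 * frame when the combined offset is negative.
 */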
void dcn10_calc_vupdate_position(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		uint32_t *start_line,
		uint32_t *end_line)
{
	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
	int vline_int_offset_from_vupdate =
			pipe_ctx->stream->periodic_interrupt0.lines_offset;
	int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
	int start_position;

	if (vline_int_offset_from_vupdate > 0)
		vline_int_offset_from_vupdate--;
	else if (vline_int_offset_from_vupdate < 0)
		vline_int_offset_from_vupdate++;

	start_position = vline_int_offset_from_vupdate + vupdate_offset_from_vsync;

	if (start_position >= 0)
		*start_line = start_position;
	else
		*start_line = dc_crtc_timing->v_total + start_position - 1;

	*end_line = *start_line + 2;

	if (*end_line >= dc_crtc_timing->v_total)
		*end_line = 2;
}

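/*
 * Resolve the interrupt window for the requested vline based on the stream's
 * reference point: relative to VUPDATE it is calculated above, relative to
 * VSYNC nothing needs to be done (line 0).
 */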
static void dcn10_cal_vline_position(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		enum vline_select vline,
		uint32_t *start_line,
		uint32_t *end_line)
{
	enum vertical_interrupt_ref_point ref_point = INVALID_POINT;

	if (vline == VLINE0)
		ref_point = pipe_ctx->stream->periodic_interrupt0.ref_point;
	else if (vline == VLINE1)
		ref_point = pipe_ctx->stream->periodic_interrupt1.ref_point;

	switch (ref_point) {
	case START_V_UPDATE:
		dcn10_calc_vupdate_position(
				dc,
				pipe_ctx,
				start_line,
				end_line);
		break;
	case START_V_SYNC:
		// Supposed to do nothing because vsync is 0
		break;
	default:
		ASSERT(0);
		break;
	}
}

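/*
 * Program the timing generator's vertical interrupt for the requested vline:
 * VLINE0 uses the computed start/end window, VLINE1 uses the raw line offset
 * carried in the stream's periodic_interrupt1 settings.
 */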
void dcn10_setup_periodic_interrupt(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		enum vline_select vline)
{
	struct timing_generator *tg = pipe_ctx->stream_res.tg;

	if (vline == VLINE0) {
		uint32_t start_line = 0;
		uint32_t end_line = 0;

		dcn10_cal_vline_position(dc, pipe_ctx, vline, &start_line, &end_line);

		tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);

	} else if (vline == VLINE1) {
		pipe_ctx->stream_res.tg->funcs->setup_vertical_interrupt1(
				tg,
				pipe_ctx->stream->periodic_interrupt1.lines_offset);
	}
}

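/*
 * Arm vertical interrupt 2 at the VUPDATE line; a negative offset from vsync
 * is treated as an error and clamped to line 0.
 */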
void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct timing_generator *tg = pipe_ctx->stream_res.tg;
	int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);

	if (start_line < 0) {
		ASSERT(0);
		start_line = 0;
	}

	if (tg->funcs->setup_vertical_interrupt2)
		tg->funcs->setup_vertical_interrupt2(tg, start_line);
}

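/*
 * Unblank the stream on its DP stream encoder (halving the pixel clock for
 * YCbCr 4:2:0) and re-enable the backlight for local eDP sinks.
 */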
void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
		struct dc_link_settings *link_settings)
{
	struct encoder_unblank_param params = { { 0 } };
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct dc_link *link = stream->link;
	struct dce_hwseq *hws = link->dc->hwseq;

	/* only 3 items below are used by unblank */
	params.timing = pipe_ctx->stream->timing;

	params.link_settings.link_rate = link_settings->link_rate;

	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
		if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
			params.timing.pix_clk_100hz /= 2;
		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
	}

	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
		hws->funcs.edp_backlight_control(link, true);
	}
}

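/*
 * Send a caller-supplied SDP message immediately through the DP stream
 * encoder; non-DP signals are ignored.
 */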
void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
		const uint8_t *custom_sdp_message,
		unsigned int sdp_message_size)
{
	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
		pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
				pipe_ctx->stream_res.stream_enc,
				custom_sdp_message,
				sdp_message_size);
	}
}
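
/*
 * Validate a requested DISPCLK/DPPCLK value against the clock manager's
 * reported range, record it in the current context, and ask the clock
 * manager to apply it.
 */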
enum dc_status dcn10_set_clock(struct dc *dc,
		enum dc_clock_type clock_type,
		uint32_t clk_khz,
		uint32_t stepping)
{
	struct dc_state *context = dc->current_state;
	struct dc_clock_config clock_cfg = {0};
	struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;

	if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
		return DC_FAIL_UNSUPPORTED_1;

	dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
		context, clock_type, &clock_cfg);

	if (clk_khz > clock_cfg.max_clock_khz)
		return DC_FAIL_CLK_EXCEED_MAX;

	if (clk_khz < clock_cfg.min_clock_khz)
		return DC_FAIL_CLK_BELOW_MIN;

	if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
		return DC_FAIL_CLK_BELOW_CFG_REQUIRED;

	/* update internal request clock for update clock use */
	if (clock_type == DC_CLOCK_TYPE_DISPCLK)
		current_clocks->dispclk_khz = clk_khz;
	else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
		current_clocks->dppclk_khz = clk_khz;
	else
		return DC_ERROR_UNEXPECTED;

	if (dc->clk_mgr->funcs->update_clocks)
		dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
			context, true);
	return DC_OK;
}

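/*
 * Query the clock manager for the current configuration of the given clock
 * type; a no-op when the clock manager does not implement get_clock.
 */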
void dcn10_get_clock(struct dc *dc,
		enum dc_clock_type clock_type,
		struct dc_clock_config *clock_cfg)
{
	struct dc_state *context = dc->current_state;

	if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
		dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
}

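/*
 * Read back HUBP state for every pipe and report, per pipe, whether DCC is
 * enabled on the surface currently being scanned out; blanked pipes are
 * skipped.
 */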
void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
{
	struct resource_pool *pool = dc->res_pool;
	int i;

	for (i = 0; i < pool->pipe_count; i++) {
		struct hubp *hubp = pool->hubps[i];
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);

		hubp->funcs->hubp_read_state(hubp);

		if (!s->blank_en)
			dcc_en_bits[i] = s->dcc_en ? 1 : 0;
	}
}