1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
30 #include "resource.h"
31 #include "custom_float.h"
32 #include "dcn10_hw_sequencer.h"
33 #include "dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
35 #include "abm.h"
36 #include "dmcu.h"
37 #include "dcn10_optc.h"
38 #include "dcn10_dpp.h"
39 #include "dcn10_mpc.h"
40 #include "timing_generator.h"
41 #include "opp.h"
42 #include "ipp.h"
43 #include "mpc.h"
44 #include "reg_helper.h"
45 #include "dcn10_hubp.h"
46 #include "dcn10_hubbub.h"
47 #include "dcn10_cm_common.h"
48 #include "dc_link_dp.h"
49 #include "dccg.h"
50 #include "clk_mgr.h"
51 #include "link_hwss.h"
52 #include "dpcd_defs.h"
53 #include "dsc.h"
54 #include "dce/dmub_hw_lock_mgr.h"
55 
56 #define DC_LOGGER_INIT(logger)
57 
58 #define CTX \
59 	hws->ctx
60 #define REG(reg)\
61 	hws->regs->reg
62 
63 #undef FN
64 #define FN(reg_name, field_name) \
65 	hws->shifts->field_name, hws->masks->field_name
66 
67 /* print is 17 characters wide; the first two characters are spaces */
68 #define DTN_INFO_MICRO_SEC(ref_cycle) \
69 	print_microsec(dc_ctx, log_ctx, ref_cycle)
70 
71 #define GAMMA_HW_POINTS_NUM 256
72 
73 void print_microsec(struct dc_context *dc_ctx,
74 	struct dc_log_buffer_ctx *log_ctx,
75 	uint32_t ref_cycle)
76 {
77 	const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
78 	static const unsigned int frac = 1000;
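	/*
	 * ref_cycle / ref_clk_mhz gives whole microseconds; scaling by 1000
	 * first keeps three fractional digits through the integer division
	 * for the print below.
	 */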
79 	uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
80 
81 	DTN_INFO("  %11d.%03d",
82 			us_x10 / frac,
83 			us_x10 % frac);
84 }
85 
86 void dcn10_lock_all_pipes(struct dc *dc,
87 	struct dc_state *context,
88 	bool lock)
89 {
90 	struct pipe_ctx *pipe_ctx;
91 	struct timing_generator *tg;
92 	int i;
93 
94 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
95 		pipe_ctx = &context->res_ctx.pipe_ctx[i];
96 		tg = pipe_ctx->stream_res.tg;
97 
98 		/*
99 		 * Only lock the top pipe's tg to prevent redundant
100 		 * (un)locking. Also skip if pipe is disabled.
101 		 */
102 		if (pipe_ctx->top_pipe ||
103 		    !pipe_ctx->stream || !pipe_ctx->plane_state ||
104 		    !tg->funcs->is_tg_enabled(tg))
105 			continue;
106 
107 		if (lock)
108 			dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
109 		else
110 			dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
111 	}
112 }
113 
114 static void log_mpc_crc(struct dc *dc,
115 	struct dc_log_buffer_ctx *log_ctx)
116 {
117 	struct dc_context *dc_ctx = dc->ctx;
118 	struct dce_hwseq *hws = dc->hwseq;
119 
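	/*
	 * Not every ASIC exposes these CRC registers; a zero offset in the
	 * register list means the register is absent, so skip the read.
	 */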
120 	if (REG(MPC_CRC_RESULT_GB))
121 		DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
122 		REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
123 	if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
124 		DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
125 		REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
126 }
127 
128 void dcn10_log_hubbub_state(struct dc *dc, struct dc_log_buffer_ctx *log_ctx)
129 {
130 	struct dc_context *dc_ctx = dc->ctx;
131 	struct dcn_hubbub_wm wm;
132 	int i;
133 
134 	memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
135 	dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
136 
137 	DTN_INFO("HUBBUB WM:      data_urgent  pte_meta_urgent"
138 			"         sr_enter          sr_exit  dram_clk_change\n");
139 
140 	for (i = 0; i < 4; i++) {
141 		struct dcn_hubbub_wm_set *s;
142 
143 		s = &wm.sets[i];
144 		DTN_INFO("WM_Set[%d]:", s->wm_set);
145 		DTN_INFO_MICRO_SEC(s->data_urgent);
146 		DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
147 		DTN_INFO_MICRO_SEC(s->sr_enter);
148 		DTN_INFO_MICRO_SEC(s->sr_exit);
149 		DTN_INFO_MICRO_SEC(s->dram_clk_chanage);
150 		DTN_INFO("\n");
151 	}
152 
153 	DTN_INFO("\n");
154 }
155 
156 static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
157 {
158 	struct dc_context *dc_ctx = dc->ctx;
159 	struct resource_pool *pool = dc->res_pool;
160 	int i;
161 
162 	DTN_INFO(
163 		"HUBP:  format  addr_hi  width  height  rot  mir  sw_mode  dcc_en  blank_en  clock_en  ttu_dis  underflow   min_ttu_vblank       qos_low_wm      qos_high_wm\n");
164 	for (i = 0; i < pool->pipe_count; i++) {
165 		struct hubp *hubp = pool->hubps[i];
166 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
167 
168 		hubp->funcs->hubp_read_state(hubp);
169 
170 		if (!s->blank_en) {
171 			DTN_INFO("[%2d]:  %5xh  %6xh  %5d  %6d  %2xh  %2xh  %6xh  %6d  %8d  %8d  %7d  %8xh",
172 					hubp->inst,
173 					s->pixel_format,
174 					s->inuse_addr_hi,
175 					s->viewport_width,
176 					s->viewport_height,
177 					s->rotation_angle,
178 					s->h_mirror_en,
179 					s->sw_mode,
180 					s->dcc_en,
181 					s->blank_en,
182 					s->clock_en,
183 					s->ttu_disable,
184 					s->underflow_status);
185 			DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
186 			DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
187 			DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
188 			DTN_INFO("\n");
189 		}
190 	}
191 
192 	DTN_INFO("\n=========RQ========\n");
193 	DTN_INFO("HUBP:  drq_exp_m  prq_exp_m  mrq_exp_m  crq_exp_m  plane1_ba  L:chunk_s  min_chu_s  meta_ch_s"
194 		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h  C:chunk_s  min_chu_s  meta_ch_s"
195 		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h\n");
196 	for (i = 0; i < pool->pipe_count; i++) {
197 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
198 		struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
199 
200 		if (!s->blank_en)
201 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
202 				pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
203 				rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
204 				rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
205 				rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
206 				rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
207 				rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
208 				rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
209 				rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
210 				rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
211 	}
212 
213 	DTN_INFO("========DLG========\n");
214 	DTN_INFO("HUBP:  rc_hbe     dlg_vbe    min_d_y_n  rc_per_ht  rc_x_a_s "
215 			"  dst_y_a_s  dst_y_pf   dst_y_vvb  dst_y_rvb  dst_y_vfl  dst_y_rfl  rf_pix_fq"
216 			"  vratio_pf  vrat_pf_c  rc_pg_vbl  rc_pg_vbc  rc_mc_vbl  rc_mc_vbc  rc_pg_fll"
217 			"  rc_pg_flc  rc_mc_fll  rc_mc_flc  pr_nom_l   pr_nom_c   rc_pg_nl   rc_pg_nc "
218 			"  mr_nom_l   mr_nom_c   rc_mc_nl   rc_mc_nc   rc_ld_pl   rc_ld_pc   rc_ld_l  "
219 			"  rc_ld_c    cha_cur0   ofst_cur1  cha_cur1   vr_af_vc0  ddrq_limt  x_rt_dlay"
220 			"  x_rp_dlay  x_rr_sfl\n");
221 	for (i = 0; i < pool->pipe_count; i++) {
222 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
223 		struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
224 
225 		if (!s->blank_en)
226 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
227 				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
228 				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
229 				pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
230 				dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
231 				dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
232 				dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
233 				dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
234 				dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
235 				dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
236 				dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
237 				dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
238 				dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
239 				dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
240 				dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
241 				dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
242 				dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
243 				dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
244 				dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
245 				dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
246 				dlg_regs->xfc_reg_remote_surface_flip_latency);
247 	}
248 
249 	DTN_INFO("========TTU========\n");
250 	DTN_INFO("HUBP:  qos_ll_wm  qos_lh_wm  mn_ttu_vb  qos_l_flp  rc_rd_p_l  rc_rd_l    rc_rd_p_c"
251 			"  rc_rd_c    rc_rd_c0   rc_rd_pc0  rc_rd_c1   rc_rd_pc1  qos_lf_l   qos_rds_l"
252 			"  qos_lf_c   qos_rds_c  qos_lf_c0  qos_rds_c0 qos_lf_c1  qos_rds_c1\n");
253 	for (i = 0; i < pool->pipe_count; i++) {
254 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
255 		struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
256 
257 		if (!s->blank_en)
258 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
259 				pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
260 				ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
261 				ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
262 				ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
263 				ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
264 				ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
265 				ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
266 	}
267 	DTN_INFO("\n");
268 }
269 
270 void dcn10_log_hw_state(struct dc *dc,
271 	struct dc_log_buffer_ctx *log_ctx)
272 {
273 	struct dc_context *dc_ctx = dc->ctx;
274 	struct resource_pool *pool = dc->res_pool;
275 	int i;
276 
277 	DTN_INFO_BEGIN();
278 
279 	dcn10_log_hubbub_state(dc, log_ctx);
280 
281 	dcn10_log_hubp_states(dc, log_ctx);
282 
283 	DTN_INFO("DPP:    IGAM format  IGAM mode    DGAM mode    RGAM mode"
284 			"  GAMUT mode  C11 C12   C13 C14   C21 C22   C23 C24   "
285 			"C31 C32   C33 C34\n");
286 	for (i = 0; i < pool->pipe_count; i++) {
287 		struct dpp *dpp = pool->dpps[i];
288 		struct dcn_dpp_state s = {0};
289 
290 		dpp->funcs->dpp_read_state(dpp, &s);
291 
292 		if (!s.is_enabled)
293 			continue;
294 
295 		DTN_INFO("[%2d]:  %11xh  %-11s  %-11s  %-11s"
296 				"%8x    %08xh %08xh %08xh %08xh %08xh %08xh",
297 				dpp->inst,
298 				s.igam_input_format,
299 				(s.igam_lut_mode == 0) ? "BypassFixed" :
300 					((s.igam_lut_mode == 1) ? "BypassFloat" :
301 					((s.igam_lut_mode == 2) ? "RAM" :
302 					((s.igam_lut_mode == 3) ? "RAM" :
303 								 "Unknown"))),
304 				(s.dgam_lut_mode == 0) ? "Bypass" :
305 					((s.dgam_lut_mode == 1) ? "sRGB" :
306 					((s.dgam_lut_mode == 2) ? "Ycc" :
307 					((s.dgam_lut_mode == 3) ? "RAM" :
308 					((s.dgam_lut_mode == 4) ? "RAM" :
309 								 "Unknown")))),
310 				(s.rgam_lut_mode == 0) ? "Bypass" :
311 					((s.rgam_lut_mode == 1) ? "sRGB" :
312 					((s.rgam_lut_mode == 2) ? "Ycc" :
313 					((s.rgam_lut_mode == 3) ? "RAM" :
314 					((s.rgam_lut_mode == 4) ? "RAM" :
315 								 "Unknown")))),
316 				s.gamut_remap_mode,
317 				s.gamut_remap_c11_c12,
318 				s.gamut_remap_c13_c14,
319 				s.gamut_remap_c21_c22,
320 				s.gamut_remap_c23_c24,
321 				s.gamut_remap_c31_c32,
322 				s.gamut_remap_c33_c34);
323 		DTN_INFO("\n");
324 	}
325 	DTN_INFO("\n");
326 
327 	DTN_INFO("MPCC:  OPP  DPP  MPCCBOT  MODE  ALPHA_MODE  PREMULT  OVERLAP_ONLY  IDLE\n");
328 	for (i = 0; i < pool->pipe_count; i++) {
329 		struct mpcc_state s = {0};
330 
331 		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
332 		if (s.opp_id != 0xf)
333 			DTN_INFO("[%2d]:  %2xh  %2xh  %6xh  %4d  %10d  %7d  %12d  %4d\n",
334 				i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
335 				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
336 				s.idle);
337 	}
338 	DTN_INFO("\n");
339 
340 	DTN_INFO("OTG:  v_bs  v_be  v_ss  v_se  vpol  vmax  vmin  vmax_sel  vmin_sel  h_bs  h_be  h_ss  h_se  hpol  htot  vtot  underflow blank_en\n");
341 
342 	for (i = 0; i < pool->timing_generator_count; i++) {
343 		struct timing_generator *tg = pool->timing_generators[i];
344 		struct dcn_otg_state s = {0};
345 		/* Read shared OTG state registers for all DCNx */
346 		optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
347 
348 		/*
349 		 * For DCN2 and greater, a register on the OPP is used to
350 		 * determine if the CRTC is blanked instead of the OTG. So use
351 		 * dpg_is_blanked() if it exists, otherwise fall back on the OTG.
352 		 *
353 		 * TODO: Implement DCN-specific read_otg_state hooks.
354 		 */
355 		if (pool->opps[i]->funcs->dpg_is_blanked)
356 			s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
357 		else
358 			s.blank_enabled = tg->funcs->is_blanked(tg);
359 
360 		//only print if OTG master is enabled
361 		if ((s.otg_enabled & 1) == 0)
362 			continue;
363 
364 		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d  %9d %8d\n",
365 				tg->inst,
366 				s.v_blank_start,
367 				s.v_blank_end,
368 				s.v_sync_a_start,
369 				s.v_sync_a_end,
370 				s.v_sync_a_pol,
371 				s.v_total_max,
372 				s.v_total_min,
373 				s.v_total_max_sel,
374 				s.v_total_min_sel,
375 				s.h_blank_start,
376 				s.h_blank_end,
377 				s.h_sync_a_start,
378 				s.h_sync_a_end,
379 				s.h_sync_a_pol,
380 				s.h_total,
381 				s.v_total,
382 				s.underflow_occurred_status,
383 				s.blank_enabled);
384 
385 		// Clear underflow for debug purposes
386 		// We want to keep underflow sticky bit on for the longevity tests outside of test environment.
387 		// This function is called only from Windows or Diags test environment, hence it's safe to clear
388 		// it from here without affecting the original intent.
389 		tg->funcs->clear_optc_underflow(tg);
390 	}
391 	DTN_INFO("\n");
392 
393 	// dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
394 	// TODO: Update golden log header to reflect this name change
395 	DTN_INFO("DSC: CLOCK_EN  SLICE_WIDTH  Bytes_pp\n");
396 	for (i = 0; i < pool->res_cap->num_dsc; i++) {
397 		struct display_stream_compressor *dsc = pool->dscs[i];
398 		struct dcn_dsc_state s = {0};
399 
400 		dsc->funcs->dsc_read_state(dsc, &s);
401 		DTN_INFO("[%d]: %-9d %-12d %-10d\n",
402 			dsc->inst,
403 			s.dsc_clock_en,
404 			s.dsc_slice_width,
405 			s.dsc_bits_per_pixel);
406 		DTN_INFO("\n");
407 	}
408 	DTN_INFO("\n");
409 
410 	DTN_INFO("S_ENC: DSC_MODE  SEC_GSP7_LINE_NUM"
411 			"  VBID6_LINE_REFERENCE  VBID6_LINE_NUM  SEC_GSP7_ENABLE  SEC_STREAM_ENABLE\n");
412 	for (i = 0; i < pool->stream_enc_count; i++) {
413 		struct stream_encoder *enc = pool->stream_enc[i];
414 		struct enc_state s = {0};
415 
416 		if (enc->funcs->enc_read_state) {
417 			enc->funcs->enc_read_state(enc, &s);
418 			DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
419 				enc->id,
420 				s.dsc_mode,
421 				s.sec_gsp_pps_line_num,
422 				s.vbid6_line_reference,
423 				s.vbid6_line_num,
424 				s.sec_gsp_pps_enable,
425 				s.sec_stream_enable);
426 			DTN_INFO("\n");
427 		}
428 	}
429 	DTN_INFO("\n");
430 
431 	DTN_INFO("L_ENC: DPHY_FEC_EN  DPHY_FEC_READY_SHADOW  DPHY_FEC_ACTIVE_STATUS  DP_LINK_TRAINING_COMPLETE\n");
432 	for (i = 0; i < dc->link_count; i++) {
433 		struct link_encoder *lenc = dc->links[i]->link_enc;
434 
435 		struct link_enc_state s = {0};
436 
437 		if (lenc->funcs->read_state) {
438 			lenc->funcs->read_state(lenc, &s);
439 			DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
440 				i,
441 				s.dphy_fec_en,
442 				s.dphy_fec_ready_shadow,
443 				s.dphy_fec_active_status,
444 				s.dp_link_training_complete);
445 			DTN_INFO("\n");
446 		}
447 	}
448 	DTN_INFO("\n");
449 
450 	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d  dcfclk_deep_sleep_khz:%d  dispclk_khz:%d\n"
451 		"dppclk_khz:%d  max_supported_dppclk_khz:%d  fclk_khz:%d  socclk_khz:%d\n\n",
452 			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
453 			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
454 			dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
455 			dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
456 			dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
457 			dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
458 			dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
459 
460 	log_mpc_crc(dc, log_ctx);
461 
462 	DTN_INFO_END();
463 }
464 
465 bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
466 {
467 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
468 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
469 
470 	if (tg->funcs->is_optc_underflow_occurred(tg)) {
471 		tg->funcs->clear_optc_underflow(tg);
472 		return true;
473 	}
474 
475 	if (hubp->funcs->hubp_get_underflow_status(hubp)) {
476 		hubp->funcs->hubp_clear_underflow(hubp);
477 		return true;
478 	}
479 	return false;
480 }
481 
482 void dcn10_enable_power_gating_plane(
483 	struct dce_hwseq *hws,
484 	bool enable)
485 {
486 	bool force_on = true; /* disable power gating */
487 
488 	if (enable)
489 		force_on = false;
490 
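	/*
	 * POWER_FORCEON = 1 keeps a domain permanently powered (power gating
	 * disabled); 0 allows the domain to be power gated.
	 */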
491 	/* DCHUBP0/1/2/3 */
492 	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
493 	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
494 	REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
495 	REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
496 
497 	/* DPP0/1/2/3 */
498 	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
499 	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
500 	REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
501 	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
502 }
503 
504 void dcn10_disable_vga(
505 	struct dce_hwseq *hws)
506 {
507 	unsigned int in_vga1_mode = 0;
508 	unsigned int in_vga2_mode = 0;
509 	unsigned int in_vga3_mode = 0;
510 	unsigned int in_vga4_mode = 0;
511 
512 	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
513 	REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
514 	REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
515 	REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
516 
517 	if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
518 			in_vga3_mode == 0 && in_vga4_mode == 0)
519 		return;
520 
521 	REG_WRITE(D1VGA_CONTROL, 0);
522 	REG_WRITE(D2VGA_CONTROL, 0);
523 	REG_WRITE(D3VGA_CONTROL, 0);
524 	REG_WRITE(D4VGA_CONTROL, 0);
525 
526 	/* HW Engineer's Notes:
527 	 *  During switch from vga->extended, if we set the VGA_TEST_ENABLE and
528 	 *  then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
529 	 *
530 	 *  Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
531 	 *  VGA_TEST_ENABLE, to leave it in the same state as before.
532 	 */
533 	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
534 	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
535 }
536 
537 void dcn10_dpp_pg_control(
538 		struct dce_hwseq *hws,
539 		unsigned int dpp_inst,
540 		bool power_on)
541 {
542 	uint32_t power_gate = power_on ? 0 : 1;
543 	uint32_t pwr_status = power_on ? 0 : 2;
544 
545 	if (hws->ctx->dc->debug.disable_dpp_power_gate)
546 		return;
547 	if (REG(DOMAIN1_PG_CONFIG) == 0)
548 		return;
549 
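	/*
	 * The PGFSM status reads back 0 once the domain is powered up and 2
	 * once it is power gated; the REG_WAITs below poll for that value.
	 */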
550 	switch (dpp_inst) {
551 	case 0: /* DPP0 */
552 		REG_UPDATE(DOMAIN1_PG_CONFIG,
553 				DOMAIN1_POWER_GATE, power_gate);
554 
555 		REG_WAIT(DOMAIN1_PG_STATUS,
556 				DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
557 				1, 1000);
558 		break;
559 	case 1: /* DPP1 */
560 		REG_UPDATE(DOMAIN3_PG_CONFIG,
561 				DOMAIN3_POWER_GATE, power_gate);
562 
563 		REG_WAIT(DOMAIN3_PG_STATUS,
564 				DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
565 				1, 1000);
566 		break;
567 	case 2: /* DPP2 */
568 		REG_UPDATE(DOMAIN5_PG_CONFIG,
569 				DOMAIN5_POWER_GATE, power_gate);
570 
571 		REG_WAIT(DOMAIN5_PG_STATUS,
572 				DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
573 				1, 1000);
574 		break;
575 	case 3: /* DPP3 */
576 		REG_UPDATE(DOMAIN7_PG_CONFIG,
577 				DOMAIN7_POWER_GATE, power_gate);
578 
579 		REG_WAIT(DOMAIN7_PG_STATUS,
580 				DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
581 				1, 1000);
582 		break;
583 	default:
584 		BREAK_TO_DEBUGGER();
585 		break;
586 	}
587 }
588 
589 void dcn10_hubp_pg_control(
590 		struct dce_hwseq *hws,
591 		unsigned int hubp_inst,
592 		bool power_on)
593 {
594 	uint32_t power_gate = power_on ? 0 : 1;
595 	uint32_t pwr_status = power_on ? 0 : 2;
596 
597 	if (hws->ctx->dc->debug.disable_hubp_power_gate)
598 		return;
599 	if (REG(DOMAIN0_PG_CONFIG) == 0)
600 		return;
601 
602 	switch (hubp_inst) {
603 	case 0: /* DCHUBP0 */
604 		REG_UPDATE(DOMAIN0_PG_CONFIG,
605 				DOMAIN0_POWER_GATE, power_gate);
606 
607 		REG_WAIT(DOMAIN0_PG_STATUS,
608 				DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
609 				1, 1000);
610 		break;
611 	case 1: /* DCHUBP1 */
612 		REG_UPDATE(DOMAIN2_PG_CONFIG,
613 				DOMAIN2_POWER_GATE, power_gate);
614 
615 		REG_WAIT(DOMAIN2_PG_STATUS,
616 				DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
617 				1, 1000);
618 		break;
619 	case 2: /* DCHUBP2 */
620 		REG_UPDATE(DOMAIN4_PG_CONFIG,
621 				DOMAIN4_POWER_GATE, power_gate);
622 
623 		REG_WAIT(DOMAIN4_PG_STATUS,
624 				DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
625 				1, 1000);
626 		break;
627 	case 3: /* DCHUBP3 */
628 		REG_UPDATE(DOMAIN6_PG_CONFIG,
629 				DOMAIN6_POWER_GATE, power_gate);
630 
631 		REG_WAIT(DOMAIN6_PG_STATUS,
632 				DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
633 				1, 1000);
634 		break;
635 	default:
636 		BREAK_TO_DEBUGGER();
637 		break;
638 	}
639 }
640 
641 static void power_on_plane(
642 	struct dce_hwseq *hws,
643 	int plane_id)
644 {
645 	DC_LOGGER_INIT(hws->ctx->logger);
646 	if (REG(DC_IP_REQUEST_CNTL)) {
647 		REG_SET(DC_IP_REQUEST_CNTL, 0,
648 				IP_REQUEST_EN, 1);
649 		hws->funcs.dpp_pg_control(hws, plane_id, true);
650 		hws->funcs.hubp_pg_control(hws, plane_id, true);
651 		REG_SET(DC_IP_REQUEST_CNTL, 0,
652 				IP_REQUEST_EN, 0);
653 		DC_LOG_DEBUG(
654 				"Un-gated front end for pipe %d\n", plane_id);
655 	}
656 }
657 
658 static void undo_DEGVIDCN10_253_wa(struct dc *dc)
659 {
660 	struct dce_hwseq *hws = dc->hwseq;
661 	struct hubp *hubp = dc->res_pool->hubps[0];
662 
663 	if (!hws->wa_state.DEGVIDCN10_253_applied)
664 		return;
665 
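	/*
	 * Reverse the stutter workaround applied in apply_DEGVIDCN10_253_wa():
	 * blank HUBP0 again and power-gate its domain.
	 */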
666 	hubp->funcs->set_blank(hubp, true);
667 
668 	REG_SET(DC_IP_REQUEST_CNTL, 0,
669 			IP_REQUEST_EN, 1);
670 
671 	hws->funcs.hubp_pg_control(hws, 0, false);
672 	REG_SET(DC_IP_REQUEST_CNTL, 0,
673 			IP_REQUEST_EN, 0);
674 
675 	hws->wa_state.DEGVIDCN10_253_applied = false;
676 }
677 
678 static void apply_DEGVIDCN10_253_wa(struct dc *dc)
679 {
680 	struct dce_hwseq *hws = dc->hwseq;
681 	struct hubp *hubp = dc->res_pool->hubps[0];
682 	int i;
683 
684 	if (dc->debug.disable_stutter)
685 		return;
686 
687 	if (!hws->wa.DEGVIDCN10_253)
688 		return;
689 
690 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
691 		if (!dc->res_pool->hubps[i]->power_gated)
692 			return;
693 	}
694 
695 	/* All pipes are power gated; apply the workaround to enable stutter. */
696 
697 	REG_SET(DC_IP_REQUEST_CNTL, 0,
698 			IP_REQUEST_EN, 1);
699 
700 	hws->funcs.hubp_pg_control(hws, 0, true);
701 	REG_SET(DC_IP_REQUEST_CNTL, 0,
702 			IP_REQUEST_EN, 0);
703 
704 	hubp->funcs->set_hubp_blank_en(hubp, false);
705 	hws->wa_state.DEGVIDCN10_253_applied = true;
706 }
707 
708 void dcn10_bios_golden_init(struct dc *dc)
709 {
710 	struct dce_hwseq *hws = dc->hwseq;
711 	struct dc_bios *bp = dc->ctx->dc_bios;
712 	int i;
713 	bool allow_self_fresh_force_enable = true;
714 
715 	if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
716 		return;
717 
718 	if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
719 		allow_self_fresh_force_enable =
720 				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
721 
722 
723 	/* WA to let DF sleep when idle after resume from S0i3.
724 	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by the
725 	 * command table; if it was 0 before the command table was called
726 	 * and it changed to 1 afterwards, it should be set back to 0
727 	 * here.
728 	 */
729 
730 	/* initialize dcn global */
731 	bp->funcs->enable_disp_power_gating(bp,
732 			CONTROLLER_ID_D0, ASIC_PIPE_INIT);
733 
734 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
735 		/* initialize dcn per pipe */
736 		bp->funcs->enable_disp_power_gating(bp,
737 				CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
738 	}
739 
740 	if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
741 		if (allow_self_fresh_force_enable == false &&
742 				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
743 			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
744 										!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
745 
746 }
747 
748 static void false_optc_underflow_wa(
749 		struct dc *dc,
750 		const struct dc_stream_state *stream,
751 		struct timing_generator *tg)
752 {
753 	int i;
754 	bool underflow;
755 
756 	if (!dc->hwseq->wa.false_optc_underflow)
757 		return;
758 
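	/*
	 * Remember whether an underflow was already pending so that a false
	 * underflow raised while waiting for the MPCC disconnects below can
	 * be cleared without hiding a real, pre-existing one.
	 */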
759 	underflow = tg->funcs->is_optc_underflow_occurred(tg);
760 
761 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
762 		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
763 
764 		if (old_pipe_ctx->stream != stream)
765 			continue;
766 
767 		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
768 	}
769 
770 	if (tg->funcs->set_blank_data_double_buffer)
771 		tg->funcs->set_blank_data_double_buffer(tg, true);
772 
773 	if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
774 		tg->funcs->clear_optc_underflow(tg);
775 }
776 
777 enum dc_status dcn10_enable_stream_timing(
778 		struct pipe_ctx *pipe_ctx,
779 		struct dc_state *context,
780 		struct dc *dc)
781 {
782 	struct dc_stream_state *stream = pipe_ctx->stream;
783 	enum dc_color_space color_space;
784 	struct tg_color black_color = {0};
785 
786 	/* By the caller's loop order, pipe0 is the parent pipe and is handled
787 	 * first. The back end is set up for pipe0; the other child pipes share
788 	 * the back end with pipe0, so no programming is needed for them.
789 	 */
790 	if (pipe_ctx->top_pipe != NULL)
791 		return DC_OK;
792 
793 	/* TODO check if timing_changed, disable stream if timing changed */
794 
795 	/* The HW programming guide assumes the display was already disabled
796 	 * by the unplug sequence and that the OTG is stopped.
797 	 */
798 	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);
799 
800 	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
801 			pipe_ctx->clock_source,
802 			&pipe_ctx->stream_res.pix_clk_params,
803 			&pipe_ctx->pll_settings)) {
804 		BREAK_TO_DEBUGGER();
805 		return DC_ERROR_UNEXPECTED;
806 	}
807 
808 	pipe_ctx->stream_res.tg->funcs->program_timing(
809 			pipe_ctx->stream_res.tg,
810 			&stream->timing,
811 			pipe_ctx->pipe_dlg_param.vready_offset,
812 			pipe_ctx->pipe_dlg_param.vstartup_start,
813 			pipe_ctx->pipe_dlg_param.vupdate_offset,
814 			pipe_ctx->pipe_dlg_param.vupdate_width,
815 			pipe_ctx->stream->signal,
816 			true);
817 
818 #if 0 /* move to after enable_crtc */
819 	/* TODO: OPP FMT, ABM. etc. should be done here. */
820 	/* or FPGA now. instance 0 only. TODO: move to opp.c */
821 
822 	inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;
823 
824 	pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
825 				pipe_ctx->stream_res.opp,
826 				&stream->bit_depth_params,
827 				&stream->clamping);
828 #endif
829 	/* program otg blank color */
830 	color_space = stream->output_color_space;
831 	color_space_to_black_color(dc, color_space, &black_color);
832 
833 	/*
834 	 * The way 4:2:0 is packed, two channels carry the Y component and one
835 	 * channel alternates between Cb and Cr, so the Cr channel also needs
836 	 * the pixel value for Y.
837 	 */
838 	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
839 		black_color.color_r_cr = black_color.color_g_y;
840 
841 	if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
842 		pipe_ctx->stream_res.tg->funcs->set_blank_color(
843 				pipe_ctx->stream_res.tg,
844 				&black_color);
845 
846 	if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
847 			!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
848 		pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
849 		hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
850 		false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
851 	}
852 
853 	/* VTG is within the DCHUB command block. DCFCLK is always on. */
854 	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
855 		BREAK_TO_DEBUGGER();
856 		return DC_ERROR_UNEXPECTED;
857 	}
858 
859 	/* TODO program crtc source select for non-virtual signal*/
860 	/* TODO program FMT */
861 	/* TODO setup link_enc */
862 	/* TODO set stream attributes */
863 	/* TODO program audio */
864 	/* TODO enable stream if timing changed */
865 	/* TODO unblank stream if DP */
866 
867 	return DC_OK;
868 }
869 
870 static void dcn10_reset_back_end_for_pipe(
871 		struct dc *dc,
872 		struct pipe_ctx *pipe_ctx,
873 		struct dc_state *context)
874 {
875 	int i;
876 	struct dc_link *link;
877 	DC_LOGGER_INIT(dc->ctx->logger);
878 	if (pipe_ctx->stream_res.stream_enc == NULL) {
879 		pipe_ctx->stream = NULL;
880 		return;
881 	}
882 
883 	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
884 		link = pipe_ctx->stream->link;
885 		/* DPMS may already be disabled, or the dpms_off status may
886 		 * be stale because of the fastboot feature. When the system
887 		 * resumes from S4 with only a second screen, dpms_off is
888 		 * true even though VBIOS lit up eDP, so check the link
889 		 * status too.
890 		 */
891 		if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
892 			core_link_disable_stream(pipe_ctx);
893 		else if (pipe_ctx->stream_res.audio)
894 			dc->hwss.disable_audio_stream(pipe_ctx);
895 
896 		if (pipe_ctx->stream_res.audio) {
897 			/*disable az_endpoint*/
898 			pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
899 
900 			/* free audio */
901 			if (dc->caps.dynamic_audio == true) {
902 				/* audio endpoints are arbitrated dynamically: free the
903 				 * resource and let is_audio_acquired be reset */
904 				update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
905 						pipe_ctx->stream_res.audio, false);
906 				pipe_ctx->stream_res.audio = NULL;
907 			}
908 		}
909 	}
910 
911 	/* By the caller's loop order, the parent pipe (pipe0) is reset last.
912 	 * The back end is shared by all pipes and is disabled only when the
913 	 * parent pipe is disabled.
914 	 */
915 	if (pipe_ctx->top_pipe == NULL) {
916 
917 		if (pipe_ctx->stream_res.abm)
918 			dc->hwss.set_abm_immediate_disable(pipe_ctx);
919 
920 		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
921 
922 		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
923 		if (pipe_ctx->stream_res.tg->funcs->set_drr)
924 			pipe_ctx->stream_res.tg->funcs->set_drr(
925 					pipe_ctx->stream_res.tg, NULL);
926 	}
927 
928 	for (i = 0; i < dc->res_pool->pipe_count; i++)
929 		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
930 			break;
931 
932 	if (i == dc->res_pool->pipe_count)
933 		return;
934 
935 	pipe_ctx->stream = NULL;
936 	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
937 					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
938 }
939 
940 static bool dcn10_hw_wa_force_recovery(struct dc *dc)
941 {
942 	struct hubp *hubp;
943 	unsigned int i;
944 	bool need_recover = false;
945 
946 	if (!dc->debug.recovery_enabled)
947 		return false;
948 
949 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
950 		struct pipe_ctx *pipe_ctx =
951 			&dc->current_state->res_ctx.pipe_ctx[i];
952 		if (pipe_ctx != NULL) {
953 			hubp = pipe_ctx->plane_res.hubp;
954 			if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
955 				if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
956 					/* one pipe underflow, we will reset all the pipes*/
957 					need_recover = true;
958 				}
959 			}
960 		}
961 	}
962 	if (!need_recover)
963 		return false;
964 	/*
965 	DCHUBP_CNTL:HUBP_BLANK_EN=1
966 	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
967 	DCHUBP_CNTL:HUBP_DISABLE=1
968 	DCHUBP_CNTL:HUBP_DISABLE=0
969 	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
970 	DCSURF_PRIMARY_SURFACE_ADDRESS
971 	DCHUBP_CNTL:HUBP_BLANK_EN=0
972 	*/
973 
974 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
975 		struct pipe_ctx *pipe_ctx =
976 			&dc->current_state->res_ctx.pipe_ctx[i];
977 		if (pipe_ctx != NULL) {
978 			hubp = pipe_ctx->plane_res.hubp;
979 			/*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
980 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
981 				hubp->funcs->set_hubp_blank_en(hubp, true);
982 		}
983 	}
984 	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
985 	hubbub1_soft_reset(dc->res_pool->hubbub, true);
986 
987 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
988 		struct pipe_ctx *pipe_ctx =
989 			&dc->current_state->res_ctx.pipe_ctx[i];
990 		if (pipe_ctx != NULL) {
991 			hubp = pipe_ctx->plane_res.hubp;
992 			/*DCHUBP_CNTL:HUBP_DISABLE=1*/
993 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
994 				hubp->funcs->hubp_disable_control(hubp, true);
995 		}
996 	}
997 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
998 		struct pipe_ctx *pipe_ctx =
999 			&dc->current_state->res_ctx.pipe_ctx[i];
1000 		if (pipe_ctx != NULL) {
1001 			hubp = pipe_ctx->plane_res.hubp;
1002 			/*DCHUBP_CNTL:HUBP_DISABLE=0*/
1003 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
1004 				hubp->funcs->hubp_disable_control(hubp, false);
1005 		}
1006 	}
1007 	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
1008 	hubbub1_soft_reset(dc->res_pool->hubbub, false);
1009 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1010 		struct pipe_ctx *pipe_ctx =
1011 			&dc->current_state->res_ctx.pipe_ctx[i];
1012 		if (pipe_ctx != NULL) {
1013 			hubp = pipe_ctx->plane_res.hubp;
1014 			/*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
1015 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1016 				hubp->funcs->set_hubp_blank_en(hubp, false);
1017 		}
1018 	}
1019 	return true;
1020 
1021 }
1022 
1023 
1024 void dcn10_verify_allow_pstate_change_high(struct dc *dc)
1025 {
1026 	static bool should_log_hw_state; /* prevent hw state log by default */
1027 
1028 	if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub)) {
1029 		if (should_log_hw_state) {
1030 			dcn10_log_hw_state(dc, NULL);
1031 		}
1032 		BREAK_TO_DEBUGGER();
1033 		if (dcn10_hw_wa_force_recovery(dc)) {
1034 		/*check again*/
1035 			if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub))
1036 				BREAK_TO_DEBUGGER();
1037 		}
1038 	}
1039 }
1040 
1041 /* trigger HW to start disconnect plane from stream on the next vsync */
1042 void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
1043 {
1044 	struct dce_hwseq *hws = dc->hwseq;
1045 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
1046 	int dpp_id = pipe_ctx->plane_res.dpp->inst;
1047 	struct mpc *mpc = dc->res_pool->mpc;
1048 	struct mpc_tree *mpc_tree_params;
1049 	struct mpcc *mpcc_to_remove = NULL;
1050 	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
1051 
1052 	mpc_tree_params = &(opp->mpc_tree_params);
1053 	mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
1054 
1055 	/*Already reset*/
1056 	if (mpcc_to_remove == NULL)
1057 		return;
1058 
1059 	mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
1060 	if (opp != NULL)
1061 		opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1062 
1063 	dc->optimized_required = true;
1064 
1065 	if (hubp->funcs->hubp_disconnect)
1066 		hubp->funcs->hubp_disconnect(hubp);
1067 
1068 	if (dc->debug.sanity_checks)
1069 		hws->funcs.verify_allow_pstate_change_high(dc);
1070 }
1071 
1072 void dcn10_plane_atomic_power_down(struct dc *dc,
1073 		struct dpp *dpp,
1074 		struct hubp *hubp)
1075 {
1076 	struct dce_hwseq *hws = dc->hwseq;
1077 	DC_LOGGER_INIT(dc->ctx->logger);
1078 
1079 	if (REG(DC_IP_REQUEST_CNTL)) {
1080 		REG_SET(DC_IP_REQUEST_CNTL, 0,
1081 				IP_REQUEST_EN, 1);
1082 		hws->funcs.dpp_pg_control(hws, dpp->inst, false);
1083 		hws->funcs.hubp_pg_control(hws, hubp->inst, false);
1084 		dpp->funcs->dpp_reset(dpp);
1085 		REG_SET(DC_IP_REQUEST_CNTL, 0,
1086 				IP_REQUEST_EN, 0);
1087 		DC_LOG_DEBUG(
1088 				"Power gated front end %d\n", hubp->inst);
1089 	}
1090 }
1091 
1092 /* disable HW used by plane.
1093  * note:  cannot disable until disconnect is complete
1094  */
1095 void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
1096 {
1097 	struct dce_hwseq *hws = dc->hwseq;
1098 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
1099 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
1100 	int opp_id = hubp->opp_id;
1101 
1102 	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
1103 
1104 	hubp->funcs->hubp_clk_cntl(hubp, false);
1105 
1106 	dpp->funcs->dpp_dppclk_control(dpp, false, false);
1107 
1108 	if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
1109 		pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
1110 				pipe_ctx->stream_res.opp,
1111 				false);
1112 
1113 	hubp->power_gated = true;
1114 	dc->optimized_required = false; /* We're powering off, no need to optimize */
1115 
1116 	hws->funcs.plane_atomic_power_down(dc,
1117 			pipe_ctx->plane_res.dpp,
1118 			pipe_ctx->plane_res.hubp);
1119 
1120 	pipe_ctx->stream = NULL;
1121 	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
1122 	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
1123 	pipe_ctx->top_pipe = NULL;
1124 	pipe_ctx->bottom_pipe = NULL;
1125 	pipe_ctx->plane_state = NULL;
1126 }
1127 
1128 void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
1129 {
1130 	struct dce_hwseq *hws = dc->hwseq;
1131 	DC_LOGGER_INIT(dc->ctx->logger);
1132 
1133 	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1134 		return;
1135 
1136 	hws->funcs.plane_atomic_disable(dc, pipe_ctx);
1137 
1138 	apply_DEGVIDCN10_253_wa(dc);
1139 
1140 	DC_LOG_DC("Power down front end %d\n",
1141 					pipe_ctx->pipe_idx);
1142 }
1143 
1144 void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
1145 {
1146 	int i;
1147 	struct dce_hwseq *hws = dc->hwseq;
1148 	bool can_apply_seamless_boot = false;
1149 
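	/*
	 * Seamless boot is only possible when at least one stream requests
	 * it; in that case pipes that already carry a stream are left
	 * untouched by the loops below.
	 */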
1150 	for (i = 0; i < context->stream_count; i++) {
1151 		if (context->streams[i]->apply_seamless_boot_optimization) {
1152 			can_apply_seamless_boot = true;
1153 			break;
1154 		}
1155 	}
1156 
1157 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1158 		struct timing_generator *tg = dc->res_pool->timing_generators[i];
1159 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1160 
1161 		/* We assume pipe_ctx is not mapped irregularly to a
1162 		 * non-preferred front end. If pipe_ctx->stream is not NULL,
1163 		 * the pipe will be used, so don't disable it.
1164 		 */
1165 		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1166 			continue;
1167 
1168 		/* Blank controller using driver code instead of
1169 		 * command table.
1170 		 */
1171 		if (tg->funcs->is_tg_enabled(tg)) {
1172 			if (hws->funcs.init_blank != NULL) {
1173 				hws->funcs.init_blank(dc, tg);
1174 				tg->funcs->lock(tg);
1175 			} else {
1176 				tg->funcs->lock(tg);
1177 				tg->funcs->set_blank(tg, true);
1178 				hwss_wait_for_blank_complete(tg);
1179 			}
1180 		}
1181 	}
1182 
1183 	/* num_opp will be equal to number of mpcc */
1184 	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
1185 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1186 
1187 		/* Cannot reset the MPC mux if seamless boot */
1188 		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1189 			continue;
1190 
1191 		dc->res_pool->mpc->funcs->mpc_init_single_inst(
1192 				dc->res_pool->mpc, i);
1193 	}
1194 
1195 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1196 		struct timing_generator *tg = dc->res_pool->timing_generators[i];
1197 		struct hubp *hubp = dc->res_pool->hubps[i];
1198 		struct dpp *dpp = dc->res_pool->dpps[i];
1199 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1200 
1201 		/* We assume pipe_ctx is not mapped irregularly to a
1202 		 * non-preferred front end. If pipe_ctx->stream is not NULL,
1203 		 * the pipe will be used, so don't disable it.
1204 		 */
1205 		if (can_apply_seamless_boot &&
1206 			pipe_ctx->stream != NULL &&
1207 			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
1208 				pipe_ctx->stream_res.tg)) {
1209 			// Enable double buffering for OTG_BLANK no matter if
1210 			// seamless boot is enabled or not to suppress global sync
1211 			// signals when OTG blanked. This is to prevent pipe from
1212 			// requesting data while in PSR.
1213 			tg->funcs->tg_init(tg);
1214 			continue;
1215 		}
1216 
1217 		/* Disable on the current state so the new one isn't cleared. */
1218 		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1219 
1220 		dpp->funcs->dpp_reset(dpp);
1221 
1222 		pipe_ctx->stream_res.tg = tg;
1223 		pipe_ctx->pipe_idx = i;
1224 
1225 		pipe_ctx->plane_res.hubp = hubp;
1226 		pipe_ctx->plane_res.dpp = dpp;
1227 		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
1228 		hubp->mpcc_id = dpp->inst;
1229 		hubp->opp_id = OPP_ID_INVALID;
1230 		hubp->power_gated = false;
1231 
1232 		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
1233 		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
1234 		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1235 		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
1236 
1237 		hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
1238 
1239 		if (tg->funcs->is_tg_enabled(tg))
1240 			tg->funcs->unlock(tg);
1241 
1242 		dc->hwss.disable_plane(dc, pipe_ctx);
1243 
1244 		pipe_ctx->stream_res.tg = NULL;
1245 		pipe_ctx->plane_res.hubp = NULL;
1246 
1247 		tg->funcs->tg_init(tg);
1248 	}
1249 }
1250 
1251 void dcn10_init_hw(struct dc *dc)
1252 {
1253 	int i, j;
1254 	struct abm *abm = dc->res_pool->abm;
1255 	struct dmcu *dmcu = dc->res_pool->dmcu;
1256 	struct dce_hwseq *hws = dc->hwseq;
1257 	struct dc_bios *dcb = dc->ctx->dc_bios;
1258 	struct resource_pool *res_pool = dc->res_pool;
1259 	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
1260 	bool   is_optimized_init_done = false;
1261 
1262 	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
1263 		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
1264 
1265 	// Initialize the dccg
1266 	if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
1267 		dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);
1268 
1269 	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1270 
1271 		REG_WRITE(REFCLK_CNTL, 0);
1272 		REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
1273 		REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1274 
1275 		if (!dc->debug.disable_clock_gate) {
1276 			/* enable all DCN clock gating */
1277 			REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1278 
1279 			REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1280 
1281 			REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1282 		}
1283 
1284 		//Enable ability to power gate / don't force power on permanently
1285 		if (hws->funcs.enable_power_gating_plane)
1286 			hws->funcs.enable_power_gating_plane(hws, true);
1287 
1288 		return;
1289 	}
1290 
1291 	if (!dcb->funcs->is_accelerated_mode(dcb))
1292 		hws->funcs.disable_vga(dc->hwseq);
1293 
1294 	hws->funcs.bios_golden_init(dc);
1295 
1296 	if (dc->ctx->dc_bios->fw_info_valid) {
1297 		res_pool->ref_clocks.xtalin_clock_inKhz =
1298 				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
1299 
1300 		if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1301 			if (res_pool->dccg && res_pool->hubbub) {
1302 
1303 				(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
1304 						dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
1305 						&res_pool->ref_clocks.dccg_ref_clock_inKhz);
1306 
1307 				(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
1308 						res_pool->ref_clocks.dccg_ref_clock_inKhz,
1309 						&res_pool->ref_clocks.dchub_ref_clock_inKhz);
1310 			} else {
1311 				// Not all ASICs have DCCG sw component
1312 				res_pool->ref_clocks.dccg_ref_clock_inKhz =
1313 						res_pool->ref_clocks.xtalin_clock_inKhz;
1314 				res_pool->ref_clocks.dchub_ref_clock_inKhz =
1315 						res_pool->ref_clocks.xtalin_clock_inKhz;
1316 			}
1317 		}
1318 	} else
1319 		ASSERT_CRITICAL(false);
1320 
1321 	for (i = 0; i < dc->link_count; i++) {
1322 		/* Power up AND update implementation according to the
1323 		 * required signal (which may be different from the
1324 		 * default signal on connector).
1325 		 */
1326 		struct dc_link *link = dc->links[i];
1327 
1328 		if (!is_optimized_init_done)
1329 			link->link_enc->funcs->hw_init(link->link_enc);
1330 
1331 		/* Check for enabled DIG to identify enabled display */
1332 		if (link->link_enc->funcs->is_dig_enabled &&
1333 			link->link_enc->funcs->is_dig_enabled(link->link_enc))
1334 			link->link_status.link_active = true;
1335 	}
1336 
1337 	/* Power gate DSCs */
1338 	if (!is_optimized_init_done) {
1339 		for (i = 0; i < res_pool->res_cap->num_dsc; i++)
1340 			if (hws->funcs.dsc_pg_control != NULL)
1341 				hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
1342 	}
1343 
1344 	/* we want to turn off all dp displays before doing detection */
1345 	if (dc->config.power_down_display_on_boot) {
1346 		uint8_t dpcd_power_state = '\0';
1347 		enum dc_status status = DC_ERROR_UNEXPECTED;
1348 
1349 		for (i = 0; i < dc->link_count; i++) {
1350 			if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
1351 				continue;
1352 
1353 			/*
1354 			 * If any of the displays are lit up turn them off.
1355 			 * The reason is that some MST hubs cannot be turned off
1356 			 * completely until we tell them to do so.
1357 			 * If not turned off, then displays connected to MST hub
1358 			 * won't light up.
1359 			 */
1360 			status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
1361 							&dpcd_power_state, sizeof(dpcd_power_state));
1362 			if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) {
1363 				/* blank dp stream before power off receiver*/
1364 				if (dc->links[i]->link_enc->funcs->get_dig_frontend) {
1365 					unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend(dc->links[i]->link_enc);
1366 
1367 					for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
1368 						if (fe == dc->res_pool->stream_enc[j]->id) {
1369 							dc->res_pool->stream_enc[j]->funcs->dp_blank(
1370 										dc->res_pool->stream_enc[j]);
1371 							break;
1372 						}
1373 					}
1374 				}
1375 				dp_receiver_power_ctrl(dc->links[i], false);
1376 			}
1377 		}
1378 	}
1379 
1380 	/* If taking control over from VBIOS, we may want to optimize our first
1381 	 * mode set, so we need to skip powering down pipes until we know which
1382 	 * pipes we want to use.
1383 	 * Otherwise, if taking control is not possible, we need to power
1384 	 * everything down.
1385 	 */
1386 	if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
1387 		if (!is_optimized_init_done) {
1388 			hws->funcs.init_pipes(dc, dc->current_state);
1389 			if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
1390 				dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
1391 						!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
1392 		}
1393 	}
1394 
1395 	if (!is_optimized_init_done) {
1396 
1397 		for (i = 0; i < res_pool->audio_count; i++) {
1398 			struct audio *audio = res_pool->audios[i];
1399 
1400 			audio->funcs->hw_init(audio);
1401 		}
1402 
1403 		for (i = 0; i < dc->link_count; i++) {
1404 			struct dc_link *link = dc->links[i];
1405 
1406 			if (link->panel_cntl)
1407 				backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
1408 		}
1409 
1410 		if (abm != NULL)
1411 			abm->funcs->abm_init(abm, backlight);
1412 
1413 		if (dmcu != NULL && !dmcu->auto_load_dmcu)
1414 			dmcu->funcs->dmcu_init(dmcu);
1415 	}
1416 
1417 	if (abm != NULL && dmcu != NULL)
1418 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1419 
1420 	/* Power AFMT HDMI memory. TODO: may move to output enable/disable to save power. */
1421 	if (!is_optimized_init_done)
1422 		REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1423 
1424 	if (!dc->debug.disable_clock_gate) {
1425 		/* enable all DCN clock gating */
1426 		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1427 
1428 		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1429 
1430 		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1431 	}
1432 	if (hws->funcs.enable_power_gating_plane)
1433 		hws->funcs.enable_power_gating_plane(dc->hwseq, true);
1434 
1435 	if (dc->clk_mgr->funcs->notify_wm_ranges)
1436 		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
1437 
1438 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
1439 	if (dc->clk_mgr->funcs->set_hard_max_memclk)
1440 		dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
1441 #endif
1442 
1443 }
1444 
1445 /* In headless boot cases, DIG may be turned
1446  * on which causes HW/SW discrepancies.
1447  * To avoid this, power down hardware on boot
1448  * if DIG is turned on and seamless boot not enabled
1449  */
1450 void dcn10_power_down_on_boot(struct dc *dc)
1451 {
1452 	int i = 0;
1453 	struct dc_link *edp_link;
1454 
1455 	if (!dc->config.power_down_display_on_boot)
1456 		return;
1457 
1458 	edp_link = get_edp_link(dc);
1459 	if (edp_link &&
1460 			edp_link->link_enc->funcs->is_dig_enabled &&
1461 			edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
1462 			dc->hwseq->funcs.edp_backlight_control &&
1463 			dc->hwss.power_down &&
1464 			dc->hwss.edp_power_control) {
1465 		dc->hwseq->funcs.edp_backlight_control(edp_link, false);
1466 		dc->hwss.power_down(dc);
1467 		dc->hwss.edp_power_control(edp_link, false);
1468 	} else {
1469 		for (i = 0; i < dc->link_count; i++) {
1470 			struct dc_link *link = dc->links[i];
1471 
1472 			if (link->link_enc->funcs->is_dig_enabled &&
1473 					link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
1474 					dc->hwss.power_down) {
1475 				dc->hwss.power_down(dc);
1476 				break;
1477 			}
1478 
1479 		}
1480 	}
1481 
1482 	/*
1483 	 * Call update_clocks with empty context
1484 	 * to send DISPLAY_OFF
1485 	 * Otherwise DISPLAY_OFF may not be asserted
1486 	 */
1487 	if (dc->clk_mgr->funcs->set_low_power_state)
1488 		dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
1489 }
1490 
1491 void dcn10_reset_hw_ctx_wrap(
1492 		struct dc *dc,
1493 		struct dc_state *context)
1494 {
1495 	int i;
1496 	struct dce_hwseq *hws = dc->hwseq;
1497 
1498 	/* Reset Back End*/
1499 	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
1500 		struct pipe_ctx *pipe_ctx_old =
1501 			&dc->current_state->res_ctx.pipe_ctx[i];
1502 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1503 
1504 		if (!pipe_ctx_old->stream)
1505 			continue;
1506 
1507 		if (pipe_ctx_old->top_pipe)
1508 			continue;
1509 
1510 		if (!pipe_ctx->stream ||
1511 				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
1512 			struct clock_source *old_clk = pipe_ctx_old->clock_source;
1513 
1514 			dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
1515 			if (hws->funcs.enable_stream_gating)
1516 				hws->funcs.enable_stream_gating(dc, pipe_ctx);
1517 			if (old_clk)
1518 				old_clk->funcs->cs_power_down(old_clk);
1519 		}
1520 	}
1521 }
1522 
1523 static bool patch_address_for_sbs_tb_stereo(
1524 		struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1525 {
1526 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1527 	bool sec_split = pipe_ctx->top_pipe &&
1528 			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
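	/*
	 * For the secondary pipe of a side-by-side or top-and-bottom stereo
	 * split, hand the left-eye address back through *addr and temporarily
	 * point the plane at the right-eye address; the caller restores the
	 * left address after programming the flip.
	 */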
1529 	if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1530 		(pipe_ctx->stream->timing.timing_3d_format ==
1531 		 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1532 		 pipe_ctx->stream->timing.timing_3d_format ==
1533 		 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
1534 		*addr = plane_state->address.grph_stereo.left_addr;
1535 		plane_state->address.grph_stereo.left_addr =
1536 		plane_state->address.grph_stereo.right_addr;
1537 		return true;
1538 	} else {
1539 		if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1540 			plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1541 			plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1542 			plane_state->address.grph_stereo.right_addr =
1543 			plane_state->address.grph_stereo.left_addr;
1544 		}
1545 	}
1546 	return false;
1547 }
1548 
1549 void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
1550 {
1551 	bool addr_patched = false;
1552 	PHYSICAL_ADDRESS_LOC addr;
1553 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1554 
1555 	if (plane_state == NULL)
1556 		return;
1557 
1558 	addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
1559 
1560 	pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
1561 			pipe_ctx->plane_res.hubp,
1562 			&plane_state->address,
1563 			plane_state->flip_immediate);
1564 
1565 	plane_state->status.requested_address = plane_state->address;
1566 
1567 	if (plane_state->flip_immediate)
1568 		plane_state->status.current_address = plane_state->address;
1569 
1570 	if (addr_patched)
1571 		pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
1572 }
1573 
1574 bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1575 			const struct dc_plane_state *plane_state)
1576 {
1577 	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
1578 	const struct dc_transfer_func *tf = NULL;
1579 	bool result = true;
1580 
1581 	if (dpp_base == NULL)
1582 		return false;
1583 
1584 	if (plane_state->in_transfer_func)
1585 		tf = plane_state->in_transfer_func;
1586 
1587 	if (plane_state->gamma_correction &&
1588 		!dpp_base->ctx->dc->debug.always_use_regamma
1589 		&& !plane_state->gamma_correction->is_identity
1590 			&& dce_use_lut(plane_state->format))
1591 		dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
1592 
1593 	if (tf == NULL)
1594 		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1595 	else if (tf->type == TF_TYPE_PREDEFINED) {
1596 		switch (tf->tf) {
1597 		case TRANSFER_FUNCTION_SRGB:
1598 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
1599 			break;
1600 		case TRANSFER_FUNCTION_BT709:
1601 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
1602 			break;
1603 		case TRANSFER_FUNCTION_LINEAR:
1604 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1605 			break;
1606 		case TRANSFER_FUNCTION_PQ:
1607 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
1608 			cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
1609 			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
1610 			result = true;
1611 			break;
1612 		default:
1613 			result = false;
1614 			break;
1615 		}
1616 	} else if (tf->type == TF_TYPE_BYPASS) {
1617 		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1618 	} else {
1619 		cm_helper_translate_curve_to_degamma_hw_format(tf,
1620 					&dpp_base->degamma_params);
1621 		dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
1622 				&dpp_base->degamma_params);
1623 		result = true;
1624 	}
1625 
1626 	return result;
1627 }
1628 
1629 #define MAX_NUM_HW_POINTS 0x200
1630 
1631 static void log_tf(struct dc_context *ctx,
1632 				struct dc_transfer_func *tf, uint32_t hw_points_num)
1633 {
1634 	// DC_LOG_GAMMA is default logging of all hw points
1635 	// DC_LOG_ALL_GAMMA logs all points, not only hw points
1636 	// DC_LOG_ALL_TF_POINTS logs all channels of the tf
1637 	int i = 0;
1638 
1639 	DC_LOGGER_INIT(ctx->logger);
1640 	DC_LOG_GAMMA("Gamma Correction TF");
1641 	DC_LOG_ALL_GAMMA("Logging all tf points...");
1642 	DC_LOG_ALL_TF_CHANNELS("Logging all channels...");
1643 
1644 	for (i = 0; i < hw_points_num; i++) {
1645 		DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1646 		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1647 		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1648 	}
1649 
1650 	for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
1651 		DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1652 		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1653 		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1654 	}
1655 }
1656 
1657 bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1658 				const struct dc_stream_state *stream)
1659 {
1660 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
1661 
1662 	if (dpp == NULL)
1663 		return false;
1664 
1665 	dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
1666 
1667 	if (stream->out_transfer_func &&
1668 	    stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
1669 	    stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
1670 		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
1671 
1672 	/* dcn10_translate_regamma_to_hw_format takes 750us, so only do it on a full
1673 	 * update.
1674 	 */
1675 	else if (cm_helper_translate_curve_to_hw_format(
1676 			stream->out_transfer_func,
1677 			&dpp->regamma_params, false)) {
1678 		dpp->funcs->dpp_program_regamma_pwl(
1679 				dpp,
1680 				&dpp->regamma_params, OPP_REGAMMA_USER);
1681 	} else
1682 		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
1683 
1684 	if (stream != NULL && stream->ctx != NULL &&
1685 			stream->out_transfer_func != NULL) {
1686 		log_tf(stream->ctx,
1687 				stream->out_transfer_func,
1688 				dpp->regamma_params.hw_points_num);
1689 	}
1690 
1691 	return true;
1692 }
1693 
1694 void dcn10_pipe_control_lock(
1695 	struct dc *dc,
1696 	struct pipe_ctx *pipe,
1697 	bool lock)
1698 {
1699 	struct dce_hwseq *hws = dc->hwseq;
1700 
1701 	/* Use the TG master update lock to lock everything on the TG,
1702 	 * therefore only the top pipe needs to lock.
1703 	 */
1704 	if (!pipe || pipe->top_pipe)
1705 		return;
1706 
1707 	if (dc->debug.sanity_checks)
1708 		hws->funcs.verify_allow_pstate_change_high(dc);
1709 
1710 	if (lock)
1711 		pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
1712 	else
1713 		pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
1714 
1715 	if (dc->debug.sanity_checks)
1716 		hws->funcs.verify_allow_pstate_change_high(dc);
1717 }
1718 
1719 /**
1720  * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
1721  *
1722  * Software keepout workaround to prevent cursor update locking from stalling
1723  * out cursor updates indefinitely, or old values from being retained in
1724  * the case where the viewport changes in the same frame as the cursor.
1725  *
1726  * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
1727  * too close to VUPDATE, then stall out until VUPDATE finishes.
1728  *
1729  * TODO: Optimize cursor programming to be once per frame before VUPDATE
1730  *       to avoid the need for this workaround.
1731  */
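/*
 * Illustrative numbers (hypothetical timing, not taken from this driver):
 * with h_total = 2200 and pix_clk_100hz = 1485000 (148.5 MHz), one line is
 * 2200 * 10000 / 1485000 ~= 14 us.  If VPOS is 4 lines before vupdate_start,
 * us_to_vupdate ~= 56 us, which is within the 70 us keepout, so the function
 * busy-waits (udelay) through the VUPDATE region before the lock is taken.
 */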
1732 static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
1733 {
1734 	struct dc_stream_state *stream = pipe_ctx->stream;
1735 	struct crtc_position position;
1736 	uint32_t vupdate_start, vupdate_end;
1737 	unsigned int lines_to_vupdate, us_to_vupdate, vpos;
1738 	unsigned int us_per_line, us_vupdate;
1739 
1740 	if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
1741 		return;
1742 
1743 	if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
1744 		return;
1745 
1746 	dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
1747 				       &vupdate_end);
1748 
1749 	dc->hwss.get_position(&pipe_ctx, 1, &position);
1750 	vpos = position.vertical_count;
1751 
1752 	/* Avoid wraparound calculation issues */
1753 	vupdate_start += stream->timing.v_total;
1754 	vupdate_end += stream->timing.v_total;
1755 	vpos += stream->timing.v_total;
1756 
1757 	if (vpos <= vupdate_start) {
1758 		/* VPOS is in VACTIVE or back porch. */
1759 		lines_to_vupdate = vupdate_start - vpos;
1760 	} else if (vpos > vupdate_end) {
1761 		/* VPOS is in the front porch. */
1762 		return;
1763 	} else {
1764 		/* VPOS is in VUPDATE. */
1765 		lines_to_vupdate = 0;
1766 	}
1767 
1768 	/* Calculate time until VUPDATE in microseconds. */
1769 	us_per_line =
1770 		stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
1771 	us_to_vupdate = lines_to_vupdate * us_per_line;
1772 
1773 	/* 70 us is a conservative estimate of cursor update time */
1774 	if (us_to_vupdate > 70)
1775 		return;
1776 
1777 	/* Stall out until the cursor update completes. */
1778 	if (vupdate_end < vupdate_start)
1779 		vupdate_end += stream->timing.v_total;
1780 	us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
1781 	udelay(us_to_vupdate + us_vupdate);
1782 }
1783 
1784 void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
1785 {
1786 	/* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
1787 	if (!pipe || pipe->top_pipe)
1788 		return;
1789 
1790 	/* Prevent cursor lock from stalling out cursor updates. */
1791 	if (lock)
1792 		delay_cursor_until_vupdate(dc, pipe);
1793 
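	/* If should_use_dmub_lock() selects this link (presumably when firmware
	 * such as PSR also manages locking), route the cursor lock through the
	 * DMUB HW lock manager; otherwise lock the cursor directly in the MPC.
	 */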
1794 	if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
1795 		union dmub_hw_lock_flags hw_locks = { 0 };
1796 		struct dmub_hw_lock_inst_flags inst_flags = { 0 };
1797 
1798 		hw_locks.bits.lock_cursor = 1;
1799 		inst_flags.opp_inst = pipe->stream_res.opp->inst;
1800 
1801 		dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
1802 					lock,
1803 					&hw_locks,
1804 					&inst_flags);
1805 	} else
1806 		dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
1807 				pipe->stream_res.opp->inst, lock);
1808 }
1809 
1810 static bool wait_for_reset_trigger_to_occur(
1811 	struct dc_context *dc_ctx,
1812 	struct timing_generator *tg)
1813 {
1814 	bool rc = false;
1815 
1816 	/* To avoid an endless loop, wait at most
1817 	 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
1818 	const uint32_t frames_to_wait_on_triggered_reset = 10;
1819 	int i;
1820 
1821 	for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
1822 
1823 		if (!tg->funcs->is_counter_moving(tg)) {
1824 			DC_ERROR("TG counter is not moving!\n");
1825 			break;
1826 		}
1827 
1828 		if (tg->funcs->did_triggered_reset_occur(tg)) {
1829 			rc = true;
1830 			/* usually occurs at i=1 */
1831 			DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
1832 					i);
1833 			break;
1834 		}
1835 
1836 		/* Wait for one frame. */
1837 		tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
1838 		tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
1839 	}
1840 
1841 	if (!rc)
1842 		DC_ERROR("GSL: Timeout on reset trigger!\n");
1843 
1844 	return rc;
1845 }
1846 
1847 void dcn10_enable_timing_synchronization(
1848 	struct dc *dc,
1849 	int group_index,
1850 	int group_size,
1851 	struct pipe_ctx *grouped_pipes[])
1852 {
1853 	struct dc_context *dc_ctx = dc->ctx;
1854 	int i;
1855 
1856 	DC_SYNC_INFO("Setting up OTG reset trigger\n");
1857 
1858 	for (i = 1; i < group_size; i++)
1859 		grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
1860 				grouped_pipes[i]->stream_res.tg,
1861 				grouped_pipes[0]->stream_res.tg->inst);
1862 
1863 	DC_SYNC_INFO("Waiting for trigger\n");
1864 
1865 	/* Only need to check one pipe for the reset having occurred, as all the
1866 	 * others are synchronized. Look at the last pipe programmed to reset.
1867 	 */
1868 
1869 	wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
1870 	for (i = 1; i < group_size; i++)
1871 		grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
1872 				grouped_pipes[i]->stream_res.tg);
1873 
1874 	DC_SYNC_INFO("Sync complete\n");
1875 }
1876 
1877 void dcn10_enable_per_frame_crtc_position_reset(
1878 	struct dc *dc,
1879 	int group_size,
1880 	struct pipe_ctx *grouped_pipes[])
1881 {
1882 	struct dc_context *dc_ctx = dc->ctx;
1883 	int i;
1884 
1885 	DC_SYNC_INFO("Setting up\n");
1886 	for (i = 0; i < group_size; i++)
1887 		if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
1888 			grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
1889 					grouped_pipes[i]->stream_res.tg,
1890 					0,
1891 					&grouped_pipes[i]->stream->triggered_crtc_reset);
1892 
1893 	DC_SYNC_INFO("Waiting for trigger\n");
1894 
1895 	for (i = 0; i < group_size; i++)
1896 		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
1897 
1898 	DC_SYNC_INFO("Multi-display sync is complete\n");
1899 }
1900 
1901 /*static void print_rq_dlg_ttu(
1902 		struct dc *dc,
1903 		struct pipe_ctx *pipe_ctx)
1904 {
1905 	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
1906 			"\n============== DML TTU Output parameters [%d] ==============\n"
1907 			"qos_level_low_wm: %d, \n"
1908 			"qos_level_high_wm: %d, \n"
1909 			"min_ttu_vblank: %d, \n"
1910 			"qos_level_flip: %d, \n"
1911 			"refcyc_per_req_delivery_l: %d, \n"
1912 			"qos_level_fixed_l: %d, \n"
1913 			"qos_ramp_disable_l: %d, \n"
1914 			"refcyc_per_req_delivery_pre_l: %d, \n"
1915 			"refcyc_per_req_delivery_c: %d, \n"
1916 			"qos_level_fixed_c: %d, \n"
1917 			"qos_ramp_disable_c: %d, \n"
1918 			"refcyc_per_req_delivery_pre_c: %d\n"
1919 			"=============================================================\n",
1920 			pipe_ctx->pipe_idx,
1921 			pipe_ctx->ttu_regs.qos_level_low_wm,
1922 			pipe_ctx->ttu_regs.qos_level_high_wm,
1923 			pipe_ctx->ttu_regs.min_ttu_vblank,
1924 			pipe_ctx->ttu_regs.qos_level_flip,
1925 			pipe_ctx->ttu_regs.refcyc_per_req_delivery_l,
1926 			pipe_ctx->ttu_regs.qos_level_fixed_l,
1927 			pipe_ctx->ttu_regs.qos_ramp_disable_l,
1928 			pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_l,
1929 			pipe_ctx->ttu_regs.refcyc_per_req_delivery_c,
1930 			pipe_ctx->ttu_regs.qos_level_fixed_c,
1931 			pipe_ctx->ttu_regs.qos_ramp_disable_c,
1932 			pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_c
1933 			);
1934 
1935 	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
1936 			"\n============== DML DLG Output parameters [%d] ==============\n"
1937 			"refcyc_h_blank_end: %d, \n"
1938 			"dlg_vblank_end: %d, \n"
1939 			"min_dst_y_next_start: %d, \n"
1940 			"refcyc_per_htotal: %d, \n"
1941 			"refcyc_x_after_scaler: %d, \n"
1942 			"dst_y_after_scaler: %d, \n"
1943 			"dst_y_prefetch: %d, \n"
1944 			"dst_y_per_vm_vblank: %d, \n"
1945 			"dst_y_per_row_vblank: %d, \n"
1946 			"ref_freq_to_pix_freq: %d, \n"
1947 			"vratio_prefetch: %d, \n"
1948 			"refcyc_per_pte_group_vblank_l: %d, \n"
1949 			"refcyc_per_meta_chunk_vblank_l: %d, \n"
1950 			"dst_y_per_pte_row_nom_l: %d, \n"
1951 			"refcyc_per_pte_group_nom_l: %d, \n",
1952 			pipe_ctx->pipe_idx,
1953 			pipe_ctx->dlg_regs.refcyc_h_blank_end,
1954 			pipe_ctx->dlg_regs.dlg_vblank_end,
1955 			pipe_ctx->dlg_regs.min_dst_y_next_start,
1956 			pipe_ctx->dlg_regs.refcyc_per_htotal,
1957 			pipe_ctx->dlg_regs.refcyc_x_after_scaler,
1958 			pipe_ctx->dlg_regs.dst_y_after_scaler,
1959 			pipe_ctx->dlg_regs.dst_y_prefetch,
1960 			pipe_ctx->dlg_regs.dst_y_per_vm_vblank,
1961 			pipe_ctx->dlg_regs.dst_y_per_row_vblank,
1962 			pipe_ctx->dlg_regs.ref_freq_to_pix_freq,
1963 			pipe_ctx->dlg_regs.vratio_prefetch,
1964 			pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_l,
1965 			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_l,
1966 			pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_l,
1967 			pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_l
1968 			);
1969 
1970 	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
1971 			"\ndst_y_per_meta_row_nom_l: %d, \n"
1972 			"refcyc_per_meta_chunk_nom_l: %d, \n"
1973 			"refcyc_per_line_delivery_pre_l: %d, \n"
1974 			"refcyc_per_line_delivery_l: %d, \n"
1975 			"vratio_prefetch_c: %d, \n"
1976 			"refcyc_per_pte_group_vblank_c: %d, \n"
1977 			"refcyc_per_meta_chunk_vblank_c: %d, \n"
1978 			"dst_y_per_pte_row_nom_c: %d, \n"
1979 			"refcyc_per_pte_group_nom_c: %d, \n"
1980 			"dst_y_per_meta_row_nom_c: %d, \n"
1981 			"refcyc_per_meta_chunk_nom_c: %d, \n"
1982 			"refcyc_per_line_delivery_pre_c: %d, \n"
1983 			"refcyc_per_line_delivery_c: %d \n"
1984 			"========================================================\n",
1985 			pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_l,
1986 			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_l,
1987 			pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_l,
1988 			pipe_ctx->dlg_regs.refcyc_per_line_delivery_l,
1989 			pipe_ctx->dlg_regs.vratio_prefetch_c,
1990 			pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_c,
1991 			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_c,
1992 			pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_c,
1993 			pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_c,
1994 			pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_c,
1995 			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_c,
1996 			pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_c,
1997 			pipe_ctx->dlg_regs.refcyc_per_line_delivery_c
1998 			);
1999 
2000 	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
2001 			"\n============== DML RQ Output parameters [%d] ==============\n"
2002 			"chunk_size: %d \n"
2003 			"min_chunk_size: %d \n"
2004 			"meta_chunk_size: %d \n"
2005 			"min_meta_chunk_size: %d \n"
2006 			"dpte_group_size: %d \n"
2007 			"mpte_group_size: %d \n"
2008 			"swath_height: %d \n"
2009 			"pte_row_height_linear: %d \n"
2010 			"========================================================\n",
2011 			pipe_ctx->pipe_idx,
2012 			pipe_ctx->rq_regs.rq_regs_l.chunk_size,
2013 			pipe_ctx->rq_regs.rq_regs_l.min_chunk_size,
2014 			pipe_ctx->rq_regs.rq_regs_l.meta_chunk_size,
2015 			pipe_ctx->rq_regs.rq_regs_l.min_meta_chunk_size,
2016 			pipe_ctx->rq_regs.rq_regs_l.dpte_group_size,
2017 			pipe_ctx->rq_regs.rq_regs_l.mpte_group_size,
2018 			pipe_ctx->rq_regs.rq_regs_l.swath_height,
2019 			pipe_ctx->rq_regs.rq_regs_l.pte_row_height_linear
2020 			);
2021 }
2022 */
2023 
2024 static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
2025 		struct vm_system_aperture_param *apt,
2026 		struct dce_hwseq *hws)
2027 {
2028 	PHYSICAL_ADDRESS_LOC physical_page_number;
2029 	uint32_t logical_addr_low;
2030 	uint32_t logical_addr_high;
2031 
2032 	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
2033 			PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
2034 	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
2035 			PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);
2036 
2037 	REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
2038 			LOGICAL_ADDR, &logical_addr_low);
2039 
2040 	REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
2041 			LOGICAL_ADDR, &logical_addr_high);
2042 
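	/* PHYSICAL_PAGE_NUMBER is a 4 KB page-frame number, hence the shift by 12;
	 * the aperture low/high logical addresses appear to be stored in 256 KB
	 * units, hence the shift by 18.
	 */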
2043 	apt->sys_default.quad_part =  physical_page_number.quad_part << 12;
2044 	apt->sys_low.quad_part =  (int64_t)logical_addr_low << 18;
2045 	apt->sys_high.quad_part =  (int64_t)logical_addr_high << 18;
2046 }
2047 
2048 /* Temporarily read the settings; in the future the values will come from the KMD directly. */
2049 static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
2050 		struct vm_context0_param *vm0,
2051 		struct dce_hwseq *hws)
2052 {
2053 	PHYSICAL_ADDRESS_LOC fb_base;
2054 	PHYSICAL_ADDRESS_LOC fb_offset;
2055 	uint32_t fb_base_value;
2056 	uint32_t fb_offset_value;
2057 
2058 	REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
2059 	REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);
2060 
2061 	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
2062 			PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
2063 	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
2064 			PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);
2065 
2066 	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
2067 			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
2068 	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
2069 			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);
2070 
2071 	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
2072 			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
2073 	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
2074 			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);
2075 
2076 	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
2077 			PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
2078 	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
2079 			PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);
2080 
2081 	/*
2082 	 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR are in UMA space.
2083 	 * Therefore we need to do
2084 	 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
2085 	 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
2086 	 */
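	/* DCHUBBUB_SDPIF_FB_BASE/OFFSET appear to be stored in 16 MB units, hence
	 * the shift by 24 below to obtain byte addresses.
	 */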
2087 	fb_base.quad_part = (uint64_t)fb_base_value << 24;
2088 	fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
2089 	vm0->pte_base.quad_part += fb_base.quad_part;
2090 	vm0->pte_base.quad_part -= fb_offset.quad_part;
2091 }
2092 
2093 
2094 void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
2095 {
2096 	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
2097 	struct vm_system_aperture_param apt = { {{ 0 } } };
2098 	struct vm_context0_param vm0 = { { { 0 } } };
2099 
2100 	mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
2101 	mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
2102 
2103 	hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
2104 	hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
2105 }
2106 
2107 static void dcn10_enable_plane(
2108 	struct dc *dc,
2109 	struct pipe_ctx *pipe_ctx,
2110 	struct dc_state *context)
2111 {
2112 	struct dce_hwseq *hws = dc->hwseq;
2113 
2114 	if (dc->debug.sanity_checks) {
2115 		hws->funcs.verify_allow_pstate_change_high(dc);
2116 	}
2117 
2118 	undo_DEGVIDCN10_253_wa(dc);
2119 
2120 	power_on_plane(dc->hwseq,
2121 		pipe_ctx->plane_res.hubp->inst);
2122 
2123 	/* enable DCFCLK for the current DCHUB */
2124 	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
2125 
2126 	/* make sure OPP_PIPE_CLOCK_EN = 1 */
2127 	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
2128 			pipe_ctx->stream_res.opp,
2129 			true);
2130 
2131 /* TODO: enable/disable in dm as per update type.
2132 	if (plane_state) {
2133 		DC_LOG_DC(dc->ctx->logger,
2134 				"Pipe:%d 0x%x: addr hi:0x%x, "
2135 				"addr low:0x%x, "
2136 				"src: %d, %d, %d,"
2137 				" %d; dst: %d, %d, %d, %d;\n",
2138 				pipe_ctx->pipe_idx,
2139 				plane_state,
2140 				plane_state->address.grph.addr.high_part,
2141 				plane_state->address.grph.addr.low_part,
2142 				plane_state->src_rect.x,
2143 				plane_state->src_rect.y,
2144 				plane_state->src_rect.width,
2145 				plane_state->src_rect.height,
2146 				plane_state->dst_rect.x,
2147 				plane_state->dst_rect.y,
2148 				plane_state->dst_rect.width,
2149 				plane_state->dst_rect.height);
2150 
2151 		DC_LOG_DC(dc->ctx->logger,
2152 				"Pipe %d: width, height, x, y         format:%d\n"
2153 				"viewport:%d, %d, %d, %d\n"
2154 				"recout:  %d, %d, %d, %d\n",
2155 				pipe_ctx->pipe_idx,
2156 				plane_state->format,
2157 				pipe_ctx->plane_res.scl_data.viewport.width,
2158 				pipe_ctx->plane_res.scl_data.viewport.height,
2159 				pipe_ctx->plane_res.scl_data.viewport.x,
2160 				pipe_ctx->plane_res.scl_data.viewport.y,
2161 				pipe_ctx->plane_res.scl_data.recout.width,
2162 				pipe_ctx->plane_res.scl_data.recout.height,
2163 				pipe_ctx->plane_res.scl_data.recout.x,
2164 				pipe_ctx->plane_res.scl_data.recout.y);
2165 		print_rq_dlg_ttu(dc, pipe_ctx);
2166 	}
2167 */
2168 	if (dc->config.gpu_vm_support)
2169 		dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);
2170 
2171 	if (dc->debug.sanity_checks) {
2172 		hws->funcs.verify_allow_pstate_change_high(dc);
2173 	}
2174 }
2175 
2176 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
2177 {
2178 	int i = 0;
2179 	struct dpp_grph_csc_adjustment adjust;
2180 	memset(&adjust, 0, sizeof(adjust));
2181 	adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2182 
2183 
2184 	if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2185 		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2186 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2187 			adjust.temperature_matrix[i] =
2188 				pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2189 	} else if (pipe_ctx->plane_state &&
2190 		   pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
2191 		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2192 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2193 			adjust.temperature_matrix[i] =
2194 				pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
2195 	}
2196 
2197 	pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
2198 }
2199 
2200 
2201 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
2202 {
2203 	if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
2204 		if (pipe_ctx->top_pipe) {
2205 			struct pipe_ctx *top = pipe_ctx->top_pipe;
2206 
2207 			while (top->top_pipe)
2208 				top = top->top_pipe; // Traverse to top pipe_ctx
2209 			if (top->plane_state && top->plane_state->layer_index == 0)
2210 				return true; // Front MPO plane not hidden
2211 		}
2212 	}
2213 	return false;
2214 }
2215 
2216 static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
2217 {
2218 	// Override rear plane RGB bias to fix MPO brightness
2219 	uint16_t rgb_bias = matrix[3];
2220 
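	// matrix[3], matrix[7] and matrix[11] are the R/G/B offset terms of the
	// 3x4 output CSC matrix: zero them for the rear plane, program the DPP,
	// then restore the original bias values.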
2221 	matrix[3] = 0;
2222 	matrix[7] = 0;
2223 	matrix[11] = 0;
2224 	pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2225 	matrix[3] = rgb_bias;
2226 	matrix[7] = rgb_bias;
2227 	matrix[11] = rgb_bias;
2228 }
2229 
2230 void dcn10_program_output_csc(struct dc *dc,
2231 		struct pipe_ctx *pipe_ctx,
2232 		enum dc_color_space colorspace,
2233 		uint16_t *matrix,
2234 		int opp_id)
2235 {
2236 	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
2237 		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {
2238 
2239 			/* MPO is broken with RGB colorspaces when the OCSC matrix
2240 			 * brightness offset is >= 0 on DCN1, because OCSC sits before
2241 			 * the MPC: blending adds the offsets from front + rear to the rear plane.
2242 			 *
2243 			 * The fix is to set the RGB bias to 0 on the rear plane; the top plane's
2244 			 * black-value pixels then add the offset instead of rear + front.
2245 			 */
2246 
2247 			int16_t rgb_bias = matrix[3];
2248 			// matrix[3/7/11] are all the same offset value
2249 
2250 			if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
2251 				dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
2252 			} else {
2253 				pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2254 			}
2255 		}
2256 	} else {
2257 		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
2258 			pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
2259 	}
2260 }
2261 
2262 void dcn10_get_surface_visual_confirm_color(
2263 		const struct pipe_ctx *pipe_ctx,
2264 		struct tg_color *color)
2265 {
2266 	uint32_t color_value = MAX_TG_COLOR_VALUE;
2267 
2268 	switch (pipe_ctx->plane_res.scl_data.format) {
2269 	case PIXEL_FORMAT_ARGB8888:
2270 		/* set border color to red */
2271 		color->color_r_cr = color_value;
2272 		break;
2273 
2274 	case PIXEL_FORMAT_ARGB2101010:
2275 		/* set border color to blue */
2276 		color->color_b_cb = color_value;
2277 		break;
2278 	case PIXEL_FORMAT_420BPP8:
2279 		/* set border color to green */
2280 		color->color_g_y = color_value;
2281 		break;
2282 	case PIXEL_FORMAT_420BPP10:
2283 		/* set border color to yellow */
2284 		color->color_g_y = color_value;
2285 		color->color_r_cr = color_value;
2286 		break;
2287 	case PIXEL_FORMAT_FP16:
2288 		/* set border color to white */
2289 		color->color_r_cr = color_value;
2290 		color->color_b_cb = color_value;
2291 		color->color_g_y = color_value;
2292 		break;
2293 	default:
2294 		break;
2295 	}
2296 }
2297 
2298 void dcn10_get_hdr_visual_confirm_color(
2299 		struct pipe_ctx *pipe_ctx,
2300 		struct tg_color *color)
2301 {
2302 	uint32_t color_value = MAX_TG_COLOR_VALUE;
2303 
2304 	// Determine the overscan color based on the top-most (desktop) plane's context
2305 	struct pipe_ctx *top_pipe_ctx  = pipe_ctx;
2306 
2307 	while (top_pipe_ctx->top_pipe != NULL)
2308 		top_pipe_ctx = top_pipe_ctx->top_pipe;
2309 
2310 	switch (top_pipe_ctx->plane_res.scl_data.format) {
2311 	case PIXEL_FORMAT_ARGB2101010:
2312 		if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
2313 			/* HDR10, ARGB2101010 - set border color to red */
2314 			color->color_r_cr = color_value;
2315 		} else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
2316 			/* FreeSync 2 ARGB2101010 - set border color to pink */
2317 			color->color_r_cr = color_value;
2318 			color->color_b_cb = color_value;
2319 		}
2320 		break;
2321 	case PIXEL_FORMAT_FP16:
2322 		if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
2323 			/* HDR10, FP16 - set border color to blue */
2324 			color->color_b_cb = color_value;
2325 		} else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
2326 			/* FreeSync 2 HDR - set border color to green */
2327 			color->color_g_y = color_value;
2328 		}
2329 		break;
2330 	default:
2331 		/* SDR - set border color to Gray */
2332 		color->color_r_cr = color_value/2;
2333 		color->color_b_cb = color_value/2;
2334 		color->color_g_y = color_value/2;
2335 		break;
2336 	}
2337 }
2338 
2339 static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2340 {
2341 	struct dc_bias_and_scale bns_params = {0};
2342 
2343 	// program the input csc
2344 	dpp->funcs->dpp_setup(dpp,
2345 			plane_state->format,
2346 			EXPANSION_MODE_ZERO,
2347 			plane_state->input_csc_color_matrix,
2348 			plane_state->color_space,
2349 			NULL);
2350 
2351 	//set scale and bias registers
2352 	build_prescale_params(&bns_params, plane_state);
2353 	if (dpp->funcs->dpp_program_bias_and_scale)
2354 		dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
2355 }
2356 
2357 void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
2358 {
2359 	struct dce_hwseq *hws = dc->hwseq;
2360 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
2361 	struct mpcc_blnd_cfg blnd_cfg = {{0}};
2362 	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2363 	int mpcc_id;
2364 	struct mpcc *new_mpcc;
2365 	struct mpc *mpc = dc->res_pool->mpc;
2366 	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
2367 
2368 	if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
2369 		hws->funcs.get_hdr_visual_confirm_color(
2370 				pipe_ctx, &blnd_cfg.black_color);
2371 	} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
2372 		hws->funcs.get_surface_visual_confirm_color(
2373 				pipe_ctx, &blnd_cfg.black_color);
2374 	} else {
2375 		color_space_to_black_color(
2376 				dc, pipe_ctx->stream->output_color_space,
2377 				&blnd_cfg.black_color);
2378 	}
2379 
2380 	if (per_pixel_alpha)
2381 		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
2382 	else
2383 		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
2384 
2385 	blnd_cfg.overlap_only = false;
2386 	blnd_cfg.global_gain = 0xff;
2387 
2388 	if (pipe_ctx->plane_state->global_alpha)
2389 		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
2390 	else
2391 		blnd_cfg.global_alpha = 0xff;
2392 
2393 	/* DCN1.0 has output CM before MPC which seems to screw with
2394 	 * pre-multiplied alpha.
2395 	 */
2396 	blnd_cfg.pre_multiplied_alpha = is_rgb_cspace(
2397 			pipe_ctx->stream->output_color_space)
2398 					&& per_pixel_alpha;
2399 
2400 
2401 	/*
2402 	 * TODO: remove hack
2403 	 * Note: currently there is a bug in init_hw such that
2404 	 * on resume from hibernate, BIOS sets up MPCC0, and
2405 	 * we do mpcc_remove but the mpcc cannot go to idle
2406 	 * after the remove. This causes us to pick mpcc1 here,
2407 	 * which causes a pstate hang for an as-yet-unknown reason.
2408 	 */
2409 	mpcc_id = hubp->inst;
2410 
2411 	/* If there is no full update, we don't need to touch the MPC tree */
2412 	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
2413 		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
2414 		return;
2415 	}
2416 
2417 	/* check if this MPCC is already being used */
2418 	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
2419 	/* remove MPCC if being used */
2420 	if (new_mpcc != NULL)
2421 		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
2422 	else
2423 		if (dc->debug.sanity_checks)
2424 			mpc->funcs->assert_mpcc_idle_before_connect(
2425 					dc->res_pool->mpc, mpcc_id);
2426 
2427 	/* Call MPC to insert new plane */
2428 	new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
2429 			mpc_tree_params,
2430 			&blnd_cfg,
2431 			NULL,
2432 			NULL,
2433 			hubp->inst,
2434 			mpcc_id);
2435 
2436 	ASSERT(new_mpcc != NULL);
2437 
2438 	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
2439 	hubp->mpcc_id = mpcc_id;
2440 }
2441 
2442 static void update_scaler(struct pipe_ctx *pipe_ctx)
2443 {
2444 	bool per_pixel_alpha =
2445 			pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2446 
2447 	pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2448 	pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
2449 	/* scaler configuration */
2450 	pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2451 			pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
2452 }
2453 
2454 static void dcn10_update_dchubp_dpp(
2455 	struct dc *dc,
2456 	struct pipe_ctx *pipe_ctx,
2457 	struct dc_state *context)
2458 {
2459 	struct dce_hwseq *hws = dc->hwseq;
2460 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
2461 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
2462 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2463 	struct plane_size size = plane_state->plane_size;
2464 	unsigned int compat_level = 0;
2465 	bool should_divided_by_2 = false;
2466 
2467 	/* Depending on the DML calculation, the DPP clock value may change dynamically. */
2468 	/* If the requested max DPP clock is lower than the current dispclk, there is
2469 	 * no need to divide by 2.
2470 	 */
2471 	if (plane_state->update_flags.bits.full_update) {
2472 
2473 		/* The newly calculated dispclk and dppclk are stored in
2474 		 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz; the current
2475 		 * dispclk and dppclk come from dc->clk_mgr->clks.dispclk_khz.
2476 		 * dcn_validate_bandwidth computes the new dispclk and dppclk.
2477 		 * dispclk is put into use after optimize_bandwidth, when
2478 		 * ramp_up_dispclk_with_dpp is called.
2479 		 * There are two places where dppclk is put into use. One is the
2480 		 * same place as dispclk. The other is within update_dchubp_dpp,
2481 		 * which happens between prepare_bandwidth and
2482 		 * optimize_bandwidth.
2483 		 * A dppclk update within update_dchubp_dpp means the new dispclk
2484 		 * and dppclk values are not in use at the same time. When clocks
2485 		 * are decreased, this may leave dppclk lower than the previous
2486 		 * configuration and leave the pipe stuck.
2487 		 * For example, with eDP + external DP, change the DP resolution
2488 		 * from 1920x1080@144Hz to 1280x960@60Hz.
2489 		 * Before the change: dispclk = 337889, dppclk = 337889.
2490 		 * On the mode change, dcn_validate_bandwidth calculates
2491 		 *                dispclk = 143122, dppclk = 143122.
2492 		 * If update_dchubp_dpp executes before dispclk is updated,
2493 		 * dispclk is still 337889 but dppclk uses the new value
2494 		 * dispclk / 2 = 168944; this causes a pipe pstate warning.
2495 		 * Solution: between prepare_bandwidth and optimize_bandwidth,
2496 		 * while dispclk is about to be decreased, keep dppclk = dispclk.
2497 		 */
2498 		if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
2499 				dc->clk_mgr->clks.dispclk_khz)
2500 			should_divided_by_2 = false;
2501 		else
2502 			should_divided_by_2 =
2503 					context->bw_ctx.bw.dcn.clk.dppclk_khz <=
2504 					dc->clk_mgr->clks.dispclk_khz / 2;
2505 
2506 		dpp->funcs->dpp_dppclk_control(
2507 				dpp,
2508 				should_divided_by_2,
2509 				true);
2510 
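		/* With a DCCG present, program a per-pipe DPP DTO from the
		 * calculated dppclk; without one, just track the effective global
		 * dppclk as dispclk or dispclk / 2, per the divider chosen above.
		 */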
2511 		if (dc->res_pool->dccg)
2512 			dc->res_pool->dccg->funcs->update_dpp_dto(
2513 					dc->res_pool->dccg,
2514 					dpp->inst,
2515 					pipe_ctx->plane_res.bw.dppclk_khz);
2516 		else
2517 			dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
2518 						dc->clk_mgr->clks.dispclk_khz / 2 :
2519 							dc->clk_mgr->clks.dispclk_khz;
2520 	}
2521 
2522 	/* TODO: Need an input parameter to tell which OTG the current DCHUB pipe is
2523 	 * tied to. VTG is within DCHUBBUB, which is a common block shared by each
2524 	 * pipe HUBP. VTG has a 1:1 mapping with OTG; each pipe HUBP selects which VTG.
2525 	 */
2526 	if (plane_state->update_flags.bits.full_update) {
2527 		hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
2528 
2529 		hubp->funcs->hubp_setup(
2530 			hubp,
2531 			&pipe_ctx->dlg_regs,
2532 			&pipe_ctx->ttu_regs,
2533 			&pipe_ctx->rq_regs,
2534 			&pipe_ctx->pipe_dlg_param);
2535 		hubp->funcs->hubp_setup_interdependent(
2536 			hubp,
2537 			&pipe_ctx->dlg_regs,
2538 			&pipe_ctx->ttu_regs);
2539 	}
2540 
2541 	size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
2542 
2543 	if (plane_state->update_flags.bits.full_update ||
2544 		plane_state->update_flags.bits.bpp_change)
2545 		dcn10_update_dpp(dpp, plane_state);
2546 
2547 	if (plane_state->update_flags.bits.full_update ||
2548 		plane_state->update_flags.bits.per_pixel_alpha_change ||
2549 		plane_state->update_flags.bits.global_alpha_change)
2550 		hws->funcs.update_mpcc(dc, pipe_ctx);
2551 
2552 	if (plane_state->update_flags.bits.full_update ||
2553 		plane_state->update_flags.bits.per_pixel_alpha_change ||
2554 		plane_state->update_flags.bits.global_alpha_change ||
2555 		plane_state->update_flags.bits.scaling_change ||
2556 		plane_state->update_flags.bits.position_change) {
2557 		update_scaler(pipe_ctx);
2558 	}
2559 
2560 	if (plane_state->update_flags.bits.full_update ||
2561 		plane_state->update_flags.bits.scaling_change ||
2562 		plane_state->update_flags.bits.position_change) {
2563 		hubp->funcs->mem_program_viewport(
2564 			hubp,
2565 			&pipe_ctx->plane_res.scl_data.viewport,
2566 			&pipe_ctx->plane_res.scl_data.viewport_c);
2567 	}
2568 
2569 	if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
2570 		dc->hwss.set_cursor_position(pipe_ctx);
2571 		dc->hwss.set_cursor_attribute(pipe_ctx);
2572 
2573 		if (dc->hwss.set_cursor_sdr_white_level)
2574 			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
2575 	}
2576 
2577 	if (plane_state->update_flags.bits.full_update) {
2578 		/*gamut remap*/
2579 		dc->hwss.program_gamut_remap(pipe_ctx);
2580 
2581 		dc->hwss.program_output_csc(dc,
2582 				pipe_ctx,
2583 				pipe_ctx->stream->output_color_space,
2584 				pipe_ctx->stream->csc_color_matrix.matrix,
2585 				pipe_ctx->stream_res.opp->inst);
2586 	}
2587 
2588 	if (plane_state->update_flags.bits.full_update ||
2589 		plane_state->update_flags.bits.pixel_format_change ||
2590 		plane_state->update_flags.bits.horizontal_mirror_change ||
2591 		plane_state->update_flags.bits.rotation_change ||
2592 		plane_state->update_flags.bits.swizzle_change ||
2593 		plane_state->update_flags.bits.dcc_change ||
2594 		plane_state->update_flags.bits.bpp_change ||
2595 		plane_state->update_flags.bits.scaling_change ||
2596 		plane_state->update_flags.bits.plane_size_change) {
2597 		hubp->funcs->hubp_program_surface_config(
2598 			hubp,
2599 			plane_state->format,
2600 			&plane_state->tiling_info,
2601 			&size,
2602 			plane_state->rotation,
2603 			&plane_state->dcc,
2604 			plane_state->horizontal_mirror,
2605 			compat_level);
2606 	}
2607 
2608 	hubp->power_gated = false;
2609 
2610 	hws->funcs.update_plane_addr(dc, pipe_ctx);
2611 
2612 	if (is_pipe_tree_visible(pipe_ctx))
2613 		hubp->funcs->set_blank(hubp, false);
2614 }
2615 
2616 void dcn10_blank_pixel_data(
2617 		struct dc *dc,
2618 		struct pipe_ctx *pipe_ctx,
2619 		bool blank)
2620 {
2621 	enum dc_color_space color_space;
2622 	struct tg_color black_color = {0};
2623 	struct stream_resource *stream_res = &pipe_ctx->stream_res;
2624 	struct dc_stream_state *stream = pipe_ctx->stream;
2625 
2626 	/* program otg blank color */
2627 	color_space = stream->output_color_space;
2628 	color_space_to_black_color(dc, color_space, &black_color);
2629 
2630 	/*
2631 	 * The way 420 is packed, 2 channels carry the Y component and 1 channel
2632 	 * alternates between Cb and Cr, so both channels need the pixel
2633 	 * value for Y.
2634 	 */
2635 	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2636 		black_color.color_r_cr = black_color.color_g_y;
2637 
2638 
2639 	if (stream_res->tg->funcs->set_blank_color)
2640 		stream_res->tg->funcs->set_blank_color(
2641 				stream_res->tg,
2642 				&black_color);
2643 
2644 	if (!blank) {
2645 		if (stream_res->tg->funcs->set_blank)
2646 			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2647 		if (stream_res->abm) {
2648 			dc->hwss.set_pipe(pipe_ctx);
2649 			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
2650 		}
2651 	} else {
2652 		dc->hwss.set_abm_immediate_disable(pipe_ctx);
2653 		if (stream_res->tg->funcs->set_blank) {
2654 			stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
2655 			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2656 		}
2657 	}
2658 }
2659 
2660 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2661 {
2662 	struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
2663 	uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
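	/* With the custom float format set up below (sign bit, 6 exponent bits,
	 * 12 mantissa bits), 0x1f000 presumably encodes 1.0: exponent field 0x1f
	 * (the bias) with a zero mantissa.
	 */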
2664 	struct custom_float_format fmt;
2665 
2666 	fmt.exponenta_bits = 6;
2667 	fmt.mantissa_bits = 12;
2668 	fmt.sign = true;
2669 
2670 
2671 	if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
2672 		convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2673 
2674 	pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2675 			pipe_ctx->plane_res.dpp, hw_mult);
2676 }
2677 
2678 void dcn10_program_pipe(
2679 		struct dc *dc,
2680 		struct pipe_ctx *pipe_ctx,
2681 		struct dc_state *context)
2682 {
2683 	struct dce_hwseq *hws = dc->hwseq;
2684 
2685 	if (pipe_ctx->plane_state->update_flags.bits.full_update)
2686 		dcn10_enable_plane(dc, pipe_ctx, context);
2687 
2688 	dcn10_update_dchubp_dpp(dc, pipe_ctx, context);
2689 
2690 	hws->funcs.set_hdr_multiplier(pipe_ctx);
2691 
2692 	if (pipe_ctx->plane_state->update_flags.bits.full_update ||
2693 			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
2694 			pipe_ctx->plane_state->update_flags.bits.gamma_change)
2695 		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
2696 
2697 	/* dcn10_translate_regamma_to_hw_format takes 750us to finish,
2698 	 * so only do gamma programming on a full update.
2699 	 * TODO: This can be further optimized/cleaned up.
2700 	 * Always call this for now, since it does a memcmp internally before
2701 	 * doing the heavy calculation and programming.
2702 	 */
2703 	if (pipe_ctx->plane_state->update_flags.bits.full_update)
2704 		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
2705 }
2706 
2707 static void dcn10_program_all_pipe_in_tree(
2708 		struct dc *dc,
2709 		struct pipe_ctx *pipe_ctx,
2710 		struct dc_state *context)
2711 {
2712 	struct dce_hwseq *hws = dc->hwseq;
2713 
2714 	if (pipe_ctx->top_pipe == NULL) {
2715 		bool blank = !is_pipe_tree_visible(pipe_ctx);
2716 
2717 		pipe_ctx->stream_res.tg->funcs->program_global_sync(
2718 				pipe_ctx->stream_res.tg,
2719 				pipe_ctx->pipe_dlg_param.vready_offset,
2720 				pipe_ctx->pipe_dlg_param.vstartup_start,
2721 				pipe_ctx->pipe_dlg_param.vupdate_offset,
2722 				pipe_ctx->pipe_dlg_param.vupdate_width);
2723 
2724 		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
2725 				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
2726 
2727 		if (hws->funcs.setup_vupdate_interrupt)
2728 			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
2729 
2730 		hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
2731 	}
2732 
2733 	if (pipe_ctx->plane_state != NULL)
2734 		hws->funcs.program_pipe(dc, pipe_ctx, context);
2735 
2736 	if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx)
2737 		dcn10_program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context);
2738 }
2739 
2740 static struct pipe_ctx *dcn10_find_top_pipe_for_stream(
2741 		struct dc *dc,
2742 		struct dc_state *context,
2743 		const struct dc_stream_state *stream)
2744 {
2745 	int i;
2746 
2747 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2748 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2749 		struct pipe_ctx *old_pipe_ctx =
2750 				&dc->current_state->res_ctx.pipe_ctx[i];
2751 
2752 		if (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state)
2753 			continue;
2754 
2755 		if (pipe_ctx->stream != stream)
2756 			continue;
2757 
2758 		if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe)
2759 			return pipe_ctx;
2760 	}
2761 	return NULL;
2762 }
2763 
2764 bool dcn10_disconnect_pipes(
2765 		struct dc *dc,
2766 		struct dc_state *context)
2767 {
2768 		bool found_pipe = false;
2769 		int i, j;
2770 		struct dce_hwseq *hws = dc->hwseq;
2771 		struct dc_state *old_ctx = dc->current_state;
2772 		bool mpcc_disconnected = false;
2773 		struct pipe_ctx *old_pipe;
2774 		struct pipe_ctx *new_pipe;
2775 		DC_LOGGER_INIT(dc->ctx->logger);
2776 
2777 		/* Set pipe update flags and lock pipes */
2778 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
2779 			old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
2780 			new_pipe = &context->res_ctx.pipe_ctx[i];
2781 			new_pipe->update_flags.raw = 0;
2782 
2783 			if (!old_pipe->plane_state && !new_pipe->plane_state)
2784 				continue;
2785 
2786 			if (old_pipe->plane_state && !new_pipe->plane_state)
2787 				new_pipe->update_flags.bits.disable = 1;
2788 
2789 			/* Check for scl update */
2790 			if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data)))
2791 					new_pipe->update_flags.bits.scaler = 1;
2792 
2793 			/* Check for vp update */
2794 			if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect))
2795 					|| memcmp(&old_pipe->plane_res.scl_data.viewport_c,
2796 						&new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect)))
2797 				new_pipe->update_flags.bits.viewport = 1;
2798 
2799 		}
2800 
2801 		if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
2802 			/* Disconnect mpcc here only if losing pipe split*/
2803 			for (i = 0; i < dc->res_pool->pipe_count; i++) {
2804 				if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable &&
2805 					old_ctx->res_ctx.pipe_ctx[i].top_pipe) {
2806 
2807 					/* Find the top pipe in the new ctx for the bottom pipe that we
2808 					 * want to remove by comparing the streams and planes. If both
2809 					 * pipes are being disabled then do it in the regular pipe
2810 					 * programming sequence
2811 					 */
2812 					for (j = 0; j < dc->res_pool->pipe_count; j++) {
2813 						if (old_ctx->res_ctx.pipe_ctx[i].top_pipe->stream == context->res_ctx.pipe_ctx[j].stream &&
2814 							old_ctx->res_ctx.pipe_ctx[i].top_pipe->plane_state == context->res_ctx.pipe_ctx[j].plane_state &&
2815 							!context->res_ctx.pipe_ctx[j].top_pipe &&
2816 							!context->res_ctx.pipe_ctx[j].update_flags.bits.disable) {
2817 							found_pipe = true;
2818 							break;
2819 						}
2820 					}
2821 
2822 					// Disconnect if the top pipe lost its pipe split
2823 					if (found_pipe && !context->res_ctx.pipe_ctx[j].bottom_pipe) {
2824 						hws->funcs.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
2825 						DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
2826 						mpcc_disconnected = true;
2827 					}
2828 				}
2829 				found_pipe = false;
2830 			}
2831 		}
2832 
2833 		if (mpcc_disconnected) {
2834 			for (i = 0; i < dc->res_pool->pipe_count; i++) {
2835 				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2836 				struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
2837 				struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2838 				struct hubp *hubp = pipe_ctx->plane_res.hubp;
2839 
2840 				if (!pipe_ctx || !plane_state || !pipe_ctx->stream)
2841 					continue;
2842 
2843 				// Only update scaler and viewport here if we lose a pipe split.
2844 				// This is to prevent half the screen from being black when we
2845 				// unlock after disconnecting MPCC.
2846 				if (!(old_pipe && !pipe_ctx->top_pipe &&
2847 					!pipe_ctx->bottom_pipe && old_pipe->bottom_pipe))
2848 					continue;
2849 
2850 				if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw) {
2851 					if (pipe_ctx->update_flags.bits.scaler ||
2852 						plane_state->update_flags.bits.scaling_change ||
2853 						plane_state->update_flags.bits.position_change ||
2854 						plane_state->update_flags.bits.per_pixel_alpha_change ||
2855 						pipe_ctx->stream->update_flags.bits.scaling) {
2856 
2857 						pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
2858 						ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_30BPP);
2859 						/* scaler configuration */
2860 						pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2861 						pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
2862 					}
2863 
2864 					if (pipe_ctx->update_flags.bits.viewport ||
2865 						(context == dc->current_state && plane_state->update_flags.bits.position_change) ||
2866 						(context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
2867 						(context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {
2868 
2869 						hubp->funcs->mem_program_viewport(
2870 							hubp,
2871 							&pipe_ctx->plane_res.scl_data.viewport,
2872 							&pipe_ctx->plane_res.scl_data.viewport_c);
2873 					}
2874 				}
2875 			}
2876 		}
2877 	return mpcc_disconnected;
2878 }
2879 
2880 void dcn10_wait_for_pending_cleared(struct dc *dc,
2881 		struct dc_state *context)
2882 {
2883 		struct pipe_ctx *pipe_ctx;
2884 		struct timing_generator *tg;
2885 		int i;
2886 
2887 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
2888 			pipe_ctx = &context->res_ctx.pipe_ctx[i];
2889 			tg = pipe_ctx->stream_res.tg;
2890 
2891 			/*
2892 			 * Only wait for the top pipe's TG pending bit.
2893 			 * Also skip if the pipe is disabled.
2894 			 */
2895 			if (pipe_ctx->top_pipe ||
2896 			    !pipe_ctx->stream || !pipe_ctx->plane_state ||
2897 			    !tg->funcs->is_tg_enabled(tg))
2898 				continue;
2899 
2900 			/*
2901 			 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
2902 			 * For some reason waiting for OTG_UPDATE_PENDING cleared
2903 			 * seems to not trigger the update right away, and if we
2904 			 * lock again before VUPDATE then we don't get a separate
2905 			 * operation.
2906 			 */
2907 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
2908 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
2909 		}
2910 }
2911 
2912 void dcn10_apply_ctx_for_surface(
2913 		struct dc *dc,
2914 		const struct dc_stream_state *stream,
2915 		int num_planes,
2916 		struct dc_state *context)
2917 {
2918 	struct dce_hwseq *hws = dc->hwseq;
2919 	int i;
2920 	struct timing_generator *tg;
2921 	uint32_t underflow_check_delay_us;
2922 	bool interdependent_update = false;
2923 	struct pipe_ctx *top_pipe_to_program =
2924 			dcn10_find_top_pipe_for_stream(dc, context, stream);
2925 	DC_LOGGER_INIT(dc->ctx->logger);
2926 
2927 	// Clear pipe_ctx flag
2928 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2929 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2930 		pipe_ctx->update_flags.raw = 0;
2931 	}
2932 
2933 	if (!top_pipe_to_program)
2934 		return;
2935 
2936 	tg = top_pipe_to_program->stream_res.tg;
2937 
2938 	interdependent_update = top_pipe_to_program->plane_state &&
2939 		top_pipe_to_program->plane_state->update_flags.bits.full_update;
2940 
2941 	underflow_check_delay_us = dc->debug.underflow_assert_delay_us;
2942 
2943 	if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur)
2944 		ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));
2945 
2946 	if (underflow_check_delay_us != 0xFFFFFFFF)
2947 		udelay(underflow_check_delay_us);
2948 
2949 	if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur)
2950 		ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));
2951 
2952 	if (num_planes == 0) {
2953 		/* OTG blank before removing all front ends */
2954 		hws->funcs.blank_pixel_data(dc, top_pipe_to_program, true);
2955 	}
2956 
2957 	/* Disconnect unused mpcc */
2958 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2959 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2960 		struct pipe_ctx *old_pipe_ctx =
2961 				&dc->current_state->res_ctx.pipe_ctx[i];
2962 
2963 		if ((!pipe_ctx->plane_state ||
2964 		     pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) &&
2965 		    old_pipe_ctx->plane_state &&
2966 		    old_pipe_ctx->stream_res.tg == tg) {
2967 
2968 			hws->funcs.plane_atomic_disconnect(dc, old_pipe_ctx);
2969 			pipe_ctx->update_flags.bits.disable = 1;
2970 
2971 			DC_LOG_DC("Reset mpcc for pipe %d\n",
2972 					old_pipe_ctx->pipe_idx);
2973 		}
2974 	}
2975 
2976 	if (num_planes > 0)
2977 		dcn10_program_all_pipe_in_tree(dc, top_pipe_to_program, context);
2978 
2979 	/* Program secondary blending tree and writeback pipes */
2980 	if ((stream->num_wb_info > 0) && (hws->funcs.program_all_writeback_pipes_in_tree))
2981 		hws->funcs.program_all_writeback_pipes_in_tree(dc, stream, context);
2982 	if (interdependent_update)
2983 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
2984 			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2985 			/* Skip inactive pipes and ones already updated */
2986 			if (!pipe_ctx->stream || pipe_ctx->stream == stream ||
2987 			    !pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg))
2988 				continue;
2989 
2990 			pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
2991 				pipe_ctx->plane_res.hubp,
2992 				&pipe_ctx->dlg_regs,
2993 				&pipe_ctx->ttu_regs);
2994 		}
2995 }
2996 
2997 void dcn10_post_unlock_program_front_end(
2998 		struct dc *dc,
2999 		struct dc_state *context)
3000 {
3001 	int i;
3002 
3003 	DC_LOGGER_INIT(dc->ctx->logger);
3004 
3005 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
3006 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3007 
3008 		if (!pipe_ctx->top_pipe &&
3009 			!pipe_ctx->prev_odm_pipe &&
3010 			pipe_ctx->stream) {
3011 			struct timing_generator *tg = pipe_ctx->stream_res.tg;
3012 
3013 			if (context->stream_status[i].plane_count == 0)
3014 				false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
3015 		}
3016 	}
3017 
3018 	for (i = 0; i < dc->res_pool->pipe_count; i++)
3019 		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
3020 			dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
3021 
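	/*
	 * optimize_bandwidth takes the whole context, so one call is enough
	 * once any pipe has been flagged for disable.
	 */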
3022 	for (i = 0; i < dc->res_pool->pipe_count; i++)
3023 		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
3024 			dc->hwss.optimize_bandwidth(dc, context);
3025 			break;
3026 		}
3027 
3028 	if (dc->hwseq->wa.DEGVIDCN10_254)
3029 		hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
3030 }
3031 
3032 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
3033 {
3034 	uint8_t i;
3035 
3036 	for (i = 0; i < context->stream_count; i++) {
3037 		if (context->streams[i]->timing.timing_3d_format
3038 				== TIMING_3D_FORMAT_HW_FRAME_PACKING) {
3039 			/*
3040 			 * Disable stutter
3041 			 */
3042 			hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
3043 			break;
3044 		}
3045 	}
3046 }
3047 
3048 void dcn10_prepare_bandwidth(
3049 		struct dc *dc,
3050 		struct dc_state *context)
3051 {
3052 	struct dce_hwseq *hws = dc->hwseq;
3053 	struct hubbub *hubbub = dc->res_pool->hubbub;
3054 
3055 	if (dc->debug.sanity_checks)
3056 		hws->funcs.verify_allow_pstate_change_high(dc);
3057 
3058 	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
3059 		if (context->stream_count == 0)
3060 			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
3061 
3062 		dc->clk_mgr->funcs->update_clocks(
3063 				dc->clk_mgr,
3064 				context,
3065 				false);
3066 	}
3067 
3068 	dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
3069 			&context->bw_ctx.bw.dcn.watermarks,
3070 			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
3071 			true);
3072 	dcn10_stereo_hw_frame_pack_wa(dc, context);
3073 
3074 	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
3075 		dcn_bw_notify_pplib_of_wm_ranges(dc);
3076 
3077 	if (dc->debug.sanity_checks)
3078 		hws->funcs.verify_allow_pstate_change_high(dc);
3079 }
3080 
3081 void dcn10_optimize_bandwidth(
3082 		struct dc *dc,
3083 		struct dc_state *context)
3084 {
3085 	struct dce_hwseq *hws = dc->hwseq;
3086 	struct hubbub *hubbub = dc->res_pool->hubbub;
3087 
3088 	if (dc->debug.sanity_checks)
3089 		hws->funcs.verify_allow_pstate_change_high(dc);
3090 
3091 	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
3092 		if (context->stream_count == 0)
3093 			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
3094 
3095 		dc->clk_mgr->funcs->update_clocks(
3096 				dc->clk_mgr,
3097 				context,
3098 				true);
3099 	}
3100 
3101 	hubbub->funcs->program_watermarks(hubbub,
3102 			&context->bw_ctx.bw.dcn.watermarks,
3103 			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
3104 			true);
3105 
3106 	dcn10_stereo_hw_frame_pack_wa(dc, context);
3107 
3108 	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
3109 		dcn_bw_notify_pplib_of_wm_ranges(dc);
3110 
3111 	if (dc->debug.sanity_checks)
3112 		hws->funcs.verify_allow_pstate_change_high(dc);
3113 }
3114 
3115 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
3116 		int num_pipes, unsigned int vmin, unsigned int vmax,
3117 		unsigned int vmid, unsigned int vmid_frame_number)
3118 {
3119 	int i = 0;
3120 	struct drr_params params = {0};
3121 	// DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
3122 	unsigned int event_triggers = 0x800;
3123 	// Note: DRR trigger events are generated regardless of whether the num_frames threshold is met.
3124 	unsigned int num_frames = 2;
3125 
3126 	params.vertical_total_max = vmax;
3127 	params.vertical_total_min = vmin;
3128 	params.vertical_total_mid = vmid;
3129 	params.vertical_total_mid_frame_num = vmid_frame_number;
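	/*
	 * Illustrative example with hypothetical values: for a 60 Hz mode
	 * with v_total = 1125, passing vmin = 1125 and vmax = 1687 lets the
	 * effective refresh rate float between roughly 40 Hz and 60 Hz;
	 * vmid/vmid_frame_number describe an optional intermediate point.
	 */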
3130 
3131 	/* TODO: Supporting multiple pipes requires GSL programming.
3132 	 * Static screen triggers may also need to be programmed
3133 	 * differently.
3134 	 */
3135 	for (i = 0; i < num_pipes; i++) {
3136 		pipe_ctx[i]->stream_res.tg->funcs->set_drr(
3137 			pipe_ctx[i]->stream_res.tg, &params);
3138 		if (vmax != 0 && vmin != 0)
3139 			pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
3140 					pipe_ctx[i]->stream_res.tg,
3141 					event_triggers, num_frames);
3142 	}
3143 }
3144 
3145 void dcn10_get_position(struct pipe_ctx **pipe_ctx,
3146 		int num_pipes,
3147 		struct crtc_position *position)
3148 {
3149 	int i = 0;
3150 
3151 	/* TODO: handle pipes > 1
3152 	 */
3153 	for (i = 0; i < num_pipes; i++)
3154 		pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
3155 }
3156 
3157 void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
3158 		int num_pipes, const struct dc_static_screen_params *params)
3159 {
3160 	unsigned int i;
3161 	unsigned int triggers = 0;
3162 
3163 	if (params->triggers.surface_update)
3164 		triggers |= 0x80;
3165 	if (params->triggers.cursor_update)
3166 		triggers |= 0x2;
3167 	if (params->triggers.force_trigger)
3168 		triggers |= 0x1;
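	/*
	 * For example, surface_update combined with force_trigger yields a
	 * trigger mask of 0x81 (0x80 | 0x1).
	 */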
3169 
3170 	for (i = 0; i < num_pipes; i++)
3171 		pipe_ctx[i]->stream_res.tg->funcs->
3172 			set_static_screen_control(pipe_ctx[i]->stream_res.tg,
3173 					triggers, params->num_frames);
3174 }
3175 
3176 static void dcn10_config_stereo_parameters(
3177 		struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
3178 {
3179 	enum view_3d_format view_format = stream->view_format;
3180 	enum dc_timing_3d_format timing_3d_format =\
3181 			stream->timing.timing_3d_format;
3182 	bool non_stereo_timing = false;
3183 
3184 	if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
3185 		timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
3186 		timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
3187 		non_stereo_timing = true;
3188 
3189 	if (non_stereo_timing == false &&
3190 		view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
3191 
3192 		flags->PROGRAM_STEREO         = 1;
3193 		flags->PROGRAM_POLARITY       = 1;
3194 		if (timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
3195 			timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
3196 			timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3197 			enum display_dongle_type dongle = \
3198 					stream->link->ddc->dongle_type;
3199 			if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
3200 				dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
3201 				dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
3202 				flags->DISABLE_STEREO_DP_SYNC = 1;
3203 		}
3204 		flags->RIGHT_EYE_POLARITY =\
3205 				stream->timing.flags.RIGHT_EYE_3D_POLARITY;
3206 		if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
3207 			flags->FRAME_PACKED = 1;
3208 	}
3209 
3210 	return;
3211 }
3212 
3213 void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
3214 {
3215 	struct crtc_stereo_flags flags = { 0 };
3216 	struct dc_stream_state *stream = pipe_ctx->stream;
3217 
3218 	dcn10_config_stereo_parameters(stream, &flags);
3219 
3220 	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3221 		if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
3222 			dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3223 	} else {
3224 		dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3225 	}
3226 
3227 	pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
3228 		pipe_ctx->stream_res.opp,
3229 		flags.PROGRAM_STEREO == 1,
3230 		&stream->timing);
3231 
3232 	pipe_ctx->stream_res.tg->funcs->program_stereo(
3233 		pipe_ctx->stream_res.tg,
3234 		&stream->timing,
3235 		&flags);
3236 
3237 	return;
3238 }
3239 
3240 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
3241 {
3242 	int i;
3243 
3244 	for (i = 0; i < res_pool->pipe_count; i++) {
3245 		if (res_pool->hubps[i]->inst == mpcc_inst)
3246 			return res_pool->hubps[i];
3247 	}
3248 	ASSERT(false);
3249 	return NULL;
3250 }
3251 
3252 void dcn10_wait_for_mpcc_disconnect(
3253 		struct dc *dc,
3254 		struct resource_pool *res_pool,
3255 		struct pipe_ctx *pipe_ctx)
3256 {
3257 	struct dce_hwseq *hws = dc->hwseq;
3258 	int mpcc_inst;
3259 
3260 	if (dc->debug.sanity_checks) {
3261 		hws->funcs.verify_allow_pstate_change_high(dc);
3262 	}
3263 
3264 	if (!pipe_ctx->stream_res.opp)
3265 		return;
3266 
3267 	for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
3268 		if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
3269 			struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
3270 
3271 			res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
3272 			pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
3273 			hubp->funcs->set_blank(hubp, true);
3274 		}
3275 	}
3276 
3277 	if (dc->debug.sanity_checks) {
3278 		hws->funcs.verify_allow_pstate_change_high(dc);
3279 	}
3280 
3281 }
3282 
3283 bool dcn10_dummy_display_power_gating(
3284 	struct dc *dc,
3285 	uint8_t controller_id,
3286 	struct dc_bios *dcb,
3287 	enum pipe_gating_control power_gating)
3288 {
3289 	return true;
3290 }
3291 
3292 void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
3293 {
3294 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
3295 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3296 	bool flip_pending;
3297 	struct dc *dc;
3298 
3299 	if (plane_state == NULL)
3300 		return;

	/* Only dereference plane_state after the NULL check above. */
	dc = plane_state->ctx->dc;
3301 
3302 	flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
3303 					pipe_ctx->plane_res.hubp);
3304 
3305 	plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
3306 
3307 	if (!flip_pending)
3308 		plane_state->status.current_address = plane_state->status.requested_address;
3309 
3310 	if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
3311 			tg->funcs->is_stereo_left_eye) {
3312 		plane_state->status.is_right_eye =
3313 				!tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
3314 	}
3315 
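	/*
	 * If the multi-plane-transition workaround disallowed self refresh,
	 * re-enable it (subject to the disable_stutter debug option) once
	 * the frame counter has advanced past the frame on which the
	 * workaround was applied.
	 */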
3316 	if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
3317 		struct dce_hwseq *hwseq = dc->hwseq;
3318 		struct timing_generator *tg = dc->res_pool->timing_generators[0];
3319 		unsigned int cur_frame = tg->funcs->get_frame_count(tg);
3320 
3321 		if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
3322 			struct hubbub *hubbub = dc->res_pool->hubbub;
3323 
3324 			hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
3325 			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
3326 		}
3327 	}
3328 }
3329 
3330 void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
3331 {
3332 	struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
3333 
3334 	/* In DCN, this programming sequence is owned by the hubbub */
3335 	hubbub->funcs->update_dchub(hubbub, dh_data);
3336 }
3337 
3338 static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
3339 {
3340 	struct pipe_ctx *test_pipe;
3341 	const struct rect *r1 = &pipe_ctx->plane_res.scl_data.recout, *r2;
3342 	int r1_r = r1->x + r1->width, r1_b = r1->y + r1->height, r2_r, r2_b;
3343 
3344 	/**
3345 	 * Disable the cursor if there's another pipe above this with a
3346 	 * plane that contains this pipe's viewport to prevent double cursor
3347 	 * and incorrect scaling artifacts.
3348 	 */
3349 	for (test_pipe = pipe_ctx->top_pipe; test_pipe;
3350 	     test_pipe = test_pipe->top_pipe) {
3351 		if (!test_pipe->plane_state || !test_pipe->plane_state->visible)
3352 			continue;
3353 
3354 		r2 = &test_pipe->plane_res.scl_data.recout;
3355 		r2_r = r2->x + r2->width;
3356 		r2_b = r2->y + r2->height;
3357 
3358 		if (r1->x >= r2->x && r1->y >= r2->y && r1_r <= r2_r && r1_b <= r2_b)
3359 			return true;
3360 	}
3361 
3362 	return false;
3363 }
3364 
3365 void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
3366 {
3367 	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
3368 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
3369 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
3370 	struct dc_cursor_mi_param param = {
3371 		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
3372 		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
3373 		.viewport = pipe_ctx->plane_res.scl_data.viewport,
3374 		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
3375 		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
3376 		.rotation = pipe_ctx->plane_state->rotation,
3377 		.mirror = pipe_ctx->plane_state->horizontal_mirror
3378 	};
3379 	bool pipe_split_on = (pipe_ctx->top_pipe != NULL) ||
3380 		(pipe_ctx->bottom_pipe != NULL);
3381 
3382 	int x_plane = pipe_ctx->plane_state->dst_rect.x;
3383 	int y_plane = pipe_ctx->plane_state->dst_rect.y;
3384 	int x_pos = pos_cpy.x;
3385 	int y_pos = pos_cpy.y;
3386 
3387 	/**
3388 	 * The DC cursor position is in stream space; the HW cursor is in
3389 	 * plane space and drawn as part of the framebuffer.
3390 	 *
3391 	 * Cursor position can't be negative, but hotspot can be used to
3392 	 * shift cursor out of the plane bounds. Hotspot must be smaller
3393 	 * than the cursor size.
3394 	 */
3395 
3396 	/**
3397 	 * Translate cursor from stream space to plane space.
3398 	 *
3399 	 * If the cursor is scaled then we need to scale the position
3400 	 * to be in the approximately correct place. We can't do anything
3401 	 * about the actual size being incorrect, that's a limitation of
3402 	 * the hardware.
3403 	 */
3404 	x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
3405 			pipe_ctx->plane_state->dst_rect.width;
3406 	y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
3407 			pipe_ctx->plane_state->dst_rect.height;
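	/*
	 * Hypothetical example: a plane with src_rect.width 3840 scaled into
	 * dst_rect.width 1920 doubles the cursor position, so a stream-space
	 * x of 100 relative to the plane start becomes 200 in plane space.
	 */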
3408 
3409 	/**
3410 	 * If the cursor's source viewport is clipped then we need to
3411 	 * translate the cursor to appear in the correct position on
3412 	 * the screen.
3413 	 *
3414 	 * This translation isn't affected by scaling so it needs to be
3415 	 * done *after* we adjust the position for the scale factor.
3416 	 *
3417 	 * This is opt-in only for now, since there are still some use
3418 	 * cases (e.g. tiled displays) that may enable the cursor on both
3419 	 * streams while expecting dc to clip it.
3420 	 */
3421 	if (pos_cpy.translate_by_source) {
3422 		x_pos += pipe_ctx->plane_state->src_rect.x;
3423 		y_pos += pipe_ctx->plane_state->src_rect.y;
3424 	}
3425 
3426 	/**
3427 	 * If the position is negative then we need to add to the hotspot
3428 	 * to shift the cursor outside the plane.
3429 	 */
3430 
3431 	if (x_pos < 0) {
3432 		pos_cpy.x_hotspot -= x_pos;
3433 		x_pos = 0;
3434 	}
3435 
3436 	if (y_pos < 0) {
3437 		pos_cpy.y_hotspot -= y_pos;
3438 		y_pos = 0;
3439 	}
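	/*
	 * Example: a requested x of -8 with an x hotspot of 4 becomes x = 0
	 * with an x hotspot of 12, drawing the cursor partially off the left
	 * edge of the plane.
	 */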
3440 
3441 	pos_cpy.x = (uint32_t)x_pos;
3442 	pos_cpy.y = (uint32_t)y_pos;
3443 
3444 	if (pipe_ctx->plane_state->address.type
3445 			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
3446 		pos_cpy.enable = false;
3447 
3448 	if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
3449 		pos_cpy.enable = false;
3450 
3451 	// Swap axis and mirror horizontally
3452 	if (param.rotation == ROTATION_ANGLE_90) {
3453 		uint32_t temp_x = pos_cpy.x;
3454 
3455 		pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
3456 				(pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
3457 		pos_cpy.y = temp_x;
3458 	}
3459 	// Swap axis and mirror vertically
3460 	else if (param.rotation == ROTATION_ANGLE_270) {
3461 		uint32_t temp_y = pos_cpy.y;
3462 		int viewport_height =
3463 			pipe_ctx->plane_res.scl_data.viewport.height;
3464 
3465 		if (pipe_split_on) {
3466 			if (pos_cpy.x > viewport_height) {
3467 				pos_cpy.x = pos_cpy.x - viewport_height;
3468 				pos_cpy.y = viewport_height - pos_cpy.x;
3469 			} else {
3470 				pos_cpy.y = 2 * viewport_height - pos_cpy.x;
3471 			}
3472 		} else
3473 			pos_cpy.y = viewport_height - pos_cpy.x;
3474 		pos_cpy.x = temp_y;
3475 	}
3476 	// Mirror horizontally and vertically
3477 	else if (param.rotation == ROTATION_ANGLE_180) {
3478 		int viewport_width =
3479 			pipe_ctx->plane_res.scl_data.viewport.width;
3480 		int viewport_x =
3481 			pipe_ctx->plane_res.scl_data.viewport.x;
3482 
3483 		if (pipe_split_on) {
3484 			if (pos_cpy.x >= viewport_width + viewport_x) {
3485 				pos_cpy.x = 2 * viewport_width
3486 						- pos_cpy.x + 2 * viewport_x;
3487 			} else {
3488 				uint32_t temp_x = pos_cpy.x;
3489 
3490 				pos_cpy.x = 2 * viewport_x - pos_cpy.x;
3491 				if (temp_x >= viewport_x +
3492 					(int)hubp->curs_attr.width || pos_cpy.x
3493 					<= (int)hubp->curs_attr.width +
3494 					pipe_ctx->plane_state->src_rect.x) {
3495 					pos_cpy.x = temp_x + viewport_width;
3496 				}
3497 			}
3498 		} else {
3499 			pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3500 		}
3501 		pos_cpy.y = pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
3502 	}
3503 
3504 	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
3505 	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
3506 }
3507 
3508 void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3509 {
3510 	struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3511 
3512 	pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3513 			pipe_ctx->plane_res.hubp, attributes);
3514 	pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3515 		pipe_ctx->plane_res.dpp, attributes);
3516 }
3517 
3518 void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
3519 {
3520 	uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3521 	struct fixed31_32 multiplier;
3522 	struct dpp_cursor_attributes opt_attr = { 0 };
3523 	uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3524 	struct custom_float_format fmt;
3525 
3526 	if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
3527 		return;
3528 
3529 	fmt.exponenta_bits = 5;
3530 	fmt.mantissa_bits = 10;
3531 	fmt.sign = true;
3532 
3533 	if (sdr_white_level > 80) {
3534 		multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
3535 		convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
3536 	}
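	/*
	 * hw_scale is a sign/5-bit-exponent/10-bit-mantissa (half precision)
	 * float, so 0x3c00 is 1.0. For example, an sdr_white_level of 160
	 * would give a 160/80 = 2.0 multiplier, which encodes as 0x4000.
	 */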
3537 
3538 	opt_attr.scale = hw_scale;
3539 	opt_attr.bias = 0;
3540 
3541 	pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3542 			pipe_ctx->plane_res.dpp, &opt_attr);
3543 }
3544 
3545 /*
3546  * apply_front_porch_workaround  TODO: is this still needed on FPGA?
3547  *
3548  * This is a workaround for a bug that has existed since R5xx and has not been
3549  * fixed: keep the front porch at a minimum of 2 for interlaced modes and 1 for progressive.
3550  */
3551 static void apply_front_porch_workaround(
3552 	struct dc_crtc_timing *timing)
3553 {
3554 	if (timing->flags.INTERLACE == 1) {
3555 		if (timing->v_front_porch < 2)
3556 			timing->v_front_porch = 2;
3557 	} else {
3558 		if (timing->v_front_porch < 1)
3559 			timing->v_front_porch = 1;
3560 	}
3561 }
3562 
3563 int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
3564 {
3565 	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3566 	struct dc_crtc_timing patched_crtc_timing;
3567 	int vesa_sync_start;
3568 	int asic_blank_end;
3569 	int interlace_factor;
3570 	int vertical_line_start;
3571 
3572 	patched_crtc_timing = *dc_crtc_timing;
3573 	apply_front_porch_workaround(&patched_crtc_timing);
3574 
3575 	interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
3576 
3577 	vesa_sync_start = patched_crtc_timing.v_addressable +
3578 			patched_crtc_timing.v_border_bottom +
3579 			patched_crtc_timing.v_front_porch;
3580 
3581 	asic_blank_end = (patched_crtc_timing.v_total -
3582 			vesa_sync_start -
3583 			patched_crtc_timing.v_border_top)
3584 			* interlace_factor;
3585 
3586 	vertical_line_start = asic_blank_end -
3587 			pipe_ctx->pipe_dlg_param.vstartup_start + 1;
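	/*
	 * The returned offset (asic_blank_end - vstartup_start + 1) is
	 * relative to VSYNC and can be negative when vstartup_start exceeds
	 * the blank width; callers either wrap it around v_total
	 * (dcn10_calc_vupdate_position) or assert and clamp it to 0
	 * (dcn10_setup_vupdate_interrupt).
	 */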
3588 
3589 	return vertical_line_start;
3590 }
3591 
3592 void dcn10_calc_vupdate_position(
3593 		struct dc *dc,
3594 		struct pipe_ctx *pipe_ctx,
3595 		uint32_t *start_line,
3596 		uint32_t *end_line)
3597 {
3598 	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3599 	int vline_int_offset_from_vupdate =
3600 			pipe_ctx->stream->periodic_interrupt0.lines_offset;
3601 	int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3602 	int start_position;
3603 
3604 	if (vline_int_offset_from_vupdate > 0)
3605 		vline_int_offset_from_vupdate--;
3606 	else if (vline_int_offset_from_vupdate < 0)
3607 		vline_int_offset_from_vupdate++;
3608 
3609 	start_position = vline_int_offset_from_vupdate + vupdate_offset_from_vsync;
3610 
3611 	if (start_position >= 0)
3612 		*start_line = start_position;
3613 	else
3614 		*start_line = dc_crtc_timing->v_total + start_position - 1;
3615 
3616 	*end_line = *start_line + 2;
3617 
3618 	if (*end_line >= dc_crtc_timing->v_total)
3619 		*end_line = 2;
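	/*
	 * Worked example with hypothetical values: v_total = 1125 and a
	 * combined offset of -10 give start_line = 1125 - 10 - 1 = 1114 and
	 * end_line = 1116; no wrap to 2 is needed since 1116 < 1125.
	 */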
3620 }
3621 
3622 static void dcn10_cal_vline_position(
3623 		struct dc *dc,
3624 		struct pipe_ctx *pipe_ctx,
3625 		enum vline_select vline,
3626 		uint32_t *start_line,
3627 		uint32_t *end_line)
3628 {
3629 	enum vertical_interrupt_ref_point ref_point = INVALID_POINT;
3630 
3631 	if (vline == VLINE0)
3632 		ref_point = pipe_ctx->stream->periodic_interrupt0.ref_point;
3633 	else if (vline == VLINE1)
3634 		ref_point = pipe_ctx->stream->periodic_interrupt1.ref_point;
3635 
3636 	switch (ref_point) {
3637 	case START_V_UPDATE:
3638 		dcn10_calc_vupdate_position(
3639 				dc,
3640 				pipe_ctx,
3641 				start_line,
3642 				end_line);
3643 		break;
3644 	case START_V_SYNC:
3645 		// Supposed to do nothing because VSYNC is at line 0
3646 		break;
3647 	default:
3648 		ASSERT(0);
3649 		break;
3650 	}
3651 }
3652 
3653 void dcn10_setup_periodic_interrupt(
3654 		struct dc *dc,
3655 		struct pipe_ctx *pipe_ctx,
3656 		enum vline_select vline)
3657 {
3658 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3659 
3660 	if (vline == VLINE0) {
3661 		uint32_t start_line = 0;
3662 		uint32_t end_line = 0;
3663 
3664 		dcn10_cal_vline_position(dc, pipe_ctx, vline, &start_line, &end_line);
3665 
3666 		tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3667 
3668 	} else if (vline == VLINE1) {
3669 		pipe_ctx->stream_res.tg->funcs->setup_vertical_interrupt1(
3670 				tg,
3671 				pipe_ctx->stream->periodic_interrupt1.lines_offset);
3672 	}
3673 }
3674 
3675 void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
3676 {
3677 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3678 	int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3679 
3680 	if (start_line < 0) {
3681 		ASSERT(0);
3682 		start_line = 0;
3683 	}
3684 
3685 	if (tg->funcs->setup_vertical_interrupt2)
3686 		tg->funcs->setup_vertical_interrupt2(tg, start_line);
3687 }
3688 
3689 void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3690 		struct dc_link_settings *link_settings)
3691 {
3692 	struct encoder_unblank_param params = { { 0 } };
3693 	struct dc_stream_state *stream = pipe_ctx->stream;
3694 	struct dc_link *link = stream->link;
3695 	struct dce_hwseq *hws = link->dc->hwseq;
3696 
3697 	/* only the parameters set below are used by unblank */
3698 	params.timing = pipe_ctx->stream->timing;
3699 
3700 	params.link_settings.link_rate = link_settings->link_rate;
3701 
3702 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3703 		if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3704 			params.timing.pix_clk_100hz /= 2;
3705 		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
3706 	}
3707 
3708 	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3709 		hws->funcs.edp_backlight_control(link, true);
3710 	}
3711 }
3712 
3713 void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
3714 				const uint8_t *custom_sdp_message,
3715 				unsigned int sdp_message_size)
3716 {
3717 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3718 		pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
3719 				pipe_ctx->stream_res.stream_enc,
3720 				custom_sdp_message,
3721 				sdp_message_size);
3722 	}
3723 }
3724 enum dc_status dcn10_set_clock(struct dc *dc,
3725 			enum dc_clock_type clock_type,
3726 			uint32_t clk_khz,
3727 			uint32_t stepping)
3728 {
3729 	struct dc_state *context = dc->current_state;
3730 	struct dc_clock_config clock_cfg = {0};
3731 	struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
3732 
3733 	/* Bail out early if the clock manager cannot report clock ranges. */
3734 	if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
3735 		return DC_FAIL_UNSUPPORTED_1;
3736 
3737 	dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
3738 			context, clock_type, &clock_cfg);
3739 
3740 	if (clk_khz > clock_cfg.max_clock_khz)
3741 		return DC_FAIL_CLK_EXCEED_MAX;
3742 
3743 	if (clk_khz < clock_cfg.min_clock_khz)
3744 		return DC_FAIL_CLK_BELOW_MIN;
3745 
3746 	if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
3747 		return DC_FAIL_CLK_BELOW_CFG_REQUIRED;
3748 
3749 	/* Update the internally requested clock so the following update_clocks call uses it */
3750 	if (clock_type == DC_CLOCK_TYPE_DISPCLK)
3751 		current_clocks->dispclk_khz = clk_khz;
3752 	else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
3753 		current_clocks->dppclk_khz = clk_khz;
3754 	else
3755 		return DC_ERROR_UNEXPECTED;
3756 
3757 	if (dc->clk_mgr && dc->clk_mgr->funcs->update_clocks)
3758 		dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
3759 				context, true);
3760 	return DC_OK;
3761 
3762 }
3763 
3764 void dcn10_get_clock(struct dc *dc,
3765 			enum dc_clock_type clock_type,
3766 			struct dc_clock_config *clock_cfg)
3767 {
3768 	struct dc_state *context = dc->current_state;
3769 
3770 	if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3771 		dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
3772 
3773 }
3774