1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "core_types.h"
29 #include "resource.h"
30 #include "custom_float.h"
31 #include "dcn10_hw_sequencer.h"
32 #include "dce110/dce110_hw_sequencer.h"
33 #include "dce/dce_hwseq.h"
34 #include "abm.h"
35 #include "dmcu.h"
36 #include "dcn10_optc.h"
37 #include "dcn10/dcn10_dpp.h"
38 #include "dcn10/dcn10_mpc.h"
39 #include "timing_generator.h"
40 #include "opp.h"
41 #include "ipp.h"
42 #include "mpc.h"
43 #include "reg_helper.h"
44 #include "dcn10_hubp.h"
45 #include "dcn10_hubbub.h"
46 #include "dcn10_cm_common.h"
47 #include "dc_link_dp.h"
48 #include "dccg.h"
49 #include "clk_mgr.h"
50 
51 
52 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
53 #include "dsc.h"
54 #endif
55 
56 #define DC_LOGGER_INIT(logger)
57 
58 #define CTX \
59 	hws->ctx
60 #define REG(reg)\
61 	hws->regs->reg
62 
63 #undef FN
64 #define FN(reg_name, field_name) \
65 	hws->shifts->field_name, hws->masks->field_name
66 
67 /* each printed field is 17 characters wide; the first two characters are spaces */
68 #define DTN_INFO_MICRO_SEC(ref_cycle) \
69 	print_microsec(dc_ctx, log_ctx, ref_cycle)
70 
71 void print_microsec(struct dc_context *dc_ctx,
72 	struct dc_log_buffer_ctx *log_ctx,
73 	uint32_t ref_cycle)
74 {
75 	const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
76 	static const unsigned int frac = 1000;
77 	uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
78 
79 	DTN_INFO("  %11d.%03d",
80 			us_x10 / frac,
81 			us_x10 % frac);
82 }
83 
84 static void log_mpc_crc(struct dc *dc,
85 	struct dc_log_buffer_ctx *log_ctx)
86 {
87 	struct dc_context *dc_ctx = dc->ctx;
88 	struct dce_hwseq *hws = dc->hwseq;
89 
90 	if (REG(MPC_CRC_RESULT_GB))
91 		DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
92 		REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
93 	if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
94 		DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
95 		REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
96 }
97 
98 void dcn10_log_hubbub_state(struct dc *dc, struct dc_log_buffer_ctx *log_ctx)
99 {
100 	struct dc_context *dc_ctx = dc->ctx;
101 	struct dcn_hubbub_wm wm;
102 	int i;
103 
104 	memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
105 	dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
106 
107 	DTN_INFO("HUBBUB WM:      data_urgent  pte_meta_urgent"
108 			"         sr_enter          sr_exit  dram_clk_change\n");
109 
110 	for (i = 0; i < 4; i++) {
111 		struct dcn_hubbub_wm_set *s;
112 
113 		s = &wm.sets[i];
114 		DTN_INFO("WM_Set[%d]:", s->wm_set);
115 		DTN_INFO_MICRO_SEC(s->data_urgent);
116 		DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
117 		DTN_INFO_MICRO_SEC(s->sr_enter);
118 		DTN_INFO_MICRO_SEC(s->sr_exit);
119 		DTN_INFO_MICRO_SEC(s->dram_clk_chanage);
120 		DTN_INFO("\n");
121 	}
122 
123 	DTN_INFO("\n");
124 }
125 
126 static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
127 {
128 	struct dc_context *dc_ctx = dc->ctx;
129 	struct resource_pool *pool = dc->res_pool;
130 	int i;
131 
132 	DTN_INFO("HUBP:  format  addr_hi  width  height"
133 			"  rot  mir  sw_mode  dcc_en  blank_en  ttu_dis  underflow"
134 			"   min_ttu_vblank       qos_low_wm      qos_high_wm\n");
135 	for (i = 0; i < pool->pipe_count; i++) {
136 		struct hubp *hubp = pool->hubps[i];
137 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
138 
139 		hubp->funcs->hubp_read_state(hubp);
140 
141 		if (!s->blank_en) {
142 			DTN_INFO("[%2d]:  %5xh  %6xh  %5d  %6d  %2xh  %2xh  %6xh"
143 					"  %6d  %8d  %7d  %8xh",
144 					hubp->inst,
145 					s->pixel_format,
146 					s->inuse_addr_hi,
147 					s->viewport_width,
148 					s->viewport_height,
149 					s->rotation_angle,
150 					s->h_mirror_en,
151 					s->sw_mode,
152 					s->dcc_en,
153 					s->blank_en,
154 					s->ttu_disable,
155 					s->underflow_status);
156 			DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
157 			DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
158 			DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
159 			DTN_INFO("\n");
160 		}
161 	}
162 
163 	DTN_INFO("\n=========RQ========\n");
164 	DTN_INFO("HUBP:  drq_exp_m  prq_exp_m  mrq_exp_m  crq_exp_m  plane1_ba  L:chunk_s  min_chu_s  meta_ch_s"
165 		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h  C:chunk_s  min_chu_s  meta_ch_s"
166 		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h\n");
167 	for (i = 0; i < pool->pipe_count; i++) {
168 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
169 		struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
170 
171 		if (!s->blank_en)
172 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
173 				pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
174 				rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
175 				rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
176 				rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
177 				rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
178 				rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
179 				rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
180 				rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
181 				rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
182 	}
183 
184 	DTN_INFO("========DLG========\n");
185 	DTN_INFO("HUBP:  rc_hbe     dlg_vbe    min_d_y_n  rc_per_ht  rc_x_a_s "
186 			"  dst_y_a_s  dst_y_pf   dst_y_vvb  dst_y_rvb  dst_y_vfl  dst_y_rfl  rf_pix_fq"
187 			"  vratio_pf  vrat_pf_c  rc_pg_vbl  rc_pg_vbc  rc_mc_vbl  rc_mc_vbc  rc_pg_fll"
188 			"  rc_pg_flc  rc_mc_fll  rc_mc_flc  pr_nom_l   pr_nom_c   rc_pg_nl   rc_pg_nc "
189 			"  mr_nom_l   mr_nom_c   rc_mc_nl   rc_mc_nc   rc_ld_pl   rc_ld_pc   rc_ld_l  "
190 			"  rc_ld_c    cha_cur0   ofst_cur1  cha_cur1   vr_af_vc0  ddrq_limt  x_rt_dlay"
191 			"  x_rp_dlay  x_rr_sfl\n");
192 	for (i = 0; i < pool->pipe_count; i++) {
193 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
194 		struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
195 
196 		if (!s->blank_en)
197 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
198 				"%  8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
199 				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
200 				pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
201 				dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
202 				dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
203 				dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
204 				dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
205 				dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
206 				dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
207 				dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
208 				dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
209 				dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
210 				dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
211 				dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
212 				dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
213 				dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
214 				dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
215 				dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
216 				dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
217 				dlg_regs->xfc_reg_remote_surface_flip_latency);
218 	}
219 
220 	DTN_INFO("========TTU========\n");
221 	DTN_INFO("HUBP:  qos_ll_wm  qos_lh_wm  mn_ttu_vb  qos_l_flp  rc_rd_p_l  rc_rd_l    rc_rd_p_c"
222 			"  rc_rd_c    rc_rd_c0   rc_rd_pc0  rc_rd_c1   rc_rd_pc1  qos_lf_l   qos_rds_l"
223 			"  qos_lf_c   qos_rds_c  qos_lf_c0  qos_rds_c0 qos_lf_c1  qos_rds_c1\n");
224 	for (i = 0; i < pool->pipe_count; i++) {
225 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
226 		struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
227 
228 		if (!s->blank_en)
229 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
230 				pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
231 				ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
232 				ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
233 				ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
234 				ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
235 				ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
236 				ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
237 	}
238 	DTN_INFO("\n");
239 }
240 
241 void dcn10_log_hw_state(struct dc *dc,
242 	struct dc_log_buffer_ctx *log_ctx)
243 {
244 	struct dc_context *dc_ctx = dc->ctx;
245 	struct resource_pool *pool = dc->res_pool;
246 	int i;
247 
248 	DTN_INFO_BEGIN();
249 
250 	dcn10_log_hubbub_state(dc, log_ctx);
251 
252 	dcn10_log_hubp_states(dc, log_ctx);
253 
254 	DTN_INFO("DPP:    IGAM format  IGAM mode    DGAM mode    RGAM mode"
255 			"  GAMUT mode  C11 C12   C13 C14   C21 C22   C23 C24   "
256 			"C31 C32   C33 C34\n");
257 	for (i = 0; i < pool->pipe_count; i++) {
258 		struct dpp *dpp = pool->dpps[i];
259 		struct dcn_dpp_state s = {0};
260 
261 		dpp->funcs->dpp_read_state(dpp, &s);
262 
263 		if (!s.is_enabled)
264 			continue;
265 
266 		DTN_INFO("[%2d]:  %11xh  %-11s  %-11s  %-11s"
267 				"%8x    %08xh %08xh %08xh %08xh %08xh %08xh",
268 				dpp->inst,
269 				s.igam_input_format,
270 				(s.igam_lut_mode == 0) ? "BypassFixed" :
271 					((s.igam_lut_mode == 1) ? "BypassFloat" :
272 					((s.igam_lut_mode == 2) ? "RAM" :
273 					((s.igam_lut_mode == 3) ? "RAM" :
274 								 "Unknown"))),
275 				(s.dgam_lut_mode == 0) ? "Bypass" :
276 					((s.dgam_lut_mode == 1) ? "sRGB" :
277 					((s.dgam_lut_mode == 2) ? "Ycc" :
278 					((s.dgam_lut_mode == 3) ? "RAM" :
279 					((s.dgam_lut_mode == 4) ? "RAM" :
280 								 "Unknown")))),
281 				(s.rgam_lut_mode == 0) ? "Bypass" :
282 					((s.rgam_lut_mode == 1) ? "sRGB" :
283 					((s.rgam_lut_mode == 2) ? "Ycc" :
284 					((s.rgam_lut_mode == 3) ? "RAM" :
285 					((s.rgam_lut_mode == 4) ? "RAM" :
286 								 "Unknown")))),
287 				s.gamut_remap_mode,
288 				s.gamut_remap_c11_c12,
289 				s.gamut_remap_c13_c14,
290 				s.gamut_remap_c21_c22,
291 				s.gamut_remap_c23_c24,
292 				s.gamut_remap_c31_c32,
293 				s.gamut_remap_c33_c34);
294 		DTN_INFO("\n");
295 	}
296 	DTN_INFO("\n");
297 
298 	DTN_INFO("MPCC:  OPP  DPP  MPCCBOT  MODE  ALPHA_MODE  PREMULT  OVERLAP_ONLY  IDLE\n");
299 	for (i = 0; i < pool->pipe_count; i++) {
300 		struct mpcc_state s = {0};
301 
302 		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
303 		if (s.opp_id != 0xf)
304 			DTN_INFO("[%2d]:  %2xh  %2xh  %6xh  %4d  %10d  %7d  %12d  %4d\n",
305 				i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
306 				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
307 				s.idle);
308 	}
309 	DTN_INFO("\n");
310 
311 	DTN_INFO("OTG:  v_bs  v_be  v_ss  v_se  vpol  vmax  vmin  vmax_sel  vmin_sel"
312 			"  h_bs  h_be  h_ss  h_se  hpol  htot  vtot  underflow\n");
313 
314 	for (i = 0; i < pool->timing_generator_count; i++) {
315 		struct timing_generator *tg = pool->timing_generators[i];
316 		struct dcn_otg_state s = {0};
317 
318 		optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
319 
320 		//only print if OTG master is enabled
321 		if ((s.otg_enabled & 1) == 0)
322 			continue;
323 
324 		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d"
325 				" %5d %5d %5d %5d  %9d\n",
326 				tg->inst,
327 				s.v_blank_start,
328 				s.v_blank_end,
329 				s.v_sync_a_start,
330 				s.v_sync_a_end,
331 				s.v_sync_a_pol,
332 				s.v_total_max,
333 				s.v_total_min,
334 				s.v_total_max_sel,
335 				s.v_total_min_sel,
336 				s.h_blank_start,
337 				s.h_blank_end,
338 				s.h_sync_a_start,
339 				s.h_sync_a_end,
340 				s.h_sync_a_pol,
341 				s.h_total,
342 				s.v_total,
343 				s.underflow_occurred_status);
344 
345 		// Clear underflow for debug purposes
346 		// We want to keep underflow sticky bit on for the longevity tests outside of test environment.
347 		// This function is called only from Windows or Diags test environment, hence it's safe to clear
348 		// it from here without affecting the original intent.
349 		tg->funcs->clear_optc_underflow(tg);
350 	}
351 	DTN_INFO("\n");
352 
353 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
354 	DTN_INFO("DSC: CLOCK_EN  SLICE_WIDTH  Bytes_pp\n");
355 	for (i = 0; i < pool->res_cap->num_dsc; i++) {
356 		struct display_stream_compressor *dsc = pool->dscs[i];
357 		struct dcn_dsc_state s = {0};
358 
359 		dsc->funcs->dsc_read_state(dsc, &s);
360 		DTN_INFO("[%d]: %-9d %-12d %-10d\n",
361 		dsc->inst,
362 			s.dsc_clock_en,
363 			s.dsc_slice_width,
364 			s.dsc_bytes_per_pixel);
365 		DTN_INFO("\n");
366 	}
367 	DTN_INFO("\n");
368 
369 	DTN_INFO("S_ENC: DSC_MODE  SEC_GSP7_LINE_NUM"
370 			"  VBID6_LINE_REFERENCE  VBID6_LINE_NUM  SEC_GSP7_ENABLE  SEC_STREAM_ENABLE\n");
371 	for (i = 0; i < pool->stream_enc_count; i++) {
372 		struct stream_encoder *enc = pool->stream_enc[i];
373 		struct enc_state s = {0};
374 
375 		if (enc->funcs->enc_read_state) {
376 			enc->funcs->enc_read_state(enc, &s);
377 			DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
378 				enc->id,
379 				s.dsc_mode,
380 				s.sec_gsp_pps_line_num,
381 				s.vbid6_line_reference,
382 				s.vbid6_line_num,
383 				s.sec_gsp_pps_enable,
384 				s.sec_stream_enable);
385 			DTN_INFO("\n");
386 		}
387 	}
388 	DTN_INFO("\n");
389 
390 	DTN_INFO("L_ENC: DPHY_FEC_EN  DPHY_FEC_READY_SHADOW  DPHY_FEC_ACTIVE_STATUS\n");
391 	for (i = 0; i < dc->link_count; i++) {
392 		struct link_encoder *lenc = dc->links[i]->link_enc;
393 
394 		struct link_enc_state s = {0};
395 
396 		if (lenc->funcs->read_state) {
397 			lenc->funcs->read_state(lenc, &s);
398 			DTN_INFO("[%-3d]: %-12d %-22d %-22d\n",
399 				i,
400 				s.dphy_fec_en,
401 				s.dphy_fec_ready_shadow,
402 				s.dphy_fec_active_status);
403 			DTN_INFO("\n");
404 		}
405 	}
406 	DTN_INFO("\n");
407 #endif
408 
409 	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d  dcfclk_deep_sleep_khz:%d  dispclk_khz:%d\n"
410 		"dppclk_khz:%d  max_supported_dppclk_khz:%d  fclk_khz:%d  socclk_khz:%d\n\n",
411 			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
412 			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
413 			dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
414 			dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
415 			dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
416 			dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
417 			dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
418 
419 	log_mpc_crc(dc, log_ctx);
420 
421 	DTN_INFO_END();
422 }
423 
424 bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
425 {
426 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
427 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
428 
429 	if (tg->funcs->is_optc_underflow_occurred(tg)) {
430 		tg->funcs->clear_optc_underflow(tg);
431 		return true;
432 	}
433 
434 	if (hubp->funcs->hubp_get_underflow_status(hubp)) {
435 		hubp->funcs->hubp_clear_underflow(hubp);
436 		return true;
437 	}
438 	return false;
439 }
440 
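/*
 * Enable or disable the ability to power gate the DCHUBP and DPP domains.
 * When @enable is false every domain is forced on, i.e. power gating is
 * effectively disabled.
 */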
441 static void dcn10_enable_power_gating_plane(
442 	struct dce_hwseq *hws,
443 	bool enable)
444 {
445 	bool force_on = true; /* disable power gating */
446 
447 	if (enable)
448 		force_on = false;
449 
450 	/* DCHUBP0/1/2/3 */
451 	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
452 	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
453 	REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
454 	REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
455 
456 	/* DPP0/1/2/3 */
457 	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
458 	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
459 	REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
460 	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
461 }
462 
463 static void dcn10_disable_vga(
464 	struct dce_hwseq *hws)
465 {
466 	unsigned int in_vga1_mode = 0;
467 	unsigned int in_vga2_mode = 0;
468 	unsigned int in_vga3_mode = 0;
469 	unsigned int in_vga4_mode = 0;
470 
471 	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
472 	REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
473 	REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
474 	REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
475 
476 	if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
477 			in_vga3_mode == 0 && in_vga4_mode == 0)
478 		return;
479 
480 	REG_WRITE(D1VGA_CONTROL, 0);
481 	REG_WRITE(D2VGA_CONTROL, 0);
482 	REG_WRITE(D3VGA_CONTROL, 0);
483 	REG_WRITE(D4VGA_CONTROL, 0);
484 
485 	/* HW Engineer's Notes:
486 	 *  During switch from vga->extended, if we set the VGA_TEST_ENABLE and
487 	 *  then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
488 	 *
489 	 *  Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
490 	 *  VGA_TEST_ENABLE, to leave it in the same state as before.
491 	 */
492 	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
493 	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
494 }
495 
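/*
 * Power gate or ungate a single DPP instance: program the POWER_GATE bit of
 * its PG domain and wait for the PGFSM power status to report the new state.
 */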
496 static void dcn10_dpp_pg_control(
497 		struct dce_hwseq *hws,
498 		unsigned int dpp_inst,
499 		bool power_on)
500 {
501 	uint32_t power_gate = power_on ? 0 : 1;
502 	uint32_t pwr_status = power_on ? 0 : 2;
503 
504 	if (hws->ctx->dc->debug.disable_dpp_power_gate)
505 		return;
506 	if (REG(DOMAIN1_PG_CONFIG) == 0)
507 		return;
508 
509 	switch (dpp_inst) {
510 	case 0: /* DPP0 */
511 		REG_UPDATE(DOMAIN1_PG_CONFIG,
512 				DOMAIN1_POWER_GATE, power_gate);
513 
514 		REG_WAIT(DOMAIN1_PG_STATUS,
515 				DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
516 				1, 1000);
517 		break;
518 	case 1: /* DPP1 */
519 		REG_UPDATE(DOMAIN3_PG_CONFIG,
520 				DOMAIN3_POWER_GATE, power_gate);
521 
522 		REG_WAIT(DOMAIN3_PG_STATUS,
523 				DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
524 				1, 1000);
525 		break;
526 	case 2: /* DPP2 */
527 		REG_UPDATE(DOMAIN5_PG_CONFIG,
528 				DOMAIN5_POWER_GATE, power_gate);
529 
530 		REG_WAIT(DOMAIN5_PG_STATUS,
531 				DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
532 				1, 1000);
533 		break;
534 	case 3: /* DPP3 */
535 		REG_UPDATE(DOMAIN7_PG_CONFIG,
536 				DOMAIN7_POWER_GATE, power_gate);
537 
538 		REG_WAIT(DOMAIN7_PG_STATUS,
539 				DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
540 				1, 1000);
541 		break;
542 	default:
543 		BREAK_TO_DEBUGGER();
544 		break;
545 	}
546 }
547 
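/*
 * Power gate or ungate a single DCHUBP instance: program the POWER_GATE bit
 * of its PG domain and wait for the PGFSM power status to report the new
 * state.
 */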
548 static void dcn10_hubp_pg_control(
549 		struct dce_hwseq *hws,
550 		unsigned int hubp_inst,
551 		bool power_on)
552 {
553 	uint32_t power_gate = power_on ? 0 : 1;
554 	uint32_t pwr_status = power_on ? 0 : 2;
555 
556 	if (hws->ctx->dc->debug.disable_hubp_power_gate)
557 		return;
558 	if (REG(DOMAIN0_PG_CONFIG) == 0)
559 		return;
560 
561 	switch (hubp_inst) {
562 	case 0: /* DCHUBP0 */
563 		REG_UPDATE(DOMAIN0_PG_CONFIG,
564 				DOMAIN0_POWER_GATE, power_gate);
565 
566 		REG_WAIT(DOMAIN0_PG_STATUS,
567 				DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
568 				1, 1000);
569 		break;
570 	case 1: /* DCHUBP1 */
571 		REG_UPDATE(DOMAIN2_PG_CONFIG,
572 				DOMAIN2_POWER_GATE, power_gate);
573 
574 		REG_WAIT(DOMAIN2_PG_STATUS,
575 				DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
576 				1, 1000);
577 		break;
578 	case 2: /* DCHUBP2 */
579 		REG_UPDATE(DOMAIN4_PG_CONFIG,
580 				DOMAIN4_POWER_GATE, power_gate);
581 
582 		REG_WAIT(DOMAIN4_PG_STATUS,
583 				DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
584 				1, 1000);
585 		break;
586 	case 3: /* DCHUBP3 */
587 		REG_UPDATE(DOMAIN6_PG_CONFIG,
588 				DOMAIN6_POWER_GATE, power_gate);
589 
590 		REG_WAIT(DOMAIN6_PG_STATUS,
591 				DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
592 				1, 1000);
593 		break;
594 	default:
595 		BREAK_TO_DEBUGGER();
596 		break;
597 	}
598 }
599 
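/*
 * Ungate the front end (DPP and HUBP) for a plane: raise IP_REQUEST_EN,
 * power both blocks up, then drop IP_REQUEST_EN again.
 */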
600 static void power_on_plane(
601 	struct dce_hwseq *hws,
602 	int plane_id)
603 {
604 	DC_LOGGER_INIT(hws->ctx->logger);
605 	if (REG(DC_IP_REQUEST_CNTL)) {
606 		REG_SET(DC_IP_REQUEST_CNTL, 0,
607 				IP_REQUEST_EN, 1);
608 		hws->ctx->dc->hwss.dpp_pg_control(hws, plane_id, true);
609 		hws->ctx->dc->hwss.hubp_pg_control(hws, plane_id, true);
610 		REG_SET(DC_IP_REQUEST_CNTL, 0,
611 				IP_REQUEST_EN, 0);
612 		DC_LOG_DEBUG(
613 				"Un-gated front end for pipe %d\n", plane_id);
614 	}
615 }
616 
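/* Undo the DEGVIDCN10_253 workaround: blank HUBP0 and power gate it again. */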
617 static void undo_DEGVIDCN10_253_wa(struct dc *dc)
618 {
619 	struct dce_hwseq *hws = dc->hwseq;
620 	struct hubp *hubp = dc->res_pool->hubps[0];
621 
622 	if (!hws->wa_state.DEGVIDCN10_253_applied)
623 		return;
624 
625 	hubp->funcs->set_blank(hubp, true);
626 
627 	REG_SET(DC_IP_REQUEST_CNTL, 0,
628 			IP_REQUEST_EN, 1);
629 
630 	dc->hwss.hubp_pg_control(hws, 0, false);
631 	REG_SET(DC_IP_REQUEST_CNTL, 0,
632 			IP_REQUEST_EN, 0);
633 
634 	hws->wa_state.DEGVIDCN10_253_applied = false;
635 }
636 
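/*
 * DEGVIDCN10_253 workaround: when every HUBP has been power gated, power
 * HUBP0 back up and un-blank it so that stutter can still be entered.
 */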
637 static void apply_DEGVIDCN10_253_wa(struct dc *dc)
638 {
639 	struct dce_hwseq *hws = dc->hwseq;
640 	struct hubp *hubp = dc->res_pool->hubps[0];
641 	int i;
642 
643 	if (dc->debug.disable_stutter)
644 		return;
645 
646 	if (!hws->wa.DEGVIDCN10_253)
647 		return;
648 
649 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
650 		if (!dc->res_pool->hubps[i]->power_gated)
651 			return;
652 	}
653 
654 	/* all pipe power gated, apply work around to enable stutter. */
655 
656 	REG_SET(DC_IP_REQUEST_CNTL, 0,
657 			IP_REQUEST_EN, 1);
658 
659 	dc->hwss.hubp_pg_control(hws, 0, true);
660 	REG_SET(DC_IP_REQUEST_CNTL, 0,
661 			IP_REQUEST_EN, 0);
662 
663 	hubp->funcs->set_hubp_blank_en(hubp, false);
664 	hws->wa_state.DEGVIDCN10_253_applied = true;
665 }
666 
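/*
 * VBIOS golden init: initialize DCN globally and disable each pipe through
 * the VBIOS command table, then restore the self-refresh setting if the
 * command table changed it (see the S0i3 workaround note below).
 */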
667 static void dcn10_bios_golden_init(struct dc *dc)
668 {
669 	struct dc_bios *bp = dc->ctx->dc_bios;
670 	int i;
671 	bool allow_self_refresh_force_enable = true;
672 
673 	if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
674 		allow_self_refresh_force_enable =
675 				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
676 
677 
678 	/* WA to let DF sleep when idle after resume from S0i3.
679 	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by the
680 	 * command table. If DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE was 0
681 	 * before the command table was called and changed to 1 afterwards,
682 	 * it should be set back to 0.
683 	 */
684 
685 	/* initialize dcn global */
686 	bp->funcs->enable_disp_power_gating(bp,
687 			CONTROLLER_ID_D0, ASIC_PIPE_INIT);
688 
689 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
690 		/* initialize dcn per pipe */
691 		bp->funcs->enable_disp_power_gating(bp,
692 				CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
693 	}
694 
695 	if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
696 		if (allow_self_refresh_force_enable == false &&
697 				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
698 			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, true);
699 
700 }
701 
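/*
 * Workaround for a false OPTC underflow: wait for pending MPCC disconnects on
 * the stream's pipes, re-enable blank data double buffering, and clear any
 * underflow status that this sequence itself raised.
 */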
702 static void false_optc_underflow_wa(
703 		struct dc *dc,
704 		const struct dc_stream_state *stream,
705 		struct timing_generator *tg)
706 {
707 	int i;
708 	bool underflow;
709 
710 	if (!dc->hwseq->wa.false_optc_underflow)
711 		return;
712 
713 	underflow = tg->funcs->is_optc_underflow_occurred(tg);
714 
715 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
716 		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
717 
718 		if (old_pipe_ctx->stream != stream)
719 			continue;
720 
721 		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
722 	}
723 
724 	if (tg->funcs->set_blank_data_double_buffer)
725 		tg->funcs->set_blank_data_double_buffer(tg, true);
726 
727 	if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
728 		tg->funcs->clear_optc_underflow(tg);
729 }
730 
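/*
 * Program the OTG for the stream: enable the OPTC clock, program the pixel
 * clock and timing, set the blank color, then enable the CRTC. Only the top
 * (parent) pipe programs the back end; child pipes share it.
 */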
731 static enum dc_status dcn10_enable_stream_timing(
732 		struct pipe_ctx *pipe_ctx,
733 		struct dc_state *context,
734 		struct dc *dc)
735 {
736 	struct dc_stream_state *stream = pipe_ctx->stream;
737 	enum dc_color_space color_space;
738 	struct tg_color black_color = {0};
739 
740 	/* The upper-level caller loop programs pipe 0, the parent pipe, first.
741 	 * The back end is set up for pipe 0 and shared by its child pipes,
742 	 * so no further programming is needed for them.
743 	 */
744 	if (pipe_ctx->top_pipe != NULL)
745 		return DC_OK;
746 
747 	/* TODO check if timing_changed, disable stream if timing changed */
748 
749 	/* The HW programming guide assumes the display was already disabled
750 	 * by the unplug sequence and that the OTG is stopped.
751 	 */
752 	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);
753 
754 	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
755 			pipe_ctx->clock_source,
756 			&pipe_ctx->stream_res.pix_clk_params,
757 			&pipe_ctx->pll_settings)) {
758 		BREAK_TO_DEBUGGER();
759 		return DC_ERROR_UNEXPECTED;
760 	}
761 
762 	pipe_ctx->stream_res.tg->funcs->program_timing(
763 			pipe_ctx->stream_res.tg,
764 			&stream->timing,
765 			pipe_ctx->pipe_dlg_param.vready_offset,
766 			pipe_ctx->pipe_dlg_param.vstartup_start,
767 			pipe_ctx->pipe_dlg_param.vupdate_offset,
768 			pipe_ctx->pipe_dlg_param.vupdate_width,
769 			pipe_ctx->stream->signal,
770 			true);
771 
772 #if 0 /* move to after enable_crtc */
773 	/* TODO: OPP FMT, ABM. etc. should be done here. */
774 	/* or FPGA now. instance 0 only. TODO: move to opp.c */
775 
776 	inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;
777 
778 	pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
779 				pipe_ctx->stream_res.opp,
780 				&stream->bit_depth_params,
781 				&stream->clamping);
782 #endif
783 	/* program otg blank color */
784 	color_space = stream->output_color_space;
785 	color_space_to_black_color(dc, color_space, &black_color);
786 
787 	if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
788 		pipe_ctx->stream_res.tg->funcs->set_blank_color(
789 				pipe_ctx->stream_res.tg,
790 				&black_color);
791 
792 	if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
793 			!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
794 		pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
795 		hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
796 		false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
797 	}
798 
799 	/* VTG is within the DCHUB command block. DCFCLK is always on. */
800 	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
801 		BREAK_TO_DEBUGGER();
802 		return DC_ERROR_UNEXPECTED;
803 	}
804 
805 	/* TODO program crtc source select for non-virtual signal*/
806 	/* TODO program FMT */
807 	/* TODO setup link_enc */
808 	/* TODO set stream attributes */
809 	/* TODO program audio */
810 	/* TODO enable stream if timing changed */
811 	/* TODO unblank stream if DP */
812 
813 	return DC_OK;
814 }
815 
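/*
 * Tear down the back end for a pipe: disable the stream and its audio
 * endpoint if needed and, for the parent pipe only, disable the CRTC, the
 * OPTC clock and DRR.
 */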
816 static void dcn10_reset_back_end_for_pipe(
817 		struct dc *dc,
818 		struct pipe_ctx *pipe_ctx,
819 		struct dc_state *context)
820 {
821 	int i;
822 	DC_LOGGER_INIT(dc->ctx->logger);
823 	if (pipe_ctx->stream_res.stream_enc == NULL) {
824 		pipe_ctx->stream = NULL;
825 		return;
826 	}
827 
828 	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
829 		/* DPMS may have already disabled the stream */
830 		if (!pipe_ctx->stream->dpms_off)
831 			core_link_disable_stream(pipe_ctx);
832 		else if (pipe_ctx->stream_res.audio)
833 			dc->hwss.disable_audio_stream(pipe_ctx);
834 
835 		if (pipe_ctx->stream_res.audio) {
836 			/*disable az_endpoint*/
837 			pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
838 
839 			/*free audio*/
840 			if (dc->caps.dynamic_audio == true) {
841 				/* we have to dynamically arbitrate the audio endpoints */
842 				/* free the resource; is_audio_acquired needs to be reset */
843 				update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
844 						pipe_ctx->stream_res.audio, false);
845 				pipe_ctx->stream_res.audio = NULL;
846 			}
847 		}
848 	}
849 
850 	/* The upper-level caller loop resets the parent pipe (pipe 0) last.
851 	 * The back end is shared by all pipes and is disabled only when the
852 	 * parent pipe is disabled.
853 	 */
854 	if (pipe_ctx->top_pipe == NULL) {
855 		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
856 
857 		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
858 		if (pipe_ctx->stream_res.tg->funcs->set_drr)
859 			pipe_ctx->stream_res.tg->funcs->set_drr(
860 					pipe_ctx->stream_res.tg, NULL);
861 	}
862 
863 	for (i = 0; i < dc->res_pool->pipe_count; i++)
864 		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
865 			break;
866 
867 	if (i == dc->res_pool->pipe_count)
868 		return;
869 
870 	pipe_ctx->stream = NULL;
871 	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
872 					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
873 }
874 
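/*
 * Forced recovery after a HUBP underflow: blank all HUBPs, soft-reset
 * DCHUBBUB, toggle HUBP_DISABLE, release the reset and un-blank again.
 * Returns true if the recovery sequence was executed.
 */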
875 static bool dcn10_hw_wa_force_recovery(struct dc *dc)
876 {
877 	struct hubp *hubp;
878 	unsigned int i;
879 	bool need_recover = false;
880 
881 	if (!dc->debug.recovery_enabled)
882 		return false;
883 
884 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
885 		struct pipe_ctx *pipe_ctx =
886 			&dc->current_state->res_ctx.pipe_ctx[i];
887 		if (pipe_ctx != NULL) {
888 			hubp = pipe_ctx->plane_res.hubp;
889 			if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
890 				if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
891 					/* one pipe underflow, we will reset all the pipes*/
892 					need_recover = true;
893 				}
894 			}
895 		}
896 	}
897 	if (!need_recover)
898 		return false;
899 	/*
900 	DCHUBP_CNTL:HUBP_BLANK_EN=1
901 	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
902 	DCHUBP_CNTL:HUBP_DISABLE=1
903 	DCHUBP_CNTL:HUBP_DISABLE=0
904 	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
905 	DCSURF_PRIMARY_SURFACE_ADDRESS
906 	DCHUBP_CNTL:HUBP_BLANK_EN=0
907 	*/
908 
909 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
910 		struct pipe_ctx *pipe_ctx =
911 			&dc->current_state->res_ctx.pipe_ctx[i];
912 		if (pipe_ctx != NULL) {
913 			hubp = pipe_ctx->plane_res.hubp;
914 			/*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
915 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
916 				hubp->funcs->set_hubp_blank_en(hubp, true);
917 		}
918 	}
919 	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
920 	hubbub1_soft_reset(dc->res_pool->hubbub, true);
921 
922 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
923 		struct pipe_ctx *pipe_ctx =
924 			&dc->current_state->res_ctx.pipe_ctx[i];
925 		if (pipe_ctx != NULL) {
926 			hubp = pipe_ctx->plane_res.hubp;
927 			/*DCHUBP_CNTL:HUBP_DISABLE=1*/
928 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
929 				hubp->funcs->hubp_disable_control(hubp, true);
930 		}
931 	}
932 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
933 		struct pipe_ctx *pipe_ctx =
934 			&dc->current_state->res_ctx.pipe_ctx[i];
935 		if (pipe_ctx != NULL) {
936 			hubp = pipe_ctx->plane_res.hubp;
937 			/*DCHUBP_CNTL:HUBP_DISABLE=0*/
938 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
939 				hubp->funcs->hubp_disable_control(hubp, false);
940 		}
941 	}
942 	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
943 	hubbub1_soft_reset(dc->res_pool->hubbub, false);
944 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
945 		struct pipe_ctx *pipe_ctx =
946 			&dc->current_state->res_ctx.pipe_ctx[i];
947 		if (pipe_ctx != NULL) {
948 			hubp = pipe_ctx->plane_res.hubp;
949 			/*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
950 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
951 				hubp->funcs->set_hubp_blank_en(hubp, false);
952 		}
953 	}
954 	return true;
955 
956 }
957 
958 
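/*
 * Verify that HUBBUB still allows the high p-state change; on failure,
 * optionally log the HW state, break to the debugger and attempt a forced
 * recovery.
 */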
959 void dcn10_verify_allow_pstate_change_high(struct dc *dc)
960 {
961 	static bool should_log_hw_state; /* prevent hw state log by default */
962 
963 	if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub)) {
964 		if (should_log_hw_state) {
965 			dcn10_log_hw_state(dc, NULL);
966 		}
967 		BREAK_TO_DEBUGGER();
968 		if (dcn10_hw_wa_force_recovery(dc)) {
969 		/*check again*/
970 			if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub))
971 				BREAK_TO_DEBUGGER();
972 		}
973 	}
974 }
975 
976 /* Trigger HW to start disconnecting the plane from the stream on the next vsync. */
977 void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
978 {
979 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
980 	int dpp_id = pipe_ctx->plane_res.dpp->inst;
981 	struct mpc *mpc = dc->res_pool->mpc;
982 	struct mpc_tree *mpc_tree_params;
983 	struct mpcc *mpcc_to_remove = NULL;
984 	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
985 
986 	mpc_tree_params = &(opp->mpc_tree_params);
987 	mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
988 
989 	/*Already reset*/
990 	if (mpcc_to_remove == NULL)
991 		return;
992 
993 	mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
994 	if (opp != NULL)
995 		opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
996 
997 	dc->optimized_required = true;
998 
999 	if (hubp->funcs->hubp_disconnect)
1000 		hubp->funcs->hubp_disconnect(hubp);
1001 
1002 	if (dc->debug.sanity_checks)
1003 		dcn10_verify_allow_pstate_change_high(dc);
1004 }
1005 
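/* Power gate the DPP and HUBP of a plane and reset the DPP. */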
1006 static void dcn10_plane_atomic_power_down(struct dc *dc,
1007 		struct dpp *dpp,
1008 		struct hubp *hubp)
1009 {
1010 	struct dce_hwseq *hws = dc->hwseq;
1011 	DC_LOGGER_INIT(dc->ctx->logger);
1012 
1013 	if (REG(DC_IP_REQUEST_CNTL)) {
1014 		REG_SET(DC_IP_REQUEST_CNTL, 0,
1015 				IP_REQUEST_EN, 1);
1016 		dc->hwss.dpp_pg_control(hws, dpp->inst, false);
1017 		dc->hwss.hubp_pg_control(hws, hubp->inst, false);
1018 		dpp->funcs->dpp_reset(dpp);
1019 		REG_SET(DC_IP_REQUEST_CNTL, 0,
1020 				IP_REQUEST_EN, 0);
1021 		DC_LOG_DEBUG(
1022 				"Power gated front end %d\n", hubp->inst);
1023 	}
1024 }
1025 
1026 /* Disable the HW used by the plane.
1027  * Note: this cannot be done until the disconnect is complete.
1028  */
1029 static void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
1030 {
1031 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
1032 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
1033 	int opp_id = hubp->opp_id;
1034 
1035 	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
1036 
1037 	hubp->funcs->hubp_clk_cntl(hubp, false);
1038 
1039 	dpp->funcs->dpp_dppclk_control(dpp, false, false);
1040 
1041 	if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
1042 		pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
1043 				pipe_ctx->stream_res.opp,
1044 				false);
1045 
1046 	hubp->power_gated = true;
1047 	dc->optimized_required = false; /* We're powering off, no need to optimize */
1048 
1049 	dc->hwss.plane_atomic_power_down(dc,
1050 			pipe_ctx->plane_res.dpp,
1051 			pipe_ctx->plane_res.hubp);
1052 
1053 	pipe_ctx->stream = NULL;
1054 	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
1055 	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
1056 	pipe_ctx->top_pipe = NULL;
1057 	pipe_ctx->bottom_pipe = NULL;
1058 	pipe_ctx->plane_state = NULL;
1059 }
1060 
1061 static void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
1062 {
1063 	DC_LOGGER_INIT(dc->ctx->logger);
1064 
1065 	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1066 		return;
1067 
1068 	dc->hwss.plane_atomic_disable(dc, pipe_ctx);
1069 
1070 	apply_DEGVIDCN10_253_wa(dc);
1071 
1072 	DC_LOG_DC("Power down front end %d\n",
1073 					pipe_ctx->pipe_idx);
1074 }
1075 
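/*
 * Bring the pipes to a known state at init: blank and lock the enabled TGs,
 * reset the MPC muxes, then disconnect and disable every front end that is
 * not needed for seamless boot before re-initializing each TG.
 */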
1076 static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
1077 {
1078 	int i;
1079 	bool can_apply_seamless_boot = false;
1080 
1081 	for (i = 0; i < context->stream_count; i++) {
1082 		if (context->streams[i]->apply_seamless_boot_optimization) {
1083 			can_apply_seamless_boot = true;
1084 			break;
1085 		}
1086 	}
1087 
1088 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1089 		struct timing_generator *tg = dc->res_pool->timing_generators[i];
1090 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1091 
1092 		/* We assume pipe_ctx does not map irregularly to a non-preferred
1093 		 * front end. If pipe_ctx->stream is not NULL, the pipe will be
1094 		 * used, so don't disable it.
1095 		 */
1096 		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1097 			continue;
1098 
1099 		/* Blank controller using driver code instead of
1100 		 * command table.
1101 		 */
1102 		if (tg->funcs->is_tg_enabled(tg)) {
1103 			if (dc->hwss.init_blank != NULL) {
1104 				dc->hwss.init_blank(dc, tg);
1105 				tg->funcs->lock(tg);
1106 			} else {
1107 				tg->funcs->lock(tg);
1108 				tg->funcs->set_blank(tg, true);
1109 				hwss_wait_for_blank_complete(tg);
1110 			}
1111 		}
1112 	}
1113 
1114 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1115 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1116 
1117 		/* Cannot reset the MPC mux if seamless boot */
1118 		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1119 			continue;
1120 
1121 		dc->res_pool->mpc->funcs->mpc_init_single_inst(
1122 				dc->res_pool->mpc, i);
1123 	}
1124 
1125 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1126 		struct timing_generator *tg = dc->res_pool->timing_generators[i];
1127 		struct hubp *hubp = dc->res_pool->hubps[i];
1128 		struct dpp *dpp = dc->res_pool->dpps[i];
1129 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1130 
1131 		/* We assume pipe_ctx does not map irregularly to a non-preferred
1132 		 * front end. If pipe_ctx->stream is not NULL, the pipe will be
1133 		 * used, so don't disable it.
1134 		 */
1135 		if (can_apply_seamless_boot &&
1136 			pipe_ctx->stream != NULL &&
1137 			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
1138 				pipe_ctx->stream_res.tg))
1139 			continue;
1140 
1141 		/* Disable on the current state so the new one isn't cleared. */
1142 		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1143 
1144 		dpp->funcs->dpp_reset(dpp);
1145 
1146 		pipe_ctx->stream_res.tg = tg;
1147 		pipe_ctx->pipe_idx = i;
1148 
1149 		pipe_ctx->plane_res.hubp = hubp;
1150 		pipe_ctx->plane_res.dpp = dpp;
1151 		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
1152 		hubp->mpcc_id = dpp->inst;
1153 		hubp->opp_id = OPP_ID_INVALID;
1154 		hubp->power_gated = false;
1155 
1156 		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
1157 		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
1158 		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1159 		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
1160 
1161 		dc->hwss.plane_atomic_disconnect(dc, pipe_ctx);
1162 
1163 		if (tg->funcs->is_tg_enabled(tg))
1164 			tg->funcs->unlock(tg);
1165 
1166 		dc->hwss.disable_plane(dc, pipe_ctx);
1167 
1168 		pipe_ctx->stream_res.tg = NULL;
1169 		pipe_ctx->plane_res.hubp = NULL;
1170 
1171 		tg->funcs->tg_init(tg);
1172 	}
1173 }
1174 
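/*
 * One-time HW init: initialize clocks and DCCG, take over from VGA/VBIOS,
 * derive the reference clocks, init the link encoders, power gate the DSCs,
 * init pipes, audio, ABM and DMCU, and finally enable clock gating and plane
 * power gating.
 */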
1175 static void dcn10_init_hw(struct dc *dc)
1176 {
1177 	int i;
1178 	struct abm *abm = dc->res_pool->abm;
1179 	struct dmcu *dmcu = dc->res_pool->dmcu;
1180 	struct dce_hwseq *hws = dc->hwseq;
1181 	struct dc_bios *dcb = dc->ctx->dc_bios;
1182 	struct resource_pool *res_pool = dc->res_pool;
1183 
1184 	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
1185 		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
1186 
1187 	// Initialize the dccg
1188 	if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
1189 		dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);
1190 
1191 	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1192 
1193 		REG_WRITE(REFCLK_CNTL, 0);
1194 		REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
1195 		REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1196 
1197 		if (!dc->debug.disable_clock_gate) {
1198 			/* enable all DCN clock gating */
1199 			REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1200 
1201 			REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1202 
1203 			REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1204 		}
1205 
1206 		//Enable ability to power gate / don't force power on permanently
1207 		dc->hwss.enable_power_gating_plane(hws, true);
1208 
1209 		return;
1210 	}
1211 
1212 	if (!dcb->funcs->is_accelerated_mode(dcb))
1213 		dc->hwss.disable_vga(dc->hwseq);
1214 
1215 	dc->hwss.bios_golden_init(dc);
1216 	if (dc->ctx->dc_bios->fw_info_valid) {
1217 		res_pool->ref_clocks.xtalin_clock_inKhz =
1218 				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
1219 
1220 		if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1221 			if (res_pool->dccg && res_pool->hubbub) {
1222 
1223 				(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
1224 						dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
1225 						&res_pool->ref_clocks.dccg_ref_clock_inKhz);
1226 
1227 				(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
1228 						res_pool->ref_clocks.dccg_ref_clock_inKhz,
1229 						&res_pool->ref_clocks.dchub_ref_clock_inKhz);
1230 			} else {
1231 				// Not all ASICs have DCCG sw component
1232 				res_pool->ref_clocks.dccg_ref_clock_inKhz =
1233 						res_pool->ref_clocks.xtalin_clock_inKhz;
1234 				res_pool->ref_clocks.dchub_ref_clock_inKhz =
1235 						res_pool->ref_clocks.xtalin_clock_inKhz;
1236 			}
1237 		}
1238 	} else
1239 		ASSERT_CRITICAL(false);
1240 
1241 	for (i = 0; i < dc->link_count; i++) {
1242 		/* Power up AND update implementation according to the
1243 		 * required signal (which may be different from the
1244 		 * default signal on connector).
1245 		 */
1246 		struct dc_link *link = dc->links[i];
1247 
1248 		link->link_enc->funcs->hw_init(link->link_enc);
1249 
1250 		/* Check for enabled DIG to identify enabled display */
1251 		if (link->link_enc->funcs->is_dig_enabled &&
1252 			link->link_enc->funcs->is_dig_enabled(link->link_enc))
1253 			link->link_status.link_active = true;
1254 	}
1255 
1256 	/* Power gate DSCs */
1257 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
1258 	for (i = 0; i < res_pool->res_cap->num_dsc; i++)
1259 		if (dc->hwss.dsc_pg_control != NULL)
1260 			dc->hwss.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
1261 #endif
1262 
1263 	/* If taking control over from VBIOS, we may want to optimize our first
1264 	 * mode set, so we need to skip powering down pipes until we know which
1265 	 * pipes we want to use.
1266 	 * Otherwise, if taking control is not possible, we need to power
1267 	 * everything down.
1268 	 */
1269 	if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
1270 		dc->hwss.init_pipes(dc, dc->current_state);
1271 	}
1272 
1273 	for (i = 0; i < res_pool->audio_count; i++) {
1274 		struct audio *audio = res_pool->audios[i];
1275 
1276 		audio->funcs->hw_init(audio);
1277 	}
1278 
1279 	if (abm != NULL) {
1280 		abm->funcs->init_backlight(abm);
1281 		abm->funcs->abm_init(abm);
1282 	}
1283 
1284 	if (dmcu != NULL)
1285 		dmcu->funcs->dmcu_init(dmcu);
1286 
1287 	if (abm != NULL && dmcu != NULL)
1288 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1289 
1290 	/* Power the AFMT HDMI memory. TODO: may move to output enable/disable to save power. */
1291 	REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1292 
1293 	if (!dc->debug.disable_clock_gate) {
1294 		/* enable all DCN clock gating */
1295 		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1296 
1297 		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1298 
1299 		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1300 	}
1301 
1302 	dc->hwss.enable_power_gating_plane(dc->hwseq, true);
1303 }
1304 
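/*
 * Reset the back end of every top pipe whose stream was removed or needs to
 * be reprogrammed in the new context, and power down its old clock source.
 */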
1305 static void dcn10_reset_hw_ctx_wrap(
1306 		struct dc *dc,
1307 		struct dc_state *context)
1308 {
1309 	int i;
1310 
1311 	/* Reset Back End*/
1312 	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
1313 		struct pipe_ctx *pipe_ctx_old =
1314 			&dc->current_state->res_ctx.pipe_ctx[i];
1315 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1316 
1317 		if (!pipe_ctx_old->stream)
1318 			continue;
1319 
1320 		if (pipe_ctx_old->top_pipe)
1321 			continue;
1322 
1323 		if (!pipe_ctx->stream ||
1324 				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
1325 			struct clock_source *old_clk = pipe_ctx_old->clock_source;
1326 
1327 			dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
1328 			if (dc->hwss.enable_stream_gating)
1329 				dc->hwss.enable_stream_gating(dc, pipe_ctx);
1330 			if (old_clk)
1331 				old_clk->funcs->cs_power_down(old_clk);
1332 		}
1333 	}
1334 }
1335 
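/*
 * For side-by-side / top-and-bottom stereo, the secondary split pipe must
 * scan out the right-eye address. Returns true (with the original left
 * address saved in *addr) when the address was patched and must be restored
 * after the flip is programmed.
 */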
1336 static bool patch_address_for_sbs_tb_stereo(
1337 		struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1338 {
1339 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1340 	bool sec_split = pipe_ctx->top_pipe &&
1341 			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
1342 	if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1343 		(pipe_ctx->stream->timing.timing_3d_format ==
1344 		 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1345 		 pipe_ctx->stream->timing.timing_3d_format ==
1346 		 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
1347 		*addr = plane_state->address.grph_stereo.left_addr;
1348 		plane_state->address.grph_stereo.left_addr =
1349 		plane_state->address.grph_stereo.right_addr;
1350 		return true;
1351 	} else {
1352 		if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1353 			plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1354 			plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1355 			plane_state->address.grph_stereo.right_addr =
1356 			plane_state->address.grph_stereo.left_addr;
1357 		}
1358 	}
1359 	return false;
1360 }
1361 
1362 
1363 
1364 static void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
1365 {
1366 	bool addr_patched = false;
1367 	PHYSICAL_ADDRESS_LOC addr;
1368 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1369 
1370 	if (plane_state == NULL)
1371 		return;
1372 
1373 	addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
1374 
1375 	pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
1376 			pipe_ctx->plane_res.hubp,
1377 			&plane_state->address,
1378 			plane_state->flip_immediate);
1379 
1380 	plane_state->status.requested_address = plane_state->address;
1381 
1382 	if (plane_state->flip_immediate)
1383 		plane_state->status.current_address = plane_state->address;
1384 
1385 	if (addr_patched)
1386 		pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
1387 }
1388 
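/*
 * Program the DPP input (degamma) path: an input LUT when a usable gamma
 * correction is provided, a predefined HW curve for sRGB/BT.709/linear, or a
 * translated PWL for custom curves. Returns false for unsupported transfer
 * functions such as PQ.
 */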
1389 static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
1390 					  const struct dc_plane_state *plane_state)
1391 {
1392 	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
1393 	const struct dc_transfer_func *tf = NULL;
1394 	bool result = true;
1395 
1396 	if (dpp_base == NULL)
1397 		return false;
1398 
1399 	if (plane_state->in_transfer_func)
1400 		tf = plane_state->in_transfer_func;
1401 
1402 	if (plane_state->gamma_correction &&
1403 		!dpp_base->ctx->dc->debug.always_use_regamma
1404 		&& !plane_state->gamma_correction->is_identity
1405 			&& dce_use_lut(plane_state->format))
1406 		dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
1407 
1408 	if (tf == NULL)
1409 		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1410 	else if (tf->type == TF_TYPE_PREDEFINED) {
1411 		switch (tf->tf) {
1412 		case TRANSFER_FUNCTION_SRGB:
1413 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
1414 			break;
1415 		case TRANSFER_FUNCTION_BT709:
1416 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
1417 			break;
1418 		case TRANSFER_FUNCTION_LINEAR:
1419 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1420 			break;
1421 		case TRANSFER_FUNCTION_PQ:
1422 		default:
1423 			result = false;
1424 			break;
1425 		}
1426 	} else if (tf->type == TF_TYPE_BYPASS) {
1427 		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1428 	} else {
1429 		cm_helper_translate_curve_to_degamma_hw_format(tf,
1430 					&dpp_base->degamma_params);
1431 		dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
1432 				&dpp_base->degamma_params);
1433 		result = true;
1434 	}
1435 
1436 	return result;
1437 }
1438 
1439 #define MAX_NUM_HW_POINTS 0x200
1440 
1441 static void log_tf(struct dc_context *ctx,
1442 				struct dc_transfer_func *tf, uint32_t hw_points_num)
1443 {
1444 	// DC_LOG_GAMMA is default logging of all hw points
1445 	// DC_LOG_ALL_GAMMA logs all points, not only hw points
1446 	// DC_LOG_ALL_TF_POINTS logs all channels of the tf
1447 	int i = 0;
1448 
1449 	DC_LOGGER_INIT(ctx->logger);
1450 	DC_LOG_GAMMA("Gamma Correction TF");
1451 	DC_LOG_ALL_GAMMA("Logging all tf points...");
1452 	DC_LOG_ALL_TF_CHANNELS("Logging all channels...");
1453 
1454 	for (i = 0; i < hw_points_num; i++) {
1455 		DC_LOG_GAMMA("R\t%d\t%llu\n", i, tf->tf_pts.red[i].value);
1456 		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu\n", i, tf->tf_pts.green[i].value);
1457 		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu\n", i, tf->tf_pts.blue[i].value);
1458 	}
1459 
1460 	for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
1461 		DC_LOG_ALL_GAMMA("R\t%d\t%llu\n", i, tf->tf_pts.red[i].value);
1462 		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu\n", i, tf->tf_pts.green[i].value);
1463 		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu\n", i, tf->tf_pts.blue[i].value);
1464 	}
1465 }
1466 
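/*
 * Program the DPP output (regamma) path: the predefined sRGB curve when the
 * stream asks for it, otherwise a PWL translated from the stream's output
 * transfer function, falling back to bypass.
 */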
1467 static bool
1468 dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
1469 			       const struct dc_stream_state *stream)
1470 {
1471 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
1472 
1473 	if (dpp == NULL)
1474 		return false;
1475 
1476 	dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
1477 
1478 	if (stream->out_transfer_func &&
1479 	    stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
1480 	    stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
1481 		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
1482 
1483 	/* dcn10_translate_regamma_to_hw_format takes 750us; only do it on a
1484 	 * full update.
1485 	 */
1486 	else if (cm_helper_translate_curve_to_hw_format(
1487 			stream->out_transfer_func,
1488 			&dpp->regamma_params, false)) {
1489 		dpp->funcs->dpp_program_regamma_pwl(
1490 				dpp,
1491 				&dpp->regamma_params, OPP_REGAMMA_USER);
1492 	} else
1493 		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
1494 
1495 	if (stream != NULL && stream->ctx != NULL &&
1496 			stream->out_transfer_func != NULL) {
1497 		log_tf(stream->ctx,
1498 				stream->out_transfer_func,
1499 				dpp->regamma_params.hw_points_num);
1500 	}
1501 
1502 	return true;
1503 }
1504 
1505 static void dcn10_pipe_control_lock(
1506 	struct dc *dc,
1507 	struct pipe_ctx *pipe,
1508 	bool lock)
1509 {
1510 	/* Use the TG master update lock to lock everything on the TG,
1511 	 * so only the top pipe needs to take the lock.
1512 	 */
1513 	if (pipe->top_pipe)
1514 		return;
1515 
1516 	if (dc->debug.sanity_checks)
1517 		dcn10_verify_allow_pstate_change_high(dc);
1518 
1519 	if (lock)
1520 		pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
1521 	else
1522 		pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
1523 
1524 	if (dc->debug.sanity_checks)
1525 		dcn10_verify_allow_pstate_change_high(dc);
1526 }
1527 
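/*
 * Poll the TG for up to frames_to_wait_on_triggered_reset frames for the
 * triggered reset to occur. Returns true if the reset was observed in time.
 */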
1528 static bool wait_for_reset_trigger_to_occur(
1529 	struct dc_context *dc_ctx,
1530 	struct timing_generator *tg)
1531 {
1532 	bool rc = false;
1533 
1534 	/* To avoid an endless loop, wait at most
1535 	 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
1536 	const uint32_t frames_to_wait_on_triggered_reset = 10;
1537 	int i;
1538 
1539 	for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
1540 
1541 		if (!tg->funcs->is_counter_moving(tg)) {
1542 			DC_ERROR("TG counter is not moving!\n");
1543 			break;
1544 		}
1545 
1546 		if (tg->funcs->did_triggered_reset_occur(tg)) {
1547 			rc = true;
1548 			/* usually occurs at i=1 */
1549 			DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
1550 					i);
1551 			break;
1552 		}
1553 
1554 		/* Wait for one frame. */
1555 		tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
1556 		tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
1557 	}
1558 
1559 	if (false == rc)
1560 		DC_ERROR("GSL: Timeout on reset trigger!\n");
1561 
1562 	return rc;
1563 }
1564 
1565 static void dcn10_enable_timing_synchronization(
1566 	struct dc *dc,
1567 	int group_index,
1568 	int group_size,
1569 	struct pipe_ctx *grouped_pipes[])
1570 {
1571 	struct dc_context *dc_ctx = dc->ctx;
1572 	int i;
1573 
1574 	DC_SYNC_INFO("Setting up OTG reset trigger\n");
1575 
1576 	for (i = 1; i < group_size; i++)
1577 		grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
1578 				grouped_pipes[i]->stream_res.tg,
1579 				grouped_pipes[0]->stream_res.tg->inst);
1580 
1581 	DC_SYNC_INFO("Waiting for trigger\n");
1582 
1583 	/* Only need to check one pipe for the reset, as all the others are
1584 	 * synchronized to it. Look at the last pipe programmed to reset.
1585 	 */
1586 
1587 	wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
1588 	for (i = 1; i < group_size; i++)
1589 		grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
1590 				grouped_pipes[i]->stream_res.tg);
1591 
1592 	DC_SYNC_INFO("Sync complete\n");
1593 }
1594 
1595 static void dcn10_enable_per_frame_crtc_position_reset(
1596 	struct dc *dc,
1597 	int group_size,
1598 	struct pipe_ctx *grouped_pipes[])
1599 {
1600 	struct dc_context *dc_ctx = dc->ctx;
1601 	int i;
1602 
1603 	DC_SYNC_INFO("Setting up\n");
1604 	for (i = 0; i < group_size; i++)
1605 		if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
1606 			grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
1607 					grouped_pipes[i]->stream_res.tg,
1608 					0,
1609 					&grouped_pipes[i]->stream->triggered_crtc_reset);
1610 
1611 	DC_SYNC_INFO("Waiting for trigger\n");
1612 
1613 	for (i = 0; i < group_size; i++)
1614 		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
1615 
1616 	DC_SYNC_INFO("Multi-display sync is complete\n");
1617 }
1618 
1619 /*static void print_rq_dlg_ttu(
1620 		struct dc *core_dc,
1621 		struct pipe_ctx *pipe_ctx)
1622 {
1623 	DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
1624 			"\n============== DML TTU Output parameters [%d] ==============\n"
1625 			"qos_level_low_wm: %d, \n"
1626 			"qos_level_high_wm: %d, \n"
1627 			"min_ttu_vblank: %d, \n"
1628 			"qos_level_flip: %d, \n"
1629 			"refcyc_per_req_delivery_l: %d, \n"
1630 			"qos_level_fixed_l: %d, \n"
1631 			"qos_ramp_disable_l: %d, \n"
1632 			"refcyc_per_req_delivery_pre_l: %d, \n"
1633 			"refcyc_per_req_delivery_c: %d, \n"
1634 			"qos_level_fixed_c: %d, \n"
1635 			"qos_ramp_disable_c: %d, \n"
1636 			"refcyc_per_req_delivery_pre_c: %d\n"
1637 			"=============================================================\n",
1638 			pipe_ctx->pipe_idx,
1639 			pipe_ctx->ttu_regs.qos_level_low_wm,
1640 			pipe_ctx->ttu_regs.qos_level_high_wm,
1641 			pipe_ctx->ttu_regs.min_ttu_vblank,
1642 			pipe_ctx->ttu_regs.qos_level_flip,
1643 			pipe_ctx->ttu_regs.refcyc_per_req_delivery_l,
1644 			pipe_ctx->ttu_regs.qos_level_fixed_l,
1645 			pipe_ctx->ttu_regs.qos_ramp_disable_l,
1646 			pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_l,
1647 			pipe_ctx->ttu_regs.refcyc_per_req_delivery_c,
1648 			pipe_ctx->ttu_regs.qos_level_fixed_c,
1649 			pipe_ctx->ttu_regs.qos_ramp_disable_c,
1650 			pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_c
1651 			);
1652 
1653 	DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
1654 			"\n============== DML DLG Output parameters [%d] ==============\n"
1655 			"refcyc_h_blank_end: %d, \n"
1656 			"dlg_vblank_end: %d, \n"
1657 			"min_dst_y_next_start: %d, \n"
1658 			"refcyc_per_htotal: %d, \n"
1659 			"refcyc_x_after_scaler: %d, \n"
1660 			"dst_y_after_scaler: %d, \n"
1661 			"dst_y_prefetch: %d, \n"
1662 			"dst_y_per_vm_vblank: %d, \n"
1663 			"dst_y_per_row_vblank: %d, \n"
1664 			"ref_freq_to_pix_freq: %d, \n"
1665 			"vratio_prefetch: %d, \n"
1666 			"refcyc_per_pte_group_vblank_l: %d, \n"
1667 			"refcyc_per_meta_chunk_vblank_l: %d, \n"
1668 			"dst_y_per_pte_row_nom_l: %d, \n"
1669 			"refcyc_per_pte_group_nom_l: %d, \n",
1670 			pipe_ctx->pipe_idx,
1671 			pipe_ctx->dlg_regs.refcyc_h_blank_end,
1672 			pipe_ctx->dlg_regs.dlg_vblank_end,
1673 			pipe_ctx->dlg_regs.min_dst_y_next_start,
1674 			pipe_ctx->dlg_regs.refcyc_per_htotal,
1675 			pipe_ctx->dlg_regs.refcyc_x_after_scaler,
1676 			pipe_ctx->dlg_regs.dst_y_after_scaler,
1677 			pipe_ctx->dlg_regs.dst_y_prefetch,
1678 			pipe_ctx->dlg_regs.dst_y_per_vm_vblank,
1679 			pipe_ctx->dlg_regs.dst_y_per_row_vblank,
1680 			pipe_ctx->dlg_regs.ref_freq_to_pix_freq,
1681 			pipe_ctx->dlg_regs.vratio_prefetch,
1682 			pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_l,
1683 			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_l,
1684 			pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_l,
1685 			pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_l
1686 			);
1687 
1688 	DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
1689 			"\ndst_y_per_meta_row_nom_l: %d, \n"
1690 			"refcyc_per_meta_chunk_nom_l: %d, \n"
1691 			"refcyc_per_line_delivery_pre_l: %d, \n"
1692 			"refcyc_per_line_delivery_l: %d, \n"
1693 			"vratio_prefetch_c: %d, \n"
1694 			"refcyc_per_pte_group_vblank_c: %d, \n"
1695 			"refcyc_per_meta_chunk_vblank_c: %d, \n"
1696 			"dst_y_per_pte_row_nom_c: %d, \n"
1697 			"refcyc_per_pte_group_nom_c: %d, \n"
1698 			"dst_y_per_meta_row_nom_c: %d, \n"
1699 			"refcyc_per_meta_chunk_nom_c: %d, \n"
1700 			"refcyc_per_line_delivery_pre_c: %d, \n"
1701 			"refcyc_per_line_delivery_c: %d \n"
1702 			"========================================================\n",
1703 			pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_l,
1704 			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_l,
1705 			pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_l,
1706 			pipe_ctx->dlg_regs.refcyc_per_line_delivery_l,
1707 			pipe_ctx->dlg_regs.vratio_prefetch_c,
1708 			pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_c,
1709 			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_c,
1710 			pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_c,
1711 			pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_c,
1712 			pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_c,
1713 			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_c,
1714 			pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_c,
1715 			pipe_ctx->dlg_regs.refcyc_per_line_delivery_c
1716 			);
1717 
1718 	DC_LOG_BANDWIDTH_CALCS(core_dc->ctx->logger,
1719 			"\n============== DML RQ Output parameters [%d] ==============\n"
1720 			"chunk_size: %d \n"
1721 			"min_chunk_size: %d \n"
1722 			"meta_chunk_size: %d \n"
1723 			"min_meta_chunk_size: %d \n"
1724 			"dpte_group_size: %d \n"
1725 			"mpte_group_size: %d \n"
1726 			"swath_height: %d \n"
1727 			"pte_row_height_linear: %d \n"
1728 			"========================================================\n",
1729 			pipe_ctx->pipe_idx,
1730 			pipe_ctx->rq_regs.rq_regs_l.chunk_size,
1731 			pipe_ctx->rq_regs.rq_regs_l.min_chunk_size,
1732 			pipe_ctx->rq_regs.rq_regs_l.meta_chunk_size,
1733 			pipe_ctx->rq_regs.rq_regs_l.min_meta_chunk_size,
1734 			pipe_ctx->rq_regs.rq_regs_l.dpte_group_size,
1735 			pipe_ctx->rq_regs.rq_regs_l.mpte_group_size,
1736 			pipe_ctx->rq_regs.rq_regs_l.swath_height,
1737 			pipe_ctx->rq_regs.rq_regs_l.pte_row_height_linear
1738 			);
1739 }
1740 */
1741 
1742 static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
1743 		struct vm_system_aperture_param *apt,
1744 		struct dce_hwseq *hws)
1745 {
1746 	PHYSICAL_ADDRESS_LOC physical_page_number;
1747 	uint32_t logical_addr_low;
1748 	uint32_t logical_addr_high;
1749 
1750 	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
1751 			PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
1752 	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
1753 			PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);
1754 
1755 	REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1756 			LOGICAL_ADDR, &logical_addr_low);
1757 
1758 	REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1759 			LOGICAL_ADDR, &logical_addr_high);
1760 
1761 	apt->sys_default.quad_part =  physical_page_number.quad_part << 12;
1762 	apt->sys_low.quad_part =  (int64_t)logical_addr_low << 18;
1763 	apt->sys_high.quad_part =  (int64_t)logical_addr_high << 18;
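
	/* Note on the shifts above (stated as assumptions, for clarity):
	 * PHYSICAL_PAGE_NUMBER appears to be a 4 KiB page number, hence the
	 * << 12 to a byte address, while LOGICAL_ADDR appears to be kept in
	 * 256 KiB units, hence the << 18. For example a LOGICAL_ADDR of 0x10
	 * would correspond to 0x10 << 18 = 0x400000 (4 MiB).
	 */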
1764 }
1765 
1766 /* Temporarily read the settings from registers; in the future the values will come from the KMD directly */
1767 static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
1768 		struct vm_context0_param *vm0,
1769 		struct dce_hwseq *hws)
1770 {
1771 	PHYSICAL_ADDRESS_LOC fb_base;
1772 	PHYSICAL_ADDRESS_LOC fb_offset;
1773 	uint32_t fb_base_value;
1774 	uint32_t fb_offset_value;
1775 
1776 	REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
1777 	REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);
1778 
1779 	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
1780 			PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
1781 	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
1782 			PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);
1783 
1784 	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
1785 			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
1786 	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
1787 			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);
1788 
1789 	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
1790 			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
1791 	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
1792 			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);
1793 
1794 	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
1795 			PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
1796 	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
1797 			PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);
1798 
1799 	/*
1800 	 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR are in UMA space.
1801 	 * Therefore we need to do
1802 	 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
1803 	 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
1804 	 */
1805 	fb_base.quad_part = (uint64_t)fb_base_value << 24;
1806 	fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
1807 	vm0->pte_base.quad_part += fb_base.quad_part;
1808 	vm0->pte_base.quad_part -= fb_offset.quad_part;
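
	/* Worked example of the rebase above, with illustrative register
	 * values only: SDPIF_FB_BASE = 0xF4 and SDPIF_FB_OFFSET = 0 give
	 * fb_base = 0xF4 << 24 = 0xF4000000 and fb_offset = 0, so the page
	 * table base is simply shifted up by 0xF4000000, translating it from
	 * UMA space into the frame buffer view described in the comment.
	 */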
1809 }
1810 
1811 
1812 void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
1813 {
1814 	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
1815 	struct vm_system_aperture_param apt = { {{ 0 } } };
1816 	struct vm_context0_param vm0 = { { { 0 } } };
1817 
1818 	mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
1819 	mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
1820 
1821 	hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
1822 	hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
1823 }
1824 
1825 static void dcn10_enable_plane(
1826 	struct dc *dc,
1827 	struct pipe_ctx *pipe_ctx,
1828 	struct dc_state *context)
1829 {
1830 	struct dce_hwseq *hws = dc->hwseq;
1831 
1832 	if (dc->debug.sanity_checks) {
1833 		dcn10_verify_allow_pstate_change_high(dc);
1834 	}
1835 
1836 	undo_DEGVIDCN10_253_wa(dc);
1837 
1838 	power_on_plane(dc->hwseq,
1839 		pipe_ctx->plane_res.hubp->inst);
1840 
1841 	/* enable DCFCLK current DCHUB */
1842 	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
1843 
1844 	/* make sure OPP_PIPE_CLOCK_EN = 1 */
1845 	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
1846 			pipe_ctx->stream_res.opp,
1847 			true);
1848 
1849 /* TODO: enable/disable in dm as per update type.
1850 	if (plane_state) {
1851 		DC_LOG_DC(dc->ctx->logger,
1852 				"Pipe:%d 0x%x: addr hi:0x%x, "
1853 				"addr low:0x%x, "
1854 				"src: %d, %d, %d,"
1855 				" %d; dst: %d, %d, %d, %d;\n",
1856 				pipe_ctx->pipe_idx,
1857 				plane_state,
1858 				plane_state->address.grph.addr.high_part,
1859 				plane_state->address.grph.addr.low_part,
1860 				plane_state->src_rect.x,
1861 				plane_state->src_rect.y,
1862 				plane_state->src_rect.width,
1863 				plane_state->src_rect.height,
1864 				plane_state->dst_rect.x,
1865 				plane_state->dst_rect.y,
1866 				plane_state->dst_rect.width,
1867 				plane_state->dst_rect.height);
1868 
1869 		DC_LOG_DC(dc->ctx->logger,
1870 				"Pipe %d: width, height, x, y         format:%d\n"
1871 				"viewport:%d, %d, %d, %d\n"
1872 				"recout:  %d, %d, %d, %d\n",
1873 				pipe_ctx->pipe_idx,
1874 				plane_state->format,
1875 				pipe_ctx->plane_res.scl_data.viewport.width,
1876 				pipe_ctx->plane_res.scl_data.viewport.height,
1877 				pipe_ctx->plane_res.scl_data.viewport.x,
1878 				pipe_ctx->plane_res.scl_data.viewport.y,
1879 				pipe_ctx->plane_res.scl_data.recout.width,
1880 				pipe_ctx->plane_res.scl_data.recout.height,
1881 				pipe_ctx->plane_res.scl_data.recout.x,
1882 				pipe_ctx->plane_res.scl_data.recout.y);
1883 		print_rq_dlg_ttu(dc, pipe_ctx);
1884 	}
1885 */
1886 	if (dc->config.gpu_vm_support)
1887 		dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);
1888 
1889 	if (dc->debug.sanity_checks) {
1890 		dcn10_verify_allow_pstate_change_high(dc);
1891 	}
1892 }
1893 
1894 static void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
1895 {
1896 	int i = 0;
1897 	struct dpp_grph_csc_adjustment adjust;
1898 	memset(&adjust, 0, sizeof(adjust));
1899 	adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
1900 
1901 
1902 	if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
1903 		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
1904 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
1905 			adjust.temperature_matrix[i] =
1906 				pipe_ctx->stream->gamut_remap_matrix.matrix[i];
1907 	}
1908 
1909 	pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
1910 }
1911 
1912 
1913 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
1914 {
1915 	if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
1916 		if (pipe_ctx->top_pipe) {
1917 			struct pipe_ctx *top = pipe_ctx->top_pipe;
1918 
1919 			while (top->top_pipe)
1920 				top = top->top_pipe; // Traverse to top pipe_ctx
1921 			if (top->plane_state && top->plane_state->layer_index == 0)
1922 				return true; // Front MPO plane not hidden
1923 		}
1924 	}
1925 	return false;
1926 }
1927 
1928 static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
1929 {
1930 	// Override rear plane RGB bias to fix MPO brightness
1931 	uint16_t rgb_bias = matrix[3];
1932 
1933 	matrix[3] = 0;
1934 	matrix[7] = 0;
1935 	matrix[11] = 0;
1936 	pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
1937 	matrix[3] = rgb_bias;
1938 	matrix[7] = rgb_bias;
1939 	matrix[11] = rgb_bias;
1940 }
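
/* Layout assumed by the fix above (a conventional 3x4 fixed-point CSC, which
 * is consistent with the "matrix[3/7/11]" note in the caller below):
 *
 *   R' = m[0]*R + m[1]*G + m[2]*B  + m[3]
 *   G' = m[4]*R + m[5]*G + m[6]*B  + m[7]
 *   B' = m[8]*R + m[9]*G + m[10]*B + m[11]
 *
 * Zeroing m[3]/m[7]/m[11] therefore strips only the per-channel bias terms
 * while leaving the gains untouched, and the bias is written back into the
 * caller's matrix afterwards.
 */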
1941 
1942 static void dcn10_program_output_csc(struct dc *dc,
1943 		struct pipe_ctx *pipe_ctx,
1944 		enum dc_color_space colorspace,
1945 		uint16_t *matrix,
1946 		int opp_id)
1947 {
1948 	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
1949 		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {
1950 
1951 			/* MPO is broken with RGB color spaces when the OCSC matrix
1952 			 * brightness offset is >= 0 on DCN1, since the OCSC sits before
1953 			 * the MPC: blending adds the offsets of front + rear onto the rear plane.
1954 			 *
1955 			 * The fix is to set the RGB bias to 0 on the rear plane; the top
1956 			 * plane's black-value pixels then add the offset once instead of rear + front.
1957 			 */
1958 
1959 			int16_t rgb_bias = matrix[3];
1960 			// matrix[3/7/11] are all the same offset value
1961 
1962 			if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
1963 				dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
1964 			} else {
1965 				pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
1966 			}
1967 		}
1968 	} else {
1969 		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
1970 			pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
1971 	}
1972 }
1973 
1974 bool is_lower_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
1975 {
1976 	if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
1977 		return true;
1978 	if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe))
1979 		return true;
1980 	return false;
1981 }
1982 
1983 bool is_upper_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
1984 {
1985 	if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
1986 		return true;
1987 	if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe))
1988 		return true;
1989 	return false;
1990 }
1991 
1992 bool is_pipe_tree_visible(struct pipe_ctx *pipe_ctx)
1993 {
1994 	if (pipe_ctx->plane_state && pipe_ctx->plane_state->visible)
1995 		return true;
1996 	if (pipe_ctx->top_pipe && is_upper_pipe_tree_visible(pipe_ctx->top_pipe))
1997 		return true;
1998 	if (pipe_ctx->bottom_pipe && is_lower_pipe_tree_visible(pipe_ctx->bottom_pipe))
1999 		return true;
2000 	return false;
2001 }
2002 
2003 bool is_rgb_cspace(enum dc_color_space output_color_space)
2004 {
2005 	switch (output_color_space) {
2006 	case COLOR_SPACE_SRGB:
2007 	case COLOR_SPACE_SRGB_LIMITED:
2008 	case COLOR_SPACE_2020_RGB_FULLRANGE:
2009 	case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
2010 	case COLOR_SPACE_ADOBERGB:
2011 		return true;
2012 	case COLOR_SPACE_YCBCR601:
2013 	case COLOR_SPACE_YCBCR709:
2014 	case COLOR_SPACE_YCBCR601_LIMITED:
2015 	case COLOR_SPACE_YCBCR709_LIMITED:
2016 	case COLOR_SPACE_2020_YCBCR:
2017 		return false;
2018 	default:
2019 		/* Add a case to switch */
2020 		BREAK_TO_DEBUGGER();
2021 		return false;
2022 	}
2023 }
2024 
2025 void dcn10_get_surface_visual_confirm_color(
2026 		const struct pipe_ctx *pipe_ctx,
2027 		struct tg_color *color)
2028 {
2029 	uint32_t color_value = MAX_TG_COLOR_VALUE;
2030 
2031 	switch (pipe_ctx->plane_res.scl_data.format) {
2032 	case PIXEL_FORMAT_ARGB8888:
2033 		/* set border color to red */
2034 		color->color_r_cr = color_value;
2035 		break;
2036 
2037 	case PIXEL_FORMAT_ARGB2101010:
2038 		/* set border color to blue */
2039 		color->color_b_cb = color_value;
2040 		break;
2041 	case PIXEL_FORMAT_420BPP8:
2042 		/* set border color to green */
2043 		color->color_g_y = color_value;
2044 		break;
2045 	case PIXEL_FORMAT_420BPP10:
2046 		/* set border color to yellow */
2047 		color->color_g_y = color_value;
2048 		color->color_r_cr = color_value;
2049 		break;
2050 	case PIXEL_FORMAT_FP16:
2051 		/* set border color to white */
2052 		color->color_r_cr = color_value;
2053 		color->color_b_cb = color_value;
2054 		color->color_g_y = color_value;
2055 		break;
2056 	default:
2057 		break;
2058 	}
2059 }
2060 
2061 void dcn10_get_hdr_visual_confirm_color(
2062 		struct pipe_ctx *pipe_ctx,
2063 		struct tg_color *color)
2064 {
2065 	uint32_t color_value = MAX_TG_COLOR_VALUE;
2066 
2067 	// Determine the overscan color based on the top-most (desktop) plane's context
2068 	struct pipe_ctx *top_pipe_ctx  = pipe_ctx;
2069 
2070 	while (top_pipe_ctx->top_pipe != NULL)
2071 		top_pipe_ctx = top_pipe_ctx->top_pipe;
2072 
2073 	switch (top_pipe_ctx->plane_res.scl_data.format) {
2074 	case PIXEL_FORMAT_ARGB2101010:
2075 		if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
2076 			/* HDR10, ARGB2101010 - set border color to red */
2077 			color->color_r_cr = color_value;
2078 		}
2079 		break;
2080 	case PIXEL_FORMAT_FP16:
2081 		if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
2082 			/* HDR10, FP16 - set border color to blue */
2083 			color->color_b_cb = color_value;
2084 		} else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
2085 			/* FreeSync 2 HDR - set border color to green */
2086 			color->color_g_y = color_value;
2087 		}
2088 		break;
2089 	default:
2090 		/* SDR - set border color to gray */
2091 		color->color_r_cr = color_value/2;
2092 		color->color_b_cb = color_value/2;
2093 		color->color_g_y = color_value/2;
2094 		break;
2095 	}
2096 }
2097 
2098 static uint16_t fixed_point_to_int_frac(
2099 	struct fixed31_32 arg,
2100 	uint8_t integer_bits,
2101 	uint8_t fractional_bits)
2102 {
2103 	int32_t numerator;
2104 	int32_t divisor = 1 << fractional_bits;
2105 
2106 	uint16_t result;
2107 
2108 	uint16_t d = (uint16_t)dc_fixpt_floor(
2109 		dc_fixpt_abs(
2110 			arg));
2111 
2112 	if (d <= (uint16_t)(1 << integer_bits) - (1 / (uint16_t)divisor))
2113 		numerator = (uint16_t)dc_fixpt_floor(
2114 			dc_fixpt_mul_int(
2115 				arg,
2116 				divisor));
2117 	else {
2118 		numerator = dc_fixpt_floor(
2119 			dc_fixpt_sub(
2120 				dc_fixpt_from_int(
2121 					1LL << integer_bits),
2122 				dc_fixpt_recip(
2123 					dc_fixpt_from_int(
2124 						divisor))));
2125 	}
2126 
2127 	if (numerator >= 0)
2128 		result = (uint16_t)numerator;
2129 	else
2130 		result = (uint16_t)(
2131 		(1 << (integer_bits + fractional_bits + 1)) + numerator);
2132 
2133 	if ((result != 0) && dc_fixpt_lt(
2134 		arg, dc_fixpt_zero))
2135 		result |= 1 << (integer_bits + fractional_bits);
2136 
2137 	return result;
2138 }
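
/* Worked example for the conversion above (illustrative values only): with
 * integer_bits = 2 and fractional_bits = 13 the divisor is 2^13 = 8192, so
 * 1.0 converts to 1.0 * 8192 = 0x2000 and 1.25 converts to 1.25 * 8192 =
 * 0x2800; a negative input additionally sets bit
 * (integer_bits + fractional_bits) as the sign bit.
 */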
2139 
2140 void dcn10_build_prescale_params(struct  dc_bias_and_scale *bias_and_scale,
2141 		const struct dc_plane_state *plane_state)
2142 {
2143 	if (plane_state->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN
2144 			&& plane_state->format != SURFACE_PIXEL_FORMAT_INVALID
2145 			&& plane_state->input_csc_color_matrix.enable_adjustment
2146 			&& plane_state->coeff_reduction_factor.value != 0) {
2147 		bias_and_scale->scale_blue = fixed_point_to_int_frac(
2148 			dc_fixpt_mul(plane_state->coeff_reduction_factor,
2149 					dc_fixpt_from_fraction(256, 255)),
2150 				2,
2151 				13);
2152 		bias_and_scale->scale_red = bias_and_scale->scale_blue;
2153 		bias_and_scale->scale_green = bias_and_scale->scale_blue;
2154 	} else {
2155 		bias_and_scale->scale_blue = 0x2000;
2156 		bias_and_scale->scale_red = 0x2000;
2157 		bias_and_scale->scale_green = 0x2000;
2158 	}
2159 }
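
/* Illustrative arithmetic for the scale above: with a coeff_reduction_factor
 * of exactly 1.0 the scale is 256/255 (about 1.0039), which in the 2.13
 * format is floor(1.0039 * 8192) = 0x2020, compared with the 0x2000 default
 * (exactly 1.0) used when no adjustment is needed.
 */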
2160 
2161 static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2162 {
2163 	struct dc_bias_and_scale bns_params = {0};
2164 
2165 	// program the input csc
2166 	dpp->funcs->dpp_setup(dpp,
2167 			plane_state->format,
2168 			EXPANSION_MODE_ZERO,
2169 			plane_state->input_csc_color_matrix,
2170 #ifdef CONFIG_DRM_AMD_DC_DCN2_0
2171 			plane_state->color_space,
2172 			NULL);
2173 #else
2174 			plane_state->color_space);
2175 #endif
2176 
2177 	//set scale and bias registers
2178 	dcn10_build_prescale_params(&bns_params, plane_state);
2179 	if (dpp->funcs->dpp_program_bias_and_scale)
2180 		dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
2181 }
2182 
2183 static void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
2184 {
2185 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
2186 	struct mpcc_blnd_cfg blnd_cfg = {{0}};
2187 	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2188 	int mpcc_id;
2189 	struct mpcc *new_mpcc;
2190 	struct mpc *mpc = dc->res_pool->mpc;
2191 	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
2192 
2193 	if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
2194 		dcn10_get_hdr_visual_confirm_color(
2195 				pipe_ctx, &blnd_cfg.black_color);
2196 	} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
2197 		dcn10_get_surface_visual_confirm_color(
2198 				pipe_ctx, &blnd_cfg.black_color);
2199 	} else {
2200 		color_space_to_black_color(
2201 				dc, pipe_ctx->stream->output_color_space,
2202 				&blnd_cfg.black_color);
2203 	}
2204 
2205 	if (per_pixel_alpha)
2206 		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
2207 	else
2208 		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
2209 
2210 	blnd_cfg.overlap_only = false;
2211 	blnd_cfg.global_gain = 0xff;
2212 
2213 	if (pipe_ctx->plane_state->global_alpha)
2214 		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
2215 	else
2216 		blnd_cfg.global_alpha = 0xff;
2217 
2218 	/* DCN1.0 has output CM before MPC which seems to screw with
2219 	 * pre-multiplied alpha.
2220 	 */
2221 	blnd_cfg.pre_multiplied_alpha = is_rgb_cspace(
2222 			pipe_ctx->stream->output_color_space)
2223 					&& per_pixel_alpha;
2224 
2225 
2226 	/*
2227 	 * TODO: remove hack
2228 	 * Note: currently there is a bug in init_hw such that
2229 	 * on resume from hibernate, BIOS sets up MPCC0, and
2230 	 * we do mpcc_remove but the mpcc cannot go to idle
2231 	 * after remove. This causes us to pick mpcc1 here,
2232 	 * which causes a pstate hang for an as-yet unknown reason.
2233 	 */
2234 	mpcc_id = hubp->inst;
2235 
2236 	/* If there is no full update, there is no need to touch the MPC tree */
2237 	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
2238 		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
2239 		return;
2240 	}
2241 
2242 	/* check if this MPCC is already being used */
2243 	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
2244 	/* remove MPCC if being used */
2245 	if (new_mpcc != NULL)
2246 		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
2247 	else
2248 		if (dc->debug.sanity_checks)
2249 			mpc->funcs->assert_mpcc_idle_before_connect(
2250 					dc->res_pool->mpc, mpcc_id);
2251 
2252 	/* Call MPC to insert new plane */
2253 	new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
2254 			mpc_tree_params,
2255 			&blnd_cfg,
2256 			NULL,
2257 			NULL,
2258 			hubp->inst,
2259 			mpcc_id);
2260 
2261 	ASSERT(new_mpcc != NULL);
2262 
2263 	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
2264 	hubp->mpcc_id = mpcc_id;
2265 }
2266 
2267 static void update_scaler(struct pipe_ctx *pipe_ctx)
2268 {
2269 	bool per_pixel_alpha =
2270 			pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2271 
2272 	pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2273 	pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
2274 	/* scaler configuration */
2275 	pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2276 			pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
2277 }
2278 
2279 void update_dchubp_dpp(
2280 	struct dc *dc,
2281 	struct pipe_ctx *pipe_ctx,
2282 	struct dc_state *context)
2283 {
2284 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
2285 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
2286 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2287 	struct plane_size size = plane_state->plane_size;
2288 	unsigned int compat_level = 0;
2289 
2290 	/* The DPP clock value depends on the DML calculation and may change dynamically. */
2291 	/* If the requested max DPP clock is lower than the current dispclk, there
2292 	 * is no need to divide by 2.
2293 	 */
2294 	if (plane_state->update_flags.bits.full_update) {
2295 		bool should_divided_by_2 = context->bw_ctx.bw.dcn.clk.dppclk_khz <=
2296 				dc->clk_mgr->clks.dispclk_khz / 2;
2297 
2298 		dpp->funcs->dpp_dppclk_control(
2299 				dpp,
2300 				should_divided_by_2,
2301 				true);
2302 
2303 		if (dc->res_pool->dccg)
2304 			dc->res_pool->dccg->funcs->update_dpp_dto(
2305 					dc->res_pool->dccg,
2306 					dpp->inst,
2307 					pipe_ctx->plane_res.bw.dppclk_khz,
2308 					false);
2309 		else
2310 			dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
2311 						dc->clk_mgr->clks.dispclk_khz / 2 :
2312 							dc->clk_mgr->clks.dispclk_khz;
2313 	}
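
	/* Illustrative numbers for the decision above: with dispclk at 600 MHz
	 * and a requested dppclk of 250 MHz, 250 <= 600/2 so should_divided_by_2
	 * is true and the DPP is fed dispclk/2 = 300 MHz; a 400 MHz request
	 * would instead keep the DPP on the full 600 MHz dispclk.
	 */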
2314 
2315 	/* TODO: Need an input parameter to tell which OTG the current DCHUB pipe is tied to.
2316 	 * The VTG lives within DCHUBBUB, which is a common block shared by every pipe's HUBP.
2317 	 * VTG has a 1:1 mapping with OTG; each pipe's HUBP selects which VTG to use.
2318 	 */
2319 	if (plane_state->update_flags.bits.full_update) {
2320 		hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
2321 
2322 		hubp->funcs->hubp_setup(
2323 			hubp,
2324 			&pipe_ctx->dlg_regs,
2325 			&pipe_ctx->ttu_regs,
2326 			&pipe_ctx->rq_regs,
2327 			&pipe_ctx->pipe_dlg_param);
2328 		hubp->funcs->hubp_setup_interdependent(
2329 			hubp,
2330 			&pipe_ctx->dlg_regs,
2331 			&pipe_ctx->ttu_regs);
2332 	}
2333 
2334 	size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
2335 
2336 	if (plane_state->update_flags.bits.full_update ||
2337 		plane_state->update_flags.bits.bpp_change)
2338 		update_dpp(dpp, plane_state);
2339 
2340 	if (plane_state->update_flags.bits.full_update ||
2341 		plane_state->update_flags.bits.per_pixel_alpha_change ||
2342 		plane_state->update_flags.bits.global_alpha_change)
2343 		dc->hwss.update_mpcc(dc, pipe_ctx);
2344 
2345 	if (plane_state->update_flags.bits.full_update ||
2346 		plane_state->update_flags.bits.per_pixel_alpha_change ||
2347 		plane_state->update_flags.bits.global_alpha_change ||
2348 		plane_state->update_flags.bits.scaling_change ||
2349 		plane_state->update_flags.bits.position_change) {
2350 		update_scaler(pipe_ctx);
2351 	}
2352 
2353 	if (plane_state->update_flags.bits.full_update ||
2354 		plane_state->update_flags.bits.scaling_change ||
2355 		plane_state->update_flags.bits.position_change) {
2356 		hubp->funcs->mem_program_viewport(
2357 			hubp,
2358 			&pipe_ctx->plane_res.scl_data.viewport,
2359 			&pipe_ctx->plane_res.scl_data.viewport_c);
2360 	}
2361 
2362 	if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
2363 		dc->hwss.set_cursor_position(pipe_ctx);
2364 		dc->hwss.set_cursor_attribute(pipe_ctx);
2365 
2366 		if (dc->hwss.set_cursor_sdr_white_level)
2367 			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
2368 	}
2369 
2370 	if (plane_state->update_flags.bits.full_update) {
2371 		/*gamut remap*/
2372 		dc->hwss.program_gamut_remap(pipe_ctx);
2373 
2374 		dc->hwss.program_output_csc(dc,
2375 				pipe_ctx,
2376 				pipe_ctx->stream->output_color_space,
2377 				pipe_ctx->stream->csc_color_matrix.matrix,
2378 				pipe_ctx->stream_res.opp->inst);
2379 	}
2380 
2381 	if (plane_state->update_flags.bits.full_update ||
2382 		plane_state->update_flags.bits.pixel_format_change ||
2383 		plane_state->update_flags.bits.horizontal_mirror_change ||
2384 		plane_state->update_flags.bits.rotation_change ||
2385 		plane_state->update_flags.bits.swizzle_change ||
2386 		plane_state->update_flags.bits.dcc_change ||
2387 		plane_state->update_flags.bits.bpp_change ||
2388 		plane_state->update_flags.bits.scaling_change ||
2389 		plane_state->update_flags.bits.plane_size_change) {
2390 		hubp->funcs->hubp_program_surface_config(
2391 			hubp,
2392 			plane_state->format,
2393 			&plane_state->tiling_info,
2394 			&size,
2395 			plane_state->rotation,
2396 			&plane_state->dcc,
2397 			plane_state->horizontal_mirror,
2398 			compat_level);
2399 	}
2400 
2401 	hubp->power_gated = false;
2402 
2403 	dc->hwss.update_plane_addr(dc, pipe_ctx);
2404 
2405 	if (is_pipe_tree_visible(pipe_ctx))
2406 		hubp->funcs->set_blank(hubp, false);
2407 }
2408 
2409 static void dcn10_blank_pixel_data(
2410 		struct dc *dc,
2411 		struct pipe_ctx *pipe_ctx,
2412 		bool blank)
2413 {
2414 	enum dc_color_space color_space;
2415 	struct tg_color black_color = {0};
2416 	struct stream_resource *stream_res = &pipe_ctx->stream_res;
2417 	struct dc_stream_state *stream = pipe_ctx->stream;
2418 
2419 	/* program otg blank color */
2420 	color_space = stream->output_color_space;
2421 	color_space_to_black_color(dc, color_space, &black_color);
2422 
2423 	/*
2424 	 * The way 420 is packed, 2 channels carry the Y component and 1 channel
2425 	 * alternates between Cb and Cr, so both channels need the pixel
2426 	 * value for Y
2427 	 */
2428 	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2429 		black_color.color_r_cr = black_color.color_g_y;
2430 
2431 
2432 	if (stream_res->tg->funcs->set_blank_color)
2433 		stream_res->tg->funcs->set_blank_color(
2434 				stream_res->tg,
2435 				&black_color);
2436 
2437 	if (!blank) {
2438 		if (stream_res->tg->funcs->set_blank)
2439 			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2440 		if (stream_res->abm) {
2441 			stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1);
2442 			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
2443 		}
2444 	} else if (blank) {
2445 		if (stream_res->abm)
2446 			stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm);
2447 		if (stream_res->tg->funcs->set_blank)
2448 			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2449 	}
2450 }
2451 
2452 void set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2453 {
2454 	struct fixed31_32 multiplier = dc_fixpt_from_fraction(
2455 			pipe_ctx->plane_state->sdr_white_level, 80);
2456 	uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
2457 	struct custom_float_format fmt;
2458 
2459 	fmt.exponenta_bits = 6;
2460 	fmt.mantissa_bits = 12;
2461 	fmt.sign = true;
2462 
2463 	if (pipe_ctx->plane_state->sdr_white_level > 80)
2464 		convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2465 
2466 	pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2467 			pipe_ctx->plane_res.dpp, hw_mult);
2468 }
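
/* Worked example for the multiplier above, assuming the custom float uses a
 * biased exponent (bias 31 for 6 exponent bits), which is consistent with
 * the 0x1f000 encoding of 1.0: an sdr_white_level of 160 gives 160/80 = 2.0,
 * which encodes as exponent 32 with a zero mantissa, i.e. 0x20 << 12 =
 * 0x20000.
 */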
2469 
2470 void dcn10_program_pipe(
2471 		struct dc *dc,
2472 		struct pipe_ctx *pipe_ctx,
2473 		struct dc_state *context)
2474 {
2475 	if (pipe_ctx->plane_state->update_flags.bits.full_update)
2476 		dcn10_enable_plane(dc, pipe_ctx, context);
2477 
2478 	update_dchubp_dpp(dc, pipe_ctx, context);
2479 
2480 	set_hdr_multiplier(pipe_ctx);
2481 
2482 	if (pipe_ctx->plane_state->update_flags.bits.full_update ||
2483 			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
2484 			pipe_ctx->plane_state->update_flags.bits.gamma_change)
2485 		dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state);
2486 
2487 	/* dcn10_translate_regamma_to_hw_format takes 750us to finish,
2488 	 * so only do gamma programming for a full update.
2489 	 * TODO: This can be further optimized/cleaned up.
2490 	 * Always call this for now, since it does a memcmp inside before
2491 	 * doing the heavy calculation and programming.
2492 	 */
2493 	if (pipe_ctx->plane_state->update_flags.bits.full_update)
2494 		dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream);
2495 }
2496 
2497 static void program_all_pipe_in_tree(
2498 		struct dc *dc,
2499 		struct pipe_ctx *pipe_ctx,
2500 		struct dc_state *context)
2501 {
2502 	if (pipe_ctx->top_pipe == NULL) {
2503 		bool blank = !is_pipe_tree_visible(pipe_ctx);
2504 
2505 		pipe_ctx->stream_res.tg->funcs->program_global_sync(
2506 				pipe_ctx->stream_res.tg,
2507 				pipe_ctx->pipe_dlg_param.vready_offset,
2508 				pipe_ctx->pipe_dlg_param.vstartup_start,
2509 				pipe_ctx->pipe_dlg_param.vupdate_offset,
2510 				pipe_ctx->pipe_dlg_param.vupdate_width);
2511 
2512 		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
2513 				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
2514 
2515 		dc->hwss.blank_pixel_data(dc, pipe_ctx, blank);
2516 
2517 	}
2518 
2519 	if (pipe_ctx->plane_state != NULL)
2520 		dcn10_program_pipe(dc, pipe_ctx, context);
2521 
2522 	if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx)
2523 		program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context);
2524 }
2525 
2526 struct pipe_ctx *find_top_pipe_for_stream(
2527 		struct dc *dc,
2528 		struct dc_state *context,
2529 		const struct dc_stream_state *stream)
2530 {
2531 	int i;
2532 
2533 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2534 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2535 		struct pipe_ctx *old_pipe_ctx =
2536 				&dc->current_state->res_ctx.pipe_ctx[i];
2537 
2538 		if (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state)
2539 			continue;
2540 
2541 		if (pipe_ctx->stream != stream)
2542 			continue;
2543 
2544 		if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe)
2545 			return pipe_ctx;
2546 	}
2547 	return NULL;
2548 }
2549 
2550 static void dcn10_apply_ctx_for_surface(
2551 		struct dc *dc,
2552 		const struct dc_stream_state *stream,
2553 		int num_planes,
2554 		struct dc_state *context)
2555 {
2556 	int i;
2557 	struct timing_generator *tg;
2558 	uint32_t underflow_check_delay_us;
2559 	bool removed_pipe[4] = { false };
2560 	bool interdependent_update = false;
2561 	struct pipe_ctx *top_pipe_to_program =
2562 			find_top_pipe_for_stream(dc, context, stream);
2563 	DC_LOGGER_INIT(dc->ctx->logger);
2564 
2565 	if (!top_pipe_to_program)
2566 		return;
2567 
2568 	tg = top_pipe_to_program->stream_res.tg;
2569 
2570 	interdependent_update = top_pipe_to_program->plane_state &&
2571 		top_pipe_to_program->plane_state->update_flags.bits.full_update;
2572 
2573 	underflow_check_delay_us = dc->debug.underflow_assert_delay_us;
2574 
2575 	if (underflow_check_delay_us != 0xFFFFFFFF && dc->hwss.did_underflow_occur)
2576 		ASSERT(dc->hwss.did_underflow_occur(dc, top_pipe_to_program));
2577 
2578 	if (interdependent_update)
2579 		lock_all_pipes(dc, context, true);
2580 	else
2581 		dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
2582 
2583 	if (underflow_check_delay_us != 0xFFFFFFFF)
2584 		udelay(underflow_check_delay_us);
2585 
2586 	if (underflow_check_delay_us != 0xFFFFFFFF && dc->hwss.did_underflow_occur)
2587 		ASSERT(dc->hwss.did_underflow_occur(dc, top_pipe_to_program));
2588 
2589 	if (num_planes == 0) {
2590 		/* OTG blank before remove all front end */
2591 		dc->hwss.blank_pixel_data(dc, top_pipe_to_program, true);
2592 	}
2593 
2594 	/* Disconnect unused mpcc */
2595 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2596 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2597 		struct pipe_ctx *old_pipe_ctx =
2598 				&dc->current_state->res_ctx.pipe_ctx[i];
2599 		/*
2600 		 * Power-gate reused pipes that are not yet power-gated.
2601 		 * Fairly hacky right now, using opp_id as the indicator.
2602 		 * TODO: After dc_post moves to dc_update, this will
2603 		 * be removed.
2604 		 */
2605 		if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) {
2606 			if (old_pipe_ctx->stream_res.tg == tg &&
2607 			    old_pipe_ctx->plane_res.hubp &&
2608 			    old_pipe_ctx->plane_res.hubp->opp_id != OPP_ID_INVALID)
2609 				dc->hwss.disable_plane(dc, old_pipe_ctx);
2610 		}
2611 
2612 		if ((!pipe_ctx->plane_state ||
2613 		     pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) &&
2614 		    old_pipe_ctx->plane_state &&
2615 		    old_pipe_ctx->stream_res.tg == tg) {
2616 
2617 			dc->hwss.plane_atomic_disconnect(dc, old_pipe_ctx);
2618 			removed_pipe[i] = true;
2619 
2620 			DC_LOG_DC("Reset mpcc for pipe %d\n",
2621 					old_pipe_ctx->pipe_idx);
2622 		}
2623 	}
2624 
2625 	if (num_planes > 0)
2626 		program_all_pipe_in_tree(dc, top_pipe_to_program, context);
2627 
2628 #if defined(CONFIG_DRM_AMD_DC_DCN2_0)
2629 	/* Program secondary blending tree and writeback pipes */
2630 	if ((stream->num_wb_info > 0) && (dc->hwss.program_all_writeback_pipes_in_tree))
2631 		dc->hwss.program_all_writeback_pipes_in_tree(dc, stream, context);
2632 #endif
2633 	if (interdependent_update)
2634 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
2635 			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2636 			/* Skip inactive pipes and ones already updated */
2637 			if (!pipe_ctx->stream || pipe_ctx->stream == stream ||
2638 			    !pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg))
2639 				continue;
2640 
2641 			pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
2642 				pipe_ctx->plane_res.hubp,
2643 				&pipe_ctx->dlg_regs,
2644 				&pipe_ctx->ttu_regs);
2645 		}
2646 
2647 	if (interdependent_update)
2648 		lock_all_pipes(dc, context, false);
2649 	else
2650 		dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
2651 
2652 	if (num_planes == 0)
2653 		false_optc_underflow_wa(dc, stream, tg);
2654 
2655 	for (i = 0; i < dc->res_pool->pipe_count; i++)
2656 		if (removed_pipe[i])
2657 			dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
2658 
2659 	for (i = 0; i < dc->res_pool->pipe_count; i++)
2660 		if (removed_pipe[i]) {
2661 			dc->hwss.optimize_bandwidth(dc, context);
2662 			break;
2663 		}
2664 
2665 	if (dc->hwseq->wa.DEGVIDCN10_254)
2666 		hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
2667 }
2668 
2669 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
2670 {
2671 	uint8_t i;
2672 
2673 	for (i = 0; i < context->stream_count; i++) {
2674 		if (context->streams[i]->timing.timing_3d_format
2675 				== TIMING_3D_FORMAT_HW_FRAME_PACKING) {
2676 			/*
2677 			 * Disable stutter
2678 			 */
2679 			hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
2680 			break;
2681 		}
2682 	}
2683 }
2684 
2685 static void dcn10_prepare_bandwidth(
2686 		struct dc *dc,
2687 		struct dc_state *context)
2688 {
2689 	struct hubbub *hubbub = dc->res_pool->hubbub;
2690 
2691 	if (dc->debug.sanity_checks)
2692 		dcn10_verify_allow_pstate_change_high(dc);
2693 
2694 	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
2695 		if (context->stream_count == 0)
2696 			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
2697 
2698 		dc->clk_mgr->funcs->update_clocks(
2699 				dc->clk_mgr,
2700 				context,
2701 				false);
2702 	}
2703 
2704 	hubbub->funcs->program_watermarks(hubbub,
2705 			&context->bw_ctx.bw.dcn.watermarks,
2706 			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
2707 			true);
2708 	dcn10_stereo_hw_frame_pack_wa(dc, context);
2709 
2710 	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
2711 		dcn_bw_notify_pplib_of_wm_ranges(dc);
2712 
2713 	if (dc->debug.sanity_checks)
2714 		dcn10_verify_allow_pstate_change_high(dc);
2715 }
2716 
2717 static void dcn10_optimize_bandwidth(
2718 		struct dc *dc,
2719 		struct dc_state *context)
2720 {
2721 	struct hubbub *hubbub = dc->res_pool->hubbub;
2722 
2723 	if (dc->debug.sanity_checks)
2724 		dcn10_verify_allow_pstate_change_high(dc);
2725 
2726 	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
2727 		if (context->stream_count == 0)
2728 			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
2729 
2730 		dc->clk_mgr->funcs->update_clocks(
2731 				dc->clk_mgr,
2732 				context,
2733 				true);
2734 	}
2735 
2736 	hubbub->funcs->program_watermarks(hubbub,
2737 			&context->bw_ctx.bw.dcn.watermarks,
2738 			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
2739 			true);
2740 	dcn10_stereo_hw_frame_pack_wa(dc, context);
2741 
2742 	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
2743 		dcn_bw_notify_pplib_of_wm_ranges(dc);
2744 
2745 	if (dc->debug.sanity_checks)
2746 		dcn10_verify_allow_pstate_change_high(dc);
2747 }
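
/* Note: dcn10_prepare_bandwidth and dcn10_optimize_bandwidth above differ
 * only in the last argument passed to update_clocks() (false vs. true).
 * Assuming that argument is the clock manager's safe_to_lower flag, prepare
 * raises clocks before a new state is programmed while optimize allows them
 * to drop once the state has been committed.
 */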
2748 
2749 static void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
2750 		int num_pipes, unsigned int vmin, unsigned int vmax,
2751 		unsigned int vmid, unsigned int vmid_frame_number)
2752 {
2753 	int i = 0;
2754 	struct drr_params params = {0};
2755 	// DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
2756 	unsigned int event_triggers = 0x800;
2757 
2758 	params.vertical_total_max = vmax;
2759 	params.vertical_total_min = vmin;
2760 	params.vertical_total_mid = vmid;
2761 	params.vertical_total_mid_frame_num = vmid_frame_number;
2762 
2763 	/* TODO: If multiple pipes are to be supported, you need
2764 	 * some GSL stuff. Static screen triggers may be programmed differently
2765 	 * as well.
2766 	 */
2767 	for (i = 0; i < num_pipes; i++) {
2768 		pipe_ctx[i]->stream_res.tg->funcs->set_drr(
2769 			pipe_ctx[i]->stream_res.tg, &params);
2770 		if (vmax != 0 && vmin != 0)
2771 			pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
2772 					pipe_ctx[i]->stream_res.tg,
2773 					event_triggers);
2774 	}
2775 }
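
/* Illustrative call for the function above (values are examples only): for a
 * display with a native vtotal of 1125 lines at 60 Hz, a 48-60 Hz variable
 * refresh range would be requested roughly as
 * dcn10_set_drr(&pipe, 1, 1125, 1406, 0, 0), since 1125 * 60 / 48 is about
 * 1406. The 0x800 used for event_triggers is simply 1 << 11, matching the
 * OTG_TRIG_A note above.
 */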
2776 
2777 static void dcn10_get_position(struct pipe_ctx **pipe_ctx,
2778 		int num_pipes,
2779 		struct crtc_position *position)
2780 {
2781 	int i = 0;
2782 
2783 	/* TODO: handle pipes > 1
2784 	 */
2785 	for (i = 0; i < num_pipes; i++)
2786 		pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
2787 }
2788 
2789 static void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
2790 		int num_pipes, const struct dc_static_screen_events *events)
2791 {
2792 	unsigned int i;
2793 	unsigned int value = 0;
2794 
2795 	if (events->surface_update)
2796 		value |= 0x80;
2797 	if (events->cursor_update)
2798 		value |= 0x2;
2799 	if (events->force_trigger)
2800 		value |= 0x1;
2801 
2802 	for (i = 0; i < num_pipes; i++)
2803 		pipe_ctx[i]->stream_res.tg->funcs->
2804 			set_static_screen_control(pipe_ctx[i]->stream_res.tg, value);
2805 }
2806 
2807 static void dcn10_config_stereo_parameters(
2808 		struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
2809 {
2810 	enum view_3d_format view_format = stream->view_format;
2811 	enum dc_timing_3d_format timing_3d_format =\
2812 			stream->timing.timing_3d_format;
2813 	bool non_stereo_timing = false;
2814 
2815 	if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
2816 		timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
2817 		timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
2818 		non_stereo_timing = true;
2819 
2820 	if (non_stereo_timing == false &&
2821 		view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
2822 
2823 		flags->PROGRAM_STEREO         = 1;
2824 		flags->PROGRAM_POLARITY       = 1;
2825 		if (timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
2826 			timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
2827 			timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
2828 			enum display_dongle_type dongle = \
2829 					stream->link->ddc->dongle_type;
2830 			if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
2831 				dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
2832 				dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
2833 				flags->DISABLE_STEREO_DP_SYNC = 1;
2834 		}
2835 		flags->RIGHT_EYE_POLARITY =\
2836 				stream->timing.flags.RIGHT_EYE_3D_POLARITY;
2837 		if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
2838 			flags->FRAME_PACKED = 1;
2839 	}
2840 
2841 	return;
2842 }
2843 
2844 static void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
2845 {
2846 	struct crtc_stereo_flags flags = { 0 };
2847 	struct dc_stream_state *stream = pipe_ctx->stream;
2848 
2849 	dcn10_config_stereo_parameters(stream, &flags);
2850 
2851 	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
2852 		if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
2853 			dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
2854 	} else {
2855 		dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
2856 	}
2857 
2858 	pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
2859 		pipe_ctx->stream_res.opp,
2860 		flags.PROGRAM_STEREO == 1 ? true:false,
2861 		&stream->timing);
2862 
2863 	pipe_ctx->stream_res.tg->funcs->program_stereo(
2864 		pipe_ctx->stream_res.tg,
2865 		&stream->timing,
2866 		&flags);
2867 
2868 	return;
2869 }
2870 
2871 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
2872 {
2873 	int i;
2874 
2875 	for (i = 0; i < res_pool->pipe_count; i++) {
2876 		if (res_pool->hubps[i]->inst == mpcc_inst)
2877 			return res_pool->hubps[i];
2878 	}
2879 	ASSERT(false);
2880 	return NULL;
2881 }
2882 
2883 static void dcn10_wait_for_mpcc_disconnect(
2884 		struct dc *dc,
2885 		struct resource_pool *res_pool,
2886 		struct pipe_ctx *pipe_ctx)
2887 {
2888 	int mpcc_inst;
2889 
2890 	if (dc->debug.sanity_checks) {
2891 		dcn10_verify_allow_pstate_change_high(dc);
2892 	}
2893 
2894 	if (!pipe_ctx->stream_res.opp)
2895 		return;
2896 
2897 	for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
2898 		if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
2899 			struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
2900 
2901 			res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
2902 			pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
2903 			hubp->funcs->set_blank(hubp, true);
2904 		}
2905 	}
2906 
2907 	if (dc->debug.sanity_checks) {
2908 		dcn10_verify_allow_pstate_change_high(dc);
2909 	}
2910 
2911 }
2912 
2913 static bool dcn10_dummy_display_power_gating(
2914 	struct dc *dc,
2915 	uint8_t controller_id,
2916 	struct dc_bios *dcb,
2917 	enum pipe_gating_control power_gating)
2918 {
2919 	return true;
2920 }
2921 
2922 static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
2923 {
2924 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2925 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
2926 	bool flip_pending;
2927 
2928 	if (plane_state == NULL)
2929 		return;
2930 
2931 	flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
2932 					pipe_ctx->plane_res.hubp);
2933 
2934 	plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
2935 
2936 	if (!flip_pending)
2937 		plane_state->status.current_address = plane_state->status.requested_address;
2938 
2939 	if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
2940 			tg->funcs->is_stereo_left_eye) {
2941 		plane_state->status.is_right_eye =
2942 				!tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
2943 	}
2944 }
2945 
2946 static void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
2947 {
2948 	struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
2949 
2950 	/* In DCN, this programming sequence is owned by the hubbub */
2951 	hubbub->funcs->update_dchub(hubbub, dh_data);
2952 }
2953 
2954 static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
2955 {
2956 	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
2957 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
2958 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
2959 	struct dc_cursor_mi_param param = {
2960 		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
2961 		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
2962 		.viewport = pipe_ctx->plane_res.scl_data.viewport,
2963 		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
2964 		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
2965 		.rotation = pipe_ctx->plane_state->rotation,
2966 		.mirror = pipe_ctx->plane_state->horizontal_mirror
2967 	};
2968 	uint32_t x_plane = pipe_ctx->plane_state->dst_rect.x;
2969 	uint32_t y_plane = pipe_ctx->plane_state->dst_rect.y;
2970 	uint32_t x_offset = min(x_plane, pos_cpy.x);
2971 	uint32_t y_offset = min(y_plane, pos_cpy.y);
2972 
2973 	pos_cpy.x -= x_offset;
2974 	pos_cpy.y -= y_offset;
2975 	pos_cpy.x_hotspot += (x_plane - x_offset);
2976 	pos_cpy.y_hotspot += (y_plane - y_offset);
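
	/* Illustrative arithmetic for the adjustment above: with dst_rect.x =
	 * 100 and a requested cursor x of 60, x_offset = min(100, 60) = 60, so
	 * the programmed x becomes 0 and the x hotspot grows by 100 - 60 = 40,
	 * which appears to keep the programmed position non-negative while
	 * shifting the difference into the hotspot. Values are examples only.
	 */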
2977 
2978 	if (pipe_ctx->plane_state->address.type
2979 			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
2980 		pos_cpy.enable = false;
2981 
2982 	// Swap axis and mirror horizontally
2983 	if (param.rotation == ROTATION_ANGLE_90) {
2984 		uint32_t temp_x = pos_cpy.x;
2985 		pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
2986 				(pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
2987 		pos_cpy.y = temp_x;
2988 	}
2989 	// Swap axis and mirror vertically
2990 	else if (param.rotation == ROTATION_ANGLE_270) {
2991 		uint32_t temp_y = pos_cpy.y;
2992 		if (pos_cpy.x >  pipe_ctx->plane_res.scl_data.viewport.height) {
2993 			pos_cpy.x = pos_cpy.x - pipe_ctx->plane_res.scl_data.viewport.height;
2994 			pos_cpy.y = pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.x;
2995 		} else {
2996 			pos_cpy.y = 2 * pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.x;
2997 		}
2998 		pos_cpy.x = temp_y;
2999 	}
3000 	// Mirror horizontally and vertically
3001 	else if (param.rotation == ROTATION_ANGLE_180) {
3002 		if (pos_cpy.x >= pipe_ctx->plane_res.scl_data.viewport.width + pipe_ctx->plane_res.scl_data.viewport.x) {
3003 			pos_cpy.x = 2 * pipe_ctx->plane_res.scl_data.viewport.width
3004 					- pos_cpy.x + 2 * pipe_ctx->plane_res.scl_data.viewport.x;
3005 		} else {
3006 			uint32_t temp_x = pos_cpy.x;
3007 			pos_cpy.x = 2 * pipe_ctx->plane_res.scl_data.viewport.x - pos_cpy.x;
3008 			if (temp_x >= pipe_ctx->plane_res.scl_data.viewport.x + (int)hubp->curs_attr.width
3009 					|| pos_cpy.x <= (int)hubp->curs_attr.width + pipe_ctx->plane_state->src_rect.x) {
3010 				pos_cpy.x = temp_x + pipe_ctx->plane_res.scl_data.viewport.width;
3011 			}
3012 		}
3013 		pos_cpy.y = pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
3014 	}
3015 
3016 	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
3017 	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
3018 }
3019 
3020 static void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3021 {
3022 	struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3023 
3024 	pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3025 			pipe_ctx->plane_res.hubp, attributes);
3026 	pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3027 		pipe_ctx->plane_res.dpp, attributes);
3028 }
3029 
3030 static void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
3031 {
3032 	uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3033 	struct fixed31_32 multiplier;
3034 	struct dpp_cursor_attributes opt_attr = { 0 };
3035 	uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3036 	struct custom_float_format fmt;
3037 
3038 	if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
3039 		return;
3040 
3041 	fmt.exponenta_bits = 5;
3042 	fmt.mantissa_bits = 10;
3043 	fmt.sign = true;
3044 
3045 	if (sdr_white_level > 80) {
3046 		multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
3047 		convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
3048 	}
3049 
3050 	opt_attr.scale = hw_scale;
3051 	opt_attr.bias = 0;
3052 
3053 	pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3054 			pipe_ctx->plane_res.dpp, &opt_attr);
3055 }
3056 
3057 /*
3058  * apply_front_porch_workaround  TODO: is this workaround still needed on FPGA?
3059  *
3060  * Works around a bug present since R5xx that has never been fixed: keep the
3061  * front porch at a minimum of 2 lines for interlaced mode and 1 for progressive.
3062  */
3063 static void apply_front_porch_workaround(
3064 	struct dc_crtc_timing *timing)
3065 {
3066 	if (timing->flags.INTERLACE == 1) {
3067 		if (timing->v_front_porch < 2)
3068 			timing->v_front_porch = 2;
3069 	} else {
3070 		if (timing->v_front_porch < 1)
3071 			timing->v_front_porch = 1;
3072 	}
3073 }
3074 
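/*
 * The VUPDATE position, returned as a line offset from VSYNC: the line on
 * which the vertical blank ends (scaled for interlacing) minus the pipe's
 * vstartup_start, plus one.
 */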
3075 int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
3076 {
3077 	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3078 	struct dc_crtc_timing patched_crtc_timing;
3079 	int vesa_sync_start;
3080 	int asic_blank_end;
3081 	int interlace_factor;
3082 	int vertical_line_start;
3083 
3084 	patched_crtc_timing = *dc_crtc_timing;
3085 	apply_front_porch_workaround(&patched_crtc_timing);
3086 
3087 	interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
3088 
3089 	vesa_sync_start = patched_crtc_timing.v_addressable +
3090 			patched_crtc_timing.v_border_bottom +
3091 			patched_crtc_timing.v_front_porch;
3092 
3093 	asic_blank_end = (patched_crtc_timing.v_total -
3094 			vesa_sync_start -
3095 			patched_crtc_timing.v_border_top)
3096 			* interlace_factor;
3097 
3098 	vertical_line_start = asic_blank_end -
3099 			pipe_ctx->pipe_dlg_param.vstartup_start + 1;
3100 
3101 	return vertical_line_start;
3102 }
3103 
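/*
 * Lock or unlock the timing generator of every enabled top pipe in the given
 * context; secondary pipes and pipes without a stream or plane are skipped.
 */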
3104 void lock_all_pipes(struct dc *dc,
3105 	struct dc_state *context,
3106 	bool lock)
3107 {
3108 	struct pipe_ctx *pipe_ctx;
3109 	struct timing_generator *tg;
3110 	int i;
3111 
3112 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
3113 		pipe_ctx = &context->res_ctx.pipe_ctx[i];
3114 		tg = pipe_ctx->stream_res.tg;
3115 		/*
3116 		 * Only lock the top pipe's tg to prevent redundant
3117 		 * (un)locking. Also skip if pipe is disabled.
3118 		 */
3119 		if (pipe_ctx->top_pipe ||
3120 		    !pipe_ctx->stream || !pipe_ctx->plane_state ||
3121 		    !tg->funcs->is_tg_enabled(tg))
3122 			continue;
3123 
3124 		if (lock)
3125 			tg->funcs->lock(tg);
3126 		else
3127 			tg->funcs->unlock(tg);
3128 	}
3129 }
3130 
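/*
 * Convert the periodic_interrupt0 line offset (relative to VUPDATE) into an
 * absolute start/end line pair, wrapping around v_total when the position
 * falls outside the frame.
 */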
3131 static void calc_vupdate_position(
3132 		struct pipe_ctx *pipe_ctx,
3133 		uint32_t *start_line,
3134 		uint32_t *end_line)
3135 {
3136 	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3137 	int vline_int_offset_from_vupdate =
3138 			pipe_ctx->stream->periodic_interrupt0.lines_offset;
3139 	int vupdate_offset_from_vsync = get_vupdate_offset_from_vsync(pipe_ctx);
3140 	int start_position;
3141 
3142 	if (vline_int_offset_from_vupdate > 0)
3143 		vline_int_offset_from_vupdate--;
3144 	else if (vline_int_offset_from_vupdate < 0)
3145 		vline_int_offset_from_vupdate++;
3146 
3147 	start_position = vline_int_offset_from_vupdate + vupdate_offset_from_vsync;
3148 
3149 	if (start_position >= 0)
3150 		*start_line = start_position;
3151 	else
3152 		*start_line = dc_crtc_timing->v_total + start_position - 1;
3153 
3154 	*end_line = *start_line + 2;
3155 
3156 	if (*end_line >= dc_crtc_timing->v_total)
3157 		*end_line = 2;
3158 }
3159 
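/*
 * Compute the start/end lines for the requested vline interrupt based on its
 * reference point: offsets from VUPDATE need translation, while offsets from
 * VSYNC can be used as-is because VSYNC is line 0.
 */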
3160 static void cal_vline_position(
3161 		struct pipe_ctx *pipe_ctx,
3162 		enum vline_select vline,
3163 		uint32_t *start_line,
3164 		uint32_t *end_line)
3165 {
3166 	enum vertical_interrupt_ref_point ref_point = INVALID_POINT;
3167 
3168 	if (vline == VLINE0)
3169 		ref_point = pipe_ctx->stream->periodic_interrupt0.ref_point;
3170 	else if (vline == VLINE1)
3171 		ref_point = pipe_ctx->stream->periodic_interrupt1.ref_point;
3172 
3173 	switch (ref_point) {
3174 	case START_V_UPDATE:
3175 		calc_vupdate_position(
3176 				pipe_ctx,
3177 				start_line,
3178 				end_line);
3179 		break;
3180 	case START_V_SYNC:
3181 		// Intentionally nothing to do: VSYNC is line 0, so no adjustment is needed.
3182 		break;
3183 	default:
3184 		ASSERT(0);
3185 		break;
3186 	}
3187 }
3188 
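/*
 * Program the timing generator's vertical interrupts: VLINE0 uses the
 * start/end lines computed by cal_vline_position(), while VLINE1 takes the
 * periodic_interrupt1 line offset directly.
 */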
3189 static void dcn10_setup_periodic_interrupt(
3190 		struct pipe_ctx *pipe_ctx,
3191 		enum vline_select vline)
3192 {
3193 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3194 
3195 	if (vline == VLINE0) {
3196 		uint32_t start_line = 0;
3197 		uint32_t end_line = 0;
3198 
3199 		cal_vline_position(pipe_ctx, vline, &start_line, &end_line);
3200 
3201 		tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3202 
3203 	} else if (vline == VLINE1) {
3204 		pipe_ctx->stream_res.tg->funcs->setup_vertical_interrupt1(
3205 				tg,
3206 				pipe_ctx->stream->periodic_interrupt1.lines_offset);
3207 	}
3208 }
3209 
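/*
 * Program vertical interrupt 2 to fire at the VUPDATE position; a negative
 * offset is treated as an error and clamped to line 0.
 */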
3210 static void dcn10_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx)
3211 {
3212 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3213 	int start_line = get_vupdate_offset_from_vsync(pipe_ctx);
3214 
3215 	if (start_line < 0) {
3216 		ASSERT(0);
3217 		start_line = 0;
3218 	}
3219 
3220 	if (tg->funcs->setup_vertical_interrupt2)
3221 		tg->funcs->setup_vertical_interrupt2(tg, start_line);
3222 }
3223 
3224 static void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3225 		struct dc_link_settings *link_settings)
3226 {
3227 	struct encoder_unblank_param params = { { 0 } };
3228 	struct dc_stream_state *stream = pipe_ctx->stream;
3229 	struct dc_link *link = stream->link;
3230 
3231 	/* only the fields set below are consumed by unblank */
3232 	params.timing = pipe_ctx->stream->timing;
3233 
3234 	params.link_settings.link_rate = link_settings->link_rate;
3235 
3236 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
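		/* For YCbCr 4:2:0 the encoder is programmed with half the pixel clock
		 * (two pixels are carried per clock in 4:2:0 mode). */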
3237 		if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3238 			params.timing.pix_clk_100hz /= 2;
3239 		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
3240 	}
3241 
3242 	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3243 		link->dc->hwss.edp_backlight_control(link, true);
3244 	}
3245 }
3246 
3247 static void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
3248 				const uint8_t *custom_sdp_message,
3249 				unsigned int sdp_message_size)
3250 {
3251 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3252 		pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
3253 				pipe_ctx->stream_res.stream_enc,
3254 				custom_sdp_message,
3255 				sdp_message_size);
3256 	}
3257 }
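
/*
 * Validate a requested DISPCLK/DPPCLK frequency against the limits reported by
 * the clock manager, record it in the current context, and have the clock
 * manager apply it.
 */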
3258 static enum dc_status dcn10_set_clock(struct dc *dc,
3259 			enum dc_clock_type clock_type,
3260 			uint32_t clk_khz,
3261 			uint32_t stepping)
3262 {
3263 	struct dc_state *context = dc->current_state;
3264 	struct dc_clock_config clock_cfg = {0};
3265 	struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
3266 
3267 	if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
3268 		return DC_FAIL_UNSUPPORTED_1;
3269 
3270 	dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
3271 			context, clock_type, &clock_cfg);
3272 
3274 	if (clk_khz > clock_cfg.max_clock_khz)
3275 		return DC_FAIL_CLK_EXCEED_MAX;
3276 
3277 	if (clk_khz < clock_cfg.min_clock_khz)
3278 		return DC_FAIL_CLK_BELOW_MIN;
3279 
3280 	if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
3281 		return DC_FAIL_CLK_BELOW_CFG_REQUIRED;
3282 
3283 	/* Record the requested clock internally so update_clocks() can use it. */
3284 	if (clock_type == DC_CLOCK_TYPE_DISPCLK)
3285 		current_clocks->dispclk_khz = clk_khz;
3286 	else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
3287 		current_clocks->dppclk_khz = clk_khz;
3288 	else
3289 		return DC_ERROR_UNEXPECTED;
3290 
3291 	if (dc->clk_mgr && dc->clk_mgr->funcs->update_clocks)
3292 		dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
3293 				context, true);
3294 
3295 	return DC_OK;
3296 }
3297 
3298 static void dcn10_get_clock(struct dc *dc,
3299 			enum dc_clock_type clock_type,
3300 			struct dc_clock_config *clock_cfg)
3301 {
3302 	struct dc_state *context = dc->current_state;
3303 
3304 	if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3305 		dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
3306 				context, clock_type, clock_cfg);
3307 }
3308 
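/*
 * DCN10 hardware sequencer function table; dcn10_hw_sequencer_construct()
 * installs it into dc->hwss.
 */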
3309 static const struct hw_sequencer_funcs dcn10_funcs = {
3310 	.program_gamut_remap = dcn10_program_gamut_remap,
3311 	.init_hw = dcn10_init_hw,
3312 	.init_pipes = dcn10_init_pipes,
3313 	.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
3314 	.apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
3315 	.update_plane_addr = dcn10_update_plane_addr,
3316 	.plane_atomic_disconnect = hwss1_plane_atomic_disconnect,
3317 	.update_dchub = dcn10_update_dchub,
3318 	.update_mpcc = dcn10_update_mpcc,
3319 	.update_pending_status = dcn10_update_pending_status,
3320 	.set_input_transfer_func = dcn10_set_input_transfer_func,
3321 	.set_output_transfer_func = dcn10_set_output_transfer_func,
3322 	.program_output_csc = dcn10_program_output_csc,
3323 	.power_down = dce110_power_down,
3324 	.enable_accelerated_mode = dce110_enable_accelerated_mode,
3325 	.enable_timing_synchronization = dcn10_enable_timing_synchronization,
3326 	.enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
3327 	.update_info_frame = dce110_update_info_frame,
3328 	.send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
3329 	.enable_stream = dce110_enable_stream,
3330 	.disable_stream = dce110_disable_stream,
3331 	.unblank_stream = dcn10_unblank_stream,
3332 	.blank_stream = dce110_blank_stream,
3333 	.enable_audio_stream = dce110_enable_audio_stream,
3334 	.disable_audio_stream = dce110_disable_audio_stream,
3335 	.enable_display_power_gating = dcn10_dummy_display_power_gating,
3336 	.disable_plane = dcn10_disable_plane,
3337 	.blank_pixel_data = dcn10_blank_pixel_data,
3338 	.pipe_control_lock = dcn10_pipe_control_lock,
3339 	.prepare_bandwidth = dcn10_prepare_bandwidth,
3340 	.optimize_bandwidth = dcn10_optimize_bandwidth,
3341 	.reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap,
3342 	.enable_stream_timing = dcn10_enable_stream_timing,
3343 	.set_drr = dcn10_set_drr,
3344 	.get_position = dcn10_get_position,
3345 	.set_static_screen_control = dcn10_set_static_screen_control,
3346 	.setup_stereo = dcn10_setup_stereo,
3347 	.set_avmute = dce110_set_avmute,
3348 	.log_hw_state = dcn10_log_hw_state,
3349 	.get_hw_state = dcn10_get_hw_state,
3350 	.clear_status_bits = dcn10_clear_status_bits,
3351 	.wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
3352 	.edp_backlight_control = dce110_edp_backlight_control,
3353 	.edp_power_control = dce110_edp_power_control,
3354 	.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
3355 	.set_cursor_position = dcn10_set_cursor_position,
3356 	.set_cursor_attribute = dcn10_set_cursor_attribute,
3357 	.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
3358 	.disable_stream_gating = NULL,
3359 	.enable_stream_gating = NULL,
3360 	.setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
3361 	.setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt,
3362 	.set_clock = dcn10_set_clock,
3363 	.get_clock = dcn10_get_clock,
3364 	.did_underflow_occur = dcn10_did_underflow_occur,
3365 	.init_blank = NULL,
3366 	.disable_vga = dcn10_disable_vga,
3367 	.bios_golden_init = dcn10_bios_golden_init,
3368 	.plane_atomic_disable = dcn10_plane_atomic_disable,
3369 	.plane_atomic_power_down = dcn10_plane_atomic_power_down,
3370 	.enable_power_gating_plane = dcn10_enable_power_gating_plane,
3371 	.dpp_pg_control = dcn10_dpp_pg_control,
3372 	.hubp_pg_control = dcn10_hubp_pg_control,
3373 	.dsc_pg_control = NULL,
3374 };
3375 
3376 
3377 void dcn10_hw_sequencer_construct(struct dc *dc)
3378 {
3379 	dc->hwss = dcn10_funcs;
3380 }
3381 
3382