/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dm_services.h"

#include "core_types.h"

#include "reg_helper.h"
#include "dcn10_dpp.h"
#include "basics/conversion.h"
#include "dcn10_cm_common.h"

#define NUM_PHASES 64
#define HORZ_MAX_TAPS 8
#define VERT_MAX_TAPS 8

#define BLACK_OFFSET_RGB_Y 0x0
#define BLACK_OFFSET_CBCR 0x8000

#define REG(reg)\
	dpp->tf_regs->reg

#define CTX \
	dpp->base.ctx

#undef FN
#define FN(reg_name, field_name) \
	dpp->tf_shift->field_name, dpp->tf_mask->field_name

#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))


enum dcn10_coef_filter_type_sel {
	SCL_COEF_LUMA_VERT_FILTER = 0,
	SCL_COEF_LUMA_HORZ_FILTER = 1,
	SCL_COEF_CHROMA_VERT_FILTER = 2,
	SCL_COEF_CHROMA_HORZ_FILTER = 3,
	SCL_COEF_ALPHA_VERT_FILTER = 4,
	SCL_COEF_ALPHA_HORZ_FILTER = 5
};

enum dscl_autocal_mode {
	AUTOCAL_MODE_OFF = 0,

	/* Autocal calculates the scaling ratio and initial phase, and
	 * DSCL_MODE_SEL must be set to 1
	 */
	AUTOCAL_MODE_AUTOSCALE = 1,
	/* Autocal performs auto centering without replication, and
	 * DSCL_MODE_SEL must be set to 0
	 */
	AUTOCAL_MODE_AUTOCENTER = 2,
	/* Autocal performs auto centering and auto replication, and
	 * DSCL_MODE_SEL must be set to 0
	 */
	AUTOCAL_MODE_AUTOREPLICATE = 3
};

enum dscl_mode_sel {
	DSCL_MODE_SCALING_444_BYPASS = 0,
	DSCL_MODE_SCALING_444_RGB_ENABLE = 1,
	DSCL_MODE_SCALING_444_YCBCR_ENABLE = 2,
	DSCL_MODE_SCALING_420_YCBCR_ENABLE = 3,
	DSCL_MODE_SCALING_420_LUMA_BYPASS = 4,
	DSCL_MODE_SCALING_420_CHROMA_BYPASS = 5,
	DSCL_MODE_DSCL_BYPASS = 6
};

static const struct dpp_input_csc_matrix dpp_input_csc_matrix[] = {
	{COLOR_SPACE_SRGB,
		{0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
	{COLOR_SPACE_SRGB_LIMITED,
		{0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
	{COLOR_SPACE_YCBCR601,
		{0x2cdd, 0x2000, 0, 0xe991, 0xe926, 0x2000, 0xf4fd, 0x10ef,
			0, 0x2000, 0x38b4, 0xe3a6} },
	{COLOR_SPACE_YCBCR601_LIMITED,
		{0x3353, 0x2568, 0, 0xe400, 0xe5dc, 0x2568, 0xf367, 0x1108,
			0, 0x2568, 0x40de, 0xdd3a} },
	{COLOR_SPACE_YCBCR709,
		{0x3265, 0x2000, 0, 0xe6ce, 0xf105, 0x2000, 0xfa01, 0xa7d, 0,
			0x2000, 0x3b61, 0xe24f} },

	{COLOR_SPACE_YCBCR709_LIMITED,
		{0x39a6, 0x2568, 0, 0xe0d6, 0xeedd, 0x2568, 0xf925, 0x9a8, 0,
			0x2568, 0x43ee, 0xdbb2} }
};

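/*
 * Program one of the gamut remap coefficient sets (COEFF, COMA or COMB),
 * or bypass the gamut remap block when no coefficients are given.
 */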
static void program_gamut_remap(
		struct dcn10_dpp *dpp,
		const uint16_t *regval,
		enum gamut_remap_select select)
{
	uint16_t selection = 0;
	struct color_matrices_reg gam_regs;

	if (regval == NULL || select == GAMUT_REMAP_BYPASS) {
		REG_SET(CM_GAMUT_REMAP_CONTROL, 0,
				CM_GAMUT_REMAP_MODE, 0);
		return;
	}
	switch (select) {
	case GAMUT_REMAP_COEFF:
		selection = 1;
		break;
	case GAMUT_REMAP_COMA_COEFF:
		selection = 2;
		break;
	case GAMUT_REMAP_COMB_COEFF:
		selection = 3;
		break;
	default:
		break;
	}

	gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_GAMUT_REMAP_C11;
	gam_regs.masks.csc_c11 = dpp->tf_mask->CM_GAMUT_REMAP_C11;
	gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_GAMUT_REMAP_C12;
	gam_regs.masks.csc_c12 = dpp->tf_mask->CM_GAMUT_REMAP_C12;


	if (select == GAMUT_REMAP_COEFF) {
		gam_regs.csc_c11_c12 = REG(CM_GAMUT_REMAP_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_GAMUT_REMAP_C33_C34);

		cm_helper_program_color_matrices(
				dpp->base.ctx,
				regval,
				&gam_regs);

	} else if (select == GAMUT_REMAP_COMA_COEFF) {

		gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34);

		cm_helper_program_color_matrices(
				dpp->base.ctx,
				regval,
				&gam_regs);

	} else {

		gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34);

		cm_helper_program_color_matrices(
				dpp->base.ctx,
				regval,
				&gam_regs);
	}

	REG_SET(
			CM_GAMUT_REMAP_CONTROL, 0,
			CM_GAMUT_REMAP_MODE, selection);

}

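/*
 * Apply a software gamut/temperature adjustment by converting the 3x4
 * fixed-point matrix to register format, or bypass the gamut remap block
 * for the bypass and hardware adjustment types.
 */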
void dpp1_cm_set_gamut_remap(
	struct dpp *dpp_base,
	const struct dpp_grph_csc_adjustment *adjust)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	int i = 0;

	if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW)
		/* Bypass if type is bypass or hw */
		program_gamut_remap(dpp, NULL, GAMUT_REMAP_BYPASS);
	else {
		struct fixed31_32 arr_matrix[12];
		uint16_t arr_reg_val[12];

		for (i = 0; i < 12; i++)
			arr_matrix[i] = adjust->temperature_matrix[i];

		convert_float_matrix(
			arr_reg_val, arr_matrix, 12);

		program_gamut_remap(dpp, arr_reg_val, GAMUT_REMAP_COEFF);
	}
}

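/*
 * Program the output CSC coefficients into whichever matrix set (OCSC or
 * COMB) is not currently active, so the update lands on a frame boundary.
 */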
static void dpp1_cm_program_color_matrix(
		struct dcn10_dpp *dpp,
		const uint16_t *regval)
{
	uint32_t ocsc_mode;
	uint32_t cur_mode;
	struct color_matrices_reg gam_regs;

	if (regval == NULL) {
		BREAK_TO_DEBUGGER();
		return;
	}

	/* Determine which CSC matrix (OCSC or COMB) is currently in use,
	 * then select the alternate set to double buffer the CSC update
	 * so the CSC takes effect on a frame boundary.
	 */
	REG_SET(CM_TEST_DEBUG_INDEX, 0,
			CM_TEST_DEBUG_INDEX, 9);

	REG_GET(CM_TEST_DEBUG_DATA,
			CM_TEST_DEBUG_DATA_ID9_OCSC_MODE, &cur_mode);

	if (cur_mode != 4)
		ocsc_mode = 4;
	else
		ocsc_mode = 5;


	gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_OCSC_C11;
	gam_regs.masks.csc_c11 = dpp->tf_mask->CM_OCSC_C11;
	gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_OCSC_C12;
	gam_regs.masks.csc_c12 = dpp->tf_mask->CM_OCSC_C12;

	if (ocsc_mode == 4) {

		gam_regs.csc_c11_c12 = REG(CM_OCSC_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_OCSC_C33_C34);

	} else {

		gam_regs.csc_c11_c12 = REG(CM_COMB_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_COMB_C33_C34);

	}

	cm_helper_program_color_matrices(
			dpp->base.ctx,
			regval,
			&gam_regs);

	REG_SET(CM_OCSC_CONTROL, 0, CM_OCSC_MODE, ocsc_mode);

}

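/* Program the default output CSC matrix for the given color space. */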
void dpp1_cm_set_output_csc_default(
		struct dpp *dpp_base,
		enum dc_color_space colorspace)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	const uint16_t *regval = NULL;
	int arr_size;

	regval = find_color_matrix(colorspace, &arr_size);
	if (regval == NULL) {
		BREAK_TO_DEBUGGER();
		return;
	}

	dpp1_cm_program_color_matrix(dpp, regval);
}

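/* Fill the shift/mask fields used to program the regamma RAM registers. */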
static void dpp1_cm_get_reg_field(
		struct dcn10_dpp *dpp,
		struct xfer_func_reg *reg)
{
	reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET;
	reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION0_LUT_OFFSET;
	reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
	reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
	reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET;
	reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION1_LUT_OFFSET;
	reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
	reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_RGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;

	reg->shifts.field_region_end = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_B;
	reg->masks.field_region_end = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_B;
	reg->shifts.field_region_end_slope = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B;
	reg->masks.field_region_end_slope = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_SLOPE_B;
	reg->shifts.field_region_end_base = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_END_BASE_B;
	reg->masks.field_region_end_base = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_END_BASE_B;
	reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;
	reg->masks.field_region_linear_slope = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;
	reg->shifts.exp_region_start = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_START_B;
	reg->masks.exp_region_start = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_START_B;
	reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B;
	reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_RGAM_RAMB_EXP_REGION_START_SEGMENT_B;
}

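/* Fill the shift/mask fields used to program the degamma RAM registers. */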
static void dpp1_cm_get_degamma_reg_field(
		struct dcn10_dpp *dpp,
		struct xfer_func_reg *reg)
{
	reg->shifts.exp_region0_lut_offset = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET;
	reg->masks.exp_region0_lut_offset = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET;
	reg->shifts.exp_region0_num_segments = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
	reg->masks.exp_region0_num_segments = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS;
	reg->shifts.exp_region1_lut_offset = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET;
	reg->masks.exp_region1_lut_offset = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET;
	reg->shifts.exp_region1_num_segments = dpp->tf_shift->CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;
	reg->masks.exp_region1_num_segments = dpp->tf_mask->CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS;

	reg->shifts.field_region_end = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_B;
	reg->masks.field_region_end = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_B;
	reg->shifts.field_region_end_slope = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B;
	reg->masks.field_region_end_slope = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B;
	reg->shifts.field_region_end_base = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_END_BASE_B;
	reg->masks.field_region_end_base = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_END_BASE_B;
	reg->shifts.field_region_linear_slope = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;
	reg->masks.field_region_linear_slope = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B;
	reg->shifts.exp_region_start = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_START_B;
	reg->masks.exp_region_start = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_START_B;
	reg->shifts.exp_resion_start_segment = dpp->tf_shift->CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B;
	reg->masks.exp_resion_start_segment = dpp->tf_mask->CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B;
}
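
/* Program a caller-supplied output CSC matrix. */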
void dpp1_cm_set_output_csc_adjustment(
		struct dpp *dpp_base,
		const uint16_t *regval)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	dpp1_cm_program_color_matrix(dpp, regval);
}

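/*
 * Control regamma LUT memory power: release the power force when powering
 * on, assert it otherwise.
 */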
void dpp1_cm_power_on_regamma_lut(struct dpp *dpp_base,
		bool power_on)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_SET(CM_MEM_PWR_CTRL, 0,
			RGAM_MEM_PWR_FORCE, power_on ? 0 : 1);

}

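/*
 * Write num regamma PWL points to the currently selected RAM; each point
 * is six writes: R/G/B base values followed by R/G/B deltas.
 */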
void dpp1_cm_program_regamma_lut(struct dpp *dpp_base,
		const struct pwl_result_data *rgb,
		uint32_t num)
{
	uint32_t i;
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	for (i = 0; i < num; i++) {
		REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].red_reg);
		REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].green_reg);
		REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].blue_reg);

		REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_red_reg);
		REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_green_reg);
		REG_SET(CM_RGAM_LUT_DATA, 0, CM_RGAM_LUT_DATA, rgb[i].delta_blue_reg);

	}

}

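/*
 * Enable writes to all three color channels of the selected regamma RAM
 * (A or B) and reset the LUT write index to 0.
 */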
void dpp1_cm_configure_regamma_lut(
		struct dpp *dpp_base,
		bool is_ram_a)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_UPDATE(CM_RGAM_LUT_WRITE_EN_MASK,
			CM_RGAM_LUT_WRITE_EN_MASK, 7);
	REG_UPDATE(CM_RGAM_LUT_WRITE_EN_MASK,
			CM_RGAM_LUT_WRITE_SEL, is_ram_a ? 0 : 1);
	REG_SET(CM_RGAM_LUT_INDEX, 0, CM_RGAM_LUT_INDEX, 0);
}

/* Program regamma RAM A */
void dpp1_cm_program_regamma_luta_settings(
		struct dpp *dpp_base,
		const struct pwl_params *params)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	struct xfer_func_reg gam_regs;

	dpp1_cm_get_reg_field(dpp, &gam_regs);

	gam_regs.start_cntl_b = REG(CM_RGAM_RAMA_START_CNTL_B);
	gam_regs.start_cntl_g = REG(CM_RGAM_RAMA_START_CNTL_G);
	gam_regs.start_cntl_r = REG(CM_RGAM_RAMA_START_CNTL_R);
	gam_regs.start_slope_cntl_b = REG(CM_RGAM_RAMA_SLOPE_CNTL_B);
	gam_regs.start_slope_cntl_g = REG(CM_RGAM_RAMA_SLOPE_CNTL_G);
	gam_regs.start_slope_cntl_r = REG(CM_RGAM_RAMA_SLOPE_CNTL_R);
	gam_regs.start_end_cntl1_b = REG(CM_RGAM_RAMA_END_CNTL1_B);
	gam_regs.start_end_cntl2_b = REG(CM_RGAM_RAMA_END_CNTL2_B);
	gam_regs.start_end_cntl1_g = REG(CM_RGAM_RAMA_END_CNTL1_G);
	gam_regs.start_end_cntl2_g = REG(CM_RGAM_RAMA_END_CNTL2_G);
	gam_regs.start_end_cntl1_r = REG(CM_RGAM_RAMA_END_CNTL1_R);
	gam_regs.start_end_cntl2_r = REG(CM_RGAM_RAMA_END_CNTL2_R);
	gam_regs.region_start = REG(CM_RGAM_RAMA_REGION_0_1);
	gam_regs.region_end = REG(CM_RGAM_RAMA_REGION_32_33);

	cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);

}

/* Program regamma RAM B */
void dpp1_cm_program_regamma_lutb_settings(
		struct dpp *dpp_base,
		const struct pwl_params *params)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	struct xfer_func_reg gam_regs;

	dpp1_cm_get_reg_field(dpp, &gam_regs);

	gam_regs.start_cntl_b = REG(CM_RGAM_RAMB_START_CNTL_B);
	gam_regs.start_cntl_g = REG(CM_RGAM_RAMB_START_CNTL_G);
	gam_regs.start_cntl_r = REG(CM_RGAM_RAMB_START_CNTL_R);
	gam_regs.start_slope_cntl_b = REG(CM_RGAM_RAMB_SLOPE_CNTL_B);
	gam_regs.start_slope_cntl_g = REG(CM_RGAM_RAMB_SLOPE_CNTL_G);
	gam_regs.start_slope_cntl_r = REG(CM_RGAM_RAMB_SLOPE_CNTL_R);
	gam_regs.start_end_cntl1_b = REG(CM_RGAM_RAMB_END_CNTL1_B);
	gam_regs.start_end_cntl2_b = REG(CM_RGAM_RAMB_END_CNTL2_B);
	gam_regs.start_end_cntl1_g = REG(CM_RGAM_RAMB_END_CNTL1_G);
	gam_regs.start_end_cntl2_g = REG(CM_RGAM_RAMB_END_CNTL2_G);
	gam_regs.start_end_cntl1_r = REG(CM_RGAM_RAMB_END_CNTL1_R);
	gam_regs.start_end_cntl2_r = REG(CM_RGAM_RAMB_END_CNTL2_R);
	gam_regs.region_start = REG(CM_RGAM_RAMB_REGION_0_1);
	gam_regs.region_end = REG(CM_RGAM_RAMB_REGION_32_33);

	cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);
}

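/*
 * Program the input CSC: bypass, a matrix looked up by color space, or a
 * caller-supplied table entry. The inactive coefficient set (ICSC or COMA)
 * is written so the update double buffers onto a frame boundary.
 */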
void dpp1_program_input_csc(
		struct dpp *dpp_base,
		enum dc_color_space color_space,
		enum dcn10_input_csc_select input_select,
		const struct out_csc_color_matrix *tbl_entry)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	int i;
	int arr_size = sizeof(dpp_input_csc_matrix)/sizeof(struct dpp_input_csc_matrix);
	const uint16_t *regval = NULL;
	uint32_t cur_select = 0;
	enum dcn10_input_csc_select select;
	struct color_matrices_reg gam_regs;

	if (input_select == INPUT_CSC_SELECT_BYPASS) {
		REG_SET(CM_ICSC_CONTROL, 0, CM_ICSC_MODE, 0);
		return;
	}

	if (tbl_entry == NULL) {
		for (i = 0; i < arr_size; i++)
			if (dpp_input_csc_matrix[i].color_space == color_space) {
				regval = dpp_input_csc_matrix[i].regval;
				break;
			}

		if (regval == NULL) {
			BREAK_TO_DEBUGGER();
			return;
		}
	} else {
		regval = tbl_entry->regval;
	}

	/* Determine which CSC matrix (ICSC or COMA) is currently in use,
	 * then select the alternate set to double buffer the CSC update
	 * so the CSC takes effect on a frame boundary.
	 */
	REG_SET(CM_TEST_DEBUG_INDEX, 0,
			CM_TEST_DEBUG_INDEX, 9);

	REG_GET(CM_TEST_DEBUG_DATA,
			CM_TEST_DEBUG_DATA_ID9_ICSC_MODE, &cur_select);

	if (cur_select != INPUT_CSC_SELECT_ICSC)
		select = INPUT_CSC_SELECT_ICSC;
	else
		select = INPUT_CSC_SELECT_COMA;

	gam_regs.shifts.csc_c11 = dpp->tf_shift->CM_ICSC_C11;
	gam_regs.masks.csc_c11 = dpp->tf_mask->CM_ICSC_C11;
	gam_regs.shifts.csc_c12 = dpp->tf_shift->CM_ICSC_C12;
	gam_regs.masks.csc_c12 = dpp->tf_mask->CM_ICSC_C12;

	if (select == INPUT_CSC_SELECT_ICSC) {

		gam_regs.csc_c11_c12 = REG(CM_ICSC_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_ICSC_C33_C34);

	} else {

		gam_regs.csc_c11_c12 = REG(CM_COMA_C11_C12);
		gam_regs.csc_c33_c34 = REG(CM_COMA_C33_C34);

	}

	cm_helper_program_color_matrices(
			dpp->base.ctx,
			regval,
			&gam_regs);

	REG_SET(CM_ICSC_CONTROL, 0,
			CM_ICSC_MODE, select);
}

// Keep here for now; decide multi-DCE support later.
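/* Program per-channel bias and scale values into the CM_BNS registers. */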
void dpp1_program_bias_and_scale(
	struct dpp *dpp_base,
	struct dc_bias_and_scale *params)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_SET_2(CM_BNS_VALUES_R, 0,
			CM_BNS_SCALE_R, params->scale_red,
			CM_BNS_BIAS_R, params->bias_red);

	REG_SET_2(CM_BNS_VALUES_G, 0,
			CM_BNS_SCALE_G, params->scale_green,
			CM_BNS_BIAS_G, params->bias_green);

	REG_SET_2(CM_BNS_VALUES_B, 0,
			CM_BNS_SCALE_B, params->scale_blue,
			CM_BNS_BIAS_B, params->bias_blue);

}

/* Program degamma RAM B */
void dpp1_program_degamma_lutb_settings(
		struct dpp *dpp_base,
		const struct pwl_params *params)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	struct xfer_func_reg gam_regs;

	dpp1_cm_get_degamma_reg_field(dpp, &gam_regs);

	gam_regs.start_cntl_b = REG(CM_DGAM_RAMB_START_CNTL_B);
	gam_regs.start_cntl_g = REG(CM_DGAM_RAMB_START_CNTL_G);
	gam_regs.start_cntl_r = REG(CM_DGAM_RAMB_START_CNTL_R);
	gam_regs.start_slope_cntl_b = REG(CM_DGAM_RAMB_SLOPE_CNTL_B);
	gam_regs.start_slope_cntl_g = REG(CM_DGAM_RAMB_SLOPE_CNTL_G);
	gam_regs.start_slope_cntl_r = REG(CM_DGAM_RAMB_SLOPE_CNTL_R);
	gam_regs.start_end_cntl1_b = REG(CM_DGAM_RAMB_END_CNTL1_B);
	gam_regs.start_end_cntl2_b = REG(CM_DGAM_RAMB_END_CNTL2_B);
	gam_regs.start_end_cntl1_g = REG(CM_DGAM_RAMB_END_CNTL1_G);
	gam_regs.start_end_cntl2_g = REG(CM_DGAM_RAMB_END_CNTL2_G);
	gam_regs.start_end_cntl1_r = REG(CM_DGAM_RAMB_END_CNTL1_R);
	gam_regs.start_end_cntl2_r = REG(CM_DGAM_RAMB_END_CNTL2_R);
	gam_regs.region_start = REG(CM_DGAM_RAMB_REGION_0_1);
	gam_regs.region_end = REG(CM_DGAM_RAMB_REGION_14_15);


	cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);
}

/* Program degamma RAM A */
void dpp1_program_degamma_luta_settings(
		struct dpp *dpp_base,
		const struct pwl_params *params)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	struct xfer_func_reg gam_regs;

	dpp1_cm_get_degamma_reg_field(dpp, &gam_regs);

	gam_regs.start_cntl_b = REG(CM_DGAM_RAMA_START_CNTL_B);
	gam_regs.start_cntl_g = REG(CM_DGAM_RAMA_START_CNTL_G);
	gam_regs.start_cntl_r = REG(CM_DGAM_RAMA_START_CNTL_R);
	gam_regs.start_slope_cntl_b = REG(CM_DGAM_RAMA_SLOPE_CNTL_B);
	gam_regs.start_slope_cntl_g = REG(CM_DGAM_RAMA_SLOPE_CNTL_G);
	gam_regs.start_slope_cntl_r = REG(CM_DGAM_RAMA_SLOPE_CNTL_R);
	gam_regs.start_end_cntl1_b = REG(CM_DGAM_RAMA_END_CNTL1_B);
	gam_regs.start_end_cntl2_b = REG(CM_DGAM_RAMA_END_CNTL2_B);
	gam_regs.start_end_cntl1_g = REG(CM_DGAM_RAMA_END_CNTL1_G);
	gam_regs.start_end_cntl2_g = REG(CM_DGAM_RAMA_END_CNTL2_G);
	gam_regs.start_end_cntl1_r = REG(CM_DGAM_RAMA_END_CNTL1_R);
	gam_regs.start_end_cntl2_r = REG(CM_DGAM_RAMA_END_CNTL2_R);
	gam_regs.region_start = REG(CM_DGAM_RAMA_REGION_0_1);
	gam_regs.region_end = REG(CM_DGAM_RAMA_REGION_14_15);

	cm_helper_program_xfer_func(dpp->base.ctx, params, &gam_regs);
}

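/* Set the shared LUT memory power control for the degamma LUT. */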
void dpp1_power_on_degamma_lut(
		struct dpp *dpp_base,
		bool power_on)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_SET(CM_MEM_PWR_CTRL, 0,
			SHARED_MEM_PWR_DIS, power_on ? 0 : 1);

}

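/*
 * Take the color management block out of bypass and set its output
 * round/truncate mode.
 */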
static void dpp1_enable_cm_block(
		struct dpp *dpp_base)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_UPDATE(CM_CMOUT_CONTROL, CM_CMOUT_ROUND_TRUNC_MODE, 8);
	REG_UPDATE(CM_CONTROL, CM_BYPASS_EN, 0);
}

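/* Select the degamma LUT mode: bypass, HW sRGB, or HW xvYCC. */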
void dpp1_set_degamma(
		struct dpp *dpp_base,
		enum ipp_degamma_mode mode)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	dpp1_enable_cm_block(dpp_base);

	switch (mode) {
	case IPP_DEGAMMA_MODE_BYPASS:
		/* Setting degamma bypass for now */
		REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 0);
		break;
	case IPP_DEGAMMA_MODE_HW_sRGB:
		REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 1);
		break;
	case IPP_DEGAMMA_MODE_HW_xvYCC:
		REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 2);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}
}

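/* Switch the degamma LUT between RAM A (mode 3) and RAM B (mode 4). */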
void dpp1_degamma_ram_select(
		struct dpp *dpp_base,
		bool use_ram_a)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	if (use_ram_a)
		REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 3);
	else
		REG_UPDATE(CM_DGAM_CONTROL, CM_DGAM_LUT_MODE, 4);

}

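/*
 * Report whether degamma RAM A or RAM B is currently in use, based on the
 * IGAM/DGAM configuration status; returns false if the status is neither.
 */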
static bool dpp1_degamma_ram_inuse(
		struct dpp *dpp_base,
		bool *ram_a_inuse)
{
	bool ret = false;
	uint32_t status_reg = 0;
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_GET(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS,
			&status_reg);

	if (status_reg == 9) {
		*ram_a_inuse = true;
		ret = true;
	} else if (status_reg == 10) {
		*ram_a_inuse = false;
		ret = true;
	}
	return ret;
}

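/*
 * Write num degamma PWL points (R/G/B base values followed by R/G/B
 * deltas) into the selected degamma RAM.
 */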
void dpp1_program_degamma_lut(
		struct dpp *dpp_base,
		const struct pwl_result_data *rgb,
		uint32_t num,
		bool is_ram_a)
{
	uint32_t i;
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_HOST_EN, 0);
	REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK,
			CM_DGAM_LUT_WRITE_EN_MASK, 7);
	REG_UPDATE(CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_LUT_WRITE_SEL,
			is_ram_a ? 0 : 1);

	REG_SET(CM_DGAM_LUT_INDEX, 0, CM_DGAM_LUT_INDEX, 0);
	for (i = 0; i < num; i++) {
		REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].red_reg);
		REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].green_reg);
		REG_SET(CM_DGAM_LUT_DATA, 0, CM_DGAM_LUT_DATA, rgb[i].blue_reg);

		REG_SET(CM_DGAM_LUT_DATA, 0,
				CM_DGAM_LUT_DATA, rgb[i].delta_red_reg);
		REG_SET(CM_DGAM_LUT_DATA, 0,
				CM_DGAM_LUT_DATA, rgb[i].delta_green_reg);
		REG_SET(CM_DGAM_LUT_DATA, 0,
				CM_DGAM_LUT_DATA, rgb[i].delta_blue_reg);
	}
}

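/*
 * Program a degamma PWL: power on the LUT, write the settings and points
 * into the RAM that is not currently in use, then switch to it.
 */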
void dpp1_set_degamma_pwl(struct dpp *dpp_base,
		const struct pwl_params *params)
{
	bool is_ram_a = true;

	dpp1_power_on_degamma_lut(dpp_base, true);
	dpp1_enable_cm_block(dpp_base);
	dpp1_degamma_ram_inuse(dpp_base, &is_ram_a);
	if (is_ram_a)
		dpp1_program_degamma_lutb_settings(dpp_base, params);
	else
		dpp1_program_degamma_luta_settings(dpp_base, params);

	dpp1_program_degamma_lut(dpp_base, params->rgb_resulted,
			params->hw_points_num, !is_ram_a);
	dpp1_degamma_ram_select(dpp_base, !is_ram_a);
}

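/*
 * Put the DPP into full bypass: ARGB8888 input, zero expansion, and the
 * color management and degamma blocks bypassed.
 */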
void dpp1_full_bypass(struct dpp *dpp_base)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	/* Input pixel format: ARGB8888 */
	REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0,
			CNVC_SURFACE_PIXEL_FORMAT, 0x8);

	/* Zero expansion */
	REG_SET_3(FORMAT_CONTROL, 0,
			CNVC_BYPASS, 0,
			FORMAT_CONTROL__ALPHA_EN, 0,
			FORMAT_EXPANSION_MODE, 0);

	/* COLOR_KEYER_CONTROL.COLOR_KEYER_EN = 0 this should be default */
	if (dpp->tf_mask->CM_BYPASS_EN)
		REG_SET(CM_CONTROL, 0, CM_BYPASS_EN, 1);
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
	else
		REG_SET(CM_CONTROL, 0, CM_BYPASS, 1);
#endif

	/* Setting degamma bypass for now */
	REG_SET(CM_DGAM_CONTROL, 0, CM_DGAM_LUT_MODE, 0);
}

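/*
 * Report whether input gamma RAM A or RAM B is currently in use, based on
 * the IGAM/DGAM configuration status.
 */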
static bool dpp1_ingamma_ram_inuse(struct dpp *dpp_base,
		bool *ram_a_inuse)
{
	bool in_use = false;
	uint32_t status_reg = 0;
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_GET(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_DGAM_CONFIG_STATUS,
			&status_reg);

	// 1 => IGAM_RAMA, 3 => IGAM_RAMA & DGAM_ROMA, 4 => IGAM_RAMA & DGAM_ROMB
	if (status_reg == 1 || status_reg == 3 || status_reg == 4) {
		*ram_a_inuse = true;
		in_use = true;
	// 2 => IGAM_RAMB, 5 => IGAM_RAMB & DGAM_ROMA, 6 => IGAM_RAMB & DGAM_ROMB
	} else if (status_reg == 2 || status_reg == 5 || status_reg == 6) {
		*ram_a_inuse = false;
		in_use = true;
	}
	return in_use;
}

/*
 * Input gamma LUT currently supports 256 values only. This means input color
 * can have a maximum of 8 bits per channel (= 256 possible values) in order to
 * have a one-to-one mapping with the LUT. Truncation will occur with color
 * values greater than 8 bits.
 *
 * In the future, this function should support additional input gamma methods,
 * such as piecewise linear mapping, and input gamma bypass.
 */
void dpp1_program_input_lut(
		struct dpp *dpp_base,
		const struct dc_gamma *gamma)
{
	int i;
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
	bool rama_occupied = false;
	uint32_t ram_num;

	// Power on LUT memory.
	REG_SET(CM_MEM_PWR_CTRL, 0, SHARED_MEM_PWR_DIS, 1);
	dpp1_enable_cm_block(dpp_base);
	// Determine whether to use RAM A or RAM B
	dpp1_ingamma_ram_inuse(dpp_base, &rama_occupied);
	if (!rama_occupied)
		REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, 0);
	else
		REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_SEL, 1);
	// RW mode is 256-entry LUT
	REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_RW_MODE, 0);
	// IGAM input format should be 8 bits per channel.
	REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_INPUT_FORMAT, 0);
	// Do not mask any R,G,B values
	REG_UPDATE(CM_IGAM_LUT_RW_CONTROL, CM_IGAM_LUT_WRITE_EN_MASK, 7);
	// LUT-256, unsigned, integer, new u0.12 format
	REG_UPDATE_3(
			CM_IGAM_CONTROL,
			CM_IGAM_LUT_FORMAT_R, 3,
			CM_IGAM_LUT_FORMAT_G, 3,
			CM_IGAM_LUT_FORMAT_B, 3);
	// Start at index 0 of IGAM LUT
	REG_UPDATE(CM_IGAM_LUT_RW_INDEX, CM_IGAM_LUT_RW_INDEX, 0);
	for (i = 0; i < gamma->num_entries; i++) {
		REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
				dc_fixpt_round(
					gamma->entries.red[i]));
		REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
				dc_fixpt_round(
					gamma->entries.green[i]));
		REG_SET(CM_IGAM_LUT_SEQ_COLOR, 0, CM_IGAM_LUT_SEQ_COLOR,
				dc_fixpt_round(
					gamma->entries.blue[i]));
	}
	// Power off LUT memory
	REG_SET(CM_MEM_PWR_CTRL, 0, SHARED_MEM_PWR_DIS, 0);
	// Enable IGAM LUT on the RAM we just wrote to. 2 => RAMA, 3 => RAMB
	REG_UPDATE(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, rama_occupied ? 3 : 2);
	REG_GET(CM_IGAM_CONTROL, CM_IGAM_LUT_MODE, &ram_num);
}

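/* Program the CM HDR multiplier coefficient. */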
void dpp1_set_hdr_multiplier(
		struct dpp *dpp_base,
		uint32_t multiplier)
{
	struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);

	REG_UPDATE(CM_HDR_MULT_COEF, CM_HDR_MULT_COEF, multiplier);
}