/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */

#include "dm_services.h"

#include "dc.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"
#include "dce/dce_hwseq.h"

#include "resource.h"

#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "dmcu.h"
#include "dpp.h"
#include "timing_generator.h"
#include "abm.h"
#include "virtual/virtual_link_encoder.h"

#include "link_hwss.h"
#include "link_encoder.h"

#include "dc_link_ddc.h"
#include "dm_helpers.h"
#include "mem_input.h"
#include "hubp.h"

#include "dc_link_dp.h"
#define DC_LOGGER \
	dc->ctx->logger


/*******************************************************************************
 * Private functions
 ******************************************************************************/

static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
	if (new > *original)
		*original = new;
}

static void destroy_links(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++) {
		if (NULL != dc->links[i])
			link_destroy(&dc->links[i]);
	}
}

static bool create_links(
		struct dc *dc,
		uint32_t num_virtual_links)
{
	int i;
	int connectors_num;
	struct dc_bios *bios = dc->ctx->dc_bios;

	dc->link_count = 0;

	connectors_num = bios->funcs->get_connectors_number(bios);

	if (connectors_num > ENUM_ID_COUNT) {
		dm_error(
			"DC: Number of connectors %d exceeds maximum of %d!\n",
			connectors_num,
			ENUM_ID_COUNT);
		return false;
	}

	if (connectors_num == 0 && num_virtual_links == 0) {
		dm_error("DC: Number of connectors is zero!\n");
	}

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
		__func__,
		connectors_num,
		num_virtual_links);

	for (i = 0; i < connectors_num; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		link_init_params.ctx = dc->ctx;
		/* next BIOS object table connector */
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = link_create(&link_init_params);

		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	for (i = 0; i < num_virtual_links; i++) {
		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
		struct encoder_init_data enc_init = {0};

		if (link == NULL) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_index = dc->link_count;
		dc->links[dc->link_count] = link;
		dc->link_count++;

		link->ctx = dc->ctx;
		link->dc = dc;
		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
		link->link_id.type = OBJECT_TYPE_CONNECTOR;
		link->link_id.id = CONNECTOR_ID_VIRTUAL;
		link->link_id.enum_id = ENUM_ID_1;
		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

		if (!link->link_enc) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_status.dpcd_caps = &link->dpcd_caps;

		enc_init.ctx = dc->ctx;
		enc_init.channel = CHANNEL_ID_UNKNOWN;
		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
		enc_init.transmitter = TRANSMITTER_UNKNOWN;
		enc_init.connector = link->link_id;
		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
		enc_init.encoder.enum_id = ENUM_ID_1;
		virtual_link_encoder_construct(link->link_enc, &enc_init);
	}

	return true;

failed_alloc:
	return false;
}

/**
 * dc_stream_adjust_vmin_vmax: Adjust the DRR vertical total range.
 * @dc: DC object.
 * @streams: Streams to adjust; only streams[0] is currently honored.
 * @num_streams: Number of entries in @streams (multi-stream is a TODO).
 * @vmin: New vertical_total_min for the stream's timing.
 * @vmax: New vertical_total_max for the stream's timing.
 *
 * Looks up the pipe contexts of the dc_stream_state and updates the
 * vertical_total_min and vertical_total_max of DRR (Dynamic Refresh Rate),
 * a power-saving feature that reduces the panel refresh rate while the
 * screen is static.
 */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		int vmin, int vmax)
{
	/* TODO: Support multiple streams */
	struct dc_stream_state *stream = streams[0];
	int i = 0;
	bool ret = false;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
			dc->hwss.set_drr(&pipe, 1, vmin, vmax);

			/* build and update the info frame */
			resource_build_info_frame(pipe);
			dc->hwss.update_info_frame(pipe);

			ret = true;
		}
	}
	return ret;
}

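/*
 * Usage sketch (illustrative only, not called by the driver): a DM layer
 * that wants to lower the refresh rate of a static screen could clamp the
 * DRR range of a single stream like this. The stream pointer and the
 * vmin/vmax values are assumed to come from the caller's own policy.
 *
 *	struct dc_stream_state *streams[1] = { stream };
 *
 *	if (!dc_stream_adjust_vmin_vmax(dc, streams, 1, vmin, vmax))
 *		DC_LOG_WARNING("no enabled pipe found for stream\n");
 */
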
bool dc_stream_get_crtc_position(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		unsigned int *v_pos, unsigned int *nom_v_pos)
{
	/* TODO: Support multiple streams */
	struct dc_stream_state *stream = streams[0];
	int i = 0;
	bool ret = false;
	struct crtc_position position;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe =
				&dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
			dc->hwss.get_position(&pipe, 1, &position);

			*v_pos = position.vertical_count;
			*nom_v_pos = position.nominal_vcount;
			ret = true;
		}
	}
	return ret;
}

/**
 * dc_stream_configure_crc: Configure CRC capture for the given stream.
 * @dc: DC Object
 * @stream: The stream to configure CRC on.
 * @enable: Enable CRC if true, disable otherwise.
 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
 *              once.
 *
 * By default, only CRC0 is configured, and the entire frame is used to
 * calculate the crc.
 */
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
			     bool enable, bool continuous)
{
	int i;
	struct pipe_ctx *pipe;
	struct crc_params param;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	/* Always capture the full frame */
	param.windowa_x_start = 0;
	param.windowa_y_start = 0;
	param.windowa_x_end = pipe->stream->timing.h_addressable;
	param.windowa_y_end = pipe->stream->timing.v_addressable;
	param.windowb_x_start = 0;
	param.windowb_y_start = 0;
	param.windowb_x_end = pipe->stream->timing.h_addressable;
	param.windowb_y_end = pipe->stream->timing.v_addressable;

	/* Default to the union of both windows */
	param.selection = UNION_WINDOW_A_B;
	param.continuous_mode = continuous;
	param.enable = enable;

	tg = pipe->stream_res.tg;

	/* Only call if supported */
	if (tg->funcs->configure_crc)
		return tg->funcs->configure_crc(tg, &param);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}

/**
 * dc_stream_get_crc: Get CRC values for the given stream.
 * @dc: DC object
 * @stream: The DC stream state of the stream to get CRCs from.
 * @r_cr: CRC value for the R/Cr channel is stored here.
 * @g_y: CRC value for the G/Y channel is stored here.
 * @b_cb: CRC value for the B/Cb channel is stored here.
 *
 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
 * Return false if stream is not found, or if CRCs are not enabled.
 */
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
		       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
	int i;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	tg = pipe->stream_res.tg;

	if (tg->funcs->get_crc)
		return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}

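/*
 * Usage sketch (illustrative only): enable continuous CRC capture, then
 * read back one frame's CRCs. dc_stream_get_crc() only returns valid data
 * after dc_stream_configure_crc() succeeded and at least one frame has
 * been scanned out; waiting for that frame is the caller's responsibility.
 *
 *	uint32_t r_cr, g_y, b_cb;
 *
 *	if (dc_stream_configure_crc(dc, stream, true, true)) {
 *		// wait for at least one vblank here
 *		if (dc_stream_get_crc(dc, stream, &r_cr, &g_y, &b_cb))
 *			process_crc(r_cr, g_y, b_cb); // hypothetical helper
 *	}
 */
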
void dc_stream_set_dither_option(struct dc_stream_state *stream,
		enum dc_dither_option option)
{
	struct bit_depth_reduction_params params;
	struct dc_link *link = stream->status.link;
	struct pipe_ctx *pipes = NULL;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
				stream) {
			pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
			break;
		}
	}

	if (!pipes)
		return;
	if (option > DITHER_OPTION_MAX)
		return;

	stream->dither_option = option;

	memset(&params, 0, sizeof(params));
	resource_build_bit_depth_reduction_params(stream, &params);
	stream->bit_depth_params = params;

	if (pipes->plane_res.xfm &&
	    pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
		pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
			pipes->plane_res.xfm,
			pipes->plane_res.scl_data.lb_params.depth,
			&stream->bit_depth_params);
	}

	pipes->stream_res.opp->funcs->
		opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}

void dc_stream_set_static_screen_events(struct dc *dc,
		struct dc_stream_state **streams,
		int num_streams,
		const struct dc_static_screen_events *events)
{
	int i = 0;
	int j = 0;
	struct pipe_ctx *pipes_affected[MAX_PIPES];
	int num_pipes_affected = 0;

	for (i = 0; i < num_streams; i++) {
		struct dc_stream_state *stream = streams[i];

		for (j = 0; j < MAX_PIPES; j++) {
			if (dc->current_state->res_ctx.pipe_ctx[j].stream
					== stream) {
				pipes_affected[num_pipes_affected++] =
						&dc->current_state->res_ctx.pipe_ctx[j];
			}
		}
	}

	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, events);
}

void dc_link_set_drive_settings(struct dc *dc,
		struct link_training_settings *lt_settings,
		const struct dc_link *link)
{

	int i;

	for (i = 0; i < dc->link_count; i++) {
		if (dc->links[i] == link)
			break;
	}

	if (i >= dc->link_count)
		ASSERT_CRITICAL(false);

	dc_link_dp_set_drive_settings(dc->links[i], lt_settings);
}

void dc_link_perform_link_training(struct dc *dc,
		struct dc_link_settings *link_setting,
		bool skip_video_pattern)
{
	int i;

	for (i = 0; i < dc->link_count; i++)
		dc_link_dp_perform_link_training(
			dc->links[i],
			link_setting,
			skip_video_pattern);
}

void dc_link_set_preferred_link_settings(struct dc *dc,
		struct dc_link_settings *link_setting,
		struct dc_link *link)
{
	struct dc_link_settings store_settings = *link_setting;
	struct dc_stream_state *link_stream =
		link->dc->current_state->res_ctx.pipe_ctx[0].stream;

	link->preferred_link_setting = store_settings;
	if (link_stream)
		decide_link_settings(link_stream, &store_settings);

	if ((store_settings.lane_count != LANE_COUNT_UNKNOWN) &&
		(store_settings.link_rate != LINK_RATE_UNKNOWN))
		dp_retrain_link_dp_test(link, &store_settings, false);
}

void dc_link_enable_hpd(const struct dc_link *link)
{
	dc_link_dp_enable_hpd(link);
}

void dc_link_disable_hpd(const struct dc_link *link)
{
	dc_link_dp_disable_hpd(link);
}


void dc_link_set_test_pattern(struct dc_link *link,
			      enum dp_test_pattern test_pattern,
			      const struct link_training_settings *p_link_settings,
			      const unsigned char *p_custom_pattern,
			      unsigned int cust_pattern_size)
{
	if (link != NULL)
		dc_link_dp_set_test_pattern(
			link,
			test_pattern,
			p_link_settings,
			p_custom_pattern,
			cust_pattern_size);
}

static void destruct(struct dc *dc)
{
	dc_release_state(dc->current_state);
	dc->current_state = NULL;

	destroy_links(dc);

	dc_destroy_resource_pool(dc);

	if (dc->ctx->gpio_service)
		dal_gpio_service_destroy(&dc->ctx->gpio_service);

	if (dc->ctx->i2caux)
		dal_i2caux_destroy(&dc->ctx->i2caux);

	if (dc->ctx->created_bios)
		dal_bios_parser_destroy(&dc->ctx->dc_bios);

	kfree(dc->ctx);
	dc->ctx = NULL;

	kfree(dc->bw_vbios);
	dc->bw_vbios = NULL;

	kfree(dc->bw_dceip);
	dc->bw_dceip = NULL;

#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	kfree(dc->dcn_soc);
	dc->dcn_soc = NULL;

	kfree(dc->dcn_ip);
	dc->dcn_ip = NULL;

#endif
}

static bool construct(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	struct bw_calcs_dceip *dc_dceip;
	struct bw_calcs_vbios *dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	struct dcn_soc_bounding_box *dcn_soc;
	struct dcn_ip_params *dcn_ip;
#endif

	enum dce_version dc_version = DCE_VERSION_UNKNOWN;

	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
	if (!dc_dceip) {
		dm_error("%s: failed to create dceip\n", __func__);
		goto fail;
	}

	dc->bw_dceip = dc_dceip;

	dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
	if (!dc_vbios) {
		dm_error("%s: failed to create vbios\n", __func__);
		goto fail;
	}

	dc->bw_vbios = dc_vbios;
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
	if (!dcn_soc) {
		dm_error("%s: failed to create dcn_soc\n", __func__);
		goto fail;
	}

	dc->dcn_soc = dcn_soc;

	dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
	if (!dcn_ip) {
		dm_error("%s: failed to create dcn_ip\n", __func__);
		goto fail;
	}

	dc->dcn_ip = dcn_ip;
#endif

	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
	if (!dc_ctx) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto fail;
	}

	dc_ctx->cgs_device = init_params->cgs_device;
	dc_ctx->driver_context = init_params->driver;
	dc_ctx->dc = dc;
	dc_ctx->asic_id = init_params->asic_id;
	dc_ctx->dc_sink_id_count = 0;
	dc->ctx = dc_ctx;

	dc->current_state = dc_create_state();

	if (!dc->current_state) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto fail;
	}

	/* Create logger */

	dc_ctx->dce_environment = init_params->dce_environment;

	dc_version = resource_parse_asic_id(init_params->asic_id);
	dc_ctx->dce_version = dc_version;

	/* Resource should construct all asic specific resources.
	 * This should be the only place where we need to parse the asic id
	 */
	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;
	else {
		/* Create BIOS parser */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto fail;
		}

		dc_ctx->created_bios = true;
	}

	/* Create I2C AUX */
	dc_ctx->i2caux = dal_i2caux_create(dc_ctx);

	if (!dc_ctx->i2caux) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	dc->res_pool = dc_create_resource_pool(
			dc,
			init_params->num_virtual_links,
			dc_version,
			init_params->asic_id);
	if (!dc->res_pool)
		goto fail;

	dc_resource_state_construct(dc, dc->current_state);

	if (!create_links(dc, init_params->num_virtual_links))
		goto fail;

	return true;

fail:

	destruct(dc);
	return false;
}

static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
	int i, j;
	struct dc_state *dangling_context = dc_create_state();
	struct dc_state *current_ctx;

	if (dangling_context == NULL)
		return;

	dc_resource_state_copy_construct(dc->current_state, dangling_context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *old_stream =
				dc->current_state->res_ctx.pipe_ctx[i].stream;
		bool should_disable = true;

		for (j = 0; j < context->stream_count; j++) {
			if (old_stream == context->streams[j]) {
				should_disable = false;
				break;
			}
		}
		if (should_disable && old_stream) {
			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
			dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
		}
	}

	current_ctx = dc->current_state;
	dc->current_state = dangling_context;
	dc_release_state(current_ctx);
}

/*******************************************************************************
 * Public functions
 ******************************************************************************/

struct dc *dc_create(const struct dc_init_data *init_params)
{
	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	unsigned int full_pipe_count;

	if (NULL == dc)
		goto alloc_fail;

	if (false == construct(dc, init_params))
		goto construct_fail;

	/*TODO: separate HW and SW initialization*/
	dc->hwss.init_hw(dc);

	full_pipe_count = dc->res_pool->pipe_count;
	if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
		full_pipe_count--;
	dc->caps.max_streams = min(
			full_pipe_count,
			dc->res_pool->stream_enc_count);

	dc->caps.max_links = dc->link_count;
	dc->caps.max_audios = dc->res_pool->audio_count;
	dc->caps.linear_pitch_alignment = 64;

	/* Populate versioning information */
	dc->versions.dc_ver = DC_VER;

	if (dc->res_pool->dmcu != NULL)
		dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;

	dc->config = init_params->flags;

	DC_LOG_DC("Display Core initialized\n");


	/* TODO: missing feature to be enabled */
	dc->debug.disable_dfs_bypass = true;

	return dc;

construct_fail:
	kfree(dc);

alloc_fail:
	return NULL;
}

void dc_destroy(struct dc **dc)
{
	destruct(*dc);
	kfree(*dc);
	*dc = NULL;
}

static void enable_timing_multisync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i = 0, multisync_count = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream ||
				!ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
			continue;
		if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
			continue;
		multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
		multisync_count++;
	}

	if (multisync_count > 0) {
		dc->hwss.enable_per_frame_crtc_position_reset(
			dc, multisync_count, multisync_pipes);
	}
}

static void program_timing_sync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, j;
	int group_index = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Add the tg to the set, then search the remaining tgs for
		 * ones with the same timing and add all matches to the group.
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;

			if (resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* set first unblanked pipe as master */
		for (j = 0; j < group_size; j++) {
			struct pipe_ctx *temp;

			if (pipe_set[j]->stream_res.tg->funcs->is_blanked && !pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
				if (j == 0)
					break;

				temp = pipe_set[0];
				pipe_set[0] = pipe_set[j];
				pipe_set[j] = temp;
				break;
			}
		}

		/* remove any other unblanked pipes as they have already been synced */
		for (j = j + 1; j < group_size; j++) {
			if (pipe_set[j]->stream_res.tg->funcs->is_blanked && !pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg)) {
				group_size--;
				pipe_set[j] = pipe_set[group_size];
				j--;
			}
		}

		if (group_size > 1) {
			dc->hwss.enable_timing_synchronization(
				dc, group_index, group_size, pipe_set);
			group_index++;
		}
	}
}

static bool context_changed(
		struct dc *dc,
		struct dc_state *context)
{
	uint8_t i;

	if (context->stream_count != dc->current_state->stream_count)
		return true;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		if (dc->current_state->streams[i] != context->streams[i])
			return true;
	}

	return false;
}

bool dc_enable_stereo(
	struct dc *dc,
	struct dc_state *context,
	struct dc_stream_state *streams[],
	uint8_t stream_count)
{
	bool ret = true;
	int i, j;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		if (context != NULL)
			pipe = &context->res_ctx.pipe_ctx[i];
		else
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		for (j = 0; pipe && j < stream_count; j++) {
			if (streams[j] && streams[j] == pipe->stream &&
				dc->hwss.setup_stereo)
				dc->hwss.setup_stereo(pipe, dc);
		}
	}

	return ret;
}

/*
 * Applies the given context to HW and copies it into the current context.
 * It is up to the user to release the src context afterwards.
 */
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
	struct dc_bios *dcb = dc->ctx->dc_bios;
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct pipe_ctx *pipe;
	int i, k, l;
	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};

	disable_dangling_plane(dc, context);

	for (i = 0; i < context->stream_count; i++)
		dc_streams[i] = context->streams[i];

	if (!dcb->funcs->is_accelerated_mode(dcb))
		dc->hwss.enable_accelerated_mode(dc, context);

	dc->hwss.set_bandwidth(dc, context, false);

	/* re-program planes for existing stream, in case we need to
	 * free up plane resource for later use
	 */
	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->mode_changed)
			continue;

		dc->hwss.apply_ctx_for_surface(
			dc, context->streams[i],
			context->stream_status[i].plane_count,
			context); /* use new pipe config in new context */
	}

	/* Program hardware */
	dc->hwss.ready_shared_resources(dc, context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
	}

	result = dc->hwss.apply_ctx_to_hw(dc, context);

	if (result != DC_OK)
		return result;

	if (context->stream_count > 1) {
		enable_timing_multisync(dc, context);
		program_timing_sync(dc, context);
	}

	/* Program all planes within new context*/
	for (i = 0; i < context->stream_count; i++) {
		const struct dc_sink *sink = context->streams[i]->sink;

		if (!context->streams[i]->mode_changed)
			continue;

		dc->hwss.apply_ctx_for_surface(
			dc, context->streams[i],
			context->stream_status[i].plane_count,
			context);

		/*
		 * enable stereo
		 * TODO rework dc_enable_stereo call to work with validation sets?
		 */
		for (k = 0; k < MAX_PIPES; k++) {
			pipe = &context->res_ctx.pipe_ctx[k];

			for (l = 0; pipe && l < context->stream_count; l++) {
				if (context->streams[l] &&
					context->streams[l] == pipe->stream &&
					dc->hwss.setup_stereo)
					dc->hwss.setup_stereo(pipe, dc);
			}
		}

		CONN_MSG_MODE(sink->link, "{%dx%d, %dx%d@%dKhz}",
				context->streams[i]->timing.h_addressable,
				context->streams[i]->timing.v_addressable,
				context->streams[i]->timing.h_total,
				context->streams[i]->timing.v_total,
				context->streams[i]->timing.pix_clk_khz);
	}

	dc_enable_stereo(dc, context, dc_streams, context->stream_count);

	/* pplib is notified if disp_num changed */
	dc->hwss.set_bandwidth(dc, context, true);

	dc_release_state(dc->current_state);

	dc->current_state = context;

	dc_retain_state(dc->current_state);

	dc->hwss.optimize_shared_resources(dc);

	return result;
}

bool dc_commit_state(struct dc *dc, struct dc_state *context)
{
	enum dc_status result = DC_ERROR_UNEXPECTED;
	int i;

	if (false == context_changed(dc, context))
		return DC_OK;

	DC_LOG_DC("%s: %d streams\n",
			__func__, context->stream_count);

	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		dc_stream_log(dc, stream);
	}

	result = dc_commit_state_no_check(dc, context);

	return (result == DC_OK);
}

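/*
 * Commit-flow sketch (illustrative only): a DM typically builds a new state
 * from the current one, mutates it, validates it, and commits. The names
 * dc_resource_state_copy_construct_current() and dc_validate_global_state()
 * are assumed to be provided by dc_resource.c, as elsewhere in this tree.
 *
 *	struct dc_state *context = dc_create_state();
 *
 *	if (context) {
 *		dc_resource_state_copy_construct_current(dc, context);
 *		// add or remove streams/planes on context here
 *		if (dc_validate_global_state(dc, context) == DC_OK)
 *			dc_commit_state(dc, context);
 *		dc_release_state(context);
 *	}
 */
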
bool dc_post_update_surfaces_to_stream(struct dc *dc)
{
	int i;
	struct dc_state *context = dc->current_state;

	post_surface_trace(dc);

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].stream == NULL ||
		    context->res_ctx.pipe_ctx[i].plane_state == NULL) {
			context->res_ctx.pipe_ctx[i].pipe_idx = i;
			dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
		}

	dc->optimized_required = false;

	dc->hwss.set_bandwidth(dc, context, true);
	return true;
}

struct dc_state *dc_create_state(void)
{
	struct dc_state *context = kzalloc(sizeof(struct dc_state),
					   GFP_KERNEL);

	if (!context)
		return NULL;

	kref_init(&context->refcount);
	return context;
}

void dc_retain_state(struct dc_state *context)
{
	kref_get(&context->refcount);
}

static void dc_state_free(struct kref *kref)
{
	struct dc_state *context = container_of(kref, struct dc_state, refcount);
	dc_resource_state_destruct(context);
	kfree(context);
}

void dc_release_state(struct dc_state *context)
{
	kref_put(&context->refcount, dc_state_free);
}

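/*
 * Lifecycle sketch (illustrative only): dc_state is reference counted via
 * kref. dc_create_state() returns a context with a refcount of 1; every
 * additional user takes its own reference and drops it when done. The last
 * dc_release_state() frees the context through dc_state_free().
 *
 *	struct dc_state *ctx = dc_create_state();	// refcount == 1
 *
 *	dc_retain_state(ctx);				// refcount == 2
 *	// ... share ctx with another user ...
 *	dc_release_state(ctx);				// refcount == 1
 *	dc_release_state(ctx);				// freed
 */
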
static bool is_surface_in_context(
		const struct dc_state *context,
		const struct dc_plane_state *plane_state)
{
	int j;

	for (j = 0; j < MAX_PIPES; j++) {
		const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (plane_state == pipe_ctx->plane_state) {
			return true;
		}
	}

	return false;
}

static unsigned int pixel_format_to_bpp(enum surface_pixel_format format)
{
	switch (format) {
	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
		return 12;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
	case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
		return 16;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
		return 32;
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
		return 64;
	default:
		ASSERT_CRITICAL(false);
		return -1;
	}
}

static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;

	if (!u->plane_info)
		return UPDATE_TYPE_FAST;

	if (u->plane_info->color_space != u->surface->color_space)
		update_flags->bits.color_space_change = 1;

	if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror)
		update_flags->bits.horizontal_mirror_change = 1;

	if (u->plane_info->rotation != u->surface->rotation)
		update_flags->bits.rotation_change = 1;

	if (u->plane_info->format != u->surface->format)
		update_flags->bits.pixel_format_change = 1;

	if (u->plane_info->stereo_format != u->surface->stereo_format)
		update_flags->bits.stereo_format_change = 1;

	if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha)
		update_flags->bits.per_pixel_alpha_change = 1;

	if (u->plane_info->dcc.enable != u->surface->dcc.enable
			|| u->plane_info->dcc.grph.independent_64b_blks != u->surface->dcc.grph.independent_64b_blks
			|| u->plane_info->dcc.grph.meta_pitch != u->surface->dcc.grph.meta_pitch)
		update_flags->bits.dcc_change = 1;

	if (pixel_format_to_bpp(u->plane_info->format) !=
			pixel_format_to_bpp(u->surface->format))
		/* different bytes per element will require full bandwidth
		 * and DML calculation
		 */
		update_flags->bits.bpp_change = 1;

	if (u->gamma && dce_use_lut(u->plane_info->format))
		update_flags->bits.gamma_change = 1;

	if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
			sizeof(union dc_tiling_info)) != 0) {
		update_flags->bits.swizzle_change = 1;
		/* todo: the checks below are HW dependent, we should add a
		 * hook to DCE/N resource and validate there.
		 */
		if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR)
			/* swizzled mode requires RQ to be setup properly,
			 * thus need to run DML to calculate RQ settings
			 */
			update_flags->bits.bandwidth_change = 1;
	}

	if (update_flags->bits.rotation_change
			|| update_flags->bits.stereo_format_change
			|| update_flags->bits.pixel_format_change
			|| update_flags->bits.gamma_change
			|| update_flags->bits.bpp_change
			|| update_flags->bits.bandwidth_change
			|| update_flags->bits.output_tf_change)
		return UPDATE_TYPE_FULL;

	return UPDATE_TYPE_MED;
}

static enum surface_update_type get_scaling_info_update_type(
		const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;

	if (!u->scaling_info)
		return UPDATE_TYPE_FAST;

	if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
			|| u->scaling_info->clip_rect.height != u->surface->clip_rect.height
			|| u->scaling_info->dst_rect.width != u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height) {
		update_flags->bits.scaling_change = 1;

		if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
				&& (u->scaling_info->dst_rect.width < u->surface->src_rect.width
					|| u->scaling_info->dst_rect.height < u->surface->src_rect.height))
			/* Making dst rect smaller requires a bandwidth change */
			update_flags->bits.bandwidth_change = 1;
	}

	if (u->scaling_info->src_rect.width != u->surface->src_rect.width
		|| u->scaling_info->src_rect.height != u->surface->src_rect.height) {

		update_flags->bits.scaling_change = 1;
		if (u->scaling_info->src_rect.width > u->surface->src_rect.width
				&& u->scaling_info->src_rect.height > u->surface->src_rect.height)
			/* Making src rect bigger requires a bandwidth change */
			update_flags->bits.clock_change = 1;
	}

	if (u->scaling_info->src_rect.x != u->surface->src_rect.x
			|| u->scaling_info->src_rect.y != u->surface->src_rect.y
			|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
			|| u->scaling_info->clip_rect.y != u->surface->clip_rect.y
			|| u->scaling_info->dst_rect.x != u->surface->dst_rect.x
			|| u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
		update_flags->bits.position_change = 1;

	if (update_flags->bits.clock_change
			|| update_flags->bits.bandwidth_change)
		return UPDATE_TYPE_FULL;

	if (update_flags->bits.scaling_change
			|| update_flags->bits.position_change)
		return UPDATE_TYPE_MED;

	return UPDATE_TYPE_FAST;
}

static enum surface_update_type det_surface_update(const struct dc *dc,
		const struct dc_surface_update *u)
{
	const struct dc_state *context = dc->current_state;
	enum surface_update_type type;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;
	union surface_update_flags *update_flags = &u->surface->update_flags;

	update_flags->raw = 0; // Reset all flags

	if (!is_surface_in_context(context, u->surface)) {
		update_flags->bits.new_plane = 1;
		return UPDATE_TYPE_FULL;
	}

	type = get_plane_info_update_type(u);
	elevate_update_type(&overall_type, type);

	type = get_scaling_info_update_type(u);
	elevate_update_type(&overall_type, type);

	if (u->in_transfer_func)
		update_flags->bits.in_transfer_func_change = 1;

	if (u->input_csc_color_matrix)
		update_flags->bits.input_csc_change = 1;

	if (u->coeff_reduction_factor)
		update_flags->bits.coeff_reduction_change = 1;

	if (update_flags->bits.in_transfer_func_change) {
		type = UPDATE_TYPE_MED;
		elevate_update_type(&overall_type, type);
	}

	if (update_flags->bits.input_csc_change
			|| update_flags->bits.coeff_reduction_change) {
		type = UPDATE_TYPE_FULL;
		elevate_update_type(&overall_type, type);
	}

	return overall_type;
}

static enum surface_update_type check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;

	if (stream_status == NULL || stream_status->plane_count != surface_count)
		return UPDATE_TYPE_FULL;

	if (stream_update)
		return UPDATE_TYPE_FULL;

	for (i = 0; i < surface_count; i++) {
		enum surface_update_type type =
				det_surface_update(dc, &updates[i]);

		if (type == UPDATE_TYPE_FULL)
			return type;

		elevate_update_type(&overall_type, type);
	}

	return overall_type;
}

enum surface_update_type dc_check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type type;

	for (i = 0; i < surface_count; i++)
		updates[i].surface->update_flags.raw = 0;

	type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
	if (type == UPDATE_TYPE_FULL)
		for (i = 0; i < surface_count; i++)
			updates[i].surface->update_flags.raw = 0xFFFFFFFF;

	return type;
}

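/*
 * Example (illustrative only): the update types form an ordered scale,
 * UPDATE_TYPE_FAST < UPDATE_TYPE_MED < UPDATE_TYPE_FULL, and
 * elevate_update_type() keeps the maximum type seen so far. A pure page
 * flip (only flip_addr set, no plane_info/scaling_info/stream_update)
 * therefore stays FAST, assuming the plane is already in the current
 * context and stream_status->plane_count matches the surface count:
 *
 *	struct dc_surface_update flip = {
 *		.surface = plane,
 *		.flip_addr = &addr,
 *	};
 *
 *	type = dc_check_update_surfaces_for_stream(dc, &flip, 1, NULL,
 *						   stream_status);
 *	// type == UPDATE_TYPE_FAST
 */
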
static struct dc_stream_status *stream_get_status(
		struct dc_state *ctx,
		struct dc_stream_state *stream)
{
	uint8_t i;

	for (i = 0; i < ctx->stream_count; i++) {
		if (stream == ctx->streams[i]) {
			return &ctx->stream_status[i];
		}
	}

	return NULL;
}

static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;


static void commit_planes_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		enum surface_update_type update_type,
		struct dc_state *context)
{
	int i, j;
	struct pipe_ctx *top_pipe_to_program = NULL;

	if (update_type == UPDATE_TYPE_FULL) {
		dc->hwss.set_bandwidth(dc, context, false);
		context_clock_trace(dc, context);
	}

	if (surface_count == 0) {
		/*
		 * In case of turning off the screen, there is no need to
		 * program the front end a second time: just return after
		 * programming the front end.
		 */
		dc->hwss.apply_ctx_for_surface(dc, stream, surface_count, context);
		return;
	}

	/* Full fe update*/
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (!pipe_ctx->top_pipe &&
			pipe_ctx->stream &&
			pipe_ctx->stream == stream) {
			struct dc_stream_status *stream_status = NULL;

			top_pipe_to_program = pipe_ctx;

			if (update_type == UPDATE_TYPE_FAST || !pipe_ctx->plane_state)
				continue;

			stream_status =
					stream_get_status(context, pipe_ctx->stream);

			dc->hwss.apply_ctx_for_surface(
					dc, pipe_ctx->stream, stream_status->plane_count, context);

			if (stream_update && stream_update->abm_level && pipe_ctx->stream_res.abm) {
				if (pipe_ctx->stream_res.tg->funcs->is_blanked) {
					// if otg funcs defined check if blanked before programming
					if (!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
						pipe_ctx->stream_res.abm->funcs->set_abm_level(
							pipe_ctx->stream_res.abm, stream->abm_level);
				} else
					pipe_ctx->stream_res.abm->funcs->set_abm_level(
						pipe_ctx->stream_res.abm, stream->abm_level);
			}

			if (stream_update && stream_update->periodic_fn_vsync_delta &&
					pipe_ctx->stream_res.tg->funcs->program_vline_interrupt)
				pipe_ctx->stream_res.tg->funcs->program_vline_interrupt(
						pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing,
						pipe_ctx->stream->periodic_fn_vsync_delta);
		}
	}

	if (update_type == UPDATE_TYPE_FULL)
		context_timing_trace(dc, &context->res_ctx);

	/* Lock the top pipe while updating plane addrs, since freesync requires
	 * plane addr update event triggers to be synchronized.
	 * top_pipe_to_program is expected to never be NULL
	 */
	if (update_type == UPDATE_TYPE_FAST) {
		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);

		/* Perform requested Updates */
		for (i = 0; i < surface_count; i++) {
			struct dc_plane_state *plane_state = srf_updates[i].surface;

			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->stream != stream)
					continue;

				if (pipe_ctx->plane_state != plane_state)
					continue;

				if (srf_updates[i].flip_addr)
					dc->hwss.update_plane_addr(dc, pipe_ctx);
			}
		}

		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
	}

	if (stream && stream_update && update_type > UPDATE_TYPE_FAST)
		for (j = 0; j < dc->res_pool->pipe_count; j++) {
			struct pipe_ctx *pipe_ctx =
					&context->res_ctx.pipe_ctx[j];

			if (pipe_ctx->stream != stream)
				continue;

			if (stream_update->hdr_static_metadata) {
				resource_build_info_frame(pipe_ctx);
				dc->hwss.update_info_frame(pipe_ctx);
			}
		}
}

void dc_commit_updates_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		struct dc_plane_state **plane_states,
		struct dc_state *state)
{
	const struct dc_stream_status *stream_status;
	enum surface_update_type update_type;
	struct dc_state *context;
	struct dc_context *dc_ctx = dc->ctx;
	int i, j;

	stream_status = dc_stream_get_status(stream);
	context = dc->current_state;

	update_type = dc_check_update_surfaces_for_stream(
				dc, srf_updates, surface_count, stream_update, stream_status);

	if (update_type >= update_surface_trace_level)
		update_surface_trace(dc, srf_updates, surface_count);


	if (update_type >= UPDATE_TYPE_FULL) {

		/* initialize scratch memory for building context */
		context = dc_create_state();
		if (context == NULL) {
			DC_ERROR("Failed to allocate new validate context!\n");
			return;
		}

		dc_resource_state_copy_construct(state, context);
	}


	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *surface = srf_updates[i].surface;

		/* TODO: On flip we don't build the state, so it still has the
		 * old address; that is why we update the address here.
		 */
		if (srf_updates[i].flip_addr) {
			surface->address = srf_updates[i].flip_addr->address;
			surface->flip_immediate = srf_updates[i].flip_addr->flip_immediate;

		}

		if (update_type >= UPDATE_TYPE_MED) {
			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->plane_state != surface)
					continue;

				resource_build_scaling_params(pipe_ctx);
			}
		}
	}

	commit_planes_for_stream(
				dc,
				srf_updates,
				surface_count,
				stream,
				stream_update,
				update_type,
				context);
	/* update current_state */
	if (dc->current_state != context) {

		struct dc_state *old = dc->current_state;

		dc->current_state = context;
		dc_release_state(old);

	}
	/* let's use current_state to update watermark etc */
	if (update_type >= UPDATE_TYPE_FULL)
		dc_post_update_surfaces_to_stream(dc);

	return;

}

uint8_t dc_get_current_stream_count(struct dc *dc)
{
	return dc->current_state->stream_count;
}

struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
{
	if (i < dc->current_state->stream_count)
		return dc->current_state->streams[i];
	return NULL;
}

enum dc_irq_source dc_interrupt_to_irq_source(
		struct dc *dc,
		uint32_t src_id,
		uint32_t ext_id)
{
	return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
}

bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
{

	if (dc == NULL)
		return false;

	return dal_irq_service_set(dc->res_pool->irqs, src, enable);
}

void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
{
	dal_irq_service_ack(dc->res_pool->irqs, src);
}

void dc_set_power_state(
	struct dc *dc,
	enum dc_acpi_cm_power_state power_state)
{
	struct kref refcount;

	switch (power_state) {
	case DC_ACPI_CM_POWER_STATE_D0:
		dc_resource_state_construct(dc, dc->current_state);

		dc->hwss.init_hw(dc);
		break;
	default:

		dc->hwss.power_down(dc);

		/* Zero out the current context so that on resume we start with
		 * clean state, and dc hw programming optimizations will not
		 * cause any trouble.
		 */

		/* Preserve refcount */
		refcount = dc->current_state->refcount;
		dc_resource_state_destruct(dc->current_state);
		memset(dc->current_state, 0,
				sizeof(*dc->current_state));

		dc->current_state->refcount = refcount;

		break;
	}

}

void dc_resume(struct dc *dc)
{

	uint32_t i;

	for (i = 0; i < dc->link_count; i++)
		core_link_resume(dc->links[i]);
}

bool dc_submit_i2c(
		struct dc *dc,
		uint32_t link_index,
		struct i2c_command *cmd)
{

	struct dc_link *link = dc->links[link_index];
	struct ddc_service *ddc = link->ddc;

	return dal_i2caux_submit_i2c_command(
		ddc->ctx->i2caux,
		ddc->ddc_pin,
		cmd);
}

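/*
 * Usage sketch (illustrative only; payload field names assumed from the dc
 * DDC types): a single-payload I2C write submitted over a link's DDC line.
 *
 *	uint8_t data[2] = { 0x00, 0x01 };
 *	struct i2c_payload payload = { .write = true, .address = 0x37,
 *				       .length = sizeof(data), .data = data };
 *	struct i2c_command cmd = { .payloads = &payload,
 *				   .number_of_payloads = 1,
 *				   .engine = I2C_COMMAND_ENGINE_DEFAULT,
 *				   .speed = 100 };
 *
 *	dc_submit_i2c(dc, link_index, &cmd);
 */
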
static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
{
	if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
		BREAK_TO_DEBUGGER();
		return false;
	}

	dc_sink_retain(sink);

	dc_link->remote_sinks[dc_link->sink_count] = sink;
	dc_link->sink_count++;

	return true;
}

struct dc_sink *dc_link_add_remote_sink(
		struct dc_link *link,
		const uint8_t *edid,
		int len,
		struct dc_sink_init_data *init_data)
{
	struct dc_sink *dc_sink;
	enum dc_edid_status edid_status;

	if (len > DC_MAX_EDID_BUFFER_SIZE) {
		dm_error("Max EDID buffer size breached!\n");
		return NULL;
	}

	if (!init_data) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	if (!init_data->link) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	dc_sink = dc_sink_create(init_data);

	if (!dc_sink)
		return NULL;

	memmove(dc_sink->dc_edid.raw_edid, edid, len);
	dc_sink->dc_edid.length = len;

	if (!link_add_remote_sink_helper(
			link,
			dc_sink))
		goto fail_add_sink;

	edid_status = dm_helpers_parse_edid_caps(
			link->ctx,
			&dc_sink->dc_edid,
			&dc_sink->edid_caps);

	/*
	 * Treat the device as having no EDID if EDID parsing fails.
	 */
	if (edid_status != EDID_OK) {
		dc_sink->dc_edid.length = 0;
		dm_error("Bad EDID, status%d!\n", edid_status);
	}

	return dc_sink;

fail_add_sink:
	dc_sink_release(dc_sink);
	return NULL;
}

void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
{
	int i;

	if (!link->sink_count) {
		BREAK_TO_DEBUGGER();
		return;
	}

	for (i = 0; i < link->sink_count; i++) {
		if (link->remote_sinks[i] == sink) {
			dc_sink_release(sink);
			link->remote_sinks[i] = NULL;

			/* shrink array to remove empty place */
			while (i < link->sink_count - 1) {
				link->remote_sinks[i] = link->remote_sinks[i+1];
				i++;
			}
			link->remote_sinks[i] = NULL;
			link->sink_count--;
			return;
		}
	}
}