1 /*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 */
24
25 #include "dm_services.h"
26
27 #include "dc.h"
28
29 #include "core_status.h"
30 #include "core_types.h"
31 #include "hw_sequencer.h"
32 #include "dce/dce_hwseq.h"
33
34 #include "resource.h"
35
36 #include "gpio_service_interface.h"
37 #include "clk_mgr.h"
38 #include "clock_source.h"
39 #include "dc_bios_types.h"
40
41 #include "bios_parser_interface.h"
42 #include "bios/bios_parser_helper.h"
43 #include "include/irq_service_interface.h"
44 #include "transform.h"
45 #include "dmcu.h"
46 #include "dpp.h"
47 #include "timing_generator.h"
48 #include "abm.h"
49 #include "virtual/virtual_link_encoder.h"
50 #include "hubp.h"
51
52 #include "link_hwss.h"
53 #include "link_encoder.h"
54 #include "link_enc_cfg.h"
55
56 #include "link.h"
57 #include "dm_helpers.h"
58 #include "mem_input.h"
59
60 #include "dc_dmub_srv.h"
61
62 #include "dsc.h"
63
64 #include "vm_helper.h"
65
66 #include "dce/dce_i2c.h"
67
68 #include "dmub/dmub_srv.h"
69
70 #include "dce/dmub_psr.h"
71
72 #include "dce/dmub_hw_lock_mgr.h"
73
74 #include "dc_trace.h"
75
76 #include "hw_sequencer_private.h"
77
78 #include "dce/dmub_outbox.h"
79
80 #define CTX \
81 dc->ctx
82
83 #define DC_LOGGER \
84 dc->ctx->logger
85
86 static const char DC_BUILD_ID[] = "production-build";
87
88 /**
89 * DOC: Overview
90 *
91 * DC is the OS-agnostic component of the amdgpu DC driver.
92 *
93 * DC maintains and validates a set of structs representing the state of the
94 * driver and writes that state to AMD hardware.
95 *
96 * Main DC HW structs:
97 *
98 * struct dc - The central struct. One per driver. Created on driver load,
99 * destroyed on driver unload.
100 *
101 * struct dc_context - One per driver.
102 * Used as a backpointer by most other structs in dc.
103 *
104 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
105 * plugpoints). Created on driver load, destroyed on driver unload.
106 *
107 * struct dc_sink - One per display. Created on boot or hotplug.
108 * Destroyed on shutdown or hotunplug. A dc_link can have a local sink
109 * (the display directly attached). It may also have one or more remote
110 * sinks (in the Multi-Stream Transport case)
111 *
112 * struct resource_pool - One per driver. Represents the hw blocks not in the
113 * main pipeline. Not directly accessible by dm.
114 *
115 * Main dc state structs:
116 *
117 * These structs can be created and destroyed as needed. There is a full set of
118 * these structs in dc->current_state representing the currently programmed state.
119 *
120 * struct dc_state - The global DC state to track global state information,
121 * such as bandwidth values.
122 *
123 * struct dc_stream_state - Represents the hw configuration for the pipeline from
124 * a framebuffer to a display. Maps one-to-one with dc_sink.
125 *
126 * struct dc_plane_state - Represents a framebuffer. Each stream has at least one,
127 * and may have more in the Multi-Plane Overlay case.
128 *
129 * struct resource_context - Represents the programmable state of everything in
130 * the resource_pool. Not directly accessible by dm.
131 *
132 * struct pipe_ctx - A member of struct resource_context. Represents the
133 * internal hardware pipeline components. Each dc_plane_state has either
134 * one or two (in the pipe-split case).
135 */
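/*
 * Illustrative sketch (assumption: not part of the original file, shown only
 * to make the struct relationships above concrete). A DM layer holding a
 * struct dc could walk its connectors and the currently committed streams
 * like this:
 *
 *	for (i = 0; i < dc->link_count; i++)
 *		if (dc->links[i]->local_sink)
 *			; // connector i has a directly attached display
 *
 *	for (i = 0; i < dc->current_state->stream_count; i++)
 *		; // dc->current_state->streams[i] feeds one dc_sink
 */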
136
137 /* Private functions */
138
139 static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
140 {
141 if (new > *original)
142 *original = new;
143 }
144
145 static void destroy_links(struct dc *dc)
146 {
147 uint32_t i;
148
149 for (i = 0; i < dc->link_count; i++) {
150 if (NULL != dc->links[i])
151 dc->link_srv->destroy_link(&dc->links[i]);
152 }
153 }
154
155 static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links)
156 {
157 int i;
158 uint32_t count = 0;
159
160 for (i = 0; i < num_links; i++) {
161 if (links[i]->connector_signal == SIGNAL_TYPE_EDP ||
162 links[i]->is_internal_display)
163 count++;
164 }
165
166 return count;
167 }
168
169 static int get_seamless_boot_stream_count(struct dc_state *ctx)
170 {
171 uint8_t i;
172 uint8_t seamless_boot_stream_count = 0;
173
174 for (i = 0; i < ctx->stream_count; i++)
175 if (ctx->streams[i]->apply_seamless_boot_optimization)
176 seamless_boot_stream_count++;
177
178 return seamless_boot_stream_count;
179 }
180
181 static bool create_links(
182 struct dc *dc,
183 uint32_t num_virtual_links)
184 {
185 int i;
186 int connectors_num;
187 struct dc_bios *bios = dc->ctx->dc_bios;
188
189 dc->link_count = 0;
190
191 connectors_num = bios->funcs->get_connectors_number(bios);
192
193 DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);
194
195 if (connectors_num > ENUM_ID_COUNT) {
196 dm_error(
197 "DC: Number of connectors %d exceeds maximum of %d!\n",
198 connectors_num,
199 ENUM_ID_COUNT);
200 return false;
201 }
202
203 dm_output_to_console(
204 "DC: %s: connectors_num: physical:%d, virtual:%d\n",
205 __func__,
206 connectors_num,
207 num_virtual_links);
208
209 for (i = 0; i < connectors_num; i++) {
210 struct link_init_data link_init_params = {0};
211 struct dc_link *link;
212
213 DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);
214
215 link_init_params.ctx = dc->ctx;
216 /* next BIOS object table connector */
217 link_init_params.connector_index = i;
218 link_init_params.link_index = dc->link_count;
219 link_init_params.dc = dc;
220 link = dc->link_srv->create_link(&link_init_params);
221
222 if (link) {
223 dc->links[dc->link_count] = link;
224 link->dc = dc;
225 ++dc->link_count;
226 }
227 }
228
229 DC_LOG_DC("BIOS object table - end");
230
231 /* Create a link for each usb4 dpia port */
232 for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
233 struct link_init_data link_init_params = {0};
234 struct dc_link *link;
235
236 link_init_params.ctx = dc->ctx;
237 link_init_params.connector_index = i;
238 link_init_params.link_index = dc->link_count;
239 link_init_params.dc = dc;
240 link_init_params.is_dpia_link = true;
241
242 link = dc->link_srv->create_link(&link_init_params);
243 if (link) {
244 dc->links[dc->link_count] = link;
245 link->dc = dc;
246 ++dc->link_count;
247 }
248 }
249
250 for (i = 0; i < num_virtual_links; i++) {
251 struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
252 struct encoder_init_data enc_init = {0};
253
254 if (link == NULL) {
255 BREAK_TO_DEBUGGER();
256 goto failed_alloc;
257 }
258
259 link->link_index = dc->link_count;
260 dc->links[dc->link_count] = link;
261 dc->link_count++;
262
263 link->ctx = dc->ctx;
264 link->dc = dc;
265 link->connector_signal = SIGNAL_TYPE_VIRTUAL;
266 link->link_id.type = OBJECT_TYPE_CONNECTOR;
267 link->link_id.id = CONNECTOR_ID_VIRTUAL;
268 link->link_id.enum_id = ENUM_ID_1;
269 link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
270
271 if (!link->link_enc) {
272 BREAK_TO_DEBUGGER();
273 goto failed_alloc;
274 }
275
276 link->link_status.dpcd_caps = &link->dpcd_caps;
277
278 enc_init.ctx = dc->ctx;
279 enc_init.channel = CHANNEL_ID_UNKNOWN;
280 enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
281 enc_init.transmitter = TRANSMITTER_UNKNOWN;
282 enc_init.connector = link->link_id;
283 enc_init.encoder.type = OBJECT_TYPE_ENCODER;
284 enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
285 enc_init.encoder.enum_id = ENUM_ID_1;
286 virtual_link_encoder_construct(link->link_enc, &enc_init);
287 }
288
289 dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count);
290
291 return true;
292
293 failed_alloc:
294 return false;
295 }
296
297 /* Create additional DIG link encoder objects if fewer than the platform
298 * supports were created during link construction. This can happen if the
299 * number of physical connectors is less than the number of DIGs.
300 */
301 static bool create_link_encoders(struct dc *dc)
302 {
303 bool res = true;
304 unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
305 unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
306 int i;
307
308 /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
309 * link encoders and physical display endpoints and does not require
310 * additional link encoder objects.
311 */
312 if (num_usb4_dpia == 0)
313 return res;
314
315 /* Create as many link encoder objects as the platform supports. DPIA
316 * endpoints can be programmably mapped to any DIG.
317 */
318 if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) {
319 for (i = 0; i < num_dig_link_enc; i++) {
320 struct link_encoder *link_enc = dc->res_pool->link_encoders[i];
321
322 if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) {
323 link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx,
324 (enum engine_id)(ENGINE_ID_DIGA + i));
325 if (link_enc) {
326 dc->res_pool->link_encoders[i] = link_enc;
327 dc->res_pool->dig_link_enc_count++;
328 } else {
329 res = false;
330 }
331 }
332 }
333 }
334
335 return res;
336 }
337
338 /* Destroy any additional DIG link encoder objects created by
339 * create_link_encoders().
340 * NB: Must only be called after destroy_links().
341 */
342 static void destroy_link_encoders(struct dc *dc)
343 {
344 unsigned int num_usb4_dpia;
345 unsigned int num_dig_link_enc;
346 int i;
347
348 if (!dc->res_pool)
349 return;
350
351 num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
352 num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
353
354 /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
355 * link encoders and physical display endpoints and does not require
356 * additional link encoder objects.
357 */
358 if (num_usb4_dpia == 0)
359 return;
360
361 for (i = 0; i < num_dig_link_enc; i++) {
362 struct link_encoder *link_enc = dc->res_pool->link_encoders[i];
363
364 if (link_enc) {
365 link_enc->funcs->destroy(&link_enc);
366 dc->res_pool->link_encoders[i] = NULL;
367 dc->res_pool->dig_link_enc_count--;
368 }
369 }
370 }
371
372 static struct dc_perf_trace *dc_perf_trace_create(void)
373 {
374 return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
375 }
376
377 static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
378 {
379 kfree(*perf_trace);
380 *perf_trace = NULL;
381 }
382
383 /**
384 * dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR
385 * @dc: dc reference
386 * @stream: Initial dc stream state
387 * @adjust: Updated parameters for vertical_total_min and vertical_total_max
388 *
389 * Looks up the pipe context of dc_stream_state and updates the
390 * vertical_total_min and vertical_total_max of DRR (Dynamic Refresh
391 * Rate), a power-saving feature that reduces the panel refresh rate
392 * while the screen is static.
393 *
394 * Return: %true if the pipe context is found and adjusted;
395 * %false if the pipe context is not found.
396 */
397 bool dc_stream_adjust_vmin_vmax(struct dc *dc,
398 struct dc_stream_state *stream,
399 struct dc_crtc_timing_adjust *adjust)
400 {
401 int i;
402
403 /*
404 * Don't adjust DRR while there are bandwidth optimizations pending, to
405 * avoid conflicting with firmware updates.
406 */
407 if (dc->ctx->dce_version > DCE_VERSION_MAX)
408 if (dc->optimized_required || dc->wm_optimized_required)
409 return false;
410
411 stream->adjust.v_total_max = adjust->v_total_max;
412 stream->adjust.v_total_mid = adjust->v_total_mid;
413 stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
414 stream->adjust.v_total_min = adjust->v_total_min;
415
416 for (i = 0; i < MAX_PIPES; i++) {
417 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
418
419 if (pipe->stream == stream && pipe->stream_res.tg) {
420 dc->hwss.set_drr(&pipe,
421 1,
422 *adjust);
423
424 return true;
425 }
426 }
427 return false;
428 }
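/*
 * Usage sketch (assumption: illustrative only, values are hypothetical). A
 * DM-side caller holding valid @dc and @stream pointers could widen the DRR
 * range like this; the fields mirror struct dc_crtc_timing_adjust as
 * consumed above.
 *
 *	struct dc_crtc_timing_adjust adjust = {0};
 *
 *	adjust.v_total_min = stream->timing.v_total;      // fastest refresh
 *	adjust.v_total_max = stream->timing.v_total * 2;  // slowest refresh
 *	if (!dc_stream_adjust_vmin_vmax(dc, stream, &adjust))
 *		DC_LOG_WARNING("DRR not adjusted: pipe not found or optimizations pending");
 */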
429
430 /**
431 * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of
432 * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate)
433 *
434 * @dc: [in] dc reference
435 * @stream: [in] Initial dc stream state
436 * @refresh_rate: [out] Last VTOTAL used by DRR
437 *
438 * Return: %true if the pipe context is found and there is an associated
439 * timing_generator for the DC;
440 * %false if the pipe context is not found or there is no
441 * timing_generator for the DC.
442 */
443 bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
444 struct dc_stream_state *stream,
445 uint32_t *refresh_rate)
446 {
447 bool status = false;
448
449 int i = 0;
450
451 for (i = 0; i < MAX_PIPES; i++) {
452 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
453
454 if (pipe->stream == stream && pipe->stream_res.tg) {
455 /* Only execute if a function pointer has been defined for
456 * the DC version in question
457 */
458 if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) {
459 pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate);
460
461 status = true;
462
463 break;
464 }
465 }
466 }
467
468 return status;
469 }
470
471 bool dc_stream_get_crtc_position(struct dc *dc,
472 struct dc_stream_state **streams, int num_streams,
473 unsigned int *v_pos, unsigned int *nom_v_pos)
474 {
475 /* TODO: Support multiple streams */
476 const struct dc_stream_state *stream = streams[0];
477 int i;
478 bool ret = false;
479 struct crtc_position position;
480
481 for (i = 0; i < MAX_PIPES; i++) {
482 struct pipe_ctx *pipe =
483 &dc->current_state->res_ctx.pipe_ctx[i];
484
485 if (pipe->stream == stream && pipe->stream_res.stream_enc) {
486 dc->hwss.get_position(&pipe, 1, &position);
487
488 *v_pos = position.vertical_count;
489 *nom_v_pos = position.nominal_vcount;
490 ret = true;
491 }
492 }
493 return ret;
494 }
495
496 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
497 static inline void
498 dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv,
499 struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
500 {
501 union dmub_rb_cmd cmd = {0};
502
503 cmd.secure_display.roi_info.phy_id = mux_mapping->phy_output_num;
504 cmd.secure_display.roi_info.otg_id = mux_mapping->otg_output_num;
505
506 if (is_stop) {
507 cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
508 cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_STOP_UPDATE;
509 } else {
510 cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
511 cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY;
512 cmd.secure_display.roi_info.x_start = rect->x;
513 cmd.secure_display.roi_info.y_start = rect->y;
514 cmd.secure_display.roi_info.x_end = rect->x + rect->width;
515 cmd.secure_display.roi_info.y_end = rect->y + rect->height;
516 }
517
518 dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
519 }
520
521 static inline void
522 dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu,
523 struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
524 {
525 if (is_stop)
526 dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
527 else
528 dmcu->funcs->forward_crc_window(dmcu, rect, mux_mapping);
529 }
530
531 bool
532 dc_stream_forward_crc_window(struct dc_stream_state *stream,
533 struct rect *rect, bool is_stop)
534 {
535 struct dmcu *dmcu;
536 struct dc_dmub_srv *dmub_srv;
537 struct otg_phy_mux mux_mapping;
538 struct pipe_ctx *pipe;
539 int i;
540 struct dc *dc = stream->ctx->dc;
541
542 for (i = 0; i < MAX_PIPES; i++) {
543 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
544 if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
545 break;
546 }
547
548 /* Stream not found */
549 if (i == MAX_PIPES)
550 return false;
551
552 mux_mapping.phy_output_num = stream->link->link_enc_hw_inst;
553 mux_mapping.otg_output_num = pipe->stream_res.tg->inst;
554
555 dmcu = dc->res_pool->dmcu;
556 dmub_srv = dc->ctx->dmub_srv;
557
558 /* forward to dmub */
559 if (dmub_srv)
560 dc_stream_forward_dmub_crc_window(dmub_srv, rect, &mux_mapping, is_stop);
561 /* forward to dmcu */
562 else if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
563 dc_stream_forward_dmcu_crc_window(dmcu, rect, &mux_mapping, is_stop);
564 else
565 return false;
566
567 return true;
568 }
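/*
 * Usage sketch (assumption: illustrative only, the ROI values are
 * hypothetical). With an active @stream, a caller could forward a CRC
 * region of interest, or stop updates by passing is_stop = true:
 *
 *	struct rect roi = { .x = 0, .y = 0, .width = 256, .height = 256 };
 *
 *	if (!dc_stream_forward_crc_window(stream, &roi, false))
 *		DC_LOG_WARNING("CRC window not forwarded");
 */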
569 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
570
571 /**
572 * dc_stream_configure_crc() - Configure CRC capture for the given stream.
573 * @dc: DC Object
574 * @stream: The stream to configure CRC on.
575 * @enable: Enable CRC if true, disable otherwise.
576 * @crc_window: CRC window (x/y start/end) information
577 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
578 * once.
579 *
580 * By default, only CRC0 is configured, and the entire frame is used to
581 * calculate the CRC.
582 *
583 * Return: %false if the stream is not found or CRC capture is not supported;
584 * %true if the stream has been configured.
585 */
586 bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
587 struct crc_params *crc_window, bool enable, bool continuous)
588 {
589 struct pipe_ctx *pipe;
590 struct crc_params param;
591 struct timing_generator *tg;
592
593 pipe = resource_get_otg_master_for_stream(
594 &dc->current_state->res_ctx, stream);
595
596 /* Stream not found */
597 if (pipe == NULL)
598 return false;
599
600 /* By default, capture the full frame */
601 param.windowa_x_start = 0;
602 param.windowa_y_start = 0;
603 param.windowa_x_end = pipe->stream->timing.h_addressable;
604 param.windowa_y_end = pipe->stream->timing.v_addressable;
605 param.windowb_x_start = 0;
606 param.windowb_y_start = 0;
607 param.windowb_x_end = pipe->stream->timing.h_addressable;
608 param.windowb_y_end = pipe->stream->timing.v_addressable;
609
610 if (crc_window) {
611 param.windowa_x_start = crc_window->windowa_x_start;
612 param.windowa_y_start = crc_window->windowa_y_start;
613 param.windowa_x_end = crc_window->windowa_x_end;
614 param.windowa_y_end = crc_window->windowa_y_end;
615 param.windowb_x_start = crc_window->windowb_x_start;
616 param.windowb_y_start = crc_window->windowb_y_start;
617 param.windowb_x_end = crc_window->windowb_x_end;
618 param.windowb_y_end = crc_window->windowb_y_end;
619 }
620
621 param.dsc_mode = pipe->stream->timing.flags.DSC ? 1:0;
622 param.odm_mode = pipe->next_odm_pipe ? 1:0;
623
624 /* Default to the union of both windows */
625 param.selection = UNION_WINDOW_A_B;
626 param.continuous_mode = continuous;
627 param.enable = enable;
628
629 tg = pipe->stream_res.tg;
630
631 /* Only call if supported */
632 if (tg->funcs->configure_crc)
633 return tg->funcs->configure_crc(tg, &param);
634 DC_LOG_WARNING("CRC capture not supported.");
635 return false;
636 }
637
638 /**
639 * dc_stream_get_crc() - Get CRC values for the given stream.
640 *
641 * @dc: DC object.
642 * @stream: The DC stream state of the stream to get CRCs from.
643 * @r_cr: CRC value for the red component.
644 * @g_y: CRC value for the green component.
645 * @b_cb: CRC value for the blue component.
646 *
647 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
648 *
649 * Return:
650 * %false if stream is not found, or if CRCs are not enabled.
651 */
652 bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
653 uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
654 {
655 int i;
656 struct pipe_ctx *pipe;
657 struct timing_generator *tg;
658
659 for (i = 0; i < MAX_PIPES; i++) {
660 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
661 if (pipe->stream == stream)
662 break;
663 }
664 /* Stream not found */
665 if (i == MAX_PIPES)
666 return false;
667
668 tg = pipe->stream_res.tg;
669
670 if (tg->funcs->get_crc)
671 return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
672 DC_LOG_WARNING("CRC capture not supported.");
673 return false;
674 }
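/*
 * Usage sketch (assumption: illustrative only). Full-frame continuous CRC
 * capture followed by a readback; passing a NULL crc_window selects the
 * whole frame as documented above.
 *
 *	uint32_t r_cr, g_y, b_cb;
 *
 *	if (dc_stream_configure_crc(dc, stream, NULL, true, true) &&
 *	    dc_stream_get_crc(dc, stream, &r_cr, &g_y, &b_cb))
 *		DC_LOG_DEBUG("CRC R/Cr=%u G/Y=%u B/Cb=%u", r_cr, g_y, b_cb);
 */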
675
676 void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
677 enum dc_dynamic_expansion option)
678 {
679 /* OPP FMT dyn expansion updates*/
680 int i;
681 struct pipe_ctx *pipe_ctx;
682
683 for (i = 0; i < MAX_PIPES; i++) {
684 if (dc->current_state->res_ctx.pipe_ctx[i].stream
685 == stream) {
686 pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
687 pipe_ctx->stream_res.opp->dyn_expansion = option;
688 pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
689 pipe_ctx->stream_res.opp,
690 COLOR_SPACE_YCBCR601,
691 stream->timing.display_color_depth,
692 stream->signal);
693 }
694 }
695 }
696
697 void dc_stream_set_dither_option(struct dc_stream_state *stream,
698 enum dc_dither_option option)
699 {
700 struct bit_depth_reduction_params params;
701 struct dc_link *link = stream->link;
702 struct pipe_ctx *pipes = NULL;
703 int i;
704
705 for (i = 0; i < MAX_PIPES; i++) {
706 if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
707 stream) {
708 pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
709 break;
710 }
711 }
712
713 if (!pipes)
714 return;
715 if (option > DITHER_OPTION_MAX)
716 return;
717
718 stream->dither_option = option;
719
720 memset(&params, 0, sizeof(params));
721 resource_build_bit_depth_reduction_params(stream, &params);
722 stream->bit_depth_params = params;
723
724 if (pipes->plane_res.xfm &&
725 pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
726 pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
727 pipes->plane_res.xfm,
728 pipes->plane_res.scl_data.lb_params.depth,
729 &stream->bit_depth_params);
730 }
731
732 pipes->stream_res.opp->funcs->
733 opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
734 }
735
736 bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
737 {
738 int i;
739 bool ret = false;
740 struct pipe_ctx *pipes;
741
742 for (i = 0; i < MAX_PIPES; i++) {
743 if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
744 pipes = &dc->current_state->res_ctx.pipe_ctx[i];
745 dc->hwss.program_gamut_remap(pipes);
746 ret = true;
747 }
748 }
749
750 return ret;
751 }
752
753 bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
754 {
755 int i;
756 bool ret = false;
757 struct pipe_ctx *pipes;
758
759 for (i = 0; i < MAX_PIPES; i++) {
760 if (dc->current_state->res_ctx.pipe_ctx[i].stream
761 == stream) {
762
763 pipes = &dc->current_state->res_ctx.pipe_ctx[i];
764 dc->hwss.program_output_csc(dc,
765 pipes,
766 stream->output_color_space,
767 stream->csc_color_matrix.matrix,
768 pipes->stream_res.opp->inst);
769 ret = true;
770 }
771 }
772
773 return ret;
774 }
775
776 void dc_stream_set_static_screen_params(struct dc *dc,
777 struct dc_stream_state **streams,
778 int num_streams,
779 const struct dc_static_screen_params *params)
780 {
781 int i, j;
782 struct pipe_ctx *pipes_affected[MAX_PIPES];
783 int num_pipes_affected = 0;
784
785 for (i = 0; i < num_streams; i++) {
786 struct dc_stream_state *stream = streams[i];
787
788 for (j = 0; j < MAX_PIPES; j++) {
789 if (dc->current_state->res_ctx.pipe_ctx[j].stream
790 == stream) {
791 pipes_affected[num_pipes_affected++] =
792 &dc->current_state->res_ctx.pipe_ctx[j];
793 }
794 }
795 }
796
797 dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
798 }
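/*
 * Usage sketch (assumption: illustrative only). Static-screen parameters are
 * applied per stream; the caller fills in the trigger and frame-count fields
 * of struct dc_static_screen_params as appropriate for the ASIC in use.
 *
 *	struct dc_static_screen_params params = {0};
 *	struct dc_stream_state *my_streams[] = { stream };
 *
 *	// fill in params for the platform, then:
 *	dc_stream_set_static_screen_params(dc, my_streams, 1, &params);
 */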
799
800 static void dc_destruct(struct dc *dc)
801 {
802 // reset link encoder assignment table on destruct
803 if (dc->res_pool && dc->res_pool->funcs->link_encs_assign)
804 link_enc_cfg_init(dc, dc->current_state);
805
806 if (dc->current_state) {
807 dc_release_state(dc->current_state);
808 dc->current_state = NULL;
809 }
810
811 destroy_links(dc);
812
813 destroy_link_encoders(dc);
814
815 if (dc->clk_mgr) {
816 dc_destroy_clk_mgr(dc->clk_mgr);
817 dc->clk_mgr = NULL;
818 }
819
820 dc_destroy_resource_pool(dc);
821
822 if (dc->link_srv)
823 link_destroy_link_service(&dc->link_srv);
824
825 if (dc->ctx->gpio_service)
826 dal_gpio_service_destroy(&dc->ctx->gpio_service);
827
828 if (dc->ctx->created_bios)
829 dal_bios_parser_destroy(&dc->ctx->dc_bios);
830
831 dc_perf_trace_destroy(&dc->ctx->perf_trace);
832
833 kfree(dc->ctx);
834 dc->ctx = NULL;
835
836 kfree(dc->bw_vbios);
837 dc->bw_vbios = NULL;
838
839 kfree(dc->bw_dceip);
840 dc->bw_dceip = NULL;
841
842 kfree(dc->dcn_soc);
843 dc->dcn_soc = NULL;
844
845 kfree(dc->dcn_ip);
846 dc->dcn_ip = NULL;
847
848 kfree(dc->vm_helper);
849 dc->vm_helper = NULL;
850
851 }
852
853 static bool dc_construct_ctx(struct dc *dc,
854 const struct dc_init_data *init_params)
855 {
856 struct dc_context *dc_ctx;
857
858 dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
859 if (!dc_ctx)
860 return false;
861
862 dc_ctx->cgs_device = init_params->cgs_device;
863 dc_ctx->driver_context = init_params->driver;
864 dc_ctx->dc = dc;
865 dc_ctx->asic_id = init_params->asic_id;
866 dc_ctx->dc_sink_id_count = 0;
867 dc_ctx->dc_stream_id_count = 0;
868 dc_ctx->dce_environment = init_params->dce_environment;
869 dc_ctx->dcn_reg_offsets = init_params->dcn_reg_offsets;
870 dc_ctx->nbio_reg_offsets = init_params->nbio_reg_offsets;
871
872 /* Create logger */
873
874 dc_ctx->dce_version = resource_parse_asic_id(init_params->asic_id);
875
876 dc_ctx->perf_trace = dc_perf_trace_create();
877 if (!dc_ctx->perf_trace) {
878 kfree(dc_ctx);
879 ASSERT_CRITICAL(false);
880 return false;
881 }
882
883 dc->ctx = dc_ctx;
884
885 dc->link_srv = link_create_link_service();
886 if (!dc->link_srv)
887 return false;
888
889 return true;
890 }
891
892 static bool dc_construct(struct dc *dc,
893 const struct dc_init_data *init_params)
894 {
895 struct dc_context *dc_ctx;
896 struct bw_calcs_dceip *dc_dceip;
897 struct bw_calcs_vbios *dc_vbios;
898 struct dcn_soc_bounding_box *dcn_soc;
899 struct dcn_ip_params *dcn_ip;
900
901 dc->config = init_params->flags;
902
903 // Allocate memory for the vm_helper
904 dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
905 if (!dc->vm_helper) {
906 dm_error("%s: failed to create dc->vm_helper\n", __func__);
907 goto fail;
908 }
909
910 memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));
911
912 dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
913 if (!dc_dceip) {
914 dm_error("%s: failed to create dceip\n", __func__);
915 goto fail;
916 }
917
918 dc->bw_dceip = dc_dceip;
919
920 dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
921 if (!dc_vbios) {
922 dm_error("%s: failed to create vbios\n", __func__);
923 goto fail;
924 }
925
926 dc->bw_vbios = dc_vbios;
927 dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
928 if (!dcn_soc) {
929 dm_error("%s: failed to create dcn_soc\n", __func__);
930 goto fail;
931 }
932
933 dc->dcn_soc = dcn_soc;
934
935 dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
936 if (!dcn_ip) {
937 dm_error("%s: failed to create dcn_ip\n", __func__);
938 goto fail;
939 }
940
941 dc->dcn_ip = dcn_ip;
942
943 if (!dc_construct_ctx(dc, init_params)) {
944 dm_error("%s: failed to create ctx\n", __func__);
945 goto fail;
946 }
947
948 dc_ctx = dc->ctx;
949
950 /* Resource should construct all asic specific resources.
951 * This should be the only place where we need to parse the asic id
952 */
953 if (init_params->vbios_override)
954 dc_ctx->dc_bios = init_params->vbios_override;
955 else {
956 /* Create BIOS parser */
957 struct bp_init_data bp_init_data;
958
959 bp_init_data.ctx = dc_ctx;
960 bp_init_data.bios = init_params->asic_id.atombios_base_address;
961
962 dc_ctx->dc_bios = dal_bios_parser_create(
963 &bp_init_data, dc_ctx->dce_version);
964
965 if (!dc_ctx->dc_bios) {
966 ASSERT_CRITICAL(false);
967 goto fail;
968 }
969
970 dc_ctx->created_bios = true;
971 }
972
973 dc->vendor_signature = init_params->vendor_signature;
974
975 /* Create GPIO service */
976 dc_ctx->gpio_service = dal_gpio_service_create(
977 dc_ctx->dce_version,
978 dc_ctx->dce_environment,
979 dc_ctx);
980
981 if (!dc_ctx->gpio_service) {
982 ASSERT_CRITICAL(false);
983 goto fail;
984 }
985
986 dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
987 if (!dc->res_pool)
988 goto fail;
989
990 /* set i2c speed if not done by the respective dcnxxx__resource.c */
991 if (dc->caps.i2c_speed_in_khz_hdcp == 0)
992 dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
993
994 dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
995 if (!dc->clk_mgr)
996 goto fail;
997 #ifdef CONFIG_DRM_AMD_DC_FP
998 dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
999
1000 if (dc->res_pool->funcs->update_bw_bounding_box) {
1001 DC_FP_START();
1002 dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
1003 DC_FP_END();
1004 }
1005 #endif
1006
1007 /* Creation of current_state must occur after dc->dml
1008 * is initialized in dc_create_resource_pool because
1009 * on creation it copies the contents of dc->dml
1010 */
1011
1012 dc->current_state = dc_create_state(dc);
1013
1014 if (!dc->current_state) {
1015 dm_error("%s: failed to create validate ctx\n", __func__);
1016 goto fail;
1017 }
1018
1019 if (!create_links(dc, init_params->num_virtual_links))
1020 goto fail;
1021
1022 /* Create additional DIG link encoder objects if fewer than the platform
1023 * supports were created during link construction.
1024 */
1025 if (!create_link_encoders(dc))
1026 goto fail;
1027
1028 dc_resource_state_construct(dc, dc->current_state);
1029
1030 return true;
1031
1032 fail:
1033 return false;
1034 }
1035
1036 static void disable_all_writeback_pipes_for_stream(
1037 const struct dc *dc,
1038 struct dc_stream_state *stream,
1039 struct dc_state *context)
1040 {
1041 int i;
1042
1043 for (i = 0; i < stream->num_wb_info; i++)
1044 stream->writeback_info[i].wb_enabled = false;
1045 }
1046
1047 static void apply_ctx_interdependent_lock(struct dc *dc,
1048 struct dc_state *context,
1049 struct dc_stream_state *stream,
1050 bool lock)
1051 {
1052 int i;
1053
1054 /* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
1055 if (dc->hwss.interdependent_update_lock)
1056 dc->hwss.interdependent_update_lock(dc, context, lock);
1057 else {
1058 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1059 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1060 struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1061
1062 // Copied conditions that were previously in dce110_apply_ctx_for_surface
1063 if (stream == pipe_ctx->stream) {
1064 if (resource_is_pipe_type(pipe_ctx, OPP_HEAD) &&
1065 (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
1066 dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
1067 }
1068 }
1069 }
1070 }
1071
1072 static void phantom_pipe_blank(
1073 struct dc *dc,
1074 struct timing_generator *tg,
1075 int width,
1076 int height)
1077 {
1078 struct dce_hwseq *hws = dc->hwseq;
1079 enum dc_color_space color_space;
1080 struct tg_color black_color = {0};
1081 struct output_pixel_processor *opp = NULL;
1082 uint32_t num_opps, opp_id_src0, opp_id_src1;
1083 uint32_t otg_active_width, otg_active_height;
1084 uint32_t i;
1085
1086 /* program opp dpg blank color */
1087 color_space = COLOR_SPACE_SRGB;
1088 color_space_to_black_color(dc, color_space, &black_color);
1089
1090 otg_active_width = width;
1091 otg_active_height = height;
1092
1093 /* get the OPTC source */
1094 tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
1095 ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp);
1096
1097 for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
1098 if (dc->res_pool->opps[i] != NULL && dc->res_pool->opps[i]->inst == opp_id_src0) {
1099 opp = dc->res_pool->opps[i];
1100 break;
1101 }
1102 }
1103
1104 if (opp && opp->funcs->opp_set_disp_pattern_generator)
1105 opp->funcs->opp_set_disp_pattern_generator(
1106 opp,
1107 CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
1108 CONTROLLER_DP_COLOR_SPACE_UDEFINED,
1109 COLOR_DEPTH_UNDEFINED,
1110 &black_color,
1111 otg_active_width,
1112 otg_active_height,
1113 0);
1114
1115 if (tg->funcs->is_tg_enabled(tg))
1116 hws->funcs.wait_for_blank_complete(opp);
1117 }
1118
1119 static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
1120 {
1121 if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
1122 memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color));
1123
1124 if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
1125 get_hdr_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
1126 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
1127 get_surface_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
1128 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
1129 get_surface_tile_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
1130 else {
1131 if (dc->ctx->dce_version < DCN_VERSION_2_0)
1132 color_space_to_black_color(
1133 dc, pipe_ctx->stream->output_color_space, &(pipe_ctx->visual_confirm_color));
1134 }
1135 if (dc->ctx->dce_version >= DCN_VERSION_2_0) {
1136 if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE)
1137 get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
1138 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP)
1139 get_subvp_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color));
1140 else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH)
1141 get_mclk_switch_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color));
1142 }
1143 }
1144 }
1145
1146 static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
1147 {
1148 int i, j;
1149 struct dc_state *dangling_context = dc_create_state(dc);
1150 struct dc_state *current_ctx;
1151 struct pipe_ctx *pipe;
1152 struct timing_generator *tg;
1153
1154 if (dangling_context == NULL)
1155 return;
1156
1157 dc_resource_state_copy_construct(dc->current_state, dangling_context);
1158
1159 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1160 struct dc_stream_state *old_stream =
1161 dc->current_state->res_ctx.pipe_ctx[i].stream;
1162 bool should_disable = true;
1163 bool pipe_split_change = false;
1164
1165 if ((context->res_ctx.pipe_ctx[i].top_pipe) &&
1166 (dc->current_state->res_ctx.pipe_ctx[i].top_pipe))
1167 pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx !=
1168 dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx;
1169 else
1170 pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe !=
1171 dc->current_state->res_ctx.pipe_ctx[i].top_pipe;
1172
1173 for (j = 0; j < context->stream_count; j++) {
1174 if (old_stream == context->streams[j]) {
1175 should_disable = false;
1176 break;
1177 }
1178 }
1179 if (!should_disable && pipe_split_change &&
1180 dc->current_state->stream_count != context->stream_count)
1181 should_disable = true;
1182
1183 if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe &&
1184 !dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) {
1185 struct pipe_ctx *old_pipe, *new_pipe;
1186
1187 old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1188 new_pipe = &context->res_ctx.pipe_ctx[i];
1189
1190 if (old_pipe->plane_state && !new_pipe->plane_state)
1191 should_disable = true;
1192 }
1193
1194 if (should_disable && old_stream) {
1195 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1196 tg = pipe->stream_res.tg;
1197 /* When disabling plane for a phantom pipe, we must turn on the
1198 * phantom OTG so the disable programming gets the double buffer
1199 * update. Otherwise the pipe will be left in a partially disabled
1200 * state that can result in underflow or hang when enabling it
1201 * again for different use.
1202 */
1203 if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
1204 if (tg->funcs->enable_crtc) {
1205 int main_pipe_width, main_pipe_height;
1206
1207 main_pipe_width = old_stream->mall_stream_config.paired_stream->dst.width;
1208 main_pipe_height = old_stream->mall_stream_config.paired_stream->dst.height;
1209 phantom_pipe_blank(dc, tg, main_pipe_width, main_pipe_height);
1210 tg->funcs->enable_crtc(tg);
1211 }
1212 }
1213 dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
1214 disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
1215
1216 if (pipe->stream && pipe->plane_state)
1217 dc_update_viusal_confirm_color(dc, context, pipe);
1218
1219 if (dc->hwss.apply_ctx_for_surface) {
1220 apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
1221 dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
1222 apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
1223 dc->hwss.post_unlock_program_front_end(dc, dangling_context);
1224 }
1225 if (dc->hwss.program_front_end_for_ctx) {
1226 dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
1227 dc->hwss.program_front_end_for_ctx(dc, dangling_context);
1228 dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
1229 dc->hwss.post_unlock_program_front_end(dc, dangling_context);
1230 }
1231 /* We need to put the phantom OTG back into its default (disabled) state or we
1232 * can get corruption when transitioning from one SubVP config to a different one.
1233 * The OTG is set to disable on the falling edge of VUPDATE so the plane disable
1234 * will still get its double buffer update.
1235 */
1236 if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
1237 if (tg->funcs->disable_phantom_crtc)
1238 tg->funcs->disable_phantom_crtc(tg);
1239 }
1240 }
1241 }
1242
1243 current_ctx = dc->current_state;
1244 dc->current_state = dangling_context;
1245 dc_release_state(current_ctx);
1246 }
1247
1248 static void disable_vbios_mode_if_required(
1249 struct dc *dc,
1250 struct dc_state *context)
1251 {
1252 unsigned int i, j;
1253
1254 /* check if timing_changed, disable stream*/
1255 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1256 struct dc_stream_state *stream = NULL;
1257 struct dc_link *link = NULL;
1258 struct pipe_ctx *pipe = NULL;
1259
1260 pipe = &context->res_ctx.pipe_ctx[i];
1261 stream = pipe->stream;
1262 if (stream == NULL)
1263 continue;
1264
1265 if (stream->apply_seamless_boot_optimization)
1266 continue;
1267
1268 // only looking for first odm pipe
1269 if (pipe->prev_odm_pipe)
1270 continue;
1271
1272 if (stream->link->local_sink &&
1273 stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
1274 link = stream->link;
1275 }
1276
1277 if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
1278 unsigned int enc_inst, tg_inst = 0;
1279 unsigned int pix_clk_100hz;
1280
1281 enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
1282 if (enc_inst != ENGINE_ID_UNKNOWN) {
1283 for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
1284 if (dc->res_pool->stream_enc[j]->id == enc_inst) {
1285 tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
1286 dc->res_pool->stream_enc[j]);
1287 break;
1288 }
1289 }
1290
1291 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
1292 dc->res_pool->dp_clock_source,
1293 tg_inst, &pix_clk_100hz);
1294
1295 if (link->link_status.link_active) {
1296 uint32_t requested_pix_clk_100hz =
1297 pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;
1298
1299 if (pix_clk_100hz != requested_pix_clk_100hz) {
1300 dc->link_srv->set_dpms_off(pipe);
1301 pipe->stream->dpms_off = false;
1302 }
1303 }
1304 }
1305 }
1306 }
1307 }
1308
1309 static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
1310 {
1311 int i;
1312 PERF_TRACE();
1313 for (i = 0; i < MAX_PIPES; i++) {
1314 int count = 0;
1315 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
1316
1317 if (!pipe->plane_state || pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
1318 continue;
1319
1320 /* Timeout 100 ms */
1321 while (count < 100000) {
1322 /* Must set to false to start with, due to OR in update function */
1323 pipe->plane_state->status.is_flip_pending = false;
1324 dc->hwss.update_pending_status(pipe);
1325 if (!pipe->plane_state->status.is_flip_pending)
1326 break;
1327 udelay(1);
1328 count++;
1329 }
1330 ASSERT(!pipe->plane_state->status.is_flip_pending);
1331 }
1332 PERF_TRACE();
1333 }
1334
1335 /* Public functions */
1336
1337 struct dc *dc_create(const struct dc_init_data *init_params)
1338 {
1339 struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
1340 unsigned int full_pipe_count;
1341
1342 if (!dc)
1343 return NULL;
1344
1345 if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
1346 if (!dc_construct_ctx(dc, init_params))
1347 goto destruct_dc;
1348 } else {
1349 if (!dc_construct(dc, init_params))
1350 goto destruct_dc;
1351
1352 full_pipe_count = dc->res_pool->pipe_count;
1353 if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
1354 full_pipe_count--;
1355 dc->caps.max_streams = min(
1356 full_pipe_count,
1357 dc->res_pool->stream_enc_count);
1358
1359 dc->caps.max_links = dc->link_count;
1360 dc->caps.max_audios = dc->res_pool->audio_count;
1361 dc->caps.linear_pitch_alignment = 64;
1362
1363 dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
1364
1365 dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;
1366
1367 if (dc->res_pool->dmcu != NULL)
1368 dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
1369 }
1370
1371 dc->dcn_reg_offsets = init_params->dcn_reg_offsets;
1372 dc->nbio_reg_offsets = init_params->nbio_reg_offsets;
1373
1374 /* Populate versioning information */
1375 dc->versions.dc_ver = DC_VER;
1376
1377 dc->build_id = DC_BUILD_ID;
1378
1379 DC_LOG_DC("Display Core initialized\n");
1380
1381
1382
1383 return dc;
1384
1385 destruct_dc:
1386 dc_destruct(dc);
1387 kfree(dc);
1388 return NULL;
1389 }
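/*
 * Usage sketch (assumption: illustrative only). The expected create/init/
 * teardown sequence from the DM layer, with @init_params filled in by the
 * platform code:
 *
 *	struct dc *dc = dc_create(&init_params);
 *
 *	if (!dc)
 *		return NULL;
 *	dc_hardware_init(dc);
 *	// ... commit states, handle hotplug, etc. ...
 *	dc_destroy(&dc);
 */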
1390
1391 static void detect_edp_presence(struct dc *dc)
1392 {
1393 struct dc_link *edp_links[MAX_NUM_EDP];
1394 struct dc_link *edp_link = NULL;
1395 enum dc_connection_type type;
1396 int i;
1397 int edp_num;
1398
1399 dc_get_edp_links(dc, edp_links, &edp_num);
1400 if (!edp_num)
1401 return;
1402
1403 for (i = 0; i < edp_num; i++) {
1404 edp_link = edp_links[i];
1405 if (dc->config.edp_not_connected) {
1406 edp_link->edp_sink_present = false;
1407 } else {
1408 dc_link_detect_connection_type(edp_link, &type);
1409 edp_link->edp_sink_present = (type != dc_connection_none);
1410 }
1411 }
1412 }
1413
1414 void dc_hardware_init(struct dc *dc)
1415 {
1416
1417 detect_edp_presence(dc);
1418 if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
1419 dc->hwss.init_hw(dc);
1420 }
1421
1422 void dc_init_callbacks(struct dc *dc,
1423 const struct dc_callback_init *init_params)
1424 {
1425 dc->ctx->cp_psp = init_params->cp_psp;
1426 }
1427
1428 void dc_deinit_callbacks(struct dc *dc)
1429 {
1430 memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
1431 }
1432
1433 void dc_destroy(struct dc **dc)
1434 {
1435 dc_destruct(*dc);
1436 kfree(*dc);
1437 *dc = NULL;
1438 }
1439
1440 static void enable_timing_multisync(
1441 struct dc *dc,
1442 struct dc_state *ctx)
1443 {
1444 int i, multisync_count = 0;
1445 int pipe_count = dc->res_pool->pipe_count;
1446 struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };
1447
1448 for (i = 0; i < pipe_count; i++) {
1449 if (!ctx->res_ctx.pipe_ctx[i].stream ||
1450 !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
1451 continue;
1452 if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
1453 continue;
1454 multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
1455 multisync_count++;
1456 }
1457
1458 if (multisync_count > 0) {
1459 dc->hwss.enable_per_frame_crtc_position_reset(
1460 dc, multisync_count, multisync_pipes);
1461 }
1462 }
1463
1464 static void program_timing_sync(
1465 struct dc *dc,
1466 struct dc_state *ctx)
1467 {
1468 int i, j, k;
1469 int group_index = 0;
1470 int num_group = 0;
1471 int pipe_count = dc->res_pool->pipe_count;
1472 struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
1473
1474 for (i = 0; i < pipe_count; i++) {
1475 if (!ctx->res_ctx.pipe_ctx[i].stream
1476 || ctx->res_ctx.pipe_ctx[i].top_pipe
1477 || ctx->res_ctx.pipe_ctx[i].prev_odm_pipe)
1478 continue;
1479
1480 unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
1481 }
1482
1483 for (i = 0; i < pipe_count; i++) {
1484 int group_size = 1;
1485 enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE;
1486 struct pipe_ctx *pipe_set[MAX_PIPES];
1487
1488 if (!unsynced_pipes[i])
1489 continue;
1490
1491 pipe_set[0] = unsynced_pipes[i];
1492 unsynced_pipes[i] = NULL;
1493
1494 /* Add tg to the set, search rest of the tg's for ones with
1495 * same timing, add all tgs with same timing to the group
1496 */
1497 for (j = i + 1; j < pipe_count; j++) {
1498 if (!unsynced_pipes[j])
1499 continue;
1500 if (sync_type != TIMING_SYNCHRONIZABLE &&
1501 dc->hwss.enable_vblanks_synchronization &&
1502 unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks &&
1503 resource_are_vblanks_synchronizable(
1504 unsynced_pipes[j]->stream,
1505 pipe_set[0]->stream)) {
1506 sync_type = VBLANK_SYNCHRONIZABLE;
1507 pipe_set[group_size] = unsynced_pipes[j];
1508 unsynced_pipes[j] = NULL;
1509 group_size++;
1510 } else
1511 if (sync_type != VBLANK_SYNCHRONIZABLE &&
1512 resource_are_streams_timing_synchronizable(
1513 unsynced_pipes[j]->stream,
1514 pipe_set[0]->stream)) {
1515 sync_type = TIMING_SYNCHRONIZABLE;
1516 pipe_set[group_size] = unsynced_pipes[j];
1517 unsynced_pipes[j] = NULL;
1518 group_size++;
1519 }
1520 }
1521
1522 /* set first unblanked pipe as master */
1523 for (j = 0; j < group_size; j++) {
1524 bool is_blanked;
1525
1526 if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
1527 is_blanked =
1528 pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
1529 else
1530 is_blanked =
1531 pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
1532 if (!is_blanked) {
1533 if (j == 0)
1534 break;
1535
1536 swap(pipe_set[0], pipe_set[j]);
1537 break;
1538 }
1539 }
1540
1541 for (k = 0; k < group_size; k++) {
1542 struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);
1543
1544 status->timing_sync_info.group_id = num_group;
1545 status->timing_sync_info.group_size = group_size;
1546 if (k == 0)
1547 status->timing_sync_info.master = true;
1548 else
1549 status->timing_sync_info.master = false;
1550
1551 }
1552
1553 /* remove any other pipes that have already been synced */
1554 if (dc->config.use_pipe_ctx_sync_logic) {
1555 /* check pipe's syncd to decide which pipe to be removed */
1556 for (j = 1; j < group_size; j++) {
1557 if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) {
1558 group_size--;
1559 pipe_set[j] = pipe_set[group_size];
1560 j--;
1561 } else
1562 /* link slave pipe's syncd with master pipe */
1563 pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
1564 }
1565 } else {
1566 for (j = j + 1; j < group_size; j++) {
1567 bool is_blanked;
1568
1569 if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
1570 is_blanked =
1571 pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
1572 else
1573 is_blanked =
1574 pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
1575 if (!is_blanked) {
1576 group_size--;
1577 pipe_set[j] = pipe_set[group_size];
1578 j--;
1579 }
1580 }
1581 }
1582
1583 if (group_size > 1) {
1584 if (sync_type == TIMING_SYNCHRONIZABLE) {
1585 dc->hwss.enable_timing_synchronization(
1586 dc, group_index, group_size, pipe_set);
1587 } else
1588 if (sync_type == VBLANK_SYNCHRONIZABLE) {
1589 dc->hwss.enable_vblanks_synchronization(
1590 dc, group_index, group_size, pipe_set);
1591 }
1592 group_index++;
1593 }
1594 num_group++;
1595 }
1596 }
1597
1598 static bool streams_changed(struct dc *dc,
1599 struct dc_stream_state *streams[],
1600 uint8_t stream_count)
1601 {
1602 uint8_t i;
1603
1604 if (stream_count != dc->current_state->stream_count)
1605 return true;
1606
1607 for (i = 0; i < dc->current_state->stream_count; i++) {
1608 if (dc->current_state->streams[i] != streams[i])
1609 return true;
1610 if (!streams[i]->link->link_state_valid)
1611 return true;
1612 }
1613
1614 return false;
1615 }
1616
1617 bool dc_validate_boot_timing(const struct dc *dc,
1618 const struct dc_sink *sink,
1619 struct dc_crtc_timing *crtc_timing)
1620 {
1621 struct timing_generator *tg;
1622 struct stream_encoder *se = NULL;
1623
1624 struct dc_crtc_timing hw_crtc_timing = {0};
1625
1626 struct dc_link *link = sink->link;
1627 unsigned int i, enc_inst, tg_inst = 0;
1628
1629 /* Support seamless boot on EDP displays only */
1630 if (sink->sink_signal != SIGNAL_TYPE_EDP) {
1631 return false;
1632 }
1633
1634 if (dc->debug.force_odm_combine)
1635 return false;
1636
1637 /* Check for enabled DIG to identify enabled display */
1638 if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
1639 return false;
1640
1641 enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
1642
1643 if (enc_inst == ENGINE_ID_UNKNOWN)
1644 return false;
1645
1646 for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
1647 if (dc->res_pool->stream_enc[i]->id == enc_inst) {
1648
1649 se = dc->res_pool->stream_enc[i];
1650
1651 tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
1652 dc->res_pool->stream_enc[i]);
1653 break;
1654 }
1655 }
1656
1657 // tg_inst not found
1658 if (i == dc->res_pool->stream_enc_count)
1659 return false;
1660
1661 if (tg_inst >= dc->res_pool->timing_generator_count)
1662 return false;
1663
1664 if (tg_inst != link->link_enc->preferred_engine)
1665 return false;
1666
1667 tg = dc->res_pool->timing_generators[tg_inst];
1668
1669 if (!tg->funcs->get_hw_timing)
1670 return false;
1671
1672 if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
1673 return false;
1674
1675 if (crtc_timing->h_total != hw_crtc_timing.h_total)
1676 return false;
1677
1678 if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
1679 return false;
1680
1681 if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
1682 return false;
1683
1684 if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
1685 return false;
1686
1687 if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
1688 return false;
1689
1690 if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
1691 return false;
1692
1693 if (crtc_timing->v_total != hw_crtc_timing.v_total)
1694 return false;
1695
1696 if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
1697 return false;
1698
1699 if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
1700 return false;
1701
1702 if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
1703 return false;
1704
1705 if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
1706 return false;
1707
1708 if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
1709 return false;
1710
1711 /* block DSC for now, as VBIOS does not currently support DSC timings */
1712 if (crtc_timing->flags.DSC)
1713 return false;
1714
1715 if (dc_is_dp_signal(link->connector_signal)) {
1716 unsigned int pix_clk_100hz;
1717 uint32_t numOdmPipes = 1;
1718 uint32_t id_src[4] = {0};
1719
1720 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
1721 dc->res_pool->dp_clock_source,
1722 tg_inst, &pix_clk_100hz);
1723
1724 if (tg->funcs->get_optc_source)
1725 tg->funcs->get_optc_source(tg,
1726 &numOdmPipes, &id_src[0], &id_src[1]);
1727
1728 if (numOdmPipes == 2)
1729 pix_clk_100hz *= 2;
1730 if (numOdmPipes == 4)
1731 pix_clk_100hz *= 4;
1732
1733 // Note: In rare cases, HW pixclk may differ from crtc's pixclk
1734 // slightly due to rounding issues in 10 kHz units.
1735 if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
1736 return false;
1737
1738 if (!se->funcs->dp_get_pixel_format)
1739 return false;
1740
1741 if (!se->funcs->dp_get_pixel_format(
1742 se,
1743 &hw_crtc_timing.pixel_encoding,
1744 &hw_crtc_timing.display_color_depth))
1745 return false;
1746
1747 if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
1748 return false;
1749
1750 if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
1751 return false;
1752 }
1753
1754 if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
1755 return false;
1756 }
1757
1758 if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) {
1759 DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
1760 return false;
1761 }
1762
1763 return true;
1764 }
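/*
 * Usage sketch (assumption: illustrative only). During seamless-boot
 * detection the DM layer keeps the firmware-programmed timing only if it
 * matches what it is about to commit:
 *
 *	if (dc_validate_boot_timing(dc, sink, &stream->timing))
 *		stream->apply_seamless_boot_optimization = true;
 */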
1765
1766 static inline bool should_update_pipe_for_stream(
1767 struct dc_state *context,
1768 struct pipe_ctx *pipe_ctx,
1769 struct dc_stream_state *stream)
1770 {
1771 return (pipe_ctx->stream && pipe_ctx->stream == stream);
1772 }
1773
1774 static inline bool should_update_pipe_for_plane(
1775 struct dc_state *context,
1776 struct pipe_ctx *pipe_ctx,
1777 struct dc_plane_state *plane_state)
1778 {
1779 return (pipe_ctx->plane_state == plane_state);
1780 }
1781
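/* Walk the pipes of @context (or dc->current_state when @context is NULL)
 * and call the HW sequencer's setup_stereo hook for every pipe whose stream
 * is in @streams.
 */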
1782 void dc_enable_stereo(
1783 struct dc *dc,
1784 struct dc_state *context,
1785 struct dc_stream_state *streams[],
1786 uint8_t stream_count)
1787 {
1788 int i, j;
1789 struct pipe_ctx *pipe;
1790
1791 for (i = 0; i < MAX_PIPES; i++) {
1792 if (context != NULL) {
1793 pipe = &context->res_ctx.pipe_ctx[i];
1794 } else {
1795 context = dc->current_state;
1796 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1797 }
1798
1799 for (j = 0; pipe && j < stream_count; j++) {
1800 if (should_update_pipe_for_stream(context, pipe, streams[j]) &&
1801 dc->hwss.setup_stereo)
1802 dc->hwss.setup_stereo(pipe, dc);
1803 }
1804 }
1805 }
1806
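/* Synchronize timing across streams: only needed (and only done) when more
 * than one stream is active and timing sync is not disabled via debug options.
 */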
1807 void dc_trigger_sync(struct dc *dc, struct dc_state *context)
1808 {
1809 if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
1810 enable_timing_multisync(dc, context);
1811 program_timing_sync(dc, context);
1812 }
1813 }
1814
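/* Build a bitmask with one bit set per pipe that currently drives a stream. */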
1815 static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
1816 {
1817 int i;
1818 unsigned int stream_mask = 0;
1819
1820 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1821 if (context->res_ctx.pipe_ctx[i].stream)
1822 stream_mask |= 1 << i;
1823 }
1824
1825 return stream_mask;
1826 }
1827
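/* Z10 restore/save-init is handled by the HW sequencer; these wrappers are
 * no-ops on ASICs that do not implement the corresponding hooks.
 */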
1828 void dc_z10_restore(const struct dc *dc)
1829 {
1830 if (dc->hwss.z10_restore)
1831 dc->hwss.z10_restore(dc);
1832 }
1833
1834 void dc_z10_save_init(struct dc *dc)
1835 {
1836 if (dc->hwss.z10_save_init)
1837 dc->hwss.z10_save_init(dc);
1838 }
1839
1840 /**
1841 * dc_commit_state_no_check - Apply context to the hardware
1842 *
1843 * @dc: DC object with the current status to be updated
1844 * @context: New state that will become the current status at the end of this function
1845 *
1846 * Applies the given context to the hardware and copies it into the current context.
1847 * It's up to the user to release the src context afterwards.
1848 *
1849 * Return: an enum dc_status result code for the operation
1850 */
1851 static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
1852 {
1853 struct dc_bios *dcb = dc->ctx->dc_bios;
1854 enum dc_status result = DC_ERROR_UNEXPECTED;
1855 struct pipe_ctx *pipe;
1856 int i, k, l;
1857 struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
1858 struct dc_state *old_state;
1859 bool subvp_prev_use = false;
1860
1861 dc_z10_restore(dc);
1862 dc_allow_idle_optimizations(dc, false);
1863
1864 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1865 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1866
1867 /* Check old context for SubVP */
1868 subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
1869 if (subvp_prev_use)
1870 break;
1871 }
1872
1873 for (i = 0; i < context->stream_count; i++)
1874 dc_streams[i] = context->streams[i];
1875
1876 if (!dcb->funcs->is_accelerated_mode(dcb)) {
1877 disable_vbios_mode_if_required(dc, context);
1878 dc->hwss.enable_accelerated_mode(dc, context);
1879 }
1880
1881 if (context->stream_count > get_seamless_boot_stream_count(context) ||
1882 context->stream_count == 0)
1883 dc->hwss.prepare_bandwidth(dc, context);
1884
1885 /* When SubVP is active, all HW programming must be done while
1886 * SubVP lock is acquired
1887 */
1888 if (dc->hwss.subvp_pipe_control_lock)
1889 dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
1890
1891 if (dc->debug.enable_double_buffered_dsc_pg_support)
1892 dc->hwss.update_dsc_pg(dc, context, false);
1893
1894 disable_dangling_plane(dc, context);
1895 /* re-program planes for existing stream, in case we need to
1896 * free up plane resource for later use
1897 */
1898 if (dc->hwss.apply_ctx_for_surface) {
1899 for (i = 0; i < context->stream_count; i++) {
1900 if (context->streams[i]->mode_changed)
1901 continue;
1902 apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
1903 dc->hwss.apply_ctx_for_surface(
1904 dc, context->streams[i],
1905 context->stream_status[i].plane_count,
1906 context); /* use new pipe config in new context */
1907 apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
1908 dc->hwss.post_unlock_program_front_end(dc, context);
1909 }
1910 }
1911
1912 /* Program hardware */
1913 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1914 pipe = &context->res_ctx.pipe_ctx[i];
1915 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
1916 }
1917
1918 result = dc->hwss.apply_ctx_to_hw(dc, context);
1919
1920 if (result != DC_OK) {
1921 /* Application of dc_state to hardware stopped. */
1922 dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
1923 return result;
1924 }
1925
1926 dc_trigger_sync(dc, context);
1927
1928 /* Full update should unconditionally be triggered when dc_commit_state_no_check is called */
1929 for (i = 0; i < context->stream_count; i++) {
1930 uint32_t prev_dsc_changed = context->streams[i]->update_flags.bits.dsc_changed;
1931
1932 context->streams[i]->update_flags.raw = 0xFFFFFFFF;
1933 context->streams[i]->update_flags.bits.dsc_changed = prev_dsc_changed;
1934 }
1935
1936 /* Program all planes within new context */
1937 if (dc->hwss.program_front_end_for_ctx) {
1938 dc->hwss.interdependent_update_lock(dc, context, true);
1939 dc->hwss.program_front_end_for_ctx(dc, context);
1940 dc->hwss.interdependent_update_lock(dc, context, false);
1941 dc->hwss.post_unlock_program_front_end(dc, context);
1942 }
1943
1944 if (dc->hwss.commit_subvp_config)
1945 dc->hwss.commit_subvp_config(dc, context);
1946 if (dc->hwss.subvp_pipe_control_lock)
1947 dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use);
1948
1949 for (i = 0; i < context->stream_count; i++) {
1950 const struct dc_link *link = context->streams[i]->link;
1951
1952 if (!context->streams[i]->mode_changed)
1953 continue;
1954
1955 if (dc->hwss.apply_ctx_for_surface) {
1956 apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
1957 dc->hwss.apply_ctx_for_surface(
1958 dc, context->streams[i],
1959 context->stream_status[i].plane_count,
1960 context);
1961 apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
1962 dc->hwss.post_unlock_program_front_end(dc, context);
1963 }
1964
1965 /*
1966 * enable stereo
1967 * TODO rework dc_enable_stereo call to work with validation sets?
1968 */
1969 for (k = 0; k < MAX_PIPES; k++) {
1970 pipe = &context->res_ctx.pipe_ctx[k];
1971
1972 for (l = 0 ; pipe && l < context->stream_count; l++) {
1973 if (context->streams[l] &&
1974 context->streams[l] == pipe->stream &&
1975 dc->hwss.setup_stereo)
1976 dc->hwss.setup_stereo(pipe, dc);
1977 }
1978 }
1979
1980 CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
1981 context->streams[i]->timing.h_addressable,
1982 context->streams[i]->timing.v_addressable,
1983 context->streams[i]->timing.h_total,
1984 context->streams[i]->timing.v_total,
1985 context->streams[i]->timing.pix_clk_100hz / 10);
1986 }
1987
1988 dc_enable_stereo(dc, context, dc_streams, context->stream_count);
1989
1990 if (context->stream_count > get_seamless_boot_stream_count(context) ||
1991 context->stream_count == 0) {
1992 /* Must wait for no flips to be pending before doing optimize bw */
1993 wait_for_no_pipes_pending(dc, context);
1994 /* pplib is notified if disp_num changed */
1995 dc->hwss.optimize_bandwidth(dc, context);
1996 }
1997
1998 if (dc->debug.enable_double_buffered_dsc_pg_support)
1999 dc->hwss.update_dsc_pg(dc, context, true);
2000
2001 if (dc->ctx->dce_version >= DCE_VERSION_MAX)
2002 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
2003 else
2004 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
2005
2006 context->stream_mask = get_stream_mask(dc, context);
2007
2008 if (context->stream_mask != dc->current_state->stream_mask)
2009 dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);
2010
2011 for (i = 0; i < context->stream_count; i++)
2012 context->streams[i]->mode_changed = false;
2013
2014 /* Clear update flags that were set earlier to avoid redundant programming */
2015 for (i = 0; i < context->stream_count; i++) {
2016 context->streams[i]->update_flags.raw = 0x0;
2017 }
2018
2019 old_state = dc->current_state;
2020 dc->current_state = context;
2021
2022 dc_release_state(old_state);
2023
2024 dc_retain_state(dc->current_state);
2025
2026 return result;
2027 }
2028
2029 static bool commit_minimal_transition_state(struct dc *dc,
2030 struct dc_state *transition_base_context);
2031
2032 /**
2033 * dc_commit_streams - Commit current stream state
2034 *
2035 * @dc: DC object with the commit state to be configured in the hardware
2036 * @streams: Array with a list of stream state
2037 * @stream_count: Total of streams
2038 *
2039 * Function responsible for committing stream changes to the hardware.
2040 *
2041 * Return:
2042 * DC_OK if everything worked as expected; otherwise, a dc_status error
2043 * code.
2044 */
2045 enum dc_status dc_commit_streams(struct dc *dc,
2046 struct dc_stream_state *streams[],
2047 uint8_t stream_count)
2048 {
2049 int i, j;
2050 struct dc_state *context;
2051 enum dc_status res = DC_OK;
2052 struct dc_validation_set set[MAX_STREAMS] = {0};
2053 struct pipe_ctx *pipe;
2054 bool handle_exit_odm2to1 = false;
2055
2056 if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW)
2057 return res;
2058
2059 if (!streams_changed(dc, streams, stream_count))
2060 return res;
2061
2062 DC_LOG_DC("%s: %d streams\n", __func__, stream_count);
2063
2064 for (i = 0; i < stream_count; i++) {
2065 struct dc_stream_state *stream = streams[i];
2066 struct dc_stream_status *status = dc_stream_get_status(stream);
2067
2068 dc_stream_log(dc, stream);
2069
2070 set[i].stream = stream;
2071
2072 if (status) {
2073 set[i].plane_count = status->plane_count;
2074 for (j = 0; j < status->plane_count; j++)
2075 set[i].plane_states[j] = status->plane_states[j];
2076 }
2077 }
2078
2079 /* ODM Combine 2:1 power optimization is only applied for the single-stream
2080 * scenario: it uses more pipes than strictly needed to reduce power consumption.
2081 * We need to switch off this feature to make room for new streams.
2082 */
2083 if (stream_count > dc->current_state->stream_count &&
2084 dc->current_state->stream_count == 1) {
2085 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2086 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
2087 if (pipe->next_odm_pipe)
2088 handle_exit_odm2to1 = true;
2089 }
2090 }
2091
2092 if (handle_exit_odm2to1)
2093 res = commit_minimal_transition_state(dc, dc->current_state);
2094
2095 context = dc_create_state(dc);
2096 if (!context)
2097 goto context_alloc_fail;
2098
2099 dc_resource_state_copy_construct_current(dc, context);
2100
2101 res = dc_validate_with_context(dc, set, stream_count, context, false);
2102 if (res != DC_OK) {
2103 BREAK_TO_DEBUGGER();
2104 goto fail;
2105 }
2106
2107 res = dc_commit_state_no_check(dc, context);
2108
2109 for (i = 0; i < stream_count; i++) {
2110 for (j = 0; j < context->stream_count; j++) {
2111 if (streams[i]->stream_id == context->streams[j]->stream_id)
2112 streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst;
2113
2114 if (dc_is_embedded_signal(streams[i]->signal)) {
2115 struct dc_stream_status *status = dc_stream_get_status_from_state(context, streams[i]);
2116
2117 if (dc->hwss.is_abm_supported)
2118 status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]);
2119 else
2120 status->is_abm_supported = true;
2121 }
2122 }
2123 }
2124
2125 fail:
2126 dc_release_state(context);
2127
2128 context_alloc_fail:
2129
2130 DC_LOG_DC("%s Finished.\n", __func__);
2131
2132 return res;
2133 }
2134
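/* Acquire (or release) a post-blend MPC 3D LUT and shaper for the pipe that
 * drives @stream. On acquire, the pipe's HUBP instance is used as the MPCC id
 * passed to the resource pool; on release, no pipe lookup is needed.
 * Returns true on success, false if no matching pipe was found or the pool
 * does not implement the acquire/release hooks.
 */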
2135 bool dc_acquire_release_mpc_3dlut(
2136 struct dc *dc, bool acquire,
2137 struct dc_stream_state *stream,
2138 struct dc_3dlut **lut,
2139 struct dc_transfer_func **shaper)
2140 {
2141 int pipe_idx;
2142 bool ret = false;
2143 bool found_pipe_idx = false;
2144 const struct resource_pool *pool = dc->res_pool;
2145 struct resource_context *res_ctx = &dc->current_state->res_ctx;
2146 int mpcc_id = 0;
2147
2148 if (pool && res_ctx) {
2149 if (acquire) {
2150 /*find pipe idx for the given stream*/
2151 for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
2152 if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
2153 found_pipe_idx = true;
2154 mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
2155 break;
2156 }
2157 }
2158 } else
2159 found_pipe_idx = true;/*for release pipe_idx is not required*/
2160
2161 if (found_pipe_idx) {
2162 if (acquire && pool->funcs->acquire_post_bldn_3dlut)
2163 ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
2164 else if (!acquire && pool->funcs->release_post_bldn_3dlut)
2165 ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
2166 }
2167 }
2168 return ret;
2169 }
2170
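/* Return true if any pipe with a plane state still has a flip pending.
 * Phantom (SubVP) pipes are skipped.
 */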
2171 static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
2172 {
2173 int i;
2174 struct pipe_ctx *pipe;
2175
2176 for (i = 0; i < MAX_PIPES; i++) {
2177 pipe = &context->res_ctx.pipe_ctx[i];
2178
2179 // Don't check flip pending on phantom pipes
2180 if (!pipe->plane_state || (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM))
2181 continue;
2182
2183 /* Must set to false to start with, due to OR in update function */
2184 pipe->plane_state->status.is_flip_pending = false;
2185 dc->hwss.update_pending_status(pipe);
2186 if (pipe->plane_state->status.is_flip_pending)
2187 return true;
2188 }
2189 return false;
2190 }
2191
2192 /* Perform updates here which need to be deferred until next vupdate
2193 *
2194 * i.e. blnd lut, 3dlut, and shaper lut bypass regs are double buffered
2195 * but forcing lut memory to shutdown state is immediate. This causes
2196 * single frame corruption as lut gets disabled mid-frame unless shutdown
2197 * is deferred until after entering bypass.
2198 */
2199 static void process_deferred_updates(struct dc *dc)
2200 {
2201 int i = 0;
2202
2203 if (dc->debug.enable_mem_low_power.bits.cm) {
2204 ASSERT(dc->dcn_ip->max_num_dpp);
2205 for (i = 0; i < dc->dcn_ip->max_num_dpp; i++)
2206 if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update)
2207 dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]);
2208 }
2209 }
2210
2211 void dc_post_update_surfaces_to_stream(struct dc *dc)
2212 {
2213 int i;
2214 struct dc_state *context = dc->current_state;
2215
2216 if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0)
2217 return;
2218
2219 post_surface_trace(dc);
2220
2221 /*
2222 * Only relevant for DCN behavior where we can guarantee the optimization
2223 * is safe to apply - retain the legacy behavior for DCE.
2224 */
2225
2226 if (dc->ctx->dce_version < DCE_VERSION_MAX)
2227 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
2228 else {
2229 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
2230
2231 if (is_flip_pending_in_pipes(dc, context))
2232 return;
2233
2234 for (i = 0; i < dc->res_pool->pipe_count; i++)
2235 if (context->res_ctx.pipe_ctx[i].stream == NULL ||
2236 context->res_ctx.pipe_ctx[i].plane_state == NULL) {
2237 context->res_ctx.pipe_ctx[i].pipe_idx = i;
2238 dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
2239 }
2240
2241 process_deferred_updates(dc);
2242
2243 dc->hwss.optimize_bandwidth(dc, context);
2244
2245 if (dc->debug.enable_double_buffered_dsc_pg_support)
2246 dc->hwss.update_dsc_pg(dc, context, true);
2247 }
2248
2249 dc->optimized_required = false;
2250 dc->wm_optimized_required = false;
2251 }
2252
2253 static void init_state(struct dc *dc, struct dc_state *context)
2254 {
2255 /* Each context must have its own instance of VBA, and in order to
2256 * initialize it and obtain the IP and SOC parameters, the base DML
2257 * instance from DC is initially copied into every context.
2258 */
2259 memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
2260 }
2261
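/* Allocate a new, reference-counted dc_state and seed it with the base DML
 * instance from DC (see init_state()).
 */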
2262 struct dc_state *dc_create_state(struct dc *dc)
2263 {
2264 struct dc_state *context = kvzalloc(sizeof(struct dc_state),
2265 GFP_KERNEL);
2266
2267 if (!context)
2268 return NULL;
2269
2270 init_state(dc, context);
2271
2272 kref_init(&context->refcount);
2273
2274 return context;
2275 }
2276
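/* Duplicate an existing dc_state: the struct is memcpy'd, intra-state pipe
 * pointers (top/bottom/ODM) are re-targeted at the new copy, and references
 * are taken on every stream and plane so the copy owns them independently.
 */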
2277 struct dc_state *dc_copy_state(struct dc_state *src_ctx)
2278 {
2279 int i, j;
2280 struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
2281
2282 if (!new_ctx)
2283 return NULL;
2284 memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
2285
2286 for (i = 0; i < MAX_PIPES; i++) {
2287 struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
2288
2289 if (cur_pipe->top_pipe)
2290 cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
2291
2292 if (cur_pipe->bottom_pipe)
2293 cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
2294
2295 if (cur_pipe->prev_odm_pipe)
2296 cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];
2297
2298 if (cur_pipe->next_odm_pipe)
2299 cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
2300
2301 }
2302
2303 for (i = 0; i < new_ctx->stream_count; i++) {
2304 dc_stream_retain(new_ctx->streams[i]);
2305 for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
2306 dc_plane_state_retain(
2307 new_ctx->stream_status[i].plane_states[j]);
2308 }
2309
2310 kref_init(&new_ctx->refcount);
2311
2312 return new_ctx;
2313 }
2314
2315 void dc_retain_state(struct dc_state *context)
2316 {
2317 kref_get(&context->refcount);
2318 }
2319
2320 static void dc_state_free(struct kref *kref)
2321 {
2322 struct dc_state *context = container_of(kref, struct dc_state, refcount);
2323 dc_resource_state_destruct(context);
2324 kvfree(context);
2325 }
2326
2327 void dc_release_state(struct dc_state *context)
2328 {
2329 kref_put(&context->refcount, dc_state_free);
2330 }
2331
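/* Route the stereo sync signal through the generic GPIO mux (or disable the
 * mux output when @enable is false). Returns false if the generic pin is not
 * available or any GPIO/mux call fails.
 */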
2332 bool dc_set_generic_gpio_for_stereo(bool enable,
2333 struct gpio_service *gpio_service)
2334 {
2335 enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
2336 struct gpio_pin_info pin_info;
2337 struct gpio *generic;
2338 struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
2339 GFP_KERNEL);
2340
2341 if (!config)
2342 return false;
2343 pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);
2344
2345 if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
2346 kfree(config);
2347 return false;
2348 } else {
2349 generic = dal_gpio_service_create_generic_mux(
2350 gpio_service,
2351 pin_info.offset,
2352 pin_info.mask);
2353 }
2354
2355 if (!generic) {
2356 kfree(config);
2357 return false;
2358 }
2359
2360 gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);
2361
2362 config->enable_output_from_mux = enable;
2363 config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;
2364
2365 if (gpio_result == GPIO_RESULT_OK)
2366 gpio_result = dal_mux_setup_config(generic, config);
2367
2368 if (gpio_result == GPIO_RESULT_OK) {
2369 dal_gpio_close(generic);
2370 dal_gpio_destroy_generic_mux(&generic);
2371 kfree(config);
2372 return true;
2373 } else {
2374 dal_gpio_close(generic);
2375 dal_gpio_destroy_generic_mux(&generic);
2376 kfree(config);
2377 return false;
2378 }
2379 }
2380
2381 static bool is_surface_in_context(
2382 const struct dc_state *context,
2383 const struct dc_plane_state *plane_state)
2384 {
2385 int j;
2386
2387 for (j = 0; j < MAX_PIPES; j++) {
2388 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2389
2390 if (plane_state == pipe_ctx->plane_state) {
2391 return true;
2392 }
2393 }
2394
2395 return false;
2396 }
2397
2398 static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
2399 {
2400 union surface_update_flags *update_flags = &u->surface->update_flags;
2401 enum surface_update_type update_type = UPDATE_TYPE_FAST;
2402
2403 if (!u->plane_info)
2404 return UPDATE_TYPE_FAST;
2405
2406 if (u->plane_info->color_space != u->surface->color_space) {
2407 update_flags->bits.color_space_change = 1;
2408 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2409 }
2410
2411 if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
2412 update_flags->bits.horizontal_mirror_change = 1;
2413 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2414 }
2415
2416 if (u->plane_info->rotation != u->surface->rotation) {
2417 update_flags->bits.rotation_change = 1;
2418 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2419 }
2420
2421 if (u->plane_info->format != u->surface->format) {
2422 update_flags->bits.pixel_format_change = 1;
2423 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2424 }
2425
2426 if (u->plane_info->stereo_format != u->surface->stereo_format) {
2427 update_flags->bits.stereo_format_change = 1;
2428 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2429 }
2430
2431 if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
2432 update_flags->bits.per_pixel_alpha_change = 1;
2433 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2434 }
2435
2436 if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
2437 update_flags->bits.global_alpha_change = 1;
2438 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2439 }
2440
2441 if (u->plane_info->dcc.enable != u->surface->dcc.enable
2442 || u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk
2443 || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
2444 /* During DCC on/off, stutter period is calculated before
2445 * DCC has fully transitioned. This results in incorrect
2446 * stutter period calculation. Triggering a full update will
2447 * recalculate stutter period.
2448 */
2449 update_flags->bits.dcc_change = 1;
2450 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2451 }
2452
2453 if (resource_pixel_format_to_bpp(u->plane_info->format) !=
2454 resource_pixel_format_to_bpp(u->surface->format)) {
2455 /* different bytes per element will require full bandwidth
2456 * and DML calculation
2457 */
2458 update_flags->bits.bpp_change = 1;
2459 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2460 }
2461
2462 if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
2463 || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
2464 update_flags->bits.plane_size_change = 1;
2465 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2466 }
2467
2468
2469 if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
2470 sizeof(union dc_tiling_info)) != 0) {
2471 update_flags->bits.swizzle_change = 1;
2472 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2473
2474 /* todo: below are HW dependent, we should add a hook to
2475 * DCE/N resource and validated there.
2476 */
2477 if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
2478 /* swizzled mode requires RQ to be setup properly,
2479 * thus need to run DML to calculate RQ settings
2480 */
2481 update_flags->bits.bandwidth_change = 1;
2482 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2483 }
2484 }
2485
2486 /* This should be UPDATE_TYPE_FAST if nothing has changed. */
2487 return update_type;
2488 }
2489
2490 static enum surface_update_type get_scaling_info_update_type(
2491 const struct dc_surface_update *u)
2492 {
2493 union surface_update_flags *update_flags = &u->surface->update_flags;
2494
2495 if (!u->scaling_info)
2496 return UPDATE_TYPE_FAST;
2497
2498 if (u->scaling_info->dst_rect.width != u->surface->dst_rect.width
2499 || u->scaling_info->dst_rect.height != u->surface->dst_rect.height
2500 || u->scaling_info->scaling_quality.integer_scaling !=
2501 u->surface->scaling_quality.integer_scaling
2502 ) {
2503 update_flags->bits.scaling_change = 1;
2504
2505 if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
2506 || u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
2507 && (u->scaling_info->dst_rect.width < u->surface->src_rect.width
2508 || u->scaling_info->dst_rect.height < u->surface->src_rect.height))
2509 /* Making dst rect smaller requires a bandwidth change */
2510 update_flags->bits.bandwidth_change = 1;
2511 }
2512
2513 if (u->scaling_info->src_rect.width != u->surface->src_rect.width
2514 || u->scaling_info->src_rect.height != u->surface->src_rect.height) {
2515
2516 update_flags->bits.scaling_change = 1;
2517 if (u->scaling_info->src_rect.width > u->surface->src_rect.width
2518 || u->scaling_info->src_rect.height > u->surface->src_rect.height)
2519 /* Making src rect bigger requires a bandwidth change */
2520 update_flags->bits.clock_change = 1;
2521 }
2522
2523 if (u->scaling_info->src_rect.x != u->surface->src_rect.x
2524 || u->scaling_info->src_rect.y != u->surface->src_rect.y
2525 || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
2526 || u->scaling_info->clip_rect.y != u->surface->clip_rect.y
2527 || u->scaling_info->dst_rect.x != u->surface->dst_rect.x
2528 || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
2529 update_flags->bits.position_change = 1;
2530
2531 if (update_flags->bits.clock_change
2532 || update_flags->bits.bandwidth_change
2533 || update_flags->bits.scaling_change)
2534 return UPDATE_TYPE_FULL;
2535
2536 if (update_flags->bits.position_change)
2537 return UPDATE_TYPE_MED;
2538
2539 return UPDATE_TYPE_FAST;
2540 }
2541
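/* Determine the update type for a single surface update and record the
 * corresponding bits in the surface's update_flags. A surface that is not in
 * the current context (or has force_full_update set) always gets a full
 * update with all flags raised.
 */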
2542 static enum surface_update_type det_surface_update(const struct dc *dc,
2543 const struct dc_surface_update *u)
2544 {
2545 const struct dc_state *context = dc->current_state;
2546 enum surface_update_type type;
2547 enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2548 union surface_update_flags *update_flags = &u->surface->update_flags;
2549
2550 if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
2551 update_flags->raw = 0xFFFFFFFF;
2552 return UPDATE_TYPE_FULL;
2553 }
2554
2555 update_flags->raw = 0; // Reset all flags
2556
2557 type = get_plane_info_update_type(u);
2558 elevate_update_type(&overall_type, type);
2559
2560 type = get_scaling_info_update_type(u);
2561 elevate_update_type(&overall_type, type);
2562
2563 if (u->flip_addr) {
2564 update_flags->bits.addr_update = 1;
2565 if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) {
2566 update_flags->bits.tmz_changed = 1;
2567 elevate_update_type(&overall_type, UPDATE_TYPE_FULL);
2568 }
2569 }
2570 if (u->in_transfer_func)
2571 update_flags->bits.in_transfer_func_change = 1;
2572
2573 if (u->input_csc_color_matrix)
2574 update_flags->bits.input_csc_change = 1;
2575
2576 if (u->coeff_reduction_factor)
2577 update_flags->bits.coeff_reduction_change = 1;
2578
2579 if (u->gamut_remap_matrix)
2580 update_flags->bits.gamut_remap_change = 1;
2581
2582 if (u->gamma) {
2583 enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
2584
2585 if (u->plane_info)
2586 format = u->plane_info->format;
2587 else if (u->surface)
2588 format = u->surface->format;
2589
2590 if (dce_use_lut(format))
2591 update_flags->bits.gamma_change = 1;
2592 }
2593
2594 if (u->lut3d_func || u->func_shaper)
2595 update_flags->bits.lut_3d = 1;
2596
2597 if (u->hdr_mult.value)
2598 if (u->hdr_mult.value != u->surface->hdr_mult.value) {
2599 update_flags->bits.hdr_mult = 1;
2600 elevate_update_type(&overall_type, UPDATE_TYPE_MED);
2601 }
2602
2603 if (update_flags->bits.in_transfer_func_change) {
2604 type = UPDATE_TYPE_MED;
2605 elevate_update_type(&overall_type, type);
2606 }
2607
2608 if (update_flags->bits.lut_3d) {
2609 type = UPDATE_TYPE_FULL;
2610 elevate_update_type(&overall_type, type);
2611 }
2612
2613 if (dc->debug.enable_legacy_fast_update &&
2614 (update_flags->bits.gamma_change ||
2615 update_flags->bits.gamut_remap_change ||
2616 update_flags->bits.input_csc_change ||
2617 update_flags->bits.coeff_reduction_change)) {
2618 type = UPDATE_TYPE_FULL;
2619 elevate_update_type(&overall_type, type);
2620 }
2621 return overall_type;
2622 }
2623
2624 static enum surface_update_type check_update_surfaces_for_stream(
2625 struct dc *dc,
2626 struct dc_surface_update *updates,
2627 int surface_count,
2628 struct dc_stream_update *stream_update,
2629 const struct dc_stream_status *stream_status)
2630 {
2631 int i;
2632 enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2633
2634 if (dc->idle_optimizations_allowed)
2635 overall_type = UPDATE_TYPE_FULL;
2636
2637 if (stream_status == NULL || stream_status->plane_count != surface_count)
2638 overall_type = UPDATE_TYPE_FULL;
2639
2640 if (stream_update && stream_update->pending_test_pattern) {
2641 overall_type = UPDATE_TYPE_FULL;
2642 }
2643
2644 /* some stream updates require passive update */
2645 if (stream_update) {
2646 union stream_update_flags *su_flags = &stream_update->stream->update_flags;
2647
2648 if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
2649 (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
2650 stream_update->integer_scaling_update)
2651 su_flags->bits.scaling = 1;
2652
2653 if (dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
2654 su_flags->bits.out_tf = 1;
2655
2656 if (stream_update->abm_level)
2657 su_flags->bits.abm_level = 1;
2658
2659 if (stream_update->dpms_off)
2660 su_flags->bits.dpms_off = 1;
2661
2662 if (stream_update->gamut_remap)
2663 su_flags->bits.gamut_remap = 1;
2664
2665 if (stream_update->wb_update)
2666 su_flags->bits.wb_update = 1;
2667
2668 if (stream_update->dsc_config)
2669 su_flags->bits.dsc_changed = 1;
2670
2671 if (stream_update->mst_bw_update)
2672 su_flags->bits.mst_bw = 1;
2673
2674 if (stream_update->stream && stream_update->stream->freesync_on_desktop &&
2675 (stream_update->vrr_infopacket || stream_update->allow_freesync ||
2676 stream_update->vrr_active_variable || stream_update->vrr_active_fixed))
2677 su_flags->bits.fams_changed = 1;
2678
2679 if (su_flags->raw != 0)
2680 overall_type = UPDATE_TYPE_FULL;
2681
2682 if (stream_update->output_csc_transform || stream_update->output_color_space)
2683 su_flags->bits.out_csc = 1;
2684
2685 /* Output transfer function changes do not require bandwidth recalculation,
2686 * so don't trigger a full update
2687 */
2688 if (!dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
2689 su_flags->bits.out_tf = 1;
2690 }
2691
2692 for (i = 0 ; i < surface_count; i++) {
2693 enum surface_update_type type =
2694 det_surface_update(dc, &updates[i]);
2695
2696 elevate_update_type(&overall_type, type);
2697 }
2698
2699 return overall_type;
2700 }
2701
2702 /*
2703 * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
2704 *
2705 * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
2706 */
2707 enum surface_update_type dc_check_update_surfaces_for_stream(
2708 struct dc *dc,
2709 struct dc_surface_update *updates,
2710 int surface_count,
2711 struct dc_stream_update *stream_update,
2712 const struct dc_stream_status *stream_status)
2713 {
2714 int i;
2715 enum surface_update_type type;
2716
2717 if (stream_update)
2718 stream_update->stream->update_flags.raw = 0;
2719 for (i = 0; i < surface_count; i++)
2720 updates[i].surface->update_flags.raw = 0;
2721
2722 type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
2723 if (type == UPDATE_TYPE_FULL) {
2724 if (stream_update) {
2725 uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
2726 stream_update->stream->update_flags.raw = 0xFFFFFFFF;
2727 stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
2728 }
2729 for (i = 0; i < surface_count; i++)
2730 updates[i].surface->update_flags.raw = 0xFFFFFFFF;
2731 }
2732
2733 if (type == UPDATE_TYPE_FAST) {
2734 // If there's an available clock comparator, we use that.
2735 if (dc->clk_mgr->funcs->are_clock_states_equal) {
2736 if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
2737 dc->optimized_required = true;
2738 // Else we fallback to mem compare.
2739 } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
2740 dc->optimized_required = true;
2741 }
2742
2743 dc->optimized_required |= dc->wm_optimized_required;
2744 }
2745
2746 return type;
2747 }
2748
2749 static struct dc_stream_status *stream_get_status(
2750 struct dc_state *ctx,
2751 struct dc_stream_state *stream)
2752 {
2753 uint8_t i;
2754
2755 for (i = 0; i < ctx->stream_count; i++) {
2756 if (stream == ctx->streams[i]) {
2757 return &ctx->stream_status[i];
2758 }
2759 }
2760
2761 return NULL;
2762 }
2763
2764 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
2765
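/* Copy only the fields that are present in @srf_update into the plane state.
 * Optional pointers that are NULL leave the corresponding plane fields
 * untouched.
 */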
2766 static void copy_surface_update_to_plane(
2767 struct dc_plane_state *surface,
2768 struct dc_surface_update *srf_update)
2769 {
2770 if (srf_update->flip_addr) {
2771 surface->address = srf_update->flip_addr->address;
2772 surface->flip_immediate =
2773 srf_update->flip_addr->flip_immediate;
2774 surface->time.time_elapsed_in_us[surface->time.index] =
2775 srf_update->flip_addr->flip_timestamp_in_us -
2776 surface->time.prev_update_time_in_us;
2777 surface->time.prev_update_time_in_us =
2778 srf_update->flip_addr->flip_timestamp_in_us;
2779 surface->time.index++;
2780 if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
2781 surface->time.index = 0;
2782
2783 surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
2784 }
2785
2786 if (srf_update->scaling_info) {
2787 surface->scaling_quality =
2788 srf_update->scaling_info->scaling_quality;
2789 surface->dst_rect =
2790 srf_update->scaling_info->dst_rect;
2791 surface->src_rect =
2792 srf_update->scaling_info->src_rect;
2793 surface->clip_rect =
2794 srf_update->scaling_info->clip_rect;
2795 }
2796
2797 if (srf_update->plane_info) {
2798 surface->color_space =
2799 srf_update->plane_info->color_space;
2800 surface->format =
2801 srf_update->plane_info->format;
2802 surface->plane_size =
2803 srf_update->plane_info->plane_size;
2804 surface->rotation =
2805 srf_update->plane_info->rotation;
2806 surface->horizontal_mirror =
2807 srf_update->plane_info->horizontal_mirror;
2808 surface->stereo_format =
2809 srf_update->plane_info->stereo_format;
2810 surface->tiling_info =
2811 srf_update->plane_info->tiling_info;
2812 surface->visible =
2813 srf_update->plane_info->visible;
2814 surface->per_pixel_alpha =
2815 srf_update->plane_info->per_pixel_alpha;
2816 surface->global_alpha =
2817 srf_update->plane_info->global_alpha;
2818 surface->global_alpha_value =
2819 srf_update->plane_info->global_alpha_value;
2820 surface->dcc =
2821 srf_update->plane_info->dcc;
2822 surface->layer_index =
2823 srf_update->plane_info->layer_index;
2824 }
2825
2826 if (srf_update->gamma &&
2827 (surface->gamma_correction !=
2828 srf_update->gamma)) {
2829 memcpy(&surface->gamma_correction->entries,
2830 &srf_update->gamma->entries,
2831 sizeof(struct dc_gamma_entries));
2832 surface->gamma_correction->is_identity =
2833 srf_update->gamma->is_identity;
2834 surface->gamma_correction->num_entries =
2835 srf_update->gamma->num_entries;
2836 surface->gamma_correction->type =
2837 srf_update->gamma->type;
2838 }
2839
2840 if (srf_update->in_transfer_func &&
2841 (surface->in_transfer_func !=
2842 srf_update->in_transfer_func)) {
2843 surface->in_transfer_func->sdr_ref_white_level =
2844 srf_update->in_transfer_func->sdr_ref_white_level;
2845 surface->in_transfer_func->tf =
2846 srf_update->in_transfer_func->tf;
2847 surface->in_transfer_func->type =
2848 srf_update->in_transfer_func->type;
2849 memcpy(&surface->in_transfer_func->tf_pts,
2850 &srf_update->in_transfer_func->tf_pts,
2851 sizeof(struct dc_transfer_func_distributed_points));
2852 }
2853
2854 if (srf_update->func_shaper &&
2855 (surface->in_shaper_func !=
2856 srf_update->func_shaper))
2857 memcpy(surface->in_shaper_func, srf_update->func_shaper,
2858 sizeof(*surface->in_shaper_func));
2859
2860 if (srf_update->lut3d_func &&
2861 (surface->lut3d_func !=
2862 srf_update->lut3d_func))
2863 memcpy(surface->lut3d_func, srf_update->lut3d_func,
2864 sizeof(*surface->lut3d_func));
2865
2866 if (srf_update->hdr_mult.value)
2867 surface->hdr_mult =
2868 srf_update->hdr_mult;
2869
2870 if (srf_update->blend_tf &&
2871 (surface->blend_tf !=
2872 srf_update->blend_tf))
2873 memcpy(surface->blend_tf, srf_update->blend_tf,
2874 sizeof(*surface->blend_tf));
2875
2876 if (srf_update->input_csc_color_matrix)
2877 surface->input_csc_color_matrix =
2878 *srf_update->input_csc_color_matrix;
2879
2880 if (srf_update->coeff_reduction_factor)
2881 surface->coeff_reduction_factor =
2882 *srf_update->coeff_reduction_factor;
2883
2884 if (srf_update->gamut_remap_matrix)
2885 surface->gamut_remap_matrix =
2886 *srf_update->gamut_remap_matrix;
2887 }
2888
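/* Apply the optional fields of a stream update onto the stream itself. A DSC
 * config change is first validated against a temporary copy of the current
 * state; if validation fails, the old DSC config is restored and the update's
 * dsc_config pointer is cleared.
 */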
2889 static void copy_stream_update_to_stream(struct dc *dc,
2890 struct dc_state *context,
2891 struct dc_stream_state *stream,
2892 struct dc_stream_update *update)
2893 {
2894 struct dc_context *dc_ctx = dc->ctx;
2895
2896 if (update == NULL || stream == NULL)
2897 return;
2898
2899 if (update->src.height && update->src.width)
2900 stream->src = update->src;
2901
2902 if (update->dst.height && update->dst.width)
2903 stream->dst = update->dst;
2904
2905 if (update->out_transfer_func &&
2906 stream->out_transfer_func != update->out_transfer_func) {
2907 stream->out_transfer_func->sdr_ref_white_level =
2908 update->out_transfer_func->sdr_ref_white_level;
2909 stream->out_transfer_func->tf = update->out_transfer_func->tf;
2910 stream->out_transfer_func->type =
2911 update->out_transfer_func->type;
2912 memcpy(&stream->out_transfer_func->tf_pts,
2913 &update->out_transfer_func->tf_pts,
2914 sizeof(struct dc_transfer_func_distributed_points));
2915 }
2916
2917 if (update->hdr_static_metadata)
2918 stream->hdr_static_metadata = *update->hdr_static_metadata;
2919
2920 if (update->abm_level)
2921 stream->abm_level = *update->abm_level;
2922
2923 if (update->periodic_interrupt)
2924 stream->periodic_interrupt = *update->periodic_interrupt;
2925
2926 if (update->gamut_remap)
2927 stream->gamut_remap_matrix = *update->gamut_remap;
2928
2929 /* Note: updating this after mode set is currently not a use case;
2930 * however, if it arises, OCSC would need to be reprogrammed at a
2931 * minimum.
2932 */
2933 if (update->output_color_space)
2934 stream->output_color_space = *update->output_color_space;
2935
2936 if (update->output_csc_transform)
2937 stream->csc_color_matrix = *update->output_csc_transform;
2938
2939 if (update->vrr_infopacket)
2940 stream->vrr_infopacket = *update->vrr_infopacket;
2941
2942 if (update->allow_freesync)
2943 stream->allow_freesync = *update->allow_freesync;
2944
2945 if (update->vrr_active_variable)
2946 stream->vrr_active_variable = *update->vrr_active_variable;
2947
2948 if (update->vrr_active_fixed)
2949 stream->vrr_active_fixed = *update->vrr_active_fixed;
2950
2951 if (update->crtc_timing_adjust)
2952 stream->adjust = *update->crtc_timing_adjust;
2953
2954 if (update->dpms_off)
2955 stream->dpms_off = *update->dpms_off;
2956
2957 if (update->hfvsif_infopacket)
2958 stream->hfvsif_infopacket = *update->hfvsif_infopacket;
2959
2960 if (update->vtem_infopacket)
2961 stream->vtem_infopacket = *update->vtem_infopacket;
2962
2963 if (update->vsc_infopacket)
2964 stream->vsc_infopacket = *update->vsc_infopacket;
2965
2966 if (update->vsp_infopacket)
2967 stream->vsp_infopacket = *update->vsp_infopacket;
2968
2969 if (update->adaptive_sync_infopacket)
2970 stream->adaptive_sync_infopacket = *update->adaptive_sync_infopacket;
2971
2972 if (update->dither_option)
2973 stream->dither_option = *update->dither_option;
2974
2975 if (update->pending_test_pattern)
2976 stream->test_pattern = *update->pending_test_pattern;
2977 /* update current stream with writeback info */
2978 if (update->wb_update) {
2979 int i;
2980
2981 stream->num_wb_info = update->wb_update->num_wb_info;
2982 ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
2983 for (i = 0; i < stream->num_wb_info; i++)
2984 stream->writeback_info[i] =
2985 update->wb_update->writeback_info[i];
2986 }
2987 if (update->dsc_config) {
2988 struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
2989 uint32_t old_dsc_enabled = stream->timing.flags.DSC;
2990 uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
2991 update->dsc_config->num_slices_v != 0);
2992
2993 /* Use a temporary context for validating the new DSC config */
2994 struct dc_state *dsc_validate_context = dc_create_state(dc);
2995
2996 if (dsc_validate_context) {
2997 dc_resource_state_copy_construct(dc->current_state, dsc_validate_context);
2998
2999 stream->timing.dsc_cfg = *update->dsc_config;
3000 stream->timing.flags.DSC = enable_dsc;
3001 if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
3002 stream->timing.dsc_cfg = old_dsc_cfg;
3003 stream->timing.flags.DSC = old_dsc_enabled;
3004 update->dsc_config = NULL;
3005 }
3006
3007 dc_release_state(dsc_validate_context);
3008 } else {
3009 DC_ERROR("Failed to allocate new validate context for DSC change\n");
3010 update->dsc_config = NULL;
3011 }
3012 }
3013 }
3014
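/* Build the dc_state that reflects the requested surface and stream updates.
 * Fast/medium updates reuse dc->current_state; full updates construct and
 * validate a new context with the old planes removed and the new planes
 * added. On success, the resulting context and update type are returned via
 * @new_context and @new_update_type.
 */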
3015 static bool update_planes_and_stream_state(struct dc *dc,
3016 struct dc_surface_update *srf_updates, int surface_count,
3017 struct dc_stream_state *stream,
3018 struct dc_stream_update *stream_update,
3019 enum surface_update_type *new_update_type,
3020 struct dc_state **new_context)
3021 {
3022 struct dc_state *context;
3023 int i, j;
3024 enum surface_update_type update_type;
3025 const struct dc_stream_status *stream_status;
3026 struct dc_context *dc_ctx = dc->ctx;
3027
3028 stream_status = dc_stream_get_status(stream);
3029
3030 if (!stream_status) {
3031 if (surface_count) /* Only an error condition if surf_count non-zero*/
3032 ASSERT(false);
3033
3034 return false; /* Cannot commit surface to stream that is not committed */
3035 }
3036
3037 context = dc->current_state;
3038
3039 update_type = dc_check_update_surfaces_for_stream(
3040 dc, srf_updates, surface_count, stream_update, stream_status);
3041
3042 /* update current stream with the new updates */
3043 copy_stream_update_to_stream(dc, context, stream, stream_update);
3044
3045 /* do not perform surface update if surface has invalid dimensions
3046 * (all zero) and no scaling_info is provided
3047 */
3048 if (surface_count > 0) {
3049 for (i = 0; i < surface_count; i++) {
3050 if ((srf_updates[i].surface->src_rect.width == 0 ||
3051 srf_updates[i].surface->src_rect.height == 0 ||
3052 srf_updates[i].surface->dst_rect.width == 0 ||
3053 srf_updates[i].surface->dst_rect.height == 0) &&
3054 (!srf_updates[i].scaling_info ||
3055 srf_updates[i].scaling_info->src_rect.width == 0 ||
3056 srf_updates[i].scaling_info->src_rect.height == 0 ||
3057 srf_updates[i].scaling_info->dst_rect.width == 0 ||
3058 srf_updates[i].scaling_info->dst_rect.height == 0)) {
3059 DC_ERROR("Invalid src/dst rects in surface update!\n");
3060 return false;
3061 }
3062 }
3063 }
3064
3065 if (update_type >= update_surface_trace_level)
3066 update_surface_trace(dc, srf_updates, surface_count);
3067
3068 if (update_type >= UPDATE_TYPE_FULL) {
3069 struct dc_plane_state *new_planes[MAX_SURFACES] = {0};
3070
3071 for (i = 0; i < surface_count; i++)
3072 new_planes[i] = srf_updates[i].surface;
3073
3074 /* initialize scratch memory for building context */
3075 context = dc_create_state(dc);
3076 if (context == NULL) {
3077 DC_ERROR("Failed to allocate new validate context!\n");
3078 return false;
3079 }
3080
3081 dc_resource_state_copy_construct(
3082 dc->current_state, context);
3083
3084 /* For each full update, remove all existing phantom pipes first. This
3085 * ensures that we have enough pipes for newly added MPO planes.
3086 */
3087 if (dc->res_pool->funcs->remove_phantom_pipes)
3088 dc->res_pool->funcs->remove_phantom_pipes(dc, context, false);
3089
3090 /*remove old surfaces from context */
3091 if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
3092
3093 BREAK_TO_DEBUGGER();
3094 goto fail;
3095 }
3096
3097 /* add surface to context */
3098 if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
3099
3100 BREAK_TO_DEBUGGER();
3101 goto fail;
3102 }
3103 }
3104
3105 /* save update parameters into surface */
3106 for (i = 0; i < surface_count; i++) {
3107 struct dc_plane_state *surface = srf_updates[i].surface;
3108
3109 copy_surface_update_to_plane(surface, &srf_updates[i]);
3110
3111 if (update_type >= UPDATE_TYPE_MED) {
3112 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3113 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3114
3115 if (pipe_ctx->plane_state != surface)
3116 continue;
3117
3118 resource_build_scaling_params(pipe_ctx);
3119 }
3120 }
3121 }
3122
3123 if (update_type == UPDATE_TYPE_FULL) {
3124 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
3125 /* For phantom pipes we remove and create a new set of phantom pipes
3126 * for each full update (because we don't know if we'll need phantom
3127 * pipes until after the first round of validation). However, if validation
3128 * fails we need to keep the existing phantom pipes (because we don't update
3129 * the dc->current_state).
3130 *
3131 * The phantom stream/plane refcount is decremented for validation because
3132 * we assume it'll be removed (the free comes when the dc_state is freed),
3133 * but if validation fails we have to increment back the refcount so it's
3134 * consistent.
3135 */
3136 if (dc->res_pool->funcs->retain_phantom_pipes)
3137 dc->res_pool->funcs->retain_phantom_pipes(dc, dc->current_state);
3138 BREAK_TO_DEBUGGER();
3139 goto fail;
3140 }
3141 }
3142
3143 *new_context = context;
3144 *new_update_type = update_type;
3145
3146 return true;
3147
3148 fail:
3149 dc_release_state(context);
3150
3151 return false;
3152
3153 }
3154
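/* Program stream-level updates on the OTG master pipe for @stream.
 * Infoframe, dynamic metadata, gamut remap, output CSC and dither changes are
 * applied for every update type; the remaining programming (DSC, MST
 * bandwidth, test pattern, DPMS, ABM) is skipped for fast updates.
 */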
3155 static void commit_planes_do_stream_update(struct dc *dc,
3156 struct dc_stream_state *stream,
3157 struct dc_stream_update *stream_update,
3158 enum surface_update_type update_type,
3159 struct dc_state *context)
3160 {
3161 int j;
3162
3163 // Stream updates
3164 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3165 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3166
3167 if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_ctx->stream == stream) {
3168
3169 if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt)
3170 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx);
3171
3172 if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
3173 stream_update->vrr_infopacket ||
3174 stream_update->vsc_infopacket ||
3175 stream_update->vsp_infopacket ||
3176 stream_update->hfvsif_infopacket ||
3177 stream_update->adaptive_sync_infopacket ||
3178 stream_update->vtem_infopacket) {
3179 resource_build_info_frame(pipe_ctx);
3180 dc->hwss.update_info_frame(pipe_ctx);
3181
3182 if (dc_is_dp_signal(pipe_ctx->stream->signal))
3183 dc->link_srv->dp_trace_source_sequence(
3184 pipe_ctx->stream->link,
3185 DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
3186 }
3187
3188 if (stream_update->hdr_static_metadata &&
3189 stream->use_dynamic_meta &&
3190 dc->hwss.set_dmdata_attributes &&
3191 pipe_ctx->stream->dmdata_address.quad_part != 0)
3192 dc->hwss.set_dmdata_attributes(pipe_ctx);
3193
3194 if (stream_update->gamut_remap)
3195 dc_stream_set_gamut_remap(dc, stream);
3196
3197 if (stream_update->output_csc_transform)
3198 dc_stream_program_csc_matrix(dc, stream);
3199
3200 if (stream_update->dither_option) {
3201 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
3202 resource_build_bit_depth_reduction_params(pipe_ctx->stream,
3203 &pipe_ctx->stream->bit_depth_params);
3204 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
3205 &stream->bit_depth_params,
3206 &stream->clamping);
3207 while (odm_pipe) {
3208 odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
3209 &stream->bit_depth_params,
3210 &stream->clamping);
3211 odm_pipe = odm_pipe->next_odm_pipe;
3212 }
3213 }
3214
3215
3216 /* Full front end update */
3217 if (update_type == UPDATE_TYPE_FAST)
3218 continue;
3219
3220 if (stream_update->dsc_config)
3221 dc->link_srv->update_dsc_config(pipe_ctx);
3222
3223 if (stream_update->mst_bw_update) {
3224 if (stream_update->mst_bw_update->is_increase)
3225 dc->link_srv->increase_mst_payload(pipe_ctx,
3226 stream_update->mst_bw_update->mst_stream_bw);
3227 else
3228 dc->link_srv->reduce_mst_payload(pipe_ctx,
3229 stream_update->mst_bw_update->mst_stream_bw);
3230 }
3231
3232 if (stream_update->pending_test_pattern) {
3233 dc_link_dp_set_test_pattern(stream->link,
3234 stream->test_pattern.type,
3235 stream->test_pattern.color_space,
3236 stream->test_pattern.p_link_settings,
3237 stream->test_pattern.p_custom_pattern,
3238 stream->test_pattern.cust_pattern_size);
3239 }
3240
3241 if (stream_update->dpms_off) {
3242 if (*stream_update->dpms_off) {
3243 dc->link_srv->set_dpms_off(pipe_ctx);
3244 /* for dpms, keep acquired resources*/
3245 if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
3246 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
3247
3248 dc->optimized_required = true;
3249
3250 } else {
3251 if (get_seamless_boot_stream_count(context) == 0)
3252 dc->hwss.prepare_bandwidth(dc, dc->current_state);
3253 dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx);
3254 }
3255 } else if (pipe_ctx->stream->link->wa_flags.blank_stream_on_ocs_change && stream_update->output_color_space
3256 && !stream->dpms_off && dc_is_dp_signal(pipe_ctx->stream->signal)) {
3257 /*
3258 * Workaround for firmware issue in some receivers where they don't pick up
3259 * correct output color space unless DP link is disabled/re-enabled
3260 */
3261 dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx);
3262 }
3263
3264 if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
3265 bool should_program_abm = true;
3266
3267 // if otg funcs defined check if blanked before programming
3268 if (pipe_ctx->stream_res.tg->funcs->is_blanked)
3269 if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
3270 should_program_abm = false;
3271
3272 if (should_program_abm) {
3273 if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
3274 dc->hwss.set_abm_immediate_disable(pipe_ctx);
3275 } else {
3276 pipe_ctx->stream_res.abm->funcs->set_abm_level(
3277 pipe_ctx->stream_res.abm, stream->abm_level);
3278 }
3279 }
3280 }
3281 }
3282 }
3283 }
3284
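/* Dirty rectangle commands are sent only for PSR (version 1 or SU) streams on
 * DCN 3.1+ ASICs, or for links that support Panel Replay.
 */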
3285 static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream)
3286 {
3287 if ((stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1
3288 || stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
3289 && stream->ctx->dce_version >= DCN_VERSION_3_1)
3290 return true;
3291
3292 if (stream->link->replay_settings.config.replay_supported)
3293 return true;
3294
3295 return false;
3296 }
3297
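/* Send a DMUB dirty-rectangle update for every surface in @srf_updates that
 * has new flip address info and is not using immediate flip, targeting each
 * pipe in @context that scans out that surface for @stream.
 */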
3298 void dc_dmub_update_dirty_rect(struct dc *dc,
3299 int surface_count,
3300 struct dc_stream_state *stream,
3301 struct dc_surface_update *srf_updates,
3302 struct dc_state *context)
3303 {
3304 union dmub_rb_cmd cmd;
3305 struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
3306 unsigned int i, j;
3307 unsigned int panel_inst = 0;
3308
3309 if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
3310 return;
3311
3312 if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
3313 return;
3314
3315 memset(&cmd, 0x0, sizeof(cmd));
3316 cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
3317 cmd.update_dirty_rect.header.sub_type = 0;
3318 cmd.update_dirty_rect.header.payload_bytes =
3319 sizeof(cmd.update_dirty_rect) -
3320 sizeof(cmd.update_dirty_rect.header);
3321 update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data;
3322 for (i = 0; i < surface_count; i++) {
3323 struct dc_plane_state *plane_state = srf_updates[i].surface;
3324 const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr;
3325
3326 if (!srf_updates[i].surface || !flip_addr)
3327 continue;
3328 /* Do not send in immediate flip mode */
3329 if (srf_updates[i].surface->flip_immediate)
3330 continue;
3331
3332 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
3333 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
3334 sizeof(flip_addr->dirty_rects));
3335 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3336 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3337
3338 if (pipe_ctx->stream != stream)
3339 continue;
3340 if (pipe_ctx->plane_state != plane_state)
3341 continue;
3342
3343 update_dirty_rect->panel_inst = panel_inst;
3344 update_dirty_rect->pipe_idx = j;
3345 dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
3346 }
3347 }
3348 }
3349
3350 static void build_dmub_update_dirty_rect(
3351 struct dc *dc,
3352 int surface_count,
3353 struct dc_stream_state *stream,
3354 struct dc_surface_update *srf_updates,
3355 struct dc_state *context,
3356 struct dc_dmub_cmd dc_dmub_cmd[],
3357 unsigned int *dmub_cmd_count)
3358 {
3359 union dmub_rb_cmd cmd;
3360 struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
3361 unsigned int i, j;
3362 unsigned int panel_inst = 0;
3363
3364 if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
3365 return;
3366
3367 if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
3368 return;
3369
3370 memset(&cmd, 0x0, sizeof(cmd));
3371 cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
3372 cmd.update_dirty_rect.header.sub_type = 0;
3373 cmd.update_dirty_rect.header.payload_bytes =
3374 sizeof(cmd.update_dirty_rect) -
3375 sizeof(cmd.update_dirty_rect.header);
3376 update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data;
3377 for (i = 0; i < surface_count; i++) {
3378 struct dc_plane_state *plane_state = srf_updates[i].surface;
3379 const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr;
3380
3381 if (!srf_updates[i].surface || !flip_addr)
3382 continue;
3383 /* Do not send in immediate flip mode */
3384 if (srf_updates[i].surface->flip_immediate)
3385 continue;
3386 update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
3387 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
3388 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
3389 sizeof(flip_addr->dirty_rects));
3390 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3391 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3392
3393 if (pipe_ctx->stream != stream)
3394 continue;
3395 if (pipe_ctx->plane_state != plane_state)
3396 continue;
3397 update_dirty_rect->panel_inst = panel_inst;
3398 update_dirty_rect->pipe_idx = j;
3399 dc_dmub_cmd[*dmub_cmd_count].dmub_cmd = cmd;
3400 dc_dmub_cmd[*dmub_cmd_count].wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
3401 (*dmub_cmd_count)++;
3402 }
3403 }
3404 }
3405
3406
3407 /**
3408 * build_dmub_cmd_list() - Build an array of DMCUB commands to be sent to DMCUB
3409 *
3410 * @dc: Current DC state
3411 * @srf_updates: Array of surface updates
3412 * @surface_count: Number of surfaces that have an update
3413 * @stream: Corresponding stream to be updated in the current flip
3414 * @context: New DC state to be programmed
3415 *
3416 * @dc_dmub_cmd: Array of DMCUB commands to be sent to DMCUB
3417 * @dmub_cmd_count: Count indicating the number of DMCUB commands in dc_dmub_cmd array
3418 *
3419 * This function builds an array of DMCUB commands to be sent to DMCUB. Building the array up
3420 * front is required so that the commands can all be sent while the OTG lock is acquired.
3421 *
3422 * Return: void
3423 */
3424 static void build_dmub_cmd_list(struct dc *dc,
3425 struct dc_surface_update *srf_updates,
3426 int surface_count,
3427 struct dc_stream_state *stream,
3428 struct dc_state *context,
3429 struct dc_dmub_cmd dc_dmub_cmd[],
3430 unsigned int *dmub_cmd_count)
3431 {
3432 // Initialize cmd count to 0
3433 *dmub_cmd_count = 0;
3434 build_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context, dc_dmub_cmd, dmub_cmd_count);
3435 }
3436
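/*
 * Fast-path commit: gather the DMUB commands for this flip, translate the
 * update into a block sequence with hwss_build_fast_sequence(), and execute
 * that sequence. This path is taken when only "fast" surface/stream attributes
 * changed and dc->debug.enable_legacy_fast_update is not set.
 */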
3437 static void commit_planes_for_stream_fast(struct dc *dc,
3438 struct dc_surface_update *srf_updates,
3439 int surface_count,
3440 struct dc_stream_state *stream,
3441 struct dc_stream_update *stream_update,
3442 enum surface_update_type update_type,
3443 struct dc_state *context)
3444 {
3445 int i, j;
3446 struct pipe_ctx *top_pipe_to_program = NULL;
3447 dc_z10_restore(dc);
3448
3449 top_pipe_to_program = resource_get_otg_master_for_stream(
3450 &context->res_ctx,
3451 stream);
3452
3453 if (dc->debug.visual_confirm) {
3454 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3455 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3456
3457 if (pipe->stream && pipe->plane_state)
3458 dc_update_viusal_confirm_color(dc, context, pipe);
3459 }
3460 }
3461
3462 for (i = 0; i < surface_count; i++) {
3463 struct dc_plane_state *plane_state = srf_updates[i].surface;
3464 /*set logical flag for lock/unlock use*/
3465 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3466 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3467
3468 if (!pipe_ctx->plane_state)
3469 continue;
3470 if (should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3471 continue;
3472 pipe_ctx->plane_state->triplebuffer_flips = false;
3473 if (update_type == UPDATE_TYPE_FAST &&
3474 dc->hwss.program_triplebuffer &&
3475 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
3476 /*triple buffer for VUpdate only*/
3477 pipe_ctx->plane_state->triplebuffer_flips = true;
3478 }
3479 }
3480 }
3481
3482 build_dmub_cmd_list(dc,
3483 srf_updates,
3484 surface_count,
3485 stream,
3486 context,
3487 context->dc_dmub_cmd,
3488 &(context->dmub_cmd_count));
3489 hwss_build_fast_sequence(dc,
3490 context->dc_dmub_cmd,
3491 context->dmub_cmd_count,
3492 context->block_sequence,
3493 &(context->block_sequence_steps),
3494 top_pipe_to_program);
3495 hwss_execute_sequence(dc,
3496 context->block_sequence,
3497 context->block_sequence_steps);
3498 /* Clear update flags so next flip doesn't have redundant programming
3499 * (if there's no stream update, the update flags are not cleared).
3500 * Surface updates are cleared unconditionally at the beginning of each flip,
3501 * so no need to clear here.
3502 */
3503 if (top_pipe_to_program->stream)
3504 top_pipe_to_program->stream->update_flags.raw = 0;
3505 }
3506
3507 static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context)
3508 {
3509 /*
3510 * This function calls HWSS to wait for any potentially double buffered
3511 * operations to complete. It should be invoked as a preamble prior
3512 * to full update programming before asserting any HW locks.
3513 */
3514 int pipe_idx;
3515 int opp_inst;
3516 int opp_count = dc->res_pool->pipe_count;
3517 struct hubp *hubp;
3518 int mpcc_inst;
3519 const struct pipe_ctx *pipe_ctx;
3520
3521 for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
3522 pipe_ctx = &dc_context->res_ctx.pipe_ctx[pipe_idx];
3523
3524 if (!pipe_ctx->stream)
3525 continue;
3526
3527 if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
3528 pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
3529
3530 hubp = pipe_ctx->plane_res.hubp;
3531 if (!hubp)
3532 continue;
3533
3534 mpcc_inst = hubp->inst;
3535 // MPCC inst is equal to pipe index in practice
3536 for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
3537 if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) {
3538 dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
3539 dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
3540 break;
3541 }
3542 }
3543 }
3544 }
3545
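/*
 * Full/legacy commit path: wait for outstanding double-buffered updates,
 * prepare bandwidth for full updates, lock the affected pipes, apply stream
 * and surface programming, then unlock and handle SubVP/phantom pipe
 * sequencing.
 */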
3546 static void commit_planes_for_stream(struct dc *dc,
3547 struct dc_surface_update *srf_updates,
3548 int surface_count,
3549 struct dc_stream_state *stream,
3550 struct dc_stream_update *stream_update,
3551 enum surface_update_type update_type,
3552 struct dc_state *context)
3553 {
3554 int i, j;
3555 struct pipe_ctx *top_pipe_to_program = NULL;
3556 bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
3557 bool subvp_prev_use = false;
3558 bool subvp_curr_use = false;
3559
3560 // Once we apply the new subvp context to hardware it won't be in the
3561 // dc->current_state anymore, so we have to cache it before we apply
3562 // the new SubVP context
3563 subvp_prev_use = false;
3564 dc_z10_restore(dc);
3565 if (update_type == UPDATE_TYPE_FULL)
3566 wait_for_outstanding_hw_updates(dc, context);
3567
3568 if (update_type == UPDATE_TYPE_FULL) {
3569 dc_allow_idle_optimizations(dc, false);
3570
3571 if (get_seamless_boot_stream_count(context) == 0)
3572 dc->hwss.prepare_bandwidth(dc, context);
3573
3574 if (dc->debug.enable_double_buffered_dsc_pg_support)
3575 dc->hwss.update_dsc_pg(dc, context, false);
3576
3577 context_clock_trace(dc, context);
3578 }
3579
3580 top_pipe_to_program = resource_get_otg_master_for_stream(
3581 &context->res_ctx,
3582 stream);
3583
3584 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3585 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
3586
3587 // Check old context for SubVP
3588 subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
3589 if (subvp_prev_use)
3590 break;
3591 }
3592
3593 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3594 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3595
3596 if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
3597 subvp_curr_use = true;
3598 break;
3599 }
3600 }
3601
3602 if (dc->debug.visual_confirm)
3603 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3604 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
3605
3606 if (pipe->stream && pipe->plane_state)
3607 dc_update_viusal_confirm_color(dc, context, pipe);
3608 }
3609
3610 if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
3611 struct pipe_ctx *mpcc_pipe;
3612 struct pipe_ctx *odm_pipe;
3613
3614 for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
3615 for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
3616 odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
3617 }
3618
3619 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
3620 if (top_pipe_to_program &&
3621 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
3622 if (should_use_dmub_lock(stream->link)) {
3623 union dmub_hw_lock_flags hw_locks = { 0 };
3624 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
3625
3626 hw_locks.bits.lock_dig = 1;
3627 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
3628
3629 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
3630 true,
3631 &hw_locks,
3632 &inst_flags);
3633 } else
3634 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
3635 top_pipe_to_program->stream_res.tg);
3636 }
3637
3638 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3639 if (dc->hwss.subvp_pipe_control_lock)
3640 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use);
3641 dc->hwss.interdependent_update_lock(dc, context, true);
3642
3643 } else {
3644 if (dc->hwss.subvp_pipe_control_lock)
3645 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
3646 /* Lock the top pipe while updating plane addrs, since freesync requires
3647 * plane addr update event triggers to be synchronized.
3648 * top_pipe_to_program is expected to never be NULL
3649 */
3650 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
3651 }
3652
3653 dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context);
3654
3655 // Stream updates
3656 if (stream_update)
3657 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
3658
3659 if (surface_count == 0) {
3660 /*
3661 * In case of turning off the screen, there is no need to program the front end a
3662 * second time; just return after programming blank.
3663 */
3664 if (dc->hwss.apply_ctx_for_surface)
3665 dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
3666 if (dc->hwss.program_front_end_for_ctx)
3667 dc->hwss.program_front_end_for_ctx(dc, context);
3668
3669 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3670 dc->hwss.interdependent_update_lock(dc, context, false);
3671 } else {
3672 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
3673 }
3674 dc->hwss.post_unlock_program_front_end(dc, context);
3675
3676 if (update_type != UPDATE_TYPE_FAST)
3677 if (dc->hwss.commit_subvp_config)
3678 dc->hwss.commit_subvp_config(dc, context);
3679
3680 /* Since phantom pipe programming is moved to post_unlock_program_front_end,
3681 * move the SubVP lock to after the phantom pipes have been setup
3682 */
3683 if (dc->hwss.subvp_pipe_control_lock)
3684 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes,
3685 NULL, subvp_prev_use);
3686 return;
3687 }
3688
3689 if (update_type != UPDATE_TYPE_FAST) {
3690 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3691 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3692
3693 if ((dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP ||
3694 dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) &&
3695 pipe_ctx->stream && pipe_ctx->plane_state) {
3696 /* Only update visual confirm for SUBVP and Mclk switching here.
3697 * The bar appears on all pipes, so we need to update the bar on all displays,
3698 * so the information doesn't get stale.
3699 */
3700 dc->hwss.update_visual_confirm_color(dc, pipe_ctx,
3701 pipe_ctx->plane_res.hubp->inst);
3702 }
3703 }
3704 }
3705
3706 for (i = 0; i < surface_count; i++) {
3707 struct dc_plane_state *plane_state = srf_updates[i].surface;
3708 /*set logical flag for lock/unlock use*/
3709 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3710 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3711 if (!pipe_ctx->plane_state)
3712 continue;
3713 if (should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3714 continue;
3715 pipe_ctx->plane_state->triplebuffer_flips = false;
3716 if (update_type == UPDATE_TYPE_FAST &&
3717 dc->hwss.program_triplebuffer != NULL &&
3718 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
3719 /*triple buffer for VUpdate only*/
3720 pipe_ctx->plane_state->triplebuffer_flips = true;
3721 }
3722 }
3723 if (update_type == UPDATE_TYPE_FULL) {
3724 /* force vsync flip when reconfiguring pipes to prevent underflow */
3725 plane_state->flip_immediate = false;
3726 }
3727 }
3728
3729 // Update Type FULL, Surface updates
3730 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3731 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3732
3733 if (!pipe_ctx->top_pipe &&
3734 !pipe_ctx->prev_odm_pipe &&
3735 should_update_pipe_for_stream(context, pipe_ctx, stream)) {
3736 struct dc_stream_status *stream_status = NULL;
3737
3738 if (!pipe_ctx->plane_state)
3739 continue;
3740
3741 /* Full fe update*/
3742 if (update_type == UPDATE_TYPE_FAST)
3743 continue;
3744
3745 ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
3746
3747 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
3748 /*turn off triple buffer for full update*/
3749 dc->hwss.program_triplebuffer(
3750 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
3751 }
3752 stream_status =
3753 stream_get_status(context, pipe_ctx->stream);
3754
3755 if (dc->hwss.apply_ctx_for_surface)
3756 dc->hwss.apply_ctx_for_surface(
3757 dc, pipe_ctx->stream, stream_status->plane_count, context);
3758 }
3759 }
3760 if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
3761 dc->hwss.program_front_end_for_ctx(dc, context);
3762 if (dc->debug.validate_dml_output) {
3763 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3764 struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
3765 if (cur_pipe->stream == NULL)
3766 continue;
3767
3768 cur_pipe->plane_res.hubp->funcs->validate_dml_output(
3769 cur_pipe->plane_res.hubp, dc->ctx,
3770 &context->res_ctx.pipe_ctx[i].rq_regs,
3771 &context->res_ctx.pipe_ctx[i].dlg_regs,
3772 &context->res_ctx.pipe_ctx[i].ttu_regs);
3773 }
3774 }
3775 }
3776
3777 // Update Type FAST, Surface updates
3778 if (update_type == UPDATE_TYPE_FAST) {
3779 if (dc->hwss.set_flip_control_gsl)
3780 for (i = 0; i < surface_count; i++) {
3781 struct dc_plane_state *plane_state = srf_updates[i].surface;
3782
3783 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3784 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3785
3786 if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3787 continue;
3788
3789 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3790 continue;
3791
3792 // GSL has to be used for flip immediate
3793 dc->hwss.set_flip_control_gsl(pipe_ctx,
3794 pipe_ctx->plane_state->flip_immediate);
3795 }
3796 }
3797
3798 /* Perform requested Updates */
3799 for (i = 0; i < surface_count; i++) {
3800 struct dc_plane_state *plane_state = srf_updates[i].surface;
3801
3802 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3803 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3804
3805 if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3806 continue;
3807
3808 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3809 continue;
3810
3811 /*program triple buffer after lock based on flip type*/
3812 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
3813 /*only enable triplebuffer for fast_update*/
3814 dc->hwss.program_triplebuffer(
3815 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
3816 }
3817 if (pipe_ctx->plane_state->update_flags.bits.addr_update)
3818 dc->hwss.update_plane_addr(dc, pipe_ctx);
3819 }
3820 }
3821 }
3822
3823 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3824 dc->hwss.interdependent_update_lock(dc, context, false);
3825 } else {
3826 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
3827 }
3828
3829 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
3830 if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
3831 top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3832 top_pipe_to_program->stream_res.tg,
3833 CRTC_STATE_VACTIVE);
3834 top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3835 top_pipe_to_program->stream_res.tg,
3836 CRTC_STATE_VBLANK);
3837 top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3838 top_pipe_to_program->stream_res.tg,
3839 CRTC_STATE_VACTIVE);
3840
3841 if (should_use_dmub_lock(stream->link)) {
3842 union dmub_hw_lock_flags hw_locks = { 0 };
3843 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
3844
3845 hw_locks.bits.lock_dig = 1;
3846 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
3847
3848 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
3849 false,
3850 &hw_locks,
3851 &inst_flags);
3852 } else
3853 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
3854 top_pipe_to_program->stream_res.tg);
3855 }
3856
3857 if (subvp_curr_use) {
3858 /* If enabling subvp or transitioning from subvp->subvp, enable the
3859 * phantom streams before we program front end for the phantom pipes.
3860 */
3861 if (update_type != UPDATE_TYPE_FAST) {
3862 if (dc->hwss.enable_phantom_streams)
3863 dc->hwss.enable_phantom_streams(dc, context);
3864 }
3865 }
3866
3867 if (update_type != UPDATE_TYPE_FAST)
3868 dc->hwss.post_unlock_program_front_end(dc, context);
3869
3870 if (subvp_prev_use && !subvp_curr_use) {
3871 /* If disabling subvp, disable phantom streams after front end
3872 * programming has completed (we turn on phantom OTG in order
3873 * to complete the plane disable for phantom pipes).
3874 */
3875 dc->hwss.apply_ctx_to_hw(dc, context);
3876 }
3877
3878 if (update_type != UPDATE_TYPE_FAST)
3879 if (dc->hwss.commit_subvp_config)
3880 dc->hwss.commit_subvp_config(dc, context);
3881 /* Since phantom pipe programming is moved to post_unlock_program_front_end,
3882 * move the SubVP lock to after the phantom pipes have been setup
3883 */
3884 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3885 if (dc->hwss.subvp_pipe_control_lock)
3886 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
3887 } else {
3888 if (dc->hwss.subvp_pipe_control_lock)
3889 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
3890 }
3891
3892 // Fire manual trigger only when bottom plane is flipped
3893 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3894 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3895
3896 if (!pipe_ctx->plane_state)
3897 continue;
3898
3899 if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
3900 !pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) ||
3901 !pipe_ctx->plane_state->update_flags.bits.addr_update ||
3902 pipe_ctx->plane_state->skip_manual_trigger)
3903 continue;
3904
3905 if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
3906 pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
3907 }
3908 }
3909
3910 /**
3911 * could_mpcc_tree_change_for_active_pipes - Check if an OPP associated with MPCC might change
3912 *
3913 * @dc: Used to get the current state status
3914 * @stream: Target stream from which we want to remove the attached planes
3915 * @surface_count: Number of surface updates
3916 * @is_plane_addition: [out] Filled with true if it is a plane addition case
3917 *
3918 * DCN32x and newer support a feature named Dynamic ODM which can conflict with
3919 * the MPO if used simultaneously in some specific configurations (e.g.,
3920 * 4k@144). This function checks if the incoming context requires applying a
3921 * transition state with unnecessary pipe splitting and ODM disabled to
3922 * circumvent our hardware limitations to prevent this edge case. If the OPP
3923 * associated with an MPCC might change due to plane additions, this function
3924 * returns true.
3925 *
3926 * Return:
3927 * Return true if OPP and MPCC might change, otherwise, return false.
3928 */
3929 static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
3930 struct dc_stream_state *stream,
3931 int surface_count,
3932 bool *is_plane_addition)
3933 {
3934
3935 struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
3936 bool force_minimal_pipe_splitting = false;
3937 bool subvp_active = false;
3938 uint32_t i;
3939
3940 *is_plane_addition = false;
3941
3942 if (cur_stream_status &&
3943 dc->current_state->stream_count > 0 &&
3944 dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
3945 /* determine if minimal transition is required due to MPC*/
3946 if (surface_count > 0) {
3947 if (cur_stream_status->plane_count > surface_count) {
3948 force_minimal_pipe_splitting = true;
3949 } else if (cur_stream_status->plane_count < surface_count) {
3950 force_minimal_pipe_splitting = true;
3951 *is_plane_addition = true;
3952 }
3953 }
3954 }
3955
3956 if (cur_stream_status &&
3957 dc->current_state->stream_count == 1 &&
3958 dc->debug.enable_single_display_2to1_odm_policy) {
3959 /* determine if minimal transition is required due to dynamic ODM*/
3960 if (surface_count > 0) {
3961 if (cur_stream_status->plane_count > 2 && cur_stream_status->plane_count > surface_count) {
3962 force_minimal_pipe_splitting = true;
3963 } else if (surface_count > 2 && cur_stream_status->plane_count < surface_count) {
3964 force_minimal_pipe_splitting = true;
3965 *is_plane_addition = true;
3966 }
3967 }
3968 }
3969
3970 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3971 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
3972
3973 if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) {
3974 subvp_active = true;
3975 break;
3976 }
3977 }
3978
3979 /* For SubVP when adding or removing planes we need to add a minimal transition
3980 * (even when disabling all planes). Whenever disabling a phantom pipe, we
3981 * must use the minimal transition path to disable the pipe correctly.
3982 *
3983 * We want to use the minimal transition whenever subvp is active, not only if
3984 * a plane is being added / removed from a subvp stream (MPO plane can be added
3985 * to a DRR pipe of SubVP + DRR config, in which case we still want to run through
3986 * a min transition to disable subvp).
3987 */
3988 if (cur_stream_status && subvp_active) {
3989 /* determine if minimal transition is required due to SubVP*/
3990 if (cur_stream_status->plane_count > surface_count) {
3991 force_minimal_pipe_splitting = true;
3992 } else if (cur_stream_status->plane_count < surface_count) {
3993 force_minimal_pipe_splitting = true;
3994 *is_plane_addition = true;
3995 }
3996 }
3997
3998 return force_minimal_pipe_splitting;
3999 }
4000
4001 /**
4002 * commit_minimal_transition_state - Create a transition pipe split state
4003 *
4004 * @dc: Used to get the current state status
4005 * @transition_base_context: New transition state
4006 *
4007 * In some specific configurations, such as pipe split on multi-display with
4008 * MPO and/or Dynamic ODM, removing a plane may cause unsupported pipe
4009 * programming when moving to new planes. To mitigate those types of problems,
4010 * this function adds a transition state that minimizes pipe usage before
4011 * programming the new configuration. When adding a new plane, the current
4012 * state requires the least pipes, so it is applied without splitting. When
4013 * removing a plane, the new state requires the least pipes, so it is applied
4014 * without splitting.
4015 *
4016 * Return:
4017 * Return false if something is wrong in the transition state.
4018 */
4019 static bool commit_minimal_transition_state(struct dc *dc,
4020 struct dc_state *transition_base_context)
4021 {
4022 struct dc_state *transition_context = dc_create_state(dc);
4023 enum pipe_split_policy tmp_mpc_policy = 0;
4024 bool temp_dynamic_odm_policy = 0;
4025 bool temp_subvp_policy = 0;
4026 enum dc_status ret = DC_ERROR_UNEXPECTED;
4027 unsigned int i, j;
4028 unsigned int pipe_in_use = 0;
4029 bool subvp_in_use = false;
4030 bool odm_in_use = false;
4031
4032 if (!transition_context)
4033 return false;
4034 /* Setup:
4035 * Store the current ODM and MPC config in some temp variables to be
4036 * restored after we commit the transition state.
4037 */
4038
4039 /* check current pipes in use*/
4040 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4041 struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];
4042
4043 if (pipe->plane_state)
4044 pipe_in_use++;
4045 }
4046
4047 /* If SubVP is enabled and we are adding or removing planes from any main subvp
4048 * pipe, we must use the minimal transition.
4049 */
4050 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4051 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4052
4053 if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
4054 subvp_in_use = true;
4055 break;
4056 }
4057 }
4058
4059 /* If ODM is enabled and we are adding or removing planes from any ODM
4060 * pipe, we must use the minimal transition.
4061 */
4062 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4063 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4064
4065 if (pipe->stream && pipe->next_odm_pipe) {
4066 odm_in_use = true;
4067 break;
4068 }
4069 }
4070
4071 /* When the OS adds a new surface while all pipes are already in use for ODM combine
4072 * or MPC split, commit_minimal_transition_state is needed to transition safely.
4073 * After the OS exits MPO, ODM and MPC split go back to using all of the pipes, so this
4074 * needs to be called again. Otherwise return true to skip.
4075 *
4076 * This reduces the scenarios that require dc_commit_state_no_check during flips,
4077 * especially when entering/exiting MPO while DCN still has enough resources.
4078 */
4079 if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use) {
4080 dc_release_state(transition_context);
4081 return true;
4082 }
4083
4084 if (!dc->config.is_vmin_only_asic) {
4085 tmp_mpc_policy = dc->debug.pipe_split_policy;
4086 dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
4087 }
4088
4089 temp_dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy;
4090 dc->debug.enable_single_display_2to1_odm_policy = false;
4091
4092 temp_subvp_policy = dc->debug.force_disable_subvp;
4093 dc->debug.force_disable_subvp = true;
4094
4095 dc_resource_state_copy_construct(transition_base_context, transition_context);
4096
4097 /* commit minimal state */
4098 if (dc->res_pool->funcs->validate_bandwidth(dc, transition_context, false)) {
4099 for (i = 0; i < transition_context->stream_count; i++) {
4100 struct dc_stream_status *stream_status = &transition_context->stream_status[i];
4101
4102 for (j = 0; j < stream_status->plane_count; j++) {
4103 struct dc_plane_state *plane_state = stream_status->plane_states[j];
4104
4105 /* force vsync flip when reconfiguring pipes to prevent underflow
4106 * and corruption
4107 */
4108 plane_state->flip_immediate = false;
4109 }
4110 }
4111
4112 ret = dc_commit_state_no_check(dc, transition_context);
4113 }
4114
4115 /* always release as dc_commit_state_no_check retains in good case */
4116 dc_release_state(transition_context);
4117
4118 /* TearDown:
4119 * Restore original configuration for ODM and MPO.
4120 */
4121 if (!dc->config.is_vmin_only_asic)
4122 dc->debug.pipe_split_policy = tmp_mpc_policy;
4123
4124 dc->debug.enable_single_display_2to1_odm_policy = temp_dynamic_odm_policy;
4125 dc->debug.force_disable_subvp = temp_subvp_policy;
4126
4127 if (ret != DC_OK) {
4128 /* this should never happen */
4129 BREAK_TO_DEBUGGER();
4130 return false;
4131 }
4132
4133 /* force full surface update */
4134 for (i = 0; i < dc->current_state->stream_count; i++) {
4135 for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
4136 dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF;
4137 }
4138 }
4139
4140 return true;
4141 }
4142
4143 /**
4144 * update_seamless_boot_flags() - Helper function for updating seamless boot flags
4145 *
4146 * @dc: Current DC state
4147 * @context: New DC state to be programmed
4148 * @surface_count: Number of surfaces that have an update
4149 * @stream: Corresponding stream to be updated in the current flip
4150 *
4151 * Updating seamless boot flags does not need to be part of the commit sequence. This
4152 * helper function will update the seamless boot flags on each flip (if required)
4153 * outside of the HW commit sequence (fast or slow).
4154 *
4155 * Return: void
4156 */
4157 static void update_seamless_boot_flags(struct dc *dc,
4158 struct dc_state *context,
4159 int surface_count,
4160 struct dc_stream_state *stream)
4161 {
4162 if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
4163 /* Optimize seamless boot flag keeps clocks and watermarks high until
4164 * first flip. After first flip, optimization is required to lower
4165 * bandwidth. Important to note that it is expected UEFI will
4166 * only light up a single display on POST, therefore we only expect
4167 * one stream with seamless boot flag set.
4168 */
4169 if (stream->apply_seamless_boot_optimization) {
4170 stream->apply_seamless_boot_optimization = false;
4171
4172 if (get_seamless_boot_stream_count(context) == 0)
4173 dc->optimized_required = true;
4174 }
4175 }
4176 }
4177
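/*
 * The helpers below classify an incoming update: populate_fast_updates() and
 * fast_updates_exist() collect the attributes that can be programmed on the
 * fast path, while full_update_required() checks for anything (plane info,
 * scaling, transfer functions, most stream updates, plane count changes, etc.)
 * that forces the full commit path. fast_update_only() combines the two checks.
 */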
4178 static void populate_fast_updates(struct dc_fast_update *fast_update,
4179 struct dc_surface_update *srf_updates,
4180 int surface_count,
4181 struct dc_stream_update *stream_update)
4182 {
4183 int i = 0;
4184
4185 if (stream_update) {
4186 fast_update[0].out_transfer_func = stream_update->out_transfer_func;
4187 fast_update[0].output_csc_transform = stream_update->output_csc_transform;
4188 }
4189
4190 for (i = 0; i < surface_count; i++) {
4191 fast_update[i].flip_addr = srf_updates[i].flip_addr;
4192 fast_update[i].gamma = srf_updates[i].gamma;
4193 fast_update[i].gamut_remap_matrix = srf_updates[i].gamut_remap_matrix;
4194 fast_update[i].input_csc_color_matrix = srf_updates[i].input_csc_color_matrix;
4195 fast_update[i].coeff_reduction_factor = srf_updates[i].coeff_reduction_factor;
4196 }
4197 }
4198
4199 static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_count)
4200 {
4201 int i;
4202
4203 if (fast_update[0].out_transfer_func ||
4204 fast_update[0].output_csc_transform)
4205 return true;
4206
4207 for (i = 0; i < surface_count; i++) {
4208 if (fast_update[i].flip_addr ||
4209 fast_update[i].gamma ||
4210 fast_update[i].gamut_remap_matrix ||
4211 fast_update[i].input_csc_color_matrix ||
4212 fast_update[i].coeff_reduction_factor)
4213 return true;
4214 }
4215
4216 return false;
4217 }
4218
4219 static bool full_update_required(struct dc *dc,
4220 struct dc_surface_update *srf_updates,
4221 int surface_count,
4222 struct dc_stream_update *stream_update,
4223 struct dc_stream_state *stream)
4224 {
4225
4226 int i;
4227 struct dc_stream_status *stream_status;
4228 const struct dc_state *context = dc->current_state;
4229
4230 for (i = 0; i < surface_count; i++) {
4231 if (srf_updates &&
4232 (srf_updates[i].plane_info ||
4233 srf_updates[i].scaling_info ||
4234 (srf_updates[i].hdr_mult.value &&
4235 srf_updates[i].hdr_mult.value != srf_updates->surface->hdr_mult.value) ||
4236 srf_updates[i].in_transfer_func ||
4237 srf_updates[i].func_shaper ||
4238 srf_updates[i].lut3d_func ||
4239 srf_updates[i].blend_tf ||
4240 srf_updates[i].surface->force_full_update ||
4241 (srf_updates[i].flip_addr &&
4242 srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||
4243 !is_surface_in_context(context, srf_updates[i].surface)))
4244 return true;
4245 }
4246
4247 if (stream_update &&
4248 (((stream_update->src.height != 0 && stream_update->src.width != 0) ||
4249 (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
4250 stream_update->integer_scaling_update) ||
4251 stream_update->hdr_static_metadata ||
4252 stream_update->abm_level ||
4253 stream_update->periodic_interrupt ||
4254 stream_update->vrr_infopacket ||
4255 stream_update->vsc_infopacket ||
4256 stream_update->vsp_infopacket ||
4257 stream_update->hfvsif_infopacket ||
4258 stream_update->vtem_infopacket ||
4259 stream_update->adaptive_sync_infopacket ||
4260 stream_update->dpms_off ||
4261 stream_update->allow_freesync ||
4262 stream_update->vrr_active_variable ||
4263 stream_update->vrr_active_fixed ||
4264 stream_update->gamut_remap ||
4265 stream_update->output_color_space ||
4266 stream_update->dither_option ||
4267 stream_update->wb_update ||
4268 stream_update->dsc_config ||
4269 stream_update->mst_bw_update ||
4270 stream_update->func_shaper ||
4271 stream_update->lut3d_func ||
4272 stream_update->pending_test_pattern ||
4273 stream_update->crtc_timing_adjust))
4274 return true;
4275
4276 if (stream) {
4277 stream_status = dc_stream_get_status(stream);
4278 if (stream_status == NULL || stream_status->plane_count != surface_count)
4279 return true;
4280 }
4281 if (dc->idle_optimizations_allowed)
4282 return true;
4283
4284 return false;
4285 }
4286
4287 static bool fast_update_only(struct dc *dc,
4288 struct dc_fast_update *fast_update,
4289 struct dc_surface_update *srf_updates,
4290 int surface_count,
4291 struct dc_stream_update *stream_update,
4292 struct dc_stream_state *stream)
4293 {
4294 return fast_updates_exist(fast_update, surface_count)
4295 && !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
4296 }
4297
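/*
 * dc_update_planes_and_stream() is the DM-facing entry point for committing a
 * set of surface updates (and an optional stream update) to a stream. As a
 * rough, illustrative sketch only (identifiers other than the DC types and
 * functions are hypothetical), a DM flipping a single plane to a new address
 * might do something like:
 *
 *	struct dc_flip_addrs flip = { 0 };
 *	struct dc_surface_update update = { 0 };
 *
 *	flip.address = new_address;         // hypothetical new surface address
 *	flip.flip_immediate = false;        // vsync-aligned flip
 *	update.surface = plane_state;
 *	update.flip_addr = &flip;
 *	dc_update_planes_and_stream(dc, &update, 1, stream, NULL);
 *
 * Whether this is committed through the fast or the full path is decided below.
 */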
4298 bool dc_update_planes_and_stream(struct dc *dc,
4299 struct dc_surface_update *srf_updates, int surface_count,
4300 struct dc_stream_state *stream,
4301 struct dc_stream_update *stream_update)
4302 {
4303 struct dc_state *context;
4304 enum surface_update_type update_type;
4305 int i;
4306 struct mall_temp_config mall_temp_config;
4307 struct dc_fast_update fast_update[MAX_SURFACES] = {0};
4308
4309 /* In cases where MPO and split or ODM are used transitions can
4310 * cause underflow. Apply stream configuration with minimal pipe
4311 * split first to avoid unsupported transitions for active pipes.
4312 */
4313 bool force_minimal_pipe_splitting = 0;
4314 bool is_plane_addition = 0;
4315
4316 populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
4317 force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
4318 dc,
4319 stream,
4320 surface_count,
4321 &is_plane_addition);
4322
4323 /* on plane addition, minimal state is the current one */
4324 if (force_minimal_pipe_splitting && is_plane_addition &&
4325 !commit_minimal_transition_state(dc, dc->current_state))
4326 return false;
4327
4328 if (!update_planes_and_stream_state(
4329 dc,
4330 srf_updates,
4331 surface_count,
4332 stream,
4333 stream_update,
4334 &update_type,
4335 &context))
4336 return false;
4337
4338 /* on plane removal, minimal state is the new one */
4339 if (force_minimal_pipe_splitting && !is_plane_addition) {
4340 /* Since all phantom pipes are removed in full validation,
4341 * we have to save and restore the subvp/mall config when
4342 * we do a minimal transition since the flags marking the
4343 * pipe as subvp/phantom will be cleared (dc copy constructor
4344 * creates a shallow copy).
4345 */
4346 if (dc->res_pool->funcs->save_mall_state)
4347 dc->res_pool->funcs->save_mall_state(dc, context, &mall_temp_config);
4348 if (!commit_minimal_transition_state(dc, context)) {
4349 dc_release_state(context);
4350 return false;
4351 }
4352 if (dc->res_pool->funcs->restore_mall_state)
4353 dc->res_pool->funcs->restore_mall_state(dc, context, &mall_temp_config);
4354
4355 /* If we do a minimal transition with plane removal and the context
4356 * has subvp we also have to retain back the phantom stream / planes
4357 * since the refcount is decremented as part of the min transition
4358 * (we commit a state with no subvp, so the phantom streams / planes
4359 * had to be removed).
4360 */
4361 if (dc->res_pool->funcs->retain_phantom_pipes)
4362 dc->res_pool->funcs->retain_phantom_pipes(dc, context);
4363 update_type = UPDATE_TYPE_FULL;
4364 }
4365
4366 update_seamless_boot_flags(dc, context, surface_count, stream);
4367 if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
4368 !dc->debug.enable_legacy_fast_update) {
4369 commit_planes_for_stream_fast(dc,
4370 srf_updates,
4371 surface_count,
4372 stream,
4373 stream_update,
4374 update_type,
4375 context);
4376 } else {
4377 commit_planes_for_stream(
4378 dc,
4379 srf_updates,
4380 surface_count,
4381 stream,
4382 stream_update,
4383 update_type,
4384 context);
4385 }
4386
4387 if (dc->current_state != context) {
4388
4389 /* Since memory free requires elevated IRQL, an interrupt
4390 * request is generated by mem free. If this happens
4391 * between freeing and reassigning the context, our vsync
4392 * interrupt will call into dc and cause a memory
4393 * corruption BSOD. Hence, we first reassign the context,
4394 * then free the old context.
4395 */
4396
4397 struct dc_state *old = dc->current_state;
4398
4399 dc->current_state = context;
4400 dc_release_state(old);
4401
4402 // clear any forced full updates
4403 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4404 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
4405
4406 if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
4407 pipe_ctx->plane_state->force_full_update = false;
4408 }
4409 }
4410 return true;
4411 }
4412
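/*
 * Legacy entry point kept for older DMs: on DCN3.2 and newer it redirects to
 * dc_update_planes_and_stream() above, while on earlier ASICs it keeps the
 * original copy-construct/validate/commit sequence.
 */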
4413 void dc_commit_updates_for_stream(struct dc *dc,
4414 struct dc_surface_update *srf_updates,
4415 int surface_count,
4416 struct dc_stream_state *stream,
4417 struct dc_stream_update *stream_update,
4418 struct dc_state *state)
4419 {
4420 const struct dc_stream_status *stream_status;
4421 enum surface_update_type update_type;
4422 struct dc_state *context;
4423 struct dc_context *dc_ctx = dc->ctx;
4424 int i, j;
4425 struct dc_fast_update fast_update[MAX_SURFACES] = {0};
4426
4427 populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
4428 stream_status = dc_stream_get_status(stream);
4429 context = dc->current_state;
4430
4431 update_type = dc_check_update_surfaces_for_stream(
4432 dc, srf_updates, surface_count, stream_update, stream_status);
4433
4434 /* TODO: Since change commit sequence can have a huge impact,
4435 * we decided to only enable it for DCN3x. However, as soon as
4436 * we get more confident about this change we'll need to enable
4437 * the new sequence for all ASICs.
4438 */
4439 if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
4440 /*
4441 * Previous frame finished and HW is ready for optimization.
4442 */
4443 if (update_type == UPDATE_TYPE_FAST)
4444 dc_post_update_surfaces_to_stream(dc);
4445
4446 dc_update_planes_and_stream(dc, srf_updates,
4447 surface_count, stream,
4448 stream_update);
4449 return;
4450 }
4451
4452 if (update_type >= update_surface_trace_level)
4453 update_surface_trace(dc, srf_updates, surface_count);
4454
4455
4456 if (update_type >= UPDATE_TYPE_FULL) {
4457
4458 /* initialize scratch memory for building context */
4459 context = dc_create_state(dc);
4460 if (context == NULL) {
4461 DC_ERROR("Failed to allocate new validate context!\n");
4462 return;
4463 }
4464
4465 dc_resource_state_copy_construct(state, context);
4466
4467 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4468 struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
4469 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4470
4471 if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
4472 new_pipe->plane_state->force_full_update = true;
4473 }
4474 } else if (update_type == UPDATE_TYPE_FAST) {
4475 /*
4476 * Previous frame finished and HW is ready for optimization.
4477 */
4478 dc_post_update_surfaces_to_stream(dc);
4479 }
4480
4481
4482 for (i = 0; i < surface_count; i++) {
4483 struct dc_plane_state *surface = srf_updates[i].surface;
4484
4485 copy_surface_update_to_plane(surface, &srf_updates[i]);
4486
4487 if (update_type >= UPDATE_TYPE_MED) {
4488 for (j = 0; j < dc->res_pool->pipe_count; j++) {
4489 struct pipe_ctx *pipe_ctx =
4490 &context->res_ctx.pipe_ctx[j];
4491
4492 if (pipe_ctx->plane_state != surface)
4493 continue;
4494
4495 resource_build_scaling_params(pipe_ctx);
4496 }
4497 }
4498 }
4499
4500 copy_stream_update_to_stream(dc, context, stream, stream_update);
4501
4502 if (update_type >= UPDATE_TYPE_FULL) {
4503 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
4504 DC_ERROR("Mode validation failed for stream update!\n");
4505 dc_release_state(context);
4506 return;
4507 }
4508 }
4509
4510 TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
4511
4512 update_seamless_boot_flags(dc, context, surface_count, stream);
4513 if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
4514 !dc->debug.enable_legacy_fast_update) {
4515 commit_planes_for_stream_fast(dc,
4516 srf_updates,
4517 surface_count,
4518 stream,
4519 stream_update,
4520 update_type,
4521 context);
4522 } else {
4523 commit_planes_for_stream(
4524 dc,
4525 srf_updates,
4526 surface_count,
4527 stream,
4528 stream_update,
4529 update_type,
4530 context);
4531 }
4532 /* update current_state */
4533 if (dc->current_state != context) {
4534
4535 struct dc_state *old = dc->current_state;
4536
4537 dc->current_state = context;
4538 dc_release_state(old);
4539
4540 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4541 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
4542
4543 if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
4544 pipe_ctx->plane_state->force_full_update = false;
4545 }
4546 }
4547
4548 /* Legacy optimization path for DCE. */
4549 if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
4550 dc_post_update_surfaces_to_stream(dc);
4551 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
4552 }
4553
4554 return;
4555
4556 }
4557
4558 uint8_t dc_get_current_stream_count(struct dc *dc)
4559 {
4560 return dc->current_state->stream_count;
4561 }
4562
4563 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
4564 {
4565 if (i < dc->current_state->stream_count)
4566 return dc->current_state->streams[i];
4567 return NULL;
4568 }
4569
4570 enum dc_irq_source dc_interrupt_to_irq_source(
4571 struct dc *dc,
4572 uint32_t src_id,
4573 uint32_t ext_id)
4574 {
4575 return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
4576 }
4577
4578 /*
4579 * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
4580 */
4581 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
4582 {
4583
4584 if (dc == NULL)
4585 return false;
4586
4587 return dal_irq_service_set(dc->res_pool->irqs, src, enable);
4588 }
4589
4590 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
4591 {
4592 dal_irq_service_ack(dc->res_pool->irqs, src);
4593 }
4594
4595 void dc_power_down_on_boot(struct dc *dc)
4596 {
4597 if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
4598 dc->hwss.power_down_on_boot)
4599 dc->hwss.power_down_on_boot(dc);
4600 }
4601
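/*
 * For D0 the current resource state is reconstructed and the HW sequencer is
 * re-initialized; for any other power state the current context is zeroed out
 * (preserving only its refcount and display_mode_lib) so that resume starts
 * from a clean state.
 */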
4602 void dc_set_power_state(
4603 struct dc *dc,
4604 enum dc_acpi_cm_power_state power_state)
4605 {
4606 struct kref refcount;
4607 struct display_mode_lib *dml;
4608
4609 if (!dc->current_state)
4610 return;
4611
4612 switch (power_state) {
4613 case DC_ACPI_CM_POWER_STATE_D0:
4614 dc_resource_state_construct(dc, dc->current_state);
4615
4616 dc_z10_restore(dc);
4617
4618 dc->hwss.init_hw(dc);
4619
4620 if (dc->hwss.init_sys_ctx != NULL &&
4621 dc->vm_pa_config.valid) {
4622 dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
4623 }
4624
4625 break;
4626 default:
4627 ASSERT(dc->current_state->stream_count == 0);
4628 /* Zero out the current context so that on resume we start with
4629 * clean state, and dc hw programming optimizations will not
4630 * cause any trouble.
4631 */
4632 dml = kzalloc(sizeof(struct display_mode_lib),
4633 GFP_KERNEL);
4634
4635 ASSERT(dml);
4636 if (!dml)
4637 return;
4638
4639 /* Preserve refcount */
4640 refcount = dc->current_state->refcount;
4641 /* Preserve display mode lib */
4642 memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));
4643
4644 dc_resource_state_destruct(dc->current_state);
4645 memset(dc->current_state, 0,
4646 sizeof(*dc->current_state));
4647
4648 dc->current_state->refcount = refcount;
4649 dc->current_state->bw_ctx.dml = *dml;
4650
4651 kfree(dml);
4652
4653 break;
4654 }
4655 }
4656
4657 void dc_resume(struct dc *dc)
4658 {
4659 uint32_t i;
4660
4661 for (i = 0; i < dc->link_count; i++)
4662 dc->link_srv->resume(dc->links[i]);
4663 }
4664
4665 bool dc_is_dmcu_initialized(struct dc *dc)
4666 {
4667 struct dmcu *dmcu = dc->res_pool->dmcu;
4668
4669 if (dmcu)
4670 return dmcu->funcs->is_dmcu_initialized(dmcu);
4671 return false;
4672 }
4673
4674 void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
4675 {
4676 info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
4677 info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
4678 info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
4679 info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
4680 info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
4681 info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
4682 info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
4683 info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
4684 info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
4685 }
4686 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
4687 {
4688 if (dc->hwss.set_clock)
4689 return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
4690 return DC_ERROR_UNEXPECTED;
4691 }
4692 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
4693 {
4694 if (dc->hwss.get_clock)
4695 dc->hwss.get_clock(dc, clock_type, clock_cfg);
4696 }
4697
4698 /* enable/disable eDP PSR without specifying a stream for eDP */
4699 bool dc_set_psr_allow_active(struct dc *dc, bool enable)
4700 {
4701 int i;
4702 bool allow_active;
4703
4704 for (i = 0; i < dc->current_state->stream_count ; i++) {
4705 struct dc_link *link;
4706 struct dc_stream_state *stream = dc->current_state->streams[i];
4707
4708 link = stream->link;
4709 if (!link)
4710 continue;
4711
4712 if (link->psr_settings.psr_feature_enabled) {
4713 if (enable && !link->psr_settings.psr_allow_active) {
4714 allow_active = true;
4715 if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL))
4716 return false;
4717 } else if (!enable && link->psr_settings.psr_allow_active) {
4718 allow_active = false;
4719 if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL))
4720 return false;
4721 }
4722 }
4723 }
4724
4725 return true;
4726 }
4727
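/*
 * Idle power optimizations are only toggled when they are not disabled via
 * debug options and, where an SMU presence check exists, when the SMU is
 * actually present; the hwss callback decides whether the requested state can
 * be applied, and the cached flag is only updated on success.
 */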
4728 void dc_allow_idle_optimizations(struct dc *dc, bool allow)
4729 {
4730 if (dc->debug.disable_idle_power_optimizations)
4731 return;
4732
4733 if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
4734 if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
4735 return;
4736
4737 if (allow == dc->idle_optimizations_allowed)
4738 return;
4739
4740 if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
4741 dc->idle_optimizations_allowed = allow;
4742 }
4743
4744 /* set min and max memory clock to lowest and highest DPM level, respectively */
4745 void dc_unlock_memory_clock_frequency(struct dc *dc)
4746 {
4747 if (dc->clk_mgr->funcs->set_hard_min_memclk)
4748 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);
4749
4750 if (dc->clk_mgr->funcs->set_hard_max_memclk)
4751 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
4752 }
4753
4754 /* set min memory clock to the min required for current mode, max to maxDPM */
4755 void dc_lock_memory_clock_frequency(struct dc *dc)
4756 {
4757 if (dc->clk_mgr->funcs->get_memclk_states_from_smu)
4758 dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);
4759
4760 if (dc->clk_mgr->funcs->set_hard_min_memclk)
4761 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);
4762
4763 if (dc->clk_mgr->funcs->set_hard_max_memclk)
4764 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
4765 }
4766
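/*
 * Blank pixel data on every active pipe, force both the min and max memory
 * clock to the requested value, then unblank. Used by
 * dc_enable_dcmode_clk_limit() below when P-State switching is not supported.
 */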
4767 static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz)
4768 {
4769 struct dc_state *context = dc->current_state;
4770 struct hubp *hubp;
4771 struct pipe_ctx *pipe;
4772 int i;
4773
4774 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4775 pipe = &context->res_ctx.pipe_ctx[i];
4776
4777 if (pipe->stream != NULL) {
4778 dc->hwss.disable_pixel_data(dc, pipe, true);
4779
4780 // wait for double buffer
4781 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
4782 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK);
4783 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
4784
4785 hubp = pipe->plane_res.hubp;
4786 hubp->funcs->set_blank_regs(hubp, true);
4787 }
4788 }
4789
4790 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz);
4791 dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz);
4792
4793 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4794 pipe = &context->res_ctx.pipe_ctx[i];
4795
4796 if (pipe->stream != NULL) {
4797 dc->hwss.disable_pixel_data(dc, pipe, false);
4798
4799 hubp = pipe->plane_res.hubp;
4800 hubp->funcs->set_blank_regs(hubp, false);
4801 }
4802 }
4803 }
4804
4805
4806 /**
4807 * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode
4808 * @dc: pointer to dc of the dm calling this
4809 * @enable: True = transition to DC mode, false = transition back to AC mode
4810 *
4811 * Some SoCs define additional clock limits when in DC mode; DM should
4812 * invoke this function when the platform undergoes a power source transition
4813 * so DC can apply/unapply the limit. This interface may be disruptive to
4814 * the onscreen content.
4815 *
4816 * Context: Triggered by OS through DM interface, or manually by escape calls.
4817 * Need to hold a dclock when doing so.
4818 *
4819 * Return: none (void function)
4820 *
4821 */
4822 void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
4823 {
4824 unsigned int softMax = 0, maxDPM = 0, funcMin = 0, i;
4825 bool p_state_change_support;
4826
4827 if (!dc->config.dc_mode_clk_limit_support)
4828 return;
4829
4830 softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk;
4831 for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries; i++) {
4832 if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz > maxDPM)
4833 maxDPM = dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz;
4834 }
4835 funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000;
4836 p_state_change_support = dc->clk_mgr->clks.p_state_change_support;
4837
4838 if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) {
4839 if (p_state_change_support) {
4840 if (funcMin <= softMax)
4841 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax);
4842 // else: No-Op
4843 } else {
4844 if (funcMin <= softMax)
4845 blank_and_force_memclk(dc, true, softMax);
4846 // else: No-Op
4847 }
4848 } else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) {
4849 if (p_state_change_support) {
4850 if (funcMin <= softMax)
4851 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM);
4852 // else: No-Op
4853 } else {
4854 if (funcMin <= softMax)
4855 blank_and_force_memclk(dc, true, maxDPM);
4856 // else: No-Op
4857 }
4858 }
4859 dc->clk_mgr->dc_mode_softmax_enabled = enable;
4860 }
4861 bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
4862 struct dc_cursor_attributes *cursor_attr)
4863 {
4864 if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr))
4865 return true;
4866 return false;
4867 }
4868
4869 /* cleanup on driver unload */
4870 void dc_hardware_release(struct dc *dc)
4871 {
4872 dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(dc);
4873
4874 if (dc->hwss.hardware_release)
4875 dc->hwss.hardware_release(dc);
4876 }
4877
4878 void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
4879 {
4880 if (dc->current_state)
4881 dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true;
4882 }
4883
4884 /**
4885 * dc_is_dmub_outbox_supported - Check if DMUB firmware supports outbox notifications
4886 *
4887 * @dc: [in] dc structure
4888 *
4889 * Checks whether DMUB FW supports outbox notifications. If supported, DM
4890 * should register the outbox interrupt prior to actually enabling interrupts
4891 * via dc_enable_dmub_outbox.
4892 *
4893 * Return:
4894 * True if DMUB FW supports outbox notifications, False otherwise
4895 */
bool dc_is_dmub_outbox_supported(struct dc *dc)
{
	/* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
	if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
	    dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
	    !dc->debug.dpia_debug.bits.disable_dpia)
		return true;

	if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1 &&
	    !dc->debug.dpia_debug.bits.disable_dpia)
		return true;

	/* dmub aux needs dmub notifications to be enabled */
	return dc->debug.enable_dmub_aux_for_legacy_ddc;
}

/**
 * dc_enable_dmub_notifications - Check if dmub fw supports outbox
 *
 * @dc: [in] dc structure
 *
 * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox
 * notifications. All DMs shall switch to dc_is_dmub_outbox_supported. This
 * API shall be removed after switching.
 *
 * Return:
 * True if DMUB FW supports outbox notifications, False otherwise
 */
bool dc_enable_dmub_notifications(struct dc *dc)
{
	return dc_is_dmub_outbox_supported(dc);
}

/**
 * dc_enable_dmub_outbox - Enables DMUB unsolicited notification
 *
 * @dc: [in] dc structure
 *
 * Enables DMUB unsolicited notifications to x86 via outbox.
 */
void dc_enable_dmub_outbox(struct dc *dc)
{
	struct dc_context *dc_ctx = dc->ctx;

	dmub_enable_outbox_notification(dc_ctx->dmub_srv);
	DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__);
}
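
/*
 * Illustrative sketch (not part of the driver): the bring-up order a DM is
 * expected to follow - probe for outbox support, hook up its own outbox
 * interrupt handler, then enable notifications. dm_register_outbox_irq() is
 * a hypothetical DM-side helper used only for illustration.
 *
 *	if (dc_is_dmub_outbox_supported(dc)) {
 *		// DM-specific: wire the outbox interrupt to its handler first,
 *		// so no notification is lost once the firmware starts sending.
 *		dm_register_outbox_irq(dm);
 *		dc_enable_dmub_outbox(dc);
 *	}
 */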

/**
 * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message
 * Sets port index appropriately for legacy DDC
 * @dc: dc structure
 * @link_index: link index
 * @payload: aux payload
 *
 * Returns: True if successful, False if failure
 */
bool dc_process_dmub_aux_transfer_async(struct dc *dc,
				uint32_t link_index,
				struct aux_payload *payload)
{
	uint8_t action;
	union dmub_rb_cmd cmd = {0};

	ASSERT(payload->length <= 16);

	cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
	cmd.dp_aux_access.header.payload_bytes = 0;
	/* For dpia, ddc_pin is set to NULL */
	if (!dc->links[link_index]->ddc->ddc_pin)
		cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA;
	else
		cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;

	cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
	cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
	cmd.dp_aux_access.aux_control.timeout = 0;
	cmd.dp_aux_access.aux_control.dpaux.address = payload->address;
	cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux;
	cmd.dp_aux_access.aux_control.dpaux.length = payload->length;

	/* set aux action */
	if (payload->i2c_over_aux) {
		if (payload->write) {
			if (payload->mot)
				action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT;
			else
				action = DP_AUX_REQ_ACTION_I2C_WRITE;
		} else {
			if (payload->mot)
				action = DP_AUX_REQ_ACTION_I2C_READ_MOT;
			else
				action = DP_AUX_REQ_ACTION_I2C_READ;
		}
	} else {
		if (payload->write)
			action = DP_AUX_REQ_ACTION_DPCD_WRITE;
		else
			action = DP_AUX_REQ_ACTION_DPCD_READ;
	}

	cmd.dp_aux_access.aux_control.dpaux.action = action;

	if (payload->length && payload->write) {
		memcpy(cmd.dp_aux_access.aux_control.dpaux.data,
			payload->data,
			payload->length
			);
	}

	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

	return true;
}
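
/*
 * Illustrative sketch (not part of the driver): submitting a 1-byte native
 * DPCD read through the DMUB inbox. Only the payload fields this function
 * actually consumes are set; the reply arrives later as an outbox AUX
 * notification that the DM must handle. DP_SINK_COUNT is the standard DPCD
 * address define; how link_index is chosen is DM-specific.
 *
 *	struct aux_payload payload = {0};
 *	uint8_t sink_count;
 *
 *	payload.i2c_over_aux = false;	// native AUX, not I2C-over-AUX
 *	payload.write = false;		// becomes DP_AUX_REQ_ACTION_DPCD_READ
 *	payload.address = DP_SINK_COUNT;
 *	payload.length = 1;		// must be <= 16
 *	payload.data = &sink_count;	// filled when the reply notification arrives
 *
 *	dc_process_dmub_aux_transfer_async(dc, link_index, &payload);
 */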

uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
		uint8_t dpia_port_index)
{
	uint8_t index, link_index = 0xFF;

	for (index = 0; index < dc->link_count; index++) {
		/* ddc_hw_inst has dpia port index for dpia links
		 * and ddc instance for legacy links
		 */
		if (!dc->links[index]->ddc->ddc_pin) {
			if (dc->links[index]->ddc_hw_inst == dpia_port_index) {
				link_index = index;
				break;
			}
		}
	}
	ASSERT(link_index != 0xFF);
	return link_index;
}

/**
 * dc_process_dmub_set_config_async - Submits set_config command
 *
 * @dc: [in] dc structure
 * @link_index: [in] link index
 * @payload: [in] set_config payload
 * @notify: [out] set_config immediate reply
 *
 * Submits set_config command to dmub via inbox message.
 *
 * Return:
 * True if successful, False if failure
 */
bool dc_process_dmub_set_config_async(struct dc *dc,
				uint32_t link_index,
				struct set_config_cmd_payload *payload,
				struct dmub_notification *notify)
{
	union dmub_rb_cmd cmd = {0};
	bool is_cmd_complete = true;

	/* prepare SET_CONFIG command */
	cmd.set_config_access.header.type = DMUB_CMD__DPIA;
	cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS;

	cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst;
	cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
	cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;

	if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) {
		/* command is not processed by dmub */
		notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
		return is_cmd_complete;
	}

	/* command processed by dmub, if ret_status is 1, it is completed instantly */
	if (cmd.set_config_access.header.ret_status == 1)
		notify->sc_status = cmd.set_config_access.set_config_control.immed_status;
	else
		/* cmd pending, will receive notification via outbox */
		is_cmd_complete = false;

	return is_cmd_complete;
}
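
/*
 * Illustrative sketch (not part of the driver): issuing a SET_CONFIG request
 * and distinguishing the immediate-reply case from the deferred one. The
 * msg_type/msg_data values are placeholders; real values come from the DP
 * tunneling request being serviced.
 *
 *	struct set_config_cmd_payload payload = {0};
 *	struct dmub_notification notify = {0};
 *
 *	payload.msg_type = msg_type;	// DPIA SET_CONFIG message type
 *	payload.msg_data = msg_data;
 *
 *	if (dc_process_dmub_set_config_async(dc, link_index, &payload, &notify)) {
 *		// Completed immediately: notify.sc_status holds the reply
 *		// (SET_CONFIG_UNKNOWN_ERROR if DMUB never took the command).
 *	} else {
 *		// Pending: the final status arrives via an outbox notification.
 *	}
 */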

/**
 * dc_process_dmub_set_mst_slots - Submits MST slot allocation
 *
 * @dc: [in] dc structure
 * @link_index: [in] link index
 * @mst_alloc_slots: [in] mst slots to be allotted
 * @mst_slots_in_use: [out] mst slots in use returned in failure case
 *
 * Submits mst slot allocation command to dmub via inbox message
 *
 * Return:
 * DC_OK if successful, DC_ERROR if failure
 */
enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
				uint32_t link_index,
				uint8_t mst_alloc_slots,
				uint8_t *mst_slots_in_use)
{
	union dmub_rb_cmd cmd = {0};

	/* prepare MST_ALLOC_SLOTS command */
	cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA;
	cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS;

	cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
	cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;

	if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
		/* command is not processed by dmub */
		return DC_ERROR_UNEXPECTED;

	/* command processed by dmub, if ret_status is 1 */
	if (cmd.set_config_access.header.ret_status != 1)
		/* command processing error */
		return DC_ERROR_UNEXPECTED;

	/* command processed and we have a status of 2, mst not enabled in dpia */
	if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2)
		return DC_FAIL_UNSUPPORTED_1;

	/* previously configured mst alloc and used slots did not match */
	if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) {
		*mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use;
		return DC_NOT_SUPPORTED;
	}

	return DC_OK;
}
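
/*
 * Illustrative sketch (not part of the driver): requesting an MST slot
 * allocation on a DPIA link and reacting to the distinct failure modes this
 * function reports. req_slots and link_index are placeholders supplied by
 * the caller.
 *
 *	uint8_t slots_in_use = 0;
 *	enum dc_status status;
 *
 *	status = dc_process_dmub_set_mst_slots(dc, link_index, req_slots,
 *					       &slots_in_use);
 *	if (status == DC_FAIL_UNSUPPORTED_1) {
 *		// MST not enabled in this DPIA.
 *	} else if (status == DC_NOT_SUPPORTED) {
 *		// Previous allocation and used slots did not match;
 *		// slots_in_use reports what the DPIA currently holds.
 *	} else if (status != DC_OK) {
 *		// DMUB rejected or failed to process the command.
 *	}
 */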

/**
 * dc_process_dmub_dpia_hpd_int_enable - Submits DPIA HPD interrupt enable
 *
 * @dc: [in] dc structure
 * @hpd_int_enable: [in] 1 for hpd int enable, 0 to disable
 *
 * Submits dpia hpd int enable command to dmub via inbox message
 */
void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
				uint32_t hpd_int_enable)
{
	union dmub_rb_cmd cmd = {0};

	cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE;
	cmd.dpia_hpd_int_enable.enable = hpd_int_enable;

	dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);

	DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable);
}

/**
 * dc_print_dmub_diagnostic_data - Print DMUB diagnostic data for debugging
 *
 * @dc: [in] dc structure
 */
void dc_print_dmub_diagnostic_data(const struct dc *dc)
{
	dc_dmub_srv_log_diagnostic_data(dc->ctx->dmub_srv);
}

/**
 * dc_disable_accelerated_mode - disable accelerated mode
 * @dc: dc structure
 */
void dc_disable_accelerated_mode(struct dc *dc)
{
	bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);
}

/**
 * dc_notify_vsync_int_state - notifies vsync enable/disable state
 * @dc: dc structure
 * @stream: stream where vsync int state changed
 * @enable: whether vsync is enabled or disabled
 *
 * Called when vsync is enabled/disabled. Will notify DMUB to start/stop ABM
 * interrupts after steady state is reached.
 */
void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
{
	int i;
	int edp_num;
	struct pipe_ctx *pipe = NULL;
	struct dc_link *link = stream->sink->link;
	struct dc_link *edp_links[MAX_NUM_EDP];

	if (link->psr_settings.psr_feature_enabled)
		return;

	if (link->replay_settings.replay_feature_enabled)
		return;

	/* find primary pipe associated with stream */
	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg)
			break;
	}

	if (i == MAX_PIPES) {
		ASSERT(0);
		return;
	}

	dc_get_edp_links(dc, edp_links, &edp_num);

	/* Determine panel inst */
	for (i = 0; i < edp_num; i++) {
		if (edp_links[i] == link)
			break;
	}

	if (i == edp_num) {
		return;
	}

	if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
		pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
}
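
/*
 * Illustrative sketch (not part of the driver): a DM forwarding its vblank
 * interrupt enable/disable path into DC so ABM interrupts can be paused
 * while vsync is off. dm_crtc_to_stream() is a hypothetical DM-side lookup
 * used only for illustration.
 *
 *	// On vblank enable:
 *	dc_notify_vsync_int_state(dc, dm_crtc_to_stream(crtc), true);
 *
 *	// On vblank disable:
 *	dc_notify_vsync_int_state(dc, dm_crtc_to_stream(crtc), false);
 */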

/*****************************************************************************
 * dc_abm_save_restore() - Interface to DC for save+pause and restore+un-pause
 *                         ABM
 * @dc: dc structure
 * @stream: stream whose ABM hardware state is saved or restored
 * @pData: abm hw states
 *
 ****************************************************************************/
bool dc_abm_save_restore(
	struct dc *dc,
	struct dc_stream_state *stream,
	struct abm_save_restore *pData)
{
	int i;
	int edp_num;
	struct pipe_ctx *pipe = NULL;
	struct dc_link *link = stream->sink->link;
	struct dc_link *edp_links[MAX_NUM_EDP];

	/* find primary pipe associated with stream */
	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg)
			break;
	}

	if (i == MAX_PIPES) {
		ASSERT(0);
		return false;
	}

	dc_get_edp_links(dc, edp_links, &edp_num);

	/* Determine panel inst */
	for (i = 0; i < edp_num; i++)
		if (edp_links[i] == link)
			break;

	if (i == edp_num)
		return false;

	if (pipe->stream_res.abm &&
		pipe->stream_res.abm->funcs->save_restore)
		return pipe->stream_res.abm->funcs->save_restore(
				pipe->stream_res.abm,
				i,
				pData);
	return false;
}
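
/*
 * Illustrative sketch (not part of the driver): pairing a save+pause with a
 * later restore+un-pause of ABM hardware state around an operation that
 * would otherwise disturb it. This assumes (for the sketch only) that the
 * save-vs-restore direction is carried in the caller-filled
 * struct abm_save_restore handed to both calls.
 *
 *	struct abm_save_restore abm_state = {0};
 *
 *	// abm_state filled by caller to request save + pause
 *	dc_abm_save_restore(dc, stream, &abm_state);
 *	// ... disruptive programming on the eDP panel ...
 *	// abm_state updated by caller to request restore + un-pause
 *	dc_abm_save_restore(dc, stream, &abm_state);
 */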

void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties)
{
	unsigned int i;
	bool subvp_in_use = false;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		if (dc->current_state->streams[i]->mall_stream_config.type != SUBVP_NONE) {
			subvp_in_use = true;
			break;
		}
	}
	properties->cursor_size_limit = subvp_in_use ? 64 : dc->caps.max_cursor_size;
}
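
/*
 * Illustrative sketch (not part of the driver): a DM querying the live
 * properties before validating a cursor update, so the cursor is clamped to
 * 64x64 whenever SubVP is active. new_cursor_width and the rejection policy
 * are hypothetical DM-side details.
 *
 *	struct dc_current_properties props = {0};
 *
 *	dc_query_current_properties(dc, &props);
 *	if (new_cursor_width > props.cursor_size_limit)
 *		return -EINVAL;	// reject or scale down the cursor (DM policy)
 */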