/*
 * Copyright 2012-15 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include <linux/version.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_dp_helper.h>
#include "dm_services.h"
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_mst_types.h"

#include "dc.h"
#include "dm_helpers.h"

#include "dc_link_ddc.h"

#include "i2caux_interface.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif

#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dc/dcn20/dcn20_resource.h"
#endif

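/*
 * DRM AUX transfer hook: repack the incoming drm_dp_aux_msg as a DC
 * aux_payload, submit it through dc_link_aux_transfer_raw(), and translate
 * the DC channel result into a negative errno (or the transferred size) for
 * the DRM core.
 */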
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
				  struct drm_dp_aux_msg *msg)
{
	ssize_t result = 0;
	struct aux_payload payload;
	enum aux_channel_operation_result operation_result;

	if (WARN_ON(msg->size > 16))
		return -E2BIG;

	payload.address = msg->address;
	payload.data = msg->buffer;
	payload.length = msg->size;
	payload.reply = &msg->reply;
	payload.i2c_over_aux = (msg->request & DP_AUX_NATIVE_WRITE) == 0;
	payload.write = (msg->request & DP_AUX_I2C_READ) == 0;
	payload.mot = (msg->request & DP_AUX_I2C_MOT) != 0;
	payload.defer_delay = 0;

	result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
					  &operation_result);

	if (payload.write && result >= 0)
		result = msg->size;

	if (result < 0)
		switch (operation_result) {
		case AUX_CHANNEL_OPERATION_SUCCEEDED:
			break;
		case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
		case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
			result = -EIO;
			break;
		case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
		case AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE:
			result = -EBUSY;
			break;
		case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
			result = -ETIMEDOUT;
			break;
		}

	return result;
}

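/*
 * Free an MST connector: drop the remote DC sink and its reference, free the
 * cached EDID, tear down the DRM connector and release the malloc reference
 * taken on the MST port when the connector was created.
 */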
static void
dm_dp_mst_connector_destroy(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector =
		to_amdgpu_dm_connector(connector);

	if (aconnector->dc_sink) {
		dc_link_remove_remote_sink(aconnector->dc_link,
					   aconnector->dc_sink);
		dc_sink_release(aconnector->dc_sink);
	}

	kfree(aconnector->edid);

	drm_connector_cleanup(connector);
	drm_dp_mst_put_port_malloc(aconnector->port);
	kfree(aconnector);
}

static int
amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	int r;

	r = drm_dp_mst_connector_late_register(connector,
					       amdgpu_dm_connector->port);
	if (r < 0)
		return r;

#if defined(CONFIG_DEBUG_FS)
	connector_debugfs_init(amdgpu_dm_connector);
#endif

	return 0;
}

static void
amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *amdgpu_dm_connector =
		to_amdgpu_dm_connector(connector);
	struct drm_dp_mst_port *port = amdgpu_dm_connector->port;

	drm_dp_mst_connector_early_unregister(connector, port);
}

static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = dm_dp_mst_connector_destroy,
	.reset = amdgpu_dm_connector_funcs_reset,
	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
	.late_register = amdgpu_dm_mst_connector_late_register,
	.early_unregister = amdgpu_dm_mst_connector_early_unregister,
};

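/*
 * Check whether the sink behind this MST connector can decode DSC and cache
 * which AUX channel to use for DSC DPCD access. On success the decoder caps
 * are parsed into dc_sink->dsc_caps.dsc_dec_caps.
 */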
#if defined(CONFIG_DRM_AMD_DC_DCN)
static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
{
	struct dc_sink *dc_sink = aconnector->dc_sink;
	struct drm_dp_mst_port *port = aconnector->port;
	u8 dsc_caps[16] = { 0 };

	aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
#if defined(CONFIG_HP_HOOK_WORKAROUND)
	/*
	 * drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs
	 * because it only checks the dsc/fec caps of the port itself, not
	 * those of the dock.
	 *
	 * This case returns NULL: a DSC-capable MST dock connected to a
	 * non-fec/dsc-capable display.
	 *
	 * Workaround: explicitly check for this case and use the MST dock's
	 * aux as dsc_aux.
	 */
	if (!aconnector->dsc_aux && !port->parent->port_parent)
		aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux;
#endif
	if (!aconnector->dsc_aux)
		return false;

	if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DSC_SUPPORT, dsc_caps, 16) < 0)
		return false;

	if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
				   dsc_caps, NULL,
				   &dc_sink->dsc_caps.dsc_dec_caps))
		return false;

	return true;
}
#endif

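/*
 * .get_modes hook for MST connectors: fetch the EDID over the MST topology if
 * it is not cached yet, (re)create the remote DC sink for the port, refresh
 * freesync and DSC capabilities, and then add the EDID modes to the
 * connector.
 */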
static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	int ret = 0;

	if (!aconnector)
		return drm_add_edid_modes(connector, NULL);

	if (!aconnector->edid) {
		struct edid *edid;

		edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
		if (!edid) {
			drm_connector_update_edid_property(
				&aconnector->base,
				NULL);
			return ret;
		}

		aconnector->edid = edid;
	}

	if (aconnector->dc_sink && aconnector->dc_sink->sink_signal == SIGNAL_TYPE_VIRTUAL) {
		dc_sink_release(aconnector->dc_sink);
		aconnector->dc_sink = NULL;
	}

	if (!aconnector->dc_sink) {
		struct dc_sink *dc_sink;
		struct dc_sink_init_data init_params = {
				.link = aconnector->dc_link,
				.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };

		dc_sink = dc_link_add_remote_sink(
			aconnector->dc_link,
			(uint8_t *)aconnector->edid,
			(aconnector->edid->extensions + 1) * EDID_LENGTH,
			&init_params);

		/* dc_link_add_remote_sink returns a new reference */
		aconnector->dc_sink = dc_sink;

		if (aconnector->dc_sink) {
			/* Only dereference the sink once we know it exists */
			dc_sink->priv = aconnector;

			amdgpu_dm_update_freesync_caps(
					connector, aconnector->edid);

#if defined(CONFIG_DRM_AMD_DC_DCN)
			if (!validate_dsc_caps_on_connector(aconnector))
				memset(&aconnector->dc_sink->dsc_caps,
				       0, sizeof(aconnector->dc_sink->dsc_caps));
#endif
		}
	}

	drm_connector_update_edid_property(
					&aconnector->base, aconnector->edid);

	ret = drm_add_edid_modes(connector, aconnector->edid);

	return ret;
}

static struct drm_encoder *
dm_mst_atomic_best_encoder(struct drm_connector *connector,
			   struct drm_connector_state *connector_state)
{
	struct drm_device *dev = connector->dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc);

	return &adev->dm.mst_encoders[acrtc->crtc_id].base;
}

static int
dm_dp_mst_detect(struct drm_connector *connector,
		 struct drm_modeset_acquire_ctx *ctx, bool force)
{
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct amdgpu_dm_connector *master = aconnector->mst_port;

	return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
				      aconnector->port);
}

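/*
 * Atomic check for MST connectors: when the connector is being disabled (it
 * had a CRTC in the old state and either loses it, or keeps a CRTC that a
 * modeset is turning off), release the VCPI slots it holds in the MST atomic
 * state.
 */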
static int dm_dp_mst_atomic_check(struct drm_connector *connector,
				  struct drm_atomic_state *state)
{
	struct drm_connector_state *new_conn_state =
			drm_atomic_get_new_connector_state(state, connector);
	struct drm_connector_state *old_conn_state =
			drm_atomic_get_old_connector_state(state, connector);
	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
	struct drm_crtc_state *new_crtc_state;
	struct drm_dp_mst_topology_mgr *mst_mgr;
	struct drm_dp_mst_port *mst_port;

	mst_port = aconnector->port;
	mst_mgr = &aconnector->mst_port->mst_mgr;

	if (!old_conn_state->crtc)
		return 0;

	if (new_conn_state->crtc) {
		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
		if (!new_crtc_state ||
		    !drm_atomic_crtc_needs_modeset(new_crtc_state) ||
		    new_crtc_state->enable)
			return 0;
	}

	return drm_dp_atomic_release_vcpi_slots(state,
						mst_mgr,
						mst_port);
}

static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
	.get_modes = dm_dp_mst_get_modes,
	.mode_valid = amdgpu_dm_connector_mode_valid,
	.atomic_best_encoder = dm_mst_atomic_best_encoder,
	.detect_ctx = dm_dp_mst_detect,
	.atomic_check = dm_dp_mst_atomic_check,
};

static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}

static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
	.destroy = amdgpu_dm_encoder_destroy,
};

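/*
 * Create one "fake" DPMST encoder per display index so that every MST
 * connector can be attached to any CRTC; dm_mst_atomic_best_encoder() later
 * picks the encoder matching the assigned CRTC.
 */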
void
dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev_to_drm(adev);
	int i;

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		struct amdgpu_encoder *amdgpu_encoder = &adev->dm.mst_encoders[i];
		struct drm_encoder *encoder = &amdgpu_encoder->base;

		encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);

		drm_encoder_init(
			dev,
			&amdgpu_encoder->base,
			&amdgpu_dm_encoder_funcs,
			DRM_MODE_ENCODER_DPMST,
			NULL);

		drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs);
	}
}

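/*
 * Topology manager callback: create an amdgpu_dm_connector for a newly
 * discovered MST port, attach it to the fake MST encoders and inherit the
 * max_bpc and vrr_capable properties from the SST "master" connector.
 */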
static struct drm_connector *
dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
			struct drm_dp_mst_port *port,
			const char *pathprop)
{
	struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
	struct drm_device *dev = master->base.dev;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_dm_connector *aconnector;
	struct drm_connector *connector;
	int i;

	aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
	if (!aconnector)
		return NULL;

	connector = &aconnector->base;
	aconnector->port = port;
	aconnector->mst_port = master;

	if (drm_connector_init(
		dev,
		connector,
		&dm_dp_mst_connector_funcs,
		DRM_MODE_CONNECTOR_DisplayPort)) {
		kfree(aconnector);
		return NULL;
	}
	drm_connector_helper_add(connector, &dm_dp_mst_connector_helper_funcs);

	amdgpu_dm_connector_init_helper(
		&adev->dm,
		aconnector,
		DRM_MODE_CONNECTOR_DisplayPort,
		master->dc_link,
		master->connector_id);

	for (i = 0; i < adev->dm.display_indexes_num; i++) {
		drm_connector_attach_encoder(&aconnector->base,
					     &adev->dm.mst_encoders[i].base);
	}

	connector->max_bpc_property = master->base.max_bpc_property;
	if (connector->max_bpc_property)
		drm_connector_attach_max_bpc_property(connector, 8, 16);

	connector->vrr_capable_property = master->base.vrr_capable_property;
	if (connector->vrr_capable_property)
		drm_connector_attach_vrr_capable_property(connector);

	drm_object_attach_property(
		&connector->base,
		dev->mode_config.path_property,
		0);
	drm_object_attach_property(
		&connector->base,
		dev->mode_config.tile_property,
		0);

	drm_connector_set_path_property(connector, pathprop);

	/*
	 * Initialize connector state before adding the connector to drm and
	 * framebuffer lists
	 */
	amdgpu_dm_connector_funcs_reset(connector);

	drm_dp_mst_get_port_malloc(port);

	return connector;
}

static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
	.add_connector = dm_dp_add_mst_connector,
};

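/*
 * Register the AUX channel (backed by dm_dp_aux_transfer) and CEC for a DP
 * connector; for anything other than eDP, also initialize the MST topology
 * manager with the dm_mst_cbs callbacks so remote connectors can be added on
 * hotplug.
 */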
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
				       struct amdgpu_dm_connector *aconnector,
				       int link_index)
{
	aconnector->dm_dp_aux.aux.name =
		kasprintf(GFP_KERNEL, "AMDGPU DM aux hw bus %d",
			  link_index);
	aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
	aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;

	drm_dp_aux_init(&aconnector->dm_dp_aux.aux);
	drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
				      &aconnector->base);

	if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
		return;

	aconnector->mst_mgr.cbs = &dm_mst_cbs;
	drm_dp_mst_topology_mgr_init(
		&aconnector->mst_mgr,
		adev_to_drm(dm->adev),
		&aconnector->dm_dp_aux.aux,
		16,
		4,
		aconnector->connector_id);

	drm_connector_attach_dp_subconnector_property(&aconnector->base);
}

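/*
 * PBN divider for the link, i.e. the PBN value of one MTP time slot: the
 * total link bandwidth in kbps converted to PBN units (kbps * 64 /
 * (54 * 8 * 1000)) and spread over the 64 time slots.
 */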
int dm_mst_get_pbn_divider(struct dc_link *link)
{
	if (!link)
		return 0;

	return dc_link_bandwidth_kbps(link,
			dc_link_get_link_cap(link)) / (8 * 1000 * 54);
}

#if defined(CONFIG_DRM_AMD_DC_DCN)

struct dsc_mst_fairness_params {
	struct dc_crtc_timing *timing;
	struct dc_sink *sink;
	struct dc_dsc_bw_range bw_range;
	bool compression_possible;
	struct drm_dp_mst_port *port;
	enum dsc_clock_force_state clock_force_enable;
	uint32_t num_slices_h;
	uint32_t num_slices_v;
	uint32_t bpp_overwrite;
};

struct dsc_mst_fairness_vars {
	int pbn;
	bool dsc_enabled;
	int bpp_x16;
};

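/*
 * Convert a stream bandwidth in kbps to its peak PBN requirement: apply the
 * DP MST 0.6% overhead margin (factor 1.006) and convert using
 * 1 PBN = 54 * 8 * 1000 / 64 kbps, rounding up.
 */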
static int kbps_to_peak_pbn(int kbps)
{
	u64 peak_kbps = kbps;

	peak_kbps *= 1006;
	peak_kbps = div_u64(peak_kbps, 1000);
	return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000));
}

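/*
 * Apply the result of the fairness computation to each stream's timing:
 * recompute the DSC config for every stream that ended up with DSC enabled,
 * honouring any debugfs overrides for bpp and slice counts, and clear the DSC
 * flag for the rest.
 */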
static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params,
		struct dsc_mst_fairness_vars *vars,
		int count)
{
	int i;

	for (i = 0; i < count; i++) {
		memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
		if (vars[i].dsc_enabled && dc_dsc_compute_config(
					params[i].sink->ctx->dc->res_pool->dscs[0],
					&params[i].sink->dsc_caps.dsc_dec_caps,
					params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
					0,
					params[i].timing,
					&params[i].timing->dsc_cfg)) {
			params[i].timing->flags.DSC = 1;

			if (params[i].bpp_overwrite)
				params[i].timing->dsc_cfg.bits_per_pixel = params[i].bpp_overwrite;
			else
				params[i].timing->dsc_cfg.bits_per_pixel = vars[i].bpp_x16;

			if (params[i].num_slices_h)
				params[i].timing->dsc_cfg.num_slices_h = params[i].num_slices_h;

			if (params[i].num_slices_v)
				params[i].timing->dsc_cfg.num_slices_v = params[i].num_slices_v;
		} else {
			params[i].timing->flags.DSC = 0;
		}
	}
}

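/*
 * Given a PBN allocation, work out the corresponding DSC target bpp (in 1/16
 * bpp units): convert the PBN back to kbps (the factor 994 undoes the 1.006
 * margin applied in kbps_to_peak_pbn()) and let DC compute a DSC config for
 * that bandwidth.
 */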
static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
{
	struct dc_dsc_config dsc_config;
	u64 kbps;

	kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
	dc_dsc_compute_config(
			param.sink->ctx->dc->res_pool->dscs[0],
			&param.sink->dsc_caps.dsc_dec_caps,
			param.sink->ctx->dc->debug.dsc_min_slice_height_override,
			(int) kbps, param.timing, &dsc_config);

	return dsc_config.bits_per_pixel;
}

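/*
 * Greedily hand spare link bandwidth back to DSC-enabled streams: in each
 * round, pick the stream with the smallest remaining slack towards its
 * maximum DSC target bpp, give it either a fair share of the free time slots
 * or all of its remaining slack (whichever is smaller), and keep the new
 * allocation only if the MST atomic check still passes. Each stream is
 * visited once.
 */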
static void increase_dsc_bpp(struct drm_atomic_state *state,
			     struct dc_link *dc_link,
			     struct dsc_mst_fairness_params *params,
			     struct dsc_mst_fairness_vars *vars,
			     int count)
{
	int i;
	bool bpp_increased[MAX_PIPES];
	int initial_slack[MAX_PIPES];
	int min_initial_slack;
	int next_index;
	int remaining_to_increase = 0;
	int pbn_per_timeslot;
	int link_timeslots_used;
	int fair_pbn_alloc;

	pbn_per_timeslot = dm_mst_get_pbn_divider(dc_link);

	for (i = 0; i < count; i++) {
		if (vars[i].dsc_enabled) {
			initial_slack[i] = kbps_to_peak_pbn(params[i].bw_range.max_kbps) - vars[i].pbn;
			bpp_increased[i] = false;
			remaining_to_increase += 1;
		} else {
			initial_slack[i] = 0;
			bpp_increased[i] = true;
		}
	}

	while (remaining_to_increase) {
		next_index = -1;
		min_initial_slack = -1;
		for (i = 0; i < count; i++) {
			if (!bpp_increased[i]) {
				if (min_initial_slack == -1 || min_initial_slack > initial_slack[i]) {
					min_initial_slack = initial_slack[i];
					next_index = i;
				}
			}
		}

		if (next_index == -1)
			break;

		link_timeslots_used = 0;

		for (i = 0; i < count; i++)
			link_timeslots_used += DIV_ROUND_UP(vars[i].pbn, pbn_per_timeslot);

		fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * pbn_per_timeslot;

		if (initial_slack[next_index] > fair_pbn_alloc) {
			vars[next_index].pbn += fair_pbn_alloc;
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[next_index].port->mgr,
							  params[next_index].port,
							  vars[next_index].pbn,
							  pbn_per_timeslot) < 0)
				return;
			if (!drm_dp_mst_atomic_check(state)) {
				vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn);
			} else {
				vars[next_index].pbn -= fair_pbn_alloc;
				if (drm_dp_atomic_find_vcpi_slots(state,
								  params[next_index].port->mgr,
								  params[next_index].port,
								  vars[next_index].pbn,
								  pbn_per_timeslot) < 0)
					return;
			}
		} else {
			vars[next_index].pbn += initial_slack[next_index];
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[next_index].port->mgr,
							  params[next_index].port,
							  vars[next_index].pbn,
							  pbn_per_timeslot) < 0)
				return;
			if (!drm_dp_mst_atomic_check(state)) {
				vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16;
			} else {
				vars[next_index].pbn -= initial_slack[next_index];
				if (drm_dp_atomic_find_vcpi_slots(state,
								  params[next_index].port->mgr,
								  params[next_index].port,
								  vars[next_index].pbn,
								  pbn_per_timeslot) < 0)
					return;
			}
		}

		bpp_increased[next_index] = true;
		remaining_to_increase--;
	}
}

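/*
 * Counterpart to increase_dsc_bpp(): for streams that reached their maximum
 * DSC bpp and were not forced on via debugfs, try switching them back to an
 * uncompressed allocation, starting with the stream that gains the most
 * bandwidth, and keep the change only if the MST atomic check still passes.
 */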
static void try_disable_dsc(struct drm_atomic_state *state,
			    struct dc_link *dc_link,
			    struct dsc_mst_fairness_params *params,
			    struct dsc_mst_fairness_vars *vars,
			    int count)
{
	int i;
	bool tried[MAX_PIPES];
	int kbps_increase[MAX_PIPES];
	int max_kbps_increase;
	int next_index;
	int remaining_to_try = 0;

	for (i = 0; i < count; i++) {
		if (vars[i].dsc_enabled
				&& vars[i].bpp_x16 == params[i].bw_range.max_target_bpp_x16
				&& params[i].clock_force_enable == DSC_CLK_FORCE_DEFAULT) {
			kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps;
			tried[i] = false;
			remaining_to_try += 1;
		} else {
			kbps_increase[i] = 0;
			tried[i] = true;
		}
	}

	while (remaining_to_try) {
		next_index = -1;
		max_kbps_increase = -1;
		for (i = 0; i < count; i++) {
			if (!tried[i]) {
				if (max_kbps_increase == -1 || max_kbps_increase < kbps_increase[i]) {
					max_kbps_increase = kbps_increase[i];
					next_index = i;
				}
			}
		}

		if (next_index == -1)
			break;

		vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps);
		if (drm_dp_atomic_find_vcpi_slots(state,
						  params[next_index].port->mgr,
						  params[next_index].port,
						  vars[next_index].pbn,
						  dm_mst_get_pbn_divider(dc_link)) < 0)
			return;

		if (!drm_dp_mst_atomic_check(state)) {
			vars[next_index].dsc_enabled = false;
			vars[next_index].bpp_x16 = 0;
		} else {
			vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps);
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[next_index].port->mgr,
							  params[next_index].port,
							  vars[next_index].pbn,
							  dm_mst_get_pbn_divider(dc_link)) < 0)
				return;
		}

		tried[next_index] = true;
		remaining_to_try--;
	}
}

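/*
 * Compute DSC configs for all streams on one MST link. The strategy, which
 * keeps the DRM MST atomic state in sync via drm_dp_atomic_find_vcpi_slots()
 * and drm_dp_mst_atomic_check() at every step, is:
 *   1. Try every stream uncompressed; if that fits (and no debugfs override
 *      forces DSC), use it.
 *   2. Otherwise enable DSC at minimum bpp for every stream that supports it;
 *      if even that does not fit, fail.
 *   3. Redistribute the leftover bandwidth (increase_dsc_bpp) and turn DSC
 *      back off where it is no longer needed (try_disable_dsc).
 */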
static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
					     struct dc_state *dc_state,
					     struct dc_link *dc_link)
{
	int i;
	struct dc_stream_state *stream;
	struct dsc_mst_fairness_params params[MAX_PIPES];
	struct dsc_mst_fairness_vars vars[MAX_PIPES];
	struct amdgpu_dm_connector *aconnector;
	int count = 0;
	bool debugfs_overwrite = false;

	memset(params, 0, sizeof(params));

	/* Set up params */
	for (i = 0; i < dc_state->stream_count; i++) {
		struct dc_dsc_policy dsc_policy = {0};

		stream = dc_state->streams[i];

		if (stream->link != dc_link)
			continue;

		stream->timing.flags.DSC = 0;

		params[count].timing = &stream->timing;
		params[count].sink = stream->sink;
		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
		params[count].port = aconnector->port;
		params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable;
		if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE)
			debugfs_overwrite = true;
		params[count].num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
		params[count].num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
		params[count].bpp_overwrite = aconnector->dsc_settings.dsc_bits_per_pixel;
		params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported;
		dc_dsc_get_policy_for_timing(params[count].timing, &dsc_policy);
		if (!dc_dsc_compute_bandwidth_range(
				stream->sink->ctx->dc->res_pool->dscs[0],
				stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
				dsc_policy.min_target_bpp,
				dsc_policy.max_target_bpp,
				&stream->sink->dsc_caps.dsc_dec_caps,
				&stream->timing, &params[count].bw_range))
			params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);

		count++;
	}

	/* Try no compression */
	for (i = 0; i < count; i++) {
		vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
		vars[i].dsc_enabled = false;
		vars[i].bpp_x16 = 0;
		if (drm_dp_atomic_find_vcpi_slots(state,
						  params[i].port->mgr,
						  params[i].port,
						  vars[i].pbn,
						  dm_mst_get_pbn_divider(dc_link)) < 0)
			return false;
	}
	if (!drm_dp_mst_atomic_check(state) && !debugfs_overwrite) {
		set_dsc_configs_from_fairness_vars(params, vars, count);
		return true;
	}

	/* Try max compression */
	for (i = 0; i < count; i++) {
		if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
			vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps);
			vars[i].dsc_enabled = true;
			vars[i].bpp_x16 = params[i].bw_range.min_target_bpp_x16;
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[i].port->mgr,
							  params[i].port,
							  vars[i].pbn,
							  dm_mst_get_pbn_divider(dc_link)) < 0)
				return false;
		} else {
			vars[i].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps);
			vars[i].dsc_enabled = false;
			vars[i].bpp_x16 = 0;
			if (drm_dp_atomic_find_vcpi_slots(state,
							  params[i].port->mgr,
							  params[i].port,
							  vars[i].pbn,
							  dm_mst_get_pbn_divider(dc_link)) < 0)
				return false;
		}
	}
	if (drm_dp_mst_atomic_check(state))
		return false;

	/* Optimize degree of compression */
	increase_dsc_bpp(state, dc_link, params, vars, count);

	try_disable_dsc(state, dc_link, params, vars, count);

	set_dsc_configs_from_fairness_vars(params, vars, count);

	return true;
}

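/*
 * Entry point called from the atomic check path: walk all MST streams with
 * DSC-capable sinks, compute the DSC configuration once per link (all streams
 * sharing that link are handled together), and add DSC resources to the
 * streams that ended up with DSC enabled.
 */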
bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
				       struct dc_state *dc_state)
{
	int i, j;
	struct dc_stream_state *stream;
	bool computed_streams[MAX_PIPES];
	struct amdgpu_dm_connector *aconnector;

	for (i = 0; i < dc_state->stream_count; i++)
		computed_streams[i] = false;

	for (i = 0; i < dc_state->stream_count; i++) {
		stream = dc_state->streams[i];

		if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST)
			continue;

		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;

		if (!aconnector || !aconnector->dc_sink)
			continue;

		if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
			continue;

		if (computed_streams[i])
			continue;

		mutex_lock(&aconnector->mst_mgr.lock);
		if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
			mutex_unlock(&aconnector->mst_mgr.lock);
			return false;
		}
		mutex_unlock(&aconnector->mst_mgr.lock);

		for (j = 0; j < dc_state->stream_count; j++) {
			if (dc_state->streams[j]->link == stream->link)
				computed_streams[j] = true;
		}
	}

	for (i = 0; i < dc_state->stream_count; i++) {
		stream = dc_state->streams[i];

		if (stream->timing.flags.DSC == 1)
			dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream);
	}

	return true;
}

#endif